diff --git a/.babelrc b/.babelrc new file mode 100644 index 0000000..10823cf --- /dev/null +++ b/.babelrc @@ -0,0 +1,15 @@ +{ + "ignore": [ + "node_modules/**/*.js" + ], + "compact": false, + "retainLines": false, + "presets": [ + ["env", { + "targets": { + "browsers": ["last 2 versions", "safari >= 7"], + "node": "4.0" + } + }] + ] +} diff --git a/.gitattributes b/.gitattributes new file mode 100644 index 0000000..442aed3 --- /dev/null +++ b/.gitattributes @@ -0,0 +1,21 @@ +*.sh text eol=lf +*.bat text eol=crlf +*.php text eol=lf +*.inc text eol=lf +*.html text eol=lf +*.json text eol=lf +*.js text eol=lf +*.css text eol=lf +*.less text eol=lf +*.sass text eol=lf +*.ini text eol=lf +*.txt text eol=lf +*.xml text eol=lf +*.md text eol=lf +*.markdown text eol=lf +*.json5 text eol=lf + +*.pdf binary +*.psd binary +*.pptx binary +*.xlsx binary diff --git a/.gitignore b/.gitignore index 6482f85..a08585b 100644 --- a/.gitignore +++ b/.gitignore @@ -1,7 +1,9 @@ -parser.js +.DS_Store node_modules/ +npm-debug.log # Editor bak files *~ *.bak *.orig + diff --git a/.npmignore b/.npmignore index e69de29..60dcbed 100644 --- a/.npmignore +++ b/.npmignore @@ -0,0 +1,21 @@ +.DS_Store +node_modules/ +npm-debug.log + +# Editor backup files +*.bak +*~ + +# scratch space +/tmp/ + +# Ignore build/publish scripts, etc. +Makefile + +# Sources which are compiled through jison +ebnf.y +bnf.y +bnf.l + +# misc files which are used during development +__patch_*.js diff --git a/.travis.yml b/.travis.yml new file mode 100644 index 0000000..f0913fb --- /dev/null +++ b/.travis.yml @@ -0,0 +1,11 @@ +language: node_js +sudo: false + +node_js: + - 8 + - 7 + - 6 + - 5 + - 4 + - node + diff --git a/LICENSE.md b/LICENSE.md new file mode 100644 index 0000000..e8fcb80 --- /dev/null +++ b/LICENSE.md @@ -0,0 +1,20 @@ +The MIT License (MIT) + +Copyright (c) 2009-2017 Zachary Carter + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
diff --git a/Makefile b/Makefile index b732fab..28d04e4 100644 --- a/Makefile +++ b/Makefile @@ -1,24 +1,81 @@ -all: install build test +JISON_VERSION := $(shell node ../../dist/cli-cjs-es5.js -V 2> /dev/null ) -install: +ifndef JISON_VERSION + JISON = sh node_modules/.bin/jison +else + JISON = node ../../dist/cli-cjs-es5.js +endif + +ROLLUP = node_modules/.bin/rollup +BABEL = node_modules/.bin/babel +MOCHA = node_modules/.bin/mocha + + + + +all: build test + +prep: npm-install + +npm-install: npm install +npm-update: + ncu -a --packageFile=package.json + build: - node ./node_modules/.bin/jison bnf.y bnf.l +ifeq ($(wildcard ./node_modules/.bin/jison),) + $(error "### FAILURE: Make sure you have run 'make prep' before as the jison compiler is unavailable! ###") +endif + + node __patch_version_in_js.js + + $(JISON) -m es bnf.y bnf.l mv bnf.js parser.js - node ./node_modules/.bin/jison ebnf.y + $(JISON) -m es ebnf.y mv ebnf.js transform-parser.js + node __patch_prelude_in_js.js + + -mkdir -p dist + $(ROLLUP) -c + $(BABEL) dist/ebnf-parser-cjs.js -o dist/ebnf-parser-cjs-es5.js + $(BABEL) dist/ebnf-parser-umd.js -o dist/ebnf-parser-umd-es5.js + test: - node tests/all-tests.js + $(MOCHA) --timeout 18000 --check-leaks --globals assert tests/ + + +# increment the XXX number in the package.json file: version ..- +bump: + +git-tag: + +publish: + npm run pub + + clean: + -rm -f parser.js + -rm -f transform-parser.js + -rm -f bnf.js + -rm -f ebnf.js + -rm -rf dist/ + -rm -rf node_modules/ + -rm -f package-lock.json superclean: clean -find . -type d -name 'node_modules' -exec rm -rf "{}" \; + + + + +.PHONY: all prep npm-install build test clean superclean bump git-tag publish npm-update + diff --git a/README.md b/README.md index ea2f316..99b7099 100644 --- a/README.md +++ b/README.md @@ -1,25 +1,51 @@ -# ebnf-parser +# ebnf-parser \[OBSOLETED] + + +[![Join the chat at https://gitter.im/jison-parsers-lexers/Lobby](https://badges.gitter.im/jison-parsers-lexers/Lobby.svg)](https://gitter.im/jison-parsers-lexers/Lobby?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) +[![Build Status](https://travis-ci.org/GerHobbelt/ebnf-parser.svg?branch=master)](https://travis-ci.org/GerHobbelt/ebnf-parser) +[![NPM version](https://badge.fury.io/js/jison-gho.svg)](https://badge.fury.io/js/jison-gho) +[![Dependency Status](https://img.shields.io/david/GerHobbelt/ebnf-parser.svg)](https://david-dm.org/GerHobbelt/ebnf-parser) +[![npm](https://img.shields.io/npm/dm/@gerhobbelt/ebnf-parser.svg?maxAge=2592000)]() + A parser for BNF and EBNF grammars used by jison. + +> +> # deprecation notice +> +> From today (2017/oct/15) the ebnf-parser repository is **obsoleted** +> for the `ebnf-parser` package/codebase: the **primary source** is the +> [jison](https://github.com/GerHobbelt/jison) +> [monorepo](https://medium.com/netscape/the-case-for-monorepos-907c1361708a)'s `packages/ebnf-parser/` +> directory. See also https://github.com/GerHobbelt/jison/issues/16. +> +> (For a comparable argument, see also ["Why is Babel a monorepo?"](https://github.com/babel/babel/blob/master/doc/design/monorepo.md)) +> +> Issues, pull requests, etc. for `ebnf-parser` should be filed there; hence +> we do not accept issue reports in this secondary repository any more. +> +> This repository will track the primary source for a while still, but be +> *very aware* that this particular repository will always be lagging behind! 
+> + + + ## install - npm install ebnf-parser + npm install @gerhobbelt/ebnf-parser ## build -To build the parser yourself, clone the git repo then run: - - make +To build the library yourself, follow the install & build directions of the [monorepo](https://github.com/GerHobbelt/jison). -This will generate `parser.js`, which is required by `ebnf-parser.js`. ## usage The parser translates a string grammar or JSON grammar into a JSON grammar that jison can use (ENBF is transformed into BNF). - var ebnfParser = require('ebnf-parser'); + var ebnfParser = require('@gerhobbelt/ebnf-parser'); // parse a bnf or ebnf string grammar ebnfParser.parse("%start ... %"); @@ -32,186 +58,471 @@ The parser translates a string grammar or JSON grammar into a JSON grammar that The parser can parse its own BNF grammar, shown below: - %start spec - - /* grammar for parsing jison grammar files */ - - %{ - var transform = require('./ebnf-transform').transform; - var ebnf = false; - %} - - %% - - spec - : declaration_list '%%' grammar optional_end_block EOF - {$$ = $1; return extend($$, $3);} - | declaration_list '%%' grammar '%%' CODE EOF - {$$ = $1; yy.addDeclaration($$,{include:$5}); return extend($$, $3);} - ; - - optional_end_block - : - | '%%' - ; - - declaration_list - : declaration_list declaration - {$$ = $1; yy.addDeclaration($$, $2);} - | - {$$ = {};} - ; - - declaration - : START id - {$$ = {start: $2};} - | LEX_BLOCK - {$$ = {lex: $1};} - | operator - {$$ = {operator: $1};} - | ACTION - {$$ = {include: $1};} - ; - - operator - : associativity token_list - {$$ = [$1]; $$.push.apply($$, $2);} - ; - - associativity - : LEFT - {$$ = 'left';} - | RIGHT - {$$ = 'right';} - | NONASSOC - {$$ = 'nonassoc';} - ; - - token_list - : token_list symbol - {$$ = $1; $$.push($2);} - | symbol - {$$ = [$1];} - ; - - grammar - : production_list - {$$ = $1;} - ; - - production_list - : production_list production - {$$ = $1; - if($2[0] in $$) $$[$2[0]] = $$[$2[0]].concat($2[1]); - else $$[$2[0]] = $2[1];} - | production - {$$ = {}; $$[$1[0]] = $1[1];} - ; - - production - : id ':' handle_list ';' - {$$ = [$1, $3];} - ; - - handle_list - : handle_list '|' handle_action - {$$ = $1; $$.push($3);} - | handle_action - {$$ = [$1];} - ; - - handle_action - : handle prec action - {$$ = [($1.length ? 
$1.join(' ') : '')]; - if($3) $$.push($3); - if($2) $$.push($2); - if ($$.length === 1) $$ = $$[0]; +``` +%start spec + +// %parse-param options + + +/* grammar for parsing jison grammar files */ + +%{ +var fs = require('fs'); +var transform = require('./ebnf-transform').transform; +var ebnf = false; +var XRegExp = require('xregexp'); // for helping out the `%options xregexp` in the lexer +%} + +%% + +spec + : declaration_list '%%' grammar optional_end_block EOF + { + $$ = $declaration_list; + if ($optional_end_block && $optional_end_block.trim() !== '') { + yy.addDeclaration($$, { include: $optional_end_block }); + } + return extend($$, $grammar); + } + ; + +optional_end_block + : %empty + | '%%' extra_parser_module_code + { $$ = $extra_parser_module_code; } + ; + +optional_action_header_block + : %empty + { $$ = {}; } + | optional_action_header_block ACTION + { + $$ = $optional_action_header_block; + yy.addDeclaration($$, { actionInclude: $ACTION }); + } + | optional_action_header_block include_macro_code + { + $$ = $optional_action_header_block; + yy.addDeclaration($$, { actionInclude: $include_macro_code }); + } + ; + +declaration_list + : declaration_list declaration + { $$ = $declaration_list; yy.addDeclaration($$, $declaration); } + | %epsilon + { $$ = {}; } + ; + +declaration + : START id + { $$ = {start: $id}; } + | LEX_BLOCK + { $$ = {lex: {text: $LEX_BLOCK, position: @LEX_BLOCK}}; } + | operator + { $$ = {operator: $operator}; } + | TOKEN full_token_definitions + { $$ = {token_list: $full_token_definitions}; } + | ACTION + { $$ = {include: $ACTION}; } + | include_macro_code + { $$ = {include: $include_macro_code}; } + | parse_params + { $$ = {parseParams: $parse_params}; } + | parser_type + { $$ = {parserType: $parser_type}; } + | options + { $$ = {options: $options}; } + | DEBUG + { $$ = {options: [['debug', true]]}; } + | UNKNOWN_DECL + { $$ = {unknownDecl: $UNKNOWN_DECL}; } + | IMPORT import_name import_path + { $$ = {imports: {name: $import_name, path: $import_path}}; } + | INIT_CODE import_name action_ne + { $$ = {initCode: {qualifier: $import_name, include: $action_ne}}; } + ; + +import_name + : ID + | STRING + ; + +import_path + : ID + | STRING + ; + +options + : OPTIONS option_list OPTIONS_END + { $$ = $option_list; } + ; + +option_list + : option_list option + { $$ = $option_list; $$.push($option); } + | option + { $$ = [$option]; } + ; + +option + : NAME[option] + { $$ = [$option, true]; } + | NAME[option] '=' OPTION_VALUE[value] + { $$ = [$option, $value]; } + | NAME[option] '=' NAME[value] + { $$ = [$option, $value]; } + ; + +parse_params + : PARSE_PARAM token_list + { $$ = $token_list; } + ; + +parser_type + : PARSER_TYPE symbol + { $$ = $symbol; } + ; + +operator + : associativity token_list + { $$ = [$associativity]; $$.push.apply($$, $token_list); } + ; + +associativity + : LEFT + { $$ = 'left'; } + | RIGHT + { $$ = 'right'; } + | NONASSOC + { $$ = 'nonassoc'; } + ; + +token_list + : token_list symbol + { $$ = $token_list; $$.push($symbol); } + | symbol + { $$ = [$symbol]; } + ; + +// As per http://www.gnu.org/software/bison/manual/html_node/Token-Decl.html +full_token_definitions + : optional_token_type id_list + { + var rv = []; + var lst = $id_list; + for (var i = 0, len = lst.length; i < len; i++) { + var id = lst[i]; + var m = {id: id}; + if ($optional_token_type) { + m.type = $optional_token_type; + } + rv.push(m); + } + $$ = rv; + } + | optional_token_type one_full_token + { + var m = $one_full_token; + if ($optional_token_type) { + m.type = 
$optional_token_type; + } + $$ = [m]; + } + ; + +one_full_token + : id token_value token_description + { + $$ = { + id: $id, + value: $token_value + }; + } + | id token_description + { + $$ = { + id: $id, + description: $token_description + }; + } + | id token_value + { + $$ = { + id: $id, + value: $token_value, + description: $token_description + }; + } + ; + +optional_token_type + : %epsilon + { $$ = false; } + | TOKEN_TYPE + ; + +token_value + : INTEGER + ; + +token_description + : STRING + ; + +id_list + : id_list id + { $$ = $id_list; $$.push($id); } + | id + { $$ = [$id]; } + ; + +// token_id +// : TOKEN_TYPE id +// { $$ = $id; } +// | id +// { $$ = $id; } +// ; + +grammar + : optional_action_header_block production_list + { + $$ = $optional_action_header_block; + $$.grammar = $production_list; + } + ; + +production_list + : production_list production + { + $$ = $production_list; + if ($production[0] in $$) { + $$[$production[0]] = $$[$production[0]].concat($production[1]); + } else { + $$[$production[0]] = $production[1]; + } + } + | production + { $$ = {}; $$[$production[0]] = $production[1]; } + ; + +production + : id ':' handle_list ';' + {$$ = [$id, $handle_list];} + ; + +handle_list + : handle_list '|' handle_action + { + $$ = $handle_list; + $$.push($handle_action); + } + | handle_action + { + $$ = [$handle_action]; + } + ; + +handle_action + : handle prec action + { + $$ = [($handle.length ? $handle.join(' ') : '')]; + if ($action) { + $$.push($action); } - ; - - handle - : handle expression_suffix - {$$ = $1; $$.push($2)} - | - {$$ = [];} - ; - - handle_sublist - : handle_sublist '|' handle - {$$ = $1; $$.push($3.join(' '));} - | handle - {$$ = [$1.join(' ')];} - ; - - expression_suffix - : expression suffix - {$$ = $expression + $suffix; } - ; - - expression - : ID - {$$ = $1; } - | STRING - {$$ = ebnf ? "'"+$1+"'" : $1; } - | '(' handle_sublist ')' - {$$ = '(' + $handle_sublist.join(' | ') + ')'; } - ; - - suffix - : {$$ = ''} - | '*' - | '?' - | '+' - ; - - prec - : PREC symbol - {$$ = {prec: $2};} - | - {$$ = null;} - ; - - symbol - : id - {$$ = $1;} - | STRING - {$$ = yytext;} - ; - - id - : ID - {$$ = yytext;} - ; - - action - : '{' action_body '}' - {$$ = $2;} - | ACTION - {$$ = $1;} - | ARROW_ACTION - {$$ = '$$ ='+$1+';';} - | - {$$ = '';} - ; - - action_body - : - {$$ = '';} - | ACTION_BODY - {$$ = yytext;} - | action_body '{' action_body '}' ACTION_BODY - {$$ = $1+$2+$3+$4+$5;} - | action_body '{' action_body '}' - {$$ = $1+$2+$3+$4;} - ; - - %% - - // transform ebnf to bnf if necessary - function extend (json, grammar) { - json.bnf = ebnf ? transform(grammar) : grammar; - return json; + if ($prec) { + $$.push($prec); + } + if ($$.length === 1) { + $$ = $$[0]; + } + } + | EPSILON action + // %epsilon may only be used to signal this is an empty rule alt; + // hence it can only occur by itself + // (with an optional action block, but no alias what-so-ever). 
+ { + $$ = ['']; + if ($action) { + $$.push($action); + } + if ($$.length === 1) { + $$ = $$[0]; + } + } + ; + +handle + : handle expression_suffix + { + $$ = $handle; + $$.push($expression_suffix); + } + | %epsilon + { + $$ = []; + } + ; + +handle_sublist + : handle_sublist '|' handle + { + $$ = $handle_sublist; + $$.push($handle.join(' ')); + } + | handle + { + $$ = [$handle.join(' ')]; + } + ; + +expression_suffix + : expression suffix ALIAS + { + $$ = $expression + $suffix + "[" + $ALIAS + "]"; + } + | expression suffix + { + $$ = $expression + $suffix; + } + ; + +expression + : ID + { + $$ = $ID; + } + | STRING + { + // Re-encode the string *anyway* as it will + // be made part of the rule rhs a.k.a. production (type: *string*) again and we want + // to be able to handle all tokens, including *significant space* + // encoded as literal tokens in a grammar such as this: `rule: A ' ' B`. + if ($STRING.indexOf("'") >= 0) { + $$ = '"' + $STRING + '"'; + } else { + $$ = "'" + $STRING + "'"; + } + } + | '(' handle_sublist ')' + { + $$ = '(' + $handle_sublist.join(' | ') + ')'; + } + ; + +suffix + : %epsilon + { $$ = ''; } + | '*' + | '?' + | '+' + ; + +prec + : PREC symbol + { + $$ = { prec: $symbol }; + } + | %epsilon + { + $$ = null; + } + ; + +symbol + : id + { $$ = $id; } + | STRING + { $$ = $STRING; } + ; + +id + : ID + { $$ = $ID; } + ; + +action_ne + : '{' action_body '}' + { $$ = $action_body; } + | ACTION + { $$ = $ACTION; } + | include_macro_code + { $$ = $include_macro_code; } + | ARROW_ACTION + { $$ = '$$ = ' + $ARROW_ACTION; } + ; + +action + : action_ne + { $$ = $action_ne; } + | %epsilon + { $$ = ''; } + ; + +action_body + : %epsilon + { $$ = ''; } + | action_comments_body + { $$ = $action_comments_body; } + | action_body '{' action_body '}' action_comments_body + { $$ = $1 + $2 + $3 + $4 + $5; } + | action_body '{' action_body '}' + { $$ = $1 + $2 + $3 + $4; } + ; + +action_comments_body + : ACTION_BODY + { $$ = $ACTION_BODY; } + | action_comments_body ACTION_BODY + { $$ = $action_comments_body + $ACTION_BODY; } + ; + +extra_parser_module_code + : optional_module_code_chunk + { $$ = $optional_module_code_chunk; } + | optional_module_code_chunk include_macro_code extra_parser_module_code + { $$ = $optional_module_code_chunk + $include_macro_code + $extra_parser_module_code; } + ; + +include_macro_code + : INCLUDE PATH + { + var fileContent = fs.readFileSync($PATH, { encoding: 'utf-8' }); + // And no, we don't support nested '%include': + $$ = '\n// Included by Jison: ' + $PATH + ':\n\n' + fileContent + '\n\n// End Of Include by Jison: ' + $PATH + '\n\n'; + } + | INCLUDE error + { + console.error("%include MUST be followed by a valid file path"); + } + ; + +module_code_chunk + : CODE + { $$ = $CODE; } + | module_code_chunk CODE + { $$ = $module_code_chunk + $CODE; } + ; + +optional_module_code_chunk + : module_code_chunk + { $$ = $module_code_chunk; } + | %epsilon + { $$ = ''; } + ; + +%% + +// transform ebnf to bnf if necessary +function extend(json, grammar) { + json.bnf = ebnf ? 
transform(grammar.grammar) : grammar.grammar;
+    if (grammar.actionInclude) {
+        json.actionInclude = grammar.actionInclude;
+    }
+    return json;
+}
+```
+
+
 ## license
diff --git a/__patch_prelude_in_js.js b/__patch_prelude_in_js.js
new file mode 100644
index 0000000..5f96ce4
--- /dev/null
+++ b/__patch_prelude_in_js.js
@@ -0,0 +1,33 @@
+
+// hack until jison properly supports the `%code imports %{...%}` feature:
+
+const globby = require('globby');
+const fs = require('fs');
+
+const prelude = fs.readFileSync('ebnf-parser-prelude.js', 'utf8');
+
+globby(['parser.js', 'transform-parser.js']).then(paths => {
+    var count = 0;
+
+    //console.log(paths);
+    paths.forEach(path => {
+        var updated = false;
+
+        //console.log('path: ', path);
+
+        var src = fs.readFileSync(path, 'utf8');
+        src = prelude + src.replace(/^[^]+?\/\/ end of prelude/, '');
+        updated = true;
+
+        if (updated) {
+            count++;
+            console.log('updated: ', path);
+            fs.writeFileSync(path, src, {
+                encoding: 'utf8',
+                flag: 'w'
+            });
+        }
+    });
+
+    console.log('\nUpdated', count, 'files\' prelude chunk');
+});
diff --git a/__patch_version_in_js.js b/__patch_version_in_js.js
new file mode 100644
index 0000000..262a54f
--- /dev/null
+++ b/__patch_version_in_js.js
@@ -0,0 +1,37 @@
+
+// fetch the version from package.json and patch the specified files
+
+const version = require('./package.json').version;
+const globby = require('globby');
+const fs = require('fs');
+
+
+globby(['ebnf-parser*.js']).then(paths => {
+    var count = 0;
+
+    //console.log(paths);
+    paths.forEach(path => {
+        var updated = false;
+
+        //console.log('path: ', path);
+
+        var src = fs.readFileSync(path, 'utf8');
+        src = src.replace(/^(\s*var version = )([^;]+;)/gm, function repl(s, m1, m2) {
+            if (m2 !== "'" + version + "';") {
+                updated = true;
+            }
+            return m1 + "'" + version + "';";
+        });
+
+        if (updated) {
+            count++;
+            console.log('updated: ', path);
+            fs.writeFileSync(path, src, {
+                encoding: 'utf8',
+                flag: 'w'
+            });
+        }
+    });
+
+    console.log('\nUpdated', count, 'files\' version info to version', version);
+});
diff --git a/bnf.l b/bnf.l
index 2433772..f12f8fa 100644
--- a/bnf.l
+++ b/bnf.l
@@ -1,58 +1,349 @@
-id [a-zA-Z][a-zA-Z0-9_-]*
+%code imports %{
+    import helpers from 'jison-helpers-lib';
+%}
-
-%x action code
+
+
+ASCII_LETTER [a-zA-Z]
+// \p{Alphabetic} already includes [a-zA-Z], hence we don't need to merge
+// with {UNICODE_LETTER} (though jison has code to optimize if you *did*
+// include the `[a-zA-Z]` anyway):
+UNICODE_LETTER [\p{Alphabetic}]
+ALPHA [{UNICODE_LETTER}_]
+DIGIT [\p{Number}]
+WHITESPACE [\s\r\n\p{Separator}]
+ALNUM [{ALPHA}{DIGIT}]
+
+NAME [{ALPHA}](?:[{ALNUM}-]*{ALNUM})?
+ID [{ALPHA}]{ALNUM}*
+DECIMAL_NUMBER [1-9][0-9]*
+HEX_NUMBER "0"[xX][0-9a-fA-F]+
+BR \r\n|\n|\r
+// WhiteSpace MUST NOT match CR/LF and the regex `\s` DOES, so we cannot use
+// that one directly. Instead we define the {WS} macro here:
+WS [^\S\r\n]
+
+// Quoted string content: support *escaped* quotes inside strings:
+QUOTED_STRING_CONTENT (?:\\\'|\\[^\']|[^\\\'\r\n])*
+DOUBLEQUOTED_STRING_CONTENT (?:\\\"|\\[^\"]|[^\\\"\r\n])*
+// backquoted ES6/ES2017 string templates MAY span multiple lines:
+ES2017_STRING_CONTENT (?:\\\`|\\[^\`]|[^\\\`])*
+
+// Regex for matching all the possible stuff which can be placed between those `%lex.../lex` markers:
+// multiple lines of arbitrary material. Use a non-greedy `*?` in there to ensure that the regex
+// doesn't also consume the terminating `/lex` token!
+LEX_CONTENT {WS}*(?:{BR}[^]*?)?{BR}{WS}* + + + +%x action code path options option_values +%s token %s bnf ebnf + + +%options easy_keyword_rules +%options ranges +%options xregexp + + + %% -"%%" this.pushState('code');return '%%'; - -"(" return '('; -")" return ')'; -"*" return '*'; -"?" return '?'; -"+" return '+'; - -\s+ /* skip whitespace */ -"//".* /* skip comment */ -"/*"(.|\n|\r)*?"*/" /* skip comment */ -"["{id}"]" yytext = yytext.substr(1, yyleng-2); return 'ALIAS'; -{id} return 'ID'; -'"'[^"]+'"' yytext = yytext.substr(1, yyleng-2); return 'STRING'; -"'"[^']+"'" yytext = yytext.substr(1, yyleng-2); return 'STRING'; -":" return ':'; -";" return ';'; -"|" return '|'; -"%%" this.pushState(ebnf ? 'ebnf' : 'bnf'); return '%%'; -"%ebnf" if (!yy.options) yy.options = {}; ebnf = yy.options.ebnf = true; -"%prec" return 'PREC'; -"%start" return 'START'; -"%left" return 'LEFT'; -"%right" return 'RIGHT'; -"%nonassoc" return 'NONASSOC'; -"%parse-param" return 'PARSE_PARAM'; -"%options" return 'OPTIONS'; -"%lex"[\w\W]*?"/lex" return 'LEX_BLOCK'; -"%"[a-zA-Z]+[^\r\n]* /* ignore unrecognized decl */ -"<"[a-zA-Z]*">" /* ignore type */ -"{{"[\w\W]*?"}}" yytext = yytext.substr(2, yyleng-4); return 'ACTION'; -"%{"(.|\r|\n)*?"%}" yytext = yytext.substr(2, yytext.length-4); return 'ACTION'; -"{" yy.depth = 0; this.pushState('action'); return '{'; -"->".* yytext = yytext.substr(2, yyleng-2); return 'ARROW_ACTION'; -. /* ignore bad characters */ -<*><> return 'EOF'; - -"/*"(.|\n|\r)*?"*/" return 'ACTION_BODY'; -"//".* return 'ACTION_BODY'; -"/"[^ /]*?['"{}'][^ ]*?"/" return 'ACTION_BODY'; // regexp with braces or quotes (and no spaces) -\"("\\\\"|'\"'|[^"])*\" return 'ACTION_BODY'; -"'"("\\\\"|"\'"|[^'])*"'" return 'ACTION_BODY'; -[/"'][^{}/"']+ return 'ACTION_BODY'; -[^{}/"']+ return 'ACTION_BODY'; -"{" yy.depth++; return '{'; -"}" if (yy.depth==0) this.begin(ebnf ? 'ebnf' : 'bnf'); else yy.depth--; return '}'; - -(.|\n|\r)+ return 'CODE'; +"/*"[^]*?"*/" return 'ACTION_BODY'; +"//"[^\r\n]* return 'ACTION_BODY'; +"/"[^ /]*?['"{}][^ ]*?"/" return 'ACTION_BODY'; // regexp with braces or quotes (and no spaces) +\"{DOUBLEQUOTED_STRING_CONTENT}\" + return 'ACTION_BODY'; +\'{QUOTED_STRING_CONTENT}\' + return 'ACTION_BODY'; +[/"'][^{}/"']+ return 'ACTION_BODY'; +[^{}/"']+ return 'ACTION_BODY'; +"{" yy.depth++; return '{'; +"}" if (yy.depth === 0) { + this.popState(); + } else { + yy.depth--; + } + return '}'; + +{BR} this.popState(); +"%%" this.popState(); +";" this.popState(); + +"%%" this.pushState('code'); + return '%%'; + +// Support bison's `%empty` (and our own alias `%epsilon`) to identify an empty rule alt: +"%empty" return 'EPSILON'; +"%epsilon" return 'EPSILON'; +// See also https://en.wikipedia.org/wiki/Epsilon#Glyph_variants +"\u0190" return 'EPSILON'; +"\u025B" return 'EPSILON'; +"\u03B5" return 'EPSILON'; +"\u03F5" return 'EPSILON'; + +"(" return '('; +")" return ')'; +"*" return '*'; +"?" 
return '?'; +"+" return '+'; + +{NAME} return 'NAME'; +"=" this.pushState('option_values'); + return '='; +{ + +\"{DOUBLEQUOTED_STRING_CONTENT}\" + yytext = unescQuote(this.matches[1], /\\"/g); + this.popState(); + return 'OPTION_STRING_VALUE'; // value is always a string type +\'{QUOTED_STRING_CONTENT}\' + yytext = unescQuote(this.matches[1], /\\'/g); + this.popState(); + return 'OPTION_STRING_VALUE'; // value is always a string type +\`{ES2017_STRING_CONTENT}\` + yytext = unescQuote(this.matches[1], /\\`/g); + this.popState(); + return 'OPTION_STRING_VALUE'; // value is always a string type + +} + +// Comments should be gobbled and discarded anywhere *except* the code/action blocks: +"//"[^\r\n]* + /* skip single-line comment */ +"/*"[^]*?"*/" + /* skip multi-line comment */ + +[^\s\r\n]+ this.popState(); + return 'OPTION_VALUE'; + +{BR}{WS}+(?=\S) /* skip leading whitespace on the next line of input, when followed by more options */ +{BR} this.popState(); return 'OPTIONS_END'; +{WS}+ /* skip whitespace */ + +{WS}+ /* skip whitespace */ +{BR}+ /* skip newlines */ + +"["{ID}"]" yytext = this.matches[1]; return 'ALIAS'; +{ID} return 'ID'; +{NAME} return 'NAME'; +"$end" return 'EOF_ID'; +// `$eof` and `EOF` are synonyms of `$end` ('$eof' is for bison compatibility); +// this is the only place where two symbol names may map to a single symbol ID number +// and we do not want `$eof`/`EOF` to show up in the symbol tables of generated parsers +// as we use `$end` for that one! +"$eof" return 'EOF_ID'; + +\"{DOUBLEQUOTED_STRING_CONTENT}\" %{ + yytext = unescQuote(this.matches[1], /\\"/g); + return 'STRING'; + %} +\'{QUOTED_STRING_CONTENT}\' %{ + yytext = unescQuote(this.matches[1], /\\'/g); + return 'STRING'; + %} + +[^\s\r\n]+ return 'TOKEN_WORD'; +":" return ':'; +";" return ';'; +"|" return '|'; +"%%" this.pushState(yy.ebnf ? 'ebnf' : 'bnf'); return '%%'; +"%ebnf" yy.ebnf = true; return 'EBNF'; +"%debug" return 'DEBUG'; +"%parser-type" return 'PARSER_TYPE'; +"%prec" return 'PREC'; +"%start" return 'START'; +"%left" return 'LEFT'; +"%right" return 'RIGHT'; +"%nonassoc" return 'NONASSOC'; +"%token" this.pushState('token'); return 'TOKEN'; +"%parse-param" return 'PARSE_PARAM'; +"%options" this.pushState('options'); return 'OPTIONS'; +"%lex"{LEX_CONTENT}"/lex" %{ + // remove the %lex../lex wrapper and return the pure lex section: + yytext = this.matches[1]; + return 'LEX_BLOCK'; + %} + +"%code" return 'INIT_CODE'; +"%import" return 'IMPORT'; +"%include" this.pushState('path'); + return 'INCLUDE'; + +"%"{NAME}([^\r\n]*) %{ + /* ignore unrecognized decl */ + this.warn(rmCommonWS` + EBNF: ignoring unsupported parser option ${dquote(yytext)} + while lexing in ${dquote(this.topState())} state. 
+ + Erroneous area: + ` + this.prettyPrintRange(yylloc)); + yytext = [ + this.matches[1], // {NAME} + this.matches[2].trim() // optional value/parameters + ]; + return 'UNKNOWN_DECL'; + %} +"<"{ID}">" yytext = this.matches[1]; + return 'TOKEN_TYPE'; +"{{"([^]*?)"}}" yytext = this.matches[1].replace(/\}\\\}/g, '}}'); // unescape any literal '}\}' that exists within the action code block + return 'ACTION'; +"%{"([^]*?)"%}" yytext = this.matches[1].replace(/%\\\}/g, '%}'); // unescape any literal '%\}' that exists within the action code block + return 'ACTION'; +"{" yy.depth = 0; this.pushState('action'); + return '{'; +"->".* yytext = yytext.substr(2, yyleng - 2).trim(); + return 'ARROW_ACTION'; +"→".* yytext = yytext.substr(1, yyleng - 1).trim(); + return 'ARROW_ACTION'; +"=>".* yytext = yytext.substr(2, yyleng - 2).trim(); + return 'ARROW_ACTION'; +{HEX_NUMBER} yytext = parseInt(yytext, 16); return 'INTEGER'; +{DECIMAL_NUMBER}(?![xX0-9a-fA-F]) yytext = parseInt(yytext, 10); return 'INTEGER'; + + +// in the trailing CODE block, only accept these `%include` macros when +// they appear at the start of a line and make sure the rest of lexer +// regexes account for this one so it'll match that way only: +[^\r\n]*(\r|\n)+ return 'CODE'; +[^\r\n]+ return 'CODE'; // the bit of CODE just before EOF... + + +{BR} this.popState(); this.unput(yytext); + +\"{DOUBLEQUOTED_STRING_CONTENT}\" + yytext = unescQuote(this.matches[1]); + this.popState(); + return 'PATH'; +\'{QUOTED_STRING_CONTENT}\' + yytext = unescQuote(this.matches[1]); + this.popState(); + return 'PATH'; + +{WS}+ // skip whitespace in the line +[^\s\r\n]+ this.popState(); + return 'PATH'; + + +// detect and report unterminated string constants ASAP +// for 'action', 'options', but also for other lexer conditions: +// +// these error catching rules fix https://github.com/GerHobbelt/jison/issues/13 +\" yyerror(rmCommonWS` + unterminated string constant in lexer rule action block. + + Erroneous area: + ` + this.prettyPrintRange(yylloc)); + return 'error'; +\' yyerror(rmCommonWS` + unterminated string constant in lexer rule action block. + + Erroneous area: + ` + this.prettyPrintRange(yylloc)); + return 'error'; +\` yyerror(rmCommonWS` + unterminated string constant in lexer rule action block. + + Erroneous area: + ` + this.prettyPrintRange(yylloc)); + return 'error'; + +\" yyerror(rmCommonWS` + unterminated string constant in %options entry. + + Erroneous area: + ` + this.prettyPrintRange(yylloc)); + return 'error'; +\' yyerror(rmCommonWS` + unterminated string constant in %options entry. + + Erroneous area: + ` + this.prettyPrintRange(yylloc)); + return 'error'; +\` yyerror(rmCommonWS` + unterminated string constant in %options entry. + + Erroneous area: + ` + this.prettyPrintRange(yylloc)); + return 'error'; + +<*>\" var rules = (this.topState() === 'macro' ? 'macro\'s' : this.topState()); + yyerror(rmCommonWS` + unterminated string constant encountered while lexing + ${rules}. + + Erroneous area: + ` + this.prettyPrintRange(yylloc)); + return 'error'; +<*>\' var rules = (this.topState() === 'macro' ? 'macro\'s' : this.topState()); + yyerror(rmCommonWS` + unterminated string constant encountered while lexing + ${rules}. + + Erroneous area: + ` + this.prettyPrintRange(yylloc)); + return 'error'; +<*>\` var rules = (this.topState() === 'macro' ? 'macro\'s' : this.topState()); + yyerror(rmCommonWS` + unterminated string constant encountered while lexing + ${rules}. 
+ + Erroneous area: + ` + this.prettyPrintRange(yylloc)); + return 'error'; + + +<*>. %{ + /* b0rk on bad characters */ + yyerror(rmCommonWS` + unsupported parser input: ${dquote(yytext)} + while lexing in ${dquote(this.topState())} state. + + Erroneous area: + ` + this.prettyPrintRange(yylloc)); + %} + +<*><> return 'EOF'; %% + + +var rmCommonWS = helpers.rmCommonWS; +var dquote = helpers.dquote; + + +function indent(s, i) { + var a = s.split('\n'); + var pf = (new Array(i + 1)).join(' '); + return pf + a.join('\n' + pf); +} + +// unescape a string value which is wrapped in quotes/doublequotes +function unescQuote(str) { + str = '' + str; + var a = str.split('\\\\'); + a = a.map(function (s) { + return s.replace(/\\'/g, "'").replace(/\\"/g, '"'); + }); + str = a.join('\\\\'); + return str; +} + + +lexer.warn = function l_warn() { + if (this.yy && this.yy.parser && typeof this.yy.parser.warn === 'function') { + return this.yy.parser.warn.apply(this, arguments); + } else { + console.warn.apply(console, arguments); + } +}; + +lexer.log = function l_log() { + if (this.yy && this.yy.parser && typeof this.yy.parser.log === 'function') { + return this.yy.parser.log.apply(this, arguments); + } else { + console.log.apply(console, arguments); + } +}; diff --git a/bnf.y b/bnf.y index c5f45ee..2bd239c 100644 --- a/bnf.y +++ b/bnf.y @@ -1,216 +1,954 @@ + +%code imports %{ + import XRegExp from '@gerhobbelt/xregexp'; // for helping out the `%options xregexp` in the lexer + import helpers from 'jison-helpers-lib'; + import fs from 'fs'; + import transform from './ebnf-transform'; +%} + + + %start spec +// %parse-param options + + /* grammar for parsing jison grammar files */ %{ -var transform = require('./ebnf-transform').transform; var ebnf = false; %} + +%code error_recovery_reduction %{ + // Note: + // + // This code section is specifically targetting error recovery handling in the + // generated parser when the error recovery is unwinding the parse stack to arrive + // at the targeted error handling production rule. + // + // This code is treated like any production rule action code chunk: + // Special variables `$$`, `$@`, etc. are recognized, while the 'rule terms' can be + // addressed via `$n` macros as in usual rule actions, only here we DO NOT validate + // their usefulness as the 'error reduce action' accepts a variable number of + // production terms (available in `yyrulelength` in case you wish to address the + // input terms directly in the `yyvstack` and `yylstack` arrays, for instance). + // + // This example recovery rule simply collects all parse info stored in the parse + // stacks and which would otherwise be discarded immediately after this call, thus + // keeping all parse info details up to the point of actual error RECOVERY available + // to userland code in the handling 'error rule' in this grammar. +%} + + %% spec : declaration_list '%%' grammar optional_end_block EOF { - $$ = $1; - return extend($$, $3); + $$ = $declaration_list; + if ($optional_end_block.trim() !== '') { + yy.addDeclaration($$, { include: $optional_end_block }); + } + return extend($$, $grammar); + } + | declaration_list '%%' grammar error EOF + { + yyerror(rmCommonWS` + Maybe you did not correctly separate trailing code from the grammar rule set with a '%%' marker on an otherwise empty line? 
+ + Erroneous area: + ${yylexer.prettyPrintRange(@error, @grammar)} + `); } - | declaration_list '%%' grammar '%%' CODE EOF + | declaration_list error EOF { - $$ = $1; - yy.addDeclaration($$, { include: $5 }); - return extend($$, $3); + yyerror(rmCommonWS` + Maybe you did not correctly separate the parse 'header section' (token definitions, options, lexer spec, etc.) from the grammar rule set with a '%%' on an otherwise empty line? + + Erroneous area: + ${yylexer.prettyPrintRange(@error, @declaration_list)} + `); } ; optional_end_block - : - | '%%' + : %empty + { $$ = ''; } + | '%%' extra_parser_module_code + { + var rv = checkActionBlock($extra_parser_module_code, @extra_parser_module_code); + if (rv) { + yyerror(rmCommonWS` + The extra parser module code section (a.k.a. 'epilogue') does not compile: ${rv} + + Erroneous area: + ${yylexer.prettyPrintRange(@extra_parser_module_code)} + `); + } + $$ = $extra_parser_module_code; + } + ; + +optional_action_header_block + : %empty + { $$ = {}; } + | optional_action_header_block ACTION + { + $$ = $optional_action_header_block; + var rv = checkActionBlock($ACTION, @ACTION); + if (rv) { + yyerror(rmCommonWS` + action header code block does not compile: ${rv} + + Erroneous area: + ${yylexer.prettyPrintRange(@ACTION)} + `); + } + yy.addDeclaration($$, { actionInclude: $ACTION }); + } + | optional_action_header_block include_macro_code + { + $$ = $optional_action_header_block; + var rv = checkActionBlock($include_macro_code, @include_macro_code); + if (rv) { + yyerror(rmCommonWS` + action header code block does not compile: ${rv} + + Erroneous area: + ${yylexer.prettyPrintRange(@include_macro_code)} + `); + } + yy.addDeclaration($$, { actionInclude: $include_macro_code }); + } ; declaration_list : declaration_list declaration - {$$ = $1; yy.addDeclaration($$, $2);} - | - {$$ = {};} + { $$ = $declaration_list; yy.addDeclaration($$, $declaration); } + | %epsilon + { $$ = {}; } + | declaration_list error + { + // TODO ... + yyerror(rmCommonWS` + declaration list error? 
+ + Erroneous area: + ${yylexer.prettyPrintRange(@error, @declaration_list)} + `); + } ; declaration : START id - {$$ = {start: $2};} + { $$ = {start: $id}; } | LEX_BLOCK - {$$ = {lex: $1};} + { $$ = {lex: {text: $LEX_BLOCK, position: @LEX_BLOCK}}; } | operator - {$$ = {operator: $1};} + { $$ = {operator: $operator}; } + | TOKEN full_token_definitions + { $$ = {token_list: $full_token_definitions}; } | ACTION - {$$ = {include: $1};} - | parse_param - {$$ = {parseParam: $1};} + { + var rv = checkActionBlock($ACTION, @ACTION); + if (rv) { + yyerror(rmCommonWS` + action code block does not compile: ${rv} + + Erroneous area: + ${yylexer.prettyPrintRange(@ACTION)} + `); + } + $$ = {include: $ACTION}; + } + | include_macro_code + { + var rv = checkActionBlock($include_macro_code, @include_macro_code); + if (rv) { + yyerror(rmCommonWS` + action header code block does not compile: ${rv} + + Erroneous area: + ${yylexer.prettyPrintRange(@include_macro_code)} + `); + } + $$ = {include: $include_macro_code}; + } + | parse_params + { $$ = {parseParams: $parse_params}; } + | parser_type + { $$ = {parserType: $parser_type}; } | options - {$$ = {options: $1};} + { $$ = {options: $options}; } + | DEBUG + { $$ = {options: [['debug', true]]}; } + | EBNF + { + ebnf = true; + $$ = {options: [['ebnf', true]]}; + } + | UNKNOWN_DECL + { $$ = {unknownDecl: $UNKNOWN_DECL}; } + | IMPORT import_name import_path + { $$ = {imports: {name: $import_name, path: $import_path}}; } + | IMPORT import_name error + { + yyerror(rmCommonWS` + You did not specify a legal file path for the '%import' initialization code statement, which must have the format: + + %import qualifier_name file_path + + Erroneous area: + ${yylexer.prettyPrintRange(@error, @IMPORT)} + `); + } + | IMPORT error import_path + { + yyerror(rmCommonWS` + Each '%import'-ed initialization code section must be qualified by a name, e.g. 'required' before the import path itself: + + %import qualifier_name file_path + + Erroneous area: + ${yylexer.prettyPrintRange(@error, @IMPORT)} + `); + } + | INIT_CODE init_code_name action_ne + { + var rv = checkActionBlock($action_ne, @action_ne); + if (rv) { + yyerror(rmCommonWS` + %code "${$init_code_name}" initialization section action code block does not compile: ${rv} + + Erroneous area: + ${yylexer.prettyPrintRange(@action_ne, @INIT_CODE)} + `); + } + $$ = { + initCode: { + qualifier: $init_code_name, + include: $action_ne + } + }; + } + | INIT_CODE error action_ne + { + yyerror(rmCommonWS` + Each '%code' initialization code section must be qualified by a name, e.g. 'required' before the action code itself: + + %code qualifier_name {action code} + + Erroneous area: + ${yylexer.prettyPrintRange(@error, @INIT_CODE, @action_ne)} + `); + } + | START error + { + // TODO ... + yyerror(rmCommonWS` + %start token error? + + Erroneous area: + ${yylexer.prettyPrintRange(@error, @START)} + `); + } + | TOKEN error + { + // TODO ... + yyerror(rmCommonWS` + %token definition list error? + + Erroneous area: + ${yylexer.prettyPrintRange(@error, @TOKEN)} + `); + } + | IMPORT error + { + // TODO ... + yyerror(rmCommonWS` + %import name or source filename missing maybe? 
+ + Erroneous area: + ${yylexer.prettyPrintRange(@error, @IMPORT)} + `); + } +// | INIT_CODE error + ; + +init_code_name + : ID + { $$ = $ID; } + | NAME + { $$ = $NAME; } + | STRING + { $$ = $STRING; } + ; + +import_name + : ID + { $$ = $ID; } + | STRING + { $$ = $STRING; } + ; + +import_path + : ID + { $$ = $ID; } + | STRING + { $$ = $STRING; } ; options - : OPTIONS token_list - {$$ = $2;} + : OPTIONS option_list OPTIONS_END + { $$ = $option_list; } + | OPTIONS error OPTIONS_END + { + // TODO ... + yyerror(rmCommonWS` + %options ill defined / error? + + Erroneous area: + ${yylexer.prettyPrintRange(@error, @OPTIONS, @OPTIONS_END)} + `); + } + | OPTIONS error + { + // TODO ... + yyerror(rmCommonWS` + %options don't seem terminated? + + Erroneous area: + ${yylexer.prettyPrintRange(@error, @OPTIONS)} + `); + } + ; + +option_list + : option_list option + { $$ = $option_list; $$.push($option); } + | option + { $$ = [$option]; } + ; + +option + : NAME[option] + { $$ = [$option, true]; } + | NAME[option] '=' OPTION_STRING_VALUE[value] + { $$ = [$option, $value]; } + | NAME[option] '=' OPTION_VALUE[value] + { $$ = [$option, parseValue($value)]; } + | NAME[option] '=' NAME[value] + { $$ = [$option, parseValue($value)]; } + | NAME[option] '=' error + { + // TODO ... + yyerror(rmCommonWS` + named %option value error for ${$option}? + + Erroneous area: + ${yylexer.prettyPrintRange(@error, @option)} + `); + } + | NAME[option] error + { + // TODO ... + yyerror(rmCommonWS` + named %option value assignment error? + + Erroneous area: + ${yylexer.prettyPrintRange(@error, @option)} + `); + } ; -parse_param +parse_params : PARSE_PARAM token_list - {$$ = $2;} + { $$ = $token_list; } + | PARSE_PARAM error + { + // TODO ... + yyerror(rmCommonWS` + %parse-params declaration error? + + Erroneous area: + ${yylexer.prettyPrintRange(@error, @PARSE_PARAM)} + `); + } + ; + +parser_type + : PARSER_TYPE symbol + { $$ = $symbol; } + | PARSER_TYPE error + { + // TODO ... + yyerror(rmCommonWS` + %parser-type declaration error? + + Erroneous area: + ${yylexer.prettyPrintRange(@error, @PARSER_TYPE)} + `); + } ; operator : associativity token_list - {$$ = [$1]; $$.push.apply($$, $2);} + { $$ = [$associativity]; $$.push.apply($$, $token_list); } + | associativity error + { + // TODO ... + yyerror(rmCommonWS` + operator token list error in an associativity statement? 
+ + Erroneous area: + ${yylexer.prettyPrintRange(@error, @associativity)} + `); + } ; associativity : LEFT - {$$ = 'left';} + { $$ = 'left'; } | RIGHT - {$$ = 'right';} + { $$ = 'right'; } | NONASSOC - {$$ = 'nonassoc';} + { $$ = 'nonassoc'; } ; token_list : token_list symbol - {$$ = $1; $$.push($2);} + { $$ = $token_list; $$.push($symbol); } | symbol - {$$ = [$1];} + { $$ = [$symbol]; } + ; + +// As per http://www.gnu.org/software/bison/manual/html_node/Token-Decl.html +full_token_definitions + : optional_token_type id_list + { + var rv = []; + var lst = $id_list; + for (var i = 0, len = lst.length; i < len; i++) { + var id = lst[i]; + var m = {id: id}; + if ($optional_token_type) { + m.type = $optional_token_type; + } + rv.push(m); + } + $$ = rv; + } + | optional_token_type one_full_token + { + var m = $one_full_token; + if ($optional_token_type) { + m.type = $optional_token_type; + } + $$ = [m]; + } + ; + +one_full_token + : id token_value token_description + { + $$ = { + id: $id, + value: $token_value, + description: $token_description + }; + } + | id token_description + { + $$ = { + id: $id, + description: $token_description + }; + } + | id token_value + { + $$ = { + id: $id, + value: $token_value + }; + } + ; + +optional_token_type + : %epsilon + { $$ = false; } + | TOKEN_TYPE + { $$ = $TOKEN_TYPE; } + ; + +token_value + : INTEGER + { $$ = $INTEGER; } + ; + +token_description + : STRING + { $$ = $STRING; } + ; + +id_list + : id_list id + { $$ = $id_list; $$.push($id); } + | id + { $$ = [$id]; } ; +// token_id +// : TOKEN_TYPE id +// { $$ = $id; } +// | id +// { $$ = $id; } +// ; + grammar - : production_list - {$$ = $1;} + : optional_action_header_block production_list + { + $$ = $optional_action_header_block; + $$.grammar = $production_list; + } ; production_list : production_list production { - $$ = $1; - if ($2[0] in $$) - $$[$2[0]] = $$[$2[0]].concat($2[1]); - else - $$[$2[0]] = $2[1]; + $$ = $production_list; + if ($production[0] in $$) { + $$[$production[0]] = $$[$production[0]].concat($production[1]); + } else { + $$[$production[0]] = $production[1]; + } } | production - {$$ = {}; $$[$1[0]] = $1[1];} + { $$ = {}; $$[$production[0]] = $production[1]; } ; production - : id ':' handle_list ';' - {$$ = [$1, $3];} + : production_id handle_list ';' + {$$ = [$production_id, $handle_list];} + | production_id error ';' + { + // TODO ... + yyerror(rmCommonWS` + rule production declaration error? + + Erroneous area: + ${yylexer.prettyPrintRange(@error, @production_id)} + `); + } + | production_id error + { + // TODO ... + yyerror(rmCommonWS` + rule production declaration error: did you terminate the rule production set with a semicolon? + + Erroneous area: + ${yylexer.prettyPrintRange(@error, @production_id)} + `); + } + ; + +production_id + : id optional_production_description ':' + { + $$ = $id; + + // TODO: carry rule description support into the parser generator... + } + | id optional_production_description error + { + // TODO ... + yyerror(rmCommonWS` + rule id should be followed by a colon, but that one seems missing? + + Erroneous area: + ${yylexer.prettyPrintRange(@error, @id)} + `); + } + ; + +optional_production_description + : STRING + { $$ = $STRING; } + | %epsilon ; handle_list : handle_list '|' handle_action - {$$ = $1; $$.push($3);} + { + $$ = $handle_list; + $$.push($handle_action); + } | handle_action - {$$ = [$1];} + { + $$ = [$handle_action]; + } + | handle_list '|' error + { + // TODO ... + yyerror(rmCommonWS` + rule alternative production declaration error? 
+ + Erroneous area: + ${yylexer.prettyPrintRange(@error, @handle_list)} + `); + } + | handle_list ':' error + { + // TODO ... + yyerror(rmCommonWS` + multiple alternative rule productions should be separated by a '|' pipe character, not a ':' colon! + + Erroneous area: + ${yylexer.prettyPrintRange(@error, @handle_list)} + `); + } ; handle_action : handle prec action { - $$ = [($1.length ? $1.join(' ') : '')]; - if($3) $$.push($3); - if($2) $$.push($2); - if ($$.length === 1) $$ = $$[0]; + $$ = [($handle.length ? $handle.join(' ') : '')]; + if ($action) { + var rv = checkActionBlock($action, @action); + if (rv) { + yyerror(rmCommonWS` + production rule action code block does not compile: ${rv} + + Erroneous area: + ${yylexer.prettyPrintRange(@action, @handle)} + `); + } + $$.push($action); + } + if ($prec) { + if ($handle.length === 0) { + yyerror(rmCommonWS` + You cannot specify a precedence override for an epsilon (a.k.a. empty) rule! + + Erroneous area: + ${yylexer.prettyPrintRange(@handle)} + `); + } + $$.push($prec); + } + if ($$.length === 1) { + $$ = $$[0]; + } + } + | EPSILON action + // %epsilon may only be used to signal this is an empty rule alt; + // hence it can only occur by itself + // (with an optional action block, but no alias what-so-ever nor any precedence override). + { + $$ = ['']; + if ($action) { + var rv = checkActionBlock($action, @action); + if (rv) { + yyerror(rmCommonWS` + epsilon production rule action code block does not compile: ${rv} + + Erroneous area: + ${yylexer.prettyPrintRange(@action, @EPSILON)} + `); + } + $$.push($action); + } + if ($$.length === 1) { + $$ = $$[0]; + } + } + | EPSILON error + { + // TODO ... + yyerror(rmCommonWS` + %epsilon rule action declaration error? + + Erroneous area: + ${yylexer.prettyPrintRange(@error, @EPSILON)} + `); } ; handle - : handle expression_suffix - {$$ = $1; $$.push($2)} - | - {$$ = [];} + : handle suffixed_expression + { + $$ = $handle; + $$.push($suffixed_expression); + } + | %epsilon + { + $$ = []; + } ; handle_sublist : handle_sublist '|' handle - {$$ = $1; $$.push($3.join(' '));} + { + $$ = $handle_sublist; + $$.push($handle.join(' ')); + } | handle - {$$ = [$1.join(' ')];} + { + $$ = [$handle.join(' ')]; + } ; -expression_suffix +suffixed_expression : expression suffix ALIAS - {$$ = $expression + $suffix + "[" + $ALIAS + "]"; } + { + $$ = $expression + $suffix + "[" + $ALIAS + "]"; + } | expression suffix - {$$ = $expression + $suffix; } + { + $$ = $expression + $suffix; + } ; expression : ID - {$$ = $1; } + { + $$ = $ID; + } + | EOF_ID + { + $$ = '$end'; + } | STRING - {$$ = ebnf ? "'" + $1 + "'" : $1; } + { + // Re-encode the string *anyway* as it will + // be made part of the rule rhs a.k.a. production (type: *string*) again and we want + // to be able to handle all tokens, including *significant space* + // encoded as literal tokens in a grammar such as this: `rule: A ' ' B`. + $$ = dquote($STRING); + } | '(' handle_sublist ')' - {$$ = '(' + $handle_sublist.join(' | ') + ')'; } + { + $$ = '(' + $handle_sublist.join(' | ') + ')'; + } + | '(' handle_sublist error + { + yyerror(rmCommonWS` + Seems you did not correctly bracket a grammar rule sublist in '( ... )' brackets. + + Erroneous area: + ${yylexer.prettyPrintRange(@error, @1)} + `); + } ; suffix - : {$$ = ''} + : %epsilon + { $$ = ''; } | '*' + { $$ = $1; } | '?' + { $$ = $1; } | '+' + { $$ = $1; } ; prec : PREC symbol - {$$ = {prec: $2};} - | - {$$ = null;} + { + $$ = { prec: $symbol }; + } + | PREC error + { + // TODO ... 
+ yyerror(rmCommonWS` + %prec precedence override declaration error? + + Erroneous precedence declaration: + ${yylexer.prettyPrintRange(@error, @PREC)} + `); + } + | %epsilon + { + $$ = null; + } ; symbol : id - {$$ = $1;} + { $$ = $id; } | STRING - {$$ = yytext;} + { $$ = $STRING; } ; id : ID - {$$ = yytext;} + { $$ = $ID; } ; -action +action_ne : '{' action_body '}' - {$$ = $2;} + { $$ = $action_body; } + | '{' action_body error + { + yyerror(rmCommonWS` + Seems you did not correctly bracket a parser rule action block in curly braces: '{ ... }'. + + Erroneous area: + ${yylexer.prettyPrintRange(@error, @1)} + `); + } | ACTION - {$$ = $1;} + { $$ = $ACTION; } + | include_macro_code + { $$ = $include_macro_code; } + ; + +action + : action_ne + { $$ = $action_ne; } | ARROW_ACTION - {$$ = '$$ =' + $1 + ';';} - | - {$$ = '';} + { $$ = '$$ = ' + $ARROW_ACTION; } + | %epsilon + { $$ = ''; } ; action_body - : - {$$ = '';} + : %epsilon + { $$ = ''; } | action_comments_body - {$$ = $1;} + { $$ = $action_comments_body; } | action_body '{' action_body '}' action_comments_body - {$$ = $1 + $2 + $3 + $4 + $5;} + { $$ = $1 + $2 + $3 + $4 + $5; } | action_body '{' action_body '}' - {$$ = $1 + $2 + $3 + $4;} + { $$ = $1 + $2 + $3 + $4; } + | action_body '{' action_body error + { + yyerror(rmCommonWS` + Seems you did not correctly match curly braces '{ ... }' in a parser rule action block. + + Erroneous area: + ${yylexer.prettyPrintRange(@error, @2)} + `); + } ; action_comments_body : ACTION_BODY - { $$ = yytext; } + { $$ = $ACTION_BODY; } | action_comments_body ACTION_BODY - { $$ = $1+$2; } + { $$ = $action_comments_body + $ACTION_BODY; } + ; + +extra_parser_module_code + : optional_module_code_chunk + { + $$ = $optional_module_code_chunk; + } + | optional_module_code_chunk include_macro_code extra_parser_module_code + { + $$ = $optional_module_code_chunk + $include_macro_code + $extra_parser_module_code; + } + ; + +include_macro_code + : INCLUDE PATH + { + var fileContent = fs.readFileSync($PATH, { encoding: 'utf-8' }); + var rv = checkActionBlock(fileContent); + if (rv) { + yyerror(rmCommonWS` + included action code file "${$PATH}" does not compile: ${rv} + + Erroneous area: + ${yylexer.prettyPrintRange(@PATH, @INCLUDE)} + `); + } + // And no, we don't support nested '%include': + $$ = '\n// Included by Jison: ' + $PATH + ':\n\n' + fileContent + '\n\n// End Of Include by Jison: ' + $PATH + '\n\n'; + } + | INCLUDE error + { + yyerror(rmCommonWS` + %include MUST be followed by a valid file path. + + Erroneous path: + ` + yylexer.prettyPrintRange(@error, @INCLUDE)); + } + ; + +module_code_chunk + : CODE + { $$ = $CODE; } + | module_code_chunk CODE + { $$ = $module_code_chunk + $CODE; } + | error + { + // TODO ... + yyerror(rmCommonWS` + module code declaration error? + + Erroneous area: + ` + yylexer.prettyPrintRange(@error)); + } + ; + +optional_module_code_chunk + : module_code_chunk + { $$ = $module_code_chunk; } + | %epsilon + { $$ = ''; } ; %% + +var rmCommonWS = helpers.rmCommonWS; +var dquote = helpers.dquote; +var checkActionBlock = helpers.checkActionBlock; + + // transform ebnf to bnf if necessary -function extend (json, grammar) { - json.bnf = ebnf ? transform(grammar) : grammar; +function extend(json, grammar) { + if (ebnf) { + json.ebnf = grammar.grammar; // keep the original source EBNF around for possible pretty-printing & AST exports. 
+ json.bnf = transform(grammar.grammar); + } + else { + json.bnf = grammar.grammar; + } + if (grammar.actionInclude) { + json.actionInclude = grammar.actionInclude; + } return json; } +// convert string value to number or boolean value, when possible +// (and when this is more or less obviously the intent) +// otherwise produce the string itself as value. +function parseValue(v) { + if (v === 'false') { + return false; + } + if (v === 'true') { + return true; + } + // http://stackoverflow.com/questions/175739/is-there-a-built-in-way-in-javascript-to-check-if-a-string-is-a-valid-number + // Note that the `v` check ensures that we do not convert `undefined`, `null` and `''` (empty string!) + if (v && !isNaN(v)) { + var rv = +v; + if (isFinite(rv)) { + return rv; + } + } + return v; +} + + +parser.warn = function p_warn() { + console.warn.apply(console, arguments); +}; + +parser.log = function p_log() { + console.log.apply(console, arguments); +}; + diff --git a/dist/ebnf-parser-cjs-es5.js b/dist/ebnf-parser-cjs-es5.js new file mode 100644 index 0000000..85cf3eb --- /dev/null +++ b/dist/ebnf-parser-cjs-es5.js @@ -0,0 +1,8834 @@ +'use strict'; + +var _typeof = typeof Symbol === "function" && typeof Symbol.iterator === "symbol" ? function (obj) { return typeof obj; } : function (obj) { return obj && typeof Symbol === "function" && obj.constructor === Symbol && obj !== Symbol.prototype ? "symbol" : typeof obj; }; + +var _templateObject = _taggedTemplateLiteral(['\n Maybe you did not correctly separate trailing code from the grammar rule set with a \'%%\' marker on an otherwise empty line?\n \n Erroneous area:\n ', '\n '], ['\n Maybe you did not correctly separate trailing code from the grammar rule set with a \'%%\' marker on an otherwise empty line?\n \n Erroneous area:\n ', '\n ']), + _templateObject2 = _taggedTemplateLiteral(['\n Maybe you did not correctly separate the parse \'header section\' (token definitions, options, lexer spec, etc.) from the grammar rule set with a \'%%\' on an otherwise empty line?\n \n Erroneous area:\n ', '\n '], ['\n Maybe you did not correctly separate the parse \'header section\' (token definitions, options, lexer spec, etc.) from the grammar rule set with a \'%%\' on an otherwise empty line?\n \n Erroneous area:\n ', '\n ']), + _templateObject3 = _taggedTemplateLiteral(['\n The extra parser module code section (a.k.a. \'epilogue\') does not compile: ', '\n \n Erroneous area:\n ', '\n '], ['\n The extra parser module code section (a.k.a. 
\'epilogue\') does not compile: ', '\n \n Erroneous area:\n ', '\n ']), + _templateObject4 = _taggedTemplateLiteral(['\n action header code block does not compile: ', '\n \n Erroneous area:\n ', '\n '], ['\n action header code block does not compile: ', '\n \n Erroneous area:\n ', '\n ']), + _templateObject5 = _taggedTemplateLiteral(['\n declaration list error?\n \n Erroneous area:\n ', '\n '], ['\n declaration list error?\n \n Erroneous area:\n ', '\n ']), + _templateObject6 = _taggedTemplateLiteral(['\n action code block does not compile: ', '\n \n Erroneous area:\n ', '\n '], ['\n action code block does not compile: ', '\n \n Erroneous area:\n ', '\n ']), + _templateObject7 = _taggedTemplateLiteral(['\n You did not specify a legal file path for the \'%import\' initialization code statement, which must have the format:\n \n %import qualifier_name file_path\n \n Erroneous area:\n ', '\n '], ['\n You did not specify a legal file path for the \'%import\' initialization code statement, which must have the format:\n \n %import qualifier_name file_path\n \n Erroneous area:\n ', '\n ']), + _templateObject8 = _taggedTemplateLiteral(['\n Each \'%import\'-ed initialization code section must be qualified by a name, e.g. \'required\' before the import path itself:\n \n %import qualifier_name file_path\n \n Erroneous area:\n ', '\n '], ['\n Each \'%import\'-ed initialization code section must be qualified by a name, e.g. \'required\' before the import path itself:\n \n %import qualifier_name file_path\n \n Erroneous area:\n ', '\n ']), + _templateObject9 = _taggedTemplateLiteral(['\n %code "', '" initialization section action code block does not compile: ', '\n \n Erroneous area:\n ', '\n '], ['\n %code "', '" initialization section action code block does not compile: ', '\n \n Erroneous area:\n ', '\n ']), + _templateObject10 = _taggedTemplateLiteral(['\n Each \'%code\' initialization code section must be qualified by a name, e.g. \'required\' before the action code itself:\n \n %code qualifier_name {action code}\n \n Erroneous area:\n ', '\n '], ['\n Each \'%code\' initialization code section must be qualified by a name, e.g. 
\'required\' before the action code itself:\n \n %code qualifier_name {action code}\n \n Erroneous area:\n ', '\n ']), + _templateObject11 = _taggedTemplateLiteral(['\n %start token error?\n \n Erroneous area:\n ', '\n '], ['\n %start token error?\n \n Erroneous area:\n ', '\n ']), + _templateObject12 = _taggedTemplateLiteral(['\n %token definition list error?\n \n Erroneous area:\n ', '\n '], ['\n %token definition list error?\n \n Erroneous area:\n ', '\n ']), + _templateObject13 = _taggedTemplateLiteral(['\n %import name or source filename missing maybe?\n \n Erroneous area:\n ', '\n '], ['\n %import name or source filename missing maybe?\n \n Erroneous area:\n ', '\n ']), + _templateObject14 = _taggedTemplateLiteral(['\n %options ill defined / error?\n \n Erroneous area:\n ', '\n '], ['\n %options ill defined / error?\n \n Erroneous area:\n ', '\n ']), + _templateObject15 = _taggedTemplateLiteral(['\n %options don\'t seem terminated?\n \n Erroneous area:\n ', '\n '], ['\n %options don\'t seem terminated?\n \n Erroneous area:\n ', '\n ']), + _templateObject16 = _taggedTemplateLiteral(['\n named %option value error for ', '?\n \n Erroneous area:\n ', '\n '], ['\n named %option value error for ', '?\n \n Erroneous area:\n ', '\n ']), + _templateObject17 = _taggedTemplateLiteral(['\n named %option value assignment error?\n \n Erroneous area:\n ', '\n '], ['\n named %option value assignment error?\n \n Erroneous area:\n ', '\n ']), + _templateObject18 = _taggedTemplateLiteral(['\n %parse-params declaration error?\n \n Erroneous area:\n ', '\n '], ['\n %parse-params declaration error?\n \n Erroneous area:\n ', '\n ']), + _templateObject19 = _taggedTemplateLiteral(['\n %parser-type declaration error?\n \n Erroneous area:\n ', '\n '], ['\n %parser-type declaration error?\n \n Erroneous area:\n ', '\n ']), + _templateObject20 = _taggedTemplateLiteral(['\n operator token list error in an associativity statement?\n \n Erroneous area:\n ', '\n '], ['\n operator token list error in an associativity statement?\n \n Erroneous area:\n ', '\n ']), + _templateObject21 = _taggedTemplateLiteral(['\n rule production declaration error?\n \n Erroneous area:\n ', '\n '], ['\n rule production declaration error?\n \n Erroneous area:\n ', '\n ']), + _templateObject22 = _taggedTemplateLiteral(['\n rule production declaration error: did you terminate the rule production set with a semicolon?\n \n Erroneous area:\n ', '\n '], ['\n rule production declaration error: did you terminate the rule production set with a semicolon?\n \n Erroneous area:\n ', '\n ']), + _templateObject23 = _taggedTemplateLiteral(['\n rule id should be followed by a colon, but that one seems missing?\n \n Erroneous area:\n ', '\n '], ['\n rule id should be followed by a colon, but that one seems missing?\n \n Erroneous area:\n ', '\n ']), + _templateObject24 = _taggedTemplateLiteral(['\n rule alternative production declaration error?\n \n Erroneous area:\n ', '\n '], ['\n rule alternative production declaration error?\n \n Erroneous area:\n ', '\n ']), + _templateObject25 = _taggedTemplateLiteral(['\n multiple alternative rule productions should be separated by a \'|\' pipe character, not a \':\' colon!\n \n Erroneous area:\n ', '\n '], ['\n multiple alternative rule productions should be separated by a \'|\' pipe character, not a \':\' colon!\n \n Erroneous area:\n ', '\n ']), + _templateObject26 = _taggedTemplateLiteral(['\n production rule action code block does not compile: ', '\n \n Erroneous area:\n ', '\n '], ['\n production rule 
action code block does not compile: ', '\n \n Erroneous area:\n ', '\n ']), + _templateObject27 = _taggedTemplateLiteral(['\n You cannot specify a precedence override for an epsilon (a.k.a. empty) rule!\n \n Erroneous area:\n ', '\n '], ['\n You cannot specify a precedence override for an epsilon (a.k.a. empty) rule!\n \n Erroneous area:\n ', '\n ']), + _templateObject28 = _taggedTemplateLiteral(['\n epsilon production rule action code block does not compile: ', '\n \n Erroneous area:\n ', '\n '], ['\n epsilon production rule action code block does not compile: ', '\n \n Erroneous area:\n ', '\n ']), + _templateObject29 = _taggedTemplateLiteral(['\n %epsilon rule action declaration error?\n \n Erroneous area:\n ', '\n '], ['\n %epsilon rule action declaration error?\n \n Erroneous area:\n ', '\n ']), + _templateObject30 = _taggedTemplateLiteral(['\n Seems you did not correctly bracket a grammar rule sublist in \'( ... )\' brackets.\n \n Erroneous area:\n ', '\n '], ['\n Seems you did not correctly bracket a grammar rule sublist in \'( ... )\' brackets.\n \n Erroneous area:\n ', '\n ']), + _templateObject31 = _taggedTemplateLiteral(['\n %prec precedence override declaration error?\n \n Erroneous precedence declaration:\n ', '\n '], ['\n %prec precedence override declaration error?\n \n Erroneous precedence declaration:\n ', '\n ']), + _templateObject32 = _taggedTemplateLiteral(['\n Seems you did not correctly bracket a parser rule action block in curly braces: \'{ ... }\'.\n \n Erroneous area:\n ', '\n '], ['\n Seems you did not correctly bracket a parser rule action block in curly braces: \'{ ... }\'.\n \n Erroneous area:\n ', '\n ']), + _templateObject33 = _taggedTemplateLiteral(['\n Seems you did not correctly match curly braces \'{ ... }\' in a parser rule action block.\n \n Erroneous area:\n ', '\n '], ['\n Seems you did not correctly match curly braces \'{ ... 
}\' in a parser rule action block.\n \n Erroneous area:\n ', '\n ']), + _templateObject34 = _taggedTemplateLiteral(['\n included action code file "', '" does not compile: ', '\n \n Erroneous area:\n ', '\n '], ['\n included action code file "', '" does not compile: ', '\n \n Erroneous area:\n ', '\n ']), + _templateObject35 = _taggedTemplateLiteral(['\n %include MUST be followed by a valid file path.\n \n Erroneous path:\n '], ['\n %include MUST be followed by a valid file path.\n \n Erroneous path:\n ']), + _templateObject36 = _taggedTemplateLiteral(['\n module code declaration error?\n \n Erroneous area:\n '], ['\n module code declaration error?\n \n Erroneous area:\n ']), + _templateObject37 = _taggedTemplateLiteral(['\n EBNF: ignoring unsupported parser option ', '\n while lexing in ', ' state.\n\n Erroneous area:\n '], ['\n EBNF: ignoring unsupported parser option ', '\n while lexing in ', ' state.\n\n Erroneous area:\n ']), + _templateObject38 = _taggedTemplateLiteral(['\n unterminated string constant in lexer rule action block.\n\n Erroneous area:\n '], ['\n unterminated string constant in lexer rule action block.\n\n Erroneous area:\n ']), + _templateObject39 = _taggedTemplateLiteral(['\n unterminated string constant in %options entry.\n\n Erroneous area:\n '], ['\n unterminated string constant in %options entry.\n\n Erroneous area:\n ']), + _templateObject40 = _taggedTemplateLiteral(['\n unterminated string constant encountered while lexing\n ', '.\n\n Erroneous area:\n '], ['\n unterminated string constant encountered while lexing\n ', '.\n\n Erroneous area:\n ']), + _templateObject41 = _taggedTemplateLiteral(['\n unsupported parser input: ', '\n while lexing in ', ' state.\n \n Erroneous area:\n '], ['\n unsupported parser input: ', '\n while lexing in ', ' state.\n \n Erroneous area:\n ']); + +function _taggedTemplateLiteral(strings, raw) { return Object.freeze(Object.defineProperties(strings, { raw: { value: Object.freeze(raw) } })); } + +function _interopDefault(ex) { + return ex && (typeof ex === 'undefined' ? 'undefined' : _typeof(ex)) === 'object' && 'default' in ex ? ex['default'] : ex; +} + +var XRegExp = _interopDefault(require('@gerhobbelt/xregexp')); +var helpers = _interopDefault(require('jison-helpers-lib')); +var fs = _interopDefault(require('fs')); +var jisonlex = _interopDefault(require('@gerhobbelt/lex-parser')); + +/* parser generated by jison 0.6.1-205 */ + +/* + * Returns a Parser object of the following structure: + * + * Parser: { + * yy: {} The so-called "shared state" or rather the *source* of it; + * the real "shared state" `yy` passed around to + * the rule actions, etc. is a derivative/copy of this one, + * not a direct reference! + * } + * + * Parser.prototype: { + * yy: {}, + * EOF: 1, + * TERROR: 2, + * + * trace: function(errorMessage, ...), + * + * JisonParserError: function(msg, hash), + * + * quoteName: function(name), + * Helper function which can be overridden by user code later on: put suitable + * quotes around literal IDs in a description string. + * + * originalQuoteName: function(name), + * The basic quoteName handler provided by JISON. + * `cleanupAfterParse()` will clean up and reset `quoteName()` to reference this function + * at the end of the `parse()`. + * + * describeSymbol: function(symbol), + * Return a more-or-less human-readable description of the given symbol, when + * available, or the symbol itself, serving as its own 'description' for lack + * of something better to serve up. 
+ * + * Return NULL when the symbol is unknown to the parser. + * + * symbols_: {associative list: name ==> number}, + * terminals_: {associative list: number ==> name}, + * nonterminals: {associative list: rule-name ==> {associative list: number ==> rule-alt}}, + * terminal_descriptions_: (if there are any) {associative list: number ==> description}, + * productions_: [...], + * + * performAction: function parser__performAction(yytext, yyleng, yylineno, yyloc, yystate, yysp, yyvstack, yylstack, yystack, yysstack), + * + * The function parameters and `this` have the following value/meaning: + * - `this` : reference to the `yyval` internal object, which has members (`$` and `_$`) + * to store/reference the rule value `$$` and location info `@$`. + * + * One important thing to note about `this` a.k.a. `yyval`: every *reduce* action gets + * to see the same object via the `this` reference, i.e. if you wish to carry custom + * data from one reduce action through to the next within a single parse run, then you + * may get nasty and use `yyval` a.k.a. `this` for storing you own semi-permanent data. + * + * `this.yy` is a direct reference to the `yy` shared state object. + * + * `%parse-param`-specified additional `parse()` arguments have been added to this `yy` + * object at `parse()` start and are therefore available to the action code via the + * same named `yy.xxxx` attributes (where `xxxx` represents a identifier name from + * the %parse-param` list. + * + * - `yytext` : reference to the lexer value which belongs to the last lexer token used + * to match this rule. This is *not* the look-ahead token, but the last token + * that's actually part of this rule. + * + * Formulated another way, `yytext` is the value of the token immediately preceeding + * the current look-ahead token. + * Caveats apply for rules which don't require look-ahead, such as epsilon rules. + * + * - `yyleng` : ditto as `yytext`, only now for the lexer.yyleng value. + * + * - `yylineno`: ditto as `yytext`, only now for the lexer.yylineno value. + * + * - `yyloc` : ditto as `yytext`, only now for the lexer.yylloc lexer token location info. + * + * WARNING: since jison 0.4.18-186 this entry may be NULL/UNDEFINED instead + * of an empty object when no suitable location info can be provided. + * + * - `yystate` : the current parser state number, used internally for dispatching and + * executing the action code chunk matching the rule currently being reduced. + * + * - `yysp` : the current state stack position (a.k.a. 'stack pointer') + * + * This one comes in handy when you are going to do advanced things to the parser + * stacks, all of which are accessible from your action code (see the next entries below). + * + * Also note that you can access this and other stack index values using the new double-hash + * syntax, i.e. `##$ === ##0 === yysp`, while `##1` is the stack index for all things + * related to the first rule term, just like you have `$1`, `@1` and `#1`. + * This is made available to write very advanced grammar action rules, e.g. when you want + * to investigate the parse state stack in your action code, which would, for example, + * be relevant when you wish to implement error diagnostics and reporting schemes similar + * to the work described here: + * + * + Pottier, F., 2016. Reachability and error diagnosis in LR(1) automata. + * In Journées Francophones des Languages Applicatifs. + * + * + Jeffery, C.L., 2003. Generating LR syntax error messages from examples. 
+ * ACM Transactions on Programming Languages and Systems (TOPLAS), 25(5), pp.631–640. + * + * - `yyrulelength`: the current rule's term count, i.e. the number of entries occupied on the stack. + * + * This one comes in handy when you are going to do advanced things to the parser + * stacks, all of which are accessible from your action code (see the next entries below). + * + * - `yyvstack`: reference to the parser value stack. Also accessed via the `$1` etc. + * constructs. + * + * - `yylstack`: reference to the parser token location stack. Also accessed via + * the `@1` etc. constructs. + * + * WARNING: since jison 0.4.18-186 this array MAY contain slots which are + * UNDEFINED rather than an empty (location) object, when the lexer/parser + * action code did not provide a suitable location info object when such a + * slot was filled! + * + * - `yystack` : reference to the parser token id stack. Also accessed via the + * `#1` etc. constructs. + * + * Note: this is a bit of a **white lie** as we can statically decode any `#n` reference to + * its numeric token id value, hence that code wouldn't need the `yystack` but *you* might + * want access this array for your own purposes, such as error analysis as mentioned above! + * + * Note that this stack stores the current stack of *tokens*, that is the sequence of + * already parsed=reduced *nonterminals* (tokens representing rules) and *terminals* + * (lexer tokens *shifted* onto the stack until the rule they belong to is found and + * *reduced*. + * + * - `yysstack`: reference to the parser state stack. This one carries the internal parser + * *states* such as the one in `yystate`, which are used to represent + * the parser state machine in the *parse table*. *Very* *internal* stuff, + * what can I say? If you access this one, you're clearly doing wicked things + * + * - `...` : the extra arguments you specified in the `%parse-param` statement in your + * grammar definition file. + * + * table: [...], + * State transition table + * ---------------------- + * + * index levels are: + * - `state` --> hash table + * - `symbol` --> action (number or array) + * + * If the `action` is an array, these are the elements' meaning: + * - index [0]: 1 = shift, 2 = reduce, 3 = accept + * - index [1]: GOTO `state` + * + * If the `action` is a number, it is the GOTO `state` + * + * defaultActions: {...}, + * + * parseError: function(str, hash, ExceptionClass), + * yyError: function(str, ...), + * yyRecovering: function(), + * yyErrOk: function(), + * yyClearIn: function(), + * + * constructParseErrorInfo: function(error_message, exception_object, expected_token_set, is_recoverable), + * Helper function **which will be set up during the first invocation of the `parse()` method**. + * Produces a new errorInfo 'hash object' which can be passed into `parseError()`. + * See it's use in this parser kernel in many places; example usage: + * + * var infoObj = parser.constructParseErrorInfo('fail!', null, + * parser.collect_expected_token_set(state), true); + * var retVal = parser.parseError(infoObj.errStr, infoObj, parser.JisonParserError); + * + * originalParseError: function(str, hash, ExceptionClass), + * The basic `parseError` handler provided by JISON. + * `cleanupAfterParse()` will clean up and reset `parseError()` to reference this function + * at the end of the `parse()`. + * + * options: { ... parser %options ... 
}, + * + * parse: function(input[, args...]), + * Parse the given `input` and return the parsed value (or `true` when none was provided by + * the root action, in which case the parser is acting as a *matcher*). + * You MAY use the additional `args...` parameters as per `%parse-param` spec of this grammar: + * these extra `args...` are added verbatim to the `yy` object reference as member variables. + * + * WARNING: + * Parser's additional `args...` parameters (via `%parse-param`) MAY conflict with + * any attributes already added to `yy` by the jison run-time; + * when such a collision is detected an exception is thrown to prevent the generated run-time + * from silently accepting this confusing and potentially hazardous situation! + * + * The lexer MAY add its own set of additional parameters (via the `%parse-param` line in + * the lexer section of the grammar spec): these will be inserted in the `yy` shared state + * object and any collision with those will be reported by the lexer via a thrown exception. + * + * cleanupAfterParse: function(resultValue, invoke_post_methods, do_not_nuke_errorinfos), + * Helper function **which will be set up during the first invocation of the `parse()` method**. + * This helper API is invoked at the end of the `parse()` call, unless an exception was thrown + * and `%options no-try-catch` has been defined for this grammar: in that case this helper MAY + * be invoked by calling user code to ensure the `post_parse` callbacks are invoked and + * the internal parser gets properly garbage collected under these particular circumstances. + * + * yyMergeLocationInfo: function(first_index, last_index, first_yylloc, last_yylloc, dont_look_back), + * Helper function **which will be set up during the first invocation of the `parse()` method**. + * This helper API can be invoked to calculate a spanning `yylloc` location info object. + * + * Note: %epsilon rules MAY specify no `first_index` and `first_yylloc`, in which case + * this function will attempt to obtain a suitable location marker by inspecting the location stack + * backwards. + * + * For more info see the documentation comment further below, immediately above this function's + * implementation. + * + * lexer: { + * yy: {...}, A reference to the so-called "shared state" `yy` once + * received via a call to the `.setInput(input, yy)` lexer API. + * EOF: 1, + * ERROR: 2, + * JisonLexerError: function(msg, hash), + * parseError: function(str, hash, ExceptionClass), + * setInput: function(input, [yy]), + * input: function(), + * unput: function(str), + * more: function(), + * reject: function(), + * less: function(n), + * pastInput: function(n), + * upcomingInput: function(n), + * showPosition: function(), + * test_match: function(regex_match_array, rule_index, ...), + * next: function(...), + * lex: function(...), + * begin: function(condition), + * pushState: function(condition), + * popState: function(), + * topState: function(), + * _currentRules: function(), + * stateStackSize: function(), + * cleanupAfterLex: function() + * + * options: { ... lexer %options ... 
}, + * + * performAction: function(yy, yy_, $avoiding_name_collisions, YY_START, ...), + * rules: [...], + * conditions: {associative list: name ==> set}, + * } + * } + * + * + * token location info (@$, _$, etc.): { + * first_line: n, + * last_line: n, + * first_column: n, + * last_column: n, + * range: [start_number, end_number] + * (where the numbers are indexes into the input string, zero-based) + * } + * + * --- + * + * The `parseError` function receives a 'hash' object with these members for lexer and + * parser errors: + * + * { + * text: (matched text) + * token: (the produced terminal token, if any) + * token_id: (the produced terminal token numeric ID, if any) + * line: (yylineno) + * loc: (yylloc) + * } + * + * parser (grammar) errors will also provide these additional members: + * + * { + * expected: (array describing the set of expected tokens; + * may be UNDEFINED when we cannot easily produce such a set) + * state: (integer (or array when the table includes grammar collisions); + * represents the current internal state of the parser kernel. + * can, for example, be used to pass to the `collect_expected_token_set()` + * API to obtain the expected token set) + * action: (integer; represents the current internal action which will be executed) + * new_state: (integer; represents the next/planned internal state, once the current + * action has executed) + * recoverable: (boolean: TRUE when the parser MAY have an error recovery rule + * available for this particular error) + * state_stack: (array: the current parser LALR/LR internal state stack; this can be used, + * for instance, for advanced error analysis and reporting) + * value_stack: (array: the current parser LALR/LR internal `$$` value stack; this can be used, + * for instance, for advanced error analysis and reporting) + * location_stack: (array: the current parser LALR/LR internal location stack; this can be used, + * for instance, for advanced error analysis and reporting) + * yy: (object: the current parser internal "shared state" `yy` + * as is also available in the rule actions; this can be used, + * for instance, for advanced error analysis and reporting) + * lexer: (reference to the current lexer instance used by the parser) + * parser: (reference to the current parser instance) + * } + * + * while `this` will reference the current parser instance. + * + * When `parseError` is invoked by the lexer, `this` will still reference the related *parser* + * instance, while these additional `hash` fields will also be provided: + * + * { + * lexer: (reference to the current lexer instance which reported the error) + * } + * + * When `parseError` is invoked by the parser due to a **JavaScript exception** being fired + * from either the parser or lexer, `this` will still reference the related *parser* + * instance, while these additional `hash` fields will also be provided: + * + * { + * exception: (reference to the exception thrown) + * } + * + * Please do note that in the latter situation, the `expected` field will be omitted as + * this type of failure is assumed not to be due to *parse errors* but rather due to user + * action code in either parser or lexer failing unexpectedly. + * + * --- + * + * You can specify parser options by setting / modifying the `.yy` object of your Parser instance. + * These options are available: + * + * ### options which are global for all parser instances + * + * Parser.pre_parse: function(yy) + * optional: you can specify a pre_parse() function in the chunk following + * the grammar, i.e. 
after the last `%%`. + * Parser.post_parse: function(yy, retval, parseInfo) { return retval; } + * optional: you can specify a post_parse() function in the chunk following + * the grammar, i.e. after the last `%%`. When it does not return any value, + * the parser will return the original `retval`. + * + * ### options which can be set up per parser instance + * + * yy: { + * pre_parse: function(yy) + * optional: is invoked before the parse cycle starts (and before the first + * invocation of `lex()`) but immediately after the invocation of + * `parser.pre_parse()`). + * post_parse: function(yy, retval, parseInfo) { return retval; } + * optional: is invoked when the parse terminates due to success ('accept') + * or failure (even when exceptions are thrown). + * `retval` contains the return value to be produced by `Parser.parse()`; + * this function can override the return value by returning another. + * When it does not return any value, the parser will return the original + * `retval`. + * This function is invoked immediately before `parser.post_parse()`. + * + * parseError: function(str, hash, ExceptionClass) + * optional: overrides the default `parseError` function. + * quoteName: function(name), + * optional: overrides the default `quoteName` function. + * } + * + * parser.lexer.options: { + * pre_lex: function() + * optional: is invoked before the lexer is invoked to produce another token. + * `this` refers to the Lexer object. + * post_lex: function(token) { return token; } + * optional: is invoked when the lexer has produced a token `token`; + * this function can override the returned token value by returning another. + * When it does not return any (truthy) value, the lexer will return + * the original `token`. + * `this` refers to the Lexer object. + * + * ranges: boolean + * optional: `true` ==> token location info will include a .range[] member. + * flex: boolean + * optional: `true` ==> flex-like lexing behaviour where the rules are tested + * exhaustively to find the longest match. + * backtrack_lexer: boolean + * optional: `true` ==> lexer regexes are tested in order and for invoked; + * the lexer terminates the scan when a token is returned by the action code. + * xregexp: boolean + * optional: `true` ==> lexer rule regexes are "extended regex format" requiring the + * `XRegExp` library. When this `%option` has not been specified at compile time, all lexer + * rule regexes have been written as standard JavaScript RegExp expressions. + * } + */ + +// See also: +// http://stackoverflow.com/questions/1382107/whats-a-good-way-to-extend-error-in-javascript/#35881508 +// but we keep the prototype.constructor and prototype.name assignment lines too for compatibility +// with userland code which might access the derived class in a 'classic' way. 
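// ---------------------------------------------------------------------------
// Illustrative usage sketch (not emitted by jison itself; `someParser` and the
// hook bodies are assumptions for demonstration only): it shows how calling
// code typically combines the `yy` hooks documented in the comment block above
// with the `JisonParserError` class defined directly below (see the preceding
// note on how that class derives from `Error`).
function demoParserUsage(someParser, grammarSource) {
    // optional per-run hooks on the shared state object:
    someParser.yy.pre_parse = function (yy) {
        // runs before the parse cycle starts, i.e. before the first lex() call
        yy.startedAt = Date.now();
    };
    someParser.yy.post_parse = function (yy, retval /* , parseInfo */) {
        // returning a value here overrides the result produced by parse()
        return retval;
    };

    try {
        return someParser.parse(grammarSource);
    } catch (ex) {
        if (ex instanceof someParser.JisonParserError) {
            // the error hash described above travels along as `ex.hash`
            console.error('parse failed:', ex.message,
                          'at line', ex.hash && ex.hash.line);
        }
        throw ex;
    }
}
// ---------------------------------------------------------------------------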
+function JisonParserError$1(msg, hash) { + Object.defineProperty(this, 'name', { + enumerable: false, + writable: false, + value: 'JisonParserError' + }); + + if (msg == null) msg = '???'; + + Object.defineProperty(this, 'message', { + enumerable: false, + writable: true, + value: msg + }); + + this.hash = hash; + + var stacktrace; + if (hash && hash.exception instanceof Error) { + var ex2 = hash.exception; + this.message = ex2.message || msg; + stacktrace = ex2.stack; + } + if (!stacktrace) { + if (Error.hasOwnProperty('captureStackTrace')) { + // V8/Chrome engine + Error.captureStackTrace(this, this.constructor); + } else { + stacktrace = new Error(msg).stack; + } + } + if (stacktrace) { + Object.defineProperty(this, 'stack', { + enumerable: false, + writable: false, + value: stacktrace + }); + } +} + +if (typeof Object.setPrototypeOf === 'function') { + Object.setPrototypeOf(JisonParserError$1.prototype, Error.prototype); +} else { + JisonParserError$1.prototype = Object.create(Error.prototype); +} +JisonParserError$1.prototype.constructor = JisonParserError$1; +JisonParserError$1.prototype.name = 'JisonParserError'; + +// helper: reconstruct the productions[] table +function bp$1(s) { + var rv = []; + var p = s.pop; + var r = s.rule; + for (var i = 0, l = p.length; i < l; i++) { + rv.push([p[i], r[i]]); + } + return rv; +} + +// helper: reconstruct the 'goto' table +function bt$1(s) { + var rv = []; + var d = s.len; + var y = s.symbol; + var t = s.type; + var a = s.state; + var m = s.mode; + var g = s.goto; + for (var i = 0, l = d.length; i < l; i++) { + var n = d[i]; + var q = {}; + for (var j = 0; j < n; j++) { + var z = y.shift(); + switch (t.shift()) { + case 2: + q[z] = [m.shift(), g.shift()]; + break; + + case 0: + q[z] = a.shift(); + break; + + default: + // type === 1: accept + q[z] = [3]; + } + } + rv.push(q); + } + return rv; +} + +// helper: runlength encoding with increment step: code, length: step (default step = 0) +// `this` references an array +function s$1(c, l, a) { + a = a || 0; + for (var i = 0; i < l; i++) { + this.push(c); + c += a; + } +} + +// helper: duplicate sequence from *relative* offset and length. +// `this` references an array +function c$1(i, l) { + i = this.length - i; + for (l += i; i < l; i++) { + this.push(this[i]); + } +} + +// helper: unpack an array using helpers and data, all passed in an array argument 'a'. +function u$1(a) { + var rv = []; + for (var i = 0, l = a.length; i < l; i++) { + var e = a[i]; + // Is this entry a helper function? + if (typeof e === 'function') { + i++; + e.apply(rv, a[i]); + } else { + rv.push(e); + } + } + return rv; +} + +var parser$1 = { + // Code Generator Information Report + // --------------------------------- + // + // Options: + // + // default action mode: ............. classic,merge + // no try..catch: ................... false + // no default resolve on conflict: false + // on-demand look-ahead: ............ false + // error recovery token skip maximum: 3 + // yyerror in parse actions is: ..... NOT recoverable, + // yyerror in lexer actions and other non-fatal lexer are: + // .................................. NOT recoverable, + // debug grammar/output: ............ false + // has partial LR conflict upgrade: true + // rudimentary token-stack support: false + // parser table compression mode: ... 2 + // export debug tables: ............. false + // export *all* tables: ............. false + // module type: ..................... es + // parser engine type: .............. 
lalr + // output main() in the module: ..... true + // has user-specified main(): ....... false + // has user-specified require()/import modules for main(): + // .................................. false + // number of expected conflicts: .... 0 + // + // + // Parser Analysis flags: + // + // no significant actions (parser is a language matcher only): + // .................................. false + // uses yyleng: ..................... false + // uses yylineno: ................... false + // uses yytext: ..................... false + // uses yylloc: ..................... false + // uses ParseError API: ............. false + // uses YYERROR: .................... false + // uses YYRECOVERING: ............... false + // uses YYERROK: .................... false + // uses YYCLEARIN: .................. false + // tracks rule values: .............. true + // assigns rule values: ............. true + // uses location tracking: .......... false + // assigns location: ................ false + // uses yystack: .................... false + // uses yysstack: ................... false + // uses yysp: ....................... true + // uses yyrulelength: ............... false + // uses yyMergeLocationInfo API: .... false + // has error recovery: .............. false + // has error reporting: ............. false + // + // --------- END OF REPORT ----------- + + trace: function no_op_trace() {}, + JisonParserError: JisonParserError$1, + yy: {}, + options: { + type: "lalr", + hasPartialLrUpgradeOnConflict: true, + errorRecoveryTokenDiscardCount: 3 + }, + symbols_: { + "$accept": 0, + "$end": 1, + "(": 4, + ")": 5, + "*": 6, + "+": 8, + "?": 7, + "ALIAS": 9, + "EOF": 1, + "SYMBOL": 10, + "error": 2, + "expression": 16, + "handle": 13, + "handle_list": 12, + "production": 11, + "rule": 14, + "suffix": 17, + "suffixed_expression": 15, + "|": 3 + }, + terminals_: { + 1: "EOF", + 2: "error", + 3: "|", + 4: "(", + 5: ")", + 6: "*", + 7: "?", + 8: "+", + 9: "ALIAS", + 10: "SYMBOL" + }, + TERROR: 2, + EOF: 1, + + // internals: defined here so the object *structure* doesn't get modified by parse() et al, + // thus helping JIT compilers like Chrome V8. + originalQuoteName: null, + originalParseError: null, + cleanupAfterParse: null, + constructParseErrorInfo: null, + yyMergeLocationInfo: null, + + __reentrant_call_depth: 0, // INTERNAL USE ONLY + __error_infos: [], // INTERNAL USE ONLY: the set of parseErrorInfo objects created since the last cleanup + __error_recovery_infos: [], // INTERNAL USE ONLY: the set of parseErrorInfo objects created since the last cleanup + + // APIs which will be set up depending on user action code analysis: + //yyRecovering: 0, + //yyErrOk: 0, + //yyClearIn: 0, + + // Helper APIs + // ----------- + + // Helper function which can be overridden by user code later on: put suitable quotes around + // literal IDs in a description string. + quoteName: function parser_quoteName(id_str) { + return '"' + id_str + '"'; + }, + + // Return the name of the given symbol (terminal or non-terminal) as a string, when available. + // + // Return NULL when the symbol is unknown to the parser. + getSymbolName: function parser_getSymbolName(symbol) { + if (this.terminals_[symbol]) { + return this.terminals_[symbol]; + } + + // Otherwise... this might refer to a RULE token i.e. a non-terminal: see if we can dig that one up. 
+ // + // An example of this may be where a rule's action code contains a call like this: + // + // parser.getSymbolName(#$) + // + // to obtain a human-readable name of the current grammar rule. + var s = this.symbols_; + for (var key in s) { + if (s[key] === symbol) { + return key; + } + } + return null; + }, + + // Return a more-or-less human-readable description of the given symbol, when available, + // or the symbol itself, serving as its own 'description' for lack of something better to serve up. + // + // Return NULL when the symbol is unknown to the parser. + describeSymbol: function parser_describeSymbol(symbol) { + if (symbol !== this.EOF && this.terminal_descriptions_ && this.terminal_descriptions_[symbol]) { + return this.terminal_descriptions_[symbol]; + } else if (symbol === this.EOF) { + return 'end of input'; + } + var id = this.getSymbolName(symbol); + if (id) { + return this.quoteName(id); + } + return null; + }, + + // Produce a (more or less) human-readable list of expected tokens at the point of failure. + // + // The produced list may contain token or token set descriptions instead of the tokens + // themselves to help turning this output into something that easier to read by humans + // unless `do_not_describe` parameter is set, in which case a list of the raw, *numeric*, + // expected terminals and nonterminals is produced. + // + // The returned list (array) will not contain any duplicate entries. + collect_expected_token_set: function parser_collect_expected_token_set(state, do_not_describe) { + var TERROR = this.TERROR; + var tokenset = []; + var check = {}; + // Has this (error?) state been outfitted with a custom expectations description text for human consumption? + // If so, use that one instead of the less palatable token set. + if (!do_not_describe && this.state_descriptions_ && this.state_descriptions_[state]) { + return [this.state_descriptions_[state]]; + } + for (var p in this.table[state]) { + p = +p; + if (p !== TERROR) { + var d = do_not_describe ? p : this.describeSymbol(p); + if (d && !check[d]) { + tokenset.push(d); + check[d] = true; // Mark this token description as already mentioned to prevent outputting duplicate entries. + } + } + } + return tokenset; + }, + productions_: bp$1({ + pop: u$1([11, 12, 12, 13, 13, 14, 14, 15, 15, 16, 16, s$1, [17, 4]]), + rule: u$1([2, 1, 3, 0, 1, 1, 2, 3, c$1, [8, 6], 1]) + }), + performAction: function parser__PerformAction(yystate /* action[1] */, yysp, yyvstack) { + + /* this == yyval */ + + // the JS engine itself can go and remove these statements when `yy` turns out to be unused in any action code! + switch (yystate) { + case 0: + /*! Production:: $accept : production $end */ + + // default action (generated by JISON mode classic/merge :: VT,VA,-,-,-,-,-,-): + this.$ = yyvstack[yysp - 1]; + // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,-,-,-,-) + break; + + case 1: + /*! Production:: production : handle EOF */ + + // default action (generated by JISON mode classic/merge :: VT,VA,-,-,-,-,-,-): + this.$ = yyvstack[yysp - 1]; + // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,-,-,-,-) + + + return yyvstack[yysp - 1]; + break; + + case 2: + /*! Production:: handle_list : handle */ + case 6: + /*! Production:: rule : suffixed_expression */ + + this.$ = [yyvstack[yysp]]; + break; + + case 3: + /*! Production:: handle_list : handle_list "|" handle */ + + yyvstack[yysp - 2].push(yyvstack[yysp]); + this.$ = yyvstack[yysp - 2]; + break; + + case 4: + /*! 
Production:: handle : %epsilon */ + + this.$ = []; + break; + + case 5: + /*! Production:: handle : rule */ + case 13: + /*! Production:: suffix : "*" */ + case 14: + /*! Production:: suffix : "?" */ + case 15: + /*! Production:: suffix : "+" */ + + this.$ = yyvstack[yysp]; + break; + + case 7: + /*! Production:: rule : rule suffixed_expression */ + + yyvstack[yysp - 1].push(yyvstack[yysp]); + this.$ = yyvstack[yysp - 1]; + break; + + case 8: + /*! Production:: suffixed_expression : expression suffix ALIAS */ + + this.$ = ['xalias', yyvstack[yysp - 1], yyvstack[yysp - 2], yyvstack[yysp]]; + break; + + case 9: + /*! Production:: suffixed_expression : expression suffix */ + + if (yyvstack[yysp]) { + this.$ = [yyvstack[yysp], yyvstack[yysp - 1]]; + } else { + this.$ = yyvstack[yysp - 1]; + } + break; + + case 10: + /*! Production:: expression : SYMBOL */ + + this.$ = ['symbol', yyvstack[yysp]]; + break; + + case 11: + /*! Production:: expression : "(" handle_list ")" */ + + this.$ = ['()', yyvstack[yysp - 1]]; + break; + + case 12: + /*! Production:: suffix : %epsilon */ + + this.$ = undefined; + break; + + } + }, + table: bt$1({ + len: u$1([8, 1, 1, 7, 0, 10, 0, 9, 0, 0, 6, s$1, [0, 3], 2, s$1, [0, 3], 8, 0]), + symbol: u$1([1, 4, 10, 11, s$1, [13, 4, 1], s$1, [1, 3], 3, 4, 5, 10, c$1, [9, 3], s$1, [3, 8, 1], 17, c$1, [16, 4], s$1, [12, 5, 1], c$1, [19, 4], 9, 10, 3, 5, c$1, [17, 4], c$1, [16, 4]]), + type: u$1([s$1, [2, 3], s$1, [0, 5], 1, s$1, [2, 6], 0, 0, s$1, [2, 9], c$1, [10, 5], s$1, [0, 5], s$1, [2, 12], s$1, [0, 4]]), + state: u$1([s$1, [1, 5, 1], 9, 5, 10, 14, 15, c$1, [8, 3], 19, c$1, [4, 3]]), + mode: u$1([2, s$1, [1, 3], 2, 2, 1, 2, c$1, [5, 3], c$1, [7, 3], c$1, [12, 4], c$1, [13, 9], c$1, [15, 3], c$1, [5, 4]]), + goto: u$1([4, 7, 6, 8, 5, 5, 7, 5, 6, s$1, [12, 4], 11, 12, 13, 12, 12, 4, 7, 4, 6, s$1, [9, 4], 16, 9, 18, 17, c$1, [12, 4]]) + }), + defaultActions: { + 4: 6, + 6: 10, + 8: 1, + 9: 7, + 11: 13, + 12: 14, + 13: 15, + 15: 2, + 16: 8, + 17: 11, + 19: 3 + }, + parseError: function parseError(str, hash, ExceptionClass) { + if (hash.recoverable && typeof this.trace === 'function') { + this.trace(str); + hash.destroy(); // destroy... well, *almost*! + } else { + if (!ExceptionClass) { + ExceptionClass = this.JisonParserError; + } + throw new ExceptionClass(str, hash); + } + }, + parse: function parse(input) { + var self = this; + var stack = new Array(128); // token stack: stores token which leads to state at the same index (column storage) + var sstack = new Array(128); // state stack: stores states (column storage) + + var vstack = new Array(128); // semantic value stack + + var table = this.table; + var sp = 0; // 'stack pointer': index into the stacks + + + var symbol = 0; + + var EOF = this.EOF; + var NO_ACTION = [0, 20 /* === table.length :: ensures that anyone using this new state will fail dramatically! */]; + + var lexer; + if (this.__lexer__) { + lexer = this.__lexer__; + } else { + lexer = this.__lexer__ = Object.create(this.lexer); + } + + var sharedState_yy = { + parseError: undefined, + quoteName: undefined, + lexer: undefined, + parser: undefined, + pre_parse: undefined, + post_parse: undefined, + pre_lex: undefined, + post_lex: undefined // WARNING: must be written this way for the code expanders to work correctly in both ES5 and ES6 modes! 
+ }; + + this.yyGetSharedState = function yyGetSharedState() { + return sharedState_yy; + }; + + function shallow_copy_noclobber(dst, src) { + for (var k in src) { + if (typeof dst[k] === 'undefined' && Object.prototype.hasOwnProperty.call(src, k)) { + dst[k] = src[k]; + } + } + } + + // copy state + shallow_copy_noclobber(sharedState_yy, this.yy); + + sharedState_yy.lexer = lexer; + sharedState_yy.parser = this; + + // Does the shared state override the default `parseError` that already comes with this instance? + if (typeof sharedState_yy.parseError === 'function') { + this.parseError = function parseErrorAlt(str, hash, ExceptionClass) { + if (!ExceptionClass) { + ExceptionClass = this.JisonParserError; + } + return sharedState_yy.parseError.call(this, str, hash, ExceptionClass); + }; + } else { + this.parseError = this.originalParseError; + } + + // Does the shared state override the default `quoteName` that already comes with this instance? + if (typeof sharedState_yy.quoteName === 'function') { + this.quoteName = function quoteNameAlt(id_str) { + return sharedState_yy.quoteName.call(this, id_str); + }; + } else { + this.quoteName = this.originalQuoteName; + } + + // set up the cleanup function; make it an API so that external code can re-use this one in case of + // calamities or when the `%options no-try-catch` option has been specified for the grammar, in which + // case this parse() API method doesn't come with a `finally { ... }` block any more! + // + // NOTE: as this API uses parse() as a closure, it MUST be set again on every parse() invocation, + // or else your `sharedState`, etc. references will be *wrong*! + this.cleanupAfterParse = function parser_cleanupAfterParse(resultValue, invoke_post_methods, do_not_nuke_errorinfos) { + var rv; + + if (invoke_post_methods) { + var hash; + + if (sharedState_yy.post_parse || this.post_parse) { + // create an error hash info instance: we re-use this API in a **non-error situation** + // as this one delivers all parser internals ready for access by userland code. + hash = this.constructParseErrorInfo(null /* no error! */, null /* no exception! */, null, false); + } + + if (sharedState_yy.post_parse) { + rv = sharedState_yy.post_parse.call(this, sharedState_yy, resultValue, hash); + if (typeof rv !== 'undefined') resultValue = rv; + } + if (this.post_parse) { + rv = this.post_parse.call(this, sharedState_yy, resultValue, hash); + if (typeof rv !== 'undefined') resultValue = rv; + } + + // cleanup: + if (hash && hash.destroy) { + hash.destroy(); + } + } + + if (this.__reentrant_call_depth > 1) return resultValue; // do not (yet) kill the sharedState when this is a reentrant run. + + // clean up the lingering lexer structures as well: + if (lexer.cleanupAfterLex) { + lexer.cleanupAfterLex(do_not_nuke_errorinfos); + } + + // prevent lingering circular references from causing memory leaks: + if (sharedState_yy) { + sharedState_yy.lexer = undefined; + sharedState_yy.parser = undefined; + if (lexer.yy === sharedState_yy) { + lexer.yy = undefined; + } + } + sharedState_yy = undefined; + this.parseError = this.originalParseError; + this.quoteName = this.originalQuoteName; + + // nuke the vstack[] array at least as that one will still reference obsoleted user values. + // To be safe, we nuke the other internal stack columns as well... + stack.length = 0; // fastest way to nuke an array without overly bothering the GC + sstack.length = 0; + + vstack.length = 0; + sp = 0; + + // nuke the error hash info instances created during this run. 
+ // Userland code must COPY any data/references + // in the error hash instance(s) it is more permanently interested in. + if (!do_not_nuke_errorinfos) { + for (var i = this.__error_infos.length - 1; i >= 0; i--) { + var el = this.__error_infos[i]; + if (el && typeof el.destroy === 'function') { + el.destroy(); + } + } + this.__error_infos.length = 0; + } + + return resultValue; + }; + + // NOTE: as this API uses parse() as a closure, it MUST be set again on every parse() invocation, + // or else your `lexer`, `sharedState`, etc. references will be *wrong*! + this.constructParseErrorInfo = function parser_constructParseErrorInfo(msg, ex, expected, recoverable) { + var pei = { + errStr: msg, + exception: ex, + text: lexer.match, + value: lexer.yytext, + token: this.describeSymbol(symbol) || symbol, + token_id: symbol, + line: lexer.yylineno, + + expected: expected, + recoverable: recoverable, + state: state, + action: action, + new_state: newState, + symbol_stack: stack, + state_stack: sstack, + value_stack: vstack, + + stack_pointer: sp, + yy: sharedState_yy, + lexer: lexer, + parser: this, + + // and make sure the error info doesn't stay due to potential + // ref cycle via userland code manipulations. + // These would otherwise all be memory leak opportunities! + // + // Note that only array and object references are nuked as those + // constitute the set of elements which can produce a cyclic ref. + // The rest of the members is kept intact as they are harmless. + destroy: function destructParseErrorInfo() { + // remove cyclic references added to error info: + // info.yy = null; + // info.lexer = null; + // info.value = null; + // info.value_stack = null; + // ... + var rec = !!this.recoverable; + for (var key in this) { + if (this.hasOwnProperty(key) && (typeof key === 'undefined' ? 'undefined' : _typeof(key)) === 'object') { + this[key] = undefined; + } + } + this.recoverable = rec; + } + }; + // track this instance so we can `destroy()` it once we deem it superfluous and ready for garbage collection! + this.__error_infos.push(pei); + return pei; + }; + + function lex() { + var token = lexer.lex(); + // if token isn't its numeric value, convert + if (typeof token !== 'number') { + token = self.symbols_[token] || token; + } + + return token || EOF; + } + + var state, action, r, t; + var yyval = { + $: true, + _$: undefined, + yy: sharedState_yy + }; + var p; + var yyrulelen; + var this_production; + var newState; + var retval = false; + + try { + this.__reentrant_call_depth++; + + lexer.setInput(input, sharedState_yy); + + vstack[sp] = null; + sstack[sp] = 0; + stack[sp] = 0; + ++sp; + + if (this.pre_parse) { + this.pre_parse.call(this, sharedState_yy); + } + if (sharedState_yy.pre_parse) { + sharedState_yy.pre_parse.call(this, sharedState_yy); + } + + newState = sstack[sp - 1]; + for (;;) { + // retrieve state number from top of stack + state = newState; // sstack[sp - 1]; + + // use default actions if available + if (this.defaultActions[state]) { + action = 2; + newState = this.defaultActions[state]; + } else { + // The single `==` condition below covers both these `===` comparisons in a single + // operation: + // + // if (symbol === null || typeof symbol === 'undefined') ... 
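                    // (A falsy `symbol` never clashes with a real token id: `EOF` is 1, `TERROR` is 2,
                    //  `lex()` maps a missing token to `EOF`, and `symbol` is reset to 0 right after
                    //  each shift, so `!symbol` simply means "no look-ahead token fetched yet".)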
+ if (!symbol) { + symbol = lex(); + } + // read action for current state and first input + t = table[state] && table[state][symbol] || NO_ACTION; + newState = t[1]; + action = t[0]; + + // handle parse error + if (!action) { + var errStr; + var errSymbolDescr = this.describeSymbol(symbol) || symbol; + var expected = this.collect_expected_token_set(state); + + // Report error + if (typeof lexer.yylineno === 'number') { + errStr = 'Parse error on line ' + (lexer.yylineno + 1) + ': '; + } else { + errStr = 'Parse error: '; + } + if (typeof lexer.showPosition === 'function') { + errStr += '\n' + lexer.showPosition(79 - 10, 10) + '\n'; + } + if (expected.length) { + errStr += 'Expecting ' + expected.join(', ') + ', got unexpected ' + errSymbolDescr; + } else { + errStr += 'Unexpected ' + errSymbolDescr; + } + // we cannot recover from the error! + p = this.constructParseErrorInfo(errStr, null, expected, false); + retval = this.parseError(p.errStr, p, this.JisonParserError); + break; + } + } + + switch (action) { + // catch misc. parse failures: + default: + // this shouldn't happen, unless resolve defaults are off + if (action instanceof Array) { + p = this.constructParseErrorInfo('Parse Error: multiple actions possible at state: ' + state + ', token: ' + symbol, null, null, false); + retval = this.parseError(p.errStr, p, this.JisonParserError); + break; + } + // Another case of better safe than sorry: in case state transitions come out of another error recovery process + // or a buggy LUT (LookUp Table): + p = this.constructParseErrorInfo('Parsing halted. No viable error recovery approach available due to internal system failure.', null, null, false); + retval = this.parseError(p.errStr, p, this.JisonParserError); + break; + + // shift: + case 1: + stack[sp] = symbol; + vstack[sp] = lexer.yytext; + + sstack[sp] = newState; // push state + + ++sp; + symbol = 0; + + // Pick up the lexer details for the current symbol as that one is not 'look-ahead' any more: + + + continue; + + // reduce: + case 2: + this_production = this.productions_[newState - 1]; // `this.productions_[]` is zero-based indexed while states start from 1 upwards... + yyrulelen = this_production[1]; + + r = this.performAction.call(yyval, newState, sp - 1, vstack); + + if (typeof r !== 'undefined') { + retval = r; + break; + } + + // pop off stack + sp -= yyrulelen; + + // don't overwrite the `symbol` variable: use a local var to speed things up: + var ntsymbol = this_production[0]; // push nonterminal (reduce) + stack[sp] = ntsymbol; + vstack[sp] = yyval.$; + + // goto new state = table[STATE][NONTERMINAL] + newState = table[sstack[sp - 1]][ntsymbol]; + sstack[sp] = newState; + ++sp; + + continue; + + // accept: + case 3: + retval = true; + // Return the `$accept` rule's `$$` result, if available. + // + // Also note that JISON always adds this top-most `$accept` rule (with implicit, + // default, action): + // + // $accept: $end + // %{ $$ = $1; @$ = @1; %} + // + // which, combined with the parse kernel's `$accept` state behaviour coded below, + // will produce the `$$` value output of the rule as the parse result, + // IFF that result is *not* `undefined`. (See also the parser kernel code.) 
+ // + // In code: + // + // %{ + // @$ = @1; // if location tracking support is included + // if (typeof $1 !== 'undefined') + // return $1; + // else + // return true; // the default parse result if the rule actions don't produce anything + // %} + sp--; + if (typeof vstack[sp] !== 'undefined') { + retval = vstack[sp]; + } + + break; + } + + // break out of loop: we accept or fail with error + break; + } + } catch (ex) { + // report exceptions through the parseError callback too, but keep the exception intact + // if it is a known parser or lexer error which has been thrown by parseError() already: + if (ex instanceof this.JisonParserError) { + throw ex; + } else if (lexer && typeof lexer.JisonLexerError === 'function' && ex instanceof lexer.JisonLexerError) { + throw ex; + } else { + p = this.constructParseErrorInfo('Parsing aborted due to exception.', ex, null, false); + retval = this.parseError(p.errStr, p, this.JisonParserError); + } + } finally { + retval = this.cleanupAfterParse(retval, true, true); + this.__reentrant_call_depth--; + } // /finally + + return retval; + } +}; +parser$1.originalParseError = parser$1.parseError; +parser$1.originalQuoteName = parser$1.quoteName; + +/* lexer generated by jison-lex 0.6.1-205 */ + +/* + * Returns a Lexer object of the following structure: + * + * Lexer: { + * yy: {} The so-called "shared state" or rather the *source* of it; + * the real "shared state" `yy` passed around to + * the rule actions, etc. is a direct reference! + * + * This "shared context" object was passed to the lexer by way of + * the `lexer.setInput(str, yy)` API before you may use it. + * + * This "shared context" object is passed to the lexer action code in `performAction()` + * so userland code in the lexer actions may communicate with the outside world + * and/or other lexer rules' actions in more or less complex ways. + * + * } + * + * Lexer.prototype: { + * EOF: 1, + * ERROR: 2, + * + * yy: The overall "shared context" object reference. + * + * JisonLexerError: function(msg, hash), + * + * performAction: function lexer__performAction(yy, yyrulenumber, YY_START), + * + * The function parameters and `this` have the following value/meaning: + * - `this` : reference to the `lexer` instance. + * `yy_` is an alias for `this` lexer instance reference used internally. + * + * - `yy` : a reference to the `yy` "shared state" object which was passed to the lexer + * by way of the `lexer.setInput(str, yy)` API before. + * + * Note: + * The extra arguments you specified in the `%parse-param` statement in your + * **parser** grammar definition file are passed to the lexer via this object + * reference as member variables. + * + * - `yyrulenumber` : index of the matched lexer rule (regex), used internally. + * + * - `YY_START`: the current lexer "start condition" state. + * + * parseError: function(str, hash, ExceptionClass), + * + * constructLexErrorInfo: function(error_message, is_recoverable), + * Helper function. + * Produces a new errorInfo 'hash object' which can be passed into `parseError()`. + * See it's use in this lexer kernel in many places; example usage: + * + * var infoObj = lexer.constructParseErrorInfo('fail!', true); + * var retVal = lexer.parseError(infoObj.errStr, infoObj, lexer.JisonLexerError); + * + * options: { ... lexer %options ... }, + * + * lex: function(), + * Produce one token of lexed input, which was passed in earlier via the `lexer.setInput()` API. 
+ * You MAY use the additional `args...` parameters as per `%parse-param` spec of the **lexer** grammar: + * these extra `args...` are added verbatim to the `yy` object reference as member variables. + * + * WARNING: + * Lexer's additional `args...` parameters (via lexer's `%parse-param`) MAY conflict with + * any attributes already added to `yy` by the **parser** or the jison run-time; + * when such a collision is detected an exception is thrown to prevent the generated run-time + * from silently accepting this confusing and potentially hazardous situation! + * + * cleanupAfterLex: function(do_not_nuke_errorinfos), + * Helper function. + * + * This helper API is invoked when the **parse process** has completed: it is the responsibility + * of the **parser** (or the calling userland code) to invoke this method once cleanup is desired. + * + * This helper may be invoked by user code to ensure the internal lexer gets properly garbage collected. + * + * setInput: function(input, [yy]), + * + * + * input: function(), + * + * + * unput: function(str), + * + * + * more: function(), + * + * + * reject: function(), + * + * + * less: function(n), + * + * + * pastInput: function(n), + * + * + * upcomingInput: function(n), + * + * + * showPosition: function(), + * + * + * test_match: function(regex_match_array, rule_index), + * + * + * next: function(), + * + * + * begin: function(condition), + * + * + * pushState: function(condition), + * + * + * popState: function(), + * + * + * topState: function(), + * + * + * _currentRules: function(), + * + * + * stateStackSize: function(), + * + * + * performAction: function(yy, yy_, yyrulenumber, YY_START), + * + * + * rules: [...], + * + * + * conditions: {associative list: name ==> set}, + * } + * + * + * token location info (`yylloc`): { + * first_line: n, + * last_line: n, + * first_column: n, + * last_column: n, + * range: [start_number, end_number] + * (where the numbers are indexes into the input string, zero-based) + * } + * + * --- + * + * The `parseError` function receives a 'hash' object with these members for lexer errors: + * + * { + * text: (matched text) + * token: (the produced terminal token, if any) + * token_id: (the produced terminal token numeric ID, if any) + * line: (yylineno) + * loc: (yylloc) + * recoverable: (boolean: TRUE when the parser MAY have an error recovery rule + * available for this particular error) + * yy: (object: the current parser internal "shared state" `yy` + * as is also available in the rule actions; this can be used, + * for instance, for advanced error analysis and reporting) + * lexer: (reference to the current lexer instance used by the parser) + * } + * + * while `this` will reference the current lexer instance. + * + * When `parseError` is invoked by the lexer, the default implementation will + * attempt to invoke `yy.parser.parseError()`; when this callback is not provided + * it will try to invoke `yy.parseError()` instead. When that callback is also not + * provided, a `JisonLexerError` exception will be thrown containing the error + * message and `hash`, as constructed by the `constructLexErrorInfo()` API. + * + * Note that the lexer's `JisonLexerError` error class is passed via the + * `ExceptionClass` argument, which is invoked to construct the exception + * instance to be thrown, so technically `parseError` will throw the object + * produced by the `new ExceptionClass(str, hash)` JavaScript expression. 
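 *
 * For example, a minimal error hook can be supplied through the shared `yy`
 * object handed to `setInput()` (illustrative sketch only; the logging body is
 * an assumption and not part of the generated kernel):
 *
 *     var yy = {
 *         parseError: function (str, hash, ExceptionClass) {
 *             console.error('lex error on line', hash.line + 1, ':', str);
 *             if (!hash.recoverable) {
 *                 throw new ExceptionClass(str, hash);
 *             }
 *             // returning nothing lets the lexer fall back to its ERROR token
 *         }
 *     };
 *     lexer.setInput(input, yy);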
+ * + * --- + * + * You can specify lexer options by setting / modifying the `.options` object of your Lexer instance. + * These options are available: + * + * (Options are permanent.) + * + * yy: { + * parseError: function(str, hash, ExceptionClass) + * optional: overrides the default `parseError` function. + * } + * + * lexer.options: { + * pre_lex: function() + * optional: is invoked before the lexer is invoked to produce another token. + * `this` refers to the Lexer object. + * post_lex: function(token) { return token; } + * optional: is invoked when the lexer has produced a token `token`; + * this function can override the returned token value by returning another. + * When it does not return any (truthy) value, the lexer will return + * the original `token`. + * `this` refers to the Lexer object. + * + * WARNING: the next set of options are not meant to be changed. They echo the abilities of + * the lexer as per when it was compiled! + * + * ranges: boolean + * optional: `true` ==> token location info will include a .range[] member. + * flex: boolean + * optional: `true` ==> flex-like lexing behaviour where the rules are tested + * exhaustively to find the longest match. + * backtrack_lexer: boolean + * optional: `true` ==> lexer regexes are tested in order and for invoked; + * the lexer terminates the scan when a token is returned by the action code. + * xregexp: boolean + * optional: `true` ==> lexer rule regexes are "extended regex format" requiring the + * `XRegExp` library. When this %option has not been specified at compile time, all lexer + * rule regexes have been written as standard JavaScript RegExp expressions. + * } + */ + +var lexer$1 = function () { + /** + * See also: + * http://stackoverflow.com/questions/1382107/whats-a-good-way-to-extend-error-in-javascript/#35881508 + * but we keep the prototype.constructor and prototype.name assignment lines too for compatibility + * with userland code which might access the derived class in a 'classic' way. + * + * @public + * @constructor + * @nocollapse + */ + function JisonLexerError(msg, hash) { + Object.defineProperty(this, 'name', { + enumerable: false, + writable: false, + value: 'JisonLexerError' + }); + + if (msg == null) msg = '???'; + + Object.defineProperty(this, 'message', { + enumerable: false, + writable: true, + value: msg + }); + + this.hash = hash; + var stacktrace; + + if (hash && hash.exception instanceof Error) { + var ex2 = hash.exception; + this.message = ex2.message || msg; + stacktrace = ex2.stack; + } + + if (!stacktrace) { + if (Error.hasOwnProperty('captureStackTrace')) { + // V8 + Error.captureStackTrace(this, this.constructor); + } else { + stacktrace = new Error(msg).stack; + } + } + + if (stacktrace) { + Object.defineProperty(this, 'stack', { + enumerable: false, + writable: false, + value: stacktrace + }); + } + } + + if (typeof Object.setPrototypeOf === 'function') { + Object.setPrototypeOf(JisonLexerError.prototype, Error.prototype); + } else { + JisonLexerError.prototype = Object.create(Error.prototype); + } + + JisonLexerError.prototype.constructor = JisonLexerError; + JisonLexerError.prototype.name = 'JisonLexerError'; + + var lexer = { + + // Code Generator Information Report + // --------------------------------- + // + // Options: + // + // backtracking: .................... false + // location.ranges: ................. true + // location line+column tracking: ... true + // + // + // Forwarded Parser Analysis flags: + // + // uses yyleng: ..................... 
false + // uses yylineno: ................... false + // uses yytext: ..................... false + // uses yylloc: ..................... false + // uses lexer values: ............... true / true + // location tracking: ............... false + // location assignment: ............. false + // + // + // Lexer Analysis flags: + // + // uses yyleng: ..................... ??? + // uses yylineno: ................... ??? + // uses yytext: ..................... ??? + // uses yylloc: ..................... ??? + // uses ParseError API: ............. ??? + // uses yyerror: .................... ??? + // uses location tracking & editing: ??? + // uses more() API: ................. ??? + // uses unput() API: ................ ??? + // uses reject() API: ............... ??? + // uses less() API: ................. ??? + // uses display APIs pastInput(), upcomingInput(), showPosition(): + // ............................. ??? + // uses describeYYLLOC() API: ....... ??? + // + // --------- END OF REPORT ----------- + + EOF: 1, + ERROR: 2, + + // JisonLexerError: JisonLexerError, /// <-- injected by the code generator + + // options: {}, /// <-- injected by the code generator + + // yy: ..., /// <-- injected by setInput() + + __currentRuleSet__: null, /// INTERNAL USE ONLY: internal rule set cache for the current lexer state + + __error_infos: [], /// INTERNAL USE ONLY: the set of lexErrorInfo objects created since the last cleanup + __decompressed: false, /// INTERNAL USE ONLY: mark whether the lexer instance has been 'unfolded' completely and is now ready for use + done: false, /// INTERNAL USE ONLY + _backtrack: false, /// INTERNAL USE ONLY + _input: '', /// INTERNAL USE ONLY + _more: false, /// INTERNAL USE ONLY + _signaled_error_token: false, /// INTERNAL USE ONLY + conditionStack: [], /// INTERNAL USE ONLY; managed via `pushState()`, `popState()`, `topState()` and `stateStackSize()` + match: '', /// READ-ONLY EXTERNAL ACCESS - ADVANCED USE ONLY: tracks input which has been matched so far for the lexer token under construction. `match` is identical to `yytext` except that this one still contains the matched input string after `lexer.performAction()` has been invoked, where userland code MAY have changed/replaced the `yytext` value entirely! + matched: '', /// READ-ONLY EXTERNAL ACCESS - ADVANCED USE ONLY: tracks entire input which has been matched so far + matches: false, /// READ-ONLY EXTERNAL ACCESS - ADVANCED USE ONLY: tracks RE match result for last (successful) match attempt + yytext: '', /// ADVANCED USE ONLY: tracks input which has been matched so far for the lexer token under construction; this value is transferred to the parser as the 'token value' when the parser consumes the lexer token produced through a call to the `lex()` API. + offset: 0, /// READ-ONLY EXTERNAL ACCESS - ADVANCED USE ONLY: tracks the 'cursor position' in the input string, i.e. the number of characters matched so far + yyleng: 0, /// READ-ONLY EXTERNAL ACCESS - ADVANCED USE ONLY: length of matched input for the token under construction (`yytext`) + yylineno: 0, /// READ-ONLY EXTERNAL ACCESS - ADVANCED USE ONLY: 'line number' at which the token under construction is located + yylloc: null, /// READ-ONLY EXTERNAL ACCESS - ADVANCED USE ONLY: tracks location info (lines + columns) for the token under construction + + /** + * INTERNAL USE: construct a suitable error info hash object instance for `parseError`. 
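+ *
+ * Illustrative sketch only -- the same pattern the kernel itself uses in e.g. `yyerror()` and `reject()`:
+ *
+ *     var p = this.constructLexErrorInfo('Lexical error: unexpected input.', true);
+ *     return this.parseError(p.errStr, p, this.JisonLexerError) || this.ERROR;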
+ * + * @public + * @this {RegExpLexer} + */ + constructLexErrorInfo: function lexer_constructLexErrorInfo(msg, recoverable, show_input_position) { + msg = '' + msg; + + // heuristic to determine if the error message already contains a (partial) source code dump + // as produced by either `showPosition()` or `prettyPrintRange()`: + if (show_input_position == undefined) { + show_input_position = !(msg.indexOf('\n') > 0 && msg.indexOf('^') > 0); + } + + if (this.yylloc && show_input_position) { + if (typeof this.prettyPrintRange === 'function') { + var pretty_src = this.prettyPrintRange(this.yylloc); + + if (!/\n\s*$/.test(msg)) { + msg += '\n'; + } + + msg += '\n Erroneous area:\n' + this.prettyPrintRange(this.yylloc); + } else if (typeof this.showPosition === 'function') { + var pos_str = this.showPosition(); + + if (pos_str) { + if (msg.length && msg[msg.length - 1] !== '\n' && pos_str[0] !== '\n') { + msg += '\n' + pos_str; + } else { + msg += pos_str; + } + } + } + } + + /** @constructor */ + var pei = { + errStr: msg, + recoverable: !!recoverable, + text: this.match, // This one MAY be empty; userland code should use the `upcomingInput` API to obtain more text which follows the 'lexer cursor position'... + token: null, + line: this.yylineno, + loc: this.yylloc, + yy: this.yy, + lexer: this, + + /** + * and make sure the error info doesn't stay due to potential + * ref cycle via userland code manipulations. + * These would otherwise all be memory leak opportunities! + * + * Note that only array and object references are nuked as those + * constitute the set of elements which can produce a cyclic ref. + * The rest of the members is kept intact as they are harmless. + * + * @public + * @this {LexErrorInfo} + */ + destroy: function destructLexErrorInfo() { + // remove cyclic references added to error info: + // info.yy = null; + // info.lexer = null; + // ... + var rec = !!this.recoverable; + + for (var key in this) { + if (this.hasOwnProperty(key) && (typeof key === 'undefined' ? 'undefined' : _typeof(key)) === 'object') { + this[key] = undefined; + } + } + + this.recoverable = rec; + } + }; + + // track this instance so we can `destroy()` it once we deem it superfluous and ready for garbage collection! + this.__error_infos.push(pei); + + return pei; + }, + + /** + * handler which is invoked when a lexer error occurs. + * + * @public + * @this {RegExpLexer} + */ + parseError: function lexer_parseError(str, hash, ExceptionClass) { + if (!ExceptionClass) { + ExceptionClass = this.JisonLexerError; + } + + if (this.yy) { + if (this.yy.parser && typeof this.yy.parser.parseError === 'function') { + return this.yy.parser.parseError.call(this, str, hash, ExceptionClass) || this.ERROR; + } else if (typeof this.yy.parseError === 'function') { + return this.yy.parseError.call(this, str, hash, ExceptionClass) || this.ERROR; + } + } + + throw new ExceptionClass(str, hash); + }, + + /** + * method which implements `yyerror(str, ...args)` functionality for use inside lexer actions. 
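+ *
+ * Illustrative sketch of a call from inside lexer rule action code (hypothetical rule;
+ * `yy_` is the alias for the lexer instance which the generated `performAction()` provides):
+ *
+ *     if (!isFinite(+yy_.yytext)) {
+ *         return yy_.yyerror('cannot parse number: ' + yy_.yytext);
+ *     }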
+ * + * @public + * @this {RegExpLexer} + */ + yyerror: function yyError(str /*, ...args */) { + var lineno_msg = ''; + + if (this.yylloc) { + lineno_msg = ' on line ' + (this.yylineno + 1); + } + + var p = this.constructLexErrorInfo('Lexical error' + lineno_msg + ': ' + str, this.options.lexerErrorsAreRecoverable); + + // Add any extra args to the hash under the name `extra_error_attributes`: + var args = Array.prototype.slice.call(arguments, 1); + + if (args.length) { + p.extra_error_attributes = args; + } + + return this.parseError(p.errStr, p, this.JisonLexerError) || this.ERROR; + }, + + /** + * final cleanup function for when we have completed lexing the input; + * make it an API so that external code can use this one once userland + * code has decided it's time to destroy any lingering lexer error + * hash object instances and the like: this function helps to clean + * up these constructs, which *may* carry cyclic references which would + * otherwise prevent the instances from being properly and timely + * garbage-collected, i.e. this function helps prevent memory leaks! + * + * @public + * @this {RegExpLexer} + */ + cleanupAfterLex: function lexer_cleanupAfterLex(do_not_nuke_errorinfos) { + // prevent lingering circular references from causing memory leaks: + this.setInput('', {}); + + // nuke the error hash info instances created during this run. + // Userland code must COPY any data/references + // in the error hash instance(s) it is more permanently interested in. + if (!do_not_nuke_errorinfos) { + for (var i = this.__error_infos.length - 1; i >= 0; i--) { + var el = this.__error_infos[i]; + + if (el && typeof el.destroy === 'function') { + el.destroy(); + } + } + + this.__error_infos.length = 0; + } + + return this; + }, + + /** + * clear the lexer token context; intended for internal use only + * + * @public + * @this {RegExpLexer} + */ + clear: function lexer_clear() { + this.yytext = ''; + this.yyleng = 0; + this.match = ''; + + // - DO NOT reset `this.matched` + this.matches = false; + + this._more = false; + this._backtrack = false; + var col = this.yylloc ? this.yylloc.last_column : 0; + + this.yylloc = { + first_line: this.yylineno + 1, + first_column: col, + last_line: this.yylineno + 1, + last_column: col, + range: [this.offset, this.offset] + }; + }, + + /** + * resets the lexer, sets new input + * + * @public + * @this {RegExpLexer} + */ + setInput: function lexer_setInput(input, yy) { + this.yy = yy || this.yy || {}; + + // also check if we've fully initialized the lexer instance, + // including expansion work to be done to go from a loaded + // lexer to a usable lexer: + if (!this.__decompressed) { + // step 1: decompress the regex list: + var rules = this.rules; + + for (var i = 0, len = rules.length; i < len; i++) { + var rule_re = rules[i]; + + // compression: is the RE an xref to another RE slot in the rules[] table? + if (typeof rule_re === 'number') { + rules[i] = rules[rule_re]; + } + } + + // step 2: unfold the conditions[] set to make these ready for use: + var conditions = this.conditions; + + for (var k in conditions) { + var spec = conditions[k]; + var rule_ids = spec.rules; + var len = rule_ids.length; + var rule_regexes = new Array(len + 1); // slot 0 is unused; we use a 1-based index approach here to keep the hottest code in `lexer_next()` fast and simple! 
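+ // companion table holding the original rule indexes for this condition set (also 1-based):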
+ var rule_new_ids = new Array(len + 1);
+
+ for (var i = 0; i < len; i++) {
+ var idx = rule_ids[i];
+ var rule_re = rules[idx];
+ rule_regexes[i + 1] = rule_re;
+ rule_new_ids[i + 1] = idx;
+ }
+
+ spec.rules = rule_new_ids;
+ spec.__rule_regexes = rule_regexes;
+ spec.__rule_count = len;
+ }
+
+ this.__decompressed = true;
+ }
+
+ this._input = input || '';
+ this.clear();
+ this._signaled_error_token = false;
+ this.done = false;
+ this.yylineno = 0;
+ this.matched = '';
+ this.conditionStack = ['INITIAL'];
+ this.__currentRuleSet__ = null;
+
+ this.yylloc = {
+ first_line: 1,
+ first_column: 0,
+ last_line: 1,
+ last_column: 0,
+ range: [0, 0]
+ };
+
+ this.offset = 0;
+ return this;
+ },
+
+ /**
+ * edit the remaining input via user-specified callback.
+ * This can be used to forward-adjust the input-to-parse,
+ * e.g. inserting macro expansions and the like in the
+ * input which has yet to be lexed.
+ * The behaviour of this API contrasts with the `unput()` et al
+ * APIs as those act on the *consumed* input, while this
+ * one allows one to manipulate the future, without impacting
+ * the current `yyloc` cursor location or any history.
+ *
+ * Use this API to help implement C-preprocessor-like
+ * `#include` statements, etc.
+ *
+ * The provided callback must be synchronous and is
+ * expected to return the edited input (string).
+ *
+ * The `cpsArg` argument value is passed to the callback
+ * as-is.
+ *
+ * `callback` interface:
+ * `function callback(input, cpsArg)`
+ *
+ * - `input` will carry the remaining-input-to-lex string
+ * from the lexer.
+ * - `cpsArg` is `cpsArg` passed into this API.
+ *
+ * The `this` reference for the callback will be set to
+ * reference this lexer instance so that userland code
+ * in the callback can easily and quickly access any lexer
+ * API.
+ *
+ * When the callback returns a non-string-type falsey value,
+ * we assume the callback did not edit the input and we
+ * will be using the input as-is.
+ *
+ * When the callback returns a non-string-type value, it
+ * is converted to a string for lexing via the `"" + retval`
+ * operation. (See also why: http://2ality.com/2012/03/converting-to-string.html
+ * -- that way any returned object's `toValue()` and `toString()`
+ * methods will be invoked in a proper/desirable order.)
+ *
+ * @public
+ * @this {RegExpLexer}
+ */
+ editRemainingInput: function lexer_editRemainingInput(callback, cpsArg) {
+ var rv = callback.call(this, this._input, cpsArg);
+
+ if (typeof rv !== 'string') {
+ if (rv) {
+ this._input = '' + rv;
+ }
+ // else: keep `this._input` as is.
+ } else {
+ this._input = rv;
+ }
+
+ return this;
+ },
+
+ /**
+ * consumes and returns one char from the input
+ *
+ * @public
+ * @this {RegExpLexer}
+ */
+ input: function lexer_input() {
+ if (!this._input) {
+ //this.done = true; -- don't set `done` as we want the lex()/next() API to be able to produce one custom EOF token match after this anyhow. (lexer can match special <<EOF>> tokens and perform user action code for a <<EOF>> match, but only does so *once*)
+ return null;
+ }
+
+ var ch = this._input[0];
+ this.yytext += ch;
+ this.yyleng++;
+ this.offset++;
+ this.match += ch;
+ this.matched += ch;
+
+ // Count the linenumber up when we hit the LF (or a stand-alone CR).
+ // On CRLF, the linenumber is incremented when you fetch the CR or the CRLF combo
+ // and we advance immediately past the LF as well, returning both together as if
+ // it was all a single 'character' only.
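+ //
+ // Example: with remaining input '\r\nfoo', a single input() call returns the two-character
+ // string '\r\n', bumps `yylineno` by one and resets the column tracking to the next line.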
+ var slice_len = 1; + + var lines = false; + + if (ch === '\n') { + lines = true; + } else if (ch === '\r') { + lines = true; + var ch2 = this._input[1]; + + if (ch2 === '\n') { + slice_len++; + ch += ch2; + this.yytext += ch2; + this.yyleng++; + this.offset++; + this.match += ch2; + this.matched += ch2; + this.yylloc.range[1]++; + } + } + + if (lines) { + this.yylineno++; + this.yylloc.last_line++; + this.yylloc.last_column = 0; + } else { + this.yylloc.last_column++; + } + + this.yylloc.range[1]++; + this._input = this._input.slice(slice_len); + return ch; + }, + + /** + * unshifts one char (or an entire string) into the input + * + * @public + * @this {RegExpLexer} + */ + unput: function lexer_unput(ch) { + var len = ch.length; + var lines = ch.split(/(?:\r\n?|\n)/g); + this._input = ch + this._input; + this.yytext = this.yytext.substr(0, this.yytext.length - len); + this.yyleng = this.yytext.length; + this.offset -= len; + this.match = this.match.substr(0, this.match.length - len); + this.matched = this.matched.substr(0, this.matched.length - len); + + if (lines.length > 1) { + this.yylineno -= lines.length - 1; + this.yylloc.last_line = this.yylineno + 1; + + // Get last entirely matched line into the `pre_lines[]` array's + // last index slot; we don't mind when other previously + // matched lines end up in the array too. + var pre = this.match; + + var pre_lines = pre.split(/(?:\r\n?|\n)/g); + + if (pre_lines.length === 1) { + pre = this.matched; + pre_lines = pre.split(/(?:\r\n?|\n)/g); + } + + this.yylloc.last_column = pre_lines[pre_lines.length - 1].length; + } else { + this.yylloc.last_column -= len; + } + + this.yylloc.range[1] = this.yylloc.range[0] + this.yyleng; + this.done = false; + return this; + }, + + /** + * cache matched text and append it on next action + * + * @public + * @this {RegExpLexer} + */ + more: function lexer_more() { + this._more = true; + return this; + }, + + /** + * signal the lexer that this rule fails to match the input, so the + * next matching rule (regex) should be tested instead. + * + * @public + * @this {RegExpLexer} + */ + reject: function lexer_reject() { + if (this.options.backtrack_lexer) { + this._backtrack = true; + } else { + // when the `parseError()` call returns, we MUST ensure that the error is registered. + // We accomplish this by signaling an 'error' token to be produced for the current + // `.lex()` run. + var lineno_msg = ''; + + if (this.yylloc) { + lineno_msg = ' on line ' + (this.yylineno + 1); + } + + var p = this.constructLexErrorInfo('Lexical error' + lineno_msg + ': You can only invoke reject() in the lexer when the lexer is of the backtracking persuasion (options.backtrack_lexer = true).', false); + + this._signaled_error_token = this.parseError(p.errStr, p, this.JisonLexerError) || this.ERROR; + } + + return this; + }, + + /** + * retain first n characters of the match + * + * @public + * @this {RegExpLexer} + */ + less: function lexer_less(n) { + return this.unput(this.match.slice(n)); + }, + + /** + * return (part of the) already matched input, i.e. for error + * messages. + * + * Limit the returned string length to `maxSize` (default: 20). + * + * Limit the returned string to the `maxLines` number of lines of + * input (default: 1). + * + * Negative limit values equal *unlimited*. 
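+ *
+ * A small usage sketch (assuming an already-initialized lexer instance named `lexer`):
+ *
+ *     // grab at most 2 lines / 60 characters of the input consumed so far, e.g. for a custom report:
+ *     var seen = lexer.pastInput(60, 2);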
+ *
+ * @public
+ * @this {RegExpLexer}
+ */
+ pastInput: function lexer_pastInput(maxSize, maxLines) {
+ var past = this.matched.substring(0, this.matched.length - this.match.length);
+
+ if (maxSize < 0) maxSize = past.length;else if (!maxSize) maxSize = 20;
+
+ if (maxLines < 0) maxLines = past.length; // can't ever have more input lines than this!
+ else if (!maxLines) maxLines = 1;
+
+ // `substr` anticipation: treat \r\n as a single character and take a little
+ // more than necessary so that we can still properly check against maxSize
+ // after we've transformed and limited the newLines in here:
+ past = past.substr(-maxSize * 2 - 2);
+
+ // now that we have a significantly reduced string to process, transform the newlines
+ // and chop them, then limit them:
+ var a = past.replace(/\r\n|\r/g, '\n').split('\n');
+
+ a = a.slice(-maxLines);
+ past = a.join('\n');
+
+ // When, after limiting to maxLines, we still have too much to return,
+ // do add an ellipsis prefix...
+ if (past.length > maxSize) {
+ past = '...' + past.substr(-maxSize);
+ }
+
+ return past;
+ },
+
+ /**
+ * return (part of the) upcoming input, i.e. for error messages.
+ *
+ * Limit the returned string length to `maxSize` (default: 20).
+ *
+ * Limit the returned string to the `maxLines` number of lines of input (default: 1).
+ *
+ * Negative limit values equal *unlimited*.
+ *
+ * > ### NOTE ###
+ * >
+ * > *"upcoming input"* is defined as the whole of both
+ * > the *currently lexed* input, together with any remaining input
+ * > following that. *"currently lexed"* input is the input
+ * > already recognized by the lexer but not yet returned with
+ * > the lexer token. This happens when you are invoking this API
+ * > from inside any lexer rule action code block.
+ * >
+ *
+ * @public
+ * @this {RegExpLexer}
+ */
+ upcomingInput: function lexer_upcomingInput(maxSize, maxLines) {
+ var next = this.match;
+
+ if (maxSize < 0) maxSize = next.length + this._input.length;else if (!maxSize) maxSize = 20;
+
+ if (maxLines < 0) maxLines = maxSize; // can't ever have more input lines than this!
+ else if (!maxLines) maxLines = 1;
+
+ // `substring` anticipation: treat \r\n as a single character and take a little
+ // more than necessary so that we can still properly check against maxSize
+ // after we've transformed and limited the newLines in here:
+ if (next.length < maxSize * 2 + 2) {
+ next += this._input.substring(0, maxSize * 2 + 2); // substring is faster on Chrome/V8
+ }
+
+ // now that we have a significantly reduced string to process, transform the newlines
+ // and chop them, then limit them:
+ var a = next.replace(/\r\n|\r/g, '\n').split('\n');
+
+ a = a.slice(0, maxLines);
+ next = a.join('\n');
+
+ // When, after limiting to maxLines, we still have too much to return,
+ // do add an ellipsis postfix...
+ if (next.length > maxSize) {
+ next = next.substring(0, maxSize) + '...';
+ }
+
+ return next;
+ },
+
+ /**
+ * return a string which displays the character position where the
+ * lexing error occurred, i.e. for error messages
+ *
+ * @public
+ * @this {RegExpLexer}
+ */
+ showPosition: function lexer_showPosition(maxPrefix, maxPostfix) {
+ var pre = this.pastInput(maxPrefix).replace(/\s/g, ' ');
+ var c = new Array(pre.length + 1).join('-');
+ return pre + this.upcomingInput(maxPostfix).replace(/\s/g, ' ') + '\n' + c + '^';
+ },
+
+ /**
+ * return a string which displays the lines & columns of input which are referenced
+ * by the given location info range, plus a few lines of context.
+ *
+ * This function pretty-prints the indicated section of the input, with line numbers
+ * and everything!
+ *
+ * This function is very useful to provide highly readable error reports, while
+ * the location range may be specified in various flexible ways:
+ *
+ * - `loc` is the location info object which references the area which should be
+ * displayed and 'marked up': these lines & columns of text are marked up by `^`
+ * characters below each character in the entire input range.
+ *
+ * - `context_loc` is the *optional* location info object which instructs this
+ * pretty-printer how much *leading* context should be displayed alongside
+ * the area referenced by `loc`. This can help provide context for the displayed
+ * error, etc.
+ *
+ * When this location info is not provided, a default context of 3 lines is
+ * used.
+ *
+ * - `context_loc2` is another *optional* location info object, which serves
+ * a similar purpose to `context_loc`: it specifies the amount of *trailing*
+ * context lines to display in the pretty-print output.
+ *
+ * When this location info is not provided, a default context of 1 line only is
+ * used.
+ *
+ * Special Notes:
+ *
+ * - when the `loc`-indicated range is very large (about 5 lines or more), then
+ * only the first and last few lines of this block are printed while a
+ * `...continued...` message will be printed between them.
+ *
+ * This serves the purpose of not printing a huge amount of text when the `loc`
+ * range happens to be huge: this way a manageable & readable output results
+ * for arbitrarily large ranges.
+ *
+ * - this function can display lines of input which have not yet been lexed.
+ * `prettyPrintRange()` can access the entire input!
+ *
+ * @public
+ * @this {RegExpLexer}
+ */
+ prettyPrintRange: function lexer_prettyPrintRange(loc, context_loc, context_loc2) {
+ var CONTEXT = 3;
+ var CONTEXT_TAIL = 1;
+ var MINIMUM_VISIBLE_NONEMPTY_LINE_COUNT = 2;
+ var input = this.matched + this._input;
+ var lines = input.split('\n');
+
+ //var show_context = (error_size < 5 || context_loc);
+ var l0 = Math.max(1, context_loc ? context_loc.first_line : loc.first_line - CONTEXT);
+
+ var l1 = Math.max(1, context_loc2 ? context_loc2.last_line : loc.last_line + CONTEXT_TAIL);
+ var lineno_display_width = 1 + Math.log10(l1 | 1) | 0;
+ var ws_prefix = new Array(lineno_display_width).join(' ');
+ var nonempty_line_indexes = [];
+
+ var rv = lines.slice(l0 - 1, l1 + 1).map(function injectLineNumber(line, index) {
+ var lno = index + l0;
+ var lno_pfx = (ws_prefix + lno).substr(-lineno_display_width);
+ var rv = lno_pfx + ': ' + line;
+ var errpfx = new Array(lineno_display_width + 1).join('^');
+ var offset = 2 + 1;
+ var len = 0;
+
+ if (lno === loc.first_line) {
+ offset += loc.first_column;
+
+ len = Math.max(2, (lno === loc.last_line ?
loc.last_column : line.length) - loc.first_column + 1); + } else if (lno === loc.last_line) { + len = Math.max(2, loc.last_column + 1); + } else if (lno > loc.first_line && lno < loc.last_line) { + len = Math.max(2, line.length + 1); + } + + if (len) { + var lead = new Array(offset).join('.'); + var mark = new Array(len).join('^'); + rv += '\n' + errpfx + lead + mark; + + if (line.trim().length > 0) { + nonempty_line_indexes.push(index); + } + } + + rv = rv.replace(/\t/g, ' '); + return rv; + }); + + // now make sure we don't print an overly large amount of error area: limit it + // to the top and bottom line count: + if (nonempty_line_indexes.length > 2 * MINIMUM_VISIBLE_NONEMPTY_LINE_COUNT) { + var clip_start = nonempty_line_indexes[MINIMUM_VISIBLE_NONEMPTY_LINE_COUNT - 1] + 1; + var clip_end = nonempty_line_indexes[nonempty_line_indexes.length - MINIMUM_VISIBLE_NONEMPTY_LINE_COUNT] - 1; + var intermediate_line = new Array(lineno_display_width + 1).join(' ') + ' (...continued...)'; + intermediate_line += '\n' + new Array(lineno_display_width + 1).join('-') + ' (---------------)'; + rv.splice(clip_start, clip_end - clip_start + 1, intermediate_line); + } + + return rv.join('\n'); + }, + + /** + * helper function, used to produce a human readable description as a string, given + * the input `yylloc` location object. + * + * Set `display_range_too` to TRUE to include the string character index position(s) + * in the description if the `yylloc.range` is available. + * + * @public + * @this {RegExpLexer} + */ + describeYYLLOC: function lexer_describe_yylloc(yylloc, display_range_too) { + var l1 = yylloc.first_line; + var l2 = yylloc.last_line; + var c1 = yylloc.first_column; + var c2 = yylloc.last_column; + var dl = l2 - l1; + var dc = c2 - c1; + var rv; + + if (dl === 0) { + rv = 'line ' + l1 + ', '; + + if (dc <= 1) { + rv += 'column ' + c1; + } else { + rv += 'columns ' + c1 + ' .. ' + c2; + } + } else { + rv = 'lines ' + l1 + '(column ' + c1 + ') .. ' + l2 + '(column ' + c2 + ')'; + } + + if (yylloc.range && display_range_too) { + var r1 = yylloc.range[0]; + var r2 = yylloc.range[1] - 1; + + if (r2 <= r1) { + rv += ' {String Offset: ' + r1 + '}'; + } else { + rv += ' {String Offset range: ' + r1 + ' .. ' + r2 + '}'; + } + } + + return rv; + }, + + /** + * test the lexed token: return FALSE when not a match, otherwise return token. + * + * `match` is supposed to be an array coming out of a regex match, i.e. `match[0]` + * contains the actually matched text string. 
+ * + * Also move the input cursor forward and update the match collectors: + * + * - `yytext` + * - `yyleng` + * - `match` + * - `matches` + * - `yylloc` + * - `offset` + * + * @public + * @this {RegExpLexer} + */ + test_match: function lexer_test_match(match, indexed_rule) { + var token, lines, backup, match_str, match_str_len; + + if (this.options.backtrack_lexer) { + // save context + backup = { + yylineno: this.yylineno, + + yylloc: { + first_line: this.yylloc.first_line, + last_line: this.yylloc.last_line, + first_column: this.yylloc.first_column, + last_column: this.yylloc.last_column, + range: this.yylloc.range.slice(0) + }, + + yytext: this.yytext, + match: this.match, + matches: this.matches, + matched: this.matched, + yyleng: this.yyleng, + offset: this.offset, + _more: this._more, + _input: this._input, + + //_signaled_error_token: this._signaled_error_token, + yy: this.yy, + + conditionStack: this.conditionStack.slice(0), + done: this.done + }; + } + + match_str = match[0]; + match_str_len = match_str.length; + + // if (match_str.indexOf('\n') !== -1 || match_str.indexOf('\r') !== -1) { + lines = match_str.split(/(?:\r\n?|\n)/g); + + if (lines.length > 1) { + this.yylineno += lines.length - 1; + this.yylloc.last_line = this.yylineno + 1; + this.yylloc.last_column = lines[lines.length - 1].length; + } else { + this.yylloc.last_column += match_str_len; + } + + // } + this.yytext += match_str; + + this.match += match_str; + this.matched += match_str; + this.matches = match; + this.yyleng = this.yytext.length; + this.yylloc.range[1] += match_str_len; + + // previous lex rules MAY have invoked the `more()` API rather than producing a token: + // those rules will already have moved this `offset` forward matching their match lengths, + // hence we must only add our own match length now: + this.offset += match_str_len; + + this._more = false; + this._backtrack = false; + this._input = this._input.slice(match_str_len); + + // calling this method: + // + // function lexer__performAction(yy, yyrulenumber, YY_START) {...} + token = this.performAction.call(this, this.yy, indexed_rule, this.conditionStack[this.conditionStack.length - 1] /* = YY_START */ + ); + + // otherwise, when the action codes are all simple return token statements: + //token = this.simpleCaseActionClusters[indexed_rule]; + + if (this.done && this._input) { + this.done = false; + } + + if (token) { + return token; + } else if (this._backtrack) { + // recover context + for (var k in backup) { + this[k] = backup[k]; + } + + this.__currentRuleSet__ = null; + return false; // rule action called reject() implying the next rule should be tested instead. + } else if (this._signaled_error_token) { + // produce one 'error' token as `.parseError()` in `reject()` + // did not guarantee a failure signal by throwing an exception! + token = this._signaled_error_token; + + this._signaled_error_token = false; + return token; + } + + return false; + }, + + /** + * return next match in input + * + * @public + * @this {RegExpLexer} + */ + next: function lexer_next() { + if (this.done) { + this.clear(); + return this.EOF; + } + + if (!this._input) { + this.done = true; + } + + var token, match, tempMatch, index; + + if (!this._more) { + this.clear(); + } + + var spec = this.__currentRuleSet__; + + if (!spec) { + // Update the ruleset cache as we apparently encountered a state change or just started lexing. 
+ // The cache is set up for fast lookup -- we assume a lexer will switch states much less often than it will + // invoke the `lex()` token-producing API and related APIs, hence caching the set for direct access helps + // speed up those activities a tiny bit. + spec = this.__currentRuleSet__ = this._currentRules(); + + // Check whether a *sane* condition has been pushed before: this makes the lexer robust against + // user-programmer bugs such as https://github.com/zaach/jison-lex/issues/19 + if (!spec || !spec.rules) { + var lineno_msg = ''; + + if (this.options.trackPosition) { + lineno_msg = ' on line ' + (this.yylineno + 1); + } + + var p = this.constructLexErrorInfo('Internal lexer engine error' + lineno_msg + ': The lex grammar programmer pushed a non-existing condition name "' + this.topState() + '"; this is a fatal error and should be reported to the application programmer team!', false); + + // produce one 'error' token until this situation has been resolved, most probably by parse termination! + return this.parseError(p.errStr, p, this.JisonLexerError) || this.ERROR; + } + } + + var rule_ids = spec.rules; + var regexes = spec.__rule_regexes; + var len = spec.__rule_count; + + // Note: the arrays are 1-based, while `len` itself is a valid index, + // hence the non-standard less-or-equal check in the next loop condition! + for (var i = 1; i <= len; i++) { + tempMatch = this._input.match(regexes[i]); + + if (tempMatch && (!match || tempMatch[0].length > match[0].length)) { + match = tempMatch; + index = i; + + if (this.options.backtrack_lexer) { + token = this.test_match(tempMatch, rule_ids[i]); + + if (token !== false) { + return token; + } else if (this._backtrack) { + match = undefined; + continue; // rule action called reject() implying a rule MISmatch. + } else { + // else: this is a lexer rule which consumes input without producing a token (e.g. whitespace) + return false; + } + } else if (!this.options.flex) { + break; + } + } + } + + if (match) { + token = this.test_match(match, rule_ids[index]); + + if (token !== false) { + return token; + } + + // else: this is a lexer rule which consumes input without producing a token (e.g. whitespace) + return false; + } + + if (!this._input) { + this.done = true; + this.clear(); + return this.EOF; + } else { + var lineno_msg = ''; + + if (this.options.trackPosition) { + lineno_msg = ' on line ' + (this.yylineno + 1); + } + + var p = this.constructLexErrorInfo('Lexical error' + lineno_msg + ': Unrecognized text.', this.options.lexerErrorsAreRecoverable); + + var pendingInput = this._input; + var activeCondition = this.topState(); + var conditionStackDepth = this.conditionStack.length; + token = this.parseError(p.errStr, p, this.JisonLexerError) || this.ERROR; + + if (token === this.ERROR) { + // we can try to recover from a lexer error that `parseError()` did not 'recover' for us + // by moving forward at least one character at a time IFF the (user-specified?) `parseError()` + // has not consumed/modified any pending input or changed state in the error handler: + if (!this.matches && // and make sure the input has been modified/consumed ... + pendingInput === this._input && // ...or the lexer state has been modified significantly enough + // to merit a non-consuming error handling action right now. 
+ activeCondition === this.topState() && conditionStackDepth === this.conditionStack.length) { + this.input(); + } + } + + return token; + } + }, + + /** + * return next match that has a token + * + * @public + * @this {RegExpLexer} + */ + lex: function lexer_lex() { + var r; + + // allow the PRE/POST handlers set/modify the return token for maximum flexibility of the generated lexer: + if (typeof this.options.pre_lex === 'function') { + r = this.options.pre_lex.call(this); + } + + while (!r) { + r = this.next(); + } + + if (typeof this.options.post_lex === 'function') { + // (also account for a userdef function which does not return any value: keep the token as is) + r = this.options.post_lex.call(this, r) || r; + } + + return r; + }, + + /** + * backwards compatible alias for `pushState()`; + * the latter is symmetrical with `popState()` and we advise to use + * those APIs in any modern lexer code, rather than `begin()`. + * + * @public + * @this {RegExpLexer} + */ + begin: function lexer_begin(condition) { + return this.pushState(condition); + }, + + /** + * activates a new lexer condition state (pushes the new lexer + * condition state onto the condition stack) + * + * @public + * @this {RegExpLexer} + */ + pushState: function lexer_pushState(condition) { + this.conditionStack.push(condition); + this.__currentRuleSet__ = null; + return this; + }, + + /** + * pop the previously active lexer condition state off the condition + * stack + * + * @public + * @this {RegExpLexer} + */ + popState: function lexer_popState() { + var n = this.conditionStack.length - 1; + + if (n > 0) { + this.__currentRuleSet__ = null; + return this.conditionStack.pop(); + } else { + return this.conditionStack[0]; + } + }, + + /** + * return the currently active lexer condition state; when an index + * argument is provided it produces the N-th previous condition state, + * if available + * + * @public + * @this {RegExpLexer} + */ + topState: function lexer_topState(n) { + n = this.conditionStack.length - 1 - Math.abs(n || 0); + + if (n >= 0) { + return this.conditionStack[n]; + } else { + return 'INITIAL'; + } + }, + + /** + * (internal) determine the lexer rule set which is active for the + * currently active lexer condition state + * + * @public + * @this {RegExpLexer} + */ + _currentRules: function lexer__currentRules() { + if (this.conditionStack.length && this.conditionStack[this.conditionStack.length - 1]) { + return this.conditions[this.conditionStack[this.conditionStack.length - 1]]; + } else { + return this.conditions['INITIAL']; + } + }, + + /** + * return the number of states currently on the stack + * + * @public + * @this {RegExpLexer} + */ + stateStackSize: function lexer_stateStackSize() { + return this.conditionStack.length; + }, + + options: { + xregexp: true, + ranges: true, + trackPosition: true, + easy_keyword_rules: true + }, + + JisonLexerError: JisonLexerError, + + performAction: function lexer__performAction(yy, yyrulenumber, YY_START) { + var yy_ = this; + switch (yyrulenumber) { + case 0: + /*! Conditions:: INITIAL */ + /*! Rule:: \s+ */ + /* skip whitespace */ + break; + + case 3: + /*! Conditions:: INITIAL */ + /*! Rule:: \[{ID}\] */ + yy_.yytext = this.matches[1]; + + return 9; + break; + + default: + return this.simpleCaseActionClusters[yyrulenumber]; + } + }, + + simpleCaseActionClusters: { + /*! Conditions:: INITIAL */ + /*! Rule:: {ID} */ + 1: 10, + + /*! Conditions:: INITIAL */ + /*! Rule:: \$end\b */ + 2: 10, + + /*! Conditions:: INITIAL */ + /*! 
Rule:: '{QUOTED_STRING_CONTENT}' */ + 4: 10, + + /*! Conditions:: INITIAL */ + /*! Rule:: "{DOUBLEQUOTED_STRING_CONTENT}" */ + 5: 10, + + /*! Conditions:: INITIAL */ + /*! Rule:: \. */ + 6: 10, + + /*! Conditions:: INITIAL */ + /*! Rule:: \( */ + 7: 4, + + /*! Conditions:: INITIAL */ + /*! Rule:: \) */ + 8: 5, + + /*! Conditions:: INITIAL */ + /*! Rule:: \* */ + 9: 6, + + /*! Conditions:: INITIAL */ + /*! Rule:: \? */ + 10: 7, + + /*! Conditions:: INITIAL */ + /*! Rule:: \| */ + 11: 3, + + /*! Conditions:: INITIAL */ + /*! Rule:: \+ */ + 12: 8, + + /*! Conditions:: INITIAL */ + /*! Rule:: $ */ + 13: 1 + }, + + rules: [ + /* 0: *//^(?:\s+)/, + /* 1: */new XRegExp('^(?:([\\p{Alphabetic}_](?:[\\p{Alphabetic}\\p{Number}_])*))', ''), + /* 2: *//^(?:\$end\b)/, + /* 3: */new XRegExp('^(?:\\[([\\p{Alphabetic}_](?:[\\p{Alphabetic}\\p{Number}_])*)\\])', ''), + /* 4: *//^(?:'((?:\\'|\\[^']|[^'\\])*)')/, + /* 5: *//^(?:"((?:\\"|\\[^"]|[^"\\])*)")/, + /* 6: *//^(?:\.)/, + /* 7: *//^(?:\()/, + /* 8: *//^(?:\))/, + /* 9: *//^(?:\*)/, + /* 10: *//^(?:\?)/, + /* 11: *//^(?:\|)/, + /* 12: *//^(?:\+)/, + /* 13: *//^(?:$)/], + + conditions: { + 'INITIAL': { + rules: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13], + inclusive: true + } + } + }; + + return lexer; +}(); +parser$1.lexer = lexer$1; + +function Parser$1() { + this.yy = {}; +} +Parser$1.prototype = parser$1; +parser$1.Parser = Parser$1; + +function yyparse$1() { + return parser$1.parse.apply(parser$1, arguments); +} + +var parser$2 = { + parser: parser$1, + Parser: Parser$1, + parse: yyparse$1 + +}; + +//import assert from 'assert'; + +var devDebug = 0; + +// WARNING: this regex MUST match the regex for `ID` in ebnf-parser::bnf.l jison language lexer spec! (`ID = [{ALPHA}]{ALNUM}*`) +// +// This is the base XRegExp ID regex used in many places; this should match the ID macro definition in the EBNF/BNF parser et al as well! +var ID_REGEX_BASE = '[\\p{Alphabetic}_][\\p{Alphabetic}_\\p{Number}]*'; + +// produce a unique production symbol. +// Use this to produce rule productions from transformed EBNF which are +// guaranteed not to collide with previously generated / already existing +// rules (~ symbols). +function generateUniqueSymbol(id, postfix, opts) { + var sym = id + postfix; + if (opts.grammar[sym]) { + var i = 2; // the first occurrence won't have a number, this is already a collision, so start numbering at *2*. + do { + sym = id + postfix + i; + i++; + } while (opts.grammar[sym]); + } + return sym; +} + +function generatePushAction(handle, offset) { + var terms = handle.terms; + var rv = []; + + for (var i = 0, len = terms.length; i < len; i++) { + rv.push('$' + (i + offset)); + } + rv = rv.join(', '); + // and make sure we contain a term series unambiguously, i.e. anything more complex than + // a single term inside an EBNF check is produced as an array so we can differentiate + // between */+/? EBNF operator results and groups of tokens per individual match. + if (len > 1) { + rv = '[' + rv + ']'; + } + return rv; +} + +function transformExpression(e, opts, emit) { + var type = e[0], + value = e[1], + name = false, + has_transformed = 0; + var list, n; + + if (type === 'xalias') { + type = e[1]; + value = e[2]; + name = e[3]; + if (type) { + e = e.slice(1); + } else { + e = value; + type = e[0]; + value = e[1]; + } + if (devDebug > 3) console.log('xalias: ', e, type, value, name); + } + + if (type === 'symbol') { + n = e[1]; + if (devDebug > 2) console.log('symbol EMIT: ', n + (name ? '[' + name + ']' : '')); + emit(n + (name ? 
'[' + name + ']' : '')); + } else if (type === '+') { + if (!name) { + name = generateUniqueSymbol(opts.production, '_repetition_plus', opts); + } + if (devDebug > 2) console.log('+ EMIT name: ', name); + emit(name); + + has_transformed = 1; + + opts = optsForProduction(name, opts.grammar); + list = transformExpressionList([value], opts); + opts.grammar[name] = [[list.fragment, '$$ = [' + generatePushAction(list, 1) + '];'], [name + ' ' + list.fragment, '$1.push(' + generatePushAction(list, 2) + ');\n$$ = $1;']]; + } else if (type === '*') { + if (!name) { + name = generateUniqueSymbol(opts.production, '_repetition', opts); + } + if (devDebug > 2) console.log('* EMIT name: ', name); + emit(name); + + has_transformed = 1; + + opts = optsForProduction(name, opts.grammar); + list = transformExpressionList([value], opts); + opts.grammar[name] = [['', '$$ = [];'], [name + ' ' + list.fragment, '$1.push(' + generatePushAction(list, 2) + ');\n$$ = $1;']]; + } else if (type === '?') { + if (!name) { + name = generateUniqueSymbol(opts.production, '_option', opts); + } + if (devDebug > 2) console.log('? EMIT name: ', name); + emit(name); + + has_transformed = 1; + + opts = optsForProduction(name, opts.grammar); + list = transformExpressionList([value], opts); + // you want to be able to check if 0 or 1 occurrences were recognized: since jison + // by default *copies* the lexer token value, i.e. `$$ = $1` is the (optional) default action, + // we will need to set the action up explicitly in case of the 0-count match: + // `$$ = undefined`. + // + // Note that we MUST return an array as the + // '1 occurrence' match CAN carry multiple terms, e.g. in constructs like + // `(T T T)?`, which would otherwise be unrecognizable from the `T*` construct. + opts.grammar[name] = [['', '$$ = undefined;'], [list.fragment, '$$ = ' + generatePushAction(list, 1) + ';']]; + } else if (type === '()') { + if (value.length === 1 && !name) { + list = transformExpressionList(value[0], opts); + if (list.first_transformed_term_index) { + has_transformed = list.first_transformed_term_index; + } + if (devDebug > 2) console.log('group EMIT len=1: ', list); + emit(list); + } else { + if (!name) { + name = generateUniqueSymbol(opts.production, '_group', opts); + } + if (devDebug > 2) console.log('group EMIT name: ', name); + emit(name); + + has_transformed = 1; + + opts = optsForProduction(name, opts.grammar); + opts.grammar[name] = value.map(function (handle) { + var list = transformExpressionList(handle, opts); + return [list.fragment, '$$ = ' + generatePushAction(list, 1) + ';']; + }); + } + } + + return has_transformed; +} + +function transformExpressionList(list, opts) { + var first_transformed_term_index = false; + var terms = list.reduce(function (tot, e) { + var ci = tot.length; + + var has_transformed = transformExpression(e, opts, function (name) { + if (name.terms) { + tot.push.apply(tot, name.terms); + } else { + tot.push(name); + } + }); + + if (has_transformed) { + first_transformed_term_index = ci + has_transformed; + } + return tot; + }, []); + + return { + fragment: terms.join(' '), + terms: terms, + first_transformed_term_index: first_transformed_term_index // 1-based index + }; +} + +function optsForProduction(id, grammar) { + return { + production: id, + grammar: grammar + }; +} + +function transformProduction(id, production, grammar) { + var transform_opts = optsForProduction(id, grammar); + return production.map(function (handle) { + var action = null, + opts = null; + var i, len, n; + + if (typeof handle !== 
'string') { + action = handle[1]; + opts = handle[2]; + handle = handle[0]; + } + var expressions = parser$2.parse(handle); + + if (devDebug > 1) console.log('\n================\nEBNF transform expressions:\n ', handle, opts, JSON.stringify(expressions, null, 2)); + + var list = transformExpressionList(expressions, transform_opts); + + var ret = [list.fragment]; + if (action) { + // make sure the action doesn't address any inner items. + if (list.first_transformed_term_index) { + var rhs = list.fragment; + // seek out all names and aliases; strip out literal tokens first as those cannot serve as $names: + var alist = list.terms; // rhs.replace(/'[^']+'/g, '~').replace(/"[^"]+"/g, '~').split(' '); + // we also know at which index the first transformation occurred: + if (devDebug > 2) console.log('alist ~ rhs rule terms: ', alist, rhs); + + var alias_re = new XRegExp('\\[' + ID_REGEX_BASE + '\\]'); + var term_re = new XRegExp('^' + ID_REGEX_BASE + '$'); + // and collect the PERMITTED aliases: the names of the terms and all the remaining aliases + var good_aliases = {}; + var alias_cnt = {}; + var donotalias = {}; + + // WARNING: this replicates the knowledge/code of jison.js::addName() + var addName = function addNameEBNF(s, i) { + var base = s.replace(/[0-9]+$/, ''); + var dna = donotalias[base]; + + if (good_aliases[s]) { + alias_cnt[s]++; + if (!dna) { + good_aliases[s + alias_cnt[s]] = i + 1; + alias_cnt[s + alias_cnt[s]] = 1; + } + } else { + good_aliases[s] = i + 1; + alias_cnt[s] = 1; + if (!dna) { + good_aliases[s + alias_cnt[s]] = i + 1; + alias_cnt[s + alias_cnt[s]] = 1; + } + } + }; + + // WARNING: this replicates the knowledge/code of jison.js::markBasename() + var markBasename = function markBasenameEBNF(s) { + if (/[0-9]$/.test(s)) { + s = s.replace(/[0-9]+$/, ''); + donotalias[s] = true; + } + }; + + // mark both regular and aliased names, e.g., `id[alias1]` and `id1` + // + // WARNING: this replicates the knowledge/code of jison.js::markBasename()+addName() usage + for (i = 0, len = alist.length; i < len; i++) { + var term = alist[i]; + var alias = term.match(alias_re); + if (alias) { + markBasename(alias[0].substr(1, alias[0].length - 2)); + term = term.replace(alias_re, ''); + } + if (term.match(term_re)) { + markBasename(term); + } + } + // then check & register both regular and aliased names, e.g., `id[alias1]` and `id1` + for (i = 0, len = alist.length; i < len; i++) { + var term = alist[i]; + var alias = term.match(alias_re); + if (alias) { + addName(alias[0].substr(1, alias[0].length - 2), i); + term = term.replace(alias_re, ''); + } + if (term.match(term_re)) { + addName(term, i); + } + } + if (devDebug > 2) console.log('good_aliases: ', { + donotalias: donotalias, + good_aliases: good_aliases, + alias_cnt: alias_cnt + }); + + // now scan the action for all named and numeric semantic values ($nonterminal / $1 / @1, ##1, ...) + // + // Note that `#name` are straight **static** symbol translations, which are okay as they don't + // require access to the parse stack: `#n` references can be resolved completely + // at grammar compile time. 
+ // + var nameref_re = new XRegExp('(?:[$@]|##)' + ID_REGEX_BASE, 'g'); + var named_spots = nameref_re.exec(action); + var numbered_spots = action.match(/(?:[$@]|##)[0-9]+\b/g); + var max_term_index = list.terms.length; + if (devDebug > 2) console.log('ACTION named_spots: ', named_spots); + if (devDebug > 2) console.log('ACTION numbered_spots: ', numbered_spots); + + // loop through the XRegExp alias regex matches in `action` + while (named_spots) { + n = named_spots[0].replace(/^(?:[$@]|##)/, ''); + if (!good_aliases[n]) { + throw new Error('The action block references the named alias "' + n + '" ' + 'which is not available in production "' + handle + '"; ' + 'it probably got removed by the EBNF rule rewrite process.\n' + 'Be reminded that you cannot reference sub-elements within EBNF */+/? groups, ' + 'only the outer-most EBNF group alias will remain available at all times ' + 'due to the EBNF-to-BNF rewrite process.'); + } + + if (alias_cnt[n] !== 1) { + throw new Error('The action block references the ambiguous named alias or term reference "' + n + '" ' + 'which is mentioned ' + alias_cnt[n] + ' times in production "' + handle + '", implicit and explicit aliases included.\n' + 'You should either provide unambiguous = uniquely named aliases for these terms or use numeric index references (e.g. `$3`) as a stop-gap in your action code.\n' + 'Be reminded that you cannot reference sub-elements within EBNF */+/? groups, ' + 'only the outer-most EBNF group alias will remain available at all times ' + 'due to the EBNF-to-BNF rewrite process.'); + } + //assert(good_aliases[n] <= max_term_index, 'max term index'); + + named_spots = nameref_re.exec(action); + } + if (numbered_spots) { + for (i = 0, len = numbered_spots.length; i < len; i++) { + n = parseInt(numbered_spots[i].replace(/^(?:[$@]|##)/, '')); + if (n > max_term_index) { + /* @const */var n_suffixes = ['st', 'nd', 'rd', 'th']; + throw new Error('The action block references the ' + n + n_suffixes[Math.max(0, Math.min(3, n - 1))] + ' term, ' + 'which is not available in production "' + handle + '"; ' + 'Be reminded that you cannot reference sub-elements within EBNF */+/? groups, ' + 'only the outer-most EBNF group alias will remain available at all times ' + 'due to the EBNF-to-BNF rewrite process.'); + } + } + } + } + ret.push(action); + } + if (opts) { + ret.push(opts); + } + if (devDebug > 1) console.log('\n\nEBNF tx result:\n ', JSON.stringify(list, null, 2), JSON.stringify(ret, null, 2)); + + if (ret.length === 1) { + return ret[0]; + } else { + return ret; + } + }); +} + +var ref_list; +var ref_names; + +// create a deep copy of the input, so we will keep the input constant. +function deepClone(from, sub) { + if (sub == null) { + ref_list = []; + ref_names = []; + sub = 'root'; + } + if (typeof from === 'function') return from; + if (from == null || (typeof from === 'undefined' ? 
'undefined' : _typeof(from)) !== 'object') return from; + if (from.constructor !== Object && from.constructor !== Array) { + return from; + } + + for (var i = 0, len = ref_list.length; i < len; i++) { + if (ref_list[i] === from) { + throw new Error('[Circular/Xref:' + ref_names[i] + ']'); // circular or cross reference + } + } + ref_list.push(from); + ref_names.push(sub); + sub += '.'; + + var to = new from.constructor(); + for (var name in from) { + to[name] = deepClone(from[name], sub + name); + } + return to; +} + +function transformGrammar(grammar) { + grammar = deepClone(grammar); + + Object.keys(grammar).forEach(function transformGrammarForKey(id) { + grammar[id] = transformProduction(id, grammar[id], grammar); + }); + + return grammar; +} + +function transform(ebnf) { + if (devDebug > 0) console.log('EBNF:\n ', JSON.stringify(ebnf, null, 2)); + var rv = transformGrammar(ebnf); + if (devDebug > 0) console.log('\n\nEBNF after transformation:\n ', JSON.stringify(rv, null, 2)); + + return rv; +} + +// hack: +var assert; + +/* parser generated by jison 0.6.1-205 */ + +/* + * Returns a Parser object of the following structure: + * + * Parser: { + * yy: {} The so-called "shared state" or rather the *source* of it; + * the real "shared state" `yy` passed around to + * the rule actions, etc. is a derivative/copy of this one, + * not a direct reference! + * } + * + * Parser.prototype: { + * yy: {}, + * EOF: 1, + * TERROR: 2, + * + * trace: function(errorMessage, ...), + * + * JisonParserError: function(msg, hash), + * + * quoteName: function(name), + * Helper function which can be overridden by user code later on: put suitable + * quotes around literal IDs in a description string. + * + * originalQuoteName: function(name), + * The basic quoteName handler provided by JISON. + * `cleanupAfterParse()` will clean up and reset `quoteName()` to reference this function + * at the end of the `parse()`. + * + * describeSymbol: function(symbol), + * Return a more-or-less human-readable description of the given symbol, when + * available, or the symbol itself, serving as its own 'description' for lack + * of something better to serve up. + * + * Return NULL when the symbol is unknown to the parser. + * + * symbols_: {associative list: name ==> number}, + * terminals_: {associative list: number ==> name}, + * nonterminals: {associative list: rule-name ==> {associative list: number ==> rule-alt}}, + * terminal_descriptions_: (if there are any) {associative list: number ==> description}, + * productions_: [...], + * + * performAction: function parser__performAction(yytext, yyleng, yylineno, yyloc, yystate, yysp, yyvstack, yylstack, yystack, yysstack), + * + * The function parameters and `this` have the following value/meaning: + * - `this` : reference to the `yyval` internal object, which has members (`$` and `_$`) + * to store/reference the rule value `$$` and location info `@$`. + * + * One important thing to note about `this` a.k.a. `yyval`: every *reduce* action gets + * to see the same object via the `this` reference, i.e. if you wish to carry custom + * data from one reduce action through to the next within a single parse run, then you + * may get nasty and use `yyval` a.k.a. `this` for storing you own semi-permanent data. + * + * `this.yy` is a direct reference to the `yy` shared state object. 
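+ *
+ * Tiny illustration (hypothetical grammar rule action; `$$`, `$1`, etc. are rewritten by the
+ * code generator into `yyvstack` accesses as described below, while `yy` is the shared state):
+ *
+ *     { $$ = $1 + $3;                                  // this rule's own result value
+ *       yy.reduceCount = (yy.reduceCount || 0) + 1; }  // custom bookkeeping in the shared state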
+ * + * `%parse-param`-specified additional `parse()` arguments have been added to this `yy` + * object at `parse()` start and are therefore available to the action code via the + * same named `yy.xxxx` attributes (where `xxxx` represents a identifier name from + * the %parse-param` list. + * + * - `yytext` : reference to the lexer value which belongs to the last lexer token used + * to match this rule. This is *not* the look-ahead token, but the last token + * that's actually part of this rule. + * + * Formulated another way, `yytext` is the value of the token immediately preceeding + * the current look-ahead token. + * Caveats apply for rules which don't require look-ahead, such as epsilon rules. + * + * - `yyleng` : ditto as `yytext`, only now for the lexer.yyleng value. + * + * - `yylineno`: ditto as `yytext`, only now for the lexer.yylineno value. + * + * - `yyloc` : ditto as `yytext`, only now for the lexer.yylloc lexer token location info. + * + * WARNING: since jison 0.4.18-186 this entry may be NULL/UNDEFINED instead + * of an empty object when no suitable location info can be provided. + * + * - `yystate` : the current parser state number, used internally for dispatching and + * executing the action code chunk matching the rule currently being reduced. + * + * - `yysp` : the current state stack position (a.k.a. 'stack pointer') + * + * This one comes in handy when you are going to do advanced things to the parser + * stacks, all of which are accessible from your action code (see the next entries below). + * + * Also note that you can access this and other stack index values using the new double-hash + * syntax, i.e. `##$ === ##0 === yysp`, while `##1` is the stack index for all things + * related to the first rule term, just like you have `$1`, `@1` and `#1`. + * This is made available to write very advanced grammar action rules, e.g. when you want + * to investigate the parse state stack in your action code, which would, for example, + * be relevant when you wish to implement error diagnostics and reporting schemes similar + * to the work described here: + * + * + Pottier, F., 2016. Reachability and error diagnosis in LR(1) automata. + * In Journées Francophones des Languages Applicatifs. + * + * + Jeffery, C.L., 2003. Generating LR syntax error messages from examples. + * ACM Transactions on Programming Languages and Systems (TOPLAS), 25(5), pp.631–640. + * + * - `yyrulelength`: the current rule's term count, i.e. the number of entries occupied on the stack. + * + * This one comes in handy when you are going to do advanced things to the parser + * stacks, all of which are accessible from your action code (see the next entries below). + * + * - `yyvstack`: reference to the parser value stack. Also accessed via the `$1` etc. + * constructs. + * + * - `yylstack`: reference to the parser token location stack. Also accessed via + * the `@1` etc. constructs. + * + * WARNING: since jison 0.4.18-186 this array MAY contain slots which are + * UNDEFINED rather than an empty (location) object, when the lexer/parser + * action code did not provide a suitable location info object when such a + * slot was filled! + * + * - `yystack` : reference to the parser token id stack. Also accessed via the + * `#1` etc. constructs. 
+ * + * Note: this is a bit of a **white lie** as we can statically decode any `#n` reference to + * its numeric token id value, hence that code wouldn't need the `yystack` but *you* might + * want access this array for your own purposes, such as error analysis as mentioned above! + * + * Note that this stack stores the current stack of *tokens*, that is the sequence of + * already parsed=reduced *nonterminals* (tokens representing rules) and *terminals* + * (lexer tokens *shifted* onto the stack until the rule they belong to is found and + * *reduced*. + * + * - `yysstack`: reference to the parser state stack. This one carries the internal parser + * *states* such as the one in `yystate`, which are used to represent + * the parser state machine in the *parse table*. *Very* *internal* stuff, + * what can I say? If you access this one, you're clearly doing wicked things + * + * - `...` : the extra arguments you specified in the `%parse-param` statement in your + * grammar definition file. + * + * table: [...], + * State transition table + * ---------------------- + * + * index levels are: + * - `state` --> hash table + * - `symbol` --> action (number or array) + * + * If the `action` is an array, these are the elements' meaning: + * - index [0]: 1 = shift, 2 = reduce, 3 = accept + * - index [1]: GOTO `state` + * + * If the `action` is a number, it is the GOTO `state` + * + * defaultActions: {...}, + * + * parseError: function(str, hash, ExceptionClass), + * yyError: function(str, ...), + * yyRecovering: function(), + * yyErrOk: function(), + * yyClearIn: function(), + * + * constructParseErrorInfo: function(error_message, exception_object, expected_token_set, is_recoverable), + * Helper function **which will be set up during the first invocation of the `parse()` method**. + * Produces a new errorInfo 'hash object' which can be passed into `parseError()`. + * See it's use in this parser kernel in many places; example usage: + * + * var infoObj = parser.constructParseErrorInfo('fail!', null, + * parser.collect_expected_token_set(state), true); + * var retVal = parser.parseError(infoObj.errStr, infoObj, parser.JisonParserError); + * + * originalParseError: function(str, hash, ExceptionClass), + * The basic `parseError` handler provided by JISON. + * `cleanupAfterParse()` will clean up and reset `parseError()` to reference this function + * at the end of the `parse()`. + * + * options: { ... parser %options ... }, + * + * parse: function(input[, args...]), + * Parse the given `input` and return the parsed value (or `true` when none was provided by + * the root action, in which case the parser is acting as a *matcher*). + * You MAY use the additional `args...` parameters as per `%parse-param` spec of this grammar: + * these extra `args...` are added verbatim to the `yy` object reference as member variables. + * + * WARNING: + * Parser's additional `args...` parameters (via `%parse-param`) MAY conflict with + * any attributes already added to `yy` by the jison run-time; + * when such a collision is detected an exception is thrown to prevent the generated run-time + * from silently accepting this confusing and potentially hazardous situation! + * + * The lexer MAY add its own set of additional parameters (via the `%parse-param` line in + * the lexer section of the grammar spec): these will be inserted in the `yy` shared state + * object and any collision with those will be reported by the lexer via a thrown exception. 
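+ *
+ * Minimal usage sketch (hypothetical grammar compiled with `%parse-param options`):
+ *
+ *     var parser = new Parser();
+ *     var result = parser.parse(sourceText, { tolerant: true });
+ *     // inside the rule actions that extra argument is then reachable as `yy.options`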
+ * + * cleanupAfterParse: function(resultValue, invoke_post_methods, do_not_nuke_errorinfos), + * Helper function **which will be set up during the first invocation of the `parse()` method**. + * This helper API is invoked at the end of the `parse()` call, unless an exception was thrown + * and `%options no-try-catch` has been defined for this grammar: in that case this helper MAY + * be invoked by calling user code to ensure the `post_parse` callbacks are invoked and + * the internal parser gets properly garbage collected under these particular circumstances. + * + * yyMergeLocationInfo: function(first_index, last_index, first_yylloc, last_yylloc, dont_look_back), + * Helper function **which will be set up during the first invocation of the `parse()` method**. + * This helper API can be invoked to calculate a spanning `yylloc` location info object. + * + * Note: %epsilon rules MAY specify no `first_index` and `first_yylloc`, in which case + * this function will attempt to obtain a suitable location marker by inspecting the location stack + * backwards. + * + * For more info see the documentation comment further below, immediately above this function's + * implementation. + * + * lexer: { + * yy: {...}, A reference to the so-called "shared state" `yy` once + * received via a call to the `.setInput(input, yy)` lexer API. + * EOF: 1, + * ERROR: 2, + * JisonLexerError: function(msg, hash), + * parseError: function(str, hash, ExceptionClass), + * setInput: function(input, [yy]), + * input: function(), + * unput: function(str), + * more: function(), + * reject: function(), + * less: function(n), + * pastInput: function(n), + * upcomingInput: function(n), + * showPosition: function(), + * test_match: function(regex_match_array, rule_index, ...), + * next: function(...), + * lex: function(...), + * begin: function(condition), + * pushState: function(condition), + * popState: function(), + * topState: function(), + * _currentRules: function(), + * stateStackSize: function(), + * cleanupAfterLex: function() + * + * options: { ... lexer %options ... }, + * + * performAction: function(yy, yy_, $avoiding_name_collisions, YY_START, ...), + * rules: [...], + * conditions: {associative list: name ==> set}, + * } + * } + * + * + * token location info (@$, _$, etc.): { + * first_line: n, + * last_line: n, + * first_column: n, + * last_column: n, + * range: [start_number, end_number] + * (where the numbers are indexes into the input string, zero-based) + * } + * + * --- + * + * The `parseError` function receives a 'hash' object with these members for lexer and + * parser errors: + * + * { + * text: (matched text) + * token: (the produced terminal token, if any) + * token_id: (the produced terminal token numeric ID, if any) + * line: (yylineno) + * loc: (yylloc) + * } + * + * parser (grammar) errors will also provide these additional members: + * + * { + * expected: (array describing the set of expected tokens; + * may be UNDEFINED when we cannot easily produce such a set) + * state: (integer (or array when the table includes grammar collisions); + * represents the current internal state of the parser kernel. 
+ * can, for example, be used to pass to the `collect_expected_token_set()` + * API to obtain the expected token set) + * action: (integer; represents the current internal action which will be executed) + * new_state: (integer; represents the next/planned internal state, once the current + * action has executed) + * recoverable: (boolean: TRUE when the parser MAY have an error recovery rule + * available for this particular error) + * state_stack: (array: the current parser LALR/LR internal state stack; this can be used, + * for instance, for advanced error analysis and reporting) + * value_stack: (array: the current parser LALR/LR internal `$$` value stack; this can be used, + * for instance, for advanced error analysis and reporting) + * location_stack: (array: the current parser LALR/LR internal location stack; this can be used, + * for instance, for advanced error analysis and reporting) + * yy: (object: the current parser internal "shared state" `yy` + * as is also available in the rule actions; this can be used, + * for instance, for advanced error analysis and reporting) + * lexer: (reference to the current lexer instance used by the parser) + * parser: (reference to the current parser instance) + * } + * + * while `this` will reference the current parser instance. + * + * When `parseError` is invoked by the lexer, `this` will still reference the related *parser* + * instance, while these additional `hash` fields will also be provided: + * + * { + * lexer: (reference to the current lexer instance which reported the error) + * } + * + * When `parseError` is invoked by the parser due to a **JavaScript exception** being fired + * from either the parser or lexer, `this` will still reference the related *parser* + * instance, while these additional `hash` fields will also be provided: + * + * { + * exception: (reference to the exception thrown) + * } + * + * Please do note that in the latter situation, the `expected` field will be omitted as + * this type of failure is assumed not to be due to *parse errors* but rather due to user + * action code in either parser or lexer failing unexpectedly. + * + * --- + * + * You can specify parser options by setting / modifying the `.yy` object of your Parser instance. + * These options are available: + * + * ### options which are global for all parser instances + * + * Parser.pre_parse: function(yy) + * optional: you can specify a pre_parse() function in the chunk following + * the grammar, i.e. after the last `%%`. + * Parser.post_parse: function(yy, retval, parseInfo) { return retval; } + * optional: you can specify a post_parse() function in the chunk following + * the grammar, i.e. after the last `%%`. When it does not return any value, + * the parser will return the original `retval`. + * + * ### options which can be set up per parser instance + * + * yy: { + * pre_parse: function(yy) + * optional: is invoked before the parse cycle starts (and before the first + * invocation of `lex()`) but immediately after the invocation of + * `parser.pre_parse()`). + * post_parse: function(yy, retval, parseInfo) { return retval; } + * optional: is invoked when the parse terminates due to success ('accept') + * or failure (even when exceptions are thrown). + * `retval` contains the return value to be produced by `Parser.parse()`; + * this function can override the return value by returning another. + * When it does not return any value, the parser will return the original + * `retval`. + * This function is invoked immediately before `parser.post_parse()`. 
+ * + * parseError: function(str, hash, ExceptionClass) + * optional: overrides the default `parseError` function. + * quoteName: function(name), + * optional: overrides the default `quoteName` function. + * } + * + * parser.lexer.options: { + * pre_lex: function() + * optional: is invoked before the lexer is invoked to produce another token. + * `this` refers to the Lexer object. + * post_lex: function(token) { return token; } + * optional: is invoked when the lexer has produced a token `token`; + * this function can override the returned token value by returning another. + * When it does not return any (truthy) value, the lexer will return + * the original `token`. + * `this` refers to the Lexer object. + * + * ranges: boolean + * optional: `true` ==> token location info will include a .range[] member. + * flex: boolean + * optional: `true` ==> flex-like lexing behaviour where the rules are tested + * exhaustively to find the longest match. + * backtrack_lexer: boolean + * optional: `true` ==> lexer regexes are tested in order and for invoked; + * the lexer terminates the scan when a token is returned by the action code. + * xregexp: boolean + * optional: `true` ==> lexer rule regexes are "extended regex format" requiring the + * `XRegExp` library. When this `%option` has not been specified at compile time, all lexer + * rule regexes have been written as standard JavaScript RegExp expressions. + * } + */ + +// See also: +// http://stackoverflow.com/questions/1382107/whats-a-good-way-to-extend-error-in-javascript/#35881508 +// but we keep the prototype.constructor and prototype.name assignment lines too for compatibility +// with userland code which might access the derived class in a 'classic' way. +function JisonParserError(msg, hash) { + Object.defineProperty(this, 'name', { + enumerable: false, + writable: false, + value: 'JisonParserError' + }); + + if (msg == null) msg = '???'; + + Object.defineProperty(this, 'message', { + enumerable: false, + writable: true, + value: msg + }); + + this.hash = hash; + + var stacktrace; + if (hash && hash.exception instanceof Error) { + var ex2 = hash.exception; + this.message = ex2.message || msg; + stacktrace = ex2.stack; + } + if (!stacktrace) { + if (Error.hasOwnProperty('captureStackTrace')) { + // V8/Chrome engine + Error.captureStackTrace(this, this.constructor); + } else { + stacktrace = new Error(msg).stack; + } + } + if (stacktrace) { + Object.defineProperty(this, 'stack', { + enumerable: false, + writable: false, + value: stacktrace + }); + } +} + +if (typeof Object.setPrototypeOf === 'function') { + Object.setPrototypeOf(JisonParserError.prototype, Error.prototype); +} else { + JisonParserError.prototype = Object.create(Error.prototype); +} +JisonParserError.prototype.constructor = JisonParserError; +JisonParserError.prototype.name = 'JisonParserError'; + +// helper: reconstruct the productions[] table +function bp(s) { + var rv = []; + var p = s.pop; + var r = s.rule; + for (var i = 0, l = p.length; i < l; i++) { + rv.push([p[i], r[i]]); + } + return rv; +} + +// helper: reconstruct the defaultActions[] table +function bda(s) { + var rv = {}; + var d = s.idx; + var g = s.goto; + for (var i = 0, l = d.length; i < l; i++) { + var j = d[i]; + rv[j] = g[i]; + } + return rv; +} + +// helper: reconstruct the 'goto' table +function bt(s) { + var rv = []; + var d = s.len; + var y = s.symbol; + var t = s.type; + var a = s.state; + var m = s.mode; + var g = s.goto; + for (var i = 0, l = d.length; i < l; i++) { + var n = d[i]; + var q = {}; + for 
(var j = 0; j < n; j++) { + var z = y.shift(); + switch (t.shift()) { + case 2: + q[z] = [m.shift(), g.shift()]; + break; + + case 0: + q[z] = a.shift(); + break; + + default: + // type === 1: accept + q[z] = [3]; + } + } + rv.push(q); + } + return rv; +} + +// helper: runlength encoding with increment step: code, length: step (default step = 0) +// `this` references an array +function s(c, l, a) { + a = a || 0; + for (var i = 0; i < l; i++) { + this.push(c); + c += a; + } +} + +// helper: duplicate sequence from *relative* offset and length. +// `this` references an array +function c(i, l) { + i = this.length - i; + for (l += i; i < l; i++) { + this.push(this[i]); + } +} + +// helper: unpack an array using helpers and data, all passed in an array argument 'a'. +function u(a) { + var rv = []; + for (var i = 0, l = a.length; i < l; i++) { + var e = a[i]; + // Is this entry a helper function? + if (typeof e === 'function') { + i++; + e.apply(rv, a[i]); + } else { + rv.push(e); + } + } + return rv; +} + +var parser = { + // Code Generator Information Report + // --------------------------------- + // + // Options: + // + // default action mode: ............. classic,merge + // no try..catch: ................... false + // no default resolve on conflict: false + // on-demand look-ahead: ............ false + // error recovery token skip maximum: 3 + // yyerror in parse actions is: ..... NOT recoverable, + // yyerror in lexer actions and other non-fatal lexer are: + // .................................. NOT recoverable, + // debug grammar/output: ............ false + // has partial LR conflict upgrade: true + // rudimentary token-stack support: false + // parser table compression mode: ... 2 + // export debug tables: ............. false + // export *all* tables: ............. false + // module type: ..................... es + // parser engine type: .............. lalr + // output main() in the module: ..... true + // has user-specified main(): ....... false + // has user-specified require()/import modules for main(): + // .................................. false + // number of expected conflicts: .... 0 + // + // + // Parser Analysis flags: + // + // no significant actions (parser is a language matcher only): + // .................................. false + // uses yyleng: ..................... false + // uses yylineno: ................... false + // uses yytext: ..................... false + // uses yylloc: ..................... false + // uses ParseError API: ............. false + // uses YYERROR: .................... true + // uses YYRECOVERING: ............... false + // uses YYERROK: .................... false + // uses YYCLEARIN: .................. false + // tracks rule values: .............. true + // assigns rule values: ............. true + // uses location tracking: .......... true + // assigns location: ................ true + // uses yystack: .................... false + // uses yysstack: ................... false + // uses yysp: ....................... true + // uses yyrulelength: ............... false + // uses yyMergeLocationInfo API: .... true + // has error recovery: .............. true + // has error reporting: ............. 
true + // + // --------- END OF REPORT ----------- + + trace: function no_op_trace() {}, + JisonParserError: JisonParserError, + yy: {}, + options: { + type: "lalr", + hasPartialLrUpgradeOnConflict: true, + errorRecoveryTokenDiscardCount: 3 + }, + symbols_: { + "$accept": 0, + "$end": 1, + "%%": 14, + "(": 7, + ")": 8, + "*": 9, + "+": 11, + ":": 5, + ";": 4, + "=": 3, + "?": 10, + "ACTION": 15, + "ACTION_BODY": 43, + "ALIAS": 39, + "ARROW_ACTION": 42, + "CODE": 46, + "DEBUG": 19, + "EBNF": 20, + "EOF": 1, + "EOF_ID": 40, + "EPSILON": 38, + "ID": 24, + "IMPORT": 22, + "INCLUDE": 44, + "INIT_CODE": 23, + "INTEGER": 37, + "LEFT": 33, + "LEX_BLOCK": 17, + "NAME": 25, + "NONASSOC": 35, + "OPTIONS": 27, + "OPTIONS_END": 28, + "OPTION_STRING_VALUE": 29, + "OPTION_VALUE": 30, + "PARSER_TYPE": 32, + "PARSE_PARAM": 31, + "PATH": 45, + "PREC": 41, + "RIGHT": 34, + "START": 16, + "STRING": 26, + "TOKEN": 18, + "TOKEN_TYPE": 36, + "UNKNOWN_DECL": 21, + "action": 85, + "action_body": 86, + "action_comments_body": 87, + "action_ne": 84, + "associativity": 61, + "declaration": 51, + "declaration_list": 50, + "error": 2, + "expression": 79, + "extra_parser_module_code": 88, + "full_token_definitions": 63, + "grammar": 69, + "handle": 76, + "handle_action": 75, + "handle_list": 74, + "handle_sublist": 77, + "id": 83, + "id_list": 68, + "import_name": 53, + "import_path": 54, + "include_macro_code": 89, + "init_code_name": 52, + "module_code_chunk": 90, + "one_full_token": 64, + "operator": 60, + "option": 57, + "option_list": 56, + "optional_action_header_block": 49, + "optional_end_block": 48, + "optional_module_code_chunk": 91, + "optional_production_description": 73, + "optional_token_type": 65, + "options": 55, + "parse_params": 58, + "parser_type": 59, + "prec": 81, + "production": 71, + "production_id": 72, + "production_list": 70, + "spec": 47, + "suffix": 80, + "suffixed_expression": 78, + "symbol": 82, + "token_description": 67, + "token_list": 62, + "token_value": 66, + "{": 12, + "|": 6, + "}": 13 + }, + terminals_: { + 1: "EOF", + 2: "error", + 3: "=", + 4: ";", + 5: ":", + 6: "|", + 7: "(", + 8: ")", + 9: "*", + 10: "?", + 11: "+", + 12: "{", + 13: "}", + 14: "%%", + 15: "ACTION", + 16: "START", + 17: "LEX_BLOCK", + 18: "TOKEN", + 19: "DEBUG", + 20: "EBNF", + 21: "UNKNOWN_DECL", + 22: "IMPORT", + 23: "INIT_CODE", + 24: "ID", + 25: "NAME", + 26: "STRING", + 27: "OPTIONS", + 28: "OPTIONS_END", + 29: "OPTION_STRING_VALUE", + 30: "OPTION_VALUE", + 31: "PARSE_PARAM", + 32: "PARSER_TYPE", + 33: "LEFT", + 34: "RIGHT", + 35: "NONASSOC", + 36: "TOKEN_TYPE", + 37: "INTEGER", + 38: "EPSILON", + 39: "ALIAS", + 40: "EOF_ID", + 41: "PREC", + 42: "ARROW_ACTION", + 43: "ACTION_BODY", + 44: "INCLUDE", + 45: "PATH", + 46: "CODE" + }, + TERROR: 2, + EOF: 1, + + // internals: defined here so the object *structure* doesn't get modified by parse() et al, + // thus helping JIT compilers like Chrome V8. 
+ originalQuoteName: null, + originalParseError: null, + cleanupAfterParse: null, + constructParseErrorInfo: null, + yyMergeLocationInfo: null, + + __reentrant_call_depth: 0, // INTERNAL USE ONLY + __error_infos: [], // INTERNAL USE ONLY: the set of parseErrorInfo objects created since the last cleanup + __error_recovery_infos: [], // INTERNAL USE ONLY: the set of parseErrorInfo objects created since the last cleanup + + // APIs which will be set up depending on user action code analysis: + //yyRecovering: 0, + //yyErrOk: 0, + //yyClearIn: 0, + + // Helper APIs + // ----------- + + // Helper function which can be overridden by user code later on: put suitable quotes around + // literal IDs in a description string. + quoteName: function parser_quoteName(id_str) { + return '"' + id_str + '"'; + }, + + // Return the name of the given symbol (terminal or non-terminal) as a string, when available. + // + // Return NULL when the symbol is unknown to the parser. + getSymbolName: function parser_getSymbolName(symbol) { + if (this.terminals_[symbol]) { + return this.terminals_[symbol]; + } + + // Otherwise... this might refer to a RULE token i.e. a non-terminal: see if we can dig that one up. + // + // An example of this may be where a rule's action code contains a call like this: + // + // parser.getSymbolName(#$) + // + // to obtain a human-readable name of the current grammar rule. + var s = this.symbols_; + for (var key in s) { + if (s[key] === symbol) { + return key; + } + } + return null; + }, + + // Return a more-or-less human-readable description of the given symbol, when available, + // or the symbol itself, serving as its own 'description' for lack of something better to serve up. + // + // Return NULL when the symbol is unknown to the parser. + describeSymbol: function parser_describeSymbol(symbol) { + if (symbol !== this.EOF && this.terminal_descriptions_ && this.terminal_descriptions_[symbol]) { + return this.terminal_descriptions_[symbol]; + } else if (symbol === this.EOF) { + return 'end of input'; + } + var id = this.getSymbolName(symbol); + if (id) { + return this.quoteName(id); + } + return null; + }, + + // Produce a (more or less) human-readable list of expected tokens at the point of failure. + // + // The produced list may contain token or token set descriptions instead of the tokens + // themselves to help turning this output into something that easier to read by humans + // unless `do_not_describe` parameter is set, in which case a list of the raw, *numeric*, + // expected terminals and nonterminals is produced. + // + // The returned list (array) will not contain any duplicate entries. + collect_expected_token_set: function parser_collect_expected_token_set(state, do_not_describe) { + var TERROR = this.TERROR; + var tokenset = []; + var check = {}; + // Has this (error?) state been outfitted with a custom expectations description text for human consumption? + // If so, use that one instead of the less palatable token set. + if (!do_not_describe && this.state_descriptions_ && this.state_descriptions_[state]) { + return [this.state_descriptions_[state]]; + } + for (var p in this.table[state]) { + p = +p; + if (p !== TERROR) { + var d = do_not_describe ? p : this.describeSymbol(p); + if (d && !check[d]) { + tokenset.push(d); + check[d] = true; // Mark this token description as already mentioned to prevent outputting duplicate entries. 
+ } + } + } + return tokenset; + }, + productions_: bp({ + pop: u([s, [47, 3], 48, 48, s, [49, 3], s, [50, 3], s, [51, 20], s, [52, 3], 53, 53, 54, 54, s, [55, 3], 56, 56, s, [57, 6], 58, 58, 59, 59, 60, 60, s, [61, 3], 62, 62, 63, 63, s, [64, 3], 65, s, [65, 4, 1], 68, 69, 70, 70, s, [71, 3], 72, 72, 73, 73, s, [74, 4], s, [75, 3], 76, 76, 77, 77, 78, 78, s, [79, 5], s, [80, 4], s, [81, 3], 82, 82, 83, s, [84, 4], s, [85, 3], s, [86, 5], 87, 87, 88, 88, 89, 89, s, [90, 3], 91, 91]), + rule: u([5, 5, 3, 0, 2, 0, s, [2, 3], c, [4, 3], 1, 1, c, [3, 3], s, [1, 6], s, [3, 5], s, [2, 3], c, [15, 9], c, [11, 4], c, [20, 7], s, [2, 4], s, [1, 3], 2, 1, 2, 2, c, [15, 3], 0, c, [11, 7], c, [36, 4], 3, 3, 1, 0, 3, c, [39, 4], c, [80, 4], c, [9, 3], c, [39, 4], 3, 3, c, [34, 5], c, [40, 5], c, [32, 3], s, [1, 3], 0, 0, 1, 5, 4, 4, c, [53, 3], c, [85, 4], c, [35, 3], 0]) + }), + performAction: function parser__PerformAction(yyloc, yystate /* action[1] */, yysp, yyvstack, yylstack) { + + /* this == yyval */ + + // the JS engine itself can go and remove these statements when `yy` turns out to be unused in any action code! + var yy = this.yy; + var yyparser = yy.parser; + var yylexer = yy.lexer; + + switch (yystate) { + case 0: + /*! Production:: $accept : spec $end */ + + // default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-): + this.$ = yyvstack[yysp - 1]; + this._$ = yylstack[yysp - 1]; + // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-) + break; + + case 1: + /*! Production:: spec : declaration_list "%%" grammar optional_end_block EOF */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 4, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = yyvstack[yysp - 4]; + if (yyvstack[yysp - 1].trim() !== '') { + yy.addDeclaration(this.$, { include: yyvstack[yysp - 1] }); + } + return extend(this.$, yyvstack[yysp - 2]); + break; + + case 2: + /*! Production:: spec : declaration_list "%%" grammar error EOF */ + + // default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-): + this.$ = yyvstack[yysp - 4]; + this._$ = yyparser.yyMergeLocationInfo(yysp - 4, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-) + + + yyparser.yyError(rmCommonWS(_templateObject, yylexer.prettyPrintRange(yylstack[yysp - 1], yylstack[yysp - 2]))); + break; + + case 3: + /*! Production:: spec : declaration_list error EOF */ + + // default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-): + this.$ = yyvstack[yysp - 2]; + this._$ = yyparser.yyMergeLocationInfo(yysp - 2, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-) + + + yyparser.yyError(rmCommonWS(_templateObject2, yylexer.prettyPrintRange(yylstack[yysp - 1], yylstack[yysp - 2]))); + break; + + case 4: + /*! Production:: optional_end_block : %epsilon */ + case 100: + /*! Production:: suffix : %epsilon */ + case 116: + /*! Production:: action : %epsilon */ + case 117: + /*! Production:: action_body : %epsilon */ + case 132: + /*! 
Production:: optional_module_code_chunk : %epsilon */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(null, null, null, null, true); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = ''; + break; + + case 5: + /*! Production:: optional_end_block : "%%" extra_parser_module_code */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + var rv = checkActionBlock(yyvstack[yysp], yylstack[yysp]); + if (rv) { + yyparser.yyError(rmCommonWS(_templateObject3, rv, yylexer.prettyPrintRange(yylstack[yysp]))); + } + this.$ = yyvstack[yysp]; + break; + + case 6: + /*! Production:: optional_action_header_block : %epsilon */ + case 10: + /*! Production:: declaration_list : %epsilon */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(null, null, null, null, true); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = {}; + break; + + case 7: + /*! Production:: optional_action_header_block : optional_action_header_block ACTION */ + case 8: + /*! Production:: optional_action_header_block : optional_action_header_block include_macro_code */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = yyvstack[yysp - 1]; + var rv = checkActionBlock(yyvstack[yysp], yylstack[yysp]); + if (rv) { + yyparser.yyError(rmCommonWS(_templateObject4, rv, yylexer.prettyPrintRange(yylstack[yysp]))); + } + yy.addDeclaration(this.$, { actionInclude: yyvstack[yysp] }); + break; + + case 9: + /*! Production:: declaration_list : declaration_list declaration */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = yyvstack[yysp - 1];yy.addDeclaration(this.$, yyvstack[yysp]); + break; + + case 11: + /*! Production:: declaration_list : declaration_list error */ + + // default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-): + this.$ = yyvstack[yysp - 1]; + this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-) + + + // TODO ... + yyparser.yyError(rmCommonWS(_templateObject5, yylexer.prettyPrintRange(yylstack[yysp], yylstack[yysp - 1]))); + break; + + case 12: + /*! Production:: declaration : START id */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = { start: yyvstack[yysp] }; + break; + + case 13: + /*! 
Production:: declaration : LEX_BLOCK */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yylstack[yysp]; + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = { lex: { text: yyvstack[yysp], position: yylstack[yysp] } }; + break; + + case 14: + /*! Production:: declaration : operator */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yylstack[yysp]; + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = { operator: yyvstack[yysp] }; + break; + + case 15: + /*! Production:: declaration : TOKEN full_token_definitions */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = { token_list: yyvstack[yysp] }; + break; + + case 16: + /*! Production:: declaration : ACTION */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yylstack[yysp]; + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + var rv = checkActionBlock(yyvstack[yysp], yylstack[yysp]); + if (rv) { + yyparser.yyError(rmCommonWS(_templateObject6, rv, yylexer.prettyPrintRange(yylstack[yysp]))); + } + this.$ = { include: yyvstack[yysp] }; + break; + + case 17: + /*! Production:: declaration : include_macro_code */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yylstack[yysp]; + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + var rv = checkActionBlock(yyvstack[yysp], yylstack[yysp]); + if (rv) { + yyparser.yyError(rmCommonWS(_templateObject4, rv, yylexer.prettyPrintRange(yylstack[yysp]))); + } + this.$ = { include: yyvstack[yysp] }; + break; + + case 18: + /*! Production:: declaration : parse_params */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yylstack[yysp]; + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = { parseParams: yyvstack[yysp] }; + break; + + case 19: + /*! Production:: declaration : parser_type */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yylstack[yysp]; + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = { parserType: yyvstack[yysp] }; + break; + + case 20: + /*! Production:: declaration : options */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yylstack[yysp]; + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = { options: yyvstack[yysp] }; + break; + + case 21: + /*! Production:: declaration : DEBUG */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yylstack[yysp]; + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = { options: [['debug', true]] }; + break; + + case 22: + /*! 
Production:: declaration : EBNF */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yylstack[yysp]; + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + ebnf = true; + this.$ = { options: [['ebnf', true]] }; + break; + + case 23: + /*! Production:: declaration : UNKNOWN_DECL */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yylstack[yysp]; + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = { unknownDecl: yyvstack[yysp] }; + break; + + case 24: + /*! Production:: declaration : IMPORT import_name import_path */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 2, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = { imports: { name: yyvstack[yysp - 1], path: yyvstack[yysp] } }; + break; + + case 25: + /*! Production:: declaration : IMPORT import_name error */ + + // default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-): + this.$ = yyvstack[yysp - 2]; + this._$ = yyparser.yyMergeLocationInfo(yysp - 2, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-) + + + yyparser.yyError(rmCommonWS(_templateObject7, yylexer.prettyPrintRange(yylstack[yysp], yylstack[yysp - 2]))); + break; + + case 26: + /*! Production:: declaration : IMPORT error import_path */ + + // default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-): + this.$ = yyvstack[yysp - 2]; + this._$ = yyparser.yyMergeLocationInfo(yysp - 2, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-) + + + yyparser.yyError(rmCommonWS(_templateObject8, yylexer.prettyPrintRange(yylstack[yysp - 1], yylstack[yysp - 2]))); + break; + + case 27: + /*! Production:: declaration : INIT_CODE init_code_name action_ne */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 2, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + var rv = checkActionBlock(yyvstack[yysp], yylstack[yysp]); + if (rv) { + yyparser.yyError(rmCommonWS(_templateObject9, $init_code_name, rv, yylexer.prettyPrintRange(yylstack[yysp], yylstack[yysp - 2]))); + } + this.$ = { + initCode: { + qualifier: yyvstack[yysp - 1], + include: yyvstack[yysp] + } + }; + break; + + case 28: + /*! Production:: declaration : INIT_CODE error action_ne */ + + // default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-): + this.$ = yyvstack[yysp - 2]; + this._$ = yyparser.yyMergeLocationInfo(yysp - 2, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-) + + + yyparser.yyError(rmCommonWS(_templateObject10, yylexer.prettyPrintRange(yylstack[yysp - 1], yylstack[yysp - 2], yylstack[yysp]))); + break; + + case 29: + /*! Production:: declaration : START error */ + + // default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-): + this.$ = yyvstack[yysp - 1]; + this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-) + + + // TODO ... 
+ yyparser.yyError(rmCommonWS(_templateObject11, yylexer.prettyPrintRange(yylstack[yysp], yylstack[yysp - 1]))); + break; + + case 30: + /*! Production:: declaration : TOKEN error */ + + // default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-): + this.$ = yyvstack[yysp - 1]; + this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-) + + + // TODO ... + yyparser.yyError(rmCommonWS(_templateObject12, yylexer.prettyPrintRange(yylstack[yysp], yylstack[yysp - 1]))); + break; + + case 31: + /*! Production:: declaration : IMPORT error */ + + // default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-): + this.$ = yyvstack[yysp - 1]; + this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-) + + + // TODO ... + yyparser.yyError(rmCommonWS(_templateObject13, yylexer.prettyPrintRange(yylstack[yysp], yylstack[yysp - 1]))); + break; + + case 32: + /*! Production:: init_code_name : ID */ + case 33: + /*! Production:: init_code_name : NAME */ + case 34: + /*! Production:: init_code_name : STRING */ + case 35: + /*! Production:: import_name : ID */ + case 36: + /*! Production:: import_name : STRING */ + case 37: + /*! Production:: import_path : ID */ + case 38: + /*! Production:: import_path : STRING */ + case 67: + /*! Production:: optional_token_type : TOKEN_TYPE */ + case 68: + /*! Production:: token_value : INTEGER */ + case 69: + /*! Production:: token_description : STRING */ + case 80: + /*! Production:: optional_production_description : STRING */ + case 95: + /*! Production:: expression : ID */ + case 101: + /*! Production:: suffix : "*" */ + case 102: + /*! Production:: suffix : "?" */ + case 103: + /*! Production:: suffix : "+" */ + case 107: + /*! Production:: symbol : id */ + case 108: + /*! Production:: symbol : STRING */ + case 109: + /*! Production:: id : ID */ + case 112: + /*! Production:: action_ne : ACTION */ + case 113: + /*! Production:: action_ne : include_macro_code */ + case 114: + /*! Production:: action : action_ne */ + case 118: + /*! Production:: action_body : action_comments_body */ + case 122: + /*! Production:: action_comments_body : ACTION_BODY */ + case 124: + /*! Production:: extra_parser_module_code : optional_module_code_chunk */ + case 128: + /*! Production:: module_code_chunk : CODE */ + case 131: + /*! Production:: optional_module_code_chunk : module_code_chunk */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yylstack[yysp]; + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = yyvstack[yysp]; + break; + + case 39: + /*! Production:: options : OPTIONS option_list OPTIONS_END */ + case 110: + /*! Production:: action_ne : "{" action_body "}" */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 2, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = yyvstack[yysp - 1]; + break; + + case 40: + /*! 
Production:: options : OPTIONS error OPTIONS_END */ + + // default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-): + this.$ = yyvstack[yysp - 2]; + this._$ = yyparser.yyMergeLocationInfo(yysp - 2, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-) + + + // TODO ... + yyparser.yyError(rmCommonWS(_templateObject14, yylexer.prettyPrintRange(yylstack[yysp - 1], yylstack[yysp - 2], yylstack[yysp]))); + break; + + case 41: + /*! Production:: options : OPTIONS error */ + + // default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-): + this.$ = yyvstack[yysp - 1]; + this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-) + + + // TODO ... + yyparser.yyError(rmCommonWS(_templateObject15, yylexer.prettyPrintRange(yylstack[yysp], yylstack[yysp - 1]))); + break; + + case 42: + /*! Production:: option_list : option_list option */ + case 59: + /*! Production:: token_list : token_list symbol */ + case 70: + /*! Production:: id_list : id_list id */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = yyvstack[yysp - 1];this.$.push(yyvstack[yysp]); + break; + + case 43: + /*! Production:: option_list : option */ + case 60: + /*! Production:: token_list : symbol */ + case 71: + /*! Production:: id_list : id */ + case 83: + /*! Production:: handle_list : handle_action */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yylstack[yysp]; + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = [yyvstack[yysp]]; + break; + + case 44: + /*! Production:: option : NAME */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yylstack[yysp]; + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = [yyvstack[yysp], true]; + break; + + case 45: + /*! Production:: option : NAME "=" OPTION_STRING_VALUE */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 2, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = [yyvstack[yysp - 2], yyvstack[yysp]]; + break; + + case 46: + /*! Production:: option : NAME "=" OPTION_VALUE */ + case 47: + /*! Production:: option : NAME "=" NAME */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 2, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = [yyvstack[yysp - 2], parseValue(yyvstack[yysp])]; + break; + + case 48: + /*! Production:: option : NAME "=" error */ + + // default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-): + this.$ = yyvstack[yysp - 2]; + this._$ = yyparser.yyMergeLocationInfo(yysp - 2, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-) + + + // TODO ... + yyparser.yyError(rmCommonWS(_templateObject16, yyvstack[yysp - 2], yylexer.prettyPrintRange(yylstack[yysp], yylstack[yysp - 2]))); + break; + + case 49: + /*! 
Production:: option : NAME error */ + + // default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-): + this.$ = yyvstack[yysp - 1]; + this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-) + + + // TODO ... + yyparser.yyError(rmCommonWS(_templateObject17, yylexer.prettyPrintRange(yylstack[yysp], yylstack[yysp - 1]))); + break; + + case 50: + /*! Production:: parse_params : PARSE_PARAM token_list */ + case 52: + /*! Production:: parser_type : PARSER_TYPE symbol */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = yyvstack[yysp]; + break; + + case 51: + /*! Production:: parse_params : PARSE_PARAM error */ + + // default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-): + this.$ = yyvstack[yysp - 1]; + this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-) + + + // TODO ... + yyparser.yyError(rmCommonWS(_templateObject18, yylexer.prettyPrintRange(yylstack[yysp], yylstack[yysp - 1]))); + break; + + case 53: + /*! Production:: parser_type : PARSER_TYPE error */ + + // default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-): + this.$ = yyvstack[yysp - 1]; + this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-) + + + // TODO ... + yyparser.yyError(rmCommonWS(_templateObject19, yylexer.prettyPrintRange(yylstack[yysp], yylstack[yysp - 1]))); + break; + + case 54: + /*! Production:: operator : associativity token_list */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = [yyvstack[yysp - 1]];this.$.push.apply(this.$, yyvstack[yysp]); + break; + + case 55: + /*! Production:: operator : associativity error */ + + // default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-): + this.$ = yyvstack[yysp - 1]; + this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-) + + + // TODO ... + yyparser.yyError(rmCommonWS(_templateObject20, yylexer.prettyPrintRange(yylstack[yysp], yylstack[yysp - 1]))); + break; + + case 56: + /*! Production:: associativity : LEFT */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yylstack[yysp]; + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = 'left'; + break; + + case 57: + /*! Production:: associativity : RIGHT */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yylstack[yysp]; + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = 'right'; + break; + + case 58: + /*! 
Production:: associativity : NONASSOC */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yylstack[yysp]; + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = 'nonassoc'; + break; + + case 61: + /*! Production:: full_token_definitions : optional_token_type id_list */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + var rv = []; + var lst = yyvstack[yysp]; + for (var i = 0, len = lst.length; i < len; i++) { + var id = lst[i]; + var m = { id: id }; + if (yyvstack[yysp - 1]) { + m.type = yyvstack[yysp - 1]; + } + rv.push(m); + } + this.$ = rv; + break; + + case 62: + /*! Production:: full_token_definitions : optional_token_type one_full_token */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + var m = yyvstack[yysp]; + if (yyvstack[yysp - 1]) { + m.type = yyvstack[yysp - 1]; + } + this.$ = [m]; + break; + + case 63: + /*! Production:: one_full_token : id token_value token_description */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 2, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = { + id: yyvstack[yysp - 2], + value: yyvstack[yysp - 1], + description: yyvstack[yysp] + }; + break; + + case 64: + /*! Production:: one_full_token : id token_description */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = { + id: yyvstack[yysp - 1], + description: yyvstack[yysp] + }; + break; + + case 65: + /*! Production:: one_full_token : id token_value */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = { + id: yyvstack[yysp - 1], + value: yyvstack[yysp] + }; + break; + + case 66: + /*! Production:: optional_token_type : %epsilon */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(null, null, null, null, true); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = false; + break; + + case 72: + /*! Production:: grammar : optional_action_header_block production_list */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = yyvstack[yysp - 1]; + this.$.grammar = yyvstack[yysp]; + break; + + case 73: + /*! 
Production:: production_list : production_list production */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = yyvstack[yysp - 1]; + if (yyvstack[yysp][0] in this.$) { + this.$[yyvstack[yysp][0]] = this.$[yyvstack[yysp][0]].concat(yyvstack[yysp][1]); + } else { + this.$[yyvstack[yysp][0]] = yyvstack[yysp][1]; + } + break; + + case 74: + /*! Production:: production_list : production */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yylstack[yysp]; + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = {};this.$[yyvstack[yysp][0]] = yyvstack[yysp][1]; + break; + + case 75: + /*! Production:: production : production_id handle_list ";" */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 2, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = [yyvstack[yysp - 2], yyvstack[yysp - 1]]; + break; + + case 76: + /*! Production:: production : production_id error ";" */ + + // default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-): + this.$ = yyvstack[yysp - 2]; + this._$ = yyparser.yyMergeLocationInfo(yysp - 2, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-) + + + // TODO ... + yyparser.yyError(rmCommonWS(_templateObject21, yylexer.prettyPrintRange(yylstack[yysp - 1], yylstack[yysp - 2]))); + break; + + case 77: + /*! Production:: production : production_id error */ + + // default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-): + this.$ = yyvstack[yysp - 1]; + this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-) + + + // TODO ... + yyparser.yyError(rmCommonWS(_templateObject22, yylexer.prettyPrintRange(yylstack[yysp], yylstack[yysp - 1]))); + break; + + case 78: + /*! Production:: production_id : id optional_production_description ":" */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 2, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = yyvstack[yysp - 2]; + + // TODO: carry rule description support into the parser generator... + break; + + case 79: + /*! Production:: production_id : id optional_production_description error */ + + // default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-): + this.$ = yyvstack[yysp - 2]; + this._$ = yyparser.yyMergeLocationInfo(yysp - 2, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-) + + + // TODO ... + yyparser.yyError(rmCommonWS(_templateObject23, yylexer.prettyPrintRange(yylstack[yysp], yylstack[yysp - 2]))); + break; + + case 81: + /*! Production:: optional_production_description : %epsilon */ + + // default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-): + this.$ = undefined; + this._$ = yyparser.yyMergeLocationInfo(null, null, null, null, true); + // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-) + break; + + case 82: + /*! 
Production:: handle_list : handle_list "|" handle_action */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 2, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = yyvstack[yysp - 2]; + this.$.push(yyvstack[yysp]); + break; + + case 84: + /*! Production:: handle_list : handle_list "|" error */ + + // default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-): + this.$ = yyvstack[yysp - 2]; + this._$ = yyparser.yyMergeLocationInfo(yysp - 2, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-) + + + // TODO ... + yyparser.yyError(rmCommonWS(_templateObject24, yylexer.prettyPrintRange(yylstack[yysp], yylstack[yysp - 2]))); + break; + + case 85: + /*! Production:: handle_list : handle_list ":" error */ + + // default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-): + this.$ = yyvstack[yysp - 2]; + this._$ = yyparser.yyMergeLocationInfo(yysp - 2, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-) + + + // TODO ... + yyparser.yyError(rmCommonWS(_templateObject25, yylexer.prettyPrintRange(yylstack[yysp], yylstack[yysp - 2]))); + break; + + case 86: + /*! Production:: handle_action : handle prec action */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 2, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = [yyvstack[yysp - 2].length ? yyvstack[yysp - 2].join(' ') : '']; + if (yyvstack[yysp]) { + var rv = checkActionBlock(yyvstack[yysp], yylstack[yysp]); + if (rv) { + yyparser.yyError(rmCommonWS(_templateObject26, rv, yylexer.prettyPrintRange(yylstack[yysp], yylstack[yysp - 2]))); + } + this.$.push(yyvstack[yysp]); + } + if (yyvstack[yysp - 1]) { + if (yyvstack[yysp - 2].length === 0) { + yyparser.yyError(rmCommonWS(_templateObject27, yylexer.prettyPrintRange(yylstack[yysp - 2]))); + } + this.$.push(yyvstack[yysp - 1]); + } + if (this.$.length === 1) { + this.$ = this.$[0]; + } + break; + + case 87: + /*! Production:: handle_action : EPSILON action */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = ['']; + if (yyvstack[yysp]) { + var rv = checkActionBlock(yyvstack[yysp], yylstack[yysp]); + if (rv) { + yyparser.yyError(rmCommonWS(_templateObject28, rv, yylexer.prettyPrintRange(yylstack[yysp], yylstack[yysp - 1]))); + } + this.$.push(yyvstack[yysp]); + } + if (this.$.length === 1) { + this.$ = this.$[0]; + } + break; + + case 88: + /*! Production:: handle_action : EPSILON error */ + + // default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-): + this.$ = yyvstack[yysp - 1]; + this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-) + + + // TODO ... + yyparser.yyError(rmCommonWS(_templateObject29, yylexer.prettyPrintRange(yylstack[yysp], yylstack[yysp - 1]))); + break; + + case 89: + /*! 
Production:: handle : handle suffixed_expression */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = yyvstack[yysp - 1]; + this.$.push(yyvstack[yysp]); + break; + + case 90: + /*! Production:: handle : %epsilon */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(null, null, null, null, true); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = []; + break; + + case 91: + /*! Production:: handle_sublist : handle_sublist "|" handle */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 2, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = yyvstack[yysp - 2]; + this.$.push(yyvstack[yysp].join(' ')); + break; + + case 92: + /*! Production:: handle_sublist : handle */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yylstack[yysp]; + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = [yyvstack[yysp].join(' ')]; + break; + + case 93: + /*! Production:: suffixed_expression : expression suffix ALIAS */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 2, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = yyvstack[yysp - 2] + yyvstack[yysp - 1] + "[" + yyvstack[yysp] + "]"; + break; + + case 94: + /*! Production:: suffixed_expression : expression suffix */ + case 123: + /*! Production:: action_comments_body : action_comments_body ACTION_BODY */ + case 129: + /*! Production:: module_code_chunk : module_code_chunk CODE */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = yyvstack[yysp - 1] + yyvstack[yysp]; + break; + + case 96: + /*! Production:: expression : EOF_ID */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yylstack[yysp]; + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = '$end'; + break; + + case 97: + /*! Production:: expression : STRING */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yylstack[yysp]; + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + // Re-encode the string *anyway* as it will + // be made part of the rule rhs a.k.a. production (type: *string*) again and we want + // to be able to handle all tokens, including *significant space* + // encoded as literal tokens in a grammar such as this: `rule: A ' ' B`. + this.$ = dquote(yyvstack[yysp]); + break; + + case 98: + /*! 
Production:: expression : "(" handle_sublist ")" */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 2, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = '(' + yyvstack[yysp - 1].join(' | ') + ')'; + break; + + case 99: + /*! Production:: expression : "(" handle_sublist error */ + + // default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-): + this.$ = yyvstack[yysp - 2]; + this._$ = yyparser.yyMergeLocationInfo(yysp - 2, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-) + + + yyparser.yyError(rmCommonWS(_templateObject30, yylexer.prettyPrintRange(yylstack[yysp], yylstack[yysp - 2]))); + break; + + case 104: + /*! Production:: prec : PREC symbol */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = { prec: yyvstack[yysp] }; + break; + + case 105: + /*! Production:: prec : PREC error */ + + // default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-): + this.$ = yyvstack[yysp - 1]; + this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-) + + + // TODO ... + yyparser.yyError(rmCommonWS(_templateObject31, yylexer.prettyPrintRange(yylstack[yysp], yylstack[yysp - 1]))); + break; + + case 106: + /*! Production:: prec : %epsilon */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(null, null, null, null, true); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = null; + break; + + case 111: + /*! Production:: action_ne : "{" action_body error */ + + // default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-): + this.$ = yyvstack[yysp - 2]; + this._$ = yyparser.yyMergeLocationInfo(yysp - 2, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-) + + + yyparser.yyError(rmCommonWS(_templateObject32, yylexer.prettyPrintRange(yylstack[yysp], yylstack[yysp - 2]))); + break; + + case 115: + /*! Production:: action : ARROW_ACTION */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yylstack[yysp]; + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = '$$ = ' + yyvstack[yysp]; + break; + + case 119: + /*! Production:: action_body : action_body "{" action_body "}" action_comments_body */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 4, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = yyvstack[yysp - 4] + yyvstack[yysp - 3] + yyvstack[yysp - 2] + yyvstack[yysp - 1] + yyvstack[yysp]; + break; + + case 120: + /*! 
Production:: action_body : action_body "{" action_body "}" */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 3, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = yyvstack[yysp - 3] + yyvstack[yysp - 2] + yyvstack[yysp - 1] + yyvstack[yysp]; + break; + + case 121: + /*! Production:: action_body : action_body "{" action_body error */ + + // default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-): + this.$ = yyvstack[yysp - 3]; + this._$ = yyparser.yyMergeLocationInfo(yysp - 3, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-) + + + yyparser.yyError(rmCommonWS(_templateObject33, yylexer.prettyPrintRange(yylstack[yysp], yylstack[yysp - 2]))); + break; + + case 125: + /*! Production:: extra_parser_module_code : optional_module_code_chunk include_macro_code extra_parser_module_code */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 2, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = yyvstack[yysp - 2] + yyvstack[yysp - 1] + yyvstack[yysp]; + break; + + case 126: + /*! Production:: include_macro_code : INCLUDE PATH */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + var fileContent = fs.readFileSync(yyvstack[yysp], { encoding: 'utf-8' }); + var rv = checkActionBlock(fileContent); + if (rv) { + yyparser.yyError(rmCommonWS(_templateObject34, $PATH, rv, yylexer.prettyPrintRange(yylstack[yysp], yylstack[yysp - 1]))); + } + // And no, we don't support nested '%include': + this.$ = '\n// Included by Jison: ' + yyvstack[yysp] + ':\n\n' + fileContent + '\n\n// End Of Include by Jison: ' + yyvstack[yysp] + '\n\n'; + break; + + case 127: + /*! Production:: include_macro_code : INCLUDE error */ + + // default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-): + this.$ = yyvstack[yysp - 1]; + this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-) + + + yyparser.yyError(rmCommonWS(_templateObject35) + yylexer.prettyPrintRange(yylstack[yysp], yylstack[yysp - 1])); + break; + + case 130: + /*! Production:: module_code_chunk : error */ + + // default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-): + this.$ = yyvstack[yysp]; + this._$ = yylstack[yysp]; + // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-) + + + // TODO ... + yyparser.yyError(rmCommonWS(_templateObject36) + yylexer.prettyPrintRange(yylstack[yysp])); + break; + + case 164: + // === NO_ACTION[1] :: ensures that anyone (but us) using this new state will fail dramatically! + // error recovery reduction action (action generated by jison, + // using the user-specified `%code error_recovery_reduction` %{...%} + // code chunk below. 
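        // Illustrative note, not an additional action case: a user action written in the
        // grammar as, say,
        //
        //     handle_sublist : handle_sublist "|" handle
        //         { $$ = $1; $$.push($3.join(' ')); }
        //
        // is emitted as one of the `case` blocks above, with `$1`..`$n` rewritten to
        // `yyvstack[yysp - (n - 1)]` .. `yyvstack[yysp]`, `@1`..`@n` to the matching
        // `yylstack[]` slots, and `$$` / `@$` to `this.$` / `this._$` (compare case 91).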
+ + + break; + + } + }, + table: bt({ + len: u([20, 1, 25, 5, 19, 18, 3, 18, 18, 5, s, [18, 8], 4, 5, 6, 2, s, [6, 4, -1], 3, 3, 4, 8, 1, 18, 18, 26, c, [18, 3], 1, 4, 21, 3, 3, 5, 5, s, [3, 3], 22, 18, 20, 25, 25, 24, 24, 22, s, [18, 3], 3, 19, 2, 4, 1, 1, 7, 7, c, [40, 3], 17, 4, 20, 18, 23, s, [18, 6], 6, 21, 21, 18, 20, 18, 2, 18, 4, 2, s, [1, 3], s, [3, 4], 4, 3, 5, 3, 15, 11, 2, 2, 19, 20, 18, c, [104, 3], 4, 4, s, [2, 4], 7, 3, 4, 16, 1, 4, 10, 14, c, [122, 3], 18, 18, 9, s, [3, 4], 14, 14, 18, 21, 21, 6, 4, c, [50, 5], 7, 7, s, [15, 4], 3, 9, 3, 14, 18, 18, 8, 5, 3, 9, 4]), + symbol: u([2, s, [14, 10, 1], 27, s, [31, 5, 1], 44, 47, 50, 1, c, [21, 18], 51, 55, s, [58, 4, 1], 89, 15, 24, 44, 49, 69, c, [31, 19], c, [18, 19], 24, 83, c, [39, 38], 36, 63, 65, c, [41, 37], c, [18, 108], 24, 26, 53, 2, 24, 25, 26, 52, c, [9, 3], 62, 82, 83, 2, 45, c, [8, 7], 24, 26, c, [5, 3], 25, 56, 57, c, [9, 3], c, [3, 6], c, [266, 3], 48, c, [275, 3], 70, 71, 72, 83, 89, c, [278, 38], 4, 5, 6, 12, s, [14, 11, 1], 26, c, [24, 6], 37, 42, c, [152, 37], 24, 64, 68, 83, 24, c, [119, 3], 54, c, [27, 11], c, [67, 8], 44, 54, c, [147, 6], 12, 15, 44, 84, 89, c, [5, 8], c, [3, 6], c, [46, 20], c, [201, 3], c, [113, 28], c, [40, 9], c, [177, 23], c, [176, 3], c, [25, 24], 1, c, [26, 4], c, [25, 11], c, [73, 7], 46, c, [24, 24], c, [158, 51], c, [18, 25], 25, 28, 57, c, [21, 12], 28, c, [22, 8], 2, 3, 25, 28, s, [1, 3], 2, 44, 46, 88, 90, 91, c, [425, 3], 24, c, [433, 3], c, [440, 3], c, [3, 3], c, [13, 4], c, [153, 4], 7, 12, 15, 24, 26, 38, 40, 41, 42, 44, 74, 75, 76, 2, 5, 26, 73, c, [151, 12], c, [94, 7], c, [307, 38], 37, 44, 66, 67, c, [685, 109], 12, 13, 43, 86, 87, c, [349, 14], c, [445, 11], c, [84, 46], c, [504, 10], c, [348, 19], c, [58, 19], 25, 29, 30, c, [346, 5], 1, 44, 89, 1, c, [483, 3], c, [3, 6], c, [339, 3], c, [121, 3], c, [496, 3], c, [8, 5], c, [349, 8], c, [348, 4], 78, 79, 81, c, [568, 5], 15, 42, 44, 84, 85, 89, 2, 5, 2, 5, c, [359, 19], c, [19, 11], c, [142, 8], c, [337, 30], c, [180, 26], c, [284, 3], c, [287, 4], c, [4, 4], 25, 28, 25, 28, c, [4, 4], c, [517, 8], c, [168, 6], c, [507, 14], c, [506, 3], c, [189, 7], c, [162, 8], s, [4, 5, 1], c, [190, 8], c, [1024, 6], s, [4, 9, 1], c, [22, 3], s, [39, 4, 1], 44, 80, c, [19, 18], c, [18, 37], c, [16, 3], c, [88, 3], 76, 77, c, [292, 6], c, [3, 6], c, [144, 14], c, [14, 15], c, [480, 39], c, [21, 21], c, [549, 6], c, [6, 3], 1, c, [111, 12], c, [234, 7], c, [7, 7], c, [238, 10], c, [179, 11], c, [15, 40], 6, 8, c, [209, 7], 78, 79, c, [374, 4], c, [313, 14], c, [271, 43], c, [164, 4], c, [169, 4], c, [78, 12], 43]), + type: u([s, [2, 18], 0, 0, 1, c, [21, 20], s, [0, 5], c, [10, 5], s, [2, 39], c, [40, 41], c, [41, 40], s, [2, 108], c, [148, 5], c, [239, 6], c, [159, 6], c, [253, 10], c, [176, 14], c, [36, 7], c, [197, 102], c, [103, 7], c, [108, 21], c, [21, 10], c, [423, 36], c, [373, 149], c, [158, 67], c, [57, 32], c, [322, 8], c, [98, 26], c, [489, 7], c, [721, 173], c, [462, 131], c, [130, 37], c, [375, 11], c, [818, 45], c, [223, 79], c, [124, 24], c, [986, 15], c, [38, 19], c, [57, 20], c, [157, 62], c, [443, 106], c, [106, 103], c, [103, 62], c, [1248, 16], c, [78, 6]]), + state: u([1, 2, 5, 14, 12, 13, 8, 20, 11, 29, 28, 31, 34, 36, 38, 42, 47, 49, 50, 54, 49, 50, 56, 50, 58, 60, 62, 65, 68, 69, 70, 67, 72, 71, 73, 74, 78, 79, 82, 83, 82, 84, 50, 84, 50, 86, 92, 94, 93, 97, 69, 70, 98, 100, 101, 103, 105, 106, 107, 110, 111, 117, 124, 126, 123, 133, 131, 82, 137, 142, 94, 93, 143, 101, 133, 146, 82, 147, 50, 149, 154, 153, 
155, 111, 124, 126, 162, 163, 124, 126]), + mode: u([s, [2, 18], s, [1, 18], c, [21, 4], s, [2, 36], c, [42, 5], c, [38, 34], c, [77, 38], s, [2, 108], s, [1, 20], c, [30, 15], c, [134, 100], c, [106, 4], c, [335, 26], c, [151, 16], c, [376, 48], c, [347, 120], c, [63, 75], c, [13, 9], c, [23, 4], c, [4, 3], c, [587, 6], c, [427, 12], c, [9, 15], c, [335, 13], c, [389, 39], c, [45, 43], c, [509, 77], c, [762, 121], c, [129, 9], c, [756, 14], c, [334, 14], c, [41, 6], c, [367, 5], c, [784, 37], c, [208, 63], c, [1142, 20], c, [1081, 10], c, [487, 14], c, [22, 9], c, [151, 17], c, [221, 10], c, [803, 156], c, [318, 61], c, [216, 50], c, [457, 7], c, [455, 38], c, [123, 34], c, [1206, 8], 1]), + goto: u([s, [10, 18], 4, 3, 10, 6, 7, 9, s, [15, 5, 1], 24, 22, 23, 25, 26, 27, 21, s, [6, 3], 30, s, [11, 18], s, [9, 18], 32, 33, s, [13, 18], s, [14, 18], 35, 66, 37, s, [16, 18], s, [17, 18], s, [18, 18], s, [19, 18], s, [20, 18], s, [21, 18], s, [22, 18], s, [23, 18], 39, 40, 41, s, [43, 4, 1], 48, 33, 51, 53, 52, 55, 33, 51, 57, 33, 51, 59, 61, s, [56, 3], s, [57, 3], s, [58, 3], 4, 63, 64, 66, 33, 21, 3, s, [12, 18], s, [29, 18], s, [109, 26], s, [15, 18], s, [30, 18], 33, 67, 75, 76, 77, s, [31, 11], c, [13, 9], s, [35, 3], s, [36, 3], 80, 81, 21, c, [3, 3], s, [32, 3], s, [33, 3], s, [34, 3], s, [54, 11], 33, 51, s, [54, 7], s, [55, 18], s, [60, 20], s, [107, 25], s, [108, 25], s, [126, 24], s, [127, 24], s, [50, 11], 33, 51, s, [50, 7], s, [51, 18], s, [52, 18], s, [53, 18], 61, 85, s, [41, 12], 87, s, [41, 6], 43, 43, 89, 88, 44, 44, 90, 91, 132, 96, 132, 95, s, [72, 3], 33, s, [7, 3], s, [8, 3], s, [74, 4], 99, s, [90, 8], 102, s, [90, 4], 81, 81, 104, s, [61, 11], 33, s, [61, 7], s, [62, 18], s, [71, 12], 109, s, [71, 6], 108, 71, s, [24, 18], s, [25, 18], s, [37, 18], s, [38, 18], s, [26, 18], s, [27, 18], s, [117, 3], s, [112, 22], s, [113, 21], s, [28, 18], s, [59, 20], s, [39, 18], 42, 42, s, [40, 18], 116, 115, 113, 114, 49, 49, 1, 2, 5, 124, 21, 131, 131, 118, s, [128, 3], s, [130, 3], s, [73, 4], 119, 121, 120, 77, 77, 122, 77, 77, s, [83, 3], s, [106, 3], 130, 106, 106, 127, 129, 128, 125, 106, 106, 132, s, [116, 3], 80, 81, 134, 21, 136, 135, 80, 80, s, [70, 19], s, [65, 11], 109, s, [65, 7], s, [64, 18], s, [68, 19], s, [69, 18], 139, 140, 138, s, [118, 3], 141, s, [122, 4], 45, 45, 46, 46, 47, 47, 48, 48, c, [494, 4], s, [129, 3], s, [75, 4], 144, c, [487, 13], 145, s, [76, 4], c, [153, 7], s, [89, 14], 148, 33, 51, s, [100, 6], 150, 151, 152, s, [100, 9], s, [95, 18], s, [96, 18], s, [97, 18], s, [90, 7], s, [87, 3], s, [88, 3], s, [114, 3], s, [115, 3], s, [78, 14], s, [79, 14], s, [63, 18], s, [110, 21], s, [111, 21], c, [526, 4], s, [123, 4], 125, s, [82, 3], s, [84, 3], s, [85, 3], s, [86, 3], s, [104, 7], s, [105, 7], s, [94, 10], 156, s, [94, 4], s, [101, 15], s, [102, 15], s, [103, 15], 158, 159, 157, 92, 92, 130, 92, c, [465, 3], 161, 140, 160, s, [93, 14], s, [98, 18], s, [99, 18], s, [90, 7], s, [120, 3], 112, s, [121, 3], 91, 91, 130, 91, c, [74, 3], s, [119, 3], 141]) + }), + defaultActions: bda({ + idx: u([0, 3, 5, 7, 8, s, [10, 8, 1], 25, 26, 27, s, [30, 6, 1], 37, 40, 41, 44, 45, 46, s, [48, 6, 1], 55, 56, 57, 60, 66, 67, 68, 72, s, [74, 6, 1], s, [81, 7, 1], s, [89, 4, 1], 95, 96, 97, 100, 104, 105, 107, 108, 109, s, [112, 5, 1], 118, 119, 122, 124, s, [127, 13, 1], s, [141, 8, 1], 150, 151, 152, s, [156, 4, 1], 161]), + goto: u([10, 6, 9, 13, 14, s, [16, 8, 1], 56, 57, 58, 3, 12, 29, 109, 15, 30, 67, 35, 36, 32, 33, 34, 55, 60, 107, 108, 126, 127, 51, 52, 53, 
43, 7, 8, 74, 62, 24, 25, 37, 38, 26, 27, 112, 113, 28, 59, 39, 42, 40, 49, 1, 2, 5, 128, 130, 73, 83, 80, 70, 64, 68, 69, 122, s, [45, 4, 1], 129, 75, 76, 89, 95, 96, 97, 90, 87, 88, 114, 115, 78, 79, 63, 110, 111, 123, 125, 82, 84, 85, 86, 104, 105, 101, 102, 103, 93, 98, 99, 90, 121]) + }), + parseError: function parseError(str, hash, ExceptionClass) { + if (hash.recoverable && typeof this.trace === 'function') { + this.trace(str); + hash.destroy(); // destroy... well, *almost*! + } else { + if (!ExceptionClass) { + ExceptionClass = this.JisonParserError; + } + throw new ExceptionClass(str, hash); + } + }, + parse: function parse(input) { + var self = this; + var stack = new Array(128); // token stack: stores token which leads to state at the same index (column storage) + var sstack = new Array(128); // state stack: stores states (column storage) + + var vstack = new Array(128); // semantic value stack + var lstack = new Array(128); // location stack + var table = this.table; + var sp = 0; // 'stack pointer': index into the stacks + var yyloc; + + var symbol = 0; + var preErrorSymbol = 0; + var lastEofErrorStateDepth = 0; + var recoveringErrorInfo = null; + var recovering = 0; // (only used when the grammar contains error recovery rules) + var TERROR = this.TERROR; + var EOF = this.EOF; + var ERROR_RECOVERY_TOKEN_DISCARD_COUNT = this.options.errorRecoveryTokenDiscardCount | 0 || 3; + var NO_ACTION = [0, 164 /* === table.length :: ensures that anyone using this new state will fail dramatically! */]; + + var lexer; + if (this.__lexer__) { + lexer = this.__lexer__; + } else { + lexer = this.__lexer__ = Object.create(this.lexer); + } + + var sharedState_yy = { + parseError: undefined, + quoteName: undefined, + lexer: undefined, + parser: undefined, + pre_parse: undefined, + post_parse: undefined, + pre_lex: undefined, + post_lex: undefined // WARNING: must be written this way for the code expanders to work correctly in both ES5 and ES6 modes! + }; + + var ASSERT; + if (typeof assert !== 'function') { + ASSERT = function JisonAssert(cond, msg) { + if (!cond) { + throw new Error('assertion failed: ' + (msg || '***')); + } + }; + } else { + ASSERT = assert; + } + + this.yyGetSharedState = function yyGetSharedState() { + return sharedState_yy; + }; + + this.yyGetErrorInfoTrack = function yyGetErrorInfoTrack() { + return recoveringErrorInfo; + }; + + // shallow clone objects, straight copy of simple `src` values + // e.g. `lexer.yytext` MAY be a complex value object, + // rather than a simple string/value. + function shallow_copy(src) { + if ((typeof src === 'undefined' ? 
'undefined' : _typeof(src)) === 'object') { + var dst = {}; + for (var k in src) { + if (Object.prototype.hasOwnProperty.call(src, k)) { + dst[k] = src[k]; + } + } + return dst; + } + return src; + } + function shallow_copy_noclobber(dst, src) { + for (var k in src) { + if (typeof dst[k] === 'undefined' && Object.prototype.hasOwnProperty.call(src, k)) { + dst[k] = src[k]; + } + } + } + function copy_yylloc(loc) { + var rv = shallow_copy(loc); + if (rv && rv.range) { + rv.range = rv.range.slice(0); + } + return rv; + } + + // copy state + shallow_copy_noclobber(sharedState_yy, this.yy); + + sharedState_yy.lexer = lexer; + sharedState_yy.parser = this; + + // *Always* setup `yyError`, `YYRECOVERING`, `yyErrOk` and `yyClearIn` functions as it is paramount + // to have *their* closure match ours -- if we only set them up once, + // any subsequent `parse()` runs will fail in very obscure ways when + // these functions are invoked in the user action code block(s) as + // their closure will still refer to the `parse()` instance which set + // them up. Hence we MUST set them up at the start of every `parse()` run! + if (this.yyError) { + this.yyError = function yyError(str /*, ...args */) { + + var error_rule_depth = this.options.parserErrorsAreRecoverable ? locateNearestErrorRecoveryRule(state) : -1; + var expected = this.collect_expected_token_set(state); + var hash = this.constructParseErrorInfo(str, null, expected, error_rule_depth >= 0); + // append to the old one? + if (recoveringErrorInfo) { + var esp = recoveringErrorInfo.info_stack_pointer; + + recoveringErrorInfo.symbol_stack[esp] = symbol; + var v = this.shallowCopyErrorInfo(hash); + v.yyError = true; + v.errorRuleDepth = error_rule_depth; + v.recovering = recovering; + // v.stackSampleLength = error_rule_depth + EXTRA_STACK_SAMPLE_DEPTH; + + recoveringErrorInfo.value_stack[esp] = v; + recoveringErrorInfo.location_stack[esp] = copy_yylloc(lexer.yylloc); + recoveringErrorInfo.state_stack[esp] = newState || NO_ACTION[1]; + + ++esp; + recoveringErrorInfo.info_stack_pointer = esp; + } else { + recoveringErrorInfo = this.shallowCopyErrorInfo(hash); + recoveringErrorInfo.yyError = true; + recoveringErrorInfo.errorRuleDepth = error_rule_depth; + recoveringErrorInfo.recovering = recovering; + } + + // Add any extra args to the hash under the name `extra_error_attributes`: + var args = Array.prototype.slice.call(arguments, 1); + if (args.length) { + hash.extra_error_attributes = args; + } + + var r = this.parseError(str, hash, this.JisonParserError); + return r; + }; + } + + // Does the shared state override the default `parseError` that already comes with this instance? + if (typeof sharedState_yy.parseError === 'function') { + this.parseError = function parseErrorAlt(str, hash, ExceptionClass) { + if (!ExceptionClass) { + ExceptionClass = this.JisonParserError; + } + return sharedState_yy.parseError.call(this, str, hash, ExceptionClass); + }; + } else { + this.parseError = this.originalParseError; + } + + // Does the shared state override the default `quoteName` that already comes with this instance? 
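        // Illustrative sketch of how these `yy`-based overrides (`parseError` above,
        // `quoteName` below) are picked up; hypothetical userland code, not part of the
        // generated parser. Anything placed on `parser.yy` before calling `parse()` is
        // copied into `sharedState_yy` above, e.g.
        //
        //     parser.yy.parseError = function (str, hash, ExceptionClass) {
        //         console.error(str, hash.loc, hash.expected);
        //         if (!hash.recoverable) {
        //             throw new ExceptionClass(str, hash);
        //         }
        //         // returning (instead of throwing) on a recoverable error lets the
        //         // parser continue with its error-recovery rules.
        //     };
        //
        // Such an override is active for this `parse()` run only; `cleanupAfterParse()`
        // restores the original `parseError` / `quoteName` afterwards.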
+ if (typeof sharedState_yy.quoteName === 'function') { + this.quoteName = function quoteNameAlt(id_str) { + return sharedState_yy.quoteName.call(this, id_str); + }; + } else { + this.quoteName = this.originalQuoteName; + } + + // set up the cleanup function; make it an API so that external code can re-use this one in case of + // calamities or when the `%options no-try-catch` option has been specified for the grammar, in which + // case this parse() API method doesn't come with a `finally { ... }` block any more! + // + // NOTE: as this API uses parse() as a closure, it MUST be set again on every parse() invocation, + // or else your `sharedState`, etc. references will be *wrong*! + this.cleanupAfterParse = function parser_cleanupAfterParse(resultValue, invoke_post_methods, do_not_nuke_errorinfos) { + var rv; + + if (invoke_post_methods) { + var hash; + + if (sharedState_yy.post_parse || this.post_parse) { + // create an error hash info instance: we re-use this API in a **non-error situation** + // as this one delivers all parser internals ready for access by userland code. + hash = this.constructParseErrorInfo(null /* no error! */, null /* no exception! */, null, false); + } + + if (sharedState_yy.post_parse) { + rv = sharedState_yy.post_parse.call(this, sharedState_yy, resultValue, hash); + if (typeof rv !== 'undefined') resultValue = rv; + } + if (this.post_parse) { + rv = this.post_parse.call(this, sharedState_yy, resultValue, hash); + if (typeof rv !== 'undefined') resultValue = rv; + } + + // cleanup: + if (hash && hash.destroy) { + hash.destroy(); + } + } + + if (this.__reentrant_call_depth > 1) return resultValue; // do not (yet) kill the sharedState when this is a reentrant run. + + // clean up the lingering lexer structures as well: + if (lexer.cleanupAfterLex) { + lexer.cleanupAfterLex(do_not_nuke_errorinfos); + } + + // prevent lingering circular references from causing memory leaks: + if (sharedState_yy) { + sharedState_yy.lexer = undefined; + sharedState_yy.parser = undefined; + if (lexer.yy === sharedState_yy) { + lexer.yy = undefined; + } + } + sharedState_yy = undefined; + this.parseError = this.originalParseError; + this.quoteName = this.originalQuoteName; + + // nuke the vstack[] array at least as that one will still reference obsoleted user values. + // To be safe, we nuke the other internal stack columns as well... + stack.length = 0; // fastest way to nuke an array without overly bothering the GC + sstack.length = 0; + lstack.length = 0; + vstack.length = 0; + sp = 0; + + // nuke the error hash info instances created during this run. + // Userland code must COPY any data/references + // in the error hash instance(s) it is more permanently interested in. + if (!do_not_nuke_errorinfos) { + for (var i = this.__error_infos.length - 1; i >= 0; i--) { + var el = this.__error_infos[i]; + if (el && typeof el.destroy === 'function') { + el.destroy(); + } + } + this.__error_infos.length = 0; + + for (var i = this.__error_recovery_infos.length - 1; i >= 0; i--) { + var el = this.__error_recovery_infos[i]; + if (el && typeof el.destroy === 'function') { + el.destroy(); + } + } + this.__error_recovery_infos.length = 0; + + if (recoveringErrorInfo && typeof recoveringErrorInfo.destroy === 'function') { + recoveringErrorInfo.destroy(); + recoveringErrorInfo = undefined; + } + } + + return resultValue; + }; + + // merge yylloc info into a new yylloc instance. + // + // `first_index` and `last_index` MAY be UNDEFINED/NULL or these are indexes into the `lstack[]` location stack array. 
+ // + // `first_yylloc` and `last_yylloc` MAY be UNDEFINED/NULL or explicit (custom or regular) `yylloc` instances, in which + // case these override the corresponding first/last indexes. + // + // `dont_look_back` is an optional flag (default: FALSE), which instructs this merge operation NOT to search + // through the parse location stack for a location, which would otherwise be used to construct the new (epsilon!) + // yylloc info. + // + // Note: epsilon rule's yylloc situation is detected by passing both `first_index` and `first_yylloc` as UNDEFINED/NULL. + this.yyMergeLocationInfo = function parser_yyMergeLocationInfo(first_index, last_index, first_yylloc, last_yylloc, dont_look_back) { + var i1 = first_index | 0, + i2 = last_index | 0; + var l1 = first_yylloc, + l2 = last_yylloc; + var rv; + + // rules: + // - first/last yylloc entries override first/last indexes + + if (!l1) { + if (first_index != null) { + for (var i = i1; i <= i2; i++) { + l1 = lstack[i]; + if (l1) { + break; + } + } + } + } + + if (!l2) { + if (last_index != null) { + for (var i = i2; i >= i1; i--) { + l2 = lstack[i]; + if (l2) { + break; + } + } + } + } + + // - detect if an epsilon rule is being processed and act accordingly: + if (!l1 && first_index == null) { + // epsilon rule span merger. With optional look-ahead in l2. + if (!dont_look_back) { + for (var i = (i1 || sp) - 1; i >= 0; i--) { + l1 = lstack[i]; + if (l1) { + break; + } + } + } + if (!l1) { + if (!l2) { + // when we still don't have any valid yylloc info, we're looking at an epsilon rule + // without look-ahead and no preceding terms and/or `dont_look_back` set: + // in that case we ca do nothing but return NULL/UNDEFINED: + return undefined; + } else { + // shallow-copy L2: after all, we MAY be looking + // at unconventional yylloc info objects... + rv = shallow_copy(l2); + if (rv.range) { + // shallow copy the yylloc ranges info to prevent us from modifying the original arguments' entries: + rv.range = rv.range.slice(0); + } + return rv; + } + } else { + // shallow-copy L1, then adjust first col/row 1 column past the end. + rv = shallow_copy(l1); + rv.first_line = rv.last_line; + rv.first_column = rv.last_column; + if (rv.range) { + // shallow copy the yylloc ranges info to prevent us from modifying the original arguments' entries: + rv.range = rv.range.slice(0); + rv.range[0] = rv.range[1]; + } + + if (l2) { + // shallow-mixin L2, then adjust last col/row accordingly. + shallow_copy_noclobber(rv, l2); + rv.last_line = l2.last_line; + rv.last_column = l2.last_column; + if (rv.range && l2.range) { + rv.range[1] = l2.range[1]; + } + } + return rv; + } + } + + if (!l1) { + l1 = l2; + l2 = null; + } + if (!l1) { + return undefined; + } + + // shallow-copy L1|L2, before we try to adjust the yylloc values: after all, we MAY be looking + // at unconventional yylloc info objects... + rv = shallow_copy(l1); + + // first_line: ..., + // first_column: ..., + // last_line: ..., + // last_column: ..., + if (rv.range) { + // shallow copy the yylloc ranges info to prevent us from modifying the original arguments' entries: + rv.range = rv.range.slice(0); + } + + if (l2) { + shallow_copy_noclobber(rv, l2); + rv.last_line = l2.last_line; + rv.last_column = l2.last_column; + if (rv.range && l2.range) { + rv.range[1] = l2.range[1]; + } + } + + return rv; + }; + + // NOTE: as this API uses parse() as a closure, it MUST be set again on every parse() invocation, + // or else your `lexer`, `sharedState`, etc. references will be *wrong*! 
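        // Illustrative sketch of a merge performed by `yyMergeLocationInfo()` above
        // (hypothetical yylloc values; the yylloc structure itself is documented with the
        // lexer further below):
        //
        //     var l1 = { first_line: 1, first_column: 0, last_line: 1, last_column: 4, range: [0, 4] };
        //     var l2 = { first_line: 1, first_column: 5, last_line: 2, last_column: 3, range: [5, 17] };
        //     this.yyMergeLocationInfo(null, null, l1, l2);
        //     // --> { first_line: 1, first_column: 0, last_line: 2, last_column: 3, range: [0, 17] }
        //
        // i.e. the merged span starts where `l1` starts and ends where `l2` ends, with the
        // `range` indexes combined accordingly.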
+ this.constructParseErrorInfo = function parser_constructParseErrorInfo(msg, ex, expected, recoverable) { + var pei = { + errStr: msg, + exception: ex, + text: lexer.match, + value: lexer.yytext, + token: this.describeSymbol(symbol) || symbol, + token_id: symbol, + line: lexer.yylineno, + loc: copy_yylloc(lexer.yylloc), + expected: expected, + recoverable: recoverable, + state: state, + action: action, + new_state: newState, + symbol_stack: stack, + state_stack: sstack, + value_stack: vstack, + location_stack: lstack, + stack_pointer: sp, + yy: sharedState_yy, + lexer: lexer, + parser: this, + + // and make sure the error info doesn't stay due to potential + // ref cycle via userland code manipulations. + // These would otherwise all be memory leak opportunities! + // + // Note that only array and object references are nuked as those + // constitute the set of elements which can produce a cyclic ref. + // The rest of the members is kept intact as they are harmless. + destroy: function destructParseErrorInfo() { + // remove cyclic references added to error info: + // info.yy = null; + // info.lexer = null; + // info.value = null; + // info.value_stack = null; + // ... + var rec = !!this.recoverable; + for (var key in this) { + if (this.hasOwnProperty(key) && (typeof key === 'undefined' ? 'undefined' : _typeof(key)) === 'object') { + this[key] = undefined; + } + } + this.recoverable = rec; + } + }; + // track this instance so we can `destroy()` it once we deem it superfluous and ready for garbage collection! + this.__error_infos.push(pei); + return pei; + }; + + // clone some parts of the (possibly enhanced!) errorInfo object + // to give them some persistence. + this.shallowCopyErrorInfo = function parser_shallowCopyErrorInfo(p) { + var rv = shallow_copy(p); + + // remove the large parts which can only cause cyclic references + // and are otherwise available from the parser kernel anyway. + delete rv.sharedState_yy; + delete rv.parser; + delete rv.lexer; + + // lexer.yytext MAY be a complex value object, rather than a simple string/value: + rv.value = shallow_copy(rv.value); + + // yylloc info: + rv.loc = copy_yylloc(rv.loc); + + // the 'expected' set won't be modified, so no need to clone it: + //rv.expected = rv.expected.slice(0); + + //symbol stack is a simple array: + rv.symbol_stack = rv.symbol_stack.slice(0); + // ditto for state stack: + rv.state_stack = rv.state_stack.slice(0); + // clone the yylloc's in the location stack?: + rv.location_stack = rv.location_stack.map(copy_yylloc); + // and the value stack may carry both simple and complex values: + // shallow-copy the latter. + rv.value_stack = rv.value_stack.map(shallow_copy); + + // and we don't bother with the sharedState_yy reference: + //delete rv.yy; + + // now we prepare for tracking the COMBINE actions + // in the error recovery code path: + // + // as we want to keep the maximum error info context, we + // *scan* the state stack to find the first *empty* slot. + // This position will surely be AT OR ABOVE the current + // stack pointer, but we want to keep the 'used but discarded' + // part of the parse stacks *intact* as those slots carry + // error context that may be useful when you want to produce + // very detailed error diagnostic reports. + // + // ### Purpose of each stack pointer: + // + // - stack_pointer: points at the top of the parse stack + // **as it existed at the time of the error + // occurrence, i.e. 
at the time the stack + // snapshot was taken and copied into the + // errorInfo object.** + // - base_pointer: the bottom of the **empty part** of the + // stack, i.e. **the start of the rest of + // the stack space /above/ the existing + // parse stack. This section will be filled + // by the error recovery process as it + // travels the parse state machine to + // arrive at the resolving error recovery rule.** + // - info_stack_pointer: + // this stack pointer points to the **top of + // the error ecovery tracking stack space**, i.e. + // this stack pointer takes up the role of + // the `stack_pointer` for the error recovery + // process. Any mutations in the **parse stack** + // are **copy-appended** to this part of the + // stack space, keeping the bottom part of the + // stack (the 'snapshot' part where the parse + // state at the time of error occurrence was kept) + // intact. + // - root_failure_pointer: + // copy of the `stack_pointer`... + // + for (var i = rv.stack_pointer; typeof rv.state_stack[i] !== 'undefined'; i++) { + // empty + } + rv.base_pointer = i; + rv.info_stack_pointer = i; + + rv.root_failure_pointer = rv.stack_pointer; + + // track this instance so we can `destroy()` it once we deem it superfluous and ready for garbage collection! + this.__error_recovery_infos.push(rv); + + return rv; + }; + + function lex() { + var token = lexer.lex(); + // if token isn't its numeric value, convert + if (typeof token !== 'number') { + token = self.symbols_[token] || token; + } + + return token || EOF; + } + + var state, action, r, t; + var yyval = { + $: true, + _$: undefined, + yy: sharedState_yy + }; + var p; + var yyrulelen; + var this_production; + var newState; + var retval = false; + + // Return the rule stack depth where the nearest error rule can be found. + // Return -1 when no error recovery rule was found. + function locateNearestErrorRecoveryRule(state) { + var stack_probe = sp - 1; + var depth = 0; + + // try to recover from error + for (;;) { + // check for error recovery rule in this state + + + var t = table[state][TERROR] || NO_ACTION; + if (t[0]) { + // We need to make sure we're not cycling forever: + // once we hit EOF, even when we `yyerrok()` an error, we must + // prevent the core from running forever, + // e.g. when parent rules are still expecting certain input to + // follow after this, for example when you handle an error inside a set + // of braces which are matched by a parent rule in your grammar. + // + // Hence we require that every error handling/recovery attempt + // *after we've hit EOF* has a diminishing state stack: this means + // we will ultimately have unwound the state stack entirely and thus + // terminate the parse in a controlled fashion even when we have + // very complex error/recovery code interplay in the core + user + // action code blocks: + + + if (symbol === EOF) { + if (!lastEofErrorStateDepth) { + lastEofErrorStateDepth = sp - 1 - depth; + } else if (lastEofErrorStateDepth <= sp - 1 - depth) { + + --stack_probe; // popStack(1): [symbol, action] + state = sstack[stack_probe]; + ++depth; + continue; + } + } + return depth; + } + if (state === 0 /* $accept rule */ || stack_probe < 1) { + + return -1; // No suitable error recovery rule available. 
+ } + --stack_probe; // popStack(1): [symbol, action] + state = sstack[stack_probe]; + ++depth; + } + } + + try { + this.__reentrant_call_depth++; + + lexer.setInput(input, sharedState_yy); + + yyloc = lexer.yylloc; + lstack[sp] = yyloc; + vstack[sp] = null; + sstack[sp] = 0; + stack[sp] = 0; + ++sp; + + if (this.pre_parse) { + this.pre_parse.call(this, sharedState_yy); + } + if (sharedState_yy.pre_parse) { + sharedState_yy.pre_parse.call(this, sharedState_yy); + } + + newState = sstack[sp - 1]; + for (;;) { + // retrieve state number from top of stack + state = newState; // sstack[sp - 1]; + + // use default actions if available + if (this.defaultActions[state]) { + action = 2; + newState = this.defaultActions[state]; + } else { + // The single `==` condition below covers both these `===` comparisons in a single + // operation: + // + // if (symbol === null || typeof symbol === 'undefined') ... + if (!symbol) { + symbol = lex(); + } + // read action for current state and first input + t = table[state] && table[state][symbol] || NO_ACTION; + newState = t[1]; + action = t[0]; + + // handle parse error + if (!action) { + // first see if there's any chance at hitting an error recovery rule: + var error_rule_depth = locateNearestErrorRecoveryRule(state); + var errStr = null; + var errSymbolDescr = this.describeSymbol(symbol) || symbol; + var expected = this.collect_expected_token_set(state); + + if (!recovering) { + // Report error + if (typeof lexer.yylineno === 'number') { + errStr = 'Parse error on line ' + (lexer.yylineno + 1) + ': '; + } else { + errStr = 'Parse error: '; + } + + if (typeof lexer.showPosition === 'function') { + errStr += '\n' + lexer.showPosition(79 - 10, 10) + '\n'; + } + if (expected.length) { + errStr += 'Expecting ' + expected.join(', ') + ', got unexpected ' + errSymbolDescr; + } else { + errStr += 'Unexpected ' + errSymbolDescr; + } + + p = this.constructParseErrorInfo(errStr, null, expected, error_rule_depth >= 0); + + // cleanup the old one before we start the new error info track: + if (recoveringErrorInfo && typeof recoveringErrorInfo.destroy === 'function') { + recoveringErrorInfo.destroy(); + } + recoveringErrorInfo = this.shallowCopyErrorInfo(p); + + r = this.parseError(p.errStr, p, this.JisonParserError); + + // Protect against overly blunt userland `parseError` code which *sets* + // the `recoverable` flag without properly checking first: + // we always terminate the parse when there's no recovery rule available anyhow! + if (!p.recoverable || error_rule_depth < 0) { + retval = r; + break; + } else { + // TODO: allow parseError callback to edit symbol and or state at the start of the error recovery process... 
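                        // Illustrative sketch of the report assembled above (hypothetical grammar
                        // and input, abbreviated showPosition() excerpt):
                        //
                        //     Parse error on line 3:
                        //     ...: foo bar ;
                        //     ----------^
                        //     Expecting ":", "|", ";", got unexpected SYMBOL
                        //
                        // The same details are available in structured form on the error info hash
                        // handed to parseError(): `expected`, `token`, `loc`, `recoverable`, etc.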
+ } + } + + var esp = recoveringErrorInfo.info_stack_pointer; + + // just recovered from another error + if (recovering === ERROR_RECOVERY_TOKEN_DISCARD_COUNT && error_rule_depth >= 0) { + // SHIFT current lookahead and grab another + recoveringErrorInfo.symbol_stack[esp] = symbol; + recoveringErrorInfo.value_stack[esp] = shallow_copy(lexer.yytext); + recoveringErrorInfo.location_stack[esp] = copy_yylloc(lexer.yylloc); + recoveringErrorInfo.state_stack[esp] = newState; // push state + ++esp; + + // Pick up the lexer details for the current symbol as that one is not 'look-ahead' any more: + + + yyloc = lexer.yylloc; + + preErrorSymbol = 0; + symbol = lex(); + } + + // try to recover from error + if (error_rule_depth < 0) { + ASSERT(recovering > 0); + recoveringErrorInfo.info_stack_pointer = esp; + + // barf a fatal hairball when we're out of look-ahead symbols and none hit a match + // while we are still busy recovering from another error: + var po = this.__error_infos[this.__error_infos.length - 1]; + if (!po) { + p = this.constructParseErrorInfo('Parsing halted while starting to recover from another error.', null, expected, false); + } else { + p = this.constructParseErrorInfo('Parsing halted while starting to recover from another error. Previous error which resulted in this fatal result: ' + po.errStr, null, expected, false); + p.extra_error_attributes = po; + } + retval = this.parseError(p.errStr, p, this.JisonParserError); + break; + } + + preErrorSymbol = symbol === TERROR ? 0 : symbol; // save the lookahead token + symbol = TERROR; // insert generic error symbol as new lookahead + + var EXTRA_STACK_SAMPLE_DEPTH = 3; + + // REDUCE/COMBINE the pushed terms/tokens to a new ERROR token: + recoveringErrorInfo.symbol_stack[esp] = preErrorSymbol; + if (errStr) { + recoveringErrorInfo.value_stack[esp] = { + yytext: shallow_copy(lexer.yytext), + errorRuleDepth: error_rule_depth, + errorStr: errStr, + errorSymbolDescr: errSymbolDescr, + expectedStr: expected, + stackSampleLength: error_rule_depth + EXTRA_STACK_SAMPLE_DEPTH + }; + } else { + recoveringErrorInfo.value_stack[esp] = { + yytext: shallow_copy(lexer.yytext), + errorRuleDepth: error_rule_depth, + stackSampleLength: error_rule_depth + EXTRA_STACK_SAMPLE_DEPTH + }; + } + recoveringErrorInfo.location_stack[esp] = copy_yylloc(lexer.yylloc); + recoveringErrorInfo.state_stack[esp] = newState || NO_ACTION[1]; + + ++esp; + recoveringErrorInfo.info_stack_pointer = esp; + + yyval.$ = recoveringErrorInfo; + yyval._$ = undefined; + + yyrulelen = error_rule_depth; + + r = this.performAction.call(yyval, yyloc, NO_ACTION[1], sp - 1, vstack, lstack); + + if (typeof r !== 'undefined') { + retval = r; + break; + } + + // pop off stack + sp -= yyrulelen; + + // and move the top entries + discarded part of the parse stacks onto the error info stack: + for (var idx = sp - EXTRA_STACK_SAMPLE_DEPTH, top = idx + yyrulelen; idx < top; idx++, esp++) { + recoveringErrorInfo.symbol_stack[esp] = stack[idx]; + recoveringErrorInfo.value_stack[esp] = shallow_copy(vstack[idx]); + recoveringErrorInfo.location_stack[esp] = copy_yylloc(lstack[idx]); + recoveringErrorInfo.state_stack[esp] = sstack[idx]; + } + + recoveringErrorInfo.symbol_stack[esp] = TERROR; + recoveringErrorInfo.value_stack[esp] = shallow_copy(yyval.$); + recoveringErrorInfo.location_stack[esp] = copy_yylloc(yyval._$); + + // goto new state = table[STATE][NONTERMINAL] + newState = sstack[sp - 1]; + + if (this.defaultActions[newState]) { + recoveringErrorInfo.state_stack[esp] = 
this.defaultActions[newState]; + } else { + t = table[newState] && table[newState][symbol] || NO_ACTION; + recoveringErrorInfo.state_stack[esp] = t[1]; + } + + ++esp; + recoveringErrorInfo.info_stack_pointer = esp; + + // allow N (default: 3) real symbols to be shifted before reporting a new error + recovering = ERROR_RECOVERY_TOKEN_DISCARD_COUNT; + + // Now duplicate the standard parse machine here, at least its initial + // couple of rounds until the TERROR symbol is **pushed onto the parse stack**, + // as we wish to push something special then! + + + // Run the state machine in this copy of the parser state machine + // until we *either* consume the error symbol (and its related information) + // *or* we run into another error while recovering from this one + // *or* we execute a `reduce` action which outputs a final parse + // result (yes, that MAY happen!)... + + ASSERT(recoveringErrorInfo); + ASSERT(symbol === TERROR); + while (symbol) { + // retrieve state number from top of stack + state = newState; // sstack[sp - 1]; + + // use default actions if available + if (this.defaultActions[state]) { + action = 2; + newState = this.defaultActions[state]; + } else { + // read action for current state and first input + t = table[state] && table[state][symbol] || NO_ACTION; + newState = t[1]; + action = t[0]; + + // encountered another parse error? If so, break out to main loop + // and take it from there! + if (!action) { + newState = state; + break; + } + } + + switch (action) { + // catch misc. parse failures: + default: + // this shouldn't happen, unless resolve defaults are off + if (action instanceof Array) { + p = this.constructParseErrorInfo('Parse Error: multiple actions possible at state: ' + state + ', token: ' + symbol, null, null, false); + retval = this.parseError(p.errStr, p, this.JisonParserError); + // signal end of error recovery loop AND end of outer parse loop + action = 3; + break; + } + // Another case of better safe than sorry: in case state transitions come out of another error recovery process + // or a buggy LUT (LookUp Table): + p = this.constructParseErrorInfo('Parsing halted. No viable error recovery approach available due to internal system failure.', null, null, false); + retval = this.parseError(p.errStr, p, this.JisonParserError); + // signal end of error recovery loop AND end of outer parse loop + action = 3; + break; + + // shift: + case 1: + stack[sp] = symbol; + //vstack[sp] = lexer.yytext; + ASSERT(recoveringErrorInfo); + vstack[sp] = recoveringErrorInfo; + //lstack[sp] = copy_yylloc(lexer.yylloc); + lstack[sp] = this.yyMergeLocationInfo(null, null, recoveringErrorInfo.loc, lexer.yylloc, true); + sstack[sp] = newState; // push state + ++sp; + symbol = 0; + if (!preErrorSymbol) { + // normal execution / no error + // Pick up the lexer details for the current symbol as that one is not 'look-ahead' any more: + + + yyloc = lexer.yylloc; + + if (recovering > 0) { + recovering--; + } + } else { + // error just occurred, resume old lookahead f/ before error, *unless* that drops us straight back into error mode: + symbol = preErrorSymbol; + preErrorSymbol = 0; + + // read action for current state and first input + t = table[newState] && table[newState][symbol] || NO_ACTION; + if (!t[0] || symbol === TERROR) { + // forget about that symbol and move forward: this wasn't a 'forgot to insert' error type where + // (simple) stuff might have been missing before the token which caused the error we're + // recovering from now... 
+ // + // Also check if the LookAhead symbol isn't the ERROR token we set as part of the error + // recovery, for then this we would we idling (cycling) on the error forever. + // Yes, this does not take into account the possibility that the *lexer* may have + // produced a *new* TERROR token all by itself, but that would be a very peculiar grammar! + + + symbol = 0; + } + } + + // once we have pushed the special ERROR token value, we're done in this inner loop! + break; + + // reduce: + case 2: + this_production = this.productions_[newState - 1]; // `this.productions_[]` is zero-based indexed while states start from 1 upwards... + yyrulelen = this_production[1]; + + r = this.performAction.call(yyval, yyloc, newState, sp - 1, vstack, lstack); + + if (typeof r !== 'undefined') { + // signal end of error recovery loop AND end of outer parse loop + action = 3; + retval = r; + break; + } + + // pop off stack + sp -= yyrulelen; + + // don't overwrite the `symbol` variable: use a local var to speed things up: + var ntsymbol = this_production[0]; // push nonterminal (reduce) + stack[sp] = ntsymbol; + vstack[sp] = yyval.$; + lstack[sp] = yyval._$; + // goto new state = table[STATE][NONTERMINAL] + newState = table[sstack[sp - 1]][ntsymbol]; + sstack[sp] = newState; + ++sp; + + continue; + + // accept: + case 3: + retval = true; + // Return the `$accept` rule's `$$` result, if available. + // + // Also note that JISON always adds this top-most `$accept` rule (with implicit, + // default, action): + // + // $accept: $end + // %{ $$ = $1; @$ = @1; %} + // + // which, combined with the parse kernel's `$accept` state behaviour coded below, + // will produce the `$$` value output of the rule as the parse result, + // IFF that result is *not* `undefined`. (See also the parser kernel code.) + // + // In code: + // + // %{ + // @$ = @1; // if location tracking support is included + // if (typeof $1 !== 'undefined') + // return $1; + // else + // return true; // the default parse result if the rule actions don't produce anything + // %} + sp--; + if (typeof vstack[sp] !== 'undefined') { + retval = vstack[sp]; + } + break; + } + + // break out of loop: we accept or fail with error + break; + } + + // should we also break out of the regular/outer parse loop, + // i.e. did the parser already produce a parse result in here?! + if (action === 3) { + break; + } + continue; + } + } + + switch (action) { + // catch misc. parse failures: + default: + // this shouldn't happen, unless resolve defaults are off + if (action instanceof Array) { + p = this.constructParseErrorInfo('Parse Error: multiple actions possible at state: ' + state + ', token: ' + symbol, null, null, false); + retval = this.parseError(p.errStr, p, this.JisonParserError); + break; + } + // Another case of better safe than sorry: in case state transitions come out of another error recovery process + // or a buggy LUT (LookUp Table): + p = this.constructParseErrorInfo('Parsing halted. 
No viable error recovery approach available due to internal system failure.', null, null, false); + retval = this.parseError(p.errStr, p, this.JisonParserError); + break; + + // shift: + case 1: + stack[sp] = symbol; + vstack[sp] = lexer.yytext; + lstack[sp] = copy_yylloc(lexer.yylloc); + sstack[sp] = newState; // push state + + ++sp; + symbol = 0; + ASSERT(preErrorSymbol === 0); + if (!preErrorSymbol) { + // normal execution / no error + // Pick up the lexer details for the current symbol as that one is not 'look-ahead' any more: + + + yyloc = lexer.yylloc; + + if (recovering > 0) { + recovering--; + } + } else { + // error just occurred, resume old lookahead f/ before error, *unless* that drops us straight back into error mode: + symbol = preErrorSymbol; + preErrorSymbol = 0; + + // read action for current state and first input + t = table[newState] && table[newState][symbol] || NO_ACTION; + if (!t[0] || symbol === TERROR) { + // forget about that symbol and move forward: this wasn't a 'forgot to insert' error type where + // (simple) stuff might have been missing before the token which caused the error we're + // recovering from now... + // + // Also check if the LookAhead symbol isn't the ERROR token we set as part of the error + // recovery, for then this we would we idling (cycling) on the error forever. + // Yes, this does not take into account the possibility that the *lexer* may have + // produced a *new* TERROR token all by itself, but that would be a very peculiar grammar! + + + symbol = 0; + } + } + + continue; + + // reduce: + case 2: + this_production = this.productions_[newState - 1]; // `this.productions_[]` is zero-based indexed while states start from 1 upwards... + yyrulelen = this_production[1]; + + r = this.performAction.call(yyval, yyloc, newState, sp - 1, vstack, lstack); + + if (typeof r !== 'undefined') { + retval = r; + break; + } + + // pop off stack + sp -= yyrulelen; + + // don't overwrite the `symbol` variable: use a local var to speed things up: + var ntsymbol = this_production[0]; // push nonterminal (reduce) + stack[sp] = ntsymbol; + vstack[sp] = yyval.$; + lstack[sp] = yyval._$; + // goto new state = table[STATE][NONTERMINAL] + newState = table[sstack[sp - 1]][ntsymbol]; + sstack[sp] = newState; + ++sp; + + continue; + + // accept: + case 3: + retval = true; + // Return the `$accept` rule's `$$` result, if available. + // + // Also note that JISON always adds this top-most `$accept` rule (with implicit, + // default, action): + // + // $accept: $end + // %{ $$ = $1; @$ = @1; %} + // + // which, combined with the parse kernel's `$accept` state behaviour coded below, + // will produce the `$$` value output of the rule as the parse result, + // IFF that result is *not* `undefined`. (See also the parser kernel code.) 
+ // + // In code: + // + // %{ + // @$ = @1; // if location tracking support is included + // if (typeof $1 !== 'undefined') + // return $1; + // else + // return true; // the default parse result if the rule actions don't produce anything + // %} + sp--; + if (typeof vstack[sp] !== 'undefined') { + retval = vstack[sp]; + } + + break; + } + + // break out of loop: we accept or fail with error + break; + } + } catch (ex) { + // report exceptions through the parseError callback too, but keep the exception intact + // if it is a known parser or lexer error which has been thrown by parseError() already: + if (ex instanceof this.JisonParserError) { + throw ex; + } else if (lexer && typeof lexer.JisonLexerError === 'function' && ex instanceof lexer.JisonLexerError) { + throw ex; + } else { + p = this.constructParseErrorInfo('Parsing aborted due to exception.', ex, null, false); + retval = this.parseError(p.errStr, p, this.JisonParserError); + } + } finally { + retval = this.cleanupAfterParse(retval, true, true); + this.__reentrant_call_depth--; + } // /finally + + return retval; + }, + yyError: 1 +}; +parser.originalParseError = parser.parseError; +parser.originalQuoteName = parser.quoteName; + +var ebnf = false; + +var rmCommonWS = helpers.rmCommonWS; +var dquote = helpers.dquote; +var checkActionBlock = helpers.checkActionBlock; + +// transform ebnf to bnf if necessary +function extend(json, grammar) { + if (ebnf) { + json.ebnf = grammar.grammar; // keep the original source EBNF around for possible pretty-printing & AST exports. + json.bnf = transform(grammar.grammar); + } else { + json.bnf = grammar.grammar; + } + if (grammar.actionInclude) { + json.actionInclude = grammar.actionInclude; + } + return json; +} + +// convert string value to number or boolean value, when possible +// (and when this is more or less obviously the intent) +// otherwise produce the string itself as value. +function parseValue(v) { + if (v === 'false') { + return false; + } + if (v === 'true') { + return true; + } + // http://stackoverflow.com/questions/175739/is-there-a-built-in-way-in-javascript-to-check-if-a-string-is-a-valid-number + // Note that the `v` check ensures that we do not convert `undefined`, `null` and `''` (empty string!) + if (v && !isNaN(v)) { + var rv = +v; + if (isFinite(rv)) { + return rv; + } + } + return v; +} + +parser.warn = function p_warn() { + console.warn.apply(console, arguments); +}; + +parser.log = function p_log() { + console.log.apply(console, arguments); +}; +/* lexer generated by jison-lex 0.6.1-205 */ + +/* + * Returns a Lexer object of the following structure: + * + * Lexer: { + * yy: {} The so-called "shared state" or rather the *source* of it; + * the real "shared state" `yy` passed around to + * the rule actions, etc. is a direct reference! + * + * This "shared context" object was passed to the lexer by way of + * the `lexer.setInput(str, yy)` API before you may use it. + * + * This "shared context" object is passed to the lexer action code in `performAction()` + * so userland code in the lexer actions may communicate with the outside world + * and/or other lexer rules' actions in more or less complex ways. + * + * } + * + * Lexer.prototype: { + * EOF: 1, + * ERROR: 2, + * + * yy: The overall "shared context" object reference. 
+ * + * JisonLexerError: function(msg, hash), + * + * performAction: function lexer__performAction(yy, yyrulenumber, YY_START), + * + * The function parameters and `this` have the following value/meaning: + * - `this` : reference to the `lexer` instance. + * `yy_` is an alias for `this` lexer instance reference used internally. + * + * - `yy` : a reference to the `yy` "shared state" object which was passed to the lexer + * by way of the `lexer.setInput(str, yy)` API before. + * + * Note: + * The extra arguments you specified in the `%parse-param` statement in your + * **parser** grammar definition file are passed to the lexer via this object + * reference as member variables. + * + * - `yyrulenumber` : index of the matched lexer rule (regex), used internally. + * + * - `YY_START`: the current lexer "start condition" state. + * + * parseError: function(str, hash, ExceptionClass), + * + * constructLexErrorInfo: function(error_message, is_recoverable), + * Helper function. + * Produces a new errorInfo 'hash object' which can be passed into `parseError()`. + * See it's use in this lexer kernel in many places; example usage: + * + * var infoObj = lexer.constructParseErrorInfo('fail!', true); + * var retVal = lexer.parseError(infoObj.errStr, infoObj, lexer.JisonLexerError); + * + * options: { ... lexer %options ... }, + * + * lex: function(), + * Produce one token of lexed input, which was passed in earlier via the `lexer.setInput()` API. + * You MAY use the additional `args...` parameters as per `%parse-param` spec of the **lexer** grammar: + * these extra `args...` are added verbatim to the `yy` object reference as member variables. + * + * WARNING: + * Lexer's additional `args...` parameters (via lexer's `%parse-param`) MAY conflict with + * any attributes already added to `yy` by the **parser** or the jison run-time; + * when such a collision is detected an exception is thrown to prevent the generated run-time + * from silently accepting this confusing and potentially hazardous situation! + * + * cleanupAfterLex: function(do_not_nuke_errorinfos), + * Helper function. + * + * This helper API is invoked when the **parse process** has completed: it is the responsibility + * of the **parser** (or the calling userland code) to invoke this method once cleanup is desired. + * + * This helper may be invoked by user code to ensure the internal lexer gets properly garbage collected. 
+ * + * setInput: function(input, [yy]), + * + * + * input: function(), + * + * + * unput: function(str), + * + * + * more: function(), + * + * + * reject: function(), + * + * + * less: function(n), + * + * + * pastInput: function(n), + * + * + * upcomingInput: function(n), + * + * + * showPosition: function(), + * + * + * test_match: function(regex_match_array, rule_index), + * + * + * next: function(), + * + * + * begin: function(condition), + * + * + * pushState: function(condition), + * + * + * popState: function(), + * + * + * topState: function(), + * + * + * _currentRules: function(), + * + * + * stateStackSize: function(), + * + * + * performAction: function(yy, yy_, yyrulenumber, YY_START), + * + * + * rules: [...], + * + * + * conditions: {associative list: name ==> set}, + * } + * + * + * token location info (`yylloc`): { + * first_line: n, + * last_line: n, + * first_column: n, + * last_column: n, + * range: [start_number, end_number] + * (where the numbers are indexes into the input string, zero-based) + * } + * + * --- + * + * The `parseError` function receives a 'hash' object with these members for lexer errors: + * + * { + * text: (matched text) + * token: (the produced terminal token, if any) + * token_id: (the produced terminal token numeric ID, if any) + * line: (yylineno) + * loc: (yylloc) + * recoverable: (boolean: TRUE when the parser MAY have an error recovery rule + * available for this particular error) + * yy: (object: the current parser internal "shared state" `yy` + * as is also available in the rule actions; this can be used, + * for instance, for advanced error analysis and reporting) + * lexer: (reference to the current lexer instance used by the parser) + * } + * + * while `this` will reference the current lexer instance. + * + * When `parseError` is invoked by the lexer, the default implementation will + * attempt to invoke `yy.parser.parseError()`; when this callback is not provided + * it will try to invoke `yy.parseError()` instead. When that callback is also not + * provided, a `JisonLexerError` exception will be thrown containing the error + * message and `hash`, as constructed by the `constructLexErrorInfo()` API. + * + * Note that the lexer's `JisonLexerError` error class is passed via the + * `ExceptionClass` argument, which is invoked to construct the exception + * instance to be thrown, so technically `parseError` will throw the object + * produced by the `new ExceptionClass(str, hash)` JavaScript expression. + * + * --- + * + * You can specify lexer options by setting / modifying the `.options` object of your Lexer instance. + * These options are available: + * + * (Options are permanent.) + * + * yy: { + * parseError: function(str, hash, ExceptionClass) + * optional: overrides the default `parseError` function. + * } + * + * lexer.options: { + * pre_lex: function() + * optional: is invoked before the lexer is invoked to produce another token. + * `this` refers to the Lexer object. + * post_lex: function(token) { return token; } + * optional: is invoked when the lexer has produced a token `token`; + * this function can override the returned token value by returning another. + * When it does not return any (truthy) value, the lexer will return + * the original `token`. + * `this` refers to the Lexer object. + * + * WARNING: the next set of options are not meant to be changed. They echo the abilities of + * the lexer as per when it was compiled! + * + * ranges: boolean + * optional: `true` ==> token location info will include a .range[] member. 
+ * flex: boolean + * optional: `true` ==> flex-like lexing behaviour where the rules are tested + * exhaustively to find the longest match. + * backtrack_lexer: boolean + * optional: `true` ==> lexer regexes are tested in order and for invoked; + * the lexer terminates the scan when a token is returned by the action code. + * xregexp: boolean + * optional: `true` ==> lexer rule regexes are "extended regex format" requiring the + * `XRegExp` library. When this %option has not been specified at compile time, all lexer + * rule regexes have been written as standard JavaScript RegExp expressions. + * } + */ + +var lexer = function () { + /** + * See also: + * http://stackoverflow.com/questions/1382107/whats-a-good-way-to-extend-error-in-javascript/#35881508 + * but we keep the prototype.constructor and prototype.name assignment lines too for compatibility + * with userland code which might access the derived class in a 'classic' way. + * + * @public + * @constructor + * @nocollapse + */ + function JisonLexerError(msg, hash) { + Object.defineProperty(this, 'name', { + enumerable: false, + writable: false, + value: 'JisonLexerError' + }); + + if (msg == null) msg = '???'; + + Object.defineProperty(this, 'message', { + enumerable: false, + writable: true, + value: msg + }); + + this.hash = hash; + var stacktrace; + + if (hash && hash.exception instanceof Error) { + var ex2 = hash.exception; + this.message = ex2.message || msg; + stacktrace = ex2.stack; + } + + if (!stacktrace) { + if (Error.hasOwnProperty('captureStackTrace')) { + // V8 + Error.captureStackTrace(this, this.constructor); + } else { + stacktrace = new Error(msg).stack; + } + } + + if (stacktrace) { + Object.defineProperty(this, 'stack', { + enumerable: false, + writable: false, + value: stacktrace + }); + } + } + + if (typeof Object.setPrototypeOf === 'function') { + Object.setPrototypeOf(JisonLexerError.prototype, Error.prototype); + } else { + JisonLexerError.prototype = Object.create(Error.prototype); + } + + JisonLexerError.prototype.constructor = JisonLexerError; + JisonLexerError.prototype.name = 'JisonLexerError'; + + var lexer = { + + // Code Generator Information Report + // --------------------------------- + // + // Options: + // + // backtracking: .................... false + // location.ranges: ................. true + // location line+column tracking: ... true + // + // + // Forwarded Parser Analysis flags: + // + // uses yyleng: ..................... false + // uses yylineno: ................... false + // uses yytext: ..................... false + // uses yylloc: ..................... false + // uses lexer values: ............... true / true + // location tracking: ............... true + // location assignment: ............. true + // + // + // Lexer Analysis flags: + // + // uses yyleng: ..................... ??? + // uses yylineno: ................... ??? + // uses yytext: ..................... ??? + // uses yylloc: ..................... ??? + // uses ParseError API: ............. ??? + // uses yyerror: .................... ??? + // uses location tracking & editing: ??? + // uses more() API: ................. ??? + // uses unput() API: ................ ??? + // uses reject() API: ............... ??? + // uses less() API: ................. ??? + // uses display APIs pastInput(), upcomingInput(), showPosition(): + // ............................. ??? + // uses describeYYLLOC() API: ....... ??? 
+ // + // --------- END OF REPORT ----------- + + EOF: 1, + ERROR: 2, + + // JisonLexerError: JisonLexerError, /// <-- injected by the code generator + + // options: {}, /// <-- injected by the code generator + + // yy: ..., /// <-- injected by setInput() + + __currentRuleSet__: null, /// INTERNAL USE ONLY: internal rule set cache for the current lexer state + + __error_infos: [], /// INTERNAL USE ONLY: the set of lexErrorInfo objects created since the last cleanup + __decompressed: false, /// INTERNAL USE ONLY: mark whether the lexer instance has been 'unfolded' completely and is now ready for use + done: false, /// INTERNAL USE ONLY + _backtrack: false, /// INTERNAL USE ONLY + _input: '', /// INTERNAL USE ONLY + _more: false, /// INTERNAL USE ONLY + _signaled_error_token: false, /// INTERNAL USE ONLY + conditionStack: [], /// INTERNAL USE ONLY; managed via `pushState()`, `popState()`, `topState()` and `stateStackSize()` + match: '', /// READ-ONLY EXTERNAL ACCESS - ADVANCED USE ONLY: tracks input which has been matched so far for the lexer token under construction. `match` is identical to `yytext` except that this one still contains the matched input string after `lexer.performAction()` has been invoked, where userland code MAY have changed/replaced the `yytext` value entirely! + matched: '', /// READ-ONLY EXTERNAL ACCESS - ADVANCED USE ONLY: tracks entire input which has been matched so far + matches: false, /// READ-ONLY EXTERNAL ACCESS - ADVANCED USE ONLY: tracks RE match result for last (successful) match attempt + yytext: '', /// ADVANCED USE ONLY: tracks input which has been matched so far for the lexer token under construction; this value is transferred to the parser as the 'token value' when the parser consumes the lexer token produced through a call to the `lex()` API. + offset: 0, /// READ-ONLY EXTERNAL ACCESS - ADVANCED USE ONLY: tracks the 'cursor position' in the input string, i.e. the number of characters matched so far + yyleng: 0, /// READ-ONLY EXTERNAL ACCESS - ADVANCED USE ONLY: length of matched input for the token under construction (`yytext`) + yylineno: 0, /// READ-ONLY EXTERNAL ACCESS - ADVANCED USE ONLY: 'line number' at which the token under construction is located + yylloc: null, /// READ-ONLY EXTERNAL ACCESS - ADVANCED USE ONLY: tracks location info (lines + columns) for the token under construction + + /** + * INTERNAL USE: construct a suitable error info hash object instance for `parseError`. 
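+     *
+     * (Illustrative sketch, not generated code: the surrounding kernel methods use this
+     *  API roughly as
+     *
+     *      var p = this.constructLexErrorInfo('Lexical error: ...', this.options.lexerErrorsAreRecoverable);
+     *      return this.parseError(p.errStr, p, this.JisonLexerError) || this.ERROR;
+     *
+     *  i.e. the returned hash is handed to `parseError()` and is tracked in `__error_infos`
+     *  until `cleanupAfterLex()` gets to `destroy()` it.)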
+ * + * @public + * @this {RegExpLexer} + */ + constructLexErrorInfo: function lexer_constructLexErrorInfo(msg, recoverable, show_input_position) { + msg = '' + msg; + + // heuristic to determine if the error message already contains a (partial) source code dump + // as produced by either `showPosition()` or `prettyPrintRange()`: + if (show_input_position == undefined) { + show_input_position = !(msg.indexOf('\n') > 0 && msg.indexOf('^') > 0); + } + + if (this.yylloc && show_input_position) { + if (typeof this.prettyPrintRange === 'function') { + var pretty_src = this.prettyPrintRange(this.yylloc); + + if (!/\n\s*$/.test(msg)) { + msg += '\n'; + } + + msg += '\n Erroneous area:\n' + this.prettyPrintRange(this.yylloc); + } else if (typeof this.showPosition === 'function') { + var pos_str = this.showPosition(); + + if (pos_str) { + if (msg.length && msg[msg.length - 1] !== '\n' && pos_str[0] !== '\n') { + msg += '\n' + pos_str; + } else { + msg += pos_str; + } + } + } + } + + /** @constructor */ + var pei = { + errStr: msg, + recoverable: !!recoverable, + text: this.match, // This one MAY be empty; userland code should use the `upcomingInput` API to obtain more text which follows the 'lexer cursor position'... + token: null, + line: this.yylineno, + loc: this.yylloc, + yy: this.yy, + lexer: this, + + /** + * and make sure the error info doesn't stay due to potential + * ref cycle via userland code manipulations. + * These would otherwise all be memory leak opportunities! + * + * Note that only array and object references are nuked as those + * constitute the set of elements which can produce a cyclic ref. + * The rest of the members is kept intact as they are harmless. + * + * @public + * @this {LexErrorInfo} + */ + destroy: function destructLexErrorInfo() { + // remove cyclic references added to error info: + // info.yy = null; + // info.lexer = null; + // ... + var rec = !!this.recoverable; + + for (var key in this) { + if (this.hasOwnProperty(key) && (typeof key === 'undefined' ? 'undefined' : _typeof(key)) === 'object') { + this[key] = undefined; + } + } + + this.recoverable = rec; + } + }; + + // track this instance so we can `destroy()` it once we deem it superfluous and ready for garbage collection! + this.__error_infos.push(pei); + + return pei; + }, + + /** + * handler which is invoked when a lexer error occurs. + * + * @public + * @this {RegExpLexer} + */ + parseError: function lexer_parseError(str, hash, ExceptionClass) { + if (!ExceptionClass) { + ExceptionClass = this.JisonLexerError; + } + + if (this.yy) { + if (this.yy.parser && typeof this.yy.parser.parseError === 'function') { + return this.yy.parser.parseError.call(this, str, hash, ExceptionClass) || this.ERROR; + } else if (typeof this.yy.parseError === 'function') { + return this.yy.parseError.call(this, str, hash, ExceptionClass) || this.ERROR; + } + } + + throw new ExceptionClass(str, hash); + }, + + /** + * method which implements `yyerror(str, ...args)` functionality for use inside lexer actions. 
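+     *
+     * (Illustrative sketch, not generated code: a lexer rule action could report a
+     *  problem via
+     *
+     *      yy_.yyerror('unsupported directive: ' + yy_.yytext);
+     *
+     *  where any additional arguments are attached to the produced error hash as
+     *  `extra_error_attributes`.)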
+ * + * @public + * @this {RegExpLexer} + */ + yyerror: function yyError(str /*, ...args */) { + var lineno_msg = ''; + + if (this.yylloc) { + lineno_msg = ' on line ' + (this.yylineno + 1); + } + + var p = this.constructLexErrorInfo('Lexical error' + lineno_msg + ': ' + str, this.options.lexerErrorsAreRecoverable); + + // Add any extra args to the hash under the name `extra_error_attributes`: + var args = Array.prototype.slice.call(arguments, 1); + + if (args.length) { + p.extra_error_attributes = args; + } + + return this.parseError(p.errStr, p, this.JisonLexerError) || this.ERROR; + }, + + /** + * final cleanup function for when we have completed lexing the input; + * make it an API so that external code can use this one once userland + * code has decided it's time to destroy any lingering lexer error + * hash object instances and the like: this function helps to clean + * up these constructs, which *may* carry cyclic references which would + * otherwise prevent the instances from being properly and timely + * garbage-collected, i.e. this function helps prevent memory leaks! + * + * @public + * @this {RegExpLexer} + */ + cleanupAfterLex: function lexer_cleanupAfterLex(do_not_nuke_errorinfos) { + // prevent lingering circular references from causing memory leaks: + this.setInput('', {}); + + // nuke the error hash info instances created during this run. + // Userland code must COPY any data/references + // in the error hash instance(s) it is more permanently interested in. + if (!do_not_nuke_errorinfos) { + for (var i = this.__error_infos.length - 1; i >= 0; i--) { + var el = this.__error_infos[i]; + + if (el && typeof el.destroy === 'function') { + el.destroy(); + } + } + + this.__error_infos.length = 0; + } + + return this; + }, + + /** + * clear the lexer token context; intended for internal use only + * + * @public + * @this {RegExpLexer} + */ + clear: function lexer_clear() { + this.yytext = ''; + this.yyleng = 0; + this.match = ''; + + // - DO NOT reset `this.matched` + this.matches = false; + + this._more = false; + this._backtrack = false; + var col = this.yylloc ? this.yylloc.last_column : 0; + + this.yylloc = { + first_line: this.yylineno + 1, + first_column: col, + last_line: this.yylineno + 1, + last_column: col, + range: [this.offset, this.offset] + }; + }, + + /** + * resets the lexer, sets new input + * + * @public + * @this {RegExpLexer} + */ + setInput: function lexer_setInput(input, yy) { + this.yy = yy || this.yy || {}; + + // also check if we've fully initialized the lexer instance, + // including expansion work to be done to go from a loaded + // lexer to a usable lexer: + if (!this.__decompressed) { + // step 1: decompress the regex list: + var rules = this.rules; + + for (var i = 0, len = rules.length; i < len; i++) { + var rule_re = rules[i]; + + // compression: is the RE an xref to another RE slot in the rules[] table? + if (typeof rule_re === 'number') { + rules[i] = rules[rule_re]; + } + } + + // step 2: unfold the conditions[] set to make these ready for use: + var conditions = this.conditions; + + for (var k in conditions) { + var spec = conditions[k]; + var rule_ids = spec.rules; + var len = rule_ids.length; + var rule_regexes = new Array(len + 1); // slot 0 is unused; we use a 1-based index approach here to keep the hottest code in `lexer_next()` fast and simple! 
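+          // `rule_new_ids` (set up just below) is the parallel 1-based array holding the
+          // original rule indexes, so that `spec.rules[i]` and `spec.__rule_regexes[i]`
+          // stay in sync once decompression has completed.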
+ var rule_new_ids = new Array(len + 1); + + for (var i = 0; i < len; i++) { + var idx = rule_ids[i]; + var rule_re = rules[idx]; + rule_regexes[i + 1] = rule_re; + rule_new_ids[i + 1] = idx; + } + + spec.rules = rule_new_ids; + spec.__rule_regexes = rule_regexes; + spec.__rule_count = len; + } + + this.__decompressed = true; + } + + this._input = input || ''; + this.clear(); + this._signaled_error_token = false; + this.done = false; + this.yylineno = 0; + this.matched = ''; + this.conditionStack = ['INITIAL']; + this.__currentRuleSet__ = null; + + this.yylloc = { + first_line: 1, + first_column: 0, + last_line: 1, + last_column: 0, + range: [0, 0] + }; + + this.offset = 0; + return this; + }, + + /** + * edit the remaining input via user-specified callback. + * This can be used to forward-adjust the input-to-parse, + * e.g. inserting macro expansions and alike in the + * input which has yet to be lexed. + * The behaviour of this API contrasts the `unput()` et al + * APIs as those act on the *consumed* input, while this + * one allows one to manipulate the future, without impacting + * the current `yyloc` cursor location or any history. + * + * Use this API to help implement C-preprocessor-like + * `#include` statements, etc. + * + * The provided callback must be synchronous and is + * expected to return the edited input (string). + * + * The `cpsArg` argument value is passed to the callback + * as-is. + * + * `callback` interface: + * `function callback(input, cpsArg)` + * + * - `input` will carry the remaining-input-to-lex string + * from the lexer. + * - `cpsArg` is `cpsArg` passed into this API. + * + * The `this` reference for the callback will be set to + * reference this lexer instance so that userland code + * in the callback can easily and quickly access any lexer + * API. + * + * When the callback returns a non-string-type falsey value, + * we assume the callback did not edit the input and we + * will using the input as-is. + * + * When the callback returns a non-string-type value, it + * is converted to a string for lexing via the `"" + retval` + * operation. (See also why: http://2ality.com/2012/03/converting-to-string.html + * -- that way any returned object's `toValue()` and `toString()` + * methods will be invoked in a proper/desirable order.) + * + * @public + * @this {RegExpLexer} + */ + editRemainingInput: function lexer_editRemainingInput(callback, cpsArg) { + var rv = callback.call(this, this._input, cpsArg); + + if (typeof rv !== 'string') { + if (rv) { + this._input = '' + rv; + } + // else: keep `this._input` as is. + } else { + this._input = rv; + } + + return this; + }, + + /** + * consumes and returns one char from the input + * + * @public + * @this {RegExpLexer} + */ + input: function lexer_input() { + if (!this._input) { + //this.done = true; -- don't set `done` as we want the lex()/next() API to be able to produce one custom EOF token match after this anyhow. (lexer can match special <> tokens and perform user action code for a <> match, but only does so *once*) + return null; + } + + var ch = this._input[0]; + this.yytext += ch; + this.yyleng++; + this.offset++; + this.match += ch; + this.matched += ch; + + // Count the linenumber up when we hit the LF (or a stand-alone CR). + // On CRLF, the linenumber is incremented when you fetch the CR or the CRLF combo + // and we advance immediately past the LF as well, returning both together as if + // it was all a single 'character' only. 
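+      // (Illustration: for the remaining input '\r\nfoo', a single `input()` call
+      // returns the two-character string '\r\n', advances `offset` by 2 and bumps
+      // `yylineno` by one.)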
+ var slice_len = 1; + + var lines = false; + + if (ch === '\n') { + lines = true; + } else if (ch === '\r') { + lines = true; + var ch2 = this._input[1]; + + if (ch2 === '\n') { + slice_len++; + ch += ch2; + this.yytext += ch2; + this.yyleng++; + this.offset++; + this.match += ch2; + this.matched += ch2; + this.yylloc.range[1]++; + } + } + + if (lines) { + this.yylineno++; + this.yylloc.last_line++; + this.yylloc.last_column = 0; + } else { + this.yylloc.last_column++; + } + + this.yylloc.range[1]++; + this._input = this._input.slice(slice_len); + return ch; + }, + + /** + * unshifts one char (or an entire string) into the input + * + * @public + * @this {RegExpLexer} + */ + unput: function lexer_unput(ch) { + var len = ch.length; + var lines = ch.split(/(?:\r\n?|\n)/g); + this._input = ch + this._input; + this.yytext = this.yytext.substr(0, this.yytext.length - len); + this.yyleng = this.yytext.length; + this.offset -= len; + this.match = this.match.substr(0, this.match.length - len); + this.matched = this.matched.substr(0, this.matched.length - len); + + if (lines.length > 1) { + this.yylineno -= lines.length - 1; + this.yylloc.last_line = this.yylineno + 1; + + // Get last entirely matched line into the `pre_lines[]` array's + // last index slot; we don't mind when other previously + // matched lines end up in the array too. + var pre = this.match; + + var pre_lines = pre.split(/(?:\r\n?|\n)/g); + + if (pre_lines.length === 1) { + pre = this.matched; + pre_lines = pre.split(/(?:\r\n?|\n)/g); + } + + this.yylloc.last_column = pre_lines[pre_lines.length - 1].length; + } else { + this.yylloc.last_column -= len; + } + + this.yylloc.range[1] = this.yylloc.range[0] + this.yyleng; + this.done = false; + return this; + }, + + /** + * cache matched text and append it on next action + * + * @public + * @this {RegExpLexer} + */ + more: function lexer_more() { + this._more = true; + return this; + }, + + /** + * signal the lexer that this rule fails to match the input, so the + * next matching rule (regex) should be tested instead. + * + * @public + * @this {RegExpLexer} + */ + reject: function lexer_reject() { + if (this.options.backtrack_lexer) { + this._backtrack = true; + } else { + // when the `parseError()` call returns, we MUST ensure that the error is registered. + // We accomplish this by signaling an 'error' token to be produced for the current + // `.lex()` run. + var lineno_msg = ''; + + if (this.yylloc) { + lineno_msg = ' on line ' + (this.yylineno + 1); + } + + var p = this.constructLexErrorInfo('Lexical error' + lineno_msg + ': You can only invoke reject() in the lexer when the lexer is of the backtracking persuasion (options.backtrack_lexer = true).', false); + + this._signaled_error_token = this.parseError(p.errStr, p, this.JisonLexerError) || this.ERROR; + } + + return this; + }, + + /** + * retain first n characters of the match + * + * @public + * @this {RegExpLexer} + */ + less: function lexer_less(n) { + return this.unput(this.match.slice(n)); + }, + + /** + * return (part of the) already matched input, i.e. for error + * messages. + * + * Limit the returned string length to `maxSize` (default: 20). + * + * Limit the returned string to the `maxLines` number of lines of + * input (default: 1). + * + * Negative limit values equal *unlimited*. 
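+     *
+     * (Illustrative sketch: `lexer.pastInput(10, 1)` returns at most the last 10
+     *  characters of the single line of input preceding the current match, prefixed
+     *  with `...` when the text had to be clipped.)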
+ * + * @public + * @this {RegExpLexer} + */ + pastInput: function lexer_pastInput(maxSize, maxLines) { + var past = this.matched.substring(0, this.matched.length - this.match.length); + + if (maxSize < 0) maxSize = past.length;else if (!maxSize) maxSize = 20; + + if (maxLines < 0) maxLines = past.length; // can't ever have more input lines than this! + else if (!maxLines) maxLines = 1; + + // `substr` anticipation: treat \r\n as a single character and take a little + // more than necessary so that we can still properly check against maxSize + // after we've transformed and limited the newLines in here: + past = past.substr(-maxSize * 2 - 2); + + // now that we have a significantly reduced string to process, transform the newlines + // and chop them, then limit them: + var a = past.replace(/\r\n|\r/g, '\n').split('\n'); + + a = a.slice(-maxLines); + past = a.join('\n'); + + // When, after limiting to maxLines, we still have too much to return, + // do add an ellipsis prefix... + if (past.length > maxSize) { + past = '...' + past.substr(-maxSize); + } + + return past; + }, + + /** + * return (part of the) upcoming input, i.e. for error messages. + * + * Limit the returned string length to `maxSize` (default: 20). + * + * Limit the returned string to the `maxLines` number of lines of input (default: 1). + * + * Negative limit values equal *unlimited*. + * + * > ### NOTE ### + * > + * > *"upcoming input"* is defined as the whole of the both + * > the *currently lexed* input, together with any remaining input + * > following that. *"currently lexed"* input is the input + * > already recognized by the lexer but not yet returned with + * > the lexer token. This happens when you are invoking this API + * > from inside any lexer rule action code block. + * > + * + * @public + * @this {RegExpLexer} + */ + upcomingInput: function lexer_upcomingInput(maxSize, maxLines) { + var next = this.match; + + if (maxSize < 0) maxSize = next.length + this._input.length;else if (!maxSize) maxSize = 20; + + if (maxLines < 0) maxLines = maxSize; // can't ever have more input lines than this! + else if (!maxLines) maxLines = 1; + + // `substring` anticipation: treat \r\n as a single character and take a little + // more than necessary so that we can still properly check against maxSize + // after we've transformed and limited the newLines in here: + if (next.length < maxSize * 2 + 2) { + next += this._input.substring(0, maxSize * 2 + 2); // substring is faster on Chrome/V8 + } + + // now that we have a significantly reduced string to process, transform the newlines + // and chop them, then limit them: + var a = next.replace(/\r\n|\r/g, '\n').split('\n'); + + a = a.slice(0, maxLines); + next = a.join('\n'); + + // When, after limiting to maxLines, we still have too much to return, + // do add an ellipsis postfix... + if (next.length > maxSize) { + next = next.substring(0, maxSize) + '...'; + } + + return next; + }, + + /** + * return a string which displays the character position where the + * lexing error occurred, i.e. for error messages + * + * @public + * @this {RegExpLexer} + */ + showPosition: function lexer_showPosition(maxPrefix, maxPostfix) { + var pre = this.pastInput(maxPrefix).replace(/\s/g, ' '); + var c = new Array(pre.length + 1).join('-'); + return pre + this.upcomingInput(maxPostfix).replace(/\s/g, ' ') + '\n' + c + '^'; + }, + + /** + * return a string which displays the lines & columns of input which are referenced + * by the given location info range, plus a few lines of context. 
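+     *
+     * (Illustrative sketch of the general shape of the output, not verbatim:
+     *
+     *      7: foo: bar baz;
+     *      ^.......^^^
+     *
+     *  i.e. numbered input lines, each followed by a `^`/`.` marker line pointing
+     *  at the columns referenced by `loc`.)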
+ * + * This function pretty-prints the indicated section of the input, with line numbers + * and everything! + * + * This function is very useful to provide highly readable error reports, while + * the location range may be specified in various flexible ways: + * + * - `loc` is the location info object which references the area which should be + * displayed and 'marked up': these lines & columns of text are marked up by `^` + * characters below each character in the entire input range. + * + * - `context_loc` is the *optional* location info object which instructs this + * pretty-printer how much *leading* context should be displayed alongside + * the area referenced by `loc`. This can help provide context for the displayed + * error, etc. + * + * When this location info is not provided, a default context of 3 lines is + * used. + * + * - `context_loc2` is another *optional* location info object, which serves + * a similar purpose to `context_loc`: it specifies the amount of *trailing* + * context lines to display in the pretty-print output. + * + * When this location info is not provided, a default context of 1 line only is + * used. + * + * Special Notes: + * + * - when the `loc`-indicated range is very large (about 5 lines or more), then + * only the first and last few lines of this block are printed while a + * `...continued...` message will be printed between them. + * + * This serves the purpose of not printing a huge amount of text when the `loc` + * range happens to be huge: this way a manageable & readable output results + * for arbitrary large ranges. + * + * - this function can display lines of input which whave not yet been lexed. + * `prettyPrintRange()` can access the entire input! + * + * @public + * @this {RegExpLexer} + */ + prettyPrintRange: function lexer_prettyPrintRange(loc, context_loc, context_loc2) { + var CONTEXT = 3; + var CONTEXT_TAIL = 1; + var MINIMUM_VISIBLE_NONEMPTY_LINE_COUNT = 2; + var input = this.matched + this._input; + var lines = input.split('\n'); + + //var show_context = (error_size < 5 || context_loc); + var l0 = Math.max(1, context_loc ? context_loc.first_line : loc.first_line - CONTEXT); + + var l1 = Math.max(1, context_loc2 ? context_loc2.last_line : loc.last_line + CONTEXT_TAIL); + var lineno_display_width = 1 + Math.log10(l1 | 1) | 0; + var ws_prefix = new Array(lineno_display_width).join(' '); + var nonempty_line_indexes = []; + + var rv = lines.slice(l0 - 1, l1 + 1).map(function injectLineNumber(line, index) { + var lno = index + l0; + var lno_pfx = (ws_prefix + lno).substr(-lineno_display_width); + var rv = lno_pfx + ': ' + line; + var errpfx = new Array(lineno_display_width + 1).join('^'); + var offset = 2 + 1; + var len = 0; + + if (lno === loc.first_line) { + offset += loc.first_column; + + len = Math.max(2, (lno === loc.last_line ? 
loc.last_column : line.length) - loc.first_column + 1); + } else if (lno === loc.last_line) { + len = Math.max(2, loc.last_column + 1); + } else if (lno > loc.first_line && lno < loc.last_line) { + len = Math.max(2, line.length + 1); + } + + if (len) { + var lead = new Array(offset).join('.'); + var mark = new Array(len).join('^'); + rv += '\n' + errpfx + lead + mark; + + if (line.trim().length > 0) { + nonempty_line_indexes.push(index); + } + } + + rv = rv.replace(/\t/g, ' '); + return rv; + }); + + // now make sure we don't print an overly large amount of error area: limit it + // to the top and bottom line count: + if (nonempty_line_indexes.length > 2 * MINIMUM_VISIBLE_NONEMPTY_LINE_COUNT) { + var clip_start = nonempty_line_indexes[MINIMUM_VISIBLE_NONEMPTY_LINE_COUNT - 1] + 1; + var clip_end = nonempty_line_indexes[nonempty_line_indexes.length - MINIMUM_VISIBLE_NONEMPTY_LINE_COUNT] - 1; + var intermediate_line = new Array(lineno_display_width + 1).join(' ') + ' (...continued...)'; + intermediate_line += '\n' + new Array(lineno_display_width + 1).join('-') + ' (---------------)'; + rv.splice(clip_start, clip_end - clip_start + 1, intermediate_line); + } + + return rv.join('\n'); + }, + + /** + * helper function, used to produce a human readable description as a string, given + * the input `yylloc` location object. + * + * Set `display_range_too` to TRUE to include the string character index position(s) + * in the description if the `yylloc.range` is available. + * + * @public + * @this {RegExpLexer} + */ + describeYYLLOC: function lexer_describe_yylloc(yylloc, display_range_too) { + var l1 = yylloc.first_line; + var l2 = yylloc.last_line; + var c1 = yylloc.first_column; + var c2 = yylloc.last_column; + var dl = l2 - l1; + var dc = c2 - c1; + var rv; + + if (dl === 0) { + rv = 'line ' + l1 + ', '; + + if (dc <= 1) { + rv += 'column ' + c1; + } else { + rv += 'columns ' + c1 + ' .. ' + c2; + } + } else { + rv = 'lines ' + l1 + '(column ' + c1 + ') .. ' + l2 + '(column ' + c2 + ')'; + } + + if (yylloc.range && display_range_too) { + var r1 = yylloc.range[0]; + var r2 = yylloc.range[1] - 1; + + if (r2 <= r1) { + rv += ' {String Offset: ' + r1 + '}'; + } else { + rv += ' {String Offset range: ' + r1 + ' .. ' + r2 + '}'; + } + } + + return rv; + }, + + /** + * test the lexed token: return FALSE when not a match, otherwise return token. + * + * `match` is supposed to be an array coming out of a regex match, i.e. `match[0]` + * contains the actually matched text string. 
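+     *
+     * (Note: `indexed_rule` is the rule number which is forwarded to `performAction()`
+     *  as its `yyrulenumber` argument; when that action calls `reject()` in a
+     *  backtracking lexer, this method restores the saved context and returns FALSE
+     *  so `next()` can go on to test the following rule.)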
+ * + * Also move the input cursor forward and update the match collectors: + * + * - `yytext` + * - `yyleng` + * - `match` + * - `matches` + * - `yylloc` + * - `offset` + * + * @public + * @this {RegExpLexer} + */ + test_match: function lexer_test_match(match, indexed_rule) { + var token, lines, backup, match_str, match_str_len; + + if (this.options.backtrack_lexer) { + // save context + backup = { + yylineno: this.yylineno, + + yylloc: { + first_line: this.yylloc.first_line, + last_line: this.yylloc.last_line, + first_column: this.yylloc.first_column, + last_column: this.yylloc.last_column, + range: this.yylloc.range.slice(0) + }, + + yytext: this.yytext, + match: this.match, + matches: this.matches, + matched: this.matched, + yyleng: this.yyleng, + offset: this.offset, + _more: this._more, + _input: this._input, + + //_signaled_error_token: this._signaled_error_token, + yy: this.yy, + + conditionStack: this.conditionStack.slice(0), + done: this.done + }; + } + + match_str = match[0]; + match_str_len = match_str.length; + + // if (match_str.indexOf('\n') !== -1 || match_str.indexOf('\r') !== -1) { + lines = match_str.split(/(?:\r\n?|\n)/g); + + if (lines.length > 1) { + this.yylineno += lines.length - 1; + this.yylloc.last_line = this.yylineno + 1; + this.yylloc.last_column = lines[lines.length - 1].length; + } else { + this.yylloc.last_column += match_str_len; + } + + // } + this.yytext += match_str; + + this.match += match_str; + this.matched += match_str; + this.matches = match; + this.yyleng = this.yytext.length; + this.yylloc.range[1] += match_str_len; + + // previous lex rules MAY have invoked the `more()` API rather than producing a token: + // those rules will already have moved this `offset` forward matching their match lengths, + // hence we must only add our own match length now: + this.offset += match_str_len; + + this._more = false; + this._backtrack = false; + this._input = this._input.slice(match_str_len); + + // calling this method: + // + // function lexer__performAction(yy, yyrulenumber, YY_START) {...} + token = this.performAction.call(this, this.yy, indexed_rule, this.conditionStack[this.conditionStack.length - 1] /* = YY_START */ + ); + + // otherwise, when the action codes are all simple return token statements: + //token = this.simpleCaseActionClusters[indexed_rule]; + + if (this.done && this._input) { + this.done = false; + } + + if (token) { + return token; + } else if (this._backtrack) { + // recover context + for (var k in backup) { + this[k] = backup[k]; + } + + this.__currentRuleSet__ = null; + return false; // rule action called reject() implying the next rule should be tested instead. + } else if (this._signaled_error_token) { + // produce one 'error' token as `.parseError()` in `reject()` + // did not guarantee a failure signal by throwing an exception! + token = this._signaled_error_token; + + this._signaled_error_token = false; + return token; + } + + return false; + }, + + /** + * return next match in input + * + * @public + * @this {RegExpLexer} + */ + next: function lexer_next() { + if (this.done) { + this.clear(); + return this.EOF; + } + + if (!this._input) { + this.done = true; + } + + var token, match, tempMatch, index; + + if (!this._more) { + this.clear(); + } + + var spec = this.__currentRuleSet__; + + if (!spec) { + // Update the ruleset cache as we apparently encountered a state change or just started lexing. 
+ // The cache is set up for fast lookup -- we assume a lexer will switch states much less often than it will + // invoke the `lex()` token-producing API and related APIs, hence caching the set for direct access helps + // speed up those activities a tiny bit. + spec = this.__currentRuleSet__ = this._currentRules(); + + // Check whether a *sane* condition has been pushed before: this makes the lexer robust against + // user-programmer bugs such as https://github.com/zaach/jison-lex/issues/19 + if (!spec || !spec.rules) { + var lineno_msg = ''; + + if (this.options.trackPosition) { + lineno_msg = ' on line ' + (this.yylineno + 1); + } + + var p = this.constructLexErrorInfo('Internal lexer engine error' + lineno_msg + ': The lex grammar programmer pushed a non-existing condition name "' + this.topState() + '"; this is a fatal error and should be reported to the application programmer team!', false); + + // produce one 'error' token until this situation has been resolved, most probably by parse termination! + return this.parseError(p.errStr, p, this.JisonLexerError) || this.ERROR; + } + } + + var rule_ids = spec.rules; + var regexes = spec.__rule_regexes; + var len = spec.__rule_count; + + // Note: the arrays are 1-based, while `len` itself is a valid index, + // hence the non-standard less-or-equal check in the next loop condition! + for (var i = 1; i <= len; i++) { + tempMatch = this._input.match(regexes[i]); + + if (tempMatch && (!match || tempMatch[0].length > match[0].length)) { + match = tempMatch; + index = i; + + if (this.options.backtrack_lexer) { + token = this.test_match(tempMatch, rule_ids[i]); + + if (token !== false) { + return token; + } else if (this._backtrack) { + match = undefined; + continue; // rule action called reject() implying a rule MISmatch. + } else { + // else: this is a lexer rule which consumes input without producing a token (e.g. whitespace) + return false; + } + } else if (!this.options.flex) { + break; + } + } + } + + if (match) { + token = this.test_match(match, rule_ids[index]); + + if (token !== false) { + return token; + } + + // else: this is a lexer rule which consumes input without producing a token (e.g. whitespace) + return false; + } + + if (!this._input) { + this.done = true; + this.clear(); + return this.EOF; + } else { + var lineno_msg = ''; + + if (this.options.trackPosition) { + lineno_msg = ' on line ' + (this.yylineno + 1); + } + + var p = this.constructLexErrorInfo('Lexical error' + lineno_msg + ': Unrecognized text.', this.options.lexerErrorsAreRecoverable); + + var pendingInput = this._input; + var activeCondition = this.topState(); + var conditionStackDepth = this.conditionStack.length; + token = this.parseError(p.errStr, p, this.JisonLexerError) || this.ERROR; + + if (token === this.ERROR) { + // we can try to recover from a lexer error that `parseError()` did not 'recover' for us + // by moving forward at least one character at a time IFF the (user-specified?) `parseError()` + // has not consumed/modified any pending input or changed state in the error handler: + if (!this.matches && // and make sure the input has been modified/consumed ... + pendingInput === this._input && // ...or the lexer state has been modified significantly enough + // to merit a non-consuming error handling action right now. 
+ activeCondition === this.topState() && conditionStackDepth === this.conditionStack.length) { + this.input(); + } + } + + return token; + } + }, + + /** + * return next match that has a token + * + * @public + * @this {RegExpLexer} + */ + lex: function lexer_lex() { + var r; + + // allow the PRE/POST handlers set/modify the return token for maximum flexibility of the generated lexer: + if (typeof this.options.pre_lex === 'function') { + r = this.options.pre_lex.call(this); + } + + while (!r) { + r = this.next(); + } + + if (typeof this.options.post_lex === 'function') { + // (also account for a userdef function which does not return any value: keep the token as is) + r = this.options.post_lex.call(this, r) || r; + } + + return r; + }, + + /** + * backwards compatible alias for `pushState()`; + * the latter is symmetrical with `popState()` and we advise to use + * those APIs in any modern lexer code, rather than `begin()`. + * + * @public + * @this {RegExpLexer} + */ + begin: function lexer_begin(condition) { + return this.pushState(condition); + }, + + /** + * activates a new lexer condition state (pushes the new lexer + * condition state onto the condition stack) + * + * @public + * @this {RegExpLexer} + */ + pushState: function lexer_pushState(condition) { + this.conditionStack.push(condition); + this.__currentRuleSet__ = null; + return this; + }, + + /** + * pop the previously active lexer condition state off the condition + * stack + * + * @public + * @this {RegExpLexer} + */ + popState: function lexer_popState() { + var n = this.conditionStack.length - 1; + + if (n > 0) { + this.__currentRuleSet__ = null; + return this.conditionStack.pop(); + } else { + return this.conditionStack[0]; + } + }, + + /** + * return the currently active lexer condition state; when an index + * argument is provided it produces the N-th previous condition state, + * if available + * + * @public + * @this {RegExpLexer} + */ + topState: function lexer_topState(n) { + n = this.conditionStack.length - 1 - Math.abs(n || 0); + + if (n >= 0) { + return this.conditionStack[n]; + } else { + return 'INITIAL'; + } + }, + + /** + * (internal) determine the lexer rule set which is active for the + * currently active lexer condition state + * + * @public + * @this {RegExpLexer} + */ + _currentRules: function lexer__currentRules() { + if (this.conditionStack.length && this.conditionStack[this.conditionStack.length - 1]) { + return this.conditions[this.conditionStack[this.conditionStack.length - 1]]; + } else { + return this.conditions['INITIAL']; + } + }, + + /** + * return the number of states currently on the stack + * + * @public + * @this {RegExpLexer} + */ + stateStackSize: function lexer_stateStackSize() { + return this.conditionStack.length; + }, + + options: { + xregexp: true, + ranges: true, + trackPosition: true, + parseActionsUseYYMERGELOCATIONINFO: true, + easy_keyword_rules: true + }, + + JisonLexerError: JisonLexerError, + + performAction: function lexer__performAction(yy, yyrulenumber, YY_START) { + var yy_ = this; + switch (yyrulenumber) { + case 2: + /*! Conditions:: action */ + /*! Rule:: \/[^ /]*?['"{}][^ ]*?\/ */ + return 43; // regexp with braces or quotes (and no spaces) + + break; + + case 7: + /*! Conditions:: action */ + /*! Rule:: \{ */ + yy.depth++; + + return 12; + break; + + case 8: + /*! Conditions:: action */ + /*! Rule:: \} */ + if (yy.depth === 0) { + this.popState(); + } else { + yy.depth--; + } + + return 13; + break; + + case 9: + /*! Conditions:: token */ + /*! 
Rule:: {BR} */ + this.popState(); + + break; + + case 10: + /*! Conditions:: token */ + /*! Rule:: %% */ + this.popState(); + + break; + + case 11: + /*! Conditions:: token */ + /*! Rule:: ; */ + this.popState(); + + break; + + case 12: + /*! Conditions:: bnf ebnf */ + /*! Rule:: %% */ + this.pushState('code'); + + return 14; + break; + + case 25: + /*! Conditions:: options */ + /*! Rule:: = */ + this.pushState('option_values'); + + return 3; + break; + + case 26: + /*! Conditions:: option_values */ + /*! Rule:: "{DOUBLEQUOTED_STRING_CONTENT}" */ + yy_.yytext = unescQuote(this.matches[1], /\\"/g); + + this.popState(); + return 29; // value is always a string type + break; + + case 27: + /*! Conditions:: option_values */ + /*! Rule:: '{QUOTED_STRING_CONTENT}' */ + yy_.yytext = unescQuote(this.matches[1], /\\'/g); + + this.popState(); + return 29; // value is always a string type + break; + + case 28: + /*! Conditions:: option_values */ + /*! Rule:: `{ES2017_STRING_CONTENT}` */ + yy_.yytext = unescQuote(this.matches[1], /\\`/g); + + this.popState(); + return 29; // value is always a string type + break; + + case 29: + /*! Conditions:: INITIAL ebnf bnf token path options option_values */ + /*! Rule:: \/\/[^\r\n]* */ + /* skip single-line comment */ + break; + + case 30: + /*! Conditions:: INITIAL ebnf bnf token path options option_values */ + /*! Rule:: \/\*[^]*?\*\/ */ + /* skip multi-line comment */ + break; + + case 31: + /*! Conditions:: option_values */ + /*! Rule:: [^\s\r\n]+ */ + this.popState(); + + return 30; + break; + + case 32: + /*! Conditions:: options */ + /*! Rule:: {BR}{WS}+(?=\S) */ + /* skip leading whitespace on the next line of input, when followed by more options */ + break; + + case 33: + /*! Conditions:: options */ + /*! Rule:: {BR} */ + this.popState(); + + return 28; + break; + + case 34: + /*! Conditions:: options option_values */ + /*! Rule:: {WS}+ */ + /* skip whitespace */ + break; + + case 35: + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: {WS}+ */ + /* skip whitespace */ + break; + + case 36: + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: {BR}+ */ + /* skip newlines */ + break; + + case 37: + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: \[{ID}\] */ + yy_.yytext = this.matches[1]; + + return 39; + break; + + case 42: + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: "{DOUBLEQUOTED_STRING_CONTENT}" */ + yy_.yytext = unescQuote(this.matches[1], /\\"/g); + + return 26; + break; + + case 43: + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: '{QUOTED_STRING_CONTENT}' */ + yy_.yytext = unescQuote(this.matches[1], /\\'/g); + + return 26; + break; + + case 48: + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: %% */ + this.pushState(yy.ebnf ? 'ebnf' : 'bnf'); + + return 14; + break; + + case 49: + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: %ebnf\b */ + yy.ebnf = true; + + return 20; + break; + + case 57: + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: %token\b */ + this.pushState('token'); + + return 18; + break; + + case 59: + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: %options\b */ + this.pushState('options'); + + return 27; + break; + + case 60: + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: %lex{LEX_CONTENT}\/lex\b */ + // remove the %lex../lex wrapper and return the pure lex section: + yy_.yytext = this.matches[1]; + + return 17; + break; + + case 63: + /*! Conditions:: INITIAL ebnf bnf code */ + /*! 
Rule:: %include\b */ + this.pushState('path'); + + return 44; + break; + + case 64: + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: %{NAME}([^\r\n]*) */ + /* ignore unrecognized decl */ + this.warn(rmCommonWS(_templateObject37, dquote(yy_.yytext), dquote(this.topState())) + this.prettyPrintRange(yy_.yylloc)); + + yy_.yytext = [this.matches[1], // {NAME} + this.matches[2].trim() // optional value/parameters + ]; + + return 21; + break; + + case 65: + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: <{ID}> */ + yy_.yytext = this.matches[1]; + + return 36; + break; + + case 66: + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: \{\{([^]*?)\}\} */ + yy_.yytext = this.matches[1].replace(/\}\\\}/g, '}}'); // unescape any literal '}\}' that exists within the action code block + + return 15; + break; + + case 67: + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: %\{([^]*?)%\} */ + yy_.yytext = this.matches[1].replace(/%\\\}/g, '%}'); // unescape any literal '%\}' that exists within the action code block + + return 15; + break; + + case 68: + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: \{ */ + yy.depth = 0; + + this.pushState('action'); + return 12; + break; + + case 69: + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: ->.* */ + yy_.yytext = yy_.yytext.substr(2, yy_.yyleng - 2).trim(); + + return 42; + break; + + case 70: + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: →.* */ + yy_.yytext = yy_.yytext.substr(1, yy_.yyleng - 1).trim(); + + return 42; + break; + + case 71: + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: =>.* */ + yy_.yytext = yy_.yytext.substr(2, yy_.yyleng - 2).trim(); + + return 42; + break; + + case 72: + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: {HEX_NUMBER} */ + yy_.yytext = parseInt(yy_.yytext, 16); + + return 37; + break; + + case 73: + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: {DECIMAL_NUMBER}(?![xX0-9a-fA-F]) */ + yy_.yytext = parseInt(yy_.yytext, 10); + + return 37; + break; + + case 75: + /*! Conditions:: code */ + /*! Rule:: [^\r\n]+ */ + return 46; // the bit of CODE just before EOF... + + break; + + case 76: + /*! Conditions:: path */ + /*! Rule:: {BR} */ + this.popState(); + + this.unput(yy_.yytext); + break; + + case 77: + /*! Conditions:: path */ + /*! Rule:: "{DOUBLEQUOTED_STRING_CONTENT}" */ + yy_.yytext = unescQuote(this.matches[1]); + + this.popState(); + return 45; + break; + + case 78: + /*! Conditions:: path */ + /*! Rule:: '{QUOTED_STRING_CONTENT}' */ + yy_.yytext = unescQuote(this.matches[1]); + + this.popState(); + return 45; + break; + + case 79: + /*! Conditions:: path */ + /*! Rule:: {WS}+ */ + // skip whitespace in the line + break; + + case 80: + /*! Conditions:: path */ + /*! Rule:: [^\s\r\n]+ */ + this.popState(); + + return 45; + break; + + case 81: + /*! Conditions:: action */ + /*! Rule:: " */ + yy_.yyerror(rmCommonWS(_templateObject38) + this.prettyPrintRange(yy_.yylloc)); + + return 2; + break; + + case 82: + /*! Conditions:: action */ + /*! Rule:: ' */ + yy_.yyerror(rmCommonWS(_templateObject38) + this.prettyPrintRange(yy_.yylloc)); + + return 2; + break; + + case 83: + /*! Conditions:: action */ + /*! Rule:: ` */ + yy_.yyerror(rmCommonWS(_templateObject38) + this.prettyPrintRange(yy_.yylloc)); + + return 2; + break; + + case 84: + /*! Conditions:: option_values */ + /*! Rule:: " */ + yy_.yyerror(rmCommonWS(_templateObject39) + this.prettyPrintRange(yy_.yylloc)); + + return 2; + break; + + case 85: + /*! Conditions:: option_values */ + /*! 
Rule:: ' */ + yy_.yyerror(rmCommonWS(_templateObject39) + this.prettyPrintRange(yy_.yylloc)); + + return 2; + break; + + case 86: + /*! Conditions:: option_values */ + /*! Rule:: ` */ + yy_.yyerror(rmCommonWS(_templateObject39) + this.prettyPrintRange(yy_.yylloc)); + + return 2; + break; + + case 87: + /*! Conditions:: * */ + /*! Rule:: " */ + var rules = this.topState() === 'macro' ? 'macro\'s' : this.topState(); + + yy_.yyerror(rmCommonWS(_templateObject40, rules) + this.prettyPrintRange(yy_.yylloc)); + + return 2; + break; + + case 88: + /*! Conditions:: * */ + /*! Rule:: ' */ + var rules = this.topState() === 'macro' ? 'macro\'s' : this.topState(); + + yy_.yyerror(rmCommonWS(_templateObject40, rules) + this.prettyPrintRange(yy_.yylloc)); + + return 2; + break; + + case 89: + /*! Conditions:: * */ + /*! Rule:: ` */ + var rules = this.topState() === 'macro' ? 'macro\'s' : this.topState(); + + yy_.yyerror(rmCommonWS(_templateObject40, rules) + this.prettyPrintRange(yy_.yylloc)); + + return 2; + break; + + case 90: + /*! Conditions:: * */ + /*! Rule:: . */ + /* b0rk on bad characters */ + yy_.yyerror(rmCommonWS(_templateObject41, dquote(yy_.yytext), dquote(this.topState())) + this.prettyPrintRange(yy_.yylloc)); + + break; + + default: + return this.simpleCaseActionClusters[yyrulenumber]; + } + }, + + simpleCaseActionClusters: { + /*! Conditions:: action */ + /*! Rule:: \/\*[^]*?\*\/ */ + 0: 43, + + /*! Conditions:: action */ + /*! Rule:: \/\/[^\r\n]* */ + 1: 43, + + /*! Conditions:: action */ + /*! Rule:: "{DOUBLEQUOTED_STRING_CONTENT}" */ + 3: 43, + + /*! Conditions:: action */ + /*! Rule:: '{QUOTED_STRING_CONTENT}' */ + 4: 43, + + /*! Conditions:: action */ + /*! Rule:: [/"'][^{}/"']+ */ + 5: 43, + + /*! Conditions:: action */ + /*! Rule:: [^{}/"']+ */ + 6: 43, + + /*! Conditions:: bnf ebnf */ + /*! Rule:: %empty\b */ + 13: 38, + + /*! Conditions:: bnf ebnf */ + /*! Rule:: %epsilon\b */ + 14: 38, + + /*! Conditions:: bnf ebnf */ + /*! Rule:: \u0190 */ + 15: 38, + + /*! Conditions:: bnf ebnf */ + /*! Rule:: \u025B */ + 16: 38, + + /*! Conditions:: bnf ebnf */ + /*! Rule:: \u03B5 */ + 17: 38, + + /*! Conditions:: bnf ebnf */ + /*! Rule:: \u03F5 */ + 18: 38, + + /*! Conditions:: ebnf */ + /*! Rule:: \( */ + 19: 7, + + /*! Conditions:: ebnf */ + /*! Rule:: \) */ + 20: 8, + + /*! Conditions:: ebnf */ + /*! Rule:: \* */ + 21: 9, + + /*! Conditions:: ebnf */ + /*! Rule:: \? */ + 22: 10, + + /*! Conditions:: ebnf */ + /*! Rule:: \+ */ + 23: 11, + + /*! Conditions:: options */ + /*! Rule:: {NAME} */ + 24: 25, + + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: {ID} */ + 38: 24, + + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: {NAME} */ + 39: 25, + + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: \$end\b */ + 40: 40, + + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: \$eof\b */ + 41: 40, + + /*! Conditions:: token */ + /*! Rule:: [^\s\r\n]+ */ + 44: 'TOKEN_WORD', + + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: : */ + 45: 5, + + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: ; */ + 46: 4, + + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: \| */ + 47: 6, + + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: %debug\b */ + 50: 19, + + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: %parser-type\b */ + 51: 32, + + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: %prec\b */ + 52: 41, + + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: %start\b */ + 53: 16, + + /*! Conditions:: token bnf ebnf INITIAL */ + /*! 
Rule:: %left\b */ + 54: 33, + + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: %right\b */ + 55: 34, + + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: %nonassoc\b */ + 56: 35, + + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: %parse-param\b */ + 58: 31, + + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: %code\b */ + 61: 23, + + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: %import\b */ + 62: 22, + + /*! Conditions:: code */ + /*! Rule:: [^\r\n]*(\r|\n)+ */ + 74: 46, + + /*! Conditions:: * */ + /*! Rule:: $ */ + 91: 1 + }, + + rules: [ + /* 0: */new XRegExp('^(?:\\/\\*[^]*?\\*\\/)', ''), + /* 1: *//^(?:\/\/[^\r\n]*)/, + /* 2: *//^(?:\/[^ \/]*?['"{}][^ ]*?\/)/, + /* 3: *//^(?:"((?:\\"|\\[^"]|[^\n\r"\\])*)")/, + /* 4: *//^(?:'((?:\\'|\\[^']|[^\n\r'\\])*)')/, + /* 5: *//^(?:[\/"'][^{}\/"']+)/, + /* 6: *//^(?:[^{}\/"']+)/, + /* 7: *//^(?:\{)/, + /* 8: *//^(?:\})/, + /* 9: *//^(?:(\r\n|\n|\r))/, + /* 10: *//^(?:%%)/, + /* 11: *//^(?:;)/, + /* 12: *//^(?:%%)/, + /* 13: *//^(?:%empty\b)/, + /* 14: *//^(?:%epsilon\b)/, + /* 15: *//^(?:\u0190)/, + /* 16: *//^(?:\u025B)/, + /* 17: *//^(?:\u03B5)/, + /* 18: *//^(?:\u03F5)/, + /* 19: *//^(?:\()/, + /* 20: *//^(?:\))/, + /* 21: *//^(?:\*)/, + /* 22: *//^(?:\?)/, + /* 23: *//^(?:\+)/, + /* 24: */new XRegExp('^(?:([\\p{Alphabetic}_](?:[\\p{Alphabetic}\\p{Number}\\-_]*(?:[\\p{Alphabetic}\\p{Number}_]))?))', ''), + /* 25: *//^(?:=)/, + /* 26: *//^(?:"((?:\\"|\\[^"]|[^\n\r"\\])*)")/, + /* 27: *//^(?:'((?:\\'|\\[^']|[^\n\r'\\])*)')/, + /* 28: *//^(?:`((?:\\`|\\[^`]|[^\\`])*)`)/, + /* 29: *//^(?:\/\/[^\r\n]*)/, + /* 30: */new XRegExp('^(?:\\/\\*[^]*?\\*\\/)', ''), + /* 31: *//^(?:\S+)/, + /* 32: *//^(?:(\r\n|\n|\r)([^\S\n\r])+(?=\S))/, + /* 33: *//^(?:(\r\n|\n|\r))/, + /* 34: *//^(?:([^\S\n\r])+)/, + /* 35: *//^(?:([^\S\n\r])+)/, + /* 36: *//^(?:(\r\n|\n|\r)+)/, + /* 37: */new XRegExp('^(?:\\[([\\p{Alphabetic}_](?:[\\p{Alphabetic}\\p{Number}_])*)\\])', ''), + /* 38: */new XRegExp('^(?:([\\p{Alphabetic}_](?:[\\p{Alphabetic}\\p{Number}_])*))', ''), + /* 39: */new XRegExp('^(?:([\\p{Alphabetic}_](?:[\\p{Alphabetic}\\p{Number}\\-_]*(?:[\\p{Alphabetic}\\p{Number}_]))?))', ''), + /* 40: *//^(?:\$end\b)/, + /* 41: *//^(?:\$eof\b)/, + /* 42: *//^(?:"((?:\\"|\\[^"]|[^\n\r"\\])*)")/, + /* 43: *//^(?:'((?:\\'|\\[^']|[^\n\r'\\])*)')/, + /* 44: *//^(?:\S+)/, + /* 45: *//^(?::)/, + /* 46: *//^(?:;)/, + /* 47: *//^(?:\|)/, + /* 48: *//^(?:%%)/, + /* 49: *//^(?:%ebnf\b)/, + /* 50: *//^(?:%debug\b)/, + /* 51: *//^(?:%parser-type\b)/, + /* 52: *//^(?:%prec\b)/, + /* 53: *//^(?:%start\b)/, + /* 54: *//^(?:%left\b)/, + /* 55: *//^(?:%right\b)/, + /* 56: *//^(?:%nonassoc\b)/, + /* 57: *//^(?:%token\b)/, + /* 58: *//^(?:%parse-param\b)/, + /* 59: *//^(?:%options\b)/, + /* 60: */new XRegExp('^(?:%lex((?:[^\\S\\n\\r])*(?:(?:\\r\\n|\\n|\\r)[^]*?)?(?:\\r\\n|\\n|\\r)(?:[^\\S\\n\\r])*)\\/lex\\b)', ''), + /* 61: *//^(?:%code\b)/, + /* 62: *//^(?:%import\b)/, + /* 63: *//^(?:%include\b)/, + /* 64: */new XRegExp('^(?:%([\\p{Alphabetic}_](?:[\\p{Alphabetic}\\p{Number}\\-_]*(?:[\\p{Alphabetic}\\p{Number}_]))?)([^\\n\\r]*))', ''), + /* 65: */new XRegExp('^(?:<([\\p{Alphabetic}_](?:[\\p{Alphabetic}\\p{Number}_])*)>)', ''), + /* 66: */new XRegExp('^(?:\\{\\{([^]*?)\\}\\})', ''), + /* 67: */new XRegExp('^(?:%\\{([^]*?)%\\})', ''), + /* 68: *//^(?:\{)/, + /* 69: *//^(?:->.*)/, + /* 70: *//^(?:→.*)/, + /* 71: *//^(?:=>.*)/, + /* 72: *//^(?:(0[Xx][\dA-Fa-f]+))/, + /* 73: *//^(?:([1-9]\d*)(?![\dA-FXa-fx]))/, + /* 74: *//^(?:[^\r\n]*(\r|\n)+)/, + /* 75: 
*//^(?:[^\r\n]+)/, + /* 76: *//^(?:(\r\n|\n|\r))/, + /* 77: *//^(?:"((?:\\"|\\[^"]|[^\n\r"\\])*)")/, + /* 78: *//^(?:'((?:\\'|\\[^']|[^\n\r'\\])*)')/, + /* 79: *//^(?:([^\S\n\r])+)/, + /* 80: *//^(?:\S+)/, + /* 81: *//^(?:")/, + /* 82: *//^(?:')/, + /* 83: *//^(?:`)/, + /* 84: *//^(?:")/, + /* 85: *//^(?:')/, + /* 86: *//^(?:`)/, + /* 87: *//^(?:")/, + /* 88: *//^(?:')/, + /* 89: *//^(?:`)/, + /* 90: *//^(?:.)/, + /* 91: *//^(?:$)/], + + conditions: { + 'action': { + rules: [0, 1, 2, 3, 4, 5, 6, 7, 8, 81, 82, 83, 87, 88, 89, 90, 91], + inclusive: false + }, + + 'code': { + rules: [63, 74, 75, 87, 88, 89, 90, 91], + inclusive: false + }, + + 'path': { + rules: [29, 30, 76, 77, 78, 79, 80, 87, 88, 89, 90, 91], + inclusive: false + }, + + 'options': { + rules: [24, 25, 29, 30, 32, 33, 34, 87, 88, 89, 90, 91], + inclusive: false + }, + + 'option_values': { + rules: [26, 27, 28, 29, 30, 31, 34, 84, 85, 86, 87, 88, 89, 90, 91], + inclusive: false + }, + + 'token': { + rules: [9, 10, 11, 29, 30, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 87, 88, 89, 90, 91], + + inclusive: true + }, + + 'bnf': { + rules: [12, 13, 14, 15, 16, 17, 18, 29, 30, 35, 36, 37, 38, 39, 40, 41, 42, 43, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 87, 88, 89, 90, 91], + + inclusive: true + }, + + 'ebnf': { + rules: [12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 29, 30, 35, 36, 37, 38, 39, 40, 41, 42, 43, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 87, 88, 89, 90, 91], + + inclusive: true + }, + + 'INITIAL': { + rules: [29, 30, 35, 36, 37, 38, 39, 40, 41, 42, 43, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 87, 88, 89, 90, 91], + + inclusive: true + } + } + }; + + var rmCommonWS = helpers.rmCommonWS; + var dquote = helpers.dquote; + + function unescQuote(str) { + str = '' + str; + var a = str.split('\\\\'); + + a = a.map(function (s) { + return s.replace(/\\'/g, '\'').replace(/\\"/g, '"'); + }); + + str = a.join('\\\\'); + return str; + } + + lexer.warn = function l_warn() { + if (this.yy && this.yy.parser && typeof this.yy.parser.warn === 'function') { + return this.yy.parser.warn.apply(this, arguments); + } else { + console.warn.apply(console, arguments); + } + }; + + lexer.log = function l_log() { + if (this.yy && this.yy.parser && typeof this.yy.parser.log === 'function') { + return this.yy.parser.log.apply(this, arguments); + } else { + console.log.apply(console, arguments); + } + }; + + return lexer; +}(); +parser.lexer = lexer; + +function Parser() { + this.yy = {}; +} +Parser.prototype = parser; +parser.Parser = Parser; + +function yyparse() { + return parser.parse.apply(parser, arguments); +} + +var bnf = { + parser: parser, + Parser: Parser, + parse: yyparse + +}; + +var version = '0.6.1-205'; // require('./package.json').version; + +function parse(grammar) { + return bnf.parser.parse(grammar); +} + +// adds a declaration to the grammar +bnf.parser.yy.addDeclaration = function bnfAddDeclaration(grammar, decl) { + if (decl.start) { + grammar.start = decl.start; + } else if (decl.lex) { + grammar.lex = parseLex(decl.lex.text, decl.lex.position); + } else if (decl.operator) { + if (!grammar.operators) grammar.operators = []; + grammar.operators.push(decl.operator); + } else if (decl.token) { + if 
(!grammar.extra_tokens) grammar.extra_tokens = []; + grammar.extra_tokens.push(decl.token); + } else if (decl.token_list) { + if (!grammar.extra_tokens) grammar.extra_tokens = []; + decl.token_list.forEach(function (tok) { + grammar.extra_tokens.push(tok); + }); + } else if (decl.parseParams) { + if (!grammar.parseParams) grammar.parseParams = []; + grammar.parseParams = grammar.parseParams.concat(decl.parseParams); + } else if (decl.parserType) { + if (!grammar.options) grammar.options = {}; + grammar.options.type = decl.parserType; + } else if (decl.include) { + if (!grammar.moduleInclude) grammar.moduleInclude = ''; + grammar.moduleInclude += decl.include; + } else if (decl.options) { + if (!grammar.options) grammar.options = {}; + // last occurrence of `%options` wins: + for (var i = 0; i < decl.options.length; i++) { + grammar.options[decl.options[i][0]] = decl.options[i][1]; + } + } else if (decl.unknownDecl) { + if (!grammar.unknownDecls) grammar.unknownDecls = []; + grammar.unknownDecls.push(decl.unknownDecl); + } else if (decl.imports) { + if (!grammar.imports) grammar.imports = []; + grammar.imports.push(decl.imports); + } else if (decl.actionInclude) { + if (!grammar.actionInclude) { + grammar.actionInclude = ''; + } + grammar.actionInclude += decl.actionInclude; + } else if (decl.initCode) { + if (!grammar.moduleInit) { + grammar.moduleInit = []; + } + grammar.moduleInit.push(decl.initCode); // {qualifier: , include: } + } +}; + +// parse an embedded lex section +function parseLex(text, position) { + text = text.replace(/(?:^%lex)|(?:\/lex$)/g, ''); + // We want the lex input to start at the given 'position', if any, + // so that error reports will produce a line number and character index + // which matches the original input file: + position = position || {}; + position.range = position.range || []; + var l = position.first_line | 0; + var c = position.range[0] | 0; + var prelude = ''; + if (l > 1) { + prelude += new Array(l).join('\n'); + c -= prelude.length; + } + if (c > 3) { + prelude = '// ' + new Array(c - 3).join('.') + prelude; + } + return jisonlex.parse(prelude + text); +} + +var ebnf_parser = { + transform: transform +}; + +var ebnfParser = { + parse: parse, + + transform: transform, + + // assistant exports for debugging/testing: + bnf_parser: bnf, + ebnf_parser: ebnf_parser, + bnf_lexer: jisonlex, + + version: version +}; + +module.exports = ebnfParser; diff --git a/dist/ebnf-parser-cjs.js b/dist/ebnf-parser-cjs.js new file mode 100644 index 0000000..0968c67 --- /dev/null +++ b/dist/ebnf-parser-cjs.js @@ -0,0 +1,11537 @@ +'use strict'; + +function _interopDefault (ex) { return (ex && (typeof ex === 'object') && 'default' in ex) ? ex['default'] : ex; } + +var XRegExp = _interopDefault(require('@gerhobbelt/xregexp')); +var helpers = _interopDefault(require('jison-helpers-lib')); +var fs = _interopDefault(require('fs')); +var jisonlex = _interopDefault(require('@gerhobbelt/lex-parser')); + +/* parser generated by jison 0.6.1-205 */ + +/* + * Returns a Parser object of the following structure: + * + * Parser: { + * yy: {} The so-called "shared state" or rather the *source* of it; + * the real "shared state" `yy` passed around to + * the rule actions, etc. is a derivative/copy of this one, + * not a direct reference! 
+ * } + * + * Parser.prototype: { + * yy: {}, + * EOF: 1, + * TERROR: 2, + * + * trace: function(errorMessage, ...), + * + * JisonParserError: function(msg, hash), + * + * quoteName: function(name), + * Helper function which can be overridden by user code later on: put suitable + * quotes around literal IDs in a description string. + * + * originalQuoteName: function(name), + * The basic quoteName handler provided by JISON. + * `cleanupAfterParse()` will clean up and reset `quoteName()` to reference this function + * at the end of the `parse()`. + * + * describeSymbol: function(symbol), + * Return a more-or-less human-readable description of the given symbol, when + * available, or the symbol itself, serving as its own 'description' for lack + * of something better to serve up. + * + * Return NULL when the symbol is unknown to the parser. + * + * symbols_: {associative list: name ==> number}, + * terminals_: {associative list: number ==> name}, + * nonterminals: {associative list: rule-name ==> {associative list: number ==> rule-alt}}, + * terminal_descriptions_: (if there are any) {associative list: number ==> description}, + * productions_: [...], + * + * performAction: function parser__performAction(yytext, yyleng, yylineno, yyloc, yystate, yysp, yyvstack, yylstack, yystack, yysstack), + * + * The function parameters and `this` have the following value/meaning: + * - `this` : reference to the `yyval` internal object, which has members (`$` and `_$`) + * to store/reference the rule value `$$` and location info `@$`. + * + * One important thing to note about `this` a.k.a. `yyval`: every *reduce* action gets + * to see the same object via the `this` reference, i.e. if you wish to carry custom + * data from one reduce action through to the next within a single parse run, then you + * may get nasty and use `yyval` a.k.a. `this` for storing you own semi-permanent data. + * + * `this.yy` is a direct reference to the `yy` shared state object. + * + * `%parse-param`-specified additional `parse()` arguments have been added to this `yy` + * object at `parse()` start and are therefore available to the action code via the + * same named `yy.xxxx` attributes (where `xxxx` represents a identifier name from + * the %parse-param` list. + * + * - `yytext` : reference to the lexer value which belongs to the last lexer token used + * to match this rule. This is *not* the look-ahead token, but the last token + * that's actually part of this rule. + * + * Formulated another way, `yytext` is the value of the token immediately preceeding + * the current look-ahead token. + * Caveats apply for rules which don't require look-ahead, such as epsilon rules. + * + * - `yyleng` : ditto as `yytext`, only now for the lexer.yyleng value. + * + * - `yylineno`: ditto as `yytext`, only now for the lexer.yylineno value. + * + * - `yyloc` : ditto as `yytext`, only now for the lexer.yylloc lexer token location info. + * + * WARNING: since jison 0.4.18-186 this entry may be NULL/UNDEFINED instead + * of an empty object when no suitable location info can be provided. + * + * - `yystate` : the current parser state number, used internally for dispatching and + * executing the action code chunk matching the rule currently being reduced. + * + * - `yysp` : the current state stack position (a.k.a. 'stack pointer') + * + * This one comes in handy when you are going to do advanced things to the parser + * stacks, all of which are accessible from your action code (see the next entries below). 
+ * + * Also note that you can access this and other stack index values using the new double-hash + * syntax, i.e. `##$ === ##0 === yysp`, while `##1` is the stack index for all things + * related to the first rule term, just like you have `$1`, `@1` and `#1`. + * This is made available to write very advanced grammar action rules, e.g. when you want + * to investigate the parse state stack in your action code, which would, for example, + * be relevant when you wish to implement error diagnostics and reporting schemes similar + * to the work described here: + * + * + Pottier, F., 2016. Reachability and error diagnosis in LR(1) automata. + * In Journées Francophones des Languages Applicatifs. + * + * + Jeffery, C.L., 2003. Generating LR syntax error messages from examples. + * ACM Transactions on Programming Languages and Systems (TOPLAS), 25(5), pp.631–640. + * + * - `yyrulelength`: the current rule's term count, i.e. the number of entries occupied on the stack. + * + * This one comes in handy when you are going to do advanced things to the parser + * stacks, all of which are accessible from your action code (see the next entries below). + * + * - `yyvstack`: reference to the parser value stack. Also accessed via the `$1` etc. + * constructs. + * + * - `yylstack`: reference to the parser token location stack. Also accessed via + * the `@1` etc. constructs. + * + * WARNING: since jison 0.4.18-186 this array MAY contain slots which are + * UNDEFINED rather than an empty (location) object, when the lexer/parser + * action code did not provide a suitable location info object when such a + * slot was filled! + * + * - `yystack` : reference to the parser token id stack. Also accessed via the + * `#1` etc. constructs. + * + * Note: this is a bit of a **white lie** as we can statically decode any `#n` reference to + * its numeric token id value, hence that code wouldn't need the `yystack` but *you* might + * want access this array for your own purposes, such as error analysis as mentioned above! + * + * Note that this stack stores the current stack of *tokens*, that is the sequence of + * already parsed=reduced *nonterminals* (tokens representing rules) and *terminals* + * (lexer tokens *shifted* onto the stack until the rule they belong to is found and + * *reduced*. + * + * - `yysstack`: reference to the parser state stack. This one carries the internal parser + * *states* such as the one in `yystate`, which are used to represent + * the parser state machine in the *parse table*. *Very* *internal* stuff, + * what can I say? If you access this one, you're clearly doing wicked things + * + * - `...` : the extra arguments you specified in the `%parse-param` statement in your + * grammar definition file. + * + * table: [...], + * State transition table + * ---------------------- + * + * index levels are: + * - `state` --> hash table + * - `symbol` --> action (number or array) + * + * If the `action` is an array, these are the elements' meaning: + * - index [0]: 1 = shift, 2 = reduce, 3 = accept + * - index [1]: GOTO `state` + * + * If the `action` is a number, it is the GOTO `state` + * + * defaultActions: {...}, + * + * parseError: function(str, hash, ExceptionClass), + * yyError: function(str, ...), + * yyRecovering: function(), + * yyErrOk: function(), + * yyClearIn: function(), + * + * constructParseErrorInfo: function(error_message, exception_object, expected_token_set, is_recoverable), + * Helper function **which will be set up during the first invocation of the `parse()` method**. 
+ * Produces a new errorInfo 'hash object' which can be passed into `parseError()`. + * See it's use in this parser kernel in many places; example usage: + * + * var infoObj = parser.constructParseErrorInfo('fail!', null, + * parser.collect_expected_token_set(state), true); + * var retVal = parser.parseError(infoObj.errStr, infoObj, parser.JisonParserError); + * + * originalParseError: function(str, hash, ExceptionClass), + * The basic `parseError` handler provided by JISON. + * `cleanupAfterParse()` will clean up and reset `parseError()` to reference this function + * at the end of the `parse()`. + * + * options: { ... parser %options ... }, + * + * parse: function(input[, args...]), + * Parse the given `input` and return the parsed value (or `true` when none was provided by + * the root action, in which case the parser is acting as a *matcher*). + * You MAY use the additional `args...` parameters as per `%parse-param` spec of this grammar: + * these extra `args...` are added verbatim to the `yy` object reference as member variables. + * + * WARNING: + * Parser's additional `args...` parameters (via `%parse-param`) MAY conflict with + * any attributes already added to `yy` by the jison run-time; + * when such a collision is detected an exception is thrown to prevent the generated run-time + * from silently accepting this confusing and potentially hazardous situation! + * + * The lexer MAY add its own set of additional parameters (via the `%parse-param` line in + * the lexer section of the grammar spec): these will be inserted in the `yy` shared state + * object and any collision with those will be reported by the lexer via a thrown exception. + * + * cleanupAfterParse: function(resultValue, invoke_post_methods, do_not_nuke_errorinfos), + * Helper function **which will be set up during the first invocation of the `parse()` method**. + * This helper API is invoked at the end of the `parse()` call, unless an exception was thrown + * and `%options no-try-catch` has been defined for this grammar: in that case this helper MAY + * be invoked by calling user code to ensure the `post_parse` callbacks are invoked and + * the internal parser gets properly garbage collected under these particular circumstances. + * + * yyMergeLocationInfo: function(first_index, last_index, first_yylloc, last_yylloc, dont_look_back), + * Helper function **which will be set up during the first invocation of the `parse()` method**. + * This helper API can be invoked to calculate a spanning `yylloc` location info object. + * + * Note: %epsilon rules MAY specify no `first_index` and `first_yylloc`, in which case + * this function will attempt to obtain a suitable location marker by inspecting the location stack + * backwards. + * + * For more info see the documentation comment further below, immediately above this function's + * implementation. + * + * lexer: { + * yy: {...}, A reference to the so-called "shared state" `yy` once + * received via a call to the `.setInput(input, yy)` lexer API. 
+ * EOF: 1, + * ERROR: 2, + * JisonLexerError: function(msg, hash), + * parseError: function(str, hash, ExceptionClass), + * setInput: function(input, [yy]), + * input: function(), + * unput: function(str), + * more: function(), + * reject: function(), + * less: function(n), + * pastInput: function(n), + * upcomingInput: function(n), + * showPosition: function(), + * test_match: function(regex_match_array, rule_index, ...), + * next: function(...), + * lex: function(...), + * begin: function(condition), + * pushState: function(condition), + * popState: function(), + * topState: function(), + * _currentRules: function(), + * stateStackSize: function(), + * cleanupAfterLex: function() + * + * options: { ... lexer %options ... }, + * + * performAction: function(yy, yy_, $avoiding_name_collisions, YY_START, ...), + * rules: [...], + * conditions: {associative list: name ==> set}, + * } + * } + * + * + * token location info (@$, _$, etc.): { + * first_line: n, + * last_line: n, + * first_column: n, + * last_column: n, + * range: [start_number, end_number] + * (where the numbers are indexes into the input string, zero-based) + * } + * + * --- + * + * The `parseError` function receives a 'hash' object with these members for lexer and + * parser errors: + * + * { + * text: (matched text) + * token: (the produced terminal token, if any) + * token_id: (the produced terminal token numeric ID, if any) + * line: (yylineno) + * loc: (yylloc) + * } + * + * parser (grammar) errors will also provide these additional members: + * + * { + * expected: (array describing the set of expected tokens; + * may be UNDEFINED when we cannot easily produce such a set) + * state: (integer (or array when the table includes grammar collisions); + * represents the current internal state of the parser kernel. + * can, for example, be used to pass to the `collect_expected_token_set()` + * API to obtain the expected token set) + * action: (integer; represents the current internal action which will be executed) + * new_state: (integer; represents the next/planned internal state, once the current + * action has executed) + * recoverable: (boolean: TRUE when the parser MAY have an error recovery rule + * available for this particular error) + * state_stack: (array: the current parser LALR/LR internal state stack; this can be used, + * for instance, for advanced error analysis and reporting) + * value_stack: (array: the current parser LALR/LR internal `$$` value stack; this can be used, + * for instance, for advanced error analysis and reporting) + * location_stack: (array: the current parser LALR/LR internal location stack; this can be used, + * for instance, for advanced error analysis and reporting) + * yy: (object: the current parser internal "shared state" `yy` + * as is also available in the rule actions; this can be used, + * for instance, for advanced error analysis and reporting) + * lexer: (reference to the current lexer instance used by the parser) + * parser: (reference to the current parser instance) + * } + * + * while `this` will reference the current parser instance. 
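+ *
+ *   (Editor's note: a hedged sketch, not part of the generated output. As described in the
+ *   options section below, a userland `parseError` override can be supplied via the `yy`
+ *   shared state and receives the hash documented above; the handler body shown here is
+ *   purely illustrative.)
+ *
+ *       parser.yy.parseError = function (str, hash, ExceptionClass) {
+ *           if (hash.recoverable) {
+ *               console.warn('line ' + hash.line + ': ' + str);
+ *               return;                       // allow the kernel to attempt error recovery
+ *           }
+ *           throw new ExceptionClass(str, hash);
+ *       };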
+ * + * When `parseError` is invoked by the lexer, `this` will still reference the related *parser* + * instance, while these additional `hash` fields will also be provided: + * + * { + * lexer: (reference to the current lexer instance which reported the error) + * } + * + * When `parseError` is invoked by the parser due to a **JavaScript exception** being fired + * from either the parser or lexer, `this` will still reference the related *parser* + * instance, while these additional `hash` fields will also be provided: + * + * { + * exception: (reference to the exception thrown) + * } + * + * Please do note that in the latter situation, the `expected` field will be omitted as + * this type of failure is assumed not to be due to *parse errors* but rather due to user + * action code in either parser or lexer failing unexpectedly. + * + * --- + * + * You can specify parser options by setting / modifying the `.yy` object of your Parser instance. + * These options are available: + * + * ### options which are global for all parser instances + * + * Parser.pre_parse: function(yy) + * optional: you can specify a pre_parse() function in the chunk following + * the grammar, i.e. after the last `%%`. + * Parser.post_parse: function(yy, retval, parseInfo) { return retval; } + * optional: you can specify a post_parse() function in the chunk following + * the grammar, i.e. after the last `%%`. When it does not return any value, + * the parser will return the original `retval`. + * + * ### options which can be set up per parser instance + * + * yy: { + * pre_parse: function(yy) + * optional: is invoked before the parse cycle starts (and before the first + * invocation of `lex()`) but immediately after the invocation of + * `parser.pre_parse()`). + * post_parse: function(yy, retval, parseInfo) { return retval; } + * optional: is invoked when the parse terminates due to success ('accept') + * or failure (even when exceptions are thrown). + * `retval` contains the return value to be produced by `Parser.parse()`; + * this function can override the return value by returning another. + * When it does not return any value, the parser will return the original + * `retval`. + * This function is invoked immediately before `parser.post_parse()`. + * + * parseError: function(str, hash, ExceptionClass) + * optional: overrides the default `parseError` function. + * quoteName: function(name), + * optional: overrides the default `quoteName` function. + * } + * + * parser.lexer.options: { + * pre_lex: function() + * optional: is invoked before the lexer is invoked to produce another token. + * `this` refers to the Lexer object. + * post_lex: function(token) { return token; } + * optional: is invoked when the lexer has produced a token `token`; + * this function can override the returned token value by returning another. + * When it does not return any (truthy) value, the lexer will return + * the original `token`. + * `this` refers to the Lexer object. + * + * ranges: boolean + * optional: `true` ==> token location info will include a .range[] member. + * flex: boolean + * optional: `true` ==> flex-like lexing behaviour where the rules are tested + * exhaustively to find the longest match. + * backtrack_lexer: boolean + * optional: `true` ==> lexer regexes are tested in order and for invoked; + * the lexer terminates the scan when a token is returned by the action code. + * xregexp: boolean + * optional: `true` ==> lexer rule regexes are "extended regex format" requiring the + * `XRegExp` library. 
When this `%option` has not been specified at compile time, all lexer + * rule regexes have been written as standard JavaScript RegExp expressions. + * } + */ + + +// See also: +// http://stackoverflow.com/questions/1382107/whats-a-good-way-to-extend-error-in-javascript/#35881508 +// but we keep the prototype.constructor and prototype.name assignment lines too for compatibility +// with userland code which might access the derived class in a 'classic' way. +function JisonParserError$1(msg, hash) { + Object.defineProperty(this, 'name', { + enumerable: false, + writable: false, + value: 'JisonParserError' + }); + + if (msg == null) msg = '???'; + + Object.defineProperty(this, 'message', { + enumerable: false, + writable: true, + value: msg + }); + + this.hash = hash; + + var stacktrace; + if (hash && hash.exception instanceof Error) { + var ex2 = hash.exception; + this.message = ex2.message || msg; + stacktrace = ex2.stack; + } + if (!stacktrace) { + if (Error.hasOwnProperty('captureStackTrace')) { // V8/Chrome engine + Error.captureStackTrace(this, this.constructor); + } else { + stacktrace = (new Error(msg)).stack; + } + } + if (stacktrace) { + Object.defineProperty(this, 'stack', { + enumerable: false, + writable: false, + value: stacktrace + }); + } +} + +if (typeof Object.setPrototypeOf === 'function') { + Object.setPrototypeOf(JisonParserError$1.prototype, Error.prototype); +} else { + JisonParserError$1.prototype = Object.create(Error.prototype); +} +JisonParserError$1.prototype.constructor = JisonParserError$1; +JisonParserError$1.prototype.name = 'JisonParserError'; + + + + // helper: reconstruct the productions[] table + function bp$1(s) { + var rv = []; + var p = s.pop; + var r = s.rule; + for (var i = 0, l = p.length; i < l; i++) { + rv.push([ + p[i], + r[i] + ]); + } + return rv; + } + + + + + + // helper: reconstruct the 'goto' table + function bt$1(s) { + var rv = []; + var d = s.len; + var y = s.symbol; + var t = s.type; + var a = s.state; + var m = s.mode; + var g = s.goto; + for (var i = 0, l = d.length; i < l; i++) { + var n = d[i]; + var q = {}; + for (var j = 0; j < n; j++) { + var z = y.shift(); + switch (t.shift()) { + case 2: + q[z] = [ + m.shift(), + g.shift() + ]; + break; + + case 0: + q[z] = a.shift(); + break; + + default: + // type === 1: accept + q[z] = [ + 3 + ]; + } + } + rv.push(q); + } + return rv; + } + + + + // helper: runlength encoding with increment step: code, length: step (default step = 0) + // `this` references an array + function s$1(c, l, a) { + a = a || 0; + for (var i = 0; i < l; i++) { + this.push(c); + c += a; + } + } + + // helper: duplicate sequence from *relative* offset and length. + // `this` references an array + function c$1(i, l) { + i = this.length - i; + for (l += i; i < l; i++) { + this.push(this[i]); + } + } + + // helper: unpack an array using helpers and data, all passed in an array argument 'a'. + function u$1(a) { + var rv = []; + for (var i = 0, l = a.length; i < l; i++) { + var e = a[i]; + // Is this entry a helper function? + if (typeof e === 'function') { + i++; + e.apply(rv, a[i]); + } else { + rv.push(e); + } + } + return rv; + } + + +var parser$1 = { + // Code Generator Information Report + // --------------------------------- + // + // Options: + // + // default action mode: ............. classic,merge + // no try..catch: ................... false + // no default resolve on conflict: false + // on-demand look-ahead: ............ false + // error recovery token skip maximum: 3 + // yyerror in parse actions is: ..... 
NOT recoverable, + // yyerror in lexer actions and other non-fatal lexer are: + // .................................. NOT recoverable, + // debug grammar/output: ............ false + // has partial LR conflict upgrade: true + // rudimentary token-stack support: false + // parser table compression mode: ... 2 + // export debug tables: ............. false + // export *all* tables: ............. false + // module type: ..................... es + // parser engine type: .............. lalr + // output main() in the module: ..... true + // has user-specified main(): ....... false + // has user-specified require()/import modules for main(): + // .................................. false + // number of expected conflicts: .... 0 + // + // + // Parser Analysis flags: + // + // no significant actions (parser is a language matcher only): + // .................................. false + // uses yyleng: ..................... false + // uses yylineno: ................... false + // uses yytext: ..................... false + // uses yylloc: ..................... false + // uses ParseError API: ............. false + // uses YYERROR: .................... false + // uses YYRECOVERING: ............... false + // uses YYERROK: .................... false + // uses YYCLEARIN: .................. false + // tracks rule values: .............. true + // assigns rule values: ............. true + // uses location tracking: .......... false + // assigns location: ................ false + // uses yystack: .................... false + // uses yysstack: ................... false + // uses yysp: ....................... true + // uses yyrulelength: ............... false + // uses yyMergeLocationInfo API: .... false + // has error recovery: .............. false + // has error reporting: ............. false + // + // --------- END OF REPORT ----------- + +trace: function no_op_trace() {}, +JisonParserError: JisonParserError$1, +yy: {}, +options: { + type: "lalr", + hasPartialLrUpgradeOnConflict: true, + errorRecoveryTokenDiscardCount: 3 +}, +symbols_: { + "$accept": 0, + "$end": 1, + "(": 4, + ")": 5, + "*": 6, + "+": 8, + "?": 7, + "ALIAS": 9, + "EOF": 1, + "SYMBOL": 10, + "error": 2, + "expression": 16, + "handle": 13, + "handle_list": 12, + "production": 11, + "rule": 14, + "suffix": 17, + "suffixed_expression": 15, + "|": 3 +}, +terminals_: { + 1: "EOF", + 2: "error", + 3: "|", + 4: "(", + 5: ")", + 6: "*", + 7: "?", + 8: "+", + 9: "ALIAS", + 10: "SYMBOL" +}, +TERROR: 2, +EOF: 1, + +// internals: defined here so the object *structure* doesn't get modified by parse() et al, +// thus helping JIT compilers like Chrome V8. +originalQuoteName: null, +originalParseError: null, +cleanupAfterParse: null, +constructParseErrorInfo: null, +yyMergeLocationInfo: null, + +__reentrant_call_depth: 0, // INTERNAL USE ONLY +__error_infos: [], // INTERNAL USE ONLY: the set of parseErrorInfo objects created since the last cleanup +__error_recovery_infos: [], // INTERNAL USE ONLY: the set of parseErrorInfo objects created since the last cleanup + +// APIs which will be set up depending on user action code analysis: +//yyRecovering: 0, +//yyErrOk: 0, +//yyClearIn: 0, + +// Helper APIs +// ----------- + +// Helper function which can be overridden by user code later on: put suitable quotes around +// literal IDs in a description string. +quoteName: function parser_quoteName(id_str) { + return '"' + id_str + '"'; +}, + +// Return the name of the given symbol (terminal or non-terminal) as a string, when available. 
+// +// Return NULL when the symbol is unknown to the parser. +getSymbolName: function parser_getSymbolName(symbol) { + if (this.terminals_[symbol]) { + return this.terminals_[symbol]; + } + + // Otherwise... this might refer to a RULE token i.e. a non-terminal: see if we can dig that one up. + // + // An example of this may be where a rule's action code contains a call like this: + // + // parser.getSymbolName(#$) + // + // to obtain a human-readable name of the current grammar rule. + var s = this.symbols_; + for (var key in s) { + if (s[key] === symbol) { + return key; + } + } + return null; +}, + +// Return a more-or-less human-readable description of the given symbol, when available, +// or the symbol itself, serving as its own 'description' for lack of something better to serve up. +// +// Return NULL when the symbol is unknown to the parser. +describeSymbol: function parser_describeSymbol(symbol) { + if (symbol !== this.EOF && this.terminal_descriptions_ && this.terminal_descriptions_[symbol]) { + return this.terminal_descriptions_[symbol]; + } else if (symbol === this.EOF) { + return 'end of input'; + } + var id = this.getSymbolName(symbol); + if (id) { + return this.quoteName(id); + } + return null; +}, + +// Produce a (more or less) human-readable list of expected tokens at the point of failure. +// +// The produced list may contain token or token set descriptions instead of the tokens +// themselves to help turning this output into something that easier to read by humans +// unless `do_not_describe` parameter is set, in which case a list of the raw, *numeric*, +// expected terminals and nonterminals is produced. +// +// The returned list (array) will not contain any duplicate entries. +collect_expected_token_set: function parser_collect_expected_token_set(state, do_not_describe) { + var TERROR = this.TERROR; + var tokenset = []; + var check = {}; + // Has this (error?) state been outfitted with a custom expectations description text for human consumption? + // If so, use that one instead of the less palatable token set. + if (!do_not_describe && this.state_descriptions_ && this.state_descriptions_[state]) { + return [this.state_descriptions_[state]]; + } + for (var p in this.table[state]) { + p = +p; + if (p !== TERROR) { + var d = do_not_describe ? p : this.describeSymbol(p); + if (d && !check[d]) { + tokenset.push(d); + check[d] = true; // Mark this token description as already mentioned to prevent outputting duplicate entries. + } + } + } + return tokenset; +}, +productions_: bp$1({ + pop: u$1([ + 11, + 12, + 12, + 13, + 13, + 14, + 14, + 15, + 15, + 16, + 16, + s$1, + [17, 4] +]), + rule: u$1([ + 2, + 1, + 3, + 0, + 1, + 1, + 2, + 3, + c$1, + [8, 6], + 1 +]) +}), +performAction: function parser__PerformAction(yystate /* action[1] */, yysp, yyvstack) { + + /* this == yyval */ + + // the JS engine itself can go and remove these statements when `yy` turns out to be unused in any action code! + switch (yystate) { +case 0: + /*! Production:: $accept : production $end */ + + // default action (generated by JISON mode classic/merge :: VT,VA,-,-,-,-,-,-): + this.$ = yyvstack[yysp - 1]; + // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,-,-,-,-) + break; + +case 1: + /*! 
Production:: production : handle EOF */ + + // default action (generated by JISON mode classic/merge :: VT,VA,-,-,-,-,-,-): + this.$ = yyvstack[yysp - 1]; + // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,-,-,-,-) + + + return yyvstack[yysp - 1]; + break; + +case 2: + /*! Production:: handle_list : handle */ +case 6: + /*! Production:: rule : suffixed_expression */ + + this.$ = [yyvstack[yysp]]; + break; + +case 3: + /*! Production:: handle_list : handle_list "|" handle */ + + yyvstack[yysp - 2].push(yyvstack[yysp]); + this.$ = yyvstack[yysp - 2]; + break; + +case 4: + /*! Production:: handle : %epsilon */ + + this.$ = []; + break; + +case 5: + /*! Production:: handle : rule */ +case 13: + /*! Production:: suffix : "*" */ +case 14: + /*! Production:: suffix : "?" */ +case 15: + /*! Production:: suffix : "+" */ + + this.$ = yyvstack[yysp]; + break; + +case 7: + /*! Production:: rule : rule suffixed_expression */ + + yyvstack[yysp - 1].push(yyvstack[yysp]); + this.$ = yyvstack[yysp - 1]; + break; + +case 8: + /*! Production:: suffixed_expression : expression suffix ALIAS */ + + this.$ = ['xalias', yyvstack[yysp - 1], yyvstack[yysp - 2], yyvstack[yysp]]; + break; + +case 9: + /*! Production:: suffixed_expression : expression suffix */ + + if (yyvstack[yysp]) { + this.$ = [yyvstack[yysp], yyvstack[yysp - 1]]; + } else { + this.$ = yyvstack[yysp - 1]; + } + break; + +case 10: + /*! Production:: expression : SYMBOL */ + + this.$ = ['symbol', yyvstack[yysp]]; + break; + +case 11: + /*! Production:: expression : "(" handle_list ")" */ + + this.$ = ['()', yyvstack[yysp - 1]]; + break; + +case 12: + /*! Production:: suffix : %epsilon */ + + this.$ = undefined; + break; + +} +}, +table: bt$1({ + len: u$1([ + 8, + 1, + 1, + 7, + 0, + 10, + 0, + 9, + 0, + 0, + 6, + s$1, + [0, 3], + 2, + s$1, + [0, 3], + 8, + 0 +]), + symbol: u$1([ + 1, + 4, + 10, + 11, + s$1, + [13, 4, 1], + s$1, + [1, 3], + 3, + 4, + 5, + 10, + c$1, + [9, 3], + s$1, + [3, 8, 1], + 17, + c$1, + [16, 4], + s$1, + [12, 5, 1], + c$1, + [19, 4], + 9, + 10, + 3, + 5, + c$1, + [17, 4], + c$1, + [16, 4] +]), + type: u$1([ + s$1, + [2, 3], + s$1, + [0, 5], + 1, + s$1, + [2, 6], + 0, + 0, + s$1, + [2, 9], + c$1, + [10, 5], + s$1, + [0, 5], + s$1, + [2, 12], + s$1, + [0, 4] +]), + state: u$1([ + s$1, + [1, 5, 1], + 9, + 5, + 10, + 14, + 15, + c$1, + [8, 3], + 19, + c$1, + [4, 3] +]), + mode: u$1([ + 2, + s$1, + [1, 3], + 2, + 2, + 1, + 2, + c$1, + [5, 3], + c$1, + [7, 3], + c$1, + [12, 4], + c$1, + [13, 9], + c$1, + [15, 3], + c$1, + [5, 4] +]), + goto: u$1([ + 4, + 7, + 6, + 8, + 5, + 5, + 7, + 5, + 6, + s$1, + [12, 4], + 11, + 12, + 13, + 12, + 12, + 4, + 7, + 4, + 6, + s$1, + [9, 4], + 16, + 9, + 18, + 17, + c$1, + [12, 4] +]) +}), +defaultActions: { + 4: 6, + 6: 10, + 8: 1, + 9: 7, + 11: 13, + 12: 14, + 13: 15, + 15: 2, + 16: 8, + 17: 11, + 19: 3 +}, +parseError: function parseError(str, hash, ExceptionClass) { + if (hash.recoverable && typeof this.trace === 'function') { + this.trace(str); + hash.destroy(); // destroy... well, *almost*! 
+ } else { + if (!ExceptionClass) { + ExceptionClass = this.JisonParserError; + } + throw new ExceptionClass(str, hash); + } +}, +parse: function parse(input) { + var self = this; + var stack = new Array(128); // token stack: stores token which leads to state at the same index (column storage) + var sstack = new Array(128); // state stack: stores states (column storage) + + var vstack = new Array(128); // semantic value stack + + var table = this.table; + var sp = 0; // 'stack pointer': index into the stacks + + + var symbol = 0; + + + + var EOF = this.EOF; + var NO_ACTION = [0, 20 /* === table.length :: ensures that anyone using this new state will fail dramatically! */]; + + var lexer; + if (this.__lexer__) { + lexer = this.__lexer__; + } else { + lexer = this.__lexer__ = Object.create(this.lexer); + } + + var sharedState_yy = { + parseError: undefined, + quoteName: undefined, + lexer: undefined, + parser: undefined, + pre_parse: undefined, + post_parse: undefined, + pre_lex: undefined, + post_lex: undefined // WARNING: must be written this way for the code expanders to work correctly in both ES5 and ES6 modes! + }; + + this.yyGetSharedState = function yyGetSharedState() { + return sharedState_yy; + }; + + + + + + + + + function shallow_copy_noclobber(dst, src) { + for (var k in src) { + if (typeof dst[k] === 'undefined' && Object.prototype.hasOwnProperty.call(src, k)) { + dst[k] = src[k]; + } + } + } + + // copy state + shallow_copy_noclobber(sharedState_yy, this.yy); + + sharedState_yy.lexer = lexer; + sharedState_yy.parser = this; + + + + + + + // Does the shared state override the default `parseError` that already comes with this instance? + if (typeof sharedState_yy.parseError === 'function') { + this.parseError = function parseErrorAlt(str, hash, ExceptionClass) { + if (!ExceptionClass) { + ExceptionClass = this.JisonParserError; + } + return sharedState_yy.parseError.call(this, str, hash, ExceptionClass); + }; + } else { + this.parseError = this.originalParseError; + } + + // Does the shared state override the default `quoteName` that already comes with this instance? + if (typeof sharedState_yy.quoteName === 'function') { + this.quoteName = function quoteNameAlt(id_str) { + return sharedState_yy.quoteName.call(this, id_str); + }; + } else { + this.quoteName = this.originalQuoteName; + } + + // set up the cleanup function; make it an API so that external code can re-use this one in case of + // calamities or when the `%options no-try-catch` option has been specified for the grammar, in which + // case this parse() API method doesn't come with a `finally { ... }` block any more! + // + // NOTE: as this API uses parse() as a closure, it MUST be set again on every parse() invocation, + // or else your `sharedState`, etc. references will be *wrong*! + this.cleanupAfterParse = function parser_cleanupAfterParse(resultValue, invoke_post_methods, do_not_nuke_errorinfos) { + var rv; + + if (invoke_post_methods) { + var hash; + + if (sharedState_yy.post_parse || this.post_parse) { + // create an error hash info instance: we re-use this API in a **non-error situation** + // as this one delivers all parser internals ready for access by userland code. + hash = this.constructParseErrorInfo(null /* no error! */, null /* no exception! 
*/, null, false); + } + + if (sharedState_yy.post_parse) { + rv = sharedState_yy.post_parse.call(this, sharedState_yy, resultValue, hash); + if (typeof rv !== 'undefined') resultValue = rv; + } + if (this.post_parse) { + rv = this.post_parse.call(this, sharedState_yy, resultValue, hash); + if (typeof rv !== 'undefined') resultValue = rv; + } + + // cleanup: + if (hash && hash.destroy) { + hash.destroy(); + } + } + + if (this.__reentrant_call_depth > 1) return resultValue; // do not (yet) kill the sharedState when this is a reentrant run. + + // clean up the lingering lexer structures as well: + if (lexer.cleanupAfterLex) { + lexer.cleanupAfterLex(do_not_nuke_errorinfos); + } + + // prevent lingering circular references from causing memory leaks: + if (sharedState_yy) { + sharedState_yy.lexer = undefined; + sharedState_yy.parser = undefined; + if (lexer.yy === sharedState_yy) { + lexer.yy = undefined; + } + } + sharedState_yy = undefined; + this.parseError = this.originalParseError; + this.quoteName = this.originalQuoteName; + + // nuke the vstack[] array at least as that one will still reference obsoleted user values. + // To be safe, we nuke the other internal stack columns as well... + stack.length = 0; // fastest way to nuke an array without overly bothering the GC + sstack.length = 0; + + vstack.length = 0; + sp = 0; + + // nuke the error hash info instances created during this run. + // Userland code must COPY any data/references + // in the error hash instance(s) it is more permanently interested in. + if (!do_not_nuke_errorinfos) { + for (var i = this.__error_infos.length - 1; i >= 0; i--) { + var el = this.__error_infos[i]; + if (el && typeof el.destroy === 'function') { + el.destroy(); + } + } + this.__error_infos.length = 0; + + + } + + return resultValue; + }; + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + // NOTE: as this API uses parse() as a closure, it MUST be set again on every parse() invocation, + // or else your `lexer`, `sharedState`, etc. references will be *wrong*! + this.constructParseErrorInfo = function parser_constructParseErrorInfo(msg, ex, expected, recoverable) { + var pei = { + errStr: msg, + exception: ex, + text: lexer.match, + value: lexer.yytext, + token: this.describeSymbol(symbol) || symbol, + token_id: symbol, + line: lexer.yylineno, + + expected: expected, + recoverable: recoverable, + state: state, + action: action, + new_state: newState, + symbol_stack: stack, + state_stack: sstack, + value_stack: vstack, + + stack_pointer: sp, + yy: sharedState_yy, + lexer: lexer, + parser: this, + + // and make sure the error info doesn't stay due to potential + // ref cycle via userland code manipulations. + // These would otherwise all be memory leak opportunities! + // + // Note that only array and object references are nuked as those + // constitute the set of elements which can produce a cyclic ref. + // The rest of the members is kept intact as they are harmless. + destroy: function destructParseErrorInfo() { + // remove cyclic references added to error info: + // info.yy = null; + // info.lexer = null; + // info.value = null; + // info.value_stack = null; + // ... 
+ var rec = !!this.recoverable; + for (var key in this) { + if (this.hasOwnProperty(key) && typeof key === 'object') { + this[key] = undefined; + } + } + this.recoverable = rec; + } + }; + // track this instance so we can `destroy()` it once we deem it superfluous and ready for garbage collection! + this.__error_infos.push(pei); + return pei; + }; + + + + + + + + + + + + + + function lex() { + var token = lexer.lex(); + // if token isn't its numeric value, convert + if (typeof token !== 'number') { + token = self.symbols_[token] || token; + } + + return token || EOF; + } + + + var state, action, r, t; + var yyval = { + $: true, + _$: undefined, + yy: sharedState_yy + }; + var p; + var yyrulelen; + var this_production; + var newState; + var retval = false; + + + try { + this.__reentrant_call_depth++; + + lexer.setInput(input, sharedState_yy); + + + + vstack[sp] = null; + sstack[sp] = 0; + stack[sp] = 0; + ++sp; + + + + + + if (this.pre_parse) { + this.pre_parse.call(this, sharedState_yy); + } + if (sharedState_yy.pre_parse) { + sharedState_yy.pre_parse.call(this, sharedState_yy); + } + + newState = sstack[sp - 1]; + for (;;) { + // retrieve state number from top of stack + state = newState; // sstack[sp - 1]; + + // use default actions if available + if (this.defaultActions[state]) { + action = 2; + newState = this.defaultActions[state]; + } else { + // The single `==` condition below covers both these `===` comparisons in a single + // operation: + // + // if (symbol === null || typeof symbol === 'undefined') ... + if (!symbol) { + symbol = lex(); + } + // read action for current state and first input + t = (table[state] && table[state][symbol]) || NO_ACTION; + newState = t[1]; + action = t[0]; + + + + + + + + + + + + // handle parse error + if (!action) { + var errStr; + var errSymbolDescr = (this.describeSymbol(symbol) || symbol); + var expected = this.collect_expected_token_set(state); + + // Report error + if (typeof lexer.yylineno === 'number') { + errStr = 'Parse error on line ' + (lexer.yylineno + 1) + ': '; + } else { + errStr = 'Parse error: '; + } + if (typeof lexer.showPosition === 'function') { + errStr += '\n' + lexer.showPosition(79 - 10, 10) + '\n'; + } + if (expected.length) { + errStr += 'Expecting ' + expected.join(', ') + ', got unexpected ' + errSymbolDescr; + } else { + errStr += 'Unexpected ' + errSymbolDescr; + } + // we cannot recover from the error! + p = this.constructParseErrorInfo(errStr, null, expected, false); + retval = this.parseError(p.errStr, p, this.JisonParserError); + break; + } + + + } + + + + + + + + + + + switch (action) { + // catch misc. parse failures: + default: + // this shouldn't happen, unless resolve defaults are off + if (action instanceof Array) { + p = this.constructParseErrorInfo('Parse Error: multiple actions possible at state: ' + state + ', token: ' + symbol, null, null, false); + retval = this.parseError(p.errStr, p, this.JisonParserError); + break; + } + // Another case of better safe than sorry: in case state transitions come out of another error recovery process + // or a buggy LUT (LookUp Table): + p = this.constructParseErrorInfo('Parsing halted. 
No viable error recovery approach available due to internal system failure.', null, null, false); + retval = this.parseError(p.errStr, p, this.JisonParserError); + break; + + // shift: + case 1: + stack[sp] = symbol; + vstack[sp] = lexer.yytext; + + sstack[sp] = newState; // push state + + ++sp; + symbol = 0; + + + // Pick up the lexer details for the current symbol as that one is not 'look-ahead' any more: + + + + + + + + + + + + + + + + continue; + + // reduce: + case 2: + this_production = this.productions_[newState - 1]; // `this.productions_[]` is zero-based indexed while states start from 1 upwards... + yyrulelen = this_production[1]; + + + + + + + + + + + r = this.performAction.call(yyval, newState, sp - 1, vstack); + + if (typeof r !== 'undefined') { + retval = r; + break; + } + + // pop off stack + sp -= yyrulelen; + + // don't overwrite the `symbol` variable: use a local var to speed things up: + var ntsymbol = this_production[0]; // push nonterminal (reduce) + stack[sp] = ntsymbol; + vstack[sp] = yyval.$; + + // goto new state = table[STATE][NONTERMINAL] + newState = table[sstack[sp - 1]][ntsymbol]; + sstack[sp] = newState; + ++sp; + + + + + + + + + + continue; + + // accept: + case 3: + retval = true; + // Return the `$accept` rule's `$$` result, if available. + // + // Also note that JISON always adds this top-most `$accept` rule (with implicit, + // default, action): + // + // $accept: $end + // %{ $$ = $1; @$ = @1; %} + // + // which, combined with the parse kernel's `$accept` state behaviour coded below, + // will produce the `$$` value output of the rule as the parse result, + // IFF that result is *not* `undefined`. (See also the parser kernel code.) + // + // In code: + // + // %{ + // @$ = @1; // if location tracking support is included + // if (typeof $1 !== 'undefined') + // return $1; + // else + // return true; // the default parse result if the rule actions don't produce anything + // %} + sp--; + if (typeof vstack[sp] !== 'undefined') { + retval = vstack[sp]; + } + + break; + } + + // break out of loop: we accept or fail with error + break; + } + } catch (ex) { + // report exceptions through the parseError callback too, but keep the exception intact + // if it is a known parser or lexer error which has been thrown by parseError() already: + if (ex instanceof this.JisonParserError) { + throw ex; + } + else if (lexer && typeof lexer.JisonLexerError === 'function' && ex instanceof lexer.JisonLexerError) { + throw ex; + } + else { + p = this.constructParseErrorInfo('Parsing aborted due to exception.', ex, null, false); + retval = this.parseError(p.errStr, p, this.JisonParserError); + } + } finally { + retval = this.cleanupAfterParse(retval, true, true); + this.__reentrant_call_depth--; + } // /finally + + return retval; +} +}; +parser$1.originalParseError = parser$1.parseError; +parser$1.originalQuoteName = parser$1.quoteName; + + +/* lexer generated by jison-lex 0.6.1-205 */ + +/* + * Returns a Lexer object of the following structure: + * + * Lexer: { + * yy: {} The so-called "shared state" or rather the *source* of it; + * the real "shared state" `yy` passed around to + * the rule actions, etc. is a direct reference! + * + * This "shared context" object was passed to the lexer by way of + * the `lexer.setInput(str, yy)` API before you may use it. 
+ * + * This "shared context" object is passed to the lexer action code in `performAction()` + * so userland code in the lexer actions may communicate with the outside world + * and/or other lexer rules' actions in more or less complex ways. + * + * } + * + * Lexer.prototype: { + * EOF: 1, + * ERROR: 2, + * + * yy: The overall "shared context" object reference. + * + * JisonLexerError: function(msg, hash), + * + * performAction: function lexer__performAction(yy, yyrulenumber, YY_START), + * + * The function parameters and `this` have the following value/meaning: + * - `this` : reference to the `lexer` instance. + * `yy_` is an alias for `this` lexer instance reference used internally. + * + * - `yy` : a reference to the `yy` "shared state" object which was passed to the lexer + * by way of the `lexer.setInput(str, yy)` API before. + * + * Note: + * The extra arguments you specified in the `%parse-param` statement in your + * **parser** grammar definition file are passed to the lexer via this object + * reference as member variables. + * + * - `yyrulenumber` : index of the matched lexer rule (regex), used internally. + * + * - `YY_START`: the current lexer "start condition" state. + * + * parseError: function(str, hash, ExceptionClass), + * + * constructLexErrorInfo: function(error_message, is_recoverable), + * Helper function. + * Produces a new errorInfo 'hash object' which can be passed into `parseError()`. + * See it's use in this lexer kernel in many places; example usage: + * + * var infoObj = lexer.constructParseErrorInfo('fail!', true); + * var retVal = lexer.parseError(infoObj.errStr, infoObj, lexer.JisonLexerError); + * + * options: { ... lexer %options ... }, + * + * lex: function(), + * Produce one token of lexed input, which was passed in earlier via the `lexer.setInput()` API. + * You MAY use the additional `args...` parameters as per `%parse-param` spec of the **lexer** grammar: + * these extra `args...` are added verbatim to the `yy` object reference as member variables. + * + * WARNING: + * Lexer's additional `args...` parameters (via lexer's `%parse-param`) MAY conflict with + * any attributes already added to `yy` by the **parser** or the jison run-time; + * when such a collision is detected an exception is thrown to prevent the generated run-time + * from silently accepting this confusing and potentially hazardous situation! + * + * cleanupAfterLex: function(do_not_nuke_errorinfos), + * Helper function. + * + * This helper API is invoked when the **parse process** has completed: it is the responsibility + * of the **parser** (or the calling userland code) to invoke this method once cleanup is desired. + * + * This helper may be invoked by user code to ensure the internal lexer gets properly garbage collected. 
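+ *
+ *      (Editor's note: an illustrative sketch only, not part of the generated output,
+ *      showing the lexer driven stand-alone through the APIs listed below; the sample
+ *      input string is arbitrary.)
+ *
+ *          lexer.setInput('a (b | c)* d?', {});
+ *          for (var t = lexer.lex(); t !== lexer.EOF; t = lexer.lex()) {
+ *              console.log(t, lexer.yytext);
+ *          }
+ *          lexer.cleanupAfterLex();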
+ * + * setInput: function(input, [yy]), + * + * + * input: function(), + * + * + * unput: function(str), + * + * + * more: function(), + * + * + * reject: function(), + * + * + * less: function(n), + * + * + * pastInput: function(n), + * + * + * upcomingInput: function(n), + * + * + * showPosition: function(), + * + * + * test_match: function(regex_match_array, rule_index), + * + * + * next: function(), + * + * + * begin: function(condition), + * + * + * pushState: function(condition), + * + * + * popState: function(), + * + * + * topState: function(), + * + * + * _currentRules: function(), + * + * + * stateStackSize: function(), + * + * + * performAction: function(yy, yy_, yyrulenumber, YY_START), + * + * + * rules: [...], + * + * + * conditions: {associative list: name ==> set}, + * } + * + * + * token location info (`yylloc`): { + * first_line: n, + * last_line: n, + * first_column: n, + * last_column: n, + * range: [start_number, end_number] + * (where the numbers are indexes into the input string, zero-based) + * } + * + * --- + * + * The `parseError` function receives a 'hash' object with these members for lexer errors: + * + * { + * text: (matched text) + * token: (the produced terminal token, if any) + * token_id: (the produced terminal token numeric ID, if any) + * line: (yylineno) + * loc: (yylloc) + * recoverable: (boolean: TRUE when the parser MAY have an error recovery rule + * available for this particular error) + * yy: (object: the current parser internal "shared state" `yy` + * as is also available in the rule actions; this can be used, + * for instance, for advanced error analysis and reporting) + * lexer: (reference to the current lexer instance used by the parser) + * } + * + * while `this` will reference the current lexer instance. + * + * When `parseError` is invoked by the lexer, the default implementation will + * attempt to invoke `yy.parser.parseError()`; when this callback is not provided + * it will try to invoke `yy.parseError()` instead. When that callback is also not + * provided, a `JisonLexerError` exception will be thrown containing the error + * message and `hash`, as constructed by the `constructLexErrorInfo()` API. + * + * Note that the lexer's `JisonLexerError` error class is passed via the + * `ExceptionClass` argument, which is invoked to construct the exception + * instance to be thrown, so technically `parseError` will throw the object + * produced by the `new ExceptionClass(str, hash)` JavaScript expression. + * + * --- + * + * You can specify lexer options by setting / modifying the `.options` object of your Lexer instance. + * These options are available: + * + * (Options are permanent.) + * + * yy: { + * parseError: function(str, hash, ExceptionClass) + * optional: overrides the default `parseError` function. + * } + * + * lexer.options: { + * pre_lex: function() + * optional: is invoked before the lexer is invoked to produce another token. + * `this` refers to the Lexer object. + * post_lex: function(token) { return token; } + * optional: is invoked when the lexer has produced a token `token`; + * this function can override the returned token value by returning another. + * When it does not return any (truthy) value, the lexer will return + * the original `token`. + * `this` refers to the Lexer object. + * + * WARNING: the next set of options are not meant to be changed. They echo the abilities of + * the lexer as per when it was compiled! + * + * ranges: boolean + * optional: `true` ==> token location info will include a .range[] member. 
+ * flex: boolean + * optional: `true` ==> flex-like lexing behaviour where the rules are tested + * exhaustively to find the longest match. + * backtrack_lexer: boolean + * optional: `true` ==> lexer regexes are tested in order and for invoked; + * the lexer terminates the scan when a token is returned by the action code. + * xregexp: boolean + * optional: `true` ==> lexer rule regexes are "extended regex format" requiring the + * `XRegExp` library. When this %option has not been specified at compile time, all lexer + * rule regexes have been written as standard JavaScript RegExp expressions. + * } + */ + + +var lexer$1 = function() { + /** + * See also: + * http://stackoverflow.com/questions/1382107/whats-a-good-way-to-extend-error-in-javascript/#35881508 + * but we keep the prototype.constructor and prototype.name assignment lines too for compatibility + * with userland code which might access the derived class in a 'classic' way. + * + * @public + * @constructor + * @nocollapse + */ + function JisonLexerError(msg, hash) { + Object.defineProperty(this, 'name', { + enumerable: false, + writable: false, + value: 'JisonLexerError' + }); + + if (msg == null) + msg = '???'; + + Object.defineProperty(this, 'message', { + enumerable: false, + writable: true, + value: msg + }); + + this.hash = hash; + var stacktrace; + + if (hash && hash.exception instanceof Error) { + var ex2 = hash.exception; + this.message = ex2.message || msg; + stacktrace = ex2.stack; + } + + if (!stacktrace) { + if (Error.hasOwnProperty('captureStackTrace')) { + // V8 + Error.captureStackTrace(this, this.constructor); + } else { + stacktrace = new Error(msg).stack; + } + } + + if (stacktrace) { + Object.defineProperty(this, 'stack', { + enumerable: false, + writable: false, + value: stacktrace + }); + } + } + + if (typeof Object.setPrototypeOf === 'function') { + Object.setPrototypeOf(JisonLexerError.prototype, Error.prototype); + } else { + JisonLexerError.prototype = Object.create(Error.prototype); + } + + JisonLexerError.prototype.constructor = JisonLexerError; + JisonLexerError.prototype.name = 'JisonLexerError'; + + var lexer = { + +// Code Generator Information Report +// --------------------------------- +// +// Options: +// +// backtracking: .................... false +// location.ranges: ................. true +// location line+column tracking: ... true +// +// +// Forwarded Parser Analysis flags: +// +// uses yyleng: ..................... false +// uses yylineno: ................... false +// uses yytext: ..................... false +// uses yylloc: ..................... false +// uses lexer values: ............... true / true +// location tracking: ............... false +// location assignment: ............. false +// +// +// Lexer Analysis flags: +// +// uses yyleng: ..................... ??? +// uses yylineno: ................... ??? +// uses yytext: ..................... ??? +// uses yylloc: ..................... ??? +// uses ParseError API: ............. ??? +// uses yyerror: .................... ??? +// uses location tracking & editing: ??? +// uses more() API: ................. ??? +// uses unput() API: ................ ??? +// uses reject() API: ............... ??? +// uses less() API: ................. ??? +// uses display APIs pastInput(), upcomingInput(), showPosition(): +// ............................. ??? +// uses describeYYLLOC() API: ....... ??? 
+// +// --------- END OF REPORT ----------- + +EOF: 1, + ERROR: 2, + + // JisonLexerError: JisonLexerError, /// <-- injected by the code generator + + // options: {}, /// <-- injected by the code generator + + // yy: ..., /// <-- injected by setInput() + + __currentRuleSet__: null, /// INTERNAL USE ONLY: internal rule set cache for the current lexer state + + __error_infos: [], /// INTERNAL USE ONLY: the set of lexErrorInfo objects created since the last cleanup + __decompressed: false, /// INTERNAL USE ONLY: mark whether the lexer instance has been 'unfolded' completely and is now ready for use + done: false, /// INTERNAL USE ONLY + _backtrack: false, /// INTERNAL USE ONLY + _input: '', /// INTERNAL USE ONLY + _more: false, /// INTERNAL USE ONLY + _signaled_error_token: false, /// INTERNAL USE ONLY + conditionStack: [], /// INTERNAL USE ONLY; managed via `pushState()`, `popState()`, `topState()` and `stateStackSize()` + match: '', /// READ-ONLY EXTERNAL ACCESS - ADVANCED USE ONLY: tracks input which has been matched so far for the lexer token under construction. `match` is identical to `yytext` except that this one still contains the matched input string after `lexer.performAction()` has been invoked, where userland code MAY have changed/replaced the `yytext` value entirely! + matched: '', /// READ-ONLY EXTERNAL ACCESS - ADVANCED USE ONLY: tracks entire input which has been matched so far + matches: false, /// READ-ONLY EXTERNAL ACCESS - ADVANCED USE ONLY: tracks RE match result for last (successful) match attempt + yytext: '', /// ADVANCED USE ONLY: tracks input which has been matched so far for the lexer token under construction; this value is transferred to the parser as the 'token value' when the parser consumes the lexer token produced through a call to the `lex()` API. + offset: 0, /// READ-ONLY EXTERNAL ACCESS - ADVANCED USE ONLY: tracks the 'cursor position' in the input string, i.e. the number of characters matched so far + yyleng: 0, /// READ-ONLY EXTERNAL ACCESS - ADVANCED USE ONLY: length of matched input for the token under construction (`yytext`) + yylineno: 0, /// READ-ONLY EXTERNAL ACCESS - ADVANCED USE ONLY: 'line number' at which the token under construction is located + yylloc: null, /// READ-ONLY EXTERNAL ACCESS - ADVANCED USE ONLY: tracks location info (lines + columns) for the token under construction + + /** + * INTERNAL USE: construct a suitable error info hash object instance for `parseError`. 
+ * + * @public + * @this {RegExpLexer} + */ + constructLexErrorInfo: function lexer_constructLexErrorInfo(msg, recoverable, show_input_position) { + msg = '' + msg; + + // heuristic to determine if the error message already contains a (partial) source code dump + // as produced by either `showPosition()` or `prettyPrintRange()`: + if (show_input_position == undefined) { + show_input_position = !(msg.indexOf('\n') > 0 && msg.indexOf('^') > 0); + } + + if (this.yylloc && show_input_position) { + if (typeof this.prettyPrintRange === 'function') { + var pretty_src = this.prettyPrintRange(this.yylloc); + + if (!/\n\s*$/.test(msg)) { + msg += '\n'; + } + + msg += '\n Erroneous area:\n' + this.prettyPrintRange(this.yylloc); + } else if (typeof this.showPosition === 'function') { + var pos_str = this.showPosition(); + + if (pos_str) { + if (msg.length && msg[msg.length - 1] !== '\n' && pos_str[0] !== '\n') { + msg += '\n' + pos_str; + } else { + msg += pos_str; + } + } + } + } + + /** @constructor */ + var pei = { + errStr: msg, + recoverable: !!recoverable, + text: this.match, // This one MAY be empty; userland code should use the `upcomingInput` API to obtain more text which follows the 'lexer cursor position'... + token: null, + line: this.yylineno, + loc: this.yylloc, + yy: this.yy, + lexer: this, + + /** + * and make sure the error info doesn't stay due to potential + * ref cycle via userland code manipulations. + * These would otherwise all be memory leak opportunities! + * + * Note that only array and object references are nuked as those + * constitute the set of elements which can produce a cyclic ref. + * The rest of the members is kept intact as they are harmless. + * + * @public + * @this {LexErrorInfo} + */ + destroy: function destructLexErrorInfo() { + // remove cyclic references added to error info: + // info.yy = null; + // info.lexer = null; + // ... + var rec = !!this.recoverable; + + for (var key in this) { + if (this.hasOwnProperty(key) && typeof key === 'object') { + this[key] = undefined; + } + } + + this.recoverable = rec; + } + }; + + // track this instance so we can `destroy()` it once we deem it superfluous and ready for garbage collection! + this.__error_infos.push(pei); + + return pei; + }, + + /** + * handler which is invoked when a lexer error occurs. + * + * @public + * @this {RegExpLexer} + */ + parseError: function lexer_parseError(str, hash, ExceptionClass) { + if (!ExceptionClass) { + ExceptionClass = this.JisonLexerError; + } + + if (this.yy) { + if (this.yy.parser && typeof this.yy.parser.parseError === 'function') { + return this.yy.parser.parseError.call(this, str, hash, ExceptionClass) || this.ERROR; + } else if (typeof this.yy.parseError === 'function') { + return this.yy.parseError.call(this, str, hash, ExceptionClass) || this.ERROR; + } + } + + throw new ExceptionClass(str, hash); + }, + + /** + * method which implements `yyerror(str, ...args)` functionality for use inside lexer actions. 
+ * + * @public + * @this {RegExpLexer} + */ + yyerror: function yyError(str /*, ...args */) { + var lineno_msg = ''; + + if (this.yylloc) { + lineno_msg = ' on line ' + (this.yylineno + 1); + } + + var p = this.constructLexErrorInfo( + 'Lexical error' + lineno_msg + ': ' + str, + this.options.lexerErrorsAreRecoverable + ); + + // Add any extra args to the hash under the name `extra_error_attributes`: + var args = Array.prototype.slice.call(arguments, 1); + + if (args.length) { + p.extra_error_attributes = args; + } + + return this.parseError(p.errStr, p, this.JisonLexerError) || this.ERROR; + }, + + /** + * final cleanup function for when we have completed lexing the input; + * make it an API so that external code can use this one once userland + * code has decided it's time to destroy any lingering lexer error + * hash object instances and the like: this function helps to clean + * up these constructs, which *may* carry cyclic references which would + * otherwise prevent the instances from being properly and timely + * garbage-collected, i.e. this function helps prevent memory leaks! + * + * @public + * @this {RegExpLexer} + */ + cleanupAfterLex: function lexer_cleanupAfterLex(do_not_nuke_errorinfos) { + // prevent lingering circular references from causing memory leaks: + this.setInput('', {}); + + // nuke the error hash info instances created during this run. + // Userland code must COPY any data/references + // in the error hash instance(s) it is more permanently interested in. + if (!do_not_nuke_errorinfos) { + for (var i = this.__error_infos.length - 1; i >= 0; i--) { + var el = this.__error_infos[i]; + + if (el && typeof el.destroy === 'function') { + el.destroy(); + } + } + + this.__error_infos.length = 0; + } + + return this; + }, + + /** + * clear the lexer token context; intended for internal use only + * + * @public + * @this {RegExpLexer} + */ + clear: function lexer_clear() { + this.yytext = ''; + this.yyleng = 0; + this.match = ''; + + // - DO NOT reset `this.matched` + this.matches = false; + + this._more = false; + this._backtrack = false; + var col = (this.yylloc ? this.yylloc.last_column : 0); + + this.yylloc = { + first_line: this.yylineno + 1, + first_column: col, + last_line: this.yylineno + 1, + last_column: col, + range: [this.offset, this.offset] + }; + }, + + /** + * resets the lexer, sets new input + * + * @public + * @this {RegExpLexer} + */ + setInput: function lexer_setInput(input, yy) { + this.yy = yy || this.yy || {}; + + // also check if we've fully initialized the lexer instance, + // including expansion work to be done to go from a loaded + // lexer to a usable lexer: + if (!this.__decompressed) { + // step 1: decompress the regex list: + var rules = this.rules; + + for (var i = 0, len = rules.length; i < len; i++) { + var rule_re = rules[i]; + + // compression: is the RE an xref to another RE slot in the rules[] table? + if (typeof rule_re === 'number') { + rules[i] = rules[rule_re]; + } + } + + // step 2: unfold the conditions[] set to make these ready for use: + var conditions = this.conditions; + + for (var k in conditions) { + var spec = conditions[k]; + var rule_ids = spec.rules; + var len = rule_ids.length; + var rule_regexes = new Array(len + 1); // slot 0 is unused; we use a 1-based index approach here to keep the hottest code in `lexer_next()` fast and simple! 
+ var rule_new_ids = new Array(len + 1); + + for (var i = 0; i < len; i++) { + var idx = rule_ids[i]; + var rule_re = rules[idx]; + rule_regexes[i + 1] = rule_re; + rule_new_ids[i + 1] = idx; + } + + spec.rules = rule_new_ids; + spec.__rule_regexes = rule_regexes; + spec.__rule_count = len; + } + + this.__decompressed = true; + } + + this._input = input || ''; + this.clear(); + this._signaled_error_token = false; + this.done = false; + this.yylineno = 0; + this.matched = ''; + this.conditionStack = ['INITIAL']; + this.__currentRuleSet__ = null; + + this.yylloc = { + first_line: 1, + first_column: 0, + last_line: 1, + last_column: 0, + range: [0, 0] + }; + + this.offset = 0; + return this; + }, + + /** + * edit the remaining input via user-specified callback. + * This can be used to forward-adjust the input-to-parse, + * e.g. inserting macro expansions and alike in the + * input which has yet to be lexed. + * The behaviour of this API contrasts the `unput()` et al + * APIs as those act on the *consumed* input, while this + * one allows one to manipulate the future, without impacting + * the current `yyloc` cursor location or any history. + * + * Use this API to help implement C-preprocessor-like + * `#include` statements, etc. + * + * The provided callback must be synchronous and is + * expected to return the edited input (string). + * + * The `cpsArg` argument value is passed to the callback + * as-is. + * + * `callback` interface: + * `function callback(input, cpsArg)` + * + * - `input` will carry the remaining-input-to-lex string + * from the lexer. + * - `cpsArg` is `cpsArg` passed into this API. + * + * The `this` reference for the callback will be set to + * reference this lexer instance so that userland code + * in the callback can easily and quickly access any lexer + * API. + * + * When the callback returns a non-string-type falsey value, + * we assume the callback did not edit the input and we + * will using the input as-is. + * + * When the callback returns a non-string-type value, it + * is converted to a string for lexing via the `"" + retval` + * operation. (See also why: http://2ality.com/2012/03/converting-to-string.html + * -- that way any returned object's `toValue()` and `toString()` + * methods will be invoked in a proper/desirable order.) + * + * @public + * @this {RegExpLexer} + */ + editRemainingInput: function lexer_editRemainingInput(callback, cpsArg) { + var rv = callback.call(this, this._input, cpsArg); + + if (typeof rv !== 'string') { + if (rv) { + this._input = '' + rv; + } + // else: keep `this._input` as is. + } else { + this._input = rv; + } + + return this; + }, + + /** + * consumes and returns one char from the input + * + * @public + * @this {RegExpLexer} + */ + input: function lexer_input() { + if (!this._input) { + //this.done = true; -- don't set `done` as we want the lex()/next() API to be able to produce one custom EOF token match after this anyhow. (lexer can match special <> tokens and perform user action code for a <> match, but only does so *once*) + return null; + } + + var ch = this._input[0]; + this.yytext += ch; + this.yyleng++; + this.offset++; + this.match += ch; + this.matched += ch; + + // Count the linenumber up when we hit the LF (or a stand-alone CR). + // On CRLF, the linenumber is incremented when you fetch the CR or the CRLF combo + // and we advance immediately past the LF as well, returning both together as if + // it was all a single 'character' only. 
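+            // Worked example (illustrative): with remaining input '\r\nfoo', the code below
+            // returns '\r\n' as a single unit, increments `yylineno` once and resets
+            // `yylloc.last_column` to 0; a lone '\n' or '\r' triggers the same single
+            // line-count step.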
+ var slice_len = 1; + + var lines = false; + + if (ch === '\n') { + lines = true; + } else if (ch === '\r') { + lines = true; + var ch2 = this._input[1]; + + if (ch2 === '\n') { + slice_len++; + ch += ch2; + this.yytext += ch2; + this.yyleng++; + this.offset++; + this.match += ch2; + this.matched += ch2; + this.yylloc.range[1]++; + } + } + + if (lines) { + this.yylineno++; + this.yylloc.last_line++; + this.yylloc.last_column = 0; + } else { + this.yylloc.last_column++; + } + + this.yylloc.range[1]++; + this._input = this._input.slice(slice_len); + return ch; + }, + + /** + * unshifts one char (or an entire string) into the input + * + * @public + * @this {RegExpLexer} + */ + unput: function lexer_unput(ch) { + var len = ch.length; + var lines = ch.split(/(?:\r\n?|\n)/g); + this._input = ch + this._input; + this.yytext = this.yytext.substr(0, this.yytext.length - len); + this.yyleng = this.yytext.length; + this.offset -= len; + this.match = this.match.substr(0, this.match.length - len); + this.matched = this.matched.substr(0, this.matched.length - len); + + if (lines.length > 1) { + this.yylineno -= lines.length - 1; + this.yylloc.last_line = this.yylineno + 1; + + // Get last entirely matched line into the `pre_lines[]` array's + // last index slot; we don't mind when other previously + // matched lines end up in the array too. + var pre = this.match; + + var pre_lines = pre.split(/(?:\r\n?|\n)/g); + + if (pre_lines.length === 1) { + pre = this.matched; + pre_lines = pre.split(/(?:\r\n?|\n)/g); + } + + this.yylloc.last_column = pre_lines[pre_lines.length - 1].length; + } else { + this.yylloc.last_column -= len; + } + + this.yylloc.range[1] = this.yylloc.range[0] + this.yyleng; + this.done = false; + return this; + }, + + /** + * cache matched text and append it on next action + * + * @public + * @this {RegExpLexer} + */ + more: function lexer_more() { + this._more = true; + return this; + }, + + /** + * signal the lexer that this rule fails to match the input, so the + * next matching rule (regex) should be tested instead. + * + * @public + * @this {RegExpLexer} + */ + reject: function lexer_reject() { + if (this.options.backtrack_lexer) { + this._backtrack = true; + } else { + // when the `parseError()` call returns, we MUST ensure that the error is registered. + // We accomplish this by signaling an 'error' token to be produced for the current + // `.lex()` run. + var lineno_msg = ''; + + if (this.yylloc) { + lineno_msg = ' on line ' + (this.yylineno + 1); + } + + var p = this.constructLexErrorInfo( + 'Lexical error' + lineno_msg + ': You can only invoke reject() in the lexer when the lexer is of the backtracking persuasion (options.backtrack_lexer = true).', + false + ); + + this._signaled_error_token = this.parseError(p.errStr, p, this.JisonLexerError) || this.ERROR; + } + + return this; + }, + + /** + * retain first n characters of the match + * + * @public + * @this {RegExpLexer} + */ + less: function lexer_less(n) { + return this.unput(this.match.slice(n)); + }, + + /** + * return (part of the) already matched input, i.e. for error + * messages. + * + * Limit the returned string length to `maxSize` (default: 20). + * + * Limit the returned string to the `maxLines` number of lines of + * input (default: 1). + * + * Negative limit values equal *unlimited*. 
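+     *
+     * Rough usage sketch (values illustrative): when the already-consumed input
+     * (excluding the match currently in progress) ends in `'alpha beta gamma'`,
+     * `this.pastInput(10, 1)` returns `'...beta gamma'` -- the tail of that input,
+     * clipped to the last ~10 characters on a single line and prefixed with `'...'`
+     * to signal the clipping.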
+ * + * @public + * @this {RegExpLexer} + */ + pastInput: function lexer_pastInput(maxSize, maxLines) { + var past = this.matched.substring(0, this.matched.length - this.match.length); + + if (maxSize < 0) + maxSize = past.length; + else if (!maxSize) + maxSize = 20; + + if (maxLines < 0) + maxLines = past.length; // can't ever have more input lines than this! + else if (!maxLines) + maxLines = 1; + + // `substr` anticipation: treat \r\n as a single character and take a little + // more than necessary so that we can still properly check against maxSize + // after we've transformed and limited the newLines in here: + past = past.substr(-maxSize * 2 - 2); + + // now that we have a significantly reduced string to process, transform the newlines + // and chop them, then limit them: + var a = past.replace(/\r\n|\r/g, '\n').split('\n'); + + a = a.slice(-maxLines); + past = a.join('\n'); + + // When, after limiting to maxLines, we still have too much to return, + // do add an ellipsis prefix... + if (past.length > maxSize) { + past = '...' + past.substr(-maxSize); + } + + return past; + }, + + /** + * return (part of the) upcoming input, i.e. for error messages. + * + * Limit the returned string length to `maxSize` (default: 20). + * + * Limit the returned string to the `maxLines` number of lines of input (default: 1). + * + * Negative limit values equal *unlimited*. + * + * > ### NOTE ### + * > + * > *"upcoming input"* is defined as the whole of the both + * > the *currently lexed* input, together with any remaining input + * > following that. *"currently lexed"* input is the input + * > already recognized by the lexer but not yet returned with + * > the lexer token. This happens when you are invoking this API + * > from inside any lexer rule action code block. + * > + * + * @public + * @this {RegExpLexer} + */ + upcomingInput: function lexer_upcomingInput(maxSize, maxLines) { + var next = this.match; + + if (maxSize < 0) + maxSize = next.length + this._input.length; + else if (!maxSize) + maxSize = 20; + + if (maxLines < 0) + maxLines = maxSize; // can't ever have more input lines than this! + else if (!maxLines) + maxLines = 1; + + // `substring` anticipation: treat \r\n as a single character and take a little + // more than necessary so that we can still properly check against maxSize + // after we've transformed and limited the newLines in here: + if (next.length < maxSize * 2 + 2) { + next += this._input.substring(0, maxSize * 2 + 2); // substring is faster on Chrome/V8 + } + + // now that we have a significantly reduced string to process, transform the newlines + // and chop them, then limit them: + var a = next.replace(/\r\n|\r/g, '\n').split('\n'); + + a = a.slice(0, maxLines); + next = a.join('\n'); + + // When, after limiting to maxLines, we still have too much to return, + // do add an ellipsis postfix... + if (next.length > maxSize) { + next = next.substring(0, maxSize) + '...'; + } + + return next; + }, + + /** + * return a string which displays the character position where the + * lexing error occurred, i.e. 
for error messages + * + * @public + * @this {RegExpLexer} + */ + showPosition: function lexer_showPosition(maxPrefix, maxPostfix) { + var pre = this.pastInput(maxPrefix).replace(/\s/g, ' '); + var c = new Array(pre.length + 1).join('-'); + return pre + this.upcomingInput(maxPostfix).replace(/\s/g, ' ') + '\n' + c + '^'; + }, + + /** + * return a string which displays the lines & columns of input which are referenced + * by the given location info range, plus a few lines of context. + * + * This function pretty-prints the indicated section of the input, with line numbers + * and everything! + * + * This function is very useful to provide highly readable error reports, while + * the location range may be specified in various flexible ways: + * + * - `loc` is the location info object which references the area which should be + * displayed and 'marked up': these lines & columns of text are marked up by `^` + * characters below each character in the entire input range. + * + * - `context_loc` is the *optional* location info object which instructs this + * pretty-printer how much *leading* context should be displayed alongside + * the area referenced by `loc`. This can help provide context for the displayed + * error, etc. + * + * When this location info is not provided, a default context of 3 lines is + * used. + * + * - `context_loc2` is another *optional* location info object, which serves + * a similar purpose to `context_loc`: it specifies the amount of *trailing* + * context lines to display in the pretty-print output. + * + * When this location info is not provided, a default context of 1 line only is + * used. + * + * Special Notes: + * + * - when the `loc`-indicated range is very large (about 5 lines or more), then + * only the first and last few lines of this block are printed while a + * `...continued...` message will be printed between them. + * + * This serves the purpose of not printing a huge amount of text when the `loc` + * range happens to be huge: this way a manageable & readable output results + * for arbitrary large ranges. + * + * - this function can display lines of input which whave not yet been lexed. + * `prettyPrintRange()` can access the entire input! + * + * @public + * @this {RegExpLexer} + */ + prettyPrintRange: function lexer_prettyPrintRange(loc, context_loc, context_loc2) { + const CONTEXT = 3; + const CONTEXT_TAIL = 1; + const MINIMUM_VISIBLE_NONEMPTY_LINE_COUNT = 2; + var input = this.matched + this._input; + var lines = input.split('\n'); + + //var show_context = (error_size < 5 || context_loc); + var l0 = Math.max(1, (context_loc ? context_loc.first_line : loc.first_line - CONTEXT)); + + var l1 = Math.max(1, (context_loc2 ? context_loc2.last_line : loc.last_line + CONTEXT_TAIL)); + var lineno_display_width = 1 + Math.log10(l1 | 1) | 0; + var ws_prefix = new Array(lineno_display_width).join(' '); + var nonempty_line_indexes = []; + + var rv = lines.slice(l0 - 1, l1 + 1).map(function injectLineNumber(line, index) { + var lno = index + l0; + var lno_pfx = (ws_prefix + lno).substr(-lineno_display_width); + var rv = lno_pfx + ': ' + line; + var errpfx = new Array(lineno_display_width + 1).join('^'); + var offset = 2 + 1; + var len = 0; + + if (lno === loc.first_line) { + offset += loc.first_column; + + len = Math.max( + 2, + ((lno === loc.last_line ? 
loc.last_column : line.length)) - loc.first_column + 1 + ); + } else if (lno === loc.last_line) { + len = Math.max(2, loc.last_column + 1); + } else if (lno > loc.first_line && lno < loc.last_line) { + len = Math.max(2, line.length + 1); + } + + if (len) { + var lead = new Array(offset).join('.'); + var mark = new Array(len).join('^'); + rv += '\n' + errpfx + lead + mark; + + if (line.trim().length > 0) { + nonempty_line_indexes.push(index); + } + } + + rv = rv.replace(/\t/g, ' '); + return rv; + }); + + // now make sure we don't print an overly large amount of error area: limit it + // to the top and bottom line count: + if (nonempty_line_indexes.length > 2 * MINIMUM_VISIBLE_NONEMPTY_LINE_COUNT) { + var clip_start = nonempty_line_indexes[MINIMUM_VISIBLE_NONEMPTY_LINE_COUNT - 1] + 1; + var clip_end = nonempty_line_indexes[nonempty_line_indexes.length - MINIMUM_VISIBLE_NONEMPTY_LINE_COUNT] - 1; + var intermediate_line = new Array(lineno_display_width + 1).join(' ') + ' (...continued...)'; + intermediate_line += '\n' + new Array(lineno_display_width + 1).join('-') + ' (---------------)'; + rv.splice(clip_start, clip_end - clip_start + 1, intermediate_line); + } + + return rv.join('\n'); + }, + + /** + * helper function, used to produce a human readable description as a string, given + * the input `yylloc` location object. + * + * Set `display_range_too` to TRUE to include the string character index position(s) + * in the description if the `yylloc.range` is available. + * + * @public + * @this {RegExpLexer} + */ + describeYYLLOC: function lexer_describe_yylloc(yylloc, display_range_too) { + var l1 = yylloc.first_line; + var l2 = yylloc.last_line; + var c1 = yylloc.first_column; + var c2 = yylloc.last_column; + var dl = l2 - l1; + var dc = c2 - c1; + var rv; + + if (dl === 0) { + rv = 'line ' + l1 + ', '; + + if (dc <= 1) { + rv += 'column ' + c1; + } else { + rv += 'columns ' + c1 + ' .. ' + c2; + } + } else { + rv = 'lines ' + l1 + '(column ' + c1 + ') .. ' + l2 + '(column ' + c2 + ')'; + } + + if (yylloc.range && display_range_too) { + var r1 = yylloc.range[0]; + var r2 = yylloc.range[1] - 1; + + if (r2 <= r1) { + rv += ' {String Offset: ' + r1 + '}'; + } else { + rv += ' {String Offset range: ' + r1 + ' .. ' + r2 + '}'; + } + } + + return rv; + }, + + /** + * test the lexed token: return FALSE when not a match, otherwise return token. + * + * `match` is supposed to be an array coming out of a regex match, i.e. `match[0]` + * contains the actually matched text string. 
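+     *
+     * (In this kernel such a `match` array is produced by `next()` via
+     * `this._input.match(regexes[i])`, so `match[1]`, `match[2]`, ... carry any
+     * capture groups defined in the lexer rule's regex.)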
+ * + * Also move the input cursor forward and update the match collectors: + * + * - `yytext` + * - `yyleng` + * - `match` + * - `matches` + * - `yylloc` + * - `offset` + * + * @public + * @this {RegExpLexer} + */ + test_match: function lexer_test_match(match, indexed_rule) { + var token, lines, backup, match_str, match_str_len; + + if (this.options.backtrack_lexer) { + // save context + backup = { + yylineno: this.yylineno, + + yylloc: { + first_line: this.yylloc.first_line, + last_line: this.yylloc.last_line, + first_column: this.yylloc.first_column, + last_column: this.yylloc.last_column, + range: this.yylloc.range.slice(0) + }, + + yytext: this.yytext, + match: this.match, + matches: this.matches, + matched: this.matched, + yyleng: this.yyleng, + offset: this.offset, + _more: this._more, + _input: this._input, + + //_signaled_error_token: this._signaled_error_token, + yy: this.yy, + + conditionStack: this.conditionStack.slice(0), + done: this.done + }; + } + + match_str = match[0]; + match_str_len = match_str.length; + + // if (match_str.indexOf('\n') !== -1 || match_str.indexOf('\r') !== -1) { + lines = match_str.split(/(?:\r\n?|\n)/g); + + if (lines.length > 1) { + this.yylineno += lines.length - 1; + this.yylloc.last_line = this.yylineno + 1; + this.yylloc.last_column = lines[lines.length - 1].length; + } else { + this.yylloc.last_column += match_str_len; + } + + // } + this.yytext += match_str; + + this.match += match_str; + this.matched += match_str; + this.matches = match; + this.yyleng = this.yytext.length; + this.yylloc.range[1] += match_str_len; + + // previous lex rules MAY have invoked the `more()` API rather than producing a token: + // those rules will already have moved this `offset` forward matching their match lengths, + // hence we must only add our own match length now: + this.offset += match_str_len; + + this._more = false; + this._backtrack = false; + this._input = this._input.slice(match_str_len); + + // calling this method: + // + // function lexer__performAction(yy, yyrulenumber, YY_START) {...} + token = this.performAction.call( + this, + this.yy, + indexed_rule, + this.conditionStack[this.conditionStack.length - 1] /* = YY_START */ + ); + + // otherwise, when the action codes are all simple return token statements: + //token = this.simpleCaseActionClusters[indexed_rule]; + + if (this.done && this._input) { + this.done = false; + } + + if (token) { + return token; + } else if (this._backtrack) { + // recover context + for (var k in backup) { + this[k] = backup[k]; + } + + this.__currentRuleSet__ = null; + return false; // rule action called reject() implying the next rule should be tested instead. + } else if (this._signaled_error_token) { + // produce one 'error' token as `.parseError()` in `reject()` + // did not guarantee a failure signal by throwing an exception! + token = this._signaled_error_token; + + this._signaled_error_token = false; + return token; + } + + return false; + }, + + /** + * return next match in input + * + * @public + * @this {RegExpLexer} + */ + next: function lexer_next() { + if (this.done) { + this.clear(); + return this.EOF; + } + + if (!this._input) { + this.done = true; + } + + var token, match, tempMatch, index; + + if (!this._more) { + this.clear(); + } + + var spec = this.__currentRuleSet__; + + if (!spec) { + // Update the ruleset cache as we apparently encountered a state change or just started lexing. 
+ // The cache is set up for fast lookup -- we assume a lexer will switch states much less often than it will + // invoke the `lex()` token-producing API and related APIs, hence caching the set for direct access helps + // speed up those activities a tiny bit. + spec = this.__currentRuleSet__ = this._currentRules(); + + // Check whether a *sane* condition has been pushed before: this makes the lexer robust against + // user-programmer bugs such as https://github.com/zaach/jison-lex/issues/19 + if (!spec || !spec.rules) { + var lineno_msg = ''; + + if (this.options.trackPosition) { + lineno_msg = ' on line ' + (this.yylineno + 1); + } + + var p = this.constructLexErrorInfo( + 'Internal lexer engine error' + lineno_msg + ': The lex grammar programmer pushed a non-existing condition name "' + this.topState() + '"; this is a fatal error and should be reported to the application programmer team!', + false + ); + + // produce one 'error' token until this situation has been resolved, most probably by parse termination! + return this.parseError(p.errStr, p, this.JisonLexerError) || this.ERROR; + } + } + + var rule_ids = spec.rules; + var regexes = spec.__rule_regexes; + var len = spec.__rule_count; + + // Note: the arrays are 1-based, while `len` itself is a valid index, + // hence the non-standard less-or-equal check in the next loop condition! + for (var i = 1; i <= len; i++) { + tempMatch = this._input.match(regexes[i]); + + if (tempMatch && (!match || tempMatch[0].length > match[0].length)) { + match = tempMatch; + index = i; + + if (this.options.backtrack_lexer) { + token = this.test_match(tempMatch, rule_ids[i]); + + if (token !== false) { + return token; + } else if (this._backtrack) { + match = undefined; + continue; // rule action called reject() implying a rule MISmatch. + } else { + // else: this is a lexer rule which consumes input without producing a token (e.g. whitespace) + return false; + } + } else if (!this.options.flex) { + break; + } + } + } + + if (match) { + token = this.test_match(match, rule_ids[index]); + + if (token !== false) { + return token; + } + + // else: this is a lexer rule which consumes input without producing a token (e.g. whitespace) + return false; + } + + if (!this._input) { + this.done = true; + this.clear(); + return this.EOF; + } else { + var lineno_msg = ''; + + if (this.options.trackPosition) { + lineno_msg = ' on line ' + (this.yylineno + 1); + } + + var p = this.constructLexErrorInfo( + 'Lexical error' + lineno_msg + ': Unrecognized text.', + this.options.lexerErrorsAreRecoverable + ); + + var pendingInput = this._input; + var activeCondition = this.topState(); + var conditionStackDepth = this.conditionStack.length; + token = this.parseError(p.errStr, p, this.JisonLexerError) || this.ERROR; + + if (token === this.ERROR) { + // we can try to recover from a lexer error that `parseError()` did not 'recover' for us + // by moving forward at least one character at a time IFF the (user-specified?) `parseError()` + // has not consumed/modified any pending input or changed state in the error handler: + if (!this.matches && // and make sure the input has been modified/consumed ... + pendingInput === this._input && // ...or the lexer state has been modified significantly enough + // to merit a non-consuming error handling action right now. 
+ activeCondition === this.topState() && conditionStackDepth === this.conditionStack.length) { + this.input(); + } + } + + return token; + } + }, + + /** + * return next match that has a token + * + * @public + * @this {RegExpLexer} + */ + lex: function lexer_lex() { + var r; + + // allow the PRE/POST handlers set/modify the return token for maximum flexibility of the generated lexer: + if (typeof this.options.pre_lex === 'function') { + r = this.options.pre_lex.call(this); + } + + while (!r) { + r = this.next(); + } + + if (typeof this.options.post_lex === 'function') { + // (also account for a userdef function which does not return any value: keep the token as is) + r = this.options.post_lex.call(this, r) || r; + } + + return r; + }, + + /** + * backwards compatible alias for `pushState()`; + * the latter is symmetrical with `popState()` and we advise to use + * those APIs in any modern lexer code, rather than `begin()`. + * + * @public + * @this {RegExpLexer} + */ + begin: function lexer_begin(condition) { + return this.pushState(condition); + }, + + /** + * activates a new lexer condition state (pushes the new lexer + * condition state onto the condition stack) + * + * @public + * @this {RegExpLexer} + */ + pushState: function lexer_pushState(condition) { + this.conditionStack.push(condition); + this.__currentRuleSet__ = null; + return this; + }, + + /** + * pop the previously active lexer condition state off the condition + * stack + * + * @public + * @this {RegExpLexer} + */ + popState: function lexer_popState() { + var n = this.conditionStack.length - 1; + + if (n > 0) { + this.__currentRuleSet__ = null; + return this.conditionStack.pop(); + } else { + return this.conditionStack[0]; + } + }, + + /** + * return the currently active lexer condition state; when an index + * argument is provided it produces the N-th previous condition state, + * if available + * + * @public + * @this {RegExpLexer} + */ + topState: function lexer_topState(n) { + n = this.conditionStack.length - 1 - Math.abs(n || 0); + + if (n >= 0) { + return this.conditionStack[n]; + } else { + return 'INITIAL'; + } + }, + + /** + * (internal) determine the lexer rule set which is active for the + * currently active lexer condition state + * + * @public + * @this {RegExpLexer} + */ + _currentRules: function lexer__currentRules() { + if (this.conditionStack.length && this.conditionStack[this.conditionStack.length - 1]) { + return this.conditions[this.conditionStack[this.conditionStack.length - 1]]; + } else { + return this.conditions['INITIAL']; + } + }, + + /** + * return the number of states currently on the stack + * + * @public + * @this {RegExpLexer} + */ + stateStackSize: function lexer_stateStackSize() { + return this.conditionStack.length; + }, + + options: { + xregexp: true, + ranges: true, + trackPosition: true, + easy_keyword_rules: true + }, + + JisonLexerError: JisonLexerError, + + performAction: function lexer__performAction(yy, yyrulenumber, YY_START) { + var yy_ = this; + switch (yyrulenumber) { + case 0: + /*! Conditions:: INITIAL */ + /*! Rule:: \s+ */ + /* skip whitespace */ + break; + + case 3: + /*! Conditions:: INITIAL */ + /*! Rule:: \[{ID}\] */ + yy_.yytext = this.matches[1]; + + return 9; + break; + + default: + return this.simpleCaseActionClusters[yyrulenumber]; + } + }, + + simpleCaseActionClusters: { + /*! Conditions:: INITIAL */ + /*! Rule:: {ID} */ + 1: 10, + + /*! Conditions:: INITIAL */ + /*! Rule:: \$end\b */ + 2: 10, + + /*! Conditions:: INITIAL */ + /*! 
Rule:: '{QUOTED_STRING_CONTENT}' */ + 4: 10, + + /*! Conditions:: INITIAL */ + /*! Rule:: "{DOUBLEQUOTED_STRING_CONTENT}" */ + 5: 10, + + /*! Conditions:: INITIAL */ + /*! Rule:: \. */ + 6: 10, + + /*! Conditions:: INITIAL */ + /*! Rule:: \( */ + 7: 4, + + /*! Conditions:: INITIAL */ + /*! Rule:: \) */ + 8: 5, + + /*! Conditions:: INITIAL */ + /*! Rule:: \* */ + 9: 6, + + /*! Conditions:: INITIAL */ + /*! Rule:: \? */ + 10: 7, + + /*! Conditions:: INITIAL */ + /*! Rule:: \| */ + 11: 3, + + /*! Conditions:: INITIAL */ + /*! Rule:: \+ */ + 12: 8, + + /*! Conditions:: INITIAL */ + /*! Rule:: $ */ + 13: 1 + }, + + rules: [ + /* 0: */ /^(?:\s+)/, + /* 1: */ new XRegExp('^(?:([\\p{Alphabetic}_](?:[\\p{Alphabetic}\\p{Number}_])*))', ''), + /* 2: */ /^(?:\$end\b)/, + /* 3: */ new XRegExp('^(?:\\[([\\p{Alphabetic}_](?:[\\p{Alphabetic}\\p{Number}_])*)\\])', ''), + /* 4: */ /^(?:'((?:\\'|\\[^']|[^'\\])*)')/, + /* 5: */ /^(?:"((?:\\"|\\[^"]|[^"\\])*)")/, + /* 6: */ /^(?:\.)/, + /* 7: */ /^(?:\()/, + /* 8: */ /^(?:\))/, + /* 9: */ /^(?:\*)/, + /* 10: */ /^(?:\?)/, + /* 11: */ /^(?:\|)/, + /* 12: */ /^(?:\+)/, + /* 13: */ /^(?:$)/ + ], + + conditions: { + 'INITIAL': { + rules: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13], + inclusive: true + } + } + }; + + return lexer; +}(); +parser$1.lexer = lexer$1; + +function Parser$1() { + this.yy = {}; +} +Parser$1.prototype = parser$1; +parser$1.Parser = Parser$1; + +function yyparse$1() { + return parser$1.parse.apply(parser$1, arguments); +} + + + +var parser$2 = { + parser: parser$1, + Parser: Parser$1, + parse: yyparse$1, + +}; + +//import assert from 'assert'; + +var devDebug = 0; + +// WARNING: this regex MUST match the regex for `ID` in ebnf-parser::bnf.l jison language lexer spec! (`ID = [{ALPHA}]{ALNUM}*`) +// +// This is the base XRegExp ID regex used in many places; this should match the ID macro definition in the EBNF/BNF parser et al as well! +const ID_REGEX_BASE = '[\\p{Alphabetic}_][\\p{Alphabetic}_\\p{Number}]*'; + +// produce a unique production symbol. +// Use this to produce rule productions from transformed EBNF which are +// guaranteed not to collide with previously generated / already existing +// rules (~ symbols). +function generateUniqueSymbol(id, postfix, opts) { + var sym = id + postfix; + if (opts.grammar[sym]) { + var i = 2; // the first occurrence won't have a number, this is already a collision, so start numbering at *2*. + do { + sym = id + postfix + i; + i++; + } while (opts.grammar[sym]); + } + return sym; +} + +function generatePushAction(handle, offset) { + var terms = handle.terms; + var rv = []; + + for (var i = 0, len = terms.length; i < len; i++) { + rv.push('$' + (i + offset)); + } + rv = rv.join(', '); + // and make sure we contain a term series unambiguously, i.e. anything more complex than + // a single term inside an EBNF check is produced as an array so we can differentiate + // between */+/? EBNF operator results and groups of tokens per individual match. + if (len > 1) { + rv = '[' + rv + ']'; + } + return rv; +} + +function transformExpression(e, opts, emit) { + var type = e[0], + value = e[1], + name = false, + has_transformed = 0; + var list, n; + + if (type === 'xalias') { + type = e[1]; + value = e[2]; + name = e[3]; + if (type) { + e = e.slice(1); + } else { + e = value; + type = e[0]; + value = e[1]; + } + if (devDebug > 3) console.log('xalias: ', e, type, value, name); + } + + if (type === 'symbol') { + n = e[1]; + if (devDebug > 2) console.log('symbol EMIT: ', n + (name ? 
'[' + name + ']' : '')); + emit(n + (name ? '[' + name + ']' : '')); + } else if (type === '+') { + if (!name) { + name = generateUniqueSymbol(opts.production, '_repetition_plus', opts); + } + if (devDebug > 2) console.log('+ EMIT name: ', name); + emit(name); + + has_transformed = 1; + + opts = optsForProduction(name, opts.grammar); + list = transformExpressionList([value], opts); + opts.grammar[name] = [ + [ + list.fragment, + '$$ = [' + generatePushAction(list, 1) + '];' + ], + [ + name + ' ' + list.fragment, + '$1.push(' + generatePushAction(list, 2) + ');\n$$ = $1;' + ] + ]; + } else if (type === '*') { + if (!name) { + name = generateUniqueSymbol(opts.production, '_repetition', opts); + } + if (devDebug > 2) console.log('* EMIT name: ', name); + emit(name); + + has_transformed = 1; + + opts = optsForProduction(name, opts.grammar); + list = transformExpressionList([value], opts); + opts.grammar[name] = [ + [ + '', + '$$ = [];' + ], + [ + name + ' ' + list.fragment, + '$1.push(' + generatePushAction(list, 2) + ');\n$$ = $1;' + ] + ]; + } else if (type === '?') { + if (!name) { + name = generateUniqueSymbol(opts.production, '_option', opts); + } + if (devDebug > 2) console.log('? EMIT name: ', name); + emit(name); + + has_transformed = 1; + + opts = optsForProduction(name, opts.grammar); + list = transformExpressionList([value], opts); + // you want to be able to check if 0 or 1 occurrences were recognized: since jison + // by default *copies* the lexer token value, i.e. `$$ = $1` is the (optional) default action, + // we will need to set the action up explicitly in case of the 0-count match: + // `$$ = undefined`. + // + // Note that we MUST return an array as the + // '1 occurrence' match CAN carry multiple terms, e.g. in constructs like + // `(T T T)?`, which would otherwise be unrecognizable from the `T*` construct. 
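+        // Illustrative sketch of the rewrite performed below (helper rule name produced
+        // by `generateUniqueSymbol()`, shown here as `rule_option`): the EBNF fragment
+        // `A?` becomes roughly
+        //
+        //     rule_option : /* empty */   { $$ = undefined; }
+        //                 | A             { $$ = $1; }
+        //
+        // while a multi-term group such as `(A B)?` gets `$$ = [$1, $2]` in the
+        // non-empty alternative, so action code can tell 0 matches from 1 match.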
+ opts.grammar[name] = [ + [ + '', + '$$ = undefined;' + ], + [ + list.fragment, + '$$ = ' + generatePushAction(list, 1) + ';' + ] + ]; + } else if (type === '()') { + if (value.length === 1 && !name) { + list = transformExpressionList(value[0], opts); + if (list.first_transformed_term_index) { + has_transformed = list.first_transformed_term_index; + } + if (devDebug > 2) console.log('group EMIT len=1: ', list); + emit(list); + } else { + if (!name) { + name = generateUniqueSymbol(opts.production, '_group', opts); + } + if (devDebug > 2) console.log('group EMIT name: ', name); + emit(name); + + has_transformed = 1; + + opts = optsForProduction(name, opts.grammar); + opts.grammar[name] = value.map(function (handle) { + var list = transformExpressionList(handle, opts); + return [ + list.fragment, + '$$ = ' + generatePushAction(list, 1) + ';' + ]; + }); + } + } + + return has_transformed; +} + +function transformExpressionList(list, opts) { + var first_transformed_term_index = false; + var terms = list.reduce(function (tot, e) { + var ci = tot.length; + + var has_transformed = transformExpression(e, opts, function (name) { + if (name.terms) { + tot.push.apply(tot, name.terms); + } else { + tot.push(name); + } + }); + + if (has_transformed) { + first_transformed_term_index = ci + has_transformed; + } + return tot; + }, []); + + return { + fragment: terms.join(' '), + terms: terms, + first_transformed_term_index: first_transformed_term_index // 1-based index + }; +} + +function optsForProduction(id, grammar) { + return { + production: id, + grammar: grammar + }; +} + +function transformProduction(id, production, grammar) { + var transform_opts = optsForProduction(id, grammar); + return production.map(function (handle) { + var action = null, + opts = null; + var i, len, n; + + if (typeof handle !== 'string') { + action = handle[1]; + opts = handle[2]; + handle = handle[0]; + } + var expressions = parser$2.parse(handle); + + if (devDebug > 1) console.log('\n================\nEBNF transform expressions:\n ', handle, opts, JSON.stringify(expressions, null, 2)); + + var list = transformExpressionList(expressions, transform_opts); + + var ret = [list.fragment]; + if (action) { + // make sure the action doesn't address any inner items. 
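+            // "Inner items" are terms which the EBNF-to-BNF rewrite moved into generated
+            // helper rules; positional (`$n`, `@n`, `##n`) or named (`$alias`) references
+            // to them may no longer exist at the top level of the rewritten production,
+            // so the checks below validate every such reference found in the action code
+            // and raise a descriptive error when one has become unreachable or ambiguous.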
+ if (list.first_transformed_term_index) { + var rhs = list.fragment; + // seek out all names and aliases; strip out literal tokens first as those cannot serve as $names: + var alist = list.terms; // rhs.replace(/'[^']+'/g, '~').replace(/"[^"]+"/g, '~').split(' '); + // we also know at which index the first transformation occurred: + if (devDebug > 2) console.log('alist ~ rhs rule terms: ', alist, rhs); + + var alias_re = new XRegExp(`\\[${ID_REGEX_BASE}\\]`); + var term_re = new XRegExp(`^${ID_REGEX_BASE}$`); + // and collect the PERMITTED aliases: the names of the terms and all the remaining aliases + var good_aliases = {}; + var alias_cnt = {}; + var donotalias = {}; + + // WARNING: this replicates the knowledge/code of jison.js::addName() + var addName = function addNameEBNF(s, i) { + var base = s.replace(/[0-9]+$/, ''); + var dna = donotalias[base]; + + if (good_aliases[s]) { + alias_cnt[s]++; + if (!dna) { + good_aliases[s + alias_cnt[s]] = i + 1; + alias_cnt[s + alias_cnt[s]] = 1; + } + } else { + good_aliases[s] = i + 1; + alias_cnt[s] = 1; + if (!dna) { + good_aliases[s + alias_cnt[s]] = i + 1; + alias_cnt[s + alias_cnt[s]] = 1; + } + } + }; + + // WARNING: this replicates the knowledge/code of jison.js::markBasename() + var markBasename = function markBasenameEBNF(s) { + if (/[0-9]$/.test(s)) { + s = s.replace(/[0-9]+$/, ''); + donotalias[s] = true; + } + }; + + // mark both regular and aliased names, e.g., `id[alias1]` and `id1` + // + // WARNING: this replicates the knowledge/code of jison.js::markBasename()+addName() usage + for (i = 0, len = alist.length; i < len; i++) { + var term = alist[i]; + var alias = term.match(alias_re); + if (alias) { + markBasename(alias[0].substr(1, alias[0].length - 2)); + term = term.replace(alias_re, ''); + } + if (term.match(term_re)) { + markBasename(term); + } + } + // then check & register both regular and aliased names, e.g., `id[alias1]` and `id1` + for (i = 0, len = alist.length; i < len; i++) { + var term = alist[i]; + var alias = term.match(alias_re); + if (alias) { + addName(alias[0].substr(1, alias[0].length - 2), i); + term = term.replace(alias_re, ''); + } + if (term.match(term_re)) { + addName(term, i); + } + } + if (devDebug > 2) console.log('good_aliases: ', { + donotalias: donotalias, + good_aliases: good_aliases, + alias_cnt: alias_cnt, + }); + + // now scan the action for all named and numeric semantic values ($nonterminal / $1 / @1, ##1, ...) + // + // Note that `#name` are straight **static** symbol translations, which are okay as they don't + // require access to the parse stack: `#n` references can be resolved completely + // at grammar compile time. + // + var nameref_re = new XRegExp(`(?:[$@]|##)${ID_REGEX_BASE}`, 'g'); + var named_spots = nameref_re.exec(action); + var numbered_spots = action.match(/(?:[$@]|##)[0-9]+\b/g); + var max_term_index = list.terms.length; + if (devDebug > 2) console.log('ACTION named_spots: ', named_spots); + if (devDebug > 2) console.log('ACTION numbered_spots: ', numbered_spots); + + // loop through the XRegExp alias regex matches in `action` + while (named_spots) { + n = named_spots[0].replace(/^(?:[$@]|##)/, ''); + if (!good_aliases[n]) { + throw new Error('The action block references the named alias "' + n + '" ' + + 'which is not available in production "' + handle + '"; ' + + 'it probably got removed by the EBNF rule rewrite process.\n' + + 'Be reminded that you cannot reference sub-elements within EBNF */+/? 
groups, ' + + 'only the outer-most EBNF group alias will remain available at all times ' + + 'due to the EBNF-to-BNF rewrite process.'); + } + + if (alias_cnt[n] !== 1) { + throw new Error('The action block references the ambiguous named alias or term reference "' + n + '" ' + + 'which is mentioned ' + alias_cnt[n] + ' times in production "' + handle + '", implicit and explicit aliases included.\n' + + 'You should either provide unambiguous = uniquely named aliases for these terms or use numeric index references (e.g. `$3`) as a stop-gap in your action code.\n' + + 'Be reminded that you cannot reference sub-elements within EBNF */+/? groups, ' + + 'only the outer-most EBNF group alias will remain available at all times ' + + 'due to the EBNF-to-BNF rewrite process.'); + } + //assert(good_aliases[n] <= max_term_index, 'max term index'); + + named_spots = nameref_re.exec(action); + } + if (numbered_spots) { + for (i = 0, len = numbered_spots.length; i < len; i++) { + n = parseInt(numbered_spots[i].replace(/^(?:[$@]|##)/, '')); + if (n > max_term_index) { + /* @const */ var n_suffixes = [ 'st', 'nd', 'rd', 'th' ]; + throw new Error('The action block references the ' + n + n_suffixes[Math.max(0, Math.min(3, n - 1))] + ' term, ' + + 'which is not available in production "' + handle + '"; ' + + 'Be reminded that you cannot reference sub-elements within EBNF */+/? groups, ' + + 'only the outer-most EBNF group alias will remain available at all times ' + + 'due to the EBNF-to-BNF rewrite process.'); + } + } + } + } + ret.push(action); + } + if (opts) { + ret.push(opts); + } + if (devDebug > 1) console.log('\n\nEBNF tx result:\n ', JSON.stringify(list, null, 2), JSON.stringify(ret, null, 2)); + + if (ret.length === 1) { + return ret[0]; + } else { + return ret; + } + }); +} + +var ref_list; +var ref_names; + +// create a deep copy of the input, so we will keep the input constant. +function deepClone(from, sub) { + if (sub == null) { + ref_list = []; + ref_names = []; + sub = 'root'; + } + if (typeof from === 'function') return from; + if (from == null || typeof from !== 'object') return from; + if (from.constructor !== Object && from.constructor !== Array) { + return from; + } + + for (var i = 0, len = ref_list.length; i < len; i++) { + if (ref_list[i] === from) { + throw new Error('[Circular/Xref:' + ref_names[i] + ']'); // circular or cross reference + } + } + ref_list.push(from); + ref_names.push(sub); + sub += '.'; + + var to = new from.constructor(); + for (var name in from) { + to[name] = deepClone(from[name], sub + name); + } + return to; +} + +function transformGrammar(grammar) { + grammar = deepClone(grammar); + + Object.keys(grammar).forEach(function transformGrammarForKey(id) { + grammar[id] = transformProduction(id, grammar[id], grammar); + }); + + return grammar; +} + +function transform(ebnf) { + if (devDebug > 0) console.log('EBNF:\n ', JSON.stringify(ebnf, null, 2)); + var rv = transformGrammar(ebnf); + if (devDebug > 0) console.log('\n\nEBNF after transformation:\n ', JSON.stringify(rv, null, 2)); + + return rv; +} + +// hack: +var assert; + +/* parser generated by jison 0.6.1-205 */ + +/* + * Returns a Parser object of the following structure: + * + * Parser: { + * yy: {} The so-called "shared state" or rather the *source* of it; + * the real "shared state" `yy` passed around to + * the rule actions, etc. is a derivative/copy of this one, + * not a direct reference! 
+ * } + * + * Parser.prototype: { + * yy: {}, + * EOF: 1, + * TERROR: 2, + * + * trace: function(errorMessage, ...), + * + * JisonParserError: function(msg, hash), + * + * quoteName: function(name), + * Helper function which can be overridden by user code later on: put suitable + * quotes around literal IDs in a description string. + * + * originalQuoteName: function(name), + * The basic quoteName handler provided by JISON. + * `cleanupAfterParse()` will clean up and reset `quoteName()` to reference this function + * at the end of the `parse()`. + * + * describeSymbol: function(symbol), + * Return a more-or-less human-readable description of the given symbol, when + * available, or the symbol itself, serving as its own 'description' for lack + * of something better to serve up. + * + * Return NULL when the symbol is unknown to the parser. + * + * symbols_: {associative list: name ==> number}, + * terminals_: {associative list: number ==> name}, + * nonterminals: {associative list: rule-name ==> {associative list: number ==> rule-alt}}, + * terminal_descriptions_: (if there are any) {associative list: number ==> description}, + * productions_: [...], + * + * performAction: function parser__performAction(yytext, yyleng, yylineno, yyloc, yystate, yysp, yyvstack, yylstack, yystack, yysstack), + * + * The function parameters and `this` have the following value/meaning: + * - `this` : reference to the `yyval` internal object, which has members (`$` and `_$`) + * to store/reference the rule value `$$` and location info `@$`. + * + * One important thing to note about `this` a.k.a. `yyval`: every *reduce* action gets + * to see the same object via the `this` reference, i.e. if you wish to carry custom + * data from one reduce action through to the next within a single parse run, then you + * may get nasty and use `yyval` a.k.a. `this` for storing you own semi-permanent data. + * + * `this.yy` is a direct reference to the `yy` shared state object. + * + * `%parse-param`-specified additional `parse()` arguments have been added to this `yy` + * object at `parse()` start and are therefore available to the action code via the + * same named `yy.xxxx` attributes (where `xxxx` represents a identifier name from + * the %parse-param` list. + * + * - `yytext` : reference to the lexer value which belongs to the last lexer token used + * to match this rule. This is *not* the look-ahead token, but the last token + * that's actually part of this rule. + * + * Formulated another way, `yytext` is the value of the token immediately preceeding + * the current look-ahead token. + * Caveats apply for rules which don't require look-ahead, such as epsilon rules. + * + * - `yyleng` : ditto as `yytext`, only now for the lexer.yyleng value. + * + * - `yylineno`: ditto as `yytext`, only now for the lexer.yylineno value. + * + * - `yyloc` : ditto as `yytext`, only now for the lexer.yylloc lexer token location info. + * + * WARNING: since jison 0.4.18-186 this entry may be NULL/UNDEFINED instead + * of an empty object when no suitable location info can be provided. + * + * - `yystate` : the current parser state number, used internally for dispatching and + * executing the action code chunk matching the rule currently being reduced. + * + * - `yysp` : the current state stack position (a.k.a. 'stack pointer') + * + * This one comes in handy when you are going to do advanced things to the parser + * stacks, all of which are accessible from your action code (see the next entries below). 
+ * + * Also note that you can access this and other stack index values using the new double-hash + * syntax, i.e. `##$ === ##0 === yysp`, while `##1` is the stack index for all things + * related to the first rule term, just like you have `$1`, `@1` and `#1`. + * This is made available to write very advanced grammar action rules, e.g. when you want + * to investigate the parse state stack in your action code, which would, for example, + * be relevant when you wish to implement error diagnostics and reporting schemes similar + * to the work described here: + * + * + Pottier, F., 2016. Reachability and error diagnosis in LR(1) automata. + * In Journées Francophones des Languages Applicatifs. + * + * + Jeffery, C.L., 2003. Generating LR syntax error messages from examples. + * ACM Transactions on Programming Languages and Systems (TOPLAS), 25(5), pp.631–640. + * + * - `yyrulelength`: the current rule's term count, i.e. the number of entries occupied on the stack. + * + * This one comes in handy when you are going to do advanced things to the parser + * stacks, all of which are accessible from your action code (see the next entries below). + * + * - `yyvstack`: reference to the parser value stack. Also accessed via the `$1` etc. + * constructs. + * + * - `yylstack`: reference to the parser token location stack. Also accessed via + * the `@1` etc. constructs. + * + * WARNING: since jison 0.4.18-186 this array MAY contain slots which are + * UNDEFINED rather than an empty (location) object, when the lexer/parser + * action code did not provide a suitable location info object when such a + * slot was filled! + * + * - `yystack` : reference to the parser token id stack. Also accessed via the + * `#1` etc. constructs. + * + * Note: this is a bit of a **white lie** as we can statically decode any `#n` reference to + * its numeric token id value, hence that code wouldn't need the `yystack` but *you* might + * want access this array for your own purposes, such as error analysis as mentioned above! + * + * Note that this stack stores the current stack of *tokens*, that is the sequence of + * already parsed=reduced *nonterminals* (tokens representing rules) and *terminals* + * (lexer tokens *shifted* onto the stack until the rule they belong to is found and + * *reduced*. + * + * - `yysstack`: reference to the parser state stack. This one carries the internal parser + * *states* such as the one in `yystate`, which are used to represent + * the parser state machine in the *parse table*. *Very* *internal* stuff, + * what can I say? If you access this one, you're clearly doing wicked things + * + * - `...` : the extra arguments you specified in the `%parse-param` statement in your + * grammar definition file. + * + * table: [...], + * State transition table + * ---------------------- + * + * index levels are: + * - `state` --> hash table + * - `symbol` --> action (number or array) + * + * If the `action` is an array, these are the elements' meaning: + * - index [0]: 1 = shift, 2 = reduce, 3 = accept + * - index [1]: GOTO `state` + * + * If the `action` is a number, it is the GOTO `state` + * + * defaultActions: {...}, + * + * parseError: function(str, hash, ExceptionClass), + * yyError: function(str, ...), + * yyRecovering: function(), + * yyErrOk: function(), + * yyClearIn: function(), + * + * constructParseErrorInfo: function(error_message, exception_object, expected_token_set, is_recoverable), + * Helper function **which will be set up during the first invocation of the `parse()` method**. 
+ * Produces a new errorInfo 'hash object' which can be passed into `parseError()`. + * See it's use in this parser kernel in many places; example usage: + * + * var infoObj = parser.constructParseErrorInfo('fail!', null, + * parser.collect_expected_token_set(state), true); + * var retVal = parser.parseError(infoObj.errStr, infoObj, parser.JisonParserError); + * + * originalParseError: function(str, hash, ExceptionClass), + * The basic `parseError` handler provided by JISON. + * `cleanupAfterParse()` will clean up and reset `parseError()` to reference this function + * at the end of the `parse()`. + * + * options: { ... parser %options ... }, + * + * parse: function(input[, args...]), + * Parse the given `input` and return the parsed value (or `true` when none was provided by + * the root action, in which case the parser is acting as a *matcher*). + * You MAY use the additional `args...` parameters as per `%parse-param` spec of this grammar: + * these extra `args...` are added verbatim to the `yy` object reference as member variables. + * + * WARNING: + * Parser's additional `args...` parameters (via `%parse-param`) MAY conflict with + * any attributes already added to `yy` by the jison run-time; + * when such a collision is detected an exception is thrown to prevent the generated run-time + * from silently accepting this confusing and potentially hazardous situation! + * + * The lexer MAY add its own set of additional parameters (via the `%parse-param` line in + * the lexer section of the grammar spec): these will be inserted in the `yy` shared state + * object and any collision with those will be reported by the lexer via a thrown exception. + * + * cleanupAfterParse: function(resultValue, invoke_post_methods, do_not_nuke_errorinfos), + * Helper function **which will be set up during the first invocation of the `parse()` method**. + * This helper API is invoked at the end of the `parse()` call, unless an exception was thrown + * and `%options no-try-catch` has been defined for this grammar: in that case this helper MAY + * be invoked by calling user code to ensure the `post_parse` callbacks are invoked and + * the internal parser gets properly garbage collected under these particular circumstances. + * + * yyMergeLocationInfo: function(first_index, last_index, first_yylloc, last_yylloc, dont_look_back), + * Helper function **which will be set up during the first invocation of the `parse()` method**. + * This helper API can be invoked to calculate a spanning `yylloc` location info object. + * + * Note: %epsilon rules MAY specify no `first_index` and `first_yylloc`, in which case + * this function will attempt to obtain a suitable location marker by inspecting the location stack + * backwards. + * + * For more info see the documentation comment further below, immediately above this function's + * implementation. + * + * lexer: { + * yy: {...}, A reference to the so-called "shared state" `yy` once + * received via a call to the `.setInput(input, yy)` lexer API. 
+ * EOF: 1, + * ERROR: 2, + * JisonLexerError: function(msg, hash), + * parseError: function(str, hash, ExceptionClass), + * setInput: function(input, [yy]), + * input: function(), + * unput: function(str), + * more: function(), + * reject: function(), + * less: function(n), + * pastInput: function(n), + * upcomingInput: function(n), + * showPosition: function(), + * test_match: function(regex_match_array, rule_index, ...), + * next: function(...), + * lex: function(...), + * begin: function(condition), + * pushState: function(condition), + * popState: function(), + * topState: function(), + * _currentRules: function(), + * stateStackSize: function(), + * cleanupAfterLex: function() + * + * options: { ... lexer %options ... }, + * + * performAction: function(yy, yy_, $avoiding_name_collisions, YY_START, ...), + * rules: [...], + * conditions: {associative list: name ==> set}, + * } + * } + * + * + * token location info (@$, _$, etc.): { + * first_line: n, + * last_line: n, + * first_column: n, + * last_column: n, + * range: [start_number, end_number] + * (where the numbers are indexes into the input string, zero-based) + * } + * + * --- + * + * The `parseError` function receives a 'hash' object with these members for lexer and + * parser errors: + * + * { + * text: (matched text) + * token: (the produced terminal token, if any) + * token_id: (the produced terminal token numeric ID, if any) + * line: (yylineno) + * loc: (yylloc) + * } + * + * parser (grammar) errors will also provide these additional members: + * + * { + * expected: (array describing the set of expected tokens; + * may be UNDEFINED when we cannot easily produce such a set) + * state: (integer (or array when the table includes grammar collisions); + * represents the current internal state of the parser kernel. + * can, for example, be used to pass to the `collect_expected_token_set()` + * API to obtain the expected token set) + * action: (integer; represents the current internal action which will be executed) + * new_state: (integer; represents the next/planned internal state, once the current + * action has executed) + * recoverable: (boolean: TRUE when the parser MAY have an error recovery rule + * available for this particular error) + * state_stack: (array: the current parser LALR/LR internal state stack; this can be used, + * for instance, for advanced error analysis and reporting) + * value_stack: (array: the current parser LALR/LR internal `$$` value stack; this can be used, + * for instance, for advanced error analysis and reporting) + * location_stack: (array: the current parser LALR/LR internal location stack; this can be used, + * for instance, for advanced error analysis and reporting) + * yy: (object: the current parser internal "shared state" `yy` + * as is also available in the rule actions; this can be used, + * for instance, for advanced error analysis and reporting) + * lexer: (reference to the current lexer instance used by the parser) + * parser: (reference to the current parser instance) + * } + * + * while `this` will reference the current parser instance. 
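+ *
+ * A minimal, purely illustrative recoverable handler (assuming `parser` is your
+ * Parser instance; member names as documented above) could be installed as:
+ *
+ *     parser.yy.parseError = function (str, hash, ExceptionClass) {
+ *         console.error(str);
+ *         if (!hash.recoverable) {
+ *             throw new ExceptionClass(str, hash);
+ *         }
+ *     };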
+ * + * When `parseError` is invoked by the lexer, `this` will still reference the related *parser* + * instance, while these additional `hash` fields will also be provided: + * + * { + * lexer: (reference to the current lexer instance which reported the error) + * } + * + * When `parseError` is invoked by the parser due to a **JavaScript exception** being fired + * from either the parser or lexer, `this` will still reference the related *parser* + * instance, while these additional `hash` fields will also be provided: + * + * { + * exception: (reference to the exception thrown) + * } + * + * Please do note that in the latter situation, the `expected` field will be omitted as + * this type of failure is assumed not to be due to *parse errors* but rather due to user + * action code in either parser or lexer failing unexpectedly. + * + * --- + * + * You can specify parser options by setting / modifying the `.yy` object of your Parser instance. + * These options are available: + * + * ### options which are global for all parser instances + * + * Parser.pre_parse: function(yy) + * optional: you can specify a pre_parse() function in the chunk following + * the grammar, i.e. after the last `%%`. + * Parser.post_parse: function(yy, retval, parseInfo) { return retval; } + * optional: you can specify a post_parse() function in the chunk following + * the grammar, i.e. after the last `%%`. When it does not return any value, + * the parser will return the original `retval`. + * + * ### options which can be set up per parser instance + * + * yy: { + * pre_parse: function(yy) + * optional: is invoked before the parse cycle starts (and before the first + * invocation of `lex()`) but immediately after the invocation of + * `parser.pre_parse()`). + * post_parse: function(yy, retval, parseInfo) { return retval; } + * optional: is invoked when the parse terminates due to success ('accept') + * or failure (even when exceptions are thrown). + * `retval` contains the return value to be produced by `Parser.parse()`; + * this function can override the return value by returning another. + * When it does not return any value, the parser will return the original + * `retval`. + * This function is invoked immediately before `parser.post_parse()`. + * + * parseError: function(str, hash, ExceptionClass) + * optional: overrides the default `parseError` function. + * quoteName: function(name), + * optional: overrides the default `quoteName` function. + * } + * + * parser.lexer.options: { + * pre_lex: function() + * optional: is invoked before the lexer is invoked to produce another token. + * `this` refers to the Lexer object. + * post_lex: function(token) { return token; } + * optional: is invoked when the lexer has produced a token `token`; + * this function can override the returned token value by returning another. + * When it does not return any (truthy) value, the lexer will return + * the original `token`. + * `this` refers to the Lexer object. + * + * ranges: boolean + * optional: `true` ==> token location info will include a .range[] member. + * flex: boolean + * optional: `true` ==> flex-like lexing behaviour where the rules are tested + * exhaustively to find the longest match. + * backtrack_lexer: boolean + * optional: `true` ==> lexer regexes are tested in order and for invoked; + * the lexer terminates the scan when a token is returned by the action code. + * xregexp: boolean + * optional: `true` ==> lexer rule regexes are "extended regex format" requiring the + * `XRegExp` library. 
When this `%option` has not been specified at compile time, all lexer + * rule regexes have been written as standard JavaScript RegExp expressions. + * } + */ + + +// See also: +// http://stackoverflow.com/questions/1382107/whats-a-good-way-to-extend-error-in-javascript/#35881508 +// but we keep the prototype.constructor and prototype.name assignment lines too for compatibility +// with userland code which might access the derived class in a 'classic' way. +function JisonParserError(msg, hash) { + Object.defineProperty(this, 'name', { + enumerable: false, + writable: false, + value: 'JisonParserError' + }); + + if (msg == null) msg = '???'; + + Object.defineProperty(this, 'message', { + enumerable: false, + writable: true, + value: msg + }); + + this.hash = hash; + + var stacktrace; + if (hash && hash.exception instanceof Error) { + var ex2 = hash.exception; + this.message = ex2.message || msg; + stacktrace = ex2.stack; + } + if (!stacktrace) { + if (Error.hasOwnProperty('captureStackTrace')) { // V8/Chrome engine + Error.captureStackTrace(this, this.constructor); + } else { + stacktrace = (new Error(msg)).stack; + } + } + if (stacktrace) { + Object.defineProperty(this, 'stack', { + enumerable: false, + writable: false, + value: stacktrace + }); + } +} + +if (typeof Object.setPrototypeOf === 'function') { + Object.setPrototypeOf(JisonParserError.prototype, Error.prototype); +} else { + JisonParserError.prototype = Object.create(Error.prototype); +} +JisonParserError.prototype.constructor = JisonParserError; +JisonParserError.prototype.name = 'JisonParserError'; + + + + // helper: reconstruct the productions[] table + function bp(s) { + var rv = []; + var p = s.pop; + var r = s.rule; + for (var i = 0, l = p.length; i < l; i++) { + rv.push([ + p[i], + r[i] + ]); + } + return rv; + } + + + + // helper: reconstruct the defaultActions[] table + function bda(s) { + var rv = {}; + var d = s.idx; + var g = s.goto; + for (var i = 0, l = d.length; i < l; i++) { + var j = d[i]; + rv[j] = g[i]; + } + return rv; + } + + + + // helper: reconstruct the 'goto' table + function bt(s) { + var rv = []; + var d = s.len; + var y = s.symbol; + var t = s.type; + var a = s.state; + var m = s.mode; + var g = s.goto; + for (var i = 0, l = d.length; i < l; i++) { + var n = d[i]; + var q = {}; + for (var j = 0; j < n; j++) { + var z = y.shift(); + switch (t.shift()) { + case 2: + q[z] = [ + m.shift(), + g.shift() + ]; + break; + + case 0: + q[z] = a.shift(); + break; + + default: + // type === 1: accept + q[z] = [ + 3 + ]; + } + } + rv.push(q); + } + return rv; + } + + + + // helper: runlength encoding with increment step: code, length: step (default step = 0) + // `this` references an array + function s(c, l, a) { + a = a || 0; + for (var i = 0; i < l; i++) { + this.push(c); + c += a; + } + } + + // helper: duplicate sequence from *relative* offset and length. + // `this` references an array + function c(i, l) { + i = this.length - i; + for (l += i; i < l; i++) { + this.push(this[i]); + } + } + + // helper: unpack an array using helpers and data, all passed in an array argument 'a'. + function u(a) { + var rv = []; + for (var i = 0, l = a.length; i < l; i++) { + var e = a[i]; + // Is this entry a helper function? + if (typeof e === 'function') { + i++; + e.apply(rv, a[i]); + } else { + rv.push(e); + } + } + return rv; + } + + +var parser = { + // Code Generator Information Report + // --------------------------------- + // + // Options: + // + // default action mode: ............. 
classic,merge + // no try..catch: ................... false + // no default resolve on conflict: false + // on-demand look-ahead: ............ false + // error recovery token skip maximum: 3 + // yyerror in parse actions is: ..... NOT recoverable, + // yyerror in lexer actions and other non-fatal lexer are: + // .................................. NOT recoverable, + // debug grammar/output: ............ false + // has partial LR conflict upgrade: true + // rudimentary token-stack support: false + // parser table compression mode: ... 2 + // export debug tables: ............. false + // export *all* tables: ............. false + // module type: ..................... es + // parser engine type: .............. lalr + // output main() in the module: ..... true + // has user-specified main(): ....... false + // has user-specified require()/import modules for main(): + // .................................. false + // number of expected conflicts: .... 0 + // + // + // Parser Analysis flags: + // + // no significant actions (parser is a language matcher only): + // .................................. false + // uses yyleng: ..................... false + // uses yylineno: ................... false + // uses yytext: ..................... false + // uses yylloc: ..................... false + // uses ParseError API: ............. false + // uses YYERROR: .................... true + // uses YYRECOVERING: ............... false + // uses YYERROK: .................... false + // uses YYCLEARIN: .................. false + // tracks rule values: .............. true + // assigns rule values: ............. true + // uses location tracking: .......... true + // assigns location: ................ true + // uses yystack: .................... false + // uses yysstack: ................... false + // uses yysp: ....................... true + // uses yyrulelength: ............... false + // uses yyMergeLocationInfo API: .... true + // has error recovery: .............. true + // has error reporting: ............. 
true + // + // --------- END OF REPORT ----------- + +trace: function no_op_trace() {}, +JisonParserError: JisonParserError, +yy: {}, +options: { + type: "lalr", + hasPartialLrUpgradeOnConflict: true, + errorRecoveryTokenDiscardCount: 3 +}, +symbols_: { + "$accept": 0, + "$end": 1, + "%%": 14, + "(": 7, + ")": 8, + "*": 9, + "+": 11, + ":": 5, + ";": 4, + "=": 3, + "?": 10, + "ACTION": 15, + "ACTION_BODY": 43, + "ALIAS": 39, + "ARROW_ACTION": 42, + "CODE": 46, + "DEBUG": 19, + "EBNF": 20, + "EOF": 1, + "EOF_ID": 40, + "EPSILON": 38, + "ID": 24, + "IMPORT": 22, + "INCLUDE": 44, + "INIT_CODE": 23, + "INTEGER": 37, + "LEFT": 33, + "LEX_BLOCK": 17, + "NAME": 25, + "NONASSOC": 35, + "OPTIONS": 27, + "OPTIONS_END": 28, + "OPTION_STRING_VALUE": 29, + "OPTION_VALUE": 30, + "PARSER_TYPE": 32, + "PARSE_PARAM": 31, + "PATH": 45, + "PREC": 41, + "RIGHT": 34, + "START": 16, + "STRING": 26, + "TOKEN": 18, + "TOKEN_TYPE": 36, + "UNKNOWN_DECL": 21, + "action": 85, + "action_body": 86, + "action_comments_body": 87, + "action_ne": 84, + "associativity": 61, + "declaration": 51, + "declaration_list": 50, + "error": 2, + "expression": 79, + "extra_parser_module_code": 88, + "full_token_definitions": 63, + "grammar": 69, + "handle": 76, + "handle_action": 75, + "handle_list": 74, + "handle_sublist": 77, + "id": 83, + "id_list": 68, + "import_name": 53, + "import_path": 54, + "include_macro_code": 89, + "init_code_name": 52, + "module_code_chunk": 90, + "one_full_token": 64, + "operator": 60, + "option": 57, + "option_list": 56, + "optional_action_header_block": 49, + "optional_end_block": 48, + "optional_module_code_chunk": 91, + "optional_production_description": 73, + "optional_token_type": 65, + "options": 55, + "parse_params": 58, + "parser_type": 59, + "prec": 81, + "production": 71, + "production_id": 72, + "production_list": 70, + "spec": 47, + "suffix": 80, + "suffixed_expression": 78, + "symbol": 82, + "token_description": 67, + "token_list": 62, + "token_value": 66, + "{": 12, + "|": 6, + "}": 13 +}, +terminals_: { + 1: "EOF", + 2: "error", + 3: "=", + 4: ";", + 5: ":", + 6: "|", + 7: "(", + 8: ")", + 9: "*", + 10: "?", + 11: "+", + 12: "{", + 13: "}", + 14: "%%", + 15: "ACTION", + 16: "START", + 17: "LEX_BLOCK", + 18: "TOKEN", + 19: "DEBUG", + 20: "EBNF", + 21: "UNKNOWN_DECL", + 22: "IMPORT", + 23: "INIT_CODE", + 24: "ID", + 25: "NAME", + 26: "STRING", + 27: "OPTIONS", + 28: "OPTIONS_END", + 29: "OPTION_STRING_VALUE", + 30: "OPTION_VALUE", + 31: "PARSE_PARAM", + 32: "PARSER_TYPE", + 33: "LEFT", + 34: "RIGHT", + 35: "NONASSOC", + 36: "TOKEN_TYPE", + 37: "INTEGER", + 38: "EPSILON", + 39: "ALIAS", + 40: "EOF_ID", + 41: "PREC", + 42: "ARROW_ACTION", + 43: "ACTION_BODY", + 44: "INCLUDE", + 45: "PATH", + 46: "CODE" +}, +TERROR: 2, +EOF: 1, + +// internals: defined here so the object *structure* doesn't get modified by parse() et al, +// thus helping JIT compilers like Chrome V8. 
+originalQuoteName: null,
+originalParseError: null,
+cleanupAfterParse: null,
+constructParseErrorInfo: null,
+yyMergeLocationInfo: null,
+
+__reentrant_call_depth: 0,      // INTERNAL USE ONLY
+__error_infos: [],              // INTERNAL USE ONLY: the set of parseErrorInfo objects created since the last cleanup
+__error_recovery_infos: [],     // INTERNAL USE ONLY: the set of parseErrorInfo objects created since the last cleanup
+
+// APIs which will be set up depending on user action code analysis:
+//yyRecovering: 0,
+//yyErrOk: 0,
+//yyClearIn: 0,
+
+// Helper APIs
+// -----------
+
+// Helper function which can be overridden by user code later on: put suitable quotes around
+// literal IDs in a description string.
+quoteName: function parser_quoteName(id_str) {
+    return '"' + id_str + '"';
+},
+
+// Return the name of the given symbol (terminal or non-terminal) as a string, when available.
+//
+// Return NULL when the symbol is unknown to the parser.
+getSymbolName: function parser_getSymbolName(symbol) {
+    if (this.terminals_[symbol]) {
+        return this.terminals_[symbol];
+    }
+
+    // Otherwise... this might refer to a RULE token i.e. a non-terminal: see if we can dig that one up.
+    //
+    // An example of this may be where a rule's action code contains a call like this:
+    //
+    //     parser.getSymbolName(#$)
+    //
+    // to obtain a human-readable name of the current grammar rule.
+    var s = this.symbols_;
+    for (var key in s) {
+        if (s[key] === symbol) {
+            return key;
+        }
+    }
+    return null;
+},
+
+// Return a more-or-less human-readable description of the given symbol, when available,
+// or the symbol itself, serving as its own 'description' for lack of something better to serve up.
+//
+// Return NULL when the symbol is unknown to the parser.
+describeSymbol: function parser_describeSymbol(symbol) {
+    if (symbol !== this.EOF && this.terminal_descriptions_ && this.terminal_descriptions_[symbol]) {
+        return this.terminal_descriptions_[symbol];
+    } else if (symbol === this.EOF) {
+        return 'end of input';
+    }
+    var id = this.getSymbolName(symbol);
+    if (id) {
+        return this.quoteName(id);
+    }
+    return null;
+},
+
+// Produce a (more or less) human-readable list of expected tokens at the point of failure.
+//
+// The produced list may contain token or token set descriptions instead of the tokens
+// themselves to help turn this output into something that is easier to read by humans,
+// unless the `do_not_describe` parameter is set, in which case a list of the raw, *numeric*,
+// expected terminals and nonterminals is produced.
+//
+// The returned list (array) will not contain any duplicate entries.
+collect_expected_token_set: function parser_collect_expected_token_set(state, do_not_describe) {
+    var TERROR = this.TERROR;
+    var tokenset = [];
+    var check = {};
+    // Has this (error?) state been outfitted with a custom expectations description text for human consumption?
+    // If so, use that one instead of the less palatable token set.
+    if (!do_not_describe && this.state_descriptions_ && this.state_descriptions_[state]) {
+        return [this.state_descriptions_[state]];
+    }
+    for (var p in this.table[state]) {
+        p = +p;
+        if (p !== TERROR) {
+            var d = do_not_describe ? p : this.describeSymbol(p);
+            if (d && !check[d]) {
+                tokenset.push(d);
+                check[d] = true;        // Mark this token description as already mentioned to prevent outputting duplicate entries.
+ } + } + } + return tokenset; +}, +productions_: bp({ + pop: u([ + s, + [47, 3], + 48, + 48, + s, + [49, 3], + s, + [50, 3], + s, + [51, 20], + s, + [52, 3], + 53, + 53, + 54, + 54, + s, + [55, 3], + 56, + 56, + s, + [57, 6], + 58, + 58, + 59, + 59, + 60, + 60, + s, + [61, 3], + 62, + 62, + 63, + 63, + s, + [64, 3], + 65, + s, + [65, 4, 1], + 68, + 69, + 70, + 70, + s, + [71, 3], + 72, + 72, + 73, + 73, + s, + [74, 4], + s, + [75, 3], + 76, + 76, + 77, + 77, + 78, + 78, + s, + [79, 5], + s, + [80, 4], + s, + [81, 3], + 82, + 82, + 83, + s, + [84, 4], + s, + [85, 3], + s, + [86, 5], + 87, + 87, + 88, + 88, + 89, + 89, + s, + [90, 3], + 91, + 91 +]), + rule: u([ + 5, + 5, + 3, + 0, + 2, + 0, + s, + [2, 3], + c, + [4, 3], + 1, + 1, + c, + [3, 3], + s, + [1, 6], + s, + [3, 5], + s, + [2, 3], + c, + [15, 9], + c, + [11, 4], + c, + [20, 7], + s, + [2, 4], + s, + [1, 3], + 2, + 1, + 2, + 2, + c, + [15, 3], + 0, + c, + [11, 7], + c, + [36, 4], + 3, + 3, + 1, + 0, + 3, + c, + [39, 4], + c, + [80, 4], + c, + [9, 3], + c, + [39, 4], + 3, + 3, + c, + [34, 5], + c, + [40, 5], + c, + [32, 3], + s, + [1, 3], + 0, + 0, + 1, + 5, + 4, + 4, + c, + [53, 3], + c, + [85, 4], + c, + [35, 3], + 0 +]) +}), +performAction: function parser__PerformAction(yyloc, yystate /* action[1] */, yysp, yyvstack, yylstack) { + + /* this == yyval */ + + // the JS engine itself can go and remove these statements when `yy` turns out to be unused in any action code! + var yy = this.yy; + var yyparser = yy.parser; + var yylexer = yy.lexer; + + + + switch (yystate) { +case 0: + /*! Production:: $accept : spec $end */ + + // default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-): + this.$ = yyvstack[yysp - 1]; + this._$ = yylstack[yysp - 1]; + // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-) + break; + +case 1: + /*! Production:: spec : declaration_list "%%" grammar optional_end_block EOF */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 4, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = yyvstack[yysp - 4]; + if (yyvstack[yysp - 1].trim() !== '') { + yy.addDeclaration(this.$, { include: yyvstack[yysp - 1] }); + } + return extend(this.$, yyvstack[yysp - 2]); + break; + +case 2: + /*! Production:: spec : declaration_list "%%" grammar error EOF */ + + // default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-): + this.$ = yyvstack[yysp - 4]; + this._$ = yyparser.yyMergeLocationInfo(yysp - 4, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-) + + + yyparser.yyError(rmCommonWS` + Maybe you did not correctly separate trailing code from the grammar rule set with a '%%' marker on an otherwise empty line? + + Erroneous area: + ${yylexer.prettyPrintRange(yylstack[yysp - 1], yylstack[yysp - 2])} + `); + break; + +case 3: + /*! Production:: spec : declaration_list error EOF */ + + // default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-): + this.$ = yyvstack[yysp - 2]; + this._$ = yyparser.yyMergeLocationInfo(yysp - 2, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-) + + + yyparser.yyError(rmCommonWS` + Maybe you did not correctly separate the parse 'header section' (token definitions, options, lexer spec, etc.) from the grammar rule set with a '%%' on an otherwise empty line? 
+ + Erroneous area: + ${yylexer.prettyPrintRange(yylstack[yysp - 1], yylstack[yysp - 2])} + `); + break; + +case 4: + /*! Production:: optional_end_block : %epsilon */ +case 100: + /*! Production:: suffix : %epsilon */ +case 116: + /*! Production:: action : %epsilon */ +case 117: + /*! Production:: action_body : %epsilon */ +case 132: + /*! Production:: optional_module_code_chunk : %epsilon */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(null, null, null, null, true); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = ''; + break; + +case 5: + /*! Production:: optional_end_block : "%%" extra_parser_module_code */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + var rv = checkActionBlock(yyvstack[yysp], yylstack[yysp]); + if (rv) { + yyparser.yyError(rmCommonWS` + The extra parser module code section (a.k.a. 'epilogue') does not compile: ${rv} + + Erroneous area: + ${yylexer.prettyPrintRange(yylstack[yysp])} + `); + } + this.$ = yyvstack[yysp]; + break; + +case 6: + /*! Production:: optional_action_header_block : %epsilon */ +case 10: + /*! Production:: declaration_list : %epsilon */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(null, null, null, null, true); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = {}; + break; + +case 7: + /*! Production:: optional_action_header_block : optional_action_header_block ACTION */ +case 8: + /*! Production:: optional_action_header_block : optional_action_header_block include_macro_code */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = yyvstack[yysp - 1]; + var rv = checkActionBlock(yyvstack[yysp], yylstack[yysp]); + if (rv) { + yyparser.yyError(rmCommonWS` + action header code block does not compile: ${rv} + + Erroneous area: + ${yylexer.prettyPrintRange(yylstack[yysp])} + `); + } + yy.addDeclaration(this.$, { actionInclude: yyvstack[yysp] }); + break; + +case 9: + /*! Production:: declaration_list : declaration_list declaration */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = yyvstack[yysp - 1]; yy.addDeclaration(this.$, yyvstack[yysp]); + break; + +case 11: + /*! Production:: declaration_list : declaration_list error */ + + // default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-): + this.$ = yyvstack[yysp - 1]; + this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-) + + + // TODO ... + yyparser.yyError(rmCommonWS` + declaration list error? + + Erroneous area: + ${yylexer.prettyPrintRange(yylstack[yysp], yylstack[yysp - 1])} + `); + break; + +case 12: + /*! 
Production:: declaration : START id */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = {start: yyvstack[yysp]}; + break; + +case 13: + /*! Production:: declaration : LEX_BLOCK */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yylstack[yysp]; + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = {lex: {text: yyvstack[yysp], position: yylstack[yysp]}}; + break; + +case 14: + /*! Production:: declaration : operator */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yylstack[yysp]; + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = {operator: yyvstack[yysp]}; + break; + +case 15: + /*! Production:: declaration : TOKEN full_token_definitions */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = {token_list: yyvstack[yysp]}; + break; + +case 16: + /*! Production:: declaration : ACTION */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yylstack[yysp]; + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + var rv = checkActionBlock(yyvstack[yysp], yylstack[yysp]); + if (rv) { + yyparser.yyError(rmCommonWS` + action code block does not compile: ${rv} + + Erroneous area: + ${yylexer.prettyPrintRange(yylstack[yysp])} + `); + } + this.$ = {include: yyvstack[yysp]}; + break; + +case 17: + /*! Production:: declaration : include_macro_code */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yylstack[yysp]; + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + var rv = checkActionBlock(yyvstack[yysp], yylstack[yysp]); + if (rv) { + yyparser.yyError(rmCommonWS` + action header code block does not compile: ${rv} + + Erroneous area: + ${yylexer.prettyPrintRange(yylstack[yysp])} + `); + } + this.$ = {include: yyvstack[yysp]}; + break; + +case 18: + /*! Production:: declaration : parse_params */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yylstack[yysp]; + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = {parseParams: yyvstack[yysp]}; + break; + +case 19: + /*! Production:: declaration : parser_type */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yylstack[yysp]; + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = {parserType: yyvstack[yysp]}; + break; + +case 20: + /*! Production:: declaration : options */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yylstack[yysp]; + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = {options: yyvstack[yysp]}; + break; + +case 21: + /*! 
Production:: declaration : DEBUG */
+
+    // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-):
+    this._$ = yylstack[yysp];
+    // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-)
+
+
+    this.$ = {options: [['debug', true]]};
+    break;
+
+case 22:
+    /*! Production:: declaration : EBNF */
+
+    // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-):
+    this._$ = yylstack[yysp];
+    // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-)
+
+
+    ebnf = true;
+    this.$ = {options: [['ebnf', true]]};
+    break;
+
+case 23:
+    /*! Production:: declaration : UNKNOWN_DECL */
+
+    // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-):
+    this._$ = yylstack[yysp];
+    // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-)
+
+
+    this.$ = {unknownDecl: yyvstack[yysp]};
+    break;
+
+case 24:
+    /*! Production:: declaration : IMPORT import_name import_path */
+
+    // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-):
+    this._$ = yyparser.yyMergeLocationInfo(yysp - 2, yysp);
+    // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-)
+
+
+    this.$ = {imports: {name: yyvstack[yysp - 1], path: yyvstack[yysp]}};
+    break;
+
+case 25:
+    /*! Production:: declaration : IMPORT import_name error */
+
+    // default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-):
+    this.$ = yyvstack[yysp - 2];
+    this._$ = yyparser.yyMergeLocationInfo(yysp - 2, yysp);
+    // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-)
+
+
+    yyparser.yyError(rmCommonWS`
+        You did not specify a legal file path for the '%import' initialization code statement, which must have the format:
+
+            %import qualifier_name file_path
+
+        Erroneous area:
+        ${yylexer.prettyPrintRange(yylstack[yysp], yylstack[yysp - 2])}
+    `);
+    break;
+
+case 26:
+    /*! Production:: declaration : IMPORT error import_path */
+
+    // default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-):
+    this.$ = yyvstack[yysp - 2];
+    this._$ = yyparser.yyMergeLocationInfo(yysp - 2, yysp);
+    // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-)
+
+
+    yyparser.yyError(rmCommonWS`
+        Each '%import'-ed initialization code section must be qualified by a name, e.g. 'required' before the import path itself:
+
+            %import qualifier_name file_path
+
+        Erroneous area:
+        ${yylexer.prettyPrintRange(yylstack[yysp - 1], yylstack[yysp - 2])}
+    `);
+    break;
+
+case 27:
+    /*! Production:: declaration : INIT_CODE init_code_name action_ne */
+
+    // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-):
+    this._$ = yyparser.yyMergeLocationInfo(yysp - 2, yysp);
+    // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-)
+
+
+    var rv = checkActionBlock(yyvstack[yysp], yylstack[yysp]);
+    if (rv) {
+        yyparser.yyError(rmCommonWS`
+            %code "${yyvstack[yysp - 1]}" initialization section action code block does not compile: ${rv}
+
+            Erroneous area:
+            ${yylexer.prettyPrintRange(yylstack[yysp], yylstack[yysp - 2])}
+        `);
+    }
+    this.$ = {
+        initCode: {
+            qualifier: yyvstack[yysp - 1],
+            include: yyvstack[yysp]
+        }
+    };
+    break;
+
+case 28:
+    /*!
Production:: declaration : INIT_CODE error action_ne */ + + // default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-): + this.$ = yyvstack[yysp - 2]; + this._$ = yyparser.yyMergeLocationInfo(yysp - 2, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-) + + + yyparser.yyError(rmCommonWS` + Each '%code' initialization code section must be qualified by a name, e.g. 'required' before the action code itself: + + %code qualifier_name {action code} + + Erroneous area: + ${yylexer.prettyPrintRange(yylstack[yysp - 1], yylstack[yysp - 2], yylstack[yysp])} + `); + break; + +case 29: + /*! Production:: declaration : START error */ + + // default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-): + this.$ = yyvstack[yysp - 1]; + this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-) + + + // TODO ... + yyparser.yyError(rmCommonWS` + %start token error? + + Erroneous area: + ${yylexer.prettyPrintRange(yylstack[yysp], yylstack[yysp - 1])} + `); + break; + +case 30: + /*! Production:: declaration : TOKEN error */ + + // default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-): + this.$ = yyvstack[yysp - 1]; + this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-) + + + // TODO ... + yyparser.yyError(rmCommonWS` + %token definition list error? + + Erroneous area: + ${yylexer.prettyPrintRange(yylstack[yysp], yylstack[yysp - 1])} + `); + break; + +case 31: + /*! Production:: declaration : IMPORT error */ + + // default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-): + this.$ = yyvstack[yysp - 1]; + this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-) + + + // TODO ... + yyparser.yyError(rmCommonWS` + %import name or source filename missing maybe? + + Erroneous area: + ${yylexer.prettyPrintRange(yylstack[yysp], yylstack[yysp - 1])} + `); + break; + +case 32: + /*! Production:: init_code_name : ID */ +case 33: + /*! Production:: init_code_name : NAME */ +case 34: + /*! Production:: init_code_name : STRING */ +case 35: + /*! Production:: import_name : ID */ +case 36: + /*! Production:: import_name : STRING */ +case 37: + /*! Production:: import_path : ID */ +case 38: + /*! Production:: import_path : STRING */ +case 67: + /*! Production:: optional_token_type : TOKEN_TYPE */ +case 68: + /*! Production:: token_value : INTEGER */ +case 69: + /*! Production:: token_description : STRING */ +case 80: + /*! Production:: optional_production_description : STRING */ +case 95: + /*! Production:: expression : ID */ +case 101: + /*! Production:: suffix : "*" */ +case 102: + /*! Production:: suffix : "?" */ +case 103: + /*! Production:: suffix : "+" */ +case 107: + /*! Production:: symbol : id */ +case 108: + /*! Production:: symbol : STRING */ +case 109: + /*! Production:: id : ID */ +case 112: + /*! Production:: action_ne : ACTION */ +case 113: + /*! Production:: action_ne : include_macro_code */ +case 114: + /*! Production:: action : action_ne */ +case 118: + /*! Production:: action_body : action_comments_body */ +case 122: + /*! Production:: action_comments_body : ACTION_BODY */ +case 124: + /*! Production:: extra_parser_module_code : optional_module_code_chunk */ +case 128: + /*! 
Production:: module_code_chunk : CODE */ +case 131: + /*! Production:: optional_module_code_chunk : module_code_chunk */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yylstack[yysp]; + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = yyvstack[yysp]; + break; + +case 39: + /*! Production:: options : OPTIONS option_list OPTIONS_END */ +case 110: + /*! Production:: action_ne : "{" action_body "}" */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 2, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = yyvstack[yysp - 1]; + break; + +case 40: + /*! Production:: options : OPTIONS error OPTIONS_END */ + + // default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-): + this.$ = yyvstack[yysp - 2]; + this._$ = yyparser.yyMergeLocationInfo(yysp - 2, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-) + + + // TODO ... + yyparser.yyError(rmCommonWS` + %options ill defined / error? + + Erroneous area: + ${yylexer.prettyPrintRange(yylstack[yysp - 1], yylstack[yysp - 2], yylstack[yysp])} + `); + break; + +case 41: + /*! Production:: options : OPTIONS error */ + + // default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-): + this.$ = yyvstack[yysp - 1]; + this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-) + + + // TODO ... + yyparser.yyError(rmCommonWS` + %options don't seem terminated? + + Erroneous area: + ${yylexer.prettyPrintRange(yylstack[yysp], yylstack[yysp - 1])} + `); + break; + +case 42: + /*! Production:: option_list : option_list option */ +case 59: + /*! Production:: token_list : token_list symbol */ +case 70: + /*! Production:: id_list : id_list id */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = yyvstack[yysp - 1]; this.$.push(yyvstack[yysp]); + break; + +case 43: + /*! Production:: option_list : option */ +case 60: + /*! Production:: token_list : symbol */ +case 71: + /*! Production:: id_list : id */ +case 83: + /*! Production:: handle_list : handle_action */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yylstack[yysp]; + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = [yyvstack[yysp]]; + break; + +case 44: + /*! Production:: option : NAME */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yylstack[yysp]; + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = [yyvstack[yysp], true]; + break; + +case 45: + /*! Production:: option : NAME "=" OPTION_STRING_VALUE */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 2, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = [yyvstack[yysp - 2], yyvstack[yysp]]; + break; + +case 46: + /*! Production:: option : NAME "=" OPTION_VALUE */ +case 47: + /*! 
Production:: option : NAME "=" NAME */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 2, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = [yyvstack[yysp - 2], parseValue(yyvstack[yysp])]; + break; + +case 48: + /*! Production:: option : NAME "=" error */ + + // default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-): + this.$ = yyvstack[yysp - 2]; + this._$ = yyparser.yyMergeLocationInfo(yysp - 2, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-) + + + // TODO ... + yyparser.yyError(rmCommonWS` + named %option value error for ${yyvstack[yysp - 2]}? + + Erroneous area: + ${yylexer.prettyPrintRange(yylstack[yysp], yylstack[yysp - 2])} + `); + break; + +case 49: + /*! Production:: option : NAME error */ + + // default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-): + this.$ = yyvstack[yysp - 1]; + this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-) + + + // TODO ... + yyparser.yyError(rmCommonWS` + named %option value assignment error? + + Erroneous area: + ${yylexer.prettyPrintRange(yylstack[yysp], yylstack[yysp - 1])} + `); + break; + +case 50: + /*! Production:: parse_params : PARSE_PARAM token_list */ +case 52: + /*! Production:: parser_type : PARSER_TYPE symbol */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = yyvstack[yysp]; + break; + +case 51: + /*! Production:: parse_params : PARSE_PARAM error */ + + // default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-): + this.$ = yyvstack[yysp - 1]; + this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-) + + + // TODO ... + yyparser.yyError(rmCommonWS` + %parse-params declaration error? + + Erroneous area: + ${yylexer.prettyPrintRange(yylstack[yysp], yylstack[yysp - 1])} + `); + break; + +case 53: + /*! Production:: parser_type : PARSER_TYPE error */ + + // default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-): + this.$ = yyvstack[yysp - 1]; + this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-) + + + // TODO ... + yyparser.yyError(rmCommonWS` + %parser-type declaration error? + + Erroneous area: + ${yylexer.prettyPrintRange(yylstack[yysp], yylstack[yysp - 1])} + `); + break; + +case 54: + /*! Production:: operator : associativity token_list */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = [yyvstack[yysp - 1]]; this.$.push.apply(this.$, yyvstack[yysp]); + break; + +case 55: + /*! 
Production:: operator : associativity error */ + + // default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-): + this.$ = yyvstack[yysp - 1]; + this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-) + + + // TODO ... + yyparser.yyError(rmCommonWS` + operator token list error in an associativity statement? + + Erroneous area: + ${yylexer.prettyPrintRange(yylstack[yysp], yylstack[yysp - 1])} + `); + break; + +case 56: + /*! Production:: associativity : LEFT */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yylstack[yysp]; + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = 'left'; + break; + +case 57: + /*! Production:: associativity : RIGHT */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yylstack[yysp]; + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = 'right'; + break; + +case 58: + /*! Production:: associativity : NONASSOC */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yylstack[yysp]; + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = 'nonassoc'; + break; + +case 61: + /*! Production:: full_token_definitions : optional_token_type id_list */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + var rv = []; + var lst = yyvstack[yysp]; + for (var i = 0, len = lst.length; i < len; i++) { + var id = lst[i]; + var m = {id: id}; + if (yyvstack[yysp - 1]) { + m.type = yyvstack[yysp - 1]; + } + rv.push(m); + } + this.$ = rv; + break; + +case 62: + /*! Production:: full_token_definitions : optional_token_type one_full_token */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + var m = yyvstack[yysp]; + if (yyvstack[yysp - 1]) { + m.type = yyvstack[yysp - 1]; + } + this.$ = [m]; + break; + +case 63: + /*! Production:: one_full_token : id token_value token_description */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 2, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = { + id: yyvstack[yysp - 2], + value: yyvstack[yysp - 1], + description: yyvstack[yysp] + }; + break; + +case 64: + /*! Production:: one_full_token : id token_description */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = { + id: yyvstack[yysp - 1], + description: yyvstack[yysp] + }; + break; + +case 65: + /*! 
Production:: one_full_token : id token_value */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = { + id: yyvstack[yysp - 1], + value: yyvstack[yysp] + }; + break; + +case 66: + /*! Production:: optional_token_type : %epsilon */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(null, null, null, null, true); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = false; + break; + +case 72: + /*! Production:: grammar : optional_action_header_block production_list */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = yyvstack[yysp - 1]; + this.$.grammar = yyvstack[yysp]; + break; + +case 73: + /*! Production:: production_list : production_list production */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = yyvstack[yysp - 1]; + if (yyvstack[yysp][0] in this.$) { + this.$[yyvstack[yysp][0]] = this.$[yyvstack[yysp][0]].concat(yyvstack[yysp][1]); + } else { + this.$[yyvstack[yysp][0]] = yyvstack[yysp][1]; + } + break; + +case 74: + /*! Production:: production_list : production */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yylstack[yysp]; + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = {}; this.$[yyvstack[yysp][0]] = yyvstack[yysp][1]; + break; + +case 75: + /*! Production:: production : production_id handle_list ";" */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 2, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = [yyvstack[yysp - 2], yyvstack[yysp - 1]]; + break; + +case 76: + /*! Production:: production : production_id error ";" */ + + // default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-): + this.$ = yyvstack[yysp - 2]; + this._$ = yyparser.yyMergeLocationInfo(yysp - 2, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-) + + + // TODO ... + yyparser.yyError(rmCommonWS` + rule production declaration error? + + Erroneous area: + ${yylexer.prettyPrintRange(yylstack[yysp - 1], yylstack[yysp - 2])} + `); + break; + +case 77: + /*! Production:: production : production_id error */ + + // default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-): + this.$ = yyvstack[yysp - 1]; + this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-) + + + // TODO ... + yyparser.yyError(rmCommonWS` + rule production declaration error: did you terminate the rule production set with a semicolon? + + Erroneous area: + ${yylexer.prettyPrintRange(yylstack[yysp], yylstack[yysp - 1])} + `); + break; + +case 78: + /*! 
Production:: production_id : id optional_production_description ":" */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 2, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = yyvstack[yysp - 2]; + + // TODO: carry rule description support into the parser generator... + break; + +case 79: + /*! Production:: production_id : id optional_production_description error */ + + // default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-): + this.$ = yyvstack[yysp - 2]; + this._$ = yyparser.yyMergeLocationInfo(yysp - 2, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-) + + + // TODO ... + yyparser.yyError(rmCommonWS` + rule id should be followed by a colon, but that one seems missing? + + Erroneous area: + ${yylexer.prettyPrintRange(yylstack[yysp], yylstack[yysp - 2])} + `); + break; + +case 81: + /*! Production:: optional_production_description : %epsilon */ + + // default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-): + this.$ = undefined; + this._$ = yyparser.yyMergeLocationInfo(null, null, null, null, true); + // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-) + break; + +case 82: + /*! Production:: handle_list : handle_list "|" handle_action */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 2, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = yyvstack[yysp - 2]; + this.$.push(yyvstack[yysp]); + break; + +case 84: + /*! Production:: handle_list : handle_list "|" error */ + + // default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-): + this.$ = yyvstack[yysp - 2]; + this._$ = yyparser.yyMergeLocationInfo(yysp - 2, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-) + + + // TODO ... + yyparser.yyError(rmCommonWS` + rule alternative production declaration error? + + Erroneous area: + ${yylexer.prettyPrintRange(yylstack[yysp], yylstack[yysp - 2])} + `); + break; + +case 85: + /*! Production:: handle_list : handle_list ":" error */ + + // default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-): + this.$ = yyvstack[yysp - 2]; + this._$ = yyparser.yyMergeLocationInfo(yysp - 2, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-) + + + // TODO ... + yyparser.yyError(rmCommonWS` + multiple alternative rule productions should be separated by a '|' pipe character, not a ':' colon! + + Erroneous area: + ${yylexer.prettyPrintRange(yylstack[yysp], yylstack[yysp - 2])} + `); + break; + +case 86: + /*! Production:: handle_action : handle prec action */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 2, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = [(yyvstack[yysp - 2].length ? 
yyvstack[yysp - 2].join(' ') : '')]; + if (yyvstack[yysp]) { + var rv = checkActionBlock(yyvstack[yysp], yylstack[yysp]); + if (rv) { + yyparser.yyError(rmCommonWS` + production rule action code block does not compile: ${rv} + + Erroneous area: + ${yylexer.prettyPrintRange(yylstack[yysp], yylstack[yysp - 2])} + `); + } + this.$.push(yyvstack[yysp]); + } + if (yyvstack[yysp - 1]) { + if (yyvstack[yysp - 2].length === 0) { + yyparser.yyError(rmCommonWS` + You cannot specify a precedence override for an epsilon (a.k.a. empty) rule! + + Erroneous area: + ${yylexer.prettyPrintRange(yylstack[yysp - 2])} + `); + } + this.$.push(yyvstack[yysp - 1]); + } + if (this.$.length === 1) { + this.$ = this.$[0]; + } + break; + +case 87: + /*! Production:: handle_action : EPSILON action */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = ['']; + if (yyvstack[yysp]) { + var rv = checkActionBlock(yyvstack[yysp], yylstack[yysp]); + if (rv) { + yyparser.yyError(rmCommonWS` + epsilon production rule action code block does not compile: ${rv} + + Erroneous area: + ${yylexer.prettyPrintRange(yylstack[yysp], yylstack[yysp - 1])} + `); + } + this.$.push(yyvstack[yysp]); + } + if (this.$.length === 1) { + this.$ = this.$[0]; + } + break; + +case 88: + /*! Production:: handle_action : EPSILON error */ + + // default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-): + this.$ = yyvstack[yysp - 1]; + this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-) + + + // TODO ... + yyparser.yyError(rmCommonWS` + %epsilon rule action declaration error? + + Erroneous area: + ${yylexer.prettyPrintRange(yylstack[yysp], yylstack[yysp - 1])} + `); + break; + +case 89: + /*! Production:: handle : handle suffixed_expression */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = yyvstack[yysp - 1]; + this.$.push(yyvstack[yysp]); + break; + +case 90: + /*! Production:: handle : %epsilon */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(null, null, null, null, true); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = []; + break; + +case 91: + /*! Production:: handle_sublist : handle_sublist "|" handle */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 2, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = yyvstack[yysp - 2]; + this.$.push(yyvstack[yysp].join(' ')); + break; + +case 92: + /*! Production:: handle_sublist : handle */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yylstack[yysp]; + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = [yyvstack[yysp].join(' ')]; + break; + +case 93: + /*! 
Production:: suffixed_expression : expression suffix ALIAS */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 2, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = yyvstack[yysp - 2] + yyvstack[yysp - 1] + "[" + yyvstack[yysp] + "]"; + break; + +case 94: + /*! Production:: suffixed_expression : expression suffix */ +case 123: + /*! Production:: action_comments_body : action_comments_body ACTION_BODY */ +case 129: + /*! Production:: module_code_chunk : module_code_chunk CODE */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = yyvstack[yysp - 1] + yyvstack[yysp]; + break; + +case 96: + /*! Production:: expression : EOF_ID */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yylstack[yysp]; + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = '$end'; + break; + +case 97: + /*! Production:: expression : STRING */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yylstack[yysp]; + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + // Re-encode the string *anyway* as it will + // be made part of the rule rhs a.k.a. production (type: *string*) again and we want + // to be able to handle all tokens, including *significant space* + // encoded as literal tokens in a grammar such as this: `rule: A ' ' B`. + this.$ = dquote(yyvstack[yysp]); + break; + +case 98: + /*! Production:: expression : "(" handle_sublist ")" */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 2, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = '(' + yyvstack[yysp - 1].join(' | ') + ')'; + break; + +case 99: + /*! Production:: expression : "(" handle_sublist error */ + + // default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-): + this.$ = yyvstack[yysp - 2]; + this._$ = yyparser.yyMergeLocationInfo(yysp - 2, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-) + + + yyparser.yyError(rmCommonWS` + Seems you did not correctly bracket a grammar rule sublist in '( ... )' brackets. + + Erroneous area: + ${yylexer.prettyPrintRange(yylstack[yysp], yylstack[yysp - 2])} + `); + break; + +case 104: + /*! Production:: prec : PREC symbol */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = { prec: yyvstack[yysp] }; + break; + +case 105: + /*! Production:: prec : PREC error */ + + // default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-): + this.$ = yyvstack[yysp - 1]; + this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-) + + + // TODO ... + yyparser.yyError(rmCommonWS` + %prec precedence override declaration error? 
+ + Erroneous precedence declaration: + ${yylexer.prettyPrintRange(yylstack[yysp], yylstack[yysp - 1])} + `); + break; + +case 106: + /*! Production:: prec : %epsilon */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(null, null, null, null, true); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = null; + break; + +case 111: + /*! Production:: action_ne : "{" action_body error */ + + // default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-): + this.$ = yyvstack[yysp - 2]; + this._$ = yyparser.yyMergeLocationInfo(yysp - 2, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-) + + + yyparser.yyError(rmCommonWS` + Seems you did not correctly bracket a parser rule action block in curly braces: '{ ... }'. + + Erroneous area: + ${yylexer.prettyPrintRange(yylstack[yysp], yylstack[yysp - 2])} + `); + break; + +case 115: + /*! Production:: action : ARROW_ACTION */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yylstack[yysp]; + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = '$$ = ' + yyvstack[yysp]; + break; + +case 119: + /*! Production:: action_body : action_body "{" action_body "}" action_comments_body */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 4, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = yyvstack[yysp - 4] + yyvstack[yysp - 3] + yyvstack[yysp - 2] + yyvstack[yysp - 1] + yyvstack[yysp]; + break; + +case 120: + /*! Production:: action_body : action_body "{" action_body "}" */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 3, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = yyvstack[yysp - 3] + yyvstack[yysp - 2] + yyvstack[yysp - 1] + yyvstack[yysp]; + break; + +case 121: + /*! Production:: action_body : action_body "{" action_body error */ + + // default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-): + this.$ = yyvstack[yysp - 3]; + this._$ = yyparser.yyMergeLocationInfo(yysp - 3, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-) + + + yyparser.yyError(rmCommonWS` + Seems you did not correctly match curly braces '{ ... }' in a parser rule action block. + + Erroneous area: + ${yylexer.prettyPrintRange(yylstack[yysp], yylstack[yysp - 2])} + `); + break; + +case 125: + /*! Production:: extra_parser_module_code : optional_module_code_chunk include_macro_code extra_parser_module_code */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 2, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = yyvstack[yysp - 2] + yyvstack[yysp - 1] + yyvstack[yysp]; + break; + +case 126: + /*! 
Production:: include_macro_code : INCLUDE PATH */
+
+    // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-):
+    this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp);
+    // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-)
+
+
+    var fileContent = fs.readFileSync(yyvstack[yysp], { encoding: 'utf-8' });
+    var rv = checkActionBlock(fileContent);
+    if (rv) {
+        yyparser.yyError(rmCommonWS`
+            included action code file "${yyvstack[yysp]}" does not compile: ${rv}
+
+            Erroneous area:
+            ${yylexer.prettyPrintRange(yylstack[yysp], yylstack[yysp - 1])}
+        `);
+    }
+    // And no, we don't support nested '%include':
+    this.$ = '\n// Included by Jison: ' + yyvstack[yysp] + ':\n\n' + fileContent + '\n\n// End Of Include by Jison: ' + yyvstack[yysp] + '\n\n';
+    break;
+
+case 127:
+    /*! Production:: include_macro_code : INCLUDE error */
+
+    // default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-):
+    this.$ = yyvstack[yysp - 1];
+    this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp);
+    // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-)
+
+
+    yyparser.yyError(rmCommonWS`
+        %include MUST be followed by a valid file path.
+
+        Erroneous path:
+    ` + yylexer.prettyPrintRange(yylstack[yysp], yylstack[yysp - 1]));
+    break;
+
+case 130:
+    /*! Production:: module_code_chunk : error */
+
+    // default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-):
+    this.$ = yyvstack[yysp];
+    this._$ = yylstack[yysp];
+    // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-)
+
+
+    // TODO ...
+    yyparser.yyError(rmCommonWS`
+        module code declaration error?
+
+        Erroneous area:
+    ` + yylexer.prettyPrintRange(yylstack[yysp]));
+    break;
+
+case 164:       // === NO_ACTION[1] :: ensures that anyone (but us) using this new state will fail dramatically!
+    // error recovery reduction action (action generated by jison,
+    // using the user-specified `%code error_recovery_reduction` %{...%}
+    // code chunk below.
+ + + break; + +} +}, +table: bt({ + len: u([ + 20, + 1, + 25, + 5, + 19, + 18, + 3, + 18, + 18, + 5, + s, + [18, 8], + 4, + 5, + 6, + 2, + s, + [6, 4, -1], + 3, + 3, + 4, + 8, + 1, + 18, + 18, + 26, + c, + [18, 3], + 1, + 4, + 21, + 3, + 3, + 5, + 5, + s, + [3, 3], + 22, + 18, + 20, + 25, + 25, + 24, + 24, + 22, + s, + [18, 3], + 3, + 19, + 2, + 4, + 1, + 1, + 7, + 7, + c, + [40, 3], + 17, + 4, + 20, + 18, + 23, + s, + [18, 6], + 6, + 21, + 21, + 18, + 20, + 18, + 2, + 18, + 4, + 2, + s, + [1, 3], + s, + [3, 4], + 4, + 3, + 5, + 3, + 15, + 11, + 2, + 2, + 19, + 20, + 18, + c, + [104, 3], + 4, + 4, + s, + [2, 4], + 7, + 3, + 4, + 16, + 1, + 4, + 10, + 14, + c, + [122, 3], + 18, + 18, + 9, + s, + [3, 4], + 14, + 14, + 18, + 21, + 21, + 6, + 4, + c, + [50, 5], + 7, + 7, + s, + [15, 4], + 3, + 9, + 3, + 14, + 18, + 18, + 8, + 5, + 3, + 9, + 4 +]), + symbol: u([ + 2, + s, + [14, 10, 1], + 27, + s, + [31, 5, 1], + 44, + 47, + 50, + 1, + c, + [21, 18], + 51, + 55, + s, + [58, 4, 1], + 89, + 15, + 24, + 44, + 49, + 69, + c, + [31, 19], + c, + [18, 19], + 24, + 83, + c, + [39, 38], + 36, + 63, + 65, + c, + [41, 37], + c, + [18, 108], + 24, + 26, + 53, + 2, + 24, + 25, + 26, + 52, + c, + [9, 3], + 62, + 82, + 83, + 2, + 45, + c, + [8, 7], + 24, + 26, + c, + [5, 3], + 25, + 56, + 57, + c, + [9, 3], + c, + [3, 6], + c, + [266, 3], + 48, + c, + [275, 3], + 70, + 71, + 72, + 83, + 89, + c, + [278, 38], + 4, + 5, + 6, + 12, + s, + [14, 11, 1], + 26, + c, + [24, 6], + 37, + 42, + c, + [152, 37], + 24, + 64, + 68, + 83, + 24, + c, + [119, 3], + 54, + c, + [27, 11], + c, + [67, 8], + 44, + 54, + c, + [147, 6], + 12, + 15, + 44, + 84, + 89, + c, + [5, 8], + c, + [3, 6], + c, + [46, 20], + c, + [201, 3], + c, + [113, 28], + c, + [40, 9], + c, + [177, 23], + c, + [176, 3], + c, + [25, 24], + 1, + c, + [26, 4], + c, + [25, 11], + c, + [73, 7], + 46, + c, + [24, 24], + c, + [158, 51], + c, + [18, 25], + 25, + 28, + 57, + c, + [21, 12], + 28, + c, + [22, 8], + 2, + 3, + 25, + 28, + s, + [1, 3], + 2, + 44, + 46, + 88, + 90, + 91, + c, + [425, 3], + 24, + c, + [433, 3], + c, + [440, 3], + c, + [3, 3], + c, + [13, 4], + c, + [153, 4], + 7, + 12, + 15, + 24, + 26, + 38, + 40, + 41, + 42, + 44, + 74, + 75, + 76, + 2, + 5, + 26, + 73, + c, + [151, 12], + c, + [94, 7], + c, + [307, 38], + 37, + 44, + 66, + 67, + c, + [685, 109], + 12, + 13, + 43, + 86, + 87, + c, + [349, 14], + c, + [445, 11], + c, + [84, 46], + c, + [504, 10], + c, + [348, 19], + c, + [58, 19], + 25, + 29, + 30, + c, + [346, 5], + 1, + 44, + 89, + 1, + c, + [483, 3], + c, + [3, 6], + c, + [339, 3], + c, + [121, 3], + c, + [496, 3], + c, + [8, 5], + c, + [349, 8], + c, + [348, 4], + 78, + 79, + 81, + c, + [568, 5], + 15, + 42, + 44, + 84, + 85, + 89, + 2, + 5, + 2, + 5, + c, + [359, 19], + c, + [19, 11], + c, + [142, 8], + c, + [337, 30], + c, + [180, 26], + c, + [284, 3], + c, + [287, 4], + c, + [4, 4], + 25, + 28, + 25, + 28, + c, + [4, 4], + c, + [517, 8], + c, + [168, 6], + c, + [507, 14], + c, + [506, 3], + c, + [189, 7], + c, + [162, 8], + s, + [4, 5, 1], + c, + [190, 8], + c, + [1024, 6], + s, + [4, 9, 1], + c, + [22, 3], + s, + [39, 4, 1], + 44, + 80, + c, + [19, 18], + c, + [18, 37], + c, + [16, 3], + c, + [88, 3], + 76, + 77, + c, + [292, 6], + c, + [3, 6], + c, + [144, 14], + c, + [14, 15], + c, + [480, 39], + c, + [21, 21], + c, + [549, 6], + c, + [6, 3], + 1, + c, + [111, 12], + c, + [234, 7], + c, + [7, 7], + c, + [238, 10], + c, + [179, 11], + c, + [15, 40], + 6, + 8, + c, + [209, 7], + 78, + 79, + c, + [374, 4], + c, + [313, 14], + c, + 
[271, 43], + c, + [164, 4], + c, + [169, 4], + c, + [78, 12], + 43 +]), + type: u([ + s, + [2, 18], + 0, + 0, + 1, + c, + [21, 20], + s, + [0, 5], + c, + [10, 5], + s, + [2, 39], + c, + [40, 41], + c, + [41, 40], + s, + [2, 108], + c, + [148, 5], + c, + [239, 6], + c, + [159, 6], + c, + [253, 10], + c, + [176, 14], + c, + [36, 7], + c, + [197, 102], + c, + [103, 7], + c, + [108, 21], + c, + [21, 10], + c, + [423, 36], + c, + [373, 149], + c, + [158, 67], + c, + [57, 32], + c, + [322, 8], + c, + [98, 26], + c, + [489, 7], + c, + [721, 173], + c, + [462, 131], + c, + [130, 37], + c, + [375, 11], + c, + [818, 45], + c, + [223, 79], + c, + [124, 24], + c, + [986, 15], + c, + [38, 19], + c, + [57, 20], + c, + [157, 62], + c, + [443, 106], + c, + [106, 103], + c, + [103, 62], + c, + [1248, 16], + c, + [78, 6] +]), + state: u([ + 1, + 2, + 5, + 14, + 12, + 13, + 8, + 20, + 11, + 29, + 28, + 31, + 34, + 36, + 38, + 42, + 47, + 49, + 50, + 54, + 49, + 50, + 56, + 50, + 58, + 60, + 62, + 65, + 68, + 69, + 70, + 67, + 72, + 71, + 73, + 74, + 78, + 79, + 82, + 83, + 82, + 84, + 50, + 84, + 50, + 86, + 92, + 94, + 93, + 97, + 69, + 70, + 98, + 100, + 101, + 103, + 105, + 106, + 107, + 110, + 111, + 117, + 124, + 126, + 123, + 133, + 131, + 82, + 137, + 142, + 94, + 93, + 143, + 101, + 133, + 146, + 82, + 147, + 50, + 149, + 154, + 153, + 155, + 111, + 124, + 126, + 162, + 163, + 124, + 126 +]), + mode: u([ + s, + [2, 18], + s, + [1, 18], + c, + [21, 4], + s, + [2, 36], + c, + [42, 5], + c, + [38, 34], + c, + [77, 38], + s, + [2, 108], + s, + [1, 20], + c, + [30, 15], + c, + [134, 100], + c, + [106, 4], + c, + [335, 26], + c, + [151, 16], + c, + [376, 48], + c, + [347, 120], + c, + [63, 75], + c, + [13, 9], + c, + [23, 4], + c, + [4, 3], + c, + [587, 6], + c, + [427, 12], + c, + [9, 15], + c, + [335, 13], + c, + [389, 39], + c, + [45, 43], + c, + [509, 77], + c, + [762, 121], + c, + [129, 9], + c, + [756, 14], + c, + [334, 14], + c, + [41, 6], + c, + [367, 5], + c, + [784, 37], + c, + [208, 63], + c, + [1142, 20], + c, + [1081, 10], + c, + [487, 14], + c, + [22, 9], + c, + [151, 17], + c, + [221, 10], + c, + [803, 156], + c, + [318, 61], + c, + [216, 50], + c, + [457, 7], + c, + [455, 38], + c, + [123, 34], + c, + [1206, 8], + 1 +]), + goto: u([ + s, + [10, 18], + 4, + 3, + 10, + 6, + 7, + 9, + s, + [15, 5, 1], + 24, + 22, + 23, + 25, + 26, + 27, + 21, + s, + [6, 3], + 30, + s, + [11, 18], + s, + [9, 18], + 32, + 33, + s, + [13, 18], + s, + [14, 18], + 35, + 66, + 37, + s, + [16, 18], + s, + [17, 18], + s, + [18, 18], + s, + [19, 18], + s, + [20, 18], + s, + [21, 18], + s, + [22, 18], + s, + [23, 18], + 39, + 40, + 41, + s, + [43, 4, 1], + 48, + 33, + 51, + 53, + 52, + 55, + 33, + 51, + 57, + 33, + 51, + 59, + 61, + s, + [56, 3], + s, + [57, 3], + s, + [58, 3], + 4, + 63, + 64, + 66, + 33, + 21, + 3, + s, + [12, 18], + s, + [29, 18], + s, + [109, 26], + s, + [15, 18], + s, + [30, 18], + 33, + 67, + 75, + 76, + 77, + s, + [31, 11], + c, + [13, 9], + s, + [35, 3], + s, + [36, 3], + 80, + 81, + 21, + c, + [3, 3], + s, + [32, 3], + s, + [33, 3], + s, + [34, 3], + s, + [54, 11], + 33, + 51, + s, + [54, 7], + s, + [55, 18], + s, + [60, 20], + s, + [107, 25], + s, + [108, 25], + s, + [126, 24], + s, + [127, 24], + s, + [50, 11], + 33, + 51, + s, + [50, 7], + s, + [51, 18], + s, + [52, 18], + s, + [53, 18], + 61, + 85, + s, + [41, 12], + 87, + s, + [41, 6], + 43, + 43, + 89, + 88, + 44, + 44, + 90, + 91, + 132, + 96, + 132, + 95, + s, + [72, 3], + 33, + s, + [7, 3], + s, + [8, 3], + s, + [74, 4], + 99, + s, + 
[90, 8], + 102, + s, + [90, 4], + 81, + 81, + 104, + s, + [61, 11], + 33, + s, + [61, 7], + s, + [62, 18], + s, + [71, 12], + 109, + s, + [71, 6], + 108, + 71, + s, + [24, 18], + s, + [25, 18], + s, + [37, 18], + s, + [38, 18], + s, + [26, 18], + s, + [27, 18], + s, + [117, 3], + s, + [112, 22], + s, + [113, 21], + s, + [28, 18], + s, + [59, 20], + s, + [39, 18], + 42, + 42, + s, + [40, 18], + 116, + 115, + 113, + 114, + 49, + 49, + 1, + 2, + 5, + 124, + 21, + 131, + 131, + 118, + s, + [128, 3], + s, + [130, 3], + s, + [73, 4], + 119, + 121, + 120, + 77, + 77, + 122, + 77, + 77, + s, + [83, 3], + s, + [106, 3], + 130, + 106, + 106, + 127, + 129, + 128, + 125, + 106, + 106, + 132, + s, + [116, 3], + 80, + 81, + 134, + 21, + 136, + 135, + 80, + 80, + s, + [70, 19], + s, + [65, 11], + 109, + s, + [65, 7], + s, + [64, 18], + s, + [68, 19], + s, + [69, 18], + 139, + 140, + 138, + s, + [118, 3], + 141, + s, + [122, 4], + 45, + 45, + 46, + 46, + 47, + 47, + 48, + 48, + c, + [494, 4], + s, + [129, 3], + s, + [75, 4], + 144, + c, + [487, 13], + 145, + s, + [76, 4], + c, + [153, 7], + s, + [89, 14], + 148, + 33, + 51, + s, + [100, 6], + 150, + 151, + 152, + s, + [100, 9], + s, + [95, 18], + s, + [96, 18], + s, + [97, 18], + s, + [90, 7], + s, + [87, 3], + s, + [88, 3], + s, + [114, 3], + s, + [115, 3], + s, + [78, 14], + s, + [79, 14], + s, + [63, 18], + s, + [110, 21], + s, + [111, 21], + c, + [526, 4], + s, + [123, 4], + 125, + s, + [82, 3], + s, + [84, 3], + s, + [85, 3], + s, + [86, 3], + s, + [104, 7], + s, + [105, 7], + s, + [94, 10], + 156, + s, + [94, 4], + s, + [101, 15], + s, + [102, 15], + s, + [103, 15], + 158, + 159, + 157, + 92, + 92, + 130, + 92, + c, + [465, 3], + 161, + 140, + 160, + s, + [93, 14], + s, + [98, 18], + s, + [99, 18], + s, + [90, 7], + s, + [120, 3], + 112, + s, + [121, 3], + 91, + 91, + 130, + 91, + c, + [74, 3], + s, + [119, 3], + 141 +]) +}), +defaultActions: bda({ + idx: u([ + 0, + 3, + 5, + 7, + 8, + s, + [10, 8, 1], + 25, + 26, + 27, + s, + [30, 6, 1], + 37, + 40, + 41, + 44, + 45, + 46, + s, + [48, 6, 1], + 55, + 56, + 57, + 60, + 66, + 67, + 68, + 72, + s, + [74, 6, 1], + s, + [81, 7, 1], + s, + [89, 4, 1], + 95, + 96, + 97, + 100, + 104, + 105, + 107, + 108, + 109, + s, + [112, 5, 1], + 118, + 119, + 122, + 124, + s, + [127, 13, 1], + s, + [141, 8, 1], + 150, + 151, + 152, + s, + [156, 4, 1], + 161 +]), + goto: u([ + 10, + 6, + 9, + 13, + 14, + s, + [16, 8, 1], + 56, + 57, + 58, + 3, + 12, + 29, + 109, + 15, + 30, + 67, + 35, + 36, + 32, + 33, + 34, + 55, + 60, + 107, + 108, + 126, + 127, + 51, + 52, + 53, + 43, + 7, + 8, + 74, + 62, + 24, + 25, + 37, + 38, + 26, + 27, + 112, + 113, + 28, + 59, + 39, + 42, + 40, + 49, + 1, + 2, + 5, + 128, + 130, + 73, + 83, + 80, + 70, + 64, + 68, + 69, + 122, + s, + [45, 4, 1], + 129, + 75, + 76, + 89, + 95, + 96, + 97, + 90, + 87, + 88, + 114, + 115, + 78, + 79, + 63, + 110, + 111, + 123, + 125, + 82, + 84, + 85, + 86, + 104, + 105, + 101, + 102, + 103, + 93, + 98, + 99, + 90, + 121 +]) +}), +parseError: function parseError(str, hash, ExceptionClass) { + if (hash.recoverable && typeof this.trace === 'function') { + this.trace(str); + hash.destroy(); // destroy... well, *almost*! 
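+        // Recoverable error with a `trace` handler available: the error is only
+        // traced and the error hash is released again; note that `destroy()`
+        // preserves the simple `recoverable` member -- hence the "*almost*" above.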
+ } else { + if (!ExceptionClass) { + ExceptionClass = this.JisonParserError; + } + throw new ExceptionClass(str, hash); + } +}, +parse: function parse(input) { + var self = this; + var stack = new Array(128); // token stack: stores token which leads to state at the same index (column storage) + var sstack = new Array(128); // state stack: stores states (column storage) + + var vstack = new Array(128); // semantic value stack + var lstack = new Array(128); // location stack + var table = this.table; + var sp = 0; // 'stack pointer': index into the stacks + var yyloc; + + var symbol = 0; + var preErrorSymbol = 0; + var lastEofErrorStateDepth = 0; + var recoveringErrorInfo = null; + var recovering = 0; // (only used when the grammar contains error recovery rules) + var TERROR = this.TERROR; + var EOF = this.EOF; + var ERROR_RECOVERY_TOKEN_DISCARD_COUNT = (this.options.errorRecoveryTokenDiscardCount | 0) || 3; + var NO_ACTION = [0, 164 /* === table.length :: ensures that anyone using this new state will fail dramatically! */]; + + var lexer; + if (this.__lexer__) { + lexer = this.__lexer__; + } else { + lexer = this.__lexer__ = Object.create(this.lexer); + } + + var sharedState_yy = { + parseError: undefined, + quoteName: undefined, + lexer: undefined, + parser: undefined, + pre_parse: undefined, + post_parse: undefined, + pre_lex: undefined, + post_lex: undefined // WARNING: must be written this way for the code expanders to work correctly in both ES5 and ES6 modes! + }; + + var ASSERT; + if (typeof assert !== 'function') { + ASSERT = function JisonAssert(cond, msg) { + if (!cond) { + throw new Error('assertion failed: ' + (msg || '***')); + } + }; + } else { + ASSERT = assert; + } + + this.yyGetSharedState = function yyGetSharedState() { + return sharedState_yy; + }; + + + this.yyGetErrorInfoTrack = function yyGetErrorInfoTrack() { + return recoveringErrorInfo; + }; + + + // shallow clone objects, straight copy of simple `src` values + // e.g. `lexer.yytext` MAY be a complex value object, + // rather than a simple string/value. + function shallow_copy(src) { + if (typeof src === 'object') { + var dst = {}; + for (var k in src) { + if (Object.prototype.hasOwnProperty.call(src, k)) { + dst[k] = src[k]; + } + } + return dst; + } + return src; + } + function shallow_copy_noclobber(dst, src) { + for (var k in src) { + if (typeof dst[k] === 'undefined' && Object.prototype.hasOwnProperty.call(src, k)) { + dst[k] = src[k]; + } + } + } + function copy_yylloc(loc) { + var rv = shallow_copy(loc); + if (rv && rv.range) { + rv.range = rv.range.slice(0); + } + return rv; + } + + // copy state + shallow_copy_noclobber(sharedState_yy, this.yy); + + sharedState_yy.lexer = lexer; + sharedState_yy.parser = this; + + + + + + // *Always* setup `yyError`, `YYRECOVERING`, `yyErrOk` and `yyClearIn` functions as it is paramount + // to have *their* closure match ours -- if we only set them up once, + // any subsequent `parse()` runs will fail in very obscure ways when + // these functions are invoked in the user action code block(s) as + // their closure will still refer to the `parse()` instance which set + // them up. Hence we MUST set them up at the start of every `parse()` run! + if (this.yyError) { + this.yyError = function yyError(str /*, ...args */) { + + + + + + + + + + + + var error_rule_depth = (this.options.parserErrorsAreRecoverable ? 
locateNearestErrorRecoveryRule(state) : -1); + var expected = this.collect_expected_token_set(state); + var hash = this.constructParseErrorInfo(str, null, expected, (error_rule_depth >= 0)); + // append to the old one? + if (recoveringErrorInfo) { + var esp = recoveringErrorInfo.info_stack_pointer; + + recoveringErrorInfo.symbol_stack[esp] = symbol; + var v = this.shallowCopyErrorInfo(hash); + v.yyError = true; + v.errorRuleDepth = error_rule_depth; + v.recovering = recovering; + // v.stackSampleLength = error_rule_depth + EXTRA_STACK_SAMPLE_DEPTH; + + recoveringErrorInfo.value_stack[esp] = v; + recoveringErrorInfo.location_stack[esp] = copy_yylloc(lexer.yylloc); + recoveringErrorInfo.state_stack[esp] = newState || NO_ACTION[1]; + + ++esp; + recoveringErrorInfo.info_stack_pointer = esp; + } else { + recoveringErrorInfo = this.shallowCopyErrorInfo(hash); + recoveringErrorInfo.yyError = true; + recoveringErrorInfo.errorRuleDepth = error_rule_depth; + recoveringErrorInfo.recovering = recovering; + } + + + // Add any extra args to the hash under the name `extra_error_attributes`: + var args = Array.prototype.slice.call(arguments, 1); + if (args.length) { + hash.extra_error_attributes = args; + } + + var r = this.parseError(str, hash, this.JisonParserError); + return r; + }; + } + + + + + + + + // Does the shared state override the default `parseError` that already comes with this instance? + if (typeof sharedState_yy.parseError === 'function') { + this.parseError = function parseErrorAlt(str, hash, ExceptionClass) { + if (!ExceptionClass) { + ExceptionClass = this.JisonParserError; + } + return sharedState_yy.parseError.call(this, str, hash, ExceptionClass); + }; + } else { + this.parseError = this.originalParseError; + } + + // Does the shared state override the default `quoteName` that already comes with this instance? + if (typeof sharedState_yy.quoteName === 'function') { + this.quoteName = function quoteNameAlt(id_str) { + return sharedState_yy.quoteName.call(this, id_str); + }; + } else { + this.quoteName = this.originalQuoteName; + } + + // set up the cleanup function; make it an API so that external code can re-use this one in case of + // calamities or when the `%options no-try-catch` option has been specified for the grammar, in which + // case this parse() API method doesn't come with a `finally { ... }` block any more! + // + // NOTE: as this API uses parse() as a closure, it MUST be set again on every parse() invocation, + // or else your `sharedState`, etc. references will be *wrong*! + this.cleanupAfterParse = function parser_cleanupAfterParse(resultValue, invoke_post_methods, do_not_nuke_errorinfos) { + var rv; + + if (invoke_post_methods) { + var hash; + + if (sharedState_yy.post_parse || this.post_parse) { + // create an error hash info instance: we re-use this API in a **non-error situation** + // as this one delivers all parser internals ready for access by userland code. + hash = this.constructParseErrorInfo(null /* no error! */, null /* no exception! 
*/, null, false); + } + + if (sharedState_yy.post_parse) { + rv = sharedState_yy.post_parse.call(this, sharedState_yy, resultValue, hash); + if (typeof rv !== 'undefined') resultValue = rv; + } + if (this.post_parse) { + rv = this.post_parse.call(this, sharedState_yy, resultValue, hash); + if (typeof rv !== 'undefined') resultValue = rv; + } + + // cleanup: + if (hash && hash.destroy) { + hash.destroy(); + } + } + + if (this.__reentrant_call_depth > 1) return resultValue; // do not (yet) kill the sharedState when this is a reentrant run. + + // clean up the lingering lexer structures as well: + if (lexer.cleanupAfterLex) { + lexer.cleanupAfterLex(do_not_nuke_errorinfos); + } + + // prevent lingering circular references from causing memory leaks: + if (sharedState_yy) { + sharedState_yy.lexer = undefined; + sharedState_yy.parser = undefined; + if (lexer.yy === sharedState_yy) { + lexer.yy = undefined; + } + } + sharedState_yy = undefined; + this.parseError = this.originalParseError; + this.quoteName = this.originalQuoteName; + + // nuke the vstack[] array at least as that one will still reference obsoleted user values. + // To be safe, we nuke the other internal stack columns as well... + stack.length = 0; // fastest way to nuke an array without overly bothering the GC + sstack.length = 0; + lstack.length = 0; + vstack.length = 0; + sp = 0; + + // nuke the error hash info instances created during this run. + // Userland code must COPY any data/references + // in the error hash instance(s) it is more permanently interested in. + if (!do_not_nuke_errorinfos) { + for (var i = this.__error_infos.length - 1; i >= 0; i--) { + var el = this.__error_infos[i]; + if (el && typeof el.destroy === 'function') { + el.destroy(); + } + } + this.__error_infos.length = 0; + + + for (var i = this.__error_recovery_infos.length - 1; i >= 0; i--) { + var el = this.__error_recovery_infos[i]; + if (el && typeof el.destroy === 'function') { + el.destroy(); + } + } + this.__error_recovery_infos.length = 0; + + if (recoveringErrorInfo && typeof recoveringErrorInfo.destroy === 'function') { + recoveringErrorInfo.destroy(); + recoveringErrorInfo = undefined; + } + + + } + + return resultValue; + }; + + // merge yylloc info into a new yylloc instance. + // + // `first_index` and `last_index` MAY be UNDEFINED/NULL or these are indexes into the `lstack[]` location stack array. + // + // `first_yylloc` and `last_yylloc` MAY be UNDEFINED/NULL or explicit (custom or regular) `yylloc` instances, in which + // case these override the corresponding first/last indexes. + // + // `dont_look_back` is an optional flag (default: FALSE), which instructs this merge operation NOT to search + // through the parse location stack for a location, which would otherwise be used to construct the new (epsilon!) + // yylloc info. + // + // Note: epsilon rule's yylloc situation is detected by passing both `first_index` and `first_yylloc` as UNDEFINED/NULL. 
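+    //
+    // Typical invocations, as emitted in the rule actions further above:
+    //
+    //     this._$ = yyparser.yyMergeLocationInfo(yysp - 2, yysp);               // merge the spans of stack slots yysp-2 .. yysp
+    //     this._$ = yyparser.yyMergeLocationInfo(null, null, null, null, true); // epsilon rule: no own span, no look-back
+    //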
+ this.yyMergeLocationInfo = function parser_yyMergeLocationInfo(first_index, last_index, first_yylloc, last_yylloc, dont_look_back) { + var i1 = first_index | 0, + i2 = last_index | 0; + var l1 = first_yylloc, + l2 = last_yylloc; + var rv; + + // rules: + // - first/last yylloc entries override first/last indexes + + if (!l1) { + if (first_index != null) { + for (var i = i1; i <= i2; i++) { + l1 = lstack[i]; + if (l1) { + break; + } + } + } + } + + if (!l2) { + if (last_index != null) { + for (var i = i2; i >= i1; i--) { + l2 = lstack[i]; + if (l2) { + break; + } + } + } + } + + // - detect if an epsilon rule is being processed and act accordingly: + if (!l1 && first_index == null) { + // epsilon rule span merger. With optional look-ahead in l2. + if (!dont_look_back) { + for (var i = (i1 || sp) - 1; i >= 0; i--) { + l1 = lstack[i]; + if (l1) { + break; + } + } + } + if (!l1) { + if (!l2) { + // when we still don't have any valid yylloc info, we're looking at an epsilon rule + // without look-ahead and no preceding terms and/or `dont_look_back` set: + // in that case we ca do nothing but return NULL/UNDEFINED: + return undefined; + } else { + // shallow-copy L2: after all, we MAY be looking + // at unconventional yylloc info objects... + rv = shallow_copy(l2); + if (rv.range) { + // shallow copy the yylloc ranges info to prevent us from modifying the original arguments' entries: + rv.range = rv.range.slice(0); + } + return rv; + } + } else { + // shallow-copy L1, then adjust first col/row 1 column past the end. + rv = shallow_copy(l1); + rv.first_line = rv.last_line; + rv.first_column = rv.last_column; + if (rv.range) { + // shallow copy the yylloc ranges info to prevent us from modifying the original arguments' entries: + rv.range = rv.range.slice(0); + rv.range[0] = rv.range[1]; + } + + if (l2) { + // shallow-mixin L2, then adjust last col/row accordingly. + shallow_copy_noclobber(rv, l2); + rv.last_line = l2.last_line; + rv.last_column = l2.last_column; + if (rv.range && l2.range) { + rv.range[1] = l2.range[1]; + } + } + return rv; + } + } + + if (!l1) { + l1 = l2; + l2 = null; + } + if (!l1) { + return undefined; + } + + // shallow-copy L1|L2, before we try to adjust the yylloc values: after all, we MAY be looking + // at unconventional yylloc info objects... + rv = shallow_copy(l1); + + // first_line: ..., + // first_column: ..., + // last_line: ..., + // last_column: ..., + if (rv.range) { + // shallow copy the yylloc ranges info to prevent us from modifying the original arguments' entries: + rv.range = rv.range.slice(0); + } + + if (l2) { + shallow_copy_noclobber(rv, l2); + rv.last_line = l2.last_line; + rv.last_column = l2.last_column; + if (rv.range && l2.range) { + rv.range[1] = l2.range[1]; + } + } + + return rv; + }; + + // NOTE: as this API uses parse() as a closure, it MUST be set again on every parse() invocation, + // or else your `lexer`, `sharedState`, etc. references will be *wrong*! 
+ this.constructParseErrorInfo = function parser_constructParseErrorInfo(msg, ex, expected, recoverable) { + var pei = { + errStr: msg, + exception: ex, + text: lexer.match, + value: lexer.yytext, + token: this.describeSymbol(symbol) || symbol, + token_id: symbol, + line: lexer.yylineno, + loc: copy_yylloc(lexer.yylloc), + expected: expected, + recoverable: recoverable, + state: state, + action: action, + new_state: newState, + symbol_stack: stack, + state_stack: sstack, + value_stack: vstack, + location_stack: lstack, + stack_pointer: sp, + yy: sharedState_yy, + lexer: lexer, + parser: this, + + // and make sure the error info doesn't stay due to potential + // ref cycle via userland code manipulations. + // These would otherwise all be memory leak opportunities! + // + // Note that only array and object references are nuked as those + // constitute the set of elements which can produce a cyclic ref. + // The rest of the members is kept intact as they are harmless. + destroy: function destructParseErrorInfo() { + // remove cyclic references added to error info: + // info.yy = null; + // info.lexer = null; + // info.value = null; + // info.value_stack = null; + // ... + var rec = !!this.recoverable; + for (var key in this) { + if (this.hasOwnProperty(key) && typeof key === 'object') { + this[key] = undefined; + } + } + this.recoverable = rec; + } + }; + // track this instance so we can `destroy()` it once we deem it superfluous and ready for garbage collection! + this.__error_infos.push(pei); + return pei; + }; + + // clone some parts of the (possibly enhanced!) errorInfo object + // to give them some persistence. + this.shallowCopyErrorInfo = function parser_shallowCopyErrorInfo(p) { + var rv = shallow_copy(p); + + // remove the large parts which can only cause cyclic references + // and are otherwise available from the parser kernel anyway. + delete rv.sharedState_yy; + delete rv.parser; + delete rv.lexer; + + // lexer.yytext MAY be a complex value object, rather than a simple string/value: + rv.value = shallow_copy(rv.value); + + // yylloc info: + rv.loc = copy_yylloc(rv.loc); + + // the 'expected' set won't be modified, so no need to clone it: + //rv.expected = rv.expected.slice(0); + + //symbol stack is a simple array: + rv.symbol_stack = rv.symbol_stack.slice(0); + // ditto for state stack: + rv.state_stack = rv.state_stack.slice(0); + // clone the yylloc's in the location stack?: + rv.location_stack = rv.location_stack.map(copy_yylloc); + // and the value stack may carry both simple and complex values: + // shallow-copy the latter. + rv.value_stack = rv.value_stack.map(shallow_copy); + + // and we don't bother with the sharedState_yy reference: + //delete rv.yy; + + // now we prepare for tracking the COMBINE actions + // in the error recovery code path: + // + // as we want to keep the maximum error info context, we + // *scan* the state stack to find the first *empty* slot. + // This position will surely be AT OR ABOVE the current + // stack pointer, but we want to keep the 'used but discarded' + // part of the parse stacks *intact* as those slots carry + // error context that may be useful when you want to produce + // very detailed error diagnostic reports. + // + // ### Purpose of each stack pointer: + // + // - stack_pointer: points at the top of the parse stack + // **as it existed at the time of the error + // occurrence, i.e. 
at the time the stack + // snapshot was taken and copied into the + // errorInfo object.** + // - base_pointer: the bottom of the **empty part** of the + // stack, i.e. **the start of the rest of + // the stack space /above/ the existing + // parse stack. This section will be filled + // by the error recovery process as it + // travels the parse state machine to + // arrive at the resolving error recovery rule.** + // - info_stack_pointer: + // this stack pointer points to the **top of + // the error ecovery tracking stack space**, i.e. + // this stack pointer takes up the role of + // the `stack_pointer` for the error recovery + // process. Any mutations in the **parse stack** + // are **copy-appended** to this part of the + // stack space, keeping the bottom part of the + // stack (the 'snapshot' part where the parse + // state at the time of error occurrence was kept) + // intact. + // - root_failure_pointer: + // copy of the `stack_pointer`... + // + for (var i = rv.stack_pointer; typeof rv.state_stack[i] !== 'undefined'; i++) { + // empty + } + rv.base_pointer = i; + rv.info_stack_pointer = i; + + rv.root_failure_pointer = rv.stack_pointer; + + // track this instance so we can `destroy()` it once we deem it superfluous and ready for garbage collection! + this.__error_recovery_infos.push(rv); + + return rv; + }; + + function lex() { + var token = lexer.lex(); + // if token isn't its numeric value, convert + if (typeof token !== 'number') { + token = self.symbols_[token] || token; + } + + return token || EOF; + } + + + var state, action, r, t; + var yyval = { + $: true, + _$: undefined, + yy: sharedState_yy + }; + var p; + var yyrulelen; + var this_production; + var newState; + var retval = false; + + + // Return the rule stack depth where the nearest error rule can be found. + // Return -1 when no error recovery rule was found. + function locateNearestErrorRecoveryRule(state) { + var stack_probe = sp - 1; + var depth = 0; + + // try to recover from error + for (;;) { + // check for error recovery rule in this state + + + + + + + + + + var t = table[state][TERROR] || NO_ACTION; + if (t[0]) { + // We need to make sure we're not cycling forever: + // once we hit EOF, even when we `yyerrok()` an error, we must + // prevent the core from running forever, + // e.g. when parent rules are still expecting certain input to + // follow after this, for example when you handle an error inside a set + // of braces which are matched by a parent rule in your grammar. + // + // Hence we require that every error handling/recovery attempt + // *after we've hit EOF* has a diminishing state stack: this means + // we will ultimately have unwound the state stack entirely and thus + // terminate the parse in a controlled fashion even when we have + // very complex error/recovery code interplay in the core + user + // action code blocks: + + + + + + + + + + if (symbol === EOF) { + if (!lastEofErrorStateDepth) { + lastEofErrorStateDepth = sp - 1 - depth; + } else if (lastEofErrorStateDepth <= sp - 1 - depth) { + + + + + + + + + + --stack_probe; // popStack(1): [symbol, action] + state = sstack[stack_probe]; + ++depth; + continue; + } + } + return depth; + } + if (state === 0 /* $accept rule */ || stack_probe < 1) { + + + + + + + + + + return -1; // No suitable error recovery rule available. 
+ } + --stack_probe; // popStack(1): [symbol, action] + state = sstack[stack_probe]; + ++depth; + } + } + + + try { + this.__reentrant_call_depth++; + + lexer.setInput(input, sharedState_yy); + + yyloc = lexer.yylloc; + lstack[sp] = yyloc; + vstack[sp] = null; + sstack[sp] = 0; + stack[sp] = 0; + ++sp; + + + + + + if (this.pre_parse) { + this.pre_parse.call(this, sharedState_yy); + } + if (sharedState_yy.pre_parse) { + sharedState_yy.pre_parse.call(this, sharedState_yy); + } + + newState = sstack[sp - 1]; + for (;;) { + // retrieve state number from top of stack + state = newState; // sstack[sp - 1]; + + // use default actions if available + if (this.defaultActions[state]) { + action = 2; + newState = this.defaultActions[state]; + } else { + // The single `==` condition below covers both these `===` comparisons in a single + // operation: + // + // if (symbol === null || typeof symbol === 'undefined') ... + if (!symbol) { + symbol = lex(); + } + // read action for current state and first input + t = (table[state] && table[state][symbol]) || NO_ACTION; + newState = t[1]; + action = t[0]; + + + + + + + + + + + + // handle parse error + if (!action) { + // first see if there's any chance at hitting an error recovery rule: + var error_rule_depth = locateNearestErrorRecoveryRule(state); + var errStr = null; + var errSymbolDescr = (this.describeSymbol(symbol) || symbol); + var expected = this.collect_expected_token_set(state); + + if (!recovering) { + // Report error + if (typeof lexer.yylineno === 'number') { + errStr = 'Parse error on line ' + (lexer.yylineno + 1) + ': '; + } else { + errStr = 'Parse error: '; + } + + if (typeof lexer.showPosition === 'function') { + errStr += '\n' + lexer.showPosition(79 - 10, 10) + '\n'; + } + if (expected.length) { + errStr += 'Expecting ' + expected.join(', ') + ', got unexpected ' + errSymbolDescr; + } else { + errStr += 'Unexpected ' + errSymbolDescr; + } + + p = this.constructParseErrorInfo(errStr, null, expected, (error_rule_depth >= 0)); + + // cleanup the old one before we start the new error info track: + if (recoveringErrorInfo && typeof recoveringErrorInfo.destroy === 'function') { + recoveringErrorInfo.destroy(); + } + recoveringErrorInfo = this.shallowCopyErrorInfo(p); + + r = this.parseError(p.errStr, p, this.JisonParserError); + + + + + + + + + + // Protect against overly blunt userland `parseError` code which *sets* + // the `recoverable` flag without properly checking first: + // we always terminate the parse when there's no recovery rule available anyhow! + if (!p.recoverable || error_rule_depth < 0) { + retval = r; + break; + } else { + // TODO: allow parseError callback to edit symbol and or state at the start of the error recovery process... 
+ } + } + + + + + + + + + + + var esp = recoveringErrorInfo.info_stack_pointer; + + // just recovered from another error + if (recovering === ERROR_RECOVERY_TOKEN_DISCARD_COUNT && error_rule_depth >= 0) { + // SHIFT current lookahead and grab another + recoveringErrorInfo.symbol_stack[esp] = symbol; + recoveringErrorInfo.value_stack[esp] = shallow_copy(lexer.yytext); + recoveringErrorInfo.location_stack[esp] = copy_yylloc(lexer.yylloc); + recoveringErrorInfo.state_stack[esp] = newState; // push state + ++esp; + + // Pick up the lexer details for the current symbol as that one is not 'look-ahead' any more: + + + + yyloc = lexer.yylloc; + + preErrorSymbol = 0; + symbol = lex(); + + + + + + + + + + } + + // try to recover from error + if (error_rule_depth < 0) { + ASSERT(recovering > 0); + recoveringErrorInfo.info_stack_pointer = esp; + + // barf a fatal hairball when we're out of look-ahead symbols and none hit a match + // while we are still busy recovering from another error: + var po = this.__error_infos[this.__error_infos.length - 1]; + if (!po) { + p = this.constructParseErrorInfo('Parsing halted while starting to recover from another error.', null, expected, false); + } else { + p = this.constructParseErrorInfo('Parsing halted while starting to recover from another error. Previous error which resulted in this fatal result: ' + po.errStr, null, expected, false); + p.extra_error_attributes = po; + } + retval = this.parseError(p.errStr, p, this.JisonParserError); + break; + } + + preErrorSymbol = (symbol === TERROR ? 0 : symbol); // save the lookahead token + symbol = TERROR; // insert generic error symbol as new lookahead + + const EXTRA_STACK_SAMPLE_DEPTH = 3; + + // REDUCE/COMBINE the pushed terms/tokens to a new ERROR token: + recoveringErrorInfo.symbol_stack[esp] = preErrorSymbol; + if (errStr) { + recoveringErrorInfo.value_stack[esp] = { + yytext: shallow_copy(lexer.yytext), + errorRuleDepth: error_rule_depth, + errorStr: errStr, + errorSymbolDescr: errSymbolDescr, + expectedStr: expected, + stackSampleLength: error_rule_depth + EXTRA_STACK_SAMPLE_DEPTH + }; + + + + + + + + + + } else { + recoveringErrorInfo.value_stack[esp] = { + yytext: shallow_copy(lexer.yytext), + errorRuleDepth: error_rule_depth, + stackSampleLength: error_rule_depth + EXTRA_STACK_SAMPLE_DEPTH + }; + } + recoveringErrorInfo.location_stack[esp] = copy_yylloc(lexer.yylloc); + recoveringErrorInfo.state_stack[esp] = newState || NO_ACTION[1]; + + ++esp; + recoveringErrorInfo.info_stack_pointer = esp; + + yyval.$ = recoveringErrorInfo; + yyval._$ = undefined; + + yyrulelen = error_rule_depth; + + + + + + + + + + r = this.performAction.call(yyval, yyloc, NO_ACTION[1], sp - 1, vstack, lstack); + + if (typeof r !== 'undefined') { + retval = r; + break; + } + + // pop off stack + sp -= yyrulelen; + + // and move the top entries + discarded part of the parse stacks onto the error info stack: + for (var idx = sp - EXTRA_STACK_SAMPLE_DEPTH, top = idx + yyrulelen; idx < top; idx++, esp++) { + recoveringErrorInfo.symbol_stack[esp] = stack[idx]; + recoveringErrorInfo.value_stack[esp] = shallow_copy(vstack[idx]); + recoveringErrorInfo.location_stack[esp] = copy_yylloc(lstack[idx]); + recoveringErrorInfo.state_stack[esp] = sstack[idx]; + } + + recoveringErrorInfo.symbol_stack[esp] = TERROR; + recoveringErrorInfo.value_stack[esp] = shallow_copy(yyval.$); + recoveringErrorInfo.location_stack[esp] = copy_yylloc(yyval._$); + + // goto new state = table[STATE][NONTERMINAL] + newState = sstack[sp - 1]; + + if 
(this.defaultActions[newState]) { + recoveringErrorInfo.state_stack[esp] = this.defaultActions[newState]; + } else { + t = (table[newState] && table[newState][symbol]) || NO_ACTION; + recoveringErrorInfo.state_stack[esp] = t[1]; + } + + ++esp; + recoveringErrorInfo.info_stack_pointer = esp; + + // allow N (default: 3) real symbols to be shifted before reporting a new error + recovering = ERROR_RECOVERY_TOKEN_DISCARD_COUNT; + + + + + + + + + + + // Now duplicate the standard parse machine here, at least its initial + // couple of rounds until the TERROR symbol is **pushed onto the parse stack**, + // as we wish to push something special then! + + + // Run the state machine in this copy of the parser state machine + // until we *either* consume the error symbol (and its related information) + // *or* we run into another error while recovering from this one + // *or* we execute a `reduce` action which outputs a final parse + // result (yes, that MAY happen!)... + + ASSERT(recoveringErrorInfo); + ASSERT(symbol === TERROR); + while (symbol) { + // retrieve state number from top of stack + state = newState; // sstack[sp - 1]; + + // use default actions if available + if (this.defaultActions[state]) { + action = 2; + newState = this.defaultActions[state]; + } else { + // read action for current state and first input + t = (table[state] && table[state][symbol]) || NO_ACTION; + newState = t[1]; + action = t[0]; + + + + + + + + + + + // encountered another parse error? If so, break out to main loop + // and take it from there! + if (!action) { + newState = state; + break; + } + } + + + + + + + + + + + switch (action) { + // catch misc. parse failures: + default: + // this shouldn't happen, unless resolve defaults are off + if (action instanceof Array) { + p = this.constructParseErrorInfo('Parse Error: multiple actions possible at state: ' + state + ', token: ' + symbol, null, null, false); + retval = this.parseError(p.errStr, p, this.JisonParserError); + // signal end of error recovery loop AND end of outer parse loop + action = 3; + break; + } + // Another case of better safe than sorry: in case state transitions come out of another error recovery process + // or a buggy LUT (LookUp Table): + p = this.constructParseErrorInfo('Parsing halted. 
No viable error recovery approach available due to internal system failure.', null, null, false); + retval = this.parseError(p.errStr, p, this.JisonParserError); + // signal end of error recovery loop AND end of outer parse loop + action = 3; + break; + + // shift: + case 1: + stack[sp] = symbol; + //vstack[sp] = lexer.yytext; + ASSERT(recoveringErrorInfo); + vstack[sp] = recoveringErrorInfo; + //lstack[sp] = copy_yylloc(lexer.yylloc); + lstack[sp] = this.yyMergeLocationInfo(null, null, recoveringErrorInfo.loc, lexer.yylloc, true); + sstack[sp] = newState; // push state + ++sp; + symbol = 0; + if (!preErrorSymbol) { // normal execution / no error + // Pick up the lexer details for the current symbol as that one is not 'look-ahead' any more: + + + + yyloc = lexer.yylloc; + + if (recovering > 0) { + recovering--; + + + + + + + + + + } + } else { + // error just occurred, resume old lookahead f/ before error, *unless* that drops us straight back into error mode: + symbol = preErrorSymbol; + preErrorSymbol = 0; + + + + + + + + + + // read action for current state and first input + t = (table[newState] && table[newState][symbol]) || NO_ACTION; + if (!t[0] || symbol === TERROR) { + // forget about that symbol and move forward: this wasn't a 'forgot to insert' error type where + // (simple) stuff might have been missing before the token which caused the error we're + // recovering from now... + // + // Also check if the LookAhead symbol isn't the ERROR token we set as part of the error + // recovery, for then this we would we idling (cycling) on the error forever. + // Yes, this does not take into account the possibility that the *lexer* may have + // produced a *new* TERROR token all by itself, but that would be a very peculiar grammar! + + + + + + + + + + symbol = 0; + } + } + + // once we have pushed the special ERROR token value, we're done in this inner loop! + break; + + // reduce: + case 2: + this_production = this.productions_[newState - 1]; // `this.productions_[]` is zero-based indexed while states start from 1 upwards... + yyrulelen = this_production[1]; + + + + + + + + + + + r = this.performAction.call(yyval, yyloc, newState, sp - 1, vstack, lstack); + + if (typeof r !== 'undefined') { + // signal end of error recovery loop AND end of outer parse loop + action = 3; + retval = r; + break; + } + + // pop off stack + sp -= yyrulelen; + + // don't overwrite the `symbol` variable: use a local var to speed things up: + var ntsymbol = this_production[0]; // push nonterminal (reduce) + stack[sp] = ntsymbol; + vstack[sp] = yyval.$; + lstack[sp] = yyval._$; + // goto new state = table[STATE][NONTERMINAL] + newState = table[sstack[sp - 1]][ntsymbol]; + sstack[sp] = newState; + ++sp; + + + + + + + + + + continue; + + // accept: + case 3: + retval = true; + // Return the `$accept` rule's `$$` result, if available. + // + // Also note that JISON always adds this top-most `$accept` rule (with implicit, + // default, action): + // + // $accept: $end + // %{ $$ = $1; @$ = @1; %} + // + // which, combined with the parse kernel's `$accept` state behaviour coded below, + // will produce the `$$` value output of the rule as the parse result, + // IFF that result is *not* `undefined`. (See also the parser kernel code.) 
+ // + // In code: + // + // %{ + // @$ = @1; // if location tracking support is included + // if (typeof $1 !== 'undefined') + // return $1; + // else + // return true; // the default parse result if the rule actions don't produce anything + // %} + sp--; + if (typeof vstack[sp] !== 'undefined') { + retval = vstack[sp]; + } + break; + } + + // break out of loop: we accept or fail with error + break; + } + + // should we also break out of the regular/outer parse loop, + // i.e. did the parser already produce a parse result in here?! + if (action === 3) { + break; + } + continue; + } + + + } + + + + + + + + + + + switch (action) { + // catch misc. parse failures: + default: + // this shouldn't happen, unless resolve defaults are off + if (action instanceof Array) { + p = this.constructParseErrorInfo('Parse Error: multiple actions possible at state: ' + state + ', token: ' + symbol, null, null, false); + retval = this.parseError(p.errStr, p, this.JisonParserError); + break; + } + // Another case of better safe than sorry: in case state transitions come out of another error recovery process + // or a buggy LUT (LookUp Table): + p = this.constructParseErrorInfo('Parsing halted. No viable error recovery approach available due to internal system failure.', null, null, false); + retval = this.parseError(p.errStr, p, this.JisonParserError); + break; + + // shift: + case 1: + stack[sp] = symbol; + vstack[sp] = lexer.yytext; + lstack[sp] = copy_yylloc(lexer.yylloc); + sstack[sp] = newState; // push state + + ++sp; + symbol = 0; + ASSERT(preErrorSymbol === 0); + if (!preErrorSymbol) { // normal execution / no error + // Pick up the lexer details for the current symbol as that one is not 'look-ahead' any more: + + + + yyloc = lexer.yylloc; + + if (recovering > 0) { + recovering--; + + + + + + + + + + } + } else { + // error just occurred, resume old lookahead f/ before error, *unless* that drops us straight back into error mode: + symbol = preErrorSymbol; + preErrorSymbol = 0; + + + + + + + + + + // read action for current state and first input + t = (table[newState] && table[newState][symbol]) || NO_ACTION; + if (!t[0] || symbol === TERROR) { + // forget about that symbol and move forward: this wasn't a 'forgot to insert' error type where + // (simple) stuff might have been missing before the token which caused the error we're + // recovering from now... + // + // Also check if the LookAhead symbol isn't the ERROR token we set as part of the error + // recovery, for then this we would we idling (cycling) on the error forever. + // Yes, this does not take into account the possibility that the *lexer* may have + // produced a *new* TERROR token all by itself, but that would be a very peculiar grammar! + + + + + + + + + + symbol = 0; + } + } + + continue; + + // reduce: + case 2: + this_production = this.productions_[newState - 1]; // `this.productions_[]` is zero-based indexed while states start from 1 upwards... 
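+                // `this_production` layout: [0] = id of the nonterminal we reduce to,
+                //                           [1] = number of RHS symbols to pop off the parse stacks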
+ yyrulelen = this_production[1]; + + + + + + + + + + + r = this.performAction.call(yyval, yyloc, newState, sp - 1, vstack, lstack); + + if (typeof r !== 'undefined') { + retval = r; + break; + } + + // pop off stack + sp -= yyrulelen; + + // don't overwrite the `symbol` variable: use a local var to speed things up: + var ntsymbol = this_production[0]; // push nonterminal (reduce) + stack[sp] = ntsymbol; + vstack[sp] = yyval.$; + lstack[sp] = yyval._$; + // goto new state = table[STATE][NONTERMINAL] + newState = table[sstack[sp - 1]][ntsymbol]; + sstack[sp] = newState; + ++sp; + + + + + + + + + + continue; + + // accept: + case 3: + retval = true; + // Return the `$accept` rule's `$$` result, if available. + // + // Also note that JISON always adds this top-most `$accept` rule (with implicit, + // default, action): + // + // $accept: $end + // %{ $$ = $1; @$ = @1; %} + // + // which, combined with the parse kernel's `$accept` state behaviour coded below, + // will produce the `$$` value output of the rule as the parse result, + // IFF that result is *not* `undefined`. (See also the parser kernel code.) + // + // In code: + // + // %{ + // @$ = @1; // if location tracking support is included + // if (typeof $1 !== 'undefined') + // return $1; + // else + // return true; // the default parse result if the rule actions don't produce anything + // %} + sp--; + if (typeof vstack[sp] !== 'undefined') { + retval = vstack[sp]; + } + + break; + } + + // break out of loop: we accept or fail with error + break; + } + } catch (ex) { + // report exceptions through the parseError callback too, but keep the exception intact + // if it is a known parser or lexer error which has been thrown by parseError() already: + if (ex instanceof this.JisonParserError) { + throw ex; + } + else if (lexer && typeof lexer.JisonLexerError === 'function' && ex instanceof lexer.JisonLexerError) { + throw ex; + } + else { + p = this.constructParseErrorInfo('Parsing aborted due to exception.', ex, null, false); + retval = this.parseError(p.errStr, p, this.JisonParserError); + } + } finally { + retval = this.cleanupAfterParse(retval, true, true); + this.__reentrant_call_depth--; + } // /finally + + return retval; +}, +yyError: 1 +}; +parser.originalParseError = parser.parseError; +parser.originalQuoteName = parser.quoteName; + +var ebnf = false; + + + +var rmCommonWS = helpers.rmCommonWS; +var dquote = helpers.dquote; +var checkActionBlock = helpers.checkActionBlock; + + +// transform ebnf to bnf if necessary +function extend(json, grammar) { + if (ebnf) { + json.ebnf = grammar.grammar; // keep the original source EBNF around for possible pretty-printing & AST exports. + json.bnf = transform(grammar.grammar); + } + else { + json.bnf = grammar.grammar; + } + if (grammar.actionInclude) { + json.actionInclude = grammar.actionInclude; + } + return json; +} + +// convert string value to number or boolean value, when possible +// (and when this is more or less obviously the intent) +// otherwise produce the string itself as value. +function parseValue(v) { + if (v === 'false') { + return false; + } + if (v === 'true') { + return true; + } + // http://stackoverflow.com/questions/175739/is-there-a-built-in-way-in-javascript-to-check-if-a-string-is-a-valid-number + // Note that the `v` check ensures that we do not convert `undefined`, `null` and `''` (empty string!) 
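+    // e.g. parseValue('true') === true, parseValue('42') === 42, parseValue('1e3') === 1000,
+    //      while parseValue('abc') and parseValue('') are returned as-is.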
+ if (v && !isNaN(v)) { + var rv = +v; + if (isFinite(rv)) { + return rv; + } + } + return v; +} + + +parser.warn = function p_warn() { + console.warn.apply(console, arguments); +}; + +parser.log = function p_log() { + console.log.apply(console, arguments); +}; +/* lexer generated by jison-lex 0.6.1-205 */ + +/* + * Returns a Lexer object of the following structure: + * + * Lexer: { + * yy: {} The so-called "shared state" or rather the *source* of it; + * the real "shared state" `yy` passed around to + * the rule actions, etc. is a direct reference! + * + * This "shared context" object was passed to the lexer by way of + * the `lexer.setInput(str, yy)` API before you may use it. + * + * This "shared context" object is passed to the lexer action code in `performAction()` + * so userland code in the lexer actions may communicate with the outside world + * and/or other lexer rules' actions in more or less complex ways. + * + * } + * + * Lexer.prototype: { + * EOF: 1, + * ERROR: 2, + * + * yy: The overall "shared context" object reference. + * + * JisonLexerError: function(msg, hash), + * + * performAction: function lexer__performAction(yy, yyrulenumber, YY_START), + * + * The function parameters and `this` have the following value/meaning: + * - `this` : reference to the `lexer` instance. + * `yy_` is an alias for `this` lexer instance reference used internally. + * + * - `yy` : a reference to the `yy` "shared state" object which was passed to the lexer + * by way of the `lexer.setInput(str, yy)` API before. + * + * Note: + * The extra arguments you specified in the `%parse-param` statement in your + * **parser** grammar definition file are passed to the lexer via this object + * reference as member variables. + * + * - `yyrulenumber` : index of the matched lexer rule (regex), used internally. + * + * - `YY_START`: the current lexer "start condition" state. + * + * parseError: function(str, hash, ExceptionClass), + * + * constructLexErrorInfo: function(error_message, is_recoverable), + * Helper function. + * Produces a new errorInfo 'hash object' which can be passed into `parseError()`. + * See it's use in this lexer kernel in many places; example usage: + * + * var infoObj = lexer.constructParseErrorInfo('fail!', true); + * var retVal = lexer.parseError(infoObj.errStr, infoObj, lexer.JisonLexerError); + * + * options: { ... lexer %options ... }, + * + * lex: function(), + * Produce one token of lexed input, which was passed in earlier via the `lexer.setInput()` API. + * You MAY use the additional `args...` parameters as per `%parse-param` spec of the **lexer** grammar: + * these extra `args...` are added verbatim to the `yy` object reference as member variables. + * + * WARNING: + * Lexer's additional `args...` parameters (via lexer's `%parse-param`) MAY conflict with + * any attributes already added to `yy` by the **parser** or the jison run-time; + * when such a collision is detected an exception is thrown to prevent the generated run-time + * from silently accepting this confusing and potentially hazardous situation! + * + * cleanupAfterLex: function(do_not_nuke_errorinfos), + * Helper function. + * + * This helper API is invoked when the **parse process** has completed: it is the responsibility + * of the **parser** (or the calling userland code) to invoke this method once cleanup is desired. + * + * This helper may be invoked by user code to ensure the internal lexer gets properly garbage collected. 
+ * + * setInput: function(input, [yy]), + * + * + * input: function(), + * + * + * unput: function(str), + * + * + * more: function(), + * + * + * reject: function(), + * + * + * less: function(n), + * + * + * pastInput: function(n), + * + * + * upcomingInput: function(n), + * + * + * showPosition: function(), + * + * + * test_match: function(regex_match_array, rule_index), + * + * + * next: function(), + * + * + * begin: function(condition), + * + * + * pushState: function(condition), + * + * + * popState: function(), + * + * + * topState: function(), + * + * + * _currentRules: function(), + * + * + * stateStackSize: function(), + * + * + * performAction: function(yy, yy_, yyrulenumber, YY_START), + * + * + * rules: [...], + * + * + * conditions: {associative list: name ==> set}, + * } + * + * + * token location info (`yylloc`): { + * first_line: n, + * last_line: n, + * first_column: n, + * last_column: n, + * range: [start_number, end_number] + * (where the numbers are indexes into the input string, zero-based) + * } + * + * --- + * + * The `parseError` function receives a 'hash' object with these members for lexer errors: + * + * { + * text: (matched text) + * token: (the produced terminal token, if any) + * token_id: (the produced terminal token numeric ID, if any) + * line: (yylineno) + * loc: (yylloc) + * recoverable: (boolean: TRUE when the parser MAY have an error recovery rule + * available for this particular error) + * yy: (object: the current parser internal "shared state" `yy` + * as is also available in the rule actions; this can be used, + * for instance, for advanced error analysis and reporting) + * lexer: (reference to the current lexer instance used by the parser) + * } + * + * while `this` will reference the current lexer instance. + * + * When `parseError` is invoked by the lexer, the default implementation will + * attempt to invoke `yy.parser.parseError()`; when this callback is not provided + * it will try to invoke `yy.parseError()` instead. When that callback is also not + * provided, a `JisonLexerError` exception will be thrown containing the error + * message and `hash`, as constructed by the `constructLexErrorInfo()` API. + * + * Note that the lexer's `JisonLexerError` error class is passed via the + * `ExceptionClass` argument, which is invoked to construct the exception + * instance to be thrown, so technically `parseError` will throw the object + * produced by the `new ExceptionClass(str, hash)` JavaScript expression. + * + * --- + * + * You can specify lexer options by setting / modifying the `.options` object of your Lexer instance. + * These options are available: + * + * (Options are permanent.) + * + * yy: { + * parseError: function(str, hash, ExceptionClass) + * optional: overrides the default `parseError` function. + * } + * + * lexer.options: { + * pre_lex: function() + * optional: is invoked before the lexer is invoked to produce another token. + * `this` refers to the Lexer object. + * post_lex: function(token) { return token; } + * optional: is invoked when the lexer has produced a token `token`; + * this function can override the returned token value by returning another. + * When it does not return any (truthy) value, the lexer will return + * the original `token`. + * `this` refers to the Lexer object. + * + * WARNING: the next set of options are not meant to be changed. They echo the abilities of + * the lexer as per when it was compiled! + * + * ranges: boolean + * optional: `true` ==> token location info will include a .range[] member. 
+ * flex: boolean + * optional: `true` ==> flex-like lexing behaviour where the rules are tested + * exhaustively to find the longest match. + * backtrack_lexer: boolean + * optional: `true` ==> lexer regexes are tested in order and for invoked; + * the lexer terminates the scan when a token is returned by the action code. + * xregexp: boolean + * optional: `true` ==> lexer rule regexes are "extended regex format" requiring the + * `XRegExp` library. When this %option has not been specified at compile time, all lexer + * rule regexes have been written as standard JavaScript RegExp expressions. + * } + */ + + +var lexer = function() { + /** + * See also: + * http://stackoverflow.com/questions/1382107/whats-a-good-way-to-extend-error-in-javascript/#35881508 + * but we keep the prototype.constructor and prototype.name assignment lines too for compatibility + * with userland code which might access the derived class in a 'classic' way. + * + * @public + * @constructor + * @nocollapse + */ + function JisonLexerError(msg, hash) { + Object.defineProperty(this, 'name', { + enumerable: false, + writable: false, + value: 'JisonLexerError' + }); + + if (msg == null) + msg = '???'; + + Object.defineProperty(this, 'message', { + enumerable: false, + writable: true, + value: msg + }); + + this.hash = hash; + var stacktrace; + + if (hash && hash.exception instanceof Error) { + var ex2 = hash.exception; + this.message = ex2.message || msg; + stacktrace = ex2.stack; + } + + if (!stacktrace) { + if (Error.hasOwnProperty('captureStackTrace')) { + // V8 + Error.captureStackTrace(this, this.constructor); + } else { + stacktrace = new Error(msg).stack; + } + } + + if (stacktrace) { + Object.defineProperty(this, 'stack', { + enumerable: false, + writable: false, + value: stacktrace + }); + } + } + + if (typeof Object.setPrototypeOf === 'function') { + Object.setPrototypeOf(JisonLexerError.prototype, Error.prototype); + } else { + JisonLexerError.prototype = Object.create(Error.prototype); + } + + JisonLexerError.prototype.constructor = JisonLexerError; + JisonLexerError.prototype.name = 'JisonLexerError'; + + var lexer = { + +// Code Generator Information Report +// --------------------------------- +// +// Options: +// +// backtracking: .................... false +// location.ranges: ................. true +// location line+column tracking: ... true +// +// +// Forwarded Parser Analysis flags: +// +// uses yyleng: ..................... false +// uses yylineno: ................... false +// uses yytext: ..................... false +// uses yylloc: ..................... false +// uses lexer values: ............... true / true +// location tracking: ............... true +// location assignment: ............. true +// +// +// Lexer Analysis flags: +// +// uses yyleng: ..................... ??? +// uses yylineno: ................... ??? +// uses yytext: ..................... ??? +// uses yylloc: ..................... ??? +// uses ParseError API: ............. ??? +// uses yyerror: .................... ??? +// uses location tracking & editing: ??? +// uses more() API: ................. ??? +// uses unput() API: ................ ??? +// uses reject() API: ............... ??? +// uses less() API: ................. ??? +// uses display APIs pastInput(), upcomingInput(), showPosition(): +// ............................. ??? +// uses describeYYLLOC() API: ....... ??? 
+// +// --------- END OF REPORT ----------- + +EOF: 1, + ERROR: 2, + + // JisonLexerError: JisonLexerError, /// <-- injected by the code generator + + // options: {}, /// <-- injected by the code generator + + // yy: ..., /// <-- injected by setInput() + + __currentRuleSet__: null, /// INTERNAL USE ONLY: internal rule set cache for the current lexer state + + __error_infos: [], /// INTERNAL USE ONLY: the set of lexErrorInfo objects created since the last cleanup + __decompressed: false, /// INTERNAL USE ONLY: mark whether the lexer instance has been 'unfolded' completely and is now ready for use + done: false, /// INTERNAL USE ONLY + _backtrack: false, /// INTERNAL USE ONLY + _input: '', /// INTERNAL USE ONLY + _more: false, /// INTERNAL USE ONLY + _signaled_error_token: false, /// INTERNAL USE ONLY + conditionStack: [], /// INTERNAL USE ONLY; managed via `pushState()`, `popState()`, `topState()` and `stateStackSize()` + match: '', /// READ-ONLY EXTERNAL ACCESS - ADVANCED USE ONLY: tracks input which has been matched so far for the lexer token under construction. `match` is identical to `yytext` except that this one still contains the matched input string after `lexer.performAction()` has been invoked, where userland code MAY have changed/replaced the `yytext` value entirely! + matched: '', /// READ-ONLY EXTERNAL ACCESS - ADVANCED USE ONLY: tracks entire input which has been matched so far + matches: false, /// READ-ONLY EXTERNAL ACCESS - ADVANCED USE ONLY: tracks RE match result for last (successful) match attempt + yytext: '', /// ADVANCED USE ONLY: tracks input which has been matched so far for the lexer token under construction; this value is transferred to the parser as the 'token value' when the parser consumes the lexer token produced through a call to the `lex()` API. + offset: 0, /// READ-ONLY EXTERNAL ACCESS - ADVANCED USE ONLY: tracks the 'cursor position' in the input string, i.e. the number of characters matched so far + yyleng: 0, /// READ-ONLY EXTERNAL ACCESS - ADVANCED USE ONLY: length of matched input for the token under construction (`yytext`) + yylineno: 0, /// READ-ONLY EXTERNAL ACCESS - ADVANCED USE ONLY: 'line number' at which the token under construction is located + yylloc: null, /// READ-ONLY EXTERNAL ACCESS - ADVANCED USE ONLY: tracks location info (lines + columns) for the token under construction + + /** + * INTERNAL USE: construct a suitable error info hash object instance for `parseError`. 
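The EOF and ERROR constants together with the yytext, yyleng, yylineno, yylloc and offset members listed at the top of this lexer object are its per-token bookkeeping. A rough sketch of what they hold after a single lex() call, assuming the package resolves under its npm name and using an invented grammar snippet:

    const lexer = require('@gerhobbelt/ebnf-parser').bnf_parser.parser.lexer;

    lexer.setInput('%ebnf\n%%\nr: A;\n');            // invented grammar text
    const token = lexer.lex();                       // first token: the '%ebnf' keyword

    console.log(token, lexer.yytext, lexer.yyleng);  // numeric token id, '%ebnf', 5
    console.log(lexer.offset, lexer.yylineno);       // 5, 0 (offset counts consumed chars; yylineno is 0-based)
    console.log(lexer.yylloc);                       // { first_line: 1, first_column: 0, last_line: 1, last_column: 5, range: [0, 5] }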
+ * + * @public + * @this {RegExpLexer} + */ + constructLexErrorInfo: function lexer_constructLexErrorInfo(msg, recoverable, show_input_position) { + msg = '' + msg; + + // heuristic to determine if the error message already contains a (partial) source code dump + // as produced by either `showPosition()` or `prettyPrintRange()`: + if (show_input_position == undefined) { + show_input_position = !(msg.indexOf('\n') > 0 && msg.indexOf('^') > 0); + } + + if (this.yylloc && show_input_position) { + if (typeof this.prettyPrintRange === 'function') { + var pretty_src = this.prettyPrintRange(this.yylloc); + + if (!/\n\s*$/.test(msg)) { + msg += '\n'; + } + + msg += '\n Erroneous area:\n' + this.prettyPrintRange(this.yylloc); + } else if (typeof this.showPosition === 'function') { + var pos_str = this.showPosition(); + + if (pos_str) { + if (msg.length && msg[msg.length - 1] !== '\n' && pos_str[0] !== '\n') { + msg += '\n' + pos_str; + } else { + msg += pos_str; + } + } + } + } + + /** @constructor */ + var pei = { + errStr: msg, + recoverable: !!recoverable, + text: this.match, // This one MAY be empty; userland code should use the `upcomingInput` API to obtain more text which follows the 'lexer cursor position'... + token: null, + line: this.yylineno, + loc: this.yylloc, + yy: this.yy, + lexer: this, + + /** + * and make sure the error info doesn't stay due to potential + * ref cycle via userland code manipulations. + * These would otherwise all be memory leak opportunities! + * + * Note that only array and object references are nuked as those + * constitute the set of elements which can produce a cyclic ref. + * The rest of the members is kept intact as they are harmless. + * + * @public + * @this {LexErrorInfo} + */ + destroy: function destructLexErrorInfo() { + // remove cyclic references added to error info: + // info.yy = null; + // info.lexer = null; + // ... + var rec = !!this.recoverable; + + for (var key in this) { + if (this.hasOwnProperty(key) && typeof key === 'object') { + this[key] = undefined; + } + } + + this.recoverable = rec; + } + }; + + // track this instance so we can `destroy()` it once we deem it superfluous and ready for garbage collection! + this.__error_infos.push(pei); + + return pei; + }, + + /** + * handler which is invoked when a lexer error occurs. + * + * @public + * @this {RegExpLexer} + */ + parseError: function lexer_parseError(str, hash, ExceptionClass) { + if (!ExceptionClass) { + ExceptionClass = this.JisonLexerError; + } + + if (this.yy) { + if (this.yy.parser && typeof this.yy.parser.parseError === 'function') { + return this.yy.parser.parseError.call(this, str, hash, ExceptionClass) || this.ERROR; + } else if (typeof this.yy.parseError === 'function') { + return this.yy.parseError.call(this, str, hash, ExceptionClass) || this.ERROR; + } + } + + throw new ExceptionClass(str, hash); + }, + + /** + * method which implements `yyerror(str, ...args)` functionality for use inside lexer actions. 
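Because the default parseError() shown above falls back to yy.parseError() when no parser is attached, a standalone scan can reroute lexical errors without a try/catch. A minimal sketch (assumed package name; the input is invented and contains an unsupported '@' character):

    const lexer = require('@gerhobbelt/ebnf-parser').bnf_parser.parser.lexer;

    lexer.setInput('rule: @ ;\n', {
        parseError: function (msg, hash) {
            // `this` is the lexer; returning ERROR keeps the scan going instead of throwing JisonLexerError
            console.warn('recoverable:', hash.recoverable, '\n' + msg);
            return this.ERROR;
        }
    });

    let token;
    while ((token = lexer.lex()) !== lexer.EOF) {
        console.log(token, JSON.stringify(lexer.yytext));
    }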
+ * + * @public + * @this {RegExpLexer} + */ + yyerror: function yyError(str /*, ...args */) { + var lineno_msg = ''; + + if (this.yylloc) { + lineno_msg = ' on line ' + (this.yylineno + 1); + } + + var p = this.constructLexErrorInfo( + 'Lexical error' + lineno_msg + ': ' + str, + this.options.lexerErrorsAreRecoverable + ); + + // Add any extra args to the hash under the name `extra_error_attributes`: + var args = Array.prototype.slice.call(arguments, 1); + + if (args.length) { + p.extra_error_attributes = args; + } + + return this.parseError(p.errStr, p, this.JisonLexerError) || this.ERROR; + }, + + /** + * final cleanup function for when we have completed lexing the input; + * make it an API so that external code can use this one once userland + * code has decided it's time to destroy any lingering lexer error + * hash object instances and the like: this function helps to clean + * up these constructs, which *may* carry cyclic references which would + * otherwise prevent the instances from being properly and timely + * garbage-collected, i.e. this function helps prevent memory leaks! + * + * @public + * @this {RegExpLexer} + */ + cleanupAfterLex: function lexer_cleanupAfterLex(do_not_nuke_errorinfos) { + // prevent lingering circular references from causing memory leaks: + this.setInput('', {}); + + // nuke the error hash info instances created during this run. + // Userland code must COPY any data/references + // in the error hash instance(s) it is more permanently interested in. + if (!do_not_nuke_errorinfos) { + for (var i = this.__error_infos.length - 1; i >= 0; i--) { + var el = this.__error_infos[i]; + + if (el && typeof el.destroy === 'function') { + el.destroy(); + } + } + + this.__error_infos.length = 0; + } + + return this; + }, + + /** + * clear the lexer token context; intended for internal use only + * + * @public + * @this {RegExpLexer} + */ + clear: function lexer_clear() { + this.yytext = ''; + this.yyleng = 0; + this.match = ''; + + // - DO NOT reset `this.matched` + this.matches = false; + + this._more = false; + this._backtrack = false; + var col = (this.yylloc ? this.yylloc.last_column : 0); + + this.yylloc = { + first_line: this.yylineno + 1, + first_column: col, + last_line: this.yylineno + 1, + last_column: col, + range: [this.offset, this.offset] + }; + }, + + /** + * resets the lexer, sets new input + * + * @public + * @this {RegExpLexer} + */ + setInput: function lexer_setInput(input, yy) { + this.yy = yy || this.yy || {}; + + // also check if we've fully initialized the lexer instance, + // including expansion work to be done to go from a loaded + // lexer to a usable lexer: + if (!this.__decompressed) { + // step 1: decompress the regex list: + var rules = this.rules; + + for (var i = 0, len = rules.length; i < len; i++) { + var rule_re = rules[i]; + + // compression: is the RE an xref to another RE slot in the rules[] table? + if (typeof rule_re === 'number') { + rules[i] = rules[rule_re]; + } + } + + // step 2: unfold the conditions[] set to make these ready for use: + var conditions = this.conditions; + + for (var k in conditions) { + var spec = conditions[k]; + var rule_ids = spec.rules; + var len = rule_ids.length; + var rule_regexes = new Array(len + 1); // slot 0 is unused; we use a 1-based index approach here to keep the hottest code in `lexer_next()` fast and simple! 
+ var rule_new_ids = new Array(len + 1);
+
+ for (var i = 0; i < len; i++) {
+ var idx = rule_ids[i];
+ var rule_re = rules[idx];
+ rule_regexes[i + 1] = rule_re;
+ rule_new_ids[i + 1] = idx;
+ }
+
+ spec.rules = rule_new_ids;
+ spec.__rule_regexes = rule_regexes;
+ spec.__rule_count = len;
+ }
+
+ this.__decompressed = true;
+ }
+
+ this._input = input || '';
+ this.clear();
+ this._signaled_error_token = false;
+ this.done = false;
+ this.yylineno = 0;
+ this.matched = '';
+ this.conditionStack = ['INITIAL'];
+ this.__currentRuleSet__ = null;
+
+ this.yylloc = {
+ first_line: 1,
+ first_column: 0,
+ last_line: 1,
+ last_column: 0,
+ range: [0, 0]
+ };
+
+ this.offset = 0;
+ return this;
+ },
+
+ /**
+ * edit the remaining input via user-specified callback.
+ * This can be used to forward-adjust the input-to-parse,
+ * e.g. inserting macro expansions and alike in the
+ * input which has yet to be lexed.
+ * The behaviour of this API contrasts with the `unput()` et al.
+ * APIs as those act on the *consumed* input, while this
+ * one allows one to manipulate the future, without impacting
+ * the current `yylloc` cursor location or any history.
+ *
+ * Use this API to help implement C-preprocessor-like
+ * `#include` statements, etc.
+ *
+ * The provided callback must be synchronous and is
+ * expected to return the edited input (string).
+ *
+ * The `cpsArg` argument value is passed to the callback
+ * as-is.
+ *
+ * `callback` interface:
+ * `function callback(input, cpsArg)`
+ *
+ * - `input` will carry the remaining-input-to-lex string
+ * from the lexer.
+ * - `cpsArg` is the `cpsArg` passed into this API.
+ *
+ * The `this` reference for the callback will be set to
+ * reference this lexer instance so that userland code
+ * in the callback can easily and quickly access any lexer
+ * API.
+ *
+ * When the callback returns a non-string-type falsey value,
+ * we assume the callback did not edit the input and we
+ * will use the input as-is.
+ *
+ * When the callback returns a truthy non-string-type value, it
+ * is converted to a string for lexing via the `"" + retval`
+ * operation. (See also why: http://2ality.com/2012/03/converting-to-string.html
+ * -- that way any returned object's `valueOf()` and `toString()`
+ * methods will be invoked in a proper/desirable order.)
+ *
+ * @public
+ * @this {RegExpLexer}
+ */
+ editRemainingInput: function lexer_editRemainingInput(callback, cpsArg) {
+ var rv = callback.call(this, this._input, cpsArg);
+
+ if (typeof rv !== 'string') {
+ if (rv) {
+ this._input = '' + rv;
+ }
+ // else: keep `this._input` as is.
+ } else {
+ this._input = rv;
+ }
+
+ return this;
+ },
+
+ /**
+ * consumes and returns one char from the input
+ *
+ * @public
+ * @this {RegExpLexer}
+ */
+ input: function lexer_input() {
+ if (!this._input) {
+ //this.done = true; -- don't set `done` as we want the lex()/next() API to be able to produce one custom EOF token match after this anyhow. (lexer can match special <<EOF>> tokens and perform user action code for a <<EOF>> match, but only does so *once*)
+ return null;
+ }
+
+ var ch = this._input[0];
+ this.yytext += ch;
+ this.yyleng++;
+ this.offset++;
+ this.match += ch;
+ this.matched += ch;
+
+ // Count the linenumber up when we hit the LF (or a stand-alone CR).
+ // On CRLF, the linenumber is incremented when you fetch the CR or the CRLF combo
+ // and we advance immediately past the LF as well, returning both together as if
+ // it was all a single 'character' only.
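editRemainingInput(), documented above, only rewrites the not-yet-lexed tail of the input, so consumed text, yylloc and offset stay untouched. A hedged sketch of the #include-style use it hints at; the %use directive and the snippet table are invented for illustration:

    const lexer = require('@gerhobbelt/ebnf-parser').bnf_parser.parser.lexer;

    const snippets = {                                   // invented expansion table
        OPERATORS: "%left '+' '-'\n%left '*' '/'\n"
    };

    lexer.setInput('%use OPERATORS\n%%\nexp: exp;\n');   // '%use' is a made-up directive
    lexer.editRemainingInput(function (remaining, table) {
        // splice the expansion into the future input; `this` is the lexer itself
        return remaining.replace(/^%use[ \t]+(\w+)[^\n]*\n/, function (_, name) {
            return table[name] || '';
        });
    }, snippets);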
+ var slice_len = 1; + + var lines = false; + + if (ch === '\n') { + lines = true; + } else if (ch === '\r') { + lines = true; + var ch2 = this._input[1]; + + if (ch2 === '\n') { + slice_len++; + ch += ch2; + this.yytext += ch2; + this.yyleng++; + this.offset++; + this.match += ch2; + this.matched += ch2; + this.yylloc.range[1]++; + } + } + + if (lines) { + this.yylineno++; + this.yylloc.last_line++; + this.yylloc.last_column = 0; + } else { + this.yylloc.last_column++; + } + + this.yylloc.range[1]++; + this._input = this._input.slice(slice_len); + return ch; + }, + + /** + * unshifts one char (or an entire string) into the input + * + * @public + * @this {RegExpLexer} + */ + unput: function lexer_unput(ch) { + var len = ch.length; + var lines = ch.split(/(?:\r\n?|\n)/g); + this._input = ch + this._input; + this.yytext = this.yytext.substr(0, this.yytext.length - len); + this.yyleng = this.yytext.length; + this.offset -= len; + this.match = this.match.substr(0, this.match.length - len); + this.matched = this.matched.substr(0, this.matched.length - len); + + if (lines.length > 1) { + this.yylineno -= lines.length - 1; + this.yylloc.last_line = this.yylineno + 1; + + // Get last entirely matched line into the `pre_lines[]` array's + // last index slot; we don't mind when other previously + // matched lines end up in the array too. + var pre = this.match; + + var pre_lines = pre.split(/(?:\r\n?|\n)/g); + + if (pre_lines.length === 1) { + pre = this.matched; + pre_lines = pre.split(/(?:\r\n?|\n)/g); + } + + this.yylloc.last_column = pre_lines[pre_lines.length - 1].length; + } else { + this.yylloc.last_column -= len; + } + + this.yylloc.range[1] = this.yylloc.range[0] + this.yyleng; + this.done = false; + return this; + }, + + /** + * cache matched text and append it on next action + * + * @public + * @this {RegExpLexer} + */ + more: function lexer_more() { + this._more = true; + return this; + }, + + /** + * signal the lexer that this rule fails to match the input, so the + * next matching rule (regex) should be tested instead. + * + * @public + * @this {RegExpLexer} + */ + reject: function lexer_reject() { + if (this.options.backtrack_lexer) { + this._backtrack = true; + } else { + // when the `parseError()` call returns, we MUST ensure that the error is registered. + // We accomplish this by signaling an 'error' token to be produced for the current + // `.lex()` run. + var lineno_msg = ''; + + if (this.yylloc) { + lineno_msg = ' on line ' + (this.yylineno + 1); + } + + var p = this.constructLexErrorInfo( + 'Lexical error' + lineno_msg + ': You can only invoke reject() in the lexer when the lexer is of the backtracking persuasion (options.backtrack_lexer = true).', + false + ); + + this._signaled_error_token = this.parseError(p.errStr, p, this.JisonLexerError) || this.ERROR; + } + + return this; + }, + + /** + * retain first n characters of the match + * + * @public + * @this {RegExpLexer} + */ + less: function lexer_less(n) { + return this.unput(this.match.slice(n)); + }, + + /** + * return (part of the) already matched input, i.e. for error + * messages. + * + * Limit the returned string length to `maxSize` (default: 20). + * + * Limit the returned string to the `maxLines` number of lines of + * input (default: 1). + * + * Negative limit values equal *unlimited*. 
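pastInput(), documented just above, and its counterpart upcomingInput() both take a character budget and a line budget, with negative values meaning unlimited. A small sketch with invented grammar text:

    const lexer = require('@gerhobbelt/ebnf-parser').bnf_parser.parser.lexer;

    lexer.setInput('%start spec\n%%\nspec: TOKEN_A TOKEN_B;\n');
    lexer.lex();                                // '%start'
    lexer.lex();                                // 'spec'

    console.log(lexer.pastInput(40, 2));        // already matched text, excluding the current 'spec' match
    console.log(lexer.upcomingInput(40, 2));    // the current match plus what still remains to be lexed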
+ * + * @public + * @this {RegExpLexer} + */ + pastInput: function lexer_pastInput(maxSize, maxLines) { + var past = this.matched.substring(0, this.matched.length - this.match.length); + + if (maxSize < 0) + maxSize = past.length; + else if (!maxSize) + maxSize = 20; + + if (maxLines < 0) + maxLines = past.length; // can't ever have more input lines than this! + else if (!maxLines) + maxLines = 1; + + // `substr` anticipation: treat \r\n as a single character and take a little + // more than necessary so that we can still properly check against maxSize + // after we've transformed and limited the newLines in here: + past = past.substr(-maxSize * 2 - 2); + + // now that we have a significantly reduced string to process, transform the newlines + // and chop them, then limit them: + var a = past.replace(/\r\n|\r/g, '\n').split('\n'); + + a = a.slice(-maxLines); + past = a.join('\n'); + + // When, after limiting to maxLines, we still have too much to return, + // do add an ellipsis prefix... + if (past.length > maxSize) { + past = '...' + past.substr(-maxSize); + } + + return past; + }, + + /** + * return (part of the) upcoming input, i.e. for error messages. + * + * Limit the returned string length to `maxSize` (default: 20). + * + * Limit the returned string to the `maxLines` number of lines of input (default: 1). + * + * Negative limit values equal *unlimited*. + * + * > ### NOTE ### + * > + * > *"upcoming input"* is defined as the whole of the both + * > the *currently lexed* input, together with any remaining input + * > following that. *"currently lexed"* input is the input + * > already recognized by the lexer but not yet returned with + * > the lexer token. This happens when you are invoking this API + * > from inside any lexer rule action code block. + * > + * + * @public + * @this {RegExpLexer} + */ + upcomingInput: function lexer_upcomingInput(maxSize, maxLines) { + var next = this.match; + + if (maxSize < 0) + maxSize = next.length + this._input.length; + else if (!maxSize) + maxSize = 20; + + if (maxLines < 0) + maxLines = maxSize; // can't ever have more input lines than this! + else if (!maxLines) + maxLines = 1; + + // `substring` anticipation: treat \r\n as a single character and take a little + // more than necessary so that we can still properly check against maxSize + // after we've transformed and limited the newLines in here: + if (next.length < maxSize * 2 + 2) { + next += this._input.substring(0, maxSize * 2 + 2); // substring is faster on Chrome/V8 + } + + // now that we have a significantly reduced string to process, transform the newlines + // and chop them, then limit them: + var a = next.replace(/\r\n|\r/g, '\n').split('\n'); + + a = a.slice(0, maxLines); + next = a.join('\n'); + + // When, after limiting to maxLines, we still have too much to return, + // do add an ellipsis postfix... + if (next.length > maxSize) { + next = next.substring(0, maxSize) + '...'; + } + + return next; + }, + + /** + * return a string which displays the character position where the + * lexing error occurred, i.e. 
for error messages + * + * @public + * @this {RegExpLexer} + */ + showPosition: function lexer_showPosition(maxPrefix, maxPostfix) { + var pre = this.pastInput(maxPrefix).replace(/\s/g, ' '); + var c = new Array(pre.length + 1).join('-'); + return pre + this.upcomingInput(maxPostfix).replace(/\s/g, ' ') + '\n' + c + '^'; + }, + + /** + * return a string which displays the lines & columns of input which are referenced + * by the given location info range, plus a few lines of context. + * + * This function pretty-prints the indicated section of the input, with line numbers + * and everything! + * + * This function is very useful to provide highly readable error reports, while + * the location range may be specified in various flexible ways: + * + * - `loc` is the location info object which references the area which should be + * displayed and 'marked up': these lines & columns of text are marked up by `^` + * characters below each character in the entire input range. + * + * - `context_loc` is the *optional* location info object which instructs this + * pretty-printer how much *leading* context should be displayed alongside + * the area referenced by `loc`. This can help provide context for the displayed + * error, etc. + * + * When this location info is not provided, a default context of 3 lines is + * used. + * + * - `context_loc2` is another *optional* location info object, which serves + * a similar purpose to `context_loc`: it specifies the amount of *trailing* + * context lines to display in the pretty-print output. + * + * When this location info is not provided, a default context of 1 line only is + * used. + * + * Special Notes: + * + * - when the `loc`-indicated range is very large (about 5 lines or more), then + * only the first and last few lines of this block are printed while a + * `...continued...` message will be printed between them. + * + * This serves the purpose of not printing a huge amount of text when the `loc` + * range happens to be huge: this way a manageable & readable output results + * for arbitrary large ranges. + * + * - this function can display lines of input which whave not yet been lexed. + * `prettyPrintRange()` can access the entire input! + * + * @public + * @this {RegExpLexer} + */ + prettyPrintRange: function lexer_prettyPrintRange(loc, context_loc, context_loc2) { + const CONTEXT = 3; + const CONTEXT_TAIL = 1; + const MINIMUM_VISIBLE_NONEMPTY_LINE_COUNT = 2; + var input = this.matched + this._input; + var lines = input.split('\n'); + + //var show_context = (error_size < 5 || context_loc); + var l0 = Math.max(1, (context_loc ? context_loc.first_line : loc.first_line - CONTEXT)); + + var l1 = Math.max(1, (context_loc2 ? context_loc2.last_line : loc.last_line + CONTEXT_TAIL)); + var lineno_display_width = 1 + Math.log10(l1 | 1) | 0; + var ws_prefix = new Array(lineno_display_width).join(' '); + var nonempty_line_indexes = []; + + var rv = lines.slice(l0 - 1, l1 + 1).map(function injectLineNumber(line, index) { + var lno = index + l0; + var lno_pfx = (ws_prefix + lno).substr(-lineno_display_width); + var rv = lno_pfx + ': ' + line; + var errpfx = new Array(lineno_display_width + 1).join('^'); + var offset = 2 + 1; + var len = 0; + + if (lno === loc.first_line) { + offset += loc.first_column; + + len = Math.max( + 2, + ((lno === loc.last_line ? 
loc.last_column : line.length)) - loc.first_column + 1 + ); + } else if (lno === loc.last_line) { + len = Math.max(2, loc.last_column + 1); + } else if (lno > loc.first_line && lno < loc.last_line) { + len = Math.max(2, line.length + 1); + } + + if (len) { + var lead = new Array(offset).join('.'); + var mark = new Array(len).join('^'); + rv += '\n' + errpfx + lead + mark; + + if (line.trim().length > 0) { + nonempty_line_indexes.push(index); + } + } + + rv = rv.replace(/\t/g, ' '); + return rv; + }); + + // now make sure we don't print an overly large amount of error area: limit it + // to the top and bottom line count: + if (nonempty_line_indexes.length > 2 * MINIMUM_VISIBLE_NONEMPTY_LINE_COUNT) { + var clip_start = nonempty_line_indexes[MINIMUM_VISIBLE_NONEMPTY_LINE_COUNT - 1] + 1; + var clip_end = nonempty_line_indexes[nonempty_line_indexes.length - MINIMUM_VISIBLE_NONEMPTY_LINE_COUNT] - 1; + var intermediate_line = new Array(lineno_display_width + 1).join(' ') + ' (...continued...)'; + intermediate_line += '\n' + new Array(lineno_display_width + 1).join('-') + ' (---------------)'; + rv.splice(clip_start, clip_end - clip_start + 1, intermediate_line); + } + + return rv.join('\n'); + }, + + /** + * helper function, used to produce a human readable description as a string, given + * the input `yylloc` location object. + * + * Set `display_range_too` to TRUE to include the string character index position(s) + * in the description if the `yylloc.range` is available. + * + * @public + * @this {RegExpLexer} + */ + describeYYLLOC: function lexer_describe_yylloc(yylloc, display_range_too) { + var l1 = yylloc.first_line; + var l2 = yylloc.last_line; + var c1 = yylloc.first_column; + var c2 = yylloc.last_column; + var dl = l2 - l1; + var dc = c2 - c1; + var rv; + + if (dl === 0) { + rv = 'line ' + l1 + ', '; + + if (dc <= 1) { + rv += 'column ' + c1; + } else { + rv += 'columns ' + c1 + ' .. ' + c2; + } + } else { + rv = 'lines ' + l1 + '(column ' + c1 + ') .. ' + l2 + '(column ' + c2 + ')'; + } + + if (yylloc.range && display_range_too) { + var r1 = yylloc.range[0]; + var r2 = yylloc.range[1] - 1; + + if (r2 <= r1) { + rv += ' {String Offset: ' + r1 + '}'; + } else { + rv += ' {String Offset range: ' + r1 + ' .. ' + r2 + '}'; + } + } + + return rv; + }, + + /** + * test the lexed token: return FALSE when not a match, otherwise return token. + * + * `match` is supposed to be an array coming out of a regex match, i.e. `match[0]` + * contains the actually matched text string. 
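describeYYLLOC(), shown above, condenses a yylloc object into a short human-readable phrase, and with the second argument set it appends the string-offset range as well. A quick sketch (invented input):

    const lexer = require('@gerhobbelt/ebnf-parser').bnf_parser.parser.lexer;

    lexer.setInput('ebnf_rule: "literal";\n');
    lexer.lex();                                            // matches the 'ebnf_rule' identifier

    console.log(lexer.describeYYLLOC(lexer.yylloc, true));
    // prints something like: line 1, columns 0 .. 9 {String Offset range: 0 .. 8}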
+ * + * Also move the input cursor forward and update the match collectors: + * + * - `yytext` + * - `yyleng` + * - `match` + * - `matches` + * - `yylloc` + * - `offset` + * + * @public + * @this {RegExpLexer} + */ + test_match: function lexer_test_match(match, indexed_rule) { + var token, lines, backup, match_str, match_str_len; + + if (this.options.backtrack_lexer) { + // save context + backup = { + yylineno: this.yylineno, + + yylloc: { + first_line: this.yylloc.first_line, + last_line: this.yylloc.last_line, + first_column: this.yylloc.first_column, + last_column: this.yylloc.last_column, + range: this.yylloc.range.slice(0) + }, + + yytext: this.yytext, + match: this.match, + matches: this.matches, + matched: this.matched, + yyleng: this.yyleng, + offset: this.offset, + _more: this._more, + _input: this._input, + + //_signaled_error_token: this._signaled_error_token, + yy: this.yy, + + conditionStack: this.conditionStack.slice(0), + done: this.done + }; + } + + match_str = match[0]; + match_str_len = match_str.length; + + // if (match_str.indexOf('\n') !== -1 || match_str.indexOf('\r') !== -1) { + lines = match_str.split(/(?:\r\n?|\n)/g); + + if (lines.length > 1) { + this.yylineno += lines.length - 1; + this.yylloc.last_line = this.yylineno + 1; + this.yylloc.last_column = lines[lines.length - 1].length; + } else { + this.yylloc.last_column += match_str_len; + } + + // } + this.yytext += match_str; + + this.match += match_str; + this.matched += match_str; + this.matches = match; + this.yyleng = this.yytext.length; + this.yylloc.range[1] += match_str_len; + + // previous lex rules MAY have invoked the `more()` API rather than producing a token: + // those rules will already have moved this `offset` forward matching their match lengths, + // hence we must only add our own match length now: + this.offset += match_str_len; + + this._more = false; + this._backtrack = false; + this._input = this._input.slice(match_str_len); + + // calling this method: + // + // function lexer__performAction(yy, yyrulenumber, YY_START) {...} + token = this.performAction.call( + this, + this.yy, + indexed_rule, + this.conditionStack[this.conditionStack.length - 1] /* = YY_START */ + ); + + // otherwise, when the action codes are all simple return token statements: + //token = this.simpleCaseActionClusters[indexed_rule]; + + if (this.done && this._input) { + this.done = false; + } + + if (token) { + return token; + } else if (this._backtrack) { + // recover context + for (var k in backup) { + this[k] = backup[k]; + } + + this.__currentRuleSet__ = null; + return false; // rule action called reject() implying the next rule should be tested instead. + } else if (this._signaled_error_token) { + // produce one 'error' token as `.parseError()` in `reject()` + // did not guarantee a failure signal by throwing an exception! + token = this._signaled_error_token; + + this._signaled_error_token = false; + return token; + } + + return false; + }, + + /** + * return next match in input + * + * @public + * @this {RegExpLexer} + */ + next: function lexer_next() { + if (this.done) { + this.clear(); + return this.EOF; + } + + if (!this._input) { + this.done = true; + } + + var token, match, tempMatch, index; + + if (!this._more) { + this.clear(); + } + + var spec = this.__currentRuleSet__; + + if (!spec) { + // Update the ruleset cache as we apparently encountered a state change or just started lexing. 
+ // The cache is set up for fast lookup -- we assume a lexer will switch states much less often than it will + // invoke the `lex()` token-producing API and related APIs, hence caching the set for direct access helps + // speed up those activities a tiny bit. + spec = this.__currentRuleSet__ = this._currentRules(); + + // Check whether a *sane* condition has been pushed before: this makes the lexer robust against + // user-programmer bugs such as https://github.com/zaach/jison-lex/issues/19 + if (!spec || !spec.rules) { + var lineno_msg = ''; + + if (this.options.trackPosition) { + lineno_msg = ' on line ' + (this.yylineno + 1); + } + + var p = this.constructLexErrorInfo( + 'Internal lexer engine error' + lineno_msg + ': The lex grammar programmer pushed a non-existing condition name "' + this.topState() + '"; this is a fatal error and should be reported to the application programmer team!', + false + ); + + // produce one 'error' token until this situation has been resolved, most probably by parse termination! + return this.parseError(p.errStr, p, this.JisonLexerError) || this.ERROR; + } + } + + var rule_ids = spec.rules; + var regexes = spec.__rule_regexes; + var len = spec.__rule_count; + + // Note: the arrays are 1-based, while `len` itself is a valid index, + // hence the non-standard less-or-equal check in the next loop condition! + for (var i = 1; i <= len; i++) { + tempMatch = this._input.match(regexes[i]); + + if (tempMatch && (!match || tempMatch[0].length > match[0].length)) { + match = tempMatch; + index = i; + + if (this.options.backtrack_lexer) { + token = this.test_match(tempMatch, rule_ids[i]); + + if (token !== false) { + return token; + } else if (this._backtrack) { + match = undefined; + continue; // rule action called reject() implying a rule MISmatch. + } else { + // else: this is a lexer rule which consumes input without producing a token (e.g. whitespace) + return false; + } + } else if (!this.options.flex) { + break; + } + } + } + + if (match) { + token = this.test_match(match, rule_ids[index]); + + if (token !== false) { + return token; + } + + // else: this is a lexer rule which consumes input without producing a token (e.g. whitespace) + return false; + } + + if (!this._input) { + this.done = true; + this.clear(); + return this.EOF; + } else { + var lineno_msg = ''; + + if (this.options.trackPosition) { + lineno_msg = ' on line ' + (this.yylineno + 1); + } + + var p = this.constructLexErrorInfo( + 'Lexical error' + lineno_msg + ': Unrecognized text.', + this.options.lexerErrorsAreRecoverable + ); + + var pendingInput = this._input; + var activeCondition = this.topState(); + var conditionStackDepth = this.conditionStack.length; + token = this.parseError(p.errStr, p, this.JisonLexerError) || this.ERROR; + + if (token === this.ERROR) { + // we can try to recover from a lexer error that `parseError()` did not 'recover' for us + // by moving forward at least one character at a time IFF the (user-specified?) `parseError()` + // has not consumed/modified any pending input or changed state in the error handler: + if (!this.matches && // and make sure the input has been modified/consumed ... + pendingInput === this._input && // ...or the lexer state has been modified significantly enough + // to merit a non-consuming error handling action right now. 
+ activeCondition === this.topState() && conditionStackDepth === this.conditionStack.length) { + this.input(); + } + } + + return token; + } + }, + + /** + * return next match that has a token + * + * @public + * @this {RegExpLexer} + */ + lex: function lexer_lex() { + var r; + + // allow the PRE/POST handlers set/modify the return token for maximum flexibility of the generated lexer: + if (typeof this.options.pre_lex === 'function') { + r = this.options.pre_lex.call(this); + } + + while (!r) { + r = this.next(); + } + + if (typeof this.options.post_lex === 'function') { + // (also account for a userdef function which does not return any value: keep the token as is) + r = this.options.post_lex.call(this, r) || r; + } + + return r; + }, + + /** + * backwards compatible alias for `pushState()`; + * the latter is symmetrical with `popState()` and we advise to use + * those APIs in any modern lexer code, rather than `begin()`. + * + * @public + * @this {RegExpLexer} + */ + begin: function lexer_begin(condition) { + return this.pushState(condition); + }, + + /** + * activates a new lexer condition state (pushes the new lexer + * condition state onto the condition stack) + * + * @public + * @this {RegExpLexer} + */ + pushState: function lexer_pushState(condition) { + this.conditionStack.push(condition); + this.__currentRuleSet__ = null; + return this; + }, + + /** + * pop the previously active lexer condition state off the condition + * stack + * + * @public + * @this {RegExpLexer} + */ + popState: function lexer_popState() { + var n = this.conditionStack.length - 1; + + if (n > 0) { + this.__currentRuleSet__ = null; + return this.conditionStack.pop(); + } else { + return this.conditionStack[0]; + } + }, + + /** + * return the currently active lexer condition state; when an index + * argument is provided it produces the N-th previous condition state, + * if available + * + * @public + * @this {RegExpLexer} + */ + topState: function lexer_topState(n) { + n = this.conditionStack.length - 1 - Math.abs(n || 0); + + if (n >= 0) { + return this.conditionStack[n]; + } else { + return 'INITIAL'; + } + }, + + /** + * (internal) determine the lexer rule set which is active for the + * currently active lexer condition state + * + * @public + * @this {RegExpLexer} + */ + _currentRules: function lexer__currentRules() { + if (this.conditionStack.length && this.conditionStack[this.conditionStack.length - 1]) { + return this.conditions[this.conditionStack[this.conditionStack.length - 1]]; + } else { + return this.conditions['INITIAL']; + } + }, + + /** + * return the number of states currently on the stack + * + * @public + * @this {RegExpLexer} + */ + stateStackSize: function lexer_stateStackSize() { + return this.conditionStack.length; + }, + + options: { + xregexp: true, + ranges: true, + trackPosition: true, + parseActionsUseYYMERGELOCATIONINFO: true, + easy_keyword_rules: true + }, + + JisonLexerError: JisonLexerError, + + performAction: function lexer__performAction(yy, yyrulenumber, YY_START) { + var yy_ = this; + switch (yyrulenumber) { + case 2: + /*! Conditions:: action */ + /*! Rule:: \/[^ /]*?['"{}][^ ]*?\/ */ + return 43; // regexp with braces or quotes (and no spaces) + + break; + + case 7: + /*! Conditions:: action */ + /*! Rule:: \{ */ + yy.depth++; + + return 12; + break; + + case 8: + /*! Conditions:: action */ + /*! Rule:: \} */ + if (yy.depth === 0) { + this.popState(); + } else { + yy.depth--; + } + + return 13; + break; + + case 9: + /*! Conditions:: token */ + /*! 
Rule:: {BR} */ + this.popState(); + + break; + + case 10: + /*! Conditions:: token */ + /*! Rule:: %% */ + this.popState(); + + break; + + case 11: + /*! Conditions:: token */ + /*! Rule:: ; */ + this.popState(); + + break; + + case 12: + /*! Conditions:: bnf ebnf */ + /*! Rule:: %% */ + this.pushState('code'); + + return 14; + break; + + case 25: + /*! Conditions:: options */ + /*! Rule:: = */ + this.pushState('option_values'); + + return 3; + break; + + case 26: + /*! Conditions:: option_values */ + /*! Rule:: "{DOUBLEQUOTED_STRING_CONTENT}" */ + yy_.yytext = unescQuote(this.matches[1], /\\"/g); + + this.popState(); + return 29; // value is always a string type + break; + + case 27: + /*! Conditions:: option_values */ + /*! Rule:: '{QUOTED_STRING_CONTENT}' */ + yy_.yytext = unescQuote(this.matches[1], /\\'/g); + + this.popState(); + return 29; // value is always a string type + break; + + case 28: + /*! Conditions:: option_values */ + /*! Rule:: `{ES2017_STRING_CONTENT}` */ + yy_.yytext = unescQuote(this.matches[1], /\\`/g); + + this.popState(); + return 29; // value is always a string type + break; + + case 29: + /*! Conditions:: INITIAL ebnf bnf token path options option_values */ + /*! Rule:: \/\/[^\r\n]* */ + /* skip single-line comment */ + break; + + case 30: + /*! Conditions:: INITIAL ebnf bnf token path options option_values */ + /*! Rule:: \/\*[^]*?\*\/ */ + /* skip multi-line comment */ + break; + + case 31: + /*! Conditions:: option_values */ + /*! Rule:: [^\s\r\n]+ */ + this.popState(); + + return 30; + break; + + case 32: + /*! Conditions:: options */ + /*! Rule:: {BR}{WS}+(?=\S) */ + /* skip leading whitespace on the next line of input, when followed by more options */ + break; + + case 33: + /*! Conditions:: options */ + /*! Rule:: {BR} */ + this.popState(); + + return 28; + break; + + case 34: + /*! Conditions:: options option_values */ + /*! Rule:: {WS}+ */ + /* skip whitespace */ + break; + + case 35: + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: {WS}+ */ + /* skip whitespace */ + break; + + case 36: + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: {BR}+ */ + /* skip newlines */ + break; + + case 37: + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: \[{ID}\] */ + yy_.yytext = this.matches[1]; + + return 39; + break; + + case 42: + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: "{DOUBLEQUOTED_STRING_CONTENT}" */ + yy_.yytext = unescQuote(this.matches[1], /\\"/g); + + return 26; + break; + + case 43: + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: '{QUOTED_STRING_CONTENT}' */ + yy_.yytext = unescQuote(this.matches[1], /\\'/g); + + return 26; + break; + + case 48: + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: %% */ + this.pushState((yy.ebnf ? 'ebnf' : 'bnf')); + + return 14; + break; + + case 49: + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: %ebnf\b */ + yy.ebnf = true; + + return 20; + break; + + case 57: + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: %token\b */ + this.pushState('token'); + + return 18; + break; + + case 59: + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: %options\b */ + this.pushState('options'); + + return 27; + break; + + case 60: + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: %lex{LEX_CONTENT}\/lex\b */ + // remove the %lex../lex wrapper and return the pure lex section: + yy_.yytext = this.matches[1]; + + return 17; + break; + + case 63: + /*! Conditions:: INITIAL ebnf bnf code */ + /*! 
Rule:: %include\b */ + this.pushState('path'); + + return 44; + break; + + case 64: + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: %{NAME}([^\r\n]*) */ + /* ignore unrecognized decl */ + this.warn(rmCommonWS` + EBNF: ignoring unsupported parser option ${dquote(yy_.yytext)} + while lexing in ${dquote(this.topState())} state. + + Erroneous area: + ` + this.prettyPrintRange(yy_.yylloc)); + + yy_.yytext = [ + this.matches[1], // {NAME} + this.matches[2].trim() // optional value/parameters + ]; + + return 21; + break; + + case 65: + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: <{ID}> */ + yy_.yytext = this.matches[1]; + + return 36; + break; + + case 66: + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: \{\{([^]*?)\}\} */ + yy_.yytext = this.matches[1].replace(/\}\\\}/g, '}}'); // unescape any literal '}\}' that exists within the action code block + + return 15; + break; + + case 67: + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: %\{([^]*?)%\} */ + yy_.yytext = this.matches[1].replace(/%\\\}/g, '%}'); // unescape any literal '%\}' that exists within the action code block + + return 15; + break; + + case 68: + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: \{ */ + yy.depth = 0; + + this.pushState('action'); + return 12; + break; + + case 69: + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: ->.* */ + yy_.yytext = yy_.yytext.substr(2, yy_.yyleng - 2).trim(); + + return 42; + break; + + case 70: + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: →.* */ + yy_.yytext = yy_.yytext.substr(1, yy_.yyleng - 1).trim(); + + return 42; + break; + + case 71: + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: =>.* */ + yy_.yytext = yy_.yytext.substr(2, yy_.yyleng - 2).trim(); + + return 42; + break; + + case 72: + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: {HEX_NUMBER} */ + yy_.yytext = parseInt(yy_.yytext, 16); + + return 37; + break; + + case 73: + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: {DECIMAL_NUMBER}(?![xX0-9a-fA-F]) */ + yy_.yytext = parseInt(yy_.yytext, 10); + + return 37; + break; + + case 75: + /*! Conditions:: code */ + /*! Rule:: [^\r\n]+ */ + return 46; // the bit of CODE just before EOF... + + break; + + case 76: + /*! Conditions:: path */ + /*! Rule:: {BR} */ + this.popState(); + + this.unput(yy_.yytext); + break; + + case 77: + /*! Conditions:: path */ + /*! Rule:: "{DOUBLEQUOTED_STRING_CONTENT}" */ + yy_.yytext = unescQuote(this.matches[1]); + + this.popState(); + return 45; + break; + + case 78: + /*! Conditions:: path */ + /*! Rule:: '{QUOTED_STRING_CONTENT}' */ + yy_.yytext = unescQuote(this.matches[1]); + + this.popState(); + return 45; + break; + + case 79: + /*! Conditions:: path */ + /*! Rule:: {WS}+ */ + // skip whitespace in the line + break; + + case 80: + /*! Conditions:: path */ + /*! Rule:: [^\s\r\n]+ */ + this.popState(); + + return 45; + break; + + case 81: + /*! Conditions:: action */ + /*! Rule:: " */ + yy_.yyerror(rmCommonWS` + unterminated string constant in lexer rule action block. + + Erroneous area: + ` + this.prettyPrintRange(yy_.yylloc)); + + return 2; + break; + + case 82: + /*! Conditions:: action */ + /*! Rule:: ' */ + yy_.yyerror(rmCommonWS` + unterminated string constant in lexer rule action block. + + Erroneous area: + ` + this.prettyPrintRange(yy_.yylloc)); + + return 2; + break; + + case 83: + /*! Conditions:: action */ + /*! Rule:: ` */ + yy_.yyerror(rmCommonWS` + unterminated string constant in lexer rule action block. 
+ + Erroneous area: + ` + this.prettyPrintRange(yy_.yylloc)); + + return 2; + break; + + case 84: + /*! Conditions:: option_values */ + /*! Rule:: " */ + yy_.yyerror(rmCommonWS` + unterminated string constant in %options entry. + + Erroneous area: + ` + this.prettyPrintRange(yy_.yylloc)); + + return 2; + break; + + case 85: + /*! Conditions:: option_values */ + /*! Rule:: ' */ + yy_.yyerror(rmCommonWS` + unterminated string constant in %options entry. + + Erroneous area: + ` + this.prettyPrintRange(yy_.yylloc)); + + return 2; + break; + + case 86: + /*! Conditions:: option_values */ + /*! Rule:: ` */ + yy_.yyerror(rmCommonWS` + unterminated string constant in %options entry. + + Erroneous area: + ` + this.prettyPrintRange(yy_.yylloc)); + + return 2; + break; + + case 87: + /*! Conditions:: * */ + /*! Rule:: " */ + var rules = (this.topState() === 'macro' ? 'macro\'s' : this.topState()); + + yy_.yyerror(rmCommonWS` + unterminated string constant encountered while lexing + ${rules}. + + Erroneous area: + ` + this.prettyPrintRange(yy_.yylloc)); + + return 2; + break; + + case 88: + /*! Conditions:: * */ + /*! Rule:: ' */ + var rules = (this.topState() === 'macro' ? 'macro\'s' : this.topState()); + + yy_.yyerror(rmCommonWS` + unterminated string constant encountered while lexing + ${rules}. + + Erroneous area: + ` + this.prettyPrintRange(yy_.yylloc)); + + return 2; + break; + + case 89: + /*! Conditions:: * */ + /*! Rule:: ` */ + var rules = (this.topState() === 'macro' ? 'macro\'s' : this.topState()); + + yy_.yyerror(rmCommonWS` + unterminated string constant encountered while lexing + ${rules}. + + Erroneous area: + ` + this.prettyPrintRange(yy_.yylloc)); + + return 2; + break; + + case 90: + /*! Conditions:: * */ + /*! Rule:: . */ + /* b0rk on bad characters */ + yy_.yyerror(rmCommonWS` + unsupported parser input: ${dquote(yy_.yytext)} + while lexing in ${dquote(this.topState())} state. + + Erroneous area: + ` + this.prettyPrintRange(yy_.yylloc)); + + break; + + default: + return this.simpleCaseActionClusters[yyrulenumber]; + } + }, + + simpleCaseActionClusters: { + /*! Conditions:: action */ + /*! Rule:: \/\*[^]*?\*\/ */ + 0: 43, + + /*! Conditions:: action */ + /*! Rule:: \/\/[^\r\n]* */ + 1: 43, + + /*! Conditions:: action */ + /*! Rule:: "{DOUBLEQUOTED_STRING_CONTENT}" */ + 3: 43, + + /*! Conditions:: action */ + /*! Rule:: '{QUOTED_STRING_CONTENT}' */ + 4: 43, + + /*! Conditions:: action */ + /*! Rule:: [/"'][^{}/"']+ */ + 5: 43, + + /*! Conditions:: action */ + /*! Rule:: [^{}/"']+ */ + 6: 43, + + /*! Conditions:: bnf ebnf */ + /*! Rule:: %empty\b */ + 13: 38, + + /*! Conditions:: bnf ebnf */ + /*! Rule:: %epsilon\b */ + 14: 38, + + /*! Conditions:: bnf ebnf */ + /*! Rule:: \u0190 */ + 15: 38, + + /*! Conditions:: bnf ebnf */ + /*! Rule:: \u025B */ + 16: 38, + + /*! Conditions:: bnf ebnf */ + /*! Rule:: \u03B5 */ + 17: 38, + + /*! Conditions:: bnf ebnf */ + /*! Rule:: \u03F5 */ + 18: 38, + + /*! Conditions:: ebnf */ + /*! Rule:: \( */ + 19: 7, + + /*! Conditions:: ebnf */ + /*! Rule:: \) */ + 20: 8, + + /*! Conditions:: ebnf */ + /*! Rule:: \* */ + 21: 9, + + /*! Conditions:: ebnf */ + /*! Rule:: \? */ + 22: 10, + + /*! Conditions:: ebnf */ + /*! Rule:: \+ */ + 23: 11, + + /*! Conditions:: options */ + /*! Rule:: {NAME} */ + 24: 25, + + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: {ID} */ + 38: 24, + + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: {NAME} */ + 39: 25, + + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: \$end\b */ + 40: 40, + + /*! 
Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: \$eof\b */ + 41: 40, + + /*! Conditions:: token */ + /*! Rule:: [^\s\r\n]+ */ + 44: 'TOKEN_WORD', + + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: : */ + 45: 5, + + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: ; */ + 46: 4, + + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: \| */ + 47: 6, + + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: %debug\b */ + 50: 19, + + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: %parser-type\b */ + 51: 32, + + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: %prec\b */ + 52: 41, + + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: %start\b */ + 53: 16, + + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: %left\b */ + 54: 33, + + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: %right\b */ + 55: 34, + + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: %nonassoc\b */ + 56: 35, + + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: %parse-param\b */ + 58: 31, + + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: %code\b */ + 61: 23, + + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: %import\b */ + 62: 22, + + /*! Conditions:: code */ + /*! Rule:: [^\r\n]*(\r|\n)+ */ + 74: 46, + + /*! Conditions:: * */ + /*! Rule:: $ */ + 91: 1 + }, + + rules: [ + /* 0: */ new XRegExp('^(?:\\/\\*[^]*?\\*\\/)', ''), + /* 1: */ /^(?:\/\/[^\r\n]*)/, + /* 2: */ /^(?:\/[^ \/]*?['"{}][^ ]*?\/)/, + /* 3: */ /^(?:"((?:\\"|\\[^"]|[^\n\r"\\])*)")/, + /* 4: */ /^(?:'((?:\\'|\\[^']|[^\n\r'\\])*)')/, + /* 5: */ /^(?:[\/"'][^{}\/"']+)/, + /* 6: */ /^(?:[^{}\/"']+)/, + /* 7: */ /^(?:\{)/, + /* 8: */ /^(?:\})/, + /* 9: */ /^(?:(\r\n|\n|\r))/, + /* 10: */ /^(?:%%)/, + /* 11: */ /^(?:;)/, + /* 12: */ /^(?:%%)/, + /* 13: */ /^(?:%empty\b)/, + /* 14: */ /^(?:%epsilon\b)/, + /* 15: */ /^(?:\u0190)/, + /* 16: */ /^(?:\u025B)/, + /* 17: */ /^(?:\u03B5)/, + /* 18: */ /^(?:\u03F5)/, + /* 19: */ /^(?:\()/, + /* 20: */ /^(?:\))/, + /* 21: */ /^(?:\*)/, + /* 22: */ /^(?:\?)/, + /* 23: */ /^(?:\+)/, + /* 24: */ new XRegExp( + '^(?:([\\p{Alphabetic}_](?:[\\p{Alphabetic}\\p{Number}\\-_]*(?:[\\p{Alphabetic}\\p{Number}_]))?))', + '' + ), + /* 25: */ /^(?:=)/, + /* 26: */ /^(?:"((?:\\"|\\[^"]|[^\n\r"\\])*)")/, + /* 27: */ /^(?:'((?:\\'|\\[^']|[^\n\r'\\])*)')/, + /* 28: */ /^(?:`((?:\\`|\\[^`]|[^\\`])*)`)/, + /* 29: */ /^(?:\/\/[^\r\n]*)/, + /* 30: */ new XRegExp('^(?:\\/\\*[^]*?\\*\\/)', ''), + /* 31: */ /^(?:\S+)/, + /* 32: */ /^(?:(\r\n|\n|\r)([^\S\n\r])+(?=\S))/, + /* 33: */ /^(?:(\r\n|\n|\r))/, + /* 34: */ /^(?:([^\S\n\r])+)/, + /* 35: */ /^(?:([^\S\n\r])+)/, + /* 36: */ /^(?:(\r\n|\n|\r)+)/, + /* 37: */ new XRegExp('^(?:\\[([\\p{Alphabetic}_](?:[\\p{Alphabetic}\\p{Number}_])*)\\])', ''), + /* 38: */ new XRegExp('^(?:([\\p{Alphabetic}_](?:[\\p{Alphabetic}\\p{Number}_])*))', ''), + /* 39: */ new XRegExp( + '^(?:([\\p{Alphabetic}_](?:[\\p{Alphabetic}\\p{Number}\\-_]*(?:[\\p{Alphabetic}\\p{Number}_]))?))', + '' + ), + /* 40: */ /^(?:\$end\b)/, + /* 41: */ /^(?:\$eof\b)/, + /* 42: */ /^(?:"((?:\\"|\\[^"]|[^\n\r"\\])*)")/, + /* 43: */ /^(?:'((?:\\'|\\[^']|[^\n\r'\\])*)')/, + /* 44: */ /^(?:\S+)/, + /* 45: */ /^(?::)/, + /* 46: */ /^(?:;)/, + /* 47: */ /^(?:\|)/, + /* 48: */ /^(?:%%)/, + /* 49: */ /^(?:%ebnf\b)/, + /* 50: */ /^(?:%debug\b)/, + /* 51: */ /^(?:%parser-type\b)/, + /* 52: */ /^(?:%prec\b)/, + /* 53: */ /^(?:%start\b)/, + /* 54: */ /^(?:%left\b)/, + /* 55: */ /^(?:%right\b)/, + /* 56: */ /^(?:%nonassoc\b)/, + /* 57: */ /^(?:%token\b)/, + /* 58: */ 
/^(?:%parse-param\b)/, + /* 59: */ /^(?:%options\b)/, + /* 60: */ new XRegExp( + '^(?:%lex((?:[^\\S\\n\\r])*(?:(?:\\r\\n|\\n|\\r)[^]*?)?(?:\\r\\n|\\n|\\r)(?:[^\\S\\n\\r])*)\\/lex\\b)', + '' + ), + /* 61: */ /^(?:%code\b)/, + /* 62: */ /^(?:%import\b)/, + /* 63: */ /^(?:%include\b)/, + /* 64: */ new XRegExp( + '^(?:%([\\p{Alphabetic}_](?:[\\p{Alphabetic}\\p{Number}\\-_]*(?:[\\p{Alphabetic}\\p{Number}_]))?)([^\\n\\r]*))', + '' + ), + /* 65: */ new XRegExp('^(?:<([\\p{Alphabetic}_](?:[\\p{Alphabetic}\\p{Number}_])*)>)', ''), + /* 66: */ new XRegExp('^(?:\\{\\{([^]*?)\\}\\})', ''), + /* 67: */ new XRegExp('^(?:%\\{([^]*?)%\\})', ''), + /* 68: */ /^(?:\{)/, + /* 69: */ /^(?:->.*)/, + /* 70: */ /^(?:→.*)/, + /* 71: */ /^(?:=>.*)/, + /* 72: */ /^(?:(0[Xx][\dA-Fa-f]+))/, + /* 73: */ /^(?:([1-9]\d*)(?![\dA-FXa-fx]))/, + /* 74: */ /^(?:[^\r\n]*(\r|\n)+)/, + /* 75: */ /^(?:[^\r\n]+)/, + /* 76: */ /^(?:(\r\n|\n|\r))/, + /* 77: */ /^(?:"((?:\\"|\\[^"]|[^\n\r"\\])*)")/, + /* 78: */ /^(?:'((?:\\'|\\[^']|[^\n\r'\\])*)')/, + /* 79: */ /^(?:([^\S\n\r])+)/, + /* 80: */ /^(?:\S+)/, + /* 81: */ /^(?:")/, + /* 82: */ /^(?:')/, + /* 83: */ /^(?:`)/, + /* 84: */ /^(?:")/, + /* 85: */ /^(?:')/, + /* 86: */ /^(?:`)/, + /* 87: */ /^(?:")/, + /* 88: */ /^(?:')/, + /* 89: */ /^(?:`)/, + /* 90: */ /^(?:.)/, + /* 91: */ /^(?:$)/ + ], + + conditions: { + 'action': { + rules: [0, 1, 2, 3, 4, 5, 6, 7, 8, 81, 82, 83, 87, 88, 89, 90, 91], + inclusive: false + }, + + 'code': { + rules: [63, 74, 75, 87, 88, 89, 90, 91], + inclusive: false + }, + + 'path': { + rules: [29, 30, 76, 77, 78, 79, 80, 87, 88, 89, 90, 91], + inclusive: false + }, + + 'options': { + rules: [24, 25, 29, 30, 32, 33, 34, 87, 88, 89, 90, 91], + inclusive: false + }, + + 'option_values': { + rules: [26, 27, 28, 29, 30, 31, 34, 84, 85, 86, 87, 88, 89, 90, 91], + inclusive: false + }, + + 'token': { + rules: [ + 9, + 10, + 11, + 29, + 30, + 35, + 36, + 37, + 38, + 39, + 40, + 41, + 42, + 43, + 44, + 45, + 46, + 47, + 48, + 49, + 50, + 51, + 52, + 53, + 54, + 55, + 56, + 57, + 58, + 59, + 60, + 61, + 62, + 64, + 65, + 66, + 67, + 68, + 69, + 70, + 71, + 72, + 73, + 87, + 88, + 89, + 90, + 91 + ], + + inclusive: true + }, + + 'bnf': { + rules: [ + 12, + 13, + 14, + 15, + 16, + 17, + 18, + 29, + 30, + 35, + 36, + 37, + 38, + 39, + 40, + 41, + 42, + 43, + 45, + 46, + 47, + 48, + 49, + 50, + 51, + 52, + 53, + 54, + 55, + 56, + 57, + 58, + 59, + 60, + 61, + 62, + 63, + 64, + 65, + 66, + 67, + 68, + 69, + 70, + 71, + 72, + 73, + 87, + 88, + 89, + 90, + 91 + ], + + inclusive: true + }, + + 'ebnf': { + rules: [ + 12, + 13, + 14, + 15, + 16, + 17, + 18, + 19, + 20, + 21, + 22, + 23, + 29, + 30, + 35, + 36, + 37, + 38, + 39, + 40, + 41, + 42, + 43, + 45, + 46, + 47, + 48, + 49, + 50, + 51, + 52, + 53, + 54, + 55, + 56, + 57, + 58, + 59, + 60, + 61, + 62, + 63, + 64, + 65, + 66, + 67, + 68, + 69, + 70, + 71, + 72, + 73, + 87, + 88, + 89, + 90, + 91 + ], + + inclusive: true + }, + + 'INITIAL': { + rules: [ + 29, + 30, + 35, + 36, + 37, + 38, + 39, + 40, + 41, + 42, + 43, + 45, + 46, + 47, + 48, + 49, + 50, + 51, + 52, + 53, + 54, + 55, + 56, + 57, + 58, + 59, + 60, + 61, + 62, + 63, + 64, + 65, + 66, + 67, + 68, + 69, + 70, + 71, + 72, + 73, + 87, + 88, + 89, + 90, + 91 + ], + + inclusive: true + } + } + }; + + var rmCommonWS = helpers.rmCommonWS; + var dquote = helpers.dquote; + + function unescQuote(str) { + str = '' + str; + var a = str.split('\\\\'); + + a = a.map(function(s) { + return s.replace(/\\'/g, '\'').replace(/\\"/g, '"'); + }); + + str = a.join('\\\\'); + 
return str; + } + + lexer.warn = function l_warn() { + if (this.yy && this.yy.parser && typeof this.yy.parser.warn === 'function') { + return this.yy.parser.warn.apply(this, arguments); + } else { + console.warn.apply(console, arguments); + } + }; + + lexer.log = function l_log() { + if (this.yy && this.yy.parser && typeof this.yy.parser.log === 'function') { + return this.yy.parser.log.apply(this, arguments); + } else { + console.log.apply(console, arguments); + } + }; + + return lexer; +}(); +parser.lexer = lexer; + +function Parser() { + this.yy = {}; +} +Parser.prototype = parser; +parser.Parser = Parser; + +function yyparse() { + return parser.parse.apply(parser, arguments); +} + + + +var bnf = { + parser, + Parser, + parse: yyparse, + +}; + +var version = '0.6.1-205'; // require('./package.json').version; + +function parse(grammar) { + return bnf.parser.parse(grammar); +} + +// adds a declaration to the grammar +bnf.parser.yy.addDeclaration = function bnfAddDeclaration(grammar, decl) { + if (decl.start) { + grammar.start = decl.start; + } else if (decl.lex) { + grammar.lex = parseLex(decl.lex.text, decl.lex.position); + } else if (decl.operator) { + if (!grammar.operators) grammar.operators = []; + grammar.operators.push(decl.operator); + } else if (decl.token) { + if (!grammar.extra_tokens) grammar.extra_tokens = []; + grammar.extra_tokens.push(decl.token); + } else if (decl.token_list) { + if (!grammar.extra_tokens) grammar.extra_tokens = []; + decl.token_list.forEach(function (tok) { + grammar.extra_tokens.push(tok); + }); + } else if (decl.parseParams) { + if (!grammar.parseParams) grammar.parseParams = []; + grammar.parseParams = grammar.parseParams.concat(decl.parseParams); + } else if (decl.parserType) { + if (!grammar.options) grammar.options = {}; + grammar.options.type = decl.parserType; + } else if (decl.include) { + if (!grammar.moduleInclude) grammar.moduleInclude = ''; + grammar.moduleInclude += decl.include; + } else if (decl.options) { + if (!grammar.options) grammar.options = {}; + // last occurrence of `%options` wins: + for (var i = 0; i < decl.options.length; i++) { + grammar.options[decl.options[i][0]] = decl.options[i][1]; + } + } else if (decl.unknownDecl) { + if (!grammar.unknownDecls) grammar.unknownDecls = []; + grammar.unknownDecls.push(decl.unknownDecl); + } else if (decl.imports) { + if (!grammar.imports) grammar.imports = []; + grammar.imports.push(decl.imports); + } else if (decl.actionInclude) { + if (!grammar.actionInclude) { + grammar.actionInclude = ''; + } + grammar.actionInclude += decl.actionInclude; + } else if (decl.initCode) { + if (!grammar.moduleInit) { + grammar.moduleInit = []; + } + grammar.moduleInit.push(decl.initCode); // {qualifier: , include: } + } +}; + +// parse an embedded lex section +function parseLex(text, position) { + text = text.replace(/(?:^%lex)|(?:\/lex$)/g, ''); + // We want the lex input to start at the given 'position', if any, + // so that error reports will produce a line number and character index + // which matches the original input file: + position = position || {}; + position.range = position.range || []; + var l = position.first_line | 0; + var c = position.range[0] | 0; + var prelude = ''; + if (l > 1) { + prelude += (new Array(l)).join('\n'); + c -= prelude.length; + } + if (c > 3) { + prelude = '// ' + (new Array(c - 3)).join('.') + prelude; + } + return jisonlex.parse(prelude + text); +} + +const ebnf_parser = { + transform +}; + +var ebnfParser = { + parse, + + transform, + + // assistant exports for 
debugging/testing:
+ bnf_parser: bnf,
+ ebnf_parser,
+ bnf_lexer: jisonlex,
+
+ version,
+};
+
+module.exports = ebnfParser;
diff --git a/dist/ebnf-parser-es6.js b/dist/ebnf-parser-es6.js
new file mode 100644
index 0000000..b85fdb2
--- /dev/null
+++ b/dist/ebnf-parser-es6.js
@@ -0,0 +1,11533 @@
+import XRegExp from '@gerhobbelt/xregexp';
+import helpers from 'jison-helpers-lib';
+import fs from 'fs';
+import jisonlex from '@gerhobbelt/lex-parser';
+
+/* parser generated by jison 0.6.1-205 */
+
+/*
+ * Returns a Parser object of the following structure:
+ *
+ * Parser: {
+ * yy: {} The so-called "shared state" or rather the *source* of it;
+ * the real "shared state" `yy` passed around to
+ * the rule actions, etc. is a derivative/copy of this one,
+ * not a direct reference!
+ * }
+ *
+ * Parser.prototype: {
+ * yy: {},
+ * EOF: 1,
+ * TERROR: 2,
+ *
+ * trace: function(errorMessage, ...),
+ *
+ * JisonParserError: function(msg, hash),
+ *
+ * quoteName: function(name),
+ * Helper function which can be overridden by user code later on: put suitable
+ * quotes around literal IDs in a description string.
+ *
+ * originalQuoteName: function(name),
+ * The basic quoteName handler provided by JISON.
+ * `cleanupAfterParse()` will clean up and reset `quoteName()` to reference this function
+ * at the end of the `parse()`.
+ *
+ * describeSymbol: function(symbol),
+ * Return a more-or-less human-readable description of the given symbol, when
+ * available, or the symbol itself, serving as its own 'description' for lack
+ * of something better to serve up.
+ *
+ * Return NULL when the symbol is unknown to the parser.
+ *
+ * symbols_: {associative list: name ==> number},
+ * terminals_: {associative list: number ==> name},
+ * nonterminals: {associative list: rule-name ==> {associative list: number ==> rule-alt}},
+ * terminal_descriptions_: (if there are any) {associative list: number ==> description},
+ * productions_: [...],
+ *
+ * performAction: function parser__performAction(yytext, yyleng, yylineno, yyloc, yystate, yysp, yyvstack, yylstack, yystack, yysstack),
+ *
+ * The function parameters and `this` have the following value/meaning:
+ * - `this` : reference to the `yyval` internal object, which has members (`$` and `_$`)
+ * to store/reference the rule value `$$` and location info `@$`.
+ *
+ * One important thing to note about `this` a.k.a. `yyval`: every *reduce* action gets
+ * to see the same object via the `this` reference, i.e. if you wish to carry custom
+ * data from one reduce action through to the next within a single parse run, then you
+ * may get nasty and use `yyval` a.k.a. `this` for storing your own semi-permanent data.
+ *
+ * `this.yy` is a direct reference to the `yy` shared state object.
+ *
+ * `%parse-param`-specified additional `parse()` arguments have been added to this `yy`
+ * object at `parse()` start and are therefore available to the action code via the
+ * same named `yy.xxxx` attributes (where `xxxx` represents an identifier name from
+ * the `%parse-param` list).
+ *
+ * - `yytext` : reference to the lexer value which belongs to the last lexer token used
+ * to match this rule. This is *not* the look-ahead token, but the last token
+ * that's actually part of this rule.
+ *
+ * Formulated another way, `yytext` is the value of the token immediately preceding
+ * the current look-ahead token.
+ * Caveats apply for rules which don't require look-ahead, such as epsilon rules.
+ *
+ * - `yyleng` : ditto as `yytext`, only now for the lexer.yyleng value.
+ * + * - `yylineno`: ditto as `yytext`, only now for the lexer.yylineno value. + * + * - `yyloc` : ditto as `yytext`, only now for the lexer.yylloc lexer token location info. + * + * WARNING: since jison 0.4.18-186 this entry may be NULL/UNDEFINED instead + * of an empty object when no suitable location info can be provided. + * + * - `yystate` : the current parser state number, used internally for dispatching and + * executing the action code chunk matching the rule currently being reduced. + * + * - `yysp` : the current state stack position (a.k.a. 'stack pointer') + * + * This one comes in handy when you are going to do advanced things to the parser + * stacks, all of which are accessible from your action code (see the next entries below). + * + * Also note that you can access this and other stack index values using the new double-hash + * syntax, i.e. `##$ === ##0 === yysp`, while `##1` is the stack index for all things + * related to the first rule term, just like you have `$1`, `@1` and `#1`. + * This is made available to write very advanced grammar action rules, e.g. when you want + * to investigate the parse state stack in your action code, which would, for example, + * be relevant when you wish to implement error diagnostics and reporting schemes similar + * to the work described here: + * + * + Pottier, F., 2016. Reachability and error diagnosis in LR(1) automata. + * In Journées Francophones des Languages Applicatifs. + * + * + Jeffery, C.L., 2003. Generating LR syntax error messages from examples. + * ACM Transactions on Programming Languages and Systems (TOPLAS), 25(5), pp.631–640. + * + * - `yyrulelength`: the current rule's term count, i.e. the number of entries occupied on the stack. + * + * This one comes in handy when you are going to do advanced things to the parser + * stacks, all of which are accessible from your action code (see the next entries below). + * + * - `yyvstack`: reference to the parser value stack. Also accessed via the `$1` etc. + * constructs. + * + * - `yylstack`: reference to the parser token location stack. Also accessed via + * the `@1` etc. constructs. + * + * WARNING: since jison 0.4.18-186 this array MAY contain slots which are + * UNDEFINED rather than an empty (location) object, when the lexer/parser + * action code did not provide a suitable location info object when such a + * slot was filled! + * + * - `yystack` : reference to the parser token id stack. Also accessed via the + * `#1` etc. constructs. + * + * Note: this is a bit of a **white lie** as we can statically decode any `#n` reference to + * its numeric token id value, hence that code wouldn't need the `yystack` but *you* might + * want access this array for your own purposes, such as error analysis as mentioned above! + * + * Note that this stack stores the current stack of *tokens*, that is the sequence of + * already parsed=reduced *nonterminals* (tokens representing rules) and *terminals* + * (lexer tokens *shifted* onto the stack until the rule they belong to is found and + * *reduced*. + * + * - `yysstack`: reference to the parser state stack. This one carries the internal parser + * *states* such as the one in `yystate`, which are used to represent + * the parser state machine in the *parse table*. *Very* *internal* stuff, + * what can I say? If you access this one, you're clearly doing wicked things + * + * - `...` : the extra arguments you specified in the `%parse-param` statement in your + * grammar definition file. 
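// Editor's note: sketch of the `%parse-param` behaviour described in the bullet above.
// Assuming a grammar that declared `%parse-param appContext` (hypothetical name), the extra
// argument handed to parse() is copied onto the shared `yy` object, so rule actions can read
// it as `yy.appContext`. (`parser` stands for any parser generated from such a grammar,
// and `sourceText` is a hypothetical input string.)
var appContext = { symbolTable: {}, warnings: [] };
var result = parser.parse(sourceText, appContext);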
+ * + * table: [...], + * State transition table + * ---------------------- + * + * index levels are: + * - `state` --> hash table + * - `symbol` --> action (number or array) + * + * If the `action` is an array, these are the elements' meaning: + * - index [0]: 1 = shift, 2 = reduce, 3 = accept + * - index [1]: GOTO `state` + * + * If the `action` is a number, it is the GOTO `state` + * + * defaultActions: {...}, + * + * parseError: function(str, hash, ExceptionClass), + * yyError: function(str, ...), + * yyRecovering: function(), + * yyErrOk: function(), + * yyClearIn: function(), + * + * constructParseErrorInfo: function(error_message, exception_object, expected_token_set, is_recoverable), + * Helper function **which will be set up during the first invocation of the `parse()` method**. + * Produces a new errorInfo 'hash object' which can be passed into `parseError()`. + * See it's use in this parser kernel in many places; example usage: + * + * var infoObj = parser.constructParseErrorInfo('fail!', null, + * parser.collect_expected_token_set(state), true); + * var retVal = parser.parseError(infoObj.errStr, infoObj, parser.JisonParserError); + * + * originalParseError: function(str, hash, ExceptionClass), + * The basic `parseError` handler provided by JISON. + * `cleanupAfterParse()` will clean up and reset `parseError()` to reference this function + * at the end of the `parse()`. + * + * options: { ... parser %options ... }, + * + * parse: function(input[, args...]), + * Parse the given `input` and return the parsed value (or `true` when none was provided by + * the root action, in which case the parser is acting as a *matcher*). + * You MAY use the additional `args...` parameters as per `%parse-param` spec of this grammar: + * these extra `args...` are added verbatim to the `yy` object reference as member variables. + * + * WARNING: + * Parser's additional `args...` parameters (via `%parse-param`) MAY conflict with + * any attributes already added to `yy` by the jison run-time; + * when such a collision is detected an exception is thrown to prevent the generated run-time + * from silently accepting this confusing and potentially hazardous situation! + * + * The lexer MAY add its own set of additional parameters (via the `%parse-param` line in + * the lexer section of the grammar spec): these will be inserted in the `yy` shared state + * object and any collision with those will be reported by the lexer via a thrown exception. + * + * cleanupAfterParse: function(resultValue, invoke_post_methods, do_not_nuke_errorinfos), + * Helper function **which will be set up during the first invocation of the `parse()` method**. + * This helper API is invoked at the end of the `parse()` call, unless an exception was thrown + * and `%options no-try-catch` has been defined for this grammar: in that case this helper MAY + * be invoked by calling user code to ensure the `post_parse` callbacks are invoked and + * the internal parser gets properly garbage collected under these particular circumstances. + * + * yyMergeLocationInfo: function(first_index, last_index, first_yylloc, last_yylloc, dont_look_back), + * Helper function **which will be set up during the first invocation of the `parse()` method**. + * This helper API can be invoked to calculate a spanning `yylloc` location info object. + * + * Note: %epsilon rules MAY specify no `first_index` and `first_yylloc`, in which case + * this function will attempt to obtain a suitable location marker by inspecting the location stack + * backwards. 
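// Editor's note: an illustrative decoder for the `table` encoding documented above
// (index [0]: 1 = shift, 2 = reduce, 3 = accept; a bare number is a GOTO state).
// The parse kernel further below does the same thing inline; this helper is only a sketch.
function describeTableEntry(entry) {
    if (typeof entry === 'number') {
        return 'goto state ' + entry;
    }
    switch (entry[0]) {
    case 1:  return 'shift and go to state ' + entry[1];
    case 2:  return 'reduce by production #' + entry[1];
    case 3:  return 'accept';
    default: return 'no action';
    }
}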
+ * + * For more info see the documentation comment further below, immediately above this function's + * implementation. + * + * lexer: { + * yy: {...}, A reference to the so-called "shared state" `yy` once + * received via a call to the `.setInput(input, yy)` lexer API. + * EOF: 1, + * ERROR: 2, + * JisonLexerError: function(msg, hash), + * parseError: function(str, hash, ExceptionClass), + * setInput: function(input, [yy]), + * input: function(), + * unput: function(str), + * more: function(), + * reject: function(), + * less: function(n), + * pastInput: function(n), + * upcomingInput: function(n), + * showPosition: function(), + * test_match: function(regex_match_array, rule_index, ...), + * next: function(...), + * lex: function(...), + * begin: function(condition), + * pushState: function(condition), + * popState: function(), + * topState: function(), + * _currentRules: function(), + * stateStackSize: function(), + * cleanupAfterLex: function() + * + * options: { ... lexer %options ... }, + * + * performAction: function(yy, yy_, $avoiding_name_collisions, YY_START, ...), + * rules: [...], + * conditions: {associative list: name ==> set}, + * } + * } + * + * + * token location info (@$, _$, etc.): { + * first_line: n, + * last_line: n, + * first_column: n, + * last_column: n, + * range: [start_number, end_number] + * (where the numbers are indexes into the input string, zero-based) + * } + * + * --- + * + * The `parseError` function receives a 'hash' object with these members for lexer and + * parser errors: + * + * { + * text: (matched text) + * token: (the produced terminal token, if any) + * token_id: (the produced terminal token numeric ID, if any) + * line: (yylineno) + * loc: (yylloc) + * } + * + * parser (grammar) errors will also provide these additional members: + * + * { + * expected: (array describing the set of expected tokens; + * may be UNDEFINED when we cannot easily produce such a set) + * state: (integer (or array when the table includes grammar collisions); + * represents the current internal state of the parser kernel. + * can, for example, be used to pass to the `collect_expected_token_set()` + * API to obtain the expected token set) + * action: (integer; represents the current internal action which will be executed) + * new_state: (integer; represents the next/planned internal state, once the current + * action has executed) + * recoverable: (boolean: TRUE when the parser MAY have an error recovery rule + * available for this particular error) + * state_stack: (array: the current parser LALR/LR internal state stack; this can be used, + * for instance, for advanced error analysis and reporting) + * value_stack: (array: the current parser LALR/LR internal `$$` value stack; this can be used, + * for instance, for advanced error analysis and reporting) + * location_stack: (array: the current parser LALR/LR internal location stack; this can be used, + * for instance, for advanced error analysis and reporting) + * yy: (object: the current parser internal "shared state" `yy` + * as is also available in the rule actions; this can be used, + * for instance, for advanced error analysis and reporting) + * lexer: (reference to the current lexer instance used by the parser) + * parser: (reference to the current parser instance) + * } + * + * while `this` will reference the current parser instance. 
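// Editor's note: sketch of overriding `parseError` per parser instance through `yy`, as
// described above; the `hash` argument carries `expected`, `recoverable`, the stacks, etc.
// (`Parser` is the constructor exported from this package's main module, seen earlier in
// this diff as `ebnfParser.bnf_parser.Parser`.)
var p = new ebnfParser.bnf_parser.Parser();
p.yy.parseError = function (str, hash, ExceptionClass) {
    if (hash && hash.recoverable) {
        console.warn('recoverable parse problem:', str, '-- expected:', hash.expected);
        return;    // returning lets the kernel continue (when the grammar has `error` recovery rules)
    }
    throw new (ExceptionClass || Error)(str);
};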
+ * + * When `parseError` is invoked by the lexer, `this` will still reference the related *parser* + * instance, while these additional `hash` fields will also be provided: + * + * { + * lexer: (reference to the current lexer instance which reported the error) + * } + * + * When `parseError` is invoked by the parser due to a **JavaScript exception** being fired + * from either the parser or lexer, `this` will still reference the related *parser* + * instance, while these additional `hash` fields will also be provided: + * + * { + * exception: (reference to the exception thrown) + * } + * + * Please do note that in the latter situation, the `expected` field will be omitted as + * this type of failure is assumed not to be due to *parse errors* but rather due to user + * action code in either parser or lexer failing unexpectedly. + * + * --- + * + * You can specify parser options by setting / modifying the `.yy` object of your Parser instance. + * These options are available: + * + * ### options which are global for all parser instances + * + * Parser.pre_parse: function(yy) + * optional: you can specify a pre_parse() function in the chunk following + * the grammar, i.e. after the last `%%`. + * Parser.post_parse: function(yy, retval, parseInfo) { return retval; } + * optional: you can specify a post_parse() function in the chunk following + * the grammar, i.e. after the last `%%`. When it does not return any value, + * the parser will return the original `retval`. + * + * ### options which can be set up per parser instance + * + * yy: { + * pre_parse: function(yy) + * optional: is invoked before the parse cycle starts (and before the first + * invocation of `lex()`) but immediately after the invocation of + * `parser.pre_parse()`). + * post_parse: function(yy, retval, parseInfo) { return retval; } + * optional: is invoked when the parse terminates due to success ('accept') + * or failure (even when exceptions are thrown). + * `retval` contains the return value to be produced by `Parser.parse()`; + * this function can override the return value by returning another. + * When it does not return any value, the parser will return the original + * `retval`. + * This function is invoked immediately before `parser.post_parse()`. + * + * parseError: function(str, hash, ExceptionClass) + * optional: overrides the default `parseError` function. + * quoteName: function(name), + * optional: overrides the default `quoteName` function. + * } + * + * parser.lexer.options: { + * pre_lex: function() + * optional: is invoked before the lexer is invoked to produce another token. + * `this` refers to the Lexer object. + * post_lex: function(token) { return token; } + * optional: is invoked when the lexer has produced a token `token`; + * this function can override the returned token value by returning another. + * When it does not return any (truthy) value, the lexer will return + * the original `token`. + * `this` refers to the Lexer object. + * + * ranges: boolean + * optional: `true` ==> token location info will include a .range[] member. + * flex: boolean + * optional: `true` ==> flex-like lexing behaviour where the rules are tested + * exhaustively to find the longest match. + * backtrack_lexer: boolean + * optional: `true` ==> lexer regexes are tested in order and for invoked; + * the lexer terminates the scan when a token is returned by the action code. + * xregexp: boolean + * optional: `true` ==> lexer rule regexes are "extended regex format" requiring the + * `XRegExp` library. 
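// Editor's note: sketch of the per-instance hooks listed above (re-using `p` from the
// previous sketch). `post_parse` may replace the parse result; `post_lex` may replace a
// produced token, and a falsy return keeps the original.
p.yy.post_parse = function (yy, retval /*, parseInfo */) {
    return { result: retval, warningCount: (yy.warnings || []).length };    // `yy.warnings` is hypothetical
};
p.lexer.options.post_lex = function (token) {
    console.log('lexed token:', token);
    return token;
};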
When this `%option` has not been specified at compile time, all lexer + * rule regexes have been written as standard JavaScript RegExp expressions. + * } + */ + + +// See also: +// http://stackoverflow.com/questions/1382107/whats-a-good-way-to-extend-error-in-javascript/#35881508 +// but we keep the prototype.constructor and prototype.name assignment lines too for compatibility +// with userland code which might access the derived class in a 'classic' way. +function JisonParserError$1(msg, hash) { + Object.defineProperty(this, 'name', { + enumerable: false, + writable: false, + value: 'JisonParserError' + }); + + if (msg == null) msg = '???'; + + Object.defineProperty(this, 'message', { + enumerable: false, + writable: true, + value: msg + }); + + this.hash = hash; + + var stacktrace; + if (hash && hash.exception instanceof Error) { + var ex2 = hash.exception; + this.message = ex2.message || msg; + stacktrace = ex2.stack; + } + if (!stacktrace) { + if (Error.hasOwnProperty('captureStackTrace')) { // V8/Chrome engine + Error.captureStackTrace(this, this.constructor); + } else { + stacktrace = (new Error(msg)).stack; + } + } + if (stacktrace) { + Object.defineProperty(this, 'stack', { + enumerable: false, + writable: false, + value: stacktrace + }); + } +} + +if (typeof Object.setPrototypeOf === 'function') { + Object.setPrototypeOf(JisonParserError$1.prototype, Error.prototype); +} else { + JisonParserError$1.prototype = Object.create(Error.prototype); +} +JisonParserError$1.prototype.constructor = JisonParserError$1; +JisonParserError$1.prototype.name = 'JisonParserError'; + + + + // helper: reconstruct the productions[] table + function bp$1(s) { + var rv = []; + var p = s.pop; + var r = s.rule; + for (var i = 0, l = p.length; i < l; i++) { + rv.push([ + p[i], + r[i] + ]); + } + return rv; + } + + + + + + // helper: reconstruct the 'goto' table + function bt$1(s) { + var rv = []; + var d = s.len; + var y = s.symbol; + var t = s.type; + var a = s.state; + var m = s.mode; + var g = s.goto; + for (var i = 0, l = d.length; i < l; i++) { + var n = d[i]; + var q = {}; + for (var j = 0; j < n; j++) { + var z = y.shift(); + switch (t.shift()) { + case 2: + q[z] = [ + m.shift(), + g.shift() + ]; + break; + + case 0: + q[z] = a.shift(); + break; + + default: + // type === 1: accept + q[z] = [ + 3 + ]; + } + } + rv.push(q); + } + return rv; + } + + + + // helper: runlength encoding with increment step: code, length: step (default step = 0) + // `this` references an array + function s$1(c, l, a) { + a = a || 0; + for (var i = 0; i < l; i++) { + this.push(c); + c += a; + } + } + + // helper: duplicate sequence from *relative* offset and length. + // `this` references an array + function c$1(i, l) { + i = this.length - i; + for (l += i; i < l; i++) { + this.push(this[i]); + } + } + + // helper: unpack an array using helpers and data, all passed in an array argument 'a'. + function u$1(a) { + var rv = []; + for (var i = 0, l = a.length; i < l; i++) { + var e = a[i]; + // Is this entry a helper function? + if (typeof e === 'function') { + i++; + e.apply(rv, a[i]); + } else { + rv.push(e); + } + } + return rv; + } + + +var parser$1 = { + // Code Generator Information Report + // --------------------------------- + // + // Options: + // + // default action mode: ............. classic,merge + // no try..catch: ................... false + // no default resolve on conflict: false + // on-demand look-ahead: ............ false + // error recovery token skip maximum: 3 + // yyerror in parse actions is: ..... 
NOT recoverable, + // yyerror in lexer actions and other non-fatal lexer are: + // .................................. NOT recoverable, + // debug grammar/output: ............ false + // has partial LR conflict upgrade: true + // rudimentary token-stack support: false + // parser table compression mode: ... 2 + // export debug tables: ............. false + // export *all* tables: ............. false + // module type: ..................... es + // parser engine type: .............. lalr + // output main() in the module: ..... true + // has user-specified main(): ....... false + // has user-specified require()/import modules for main(): + // .................................. false + // number of expected conflicts: .... 0 + // + // + // Parser Analysis flags: + // + // no significant actions (parser is a language matcher only): + // .................................. false + // uses yyleng: ..................... false + // uses yylineno: ................... false + // uses yytext: ..................... false + // uses yylloc: ..................... false + // uses ParseError API: ............. false + // uses YYERROR: .................... false + // uses YYRECOVERING: ............... false + // uses YYERROK: .................... false + // uses YYCLEARIN: .................. false + // tracks rule values: .............. true + // assigns rule values: ............. true + // uses location tracking: .......... false + // assigns location: ................ false + // uses yystack: .................... false + // uses yysstack: ................... false + // uses yysp: ....................... true + // uses yyrulelength: ............... false + // uses yyMergeLocationInfo API: .... false + // has error recovery: .............. false + // has error reporting: ............. false + // + // --------- END OF REPORT ----------- + +trace: function no_op_trace() {}, +JisonParserError: JisonParserError$1, +yy: {}, +options: { + type: "lalr", + hasPartialLrUpgradeOnConflict: true, + errorRecoveryTokenDiscardCount: 3 +}, +symbols_: { + "$accept": 0, + "$end": 1, + "(": 4, + ")": 5, + "*": 6, + "+": 8, + "?": 7, + "ALIAS": 9, + "EOF": 1, + "SYMBOL": 10, + "error": 2, + "expression": 16, + "handle": 13, + "handle_list": 12, + "production": 11, + "rule": 14, + "suffix": 17, + "suffixed_expression": 15, + "|": 3 +}, +terminals_: { + 1: "EOF", + 2: "error", + 3: "|", + 4: "(", + 5: ")", + 6: "*", + 7: "?", + 8: "+", + 9: "ALIAS", + 10: "SYMBOL" +}, +TERROR: 2, +EOF: 1, + +// internals: defined here so the object *structure* doesn't get modified by parse() et al, +// thus helping JIT compilers like Chrome V8. +originalQuoteName: null, +originalParseError: null, +cleanupAfterParse: null, +constructParseErrorInfo: null, +yyMergeLocationInfo: null, + +__reentrant_call_depth: 0, // INTERNAL USE ONLY +__error_infos: [], // INTERNAL USE ONLY: the set of parseErrorInfo objects created since the last cleanup +__error_recovery_infos: [], // INTERNAL USE ONLY: the set of parseErrorInfo objects created since the last cleanup + +// APIs which will be set up depending on user action code analysis: +//yyRecovering: 0, +//yyErrOk: 0, +//yyClearIn: 0, + +// Helper APIs +// ----------- + +// Helper function which can be overridden by user code later on: put suitable quotes around +// literal IDs in a description string. +quoteName: function parser_quoteName(id_str) { + return '"' + id_str + '"'; +}, + +// Return the name of the given symbol (terminal or non-terminal) as a string, when available. 
+// +// Return NULL when the symbol is unknown to the parser. +getSymbolName: function parser_getSymbolName(symbol) { + if (this.terminals_[symbol]) { + return this.terminals_[symbol]; + } + + // Otherwise... this might refer to a RULE token i.e. a non-terminal: see if we can dig that one up. + // + // An example of this may be where a rule's action code contains a call like this: + // + // parser.getSymbolName(#$) + // + // to obtain a human-readable name of the current grammar rule. + var s = this.symbols_; + for (var key in s) { + if (s[key] === symbol) { + return key; + } + } + return null; +}, + +// Return a more-or-less human-readable description of the given symbol, when available, +// or the symbol itself, serving as its own 'description' for lack of something better to serve up. +// +// Return NULL when the symbol is unknown to the parser. +describeSymbol: function parser_describeSymbol(symbol) { + if (symbol !== this.EOF && this.terminal_descriptions_ && this.terminal_descriptions_[symbol]) { + return this.terminal_descriptions_[symbol]; + } else if (symbol === this.EOF) { + return 'end of input'; + } + var id = this.getSymbolName(symbol); + if (id) { + return this.quoteName(id); + } + return null; +}, + +// Produce a (more or less) human-readable list of expected tokens at the point of failure. +// +// The produced list may contain token or token set descriptions instead of the tokens +// themselves to help turning this output into something that easier to read by humans +// unless `do_not_describe` parameter is set, in which case a list of the raw, *numeric*, +// expected terminals and nonterminals is produced. +// +// The returned list (array) will not contain any duplicate entries. +collect_expected_token_set: function parser_collect_expected_token_set(state, do_not_describe) { + var TERROR = this.TERROR; + var tokenset = []; + var check = {}; + // Has this (error?) state been outfitted with a custom expectations description text for human consumption? + // If so, use that one instead of the less palatable token set. + if (!do_not_describe && this.state_descriptions_ && this.state_descriptions_[state]) { + return [this.state_descriptions_[state]]; + } + for (var p in this.table[state]) { + p = +p; + if (p !== TERROR) { + var d = do_not_describe ? p : this.describeSymbol(p); + if (d && !check[d]) { + tokenset.push(d); + check[d] = true; // Mark this token description as already mentioned to prevent outputting duplicate entries. + } + } + } + return tokenset; +}, +productions_: bp$1({ + pop: u$1([ + 11, + 12, + 12, + 13, + 13, + 14, + 14, + 15, + 15, + 16, + 16, + s$1, + [17, 4] +]), + rule: u$1([ + 2, + 1, + 3, + 0, + 1, + 1, + 2, + 3, + c$1, + [8, 6], + 1 +]) +}), +performAction: function parser__PerformAction(yystate /* action[1] */, yysp, yyvstack) { + + /* this == yyval */ + + // the JS engine itself can go and remove these statements when `yy` turns out to be unused in any action code! + switch (yystate) { +case 0: + /*! Production:: $accept : production $end */ + + // default action (generated by JISON mode classic/merge :: VT,VA,-,-,-,-,-,-): + this.$ = yyvstack[yysp - 1]; + // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,-,-,-,-) + break; + +case 1: + /*! 
Production:: production : handle EOF */ + + // default action (generated by JISON mode classic/merge :: VT,VA,-,-,-,-,-,-): + this.$ = yyvstack[yysp - 1]; + // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,-,-,-,-) + + + return yyvstack[yysp - 1]; + break; + +case 2: + /*! Production:: handle_list : handle */ +case 6: + /*! Production:: rule : suffixed_expression */ + + this.$ = [yyvstack[yysp]]; + break; + +case 3: + /*! Production:: handle_list : handle_list "|" handle */ + + yyvstack[yysp - 2].push(yyvstack[yysp]); + this.$ = yyvstack[yysp - 2]; + break; + +case 4: + /*! Production:: handle : %epsilon */ + + this.$ = []; + break; + +case 5: + /*! Production:: handle : rule */ +case 13: + /*! Production:: suffix : "*" */ +case 14: + /*! Production:: suffix : "?" */ +case 15: + /*! Production:: suffix : "+" */ + + this.$ = yyvstack[yysp]; + break; + +case 7: + /*! Production:: rule : rule suffixed_expression */ + + yyvstack[yysp - 1].push(yyvstack[yysp]); + this.$ = yyvstack[yysp - 1]; + break; + +case 8: + /*! Production:: suffixed_expression : expression suffix ALIAS */ + + this.$ = ['xalias', yyvstack[yysp - 1], yyvstack[yysp - 2], yyvstack[yysp]]; + break; + +case 9: + /*! Production:: suffixed_expression : expression suffix */ + + if (yyvstack[yysp]) { + this.$ = [yyvstack[yysp], yyvstack[yysp - 1]]; + } else { + this.$ = yyvstack[yysp - 1]; + } + break; + +case 10: + /*! Production:: expression : SYMBOL */ + + this.$ = ['symbol', yyvstack[yysp]]; + break; + +case 11: + /*! Production:: expression : "(" handle_list ")" */ + + this.$ = ['()', yyvstack[yysp - 1]]; + break; + +case 12: + /*! Production:: suffix : %epsilon */ + + this.$ = undefined; + break; + +} +}, +table: bt$1({ + len: u$1([ + 8, + 1, + 1, + 7, + 0, + 10, + 0, + 9, + 0, + 0, + 6, + s$1, + [0, 3], + 2, + s$1, + [0, 3], + 8, + 0 +]), + symbol: u$1([ + 1, + 4, + 10, + 11, + s$1, + [13, 4, 1], + s$1, + [1, 3], + 3, + 4, + 5, + 10, + c$1, + [9, 3], + s$1, + [3, 8, 1], + 17, + c$1, + [16, 4], + s$1, + [12, 5, 1], + c$1, + [19, 4], + 9, + 10, + 3, + 5, + c$1, + [17, 4], + c$1, + [16, 4] +]), + type: u$1([ + s$1, + [2, 3], + s$1, + [0, 5], + 1, + s$1, + [2, 6], + 0, + 0, + s$1, + [2, 9], + c$1, + [10, 5], + s$1, + [0, 5], + s$1, + [2, 12], + s$1, + [0, 4] +]), + state: u$1([ + s$1, + [1, 5, 1], + 9, + 5, + 10, + 14, + 15, + c$1, + [8, 3], + 19, + c$1, + [4, 3] +]), + mode: u$1([ + 2, + s$1, + [1, 3], + 2, + 2, + 1, + 2, + c$1, + [5, 3], + c$1, + [7, 3], + c$1, + [12, 4], + c$1, + [13, 9], + c$1, + [15, 3], + c$1, + [5, 4] +]), + goto: u$1([ + 4, + 7, + 6, + 8, + 5, + 5, + 7, + 5, + 6, + s$1, + [12, 4], + 11, + 12, + 13, + 12, + 12, + 4, + 7, + 4, + 6, + s$1, + [9, 4], + 16, + 9, + 18, + 17, + c$1, + [12, 4] +]) +}), +defaultActions: { + 4: 6, + 6: 10, + 8: 1, + 9: 7, + 11: 13, + 12: 14, + 13: 15, + 15: 2, + 16: 8, + 17: 11, + 19: 3 +}, +parseError: function parseError(str, hash, ExceptionClass) { + if (hash.recoverable && typeof this.trace === 'function') { + this.trace(str); + hash.destroy(); // destroy... well, *almost*! 
+ } else { + if (!ExceptionClass) { + ExceptionClass = this.JisonParserError; + } + throw new ExceptionClass(str, hash); + } +}, +parse: function parse(input) { + var self = this; + var stack = new Array(128); // token stack: stores token which leads to state at the same index (column storage) + var sstack = new Array(128); // state stack: stores states (column storage) + + var vstack = new Array(128); // semantic value stack + + var table = this.table; + var sp = 0; // 'stack pointer': index into the stacks + + + var symbol = 0; + + + + var EOF = this.EOF; + var NO_ACTION = [0, 20 /* === table.length :: ensures that anyone using this new state will fail dramatically! */]; + + var lexer; + if (this.__lexer__) { + lexer = this.__lexer__; + } else { + lexer = this.__lexer__ = Object.create(this.lexer); + } + + var sharedState_yy = { + parseError: undefined, + quoteName: undefined, + lexer: undefined, + parser: undefined, + pre_parse: undefined, + post_parse: undefined, + pre_lex: undefined, + post_lex: undefined // WARNING: must be written this way for the code expanders to work correctly in both ES5 and ES6 modes! + }; + + this.yyGetSharedState = function yyGetSharedState() { + return sharedState_yy; + }; + + + + + + + + + function shallow_copy_noclobber(dst, src) { + for (var k in src) { + if (typeof dst[k] === 'undefined' && Object.prototype.hasOwnProperty.call(src, k)) { + dst[k] = src[k]; + } + } + } + + // copy state + shallow_copy_noclobber(sharedState_yy, this.yy); + + sharedState_yy.lexer = lexer; + sharedState_yy.parser = this; + + + + + + + // Does the shared state override the default `parseError` that already comes with this instance? + if (typeof sharedState_yy.parseError === 'function') { + this.parseError = function parseErrorAlt(str, hash, ExceptionClass) { + if (!ExceptionClass) { + ExceptionClass = this.JisonParserError; + } + return sharedState_yy.parseError.call(this, str, hash, ExceptionClass); + }; + } else { + this.parseError = this.originalParseError; + } + + // Does the shared state override the default `quoteName` that already comes with this instance? + if (typeof sharedState_yy.quoteName === 'function') { + this.quoteName = function quoteNameAlt(id_str) { + return sharedState_yy.quoteName.call(this, id_str); + }; + } else { + this.quoteName = this.originalQuoteName; + } + + // set up the cleanup function; make it an API so that external code can re-use this one in case of + // calamities or when the `%options no-try-catch` option has been specified for the grammar, in which + // case this parse() API method doesn't come with a `finally { ... }` block any more! + // + // NOTE: as this API uses parse() as a closure, it MUST be set again on every parse() invocation, + // or else your `sharedState`, etc. references will be *wrong*! + this.cleanupAfterParse = function parser_cleanupAfterParse(resultValue, invoke_post_methods, do_not_nuke_errorinfos) { + var rv; + + if (invoke_post_methods) { + var hash; + + if (sharedState_yy.post_parse || this.post_parse) { + // create an error hash info instance: we re-use this API in a **non-error situation** + // as this one delivers all parser internals ready for access by userland code. + hash = this.constructParseErrorInfo(null /* no error! */, null /* no exception! 
*/, null, false); + } + + if (sharedState_yy.post_parse) { + rv = sharedState_yy.post_parse.call(this, sharedState_yy, resultValue, hash); + if (typeof rv !== 'undefined') resultValue = rv; + } + if (this.post_parse) { + rv = this.post_parse.call(this, sharedState_yy, resultValue, hash); + if (typeof rv !== 'undefined') resultValue = rv; + } + + // cleanup: + if (hash && hash.destroy) { + hash.destroy(); + } + } + + if (this.__reentrant_call_depth > 1) return resultValue; // do not (yet) kill the sharedState when this is a reentrant run. + + // clean up the lingering lexer structures as well: + if (lexer.cleanupAfterLex) { + lexer.cleanupAfterLex(do_not_nuke_errorinfos); + } + + // prevent lingering circular references from causing memory leaks: + if (sharedState_yy) { + sharedState_yy.lexer = undefined; + sharedState_yy.parser = undefined; + if (lexer.yy === sharedState_yy) { + lexer.yy = undefined; + } + } + sharedState_yy = undefined; + this.parseError = this.originalParseError; + this.quoteName = this.originalQuoteName; + + // nuke the vstack[] array at least as that one will still reference obsoleted user values. + // To be safe, we nuke the other internal stack columns as well... + stack.length = 0; // fastest way to nuke an array without overly bothering the GC + sstack.length = 0; + + vstack.length = 0; + sp = 0; + + // nuke the error hash info instances created during this run. + // Userland code must COPY any data/references + // in the error hash instance(s) it is more permanently interested in. + if (!do_not_nuke_errorinfos) { + for (var i = this.__error_infos.length - 1; i >= 0; i--) { + var el = this.__error_infos[i]; + if (el && typeof el.destroy === 'function') { + el.destroy(); + } + } + this.__error_infos.length = 0; + + + } + + return resultValue; + }; + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + // NOTE: as this API uses parse() as a closure, it MUST be set again on every parse() invocation, + // or else your `lexer`, `sharedState`, etc. references will be *wrong*! + this.constructParseErrorInfo = function parser_constructParseErrorInfo(msg, ex, expected, recoverable) { + var pei = { + errStr: msg, + exception: ex, + text: lexer.match, + value: lexer.yytext, + token: this.describeSymbol(symbol) || symbol, + token_id: symbol, + line: lexer.yylineno, + + expected: expected, + recoverable: recoverable, + state: state, + action: action, + new_state: newState, + symbol_stack: stack, + state_stack: sstack, + value_stack: vstack, + + stack_pointer: sp, + yy: sharedState_yy, + lexer: lexer, + parser: this, + + // and make sure the error info doesn't stay due to potential + // ref cycle via userland code manipulations. + // These would otherwise all be memory leak opportunities! + // + // Note that only array and object references are nuked as those + // constitute the set of elements which can produce a cyclic ref. + // The rest of the members is kept intact as they are harmless. + destroy: function destructParseErrorInfo() { + // remove cyclic references added to error info: + // info.yy = null; + // info.lexer = null; + // info.value = null; + // info.value_stack = null; + // ... 
+ var rec = !!this.recoverable; + for (var key in this) { + if (this.hasOwnProperty(key) && typeof key === 'object') { + this[key] = undefined; + } + } + this.recoverable = rec; + } + }; + // track this instance so we can `destroy()` it once we deem it superfluous and ready for garbage collection! + this.__error_infos.push(pei); + return pei; + }; + + + + + + + + + + + + + + function lex() { + var token = lexer.lex(); + // if token isn't its numeric value, convert + if (typeof token !== 'number') { + token = self.symbols_[token] || token; + } + + return token || EOF; + } + + + var state, action, r, t; + var yyval = { + $: true, + _$: undefined, + yy: sharedState_yy + }; + var p; + var yyrulelen; + var this_production; + var newState; + var retval = false; + + + try { + this.__reentrant_call_depth++; + + lexer.setInput(input, sharedState_yy); + + + + vstack[sp] = null; + sstack[sp] = 0; + stack[sp] = 0; + ++sp; + + + + + + if (this.pre_parse) { + this.pre_parse.call(this, sharedState_yy); + } + if (sharedState_yy.pre_parse) { + sharedState_yy.pre_parse.call(this, sharedState_yy); + } + + newState = sstack[sp - 1]; + for (;;) { + // retrieve state number from top of stack + state = newState; // sstack[sp - 1]; + + // use default actions if available + if (this.defaultActions[state]) { + action = 2; + newState = this.defaultActions[state]; + } else { + // The single `==` condition below covers both these `===` comparisons in a single + // operation: + // + // if (symbol === null || typeof symbol === 'undefined') ... + if (!symbol) { + symbol = lex(); + } + // read action for current state and first input + t = (table[state] && table[state][symbol]) || NO_ACTION; + newState = t[1]; + action = t[0]; + + + + + + + + + + + + // handle parse error + if (!action) { + var errStr; + var errSymbolDescr = (this.describeSymbol(symbol) || symbol); + var expected = this.collect_expected_token_set(state); + + // Report error + if (typeof lexer.yylineno === 'number') { + errStr = 'Parse error on line ' + (lexer.yylineno + 1) + ': '; + } else { + errStr = 'Parse error: '; + } + if (typeof lexer.showPosition === 'function') { + errStr += '\n' + lexer.showPosition(79 - 10, 10) + '\n'; + } + if (expected.length) { + errStr += 'Expecting ' + expected.join(', ') + ', got unexpected ' + errSymbolDescr; + } else { + errStr += 'Unexpected ' + errSymbolDescr; + } + // we cannot recover from the error! + p = this.constructParseErrorInfo(errStr, null, expected, false); + retval = this.parseError(p.errStr, p, this.JisonParserError); + break; + } + + + } + + + + + + + + + + + switch (action) { + // catch misc. parse failures: + default: + // this shouldn't happen, unless resolve defaults are off + if (action instanceof Array) { + p = this.constructParseErrorInfo('Parse Error: multiple actions possible at state: ' + state + ', token: ' + symbol, null, null, false); + retval = this.parseError(p.errStr, p, this.JisonParserError); + break; + } + // Another case of better safe than sorry: in case state transitions come out of another error recovery process + // or a buggy LUT (LookUp Table): + p = this.constructParseErrorInfo('Parsing halted. 
No viable error recovery approach available due to internal system failure.', null, null, false); + retval = this.parseError(p.errStr, p, this.JisonParserError); + break; + + // shift: + case 1: + stack[sp] = symbol; + vstack[sp] = lexer.yytext; + + sstack[sp] = newState; // push state + + ++sp; + symbol = 0; + + + // Pick up the lexer details for the current symbol as that one is not 'look-ahead' any more: + + + + + + + + + + + + + + + + continue; + + // reduce: + case 2: + this_production = this.productions_[newState - 1]; // `this.productions_[]` is zero-based indexed while states start from 1 upwards... + yyrulelen = this_production[1]; + + + + + + + + + + + r = this.performAction.call(yyval, newState, sp - 1, vstack); + + if (typeof r !== 'undefined') { + retval = r; + break; + } + + // pop off stack + sp -= yyrulelen; + + // don't overwrite the `symbol` variable: use a local var to speed things up: + var ntsymbol = this_production[0]; // push nonterminal (reduce) + stack[sp] = ntsymbol; + vstack[sp] = yyval.$; + + // goto new state = table[STATE][NONTERMINAL] + newState = table[sstack[sp - 1]][ntsymbol]; + sstack[sp] = newState; + ++sp; + + + + + + + + + + continue; + + // accept: + case 3: + retval = true; + // Return the `$accept` rule's `$$` result, if available. + // + // Also note that JISON always adds this top-most `$accept` rule (with implicit, + // default, action): + // + // $accept: $end + // %{ $$ = $1; @$ = @1; %} + // + // which, combined with the parse kernel's `$accept` state behaviour coded below, + // will produce the `$$` value output of the rule as the parse result, + // IFF that result is *not* `undefined`. (See also the parser kernel code.) + // + // In code: + // + // %{ + // @$ = @1; // if location tracking support is included + // if (typeof $1 !== 'undefined') + // return $1; + // else + // return true; // the default parse result if the rule actions don't produce anything + // %} + sp--; + if (typeof vstack[sp] !== 'undefined') { + retval = vstack[sp]; + } + + break; + } + + // break out of loop: we accept or fail with error + break; + } + } catch (ex) { + // report exceptions through the parseError callback too, but keep the exception intact + // if it is a known parser or lexer error which has been thrown by parseError() already: + if (ex instanceof this.JisonParserError) { + throw ex; + } + else if (lexer && typeof lexer.JisonLexerError === 'function' && ex instanceof lexer.JisonLexerError) { + throw ex; + } + else { + p = this.constructParseErrorInfo('Parsing aborted due to exception.', ex, null, false); + retval = this.parseError(p.errStr, p, this.JisonParserError); + } + } finally { + retval = this.cleanupAfterParse(retval, true, true); + this.__reentrant_call_depth--; + } // /finally + + return retval; +} +}; +parser$1.originalParseError = parser$1.parseError; +parser$1.originalQuoteName = parser$1.quoteName; + + +/* lexer generated by jison-lex 0.6.1-205 */ + +/* + * Returns a Lexer object of the following structure: + * + * Lexer: { + * yy: {} The so-called "shared state" or rather the *source* of it; + * the real "shared state" `yy` passed around to + * the rule actions, etc. is a direct reference! + * + * This "shared context" object was passed to the lexer by way of + * the `lexer.setInput(str, yy)` API before you may use it. 
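// Editor's note: sketch of driving the generated lexer stand-alone, e.g. to debug lexer
// rules; `lexer$1` is the lexer object assembled further below in this file, and the input
// string is a made-up EBNF handle.
lexer$1.setInput('A ( B | C )+', {});            // the second argument is the shared `yy` context
var tok;
while ((tok = lexer$1.lex()) !== lexer$1.EOF) {
    console.log(tok, JSON.stringify(lexer$1.yytext), lexer$1.yylloc);
}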
+ * + * This "shared context" object is passed to the lexer action code in `performAction()` + * so userland code in the lexer actions may communicate with the outside world + * and/or other lexer rules' actions in more or less complex ways. + * + * } + * + * Lexer.prototype: { + * EOF: 1, + * ERROR: 2, + * + * yy: The overall "shared context" object reference. + * + * JisonLexerError: function(msg, hash), + * + * performAction: function lexer__performAction(yy, yyrulenumber, YY_START), + * + * The function parameters and `this` have the following value/meaning: + * - `this` : reference to the `lexer` instance. + * `yy_` is an alias for `this` lexer instance reference used internally. + * + * - `yy` : a reference to the `yy` "shared state" object which was passed to the lexer + * by way of the `lexer.setInput(str, yy)` API before. + * + * Note: + * The extra arguments you specified in the `%parse-param` statement in your + * **parser** grammar definition file are passed to the lexer via this object + * reference as member variables. + * + * - `yyrulenumber` : index of the matched lexer rule (regex), used internally. + * + * - `YY_START`: the current lexer "start condition" state. + * + * parseError: function(str, hash, ExceptionClass), + * + * constructLexErrorInfo: function(error_message, is_recoverable), + * Helper function. + * Produces a new errorInfo 'hash object' which can be passed into `parseError()`. + * See it's use in this lexer kernel in many places; example usage: + * + * var infoObj = lexer.constructParseErrorInfo('fail!', true); + * var retVal = lexer.parseError(infoObj.errStr, infoObj, lexer.JisonLexerError); + * + * options: { ... lexer %options ... }, + * + * lex: function(), + * Produce one token of lexed input, which was passed in earlier via the `lexer.setInput()` API. + * You MAY use the additional `args...` parameters as per `%parse-param` spec of the **lexer** grammar: + * these extra `args...` are added verbatim to the `yy` object reference as member variables. + * + * WARNING: + * Lexer's additional `args...` parameters (via lexer's `%parse-param`) MAY conflict with + * any attributes already added to `yy` by the **parser** or the jison run-time; + * when such a collision is detected an exception is thrown to prevent the generated run-time + * from silently accepting this confusing and potentially hazardous situation! + * + * cleanupAfterLex: function(do_not_nuke_errorinfos), + * Helper function. + * + * This helper API is invoked when the **parse process** has completed: it is the responsibility + * of the **parser** (or the calling userland code) to invoke this method once cleanup is desired. + * + * This helper may be invoked by user code to ensure the internal lexer gets properly garbage collected. 
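// Editor's note: per the note above, when the lexer is driven manually (outside parse())
// the caller is responsible for the cleanup step once lexing is done:
try {
    // ... lex tokens as in the previous sketch ...
} finally {
    lexer$1.cleanupAfterLex();       // resets the input and destroys lingering error-info objects
}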
+ * + * setInput: function(input, [yy]), + * + * + * input: function(), + * + * + * unput: function(str), + * + * + * more: function(), + * + * + * reject: function(), + * + * + * less: function(n), + * + * + * pastInput: function(n), + * + * + * upcomingInput: function(n), + * + * + * showPosition: function(), + * + * + * test_match: function(regex_match_array, rule_index), + * + * + * next: function(), + * + * + * begin: function(condition), + * + * + * pushState: function(condition), + * + * + * popState: function(), + * + * + * topState: function(), + * + * + * _currentRules: function(), + * + * + * stateStackSize: function(), + * + * + * performAction: function(yy, yy_, yyrulenumber, YY_START), + * + * + * rules: [...], + * + * + * conditions: {associative list: name ==> set}, + * } + * + * + * token location info (`yylloc`): { + * first_line: n, + * last_line: n, + * first_column: n, + * last_column: n, + * range: [start_number, end_number] + * (where the numbers are indexes into the input string, zero-based) + * } + * + * --- + * + * The `parseError` function receives a 'hash' object with these members for lexer errors: + * + * { + * text: (matched text) + * token: (the produced terminal token, if any) + * token_id: (the produced terminal token numeric ID, if any) + * line: (yylineno) + * loc: (yylloc) + * recoverable: (boolean: TRUE when the parser MAY have an error recovery rule + * available for this particular error) + * yy: (object: the current parser internal "shared state" `yy` + * as is also available in the rule actions; this can be used, + * for instance, for advanced error analysis and reporting) + * lexer: (reference to the current lexer instance used by the parser) + * } + * + * while `this` will reference the current lexer instance. + * + * When `parseError` is invoked by the lexer, the default implementation will + * attempt to invoke `yy.parser.parseError()`; when this callback is not provided + * it will try to invoke `yy.parseError()` instead. When that callback is also not + * provided, a `JisonLexerError` exception will be thrown containing the error + * message and `hash`, as constructed by the `constructLexErrorInfo()` API. + * + * Note that the lexer's `JisonLexerError` error class is passed via the + * `ExceptionClass` argument, which is invoked to construct the exception + * instance to be thrown, so technically `parseError` will throw the object + * produced by the `new ExceptionClass(str, hash)` JavaScript expression. + * + * --- + * + * You can specify lexer options by setting / modifying the `.options` object of your Lexer instance. + * These options are available: + * + * (Options are permanent.) + * + * yy: { + * parseError: function(str, hash, ExceptionClass) + * optional: overrides the default `parseError` function. + * } + * + * lexer.options: { + * pre_lex: function() + * optional: is invoked before the lexer is invoked to produce another token. + * `this` refers to the Lexer object. + * post_lex: function(token) { return token; } + * optional: is invoked when the lexer has produced a token `token`; + * this function can override the returned token value by returning another. + * When it does not return any (truthy) value, the lexer will return + * the original `token`. + * `this` refers to the Lexer object. + * + * WARNING: the next set of options are not meant to be changed. They echo the abilities of + * the lexer as per when it was compiled! + * + * ranges: boolean + * optional: `true` ==> token location info will include a .range[] member. 
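// Editor's note: sketch of the fallback chain described above -- a lexer used without a
// parser attached can still get custom error handling by placing `parseError` on the `yy`
// object handed to setInput(). (`input` is a hypothetical source string.)
lexer$1.setInput(input, {
    parseError: function (str, hash /*, ExceptionClass */) {
        console.error('lex error on line ' + (hash.line + 1) + ': ' + str);
        return lexer$1.ERROR;        // hand back the ERROR token instead of throwing
    }
});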
+ * flex: boolean + * optional: `true` ==> flex-like lexing behaviour where the rules are tested + * exhaustively to find the longest match. + * backtrack_lexer: boolean + * optional: `true` ==> lexer regexes are tested in order and for invoked; + * the lexer terminates the scan when a token is returned by the action code. + * xregexp: boolean + * optional: `true` ==> lexer rule regexes are "extended regex format" requiring the + * `XRegExp` library. When this %option has not been specified at compile time, all lexer + * rule regexes have been written as standard JavaScript RegExp expressions. + * } + */ + + +var lexer$1 = function() { + /** + * See also: + * http://stackoverflow.com/questions/1382107/whats-a-good-way-to-extend-error-in-javascript/#35881508 + * but we keep the prototype.constructor and prototype.name assignment lines too for compatibility + * with userland code which might access the derived class in a 'classic' way. + * + * @public + * @constructor + * @nocollapse + */ + function JisonLexerError(msg, hash) { + Object.defineProperty(this, 'name', { + enumerable: false, + writable: false, + value: 'JisonLexerError' + }); + + if (msg == null) + msg = '???'; + + Object.defineProperty(this, 'message', { + enumerable: false, + writable: true, + value: msg + }); + + this.hash = hash; + var stacktrace; + + if (hash && hash.exception instanceof Error) { + var ex2 = hash.exception; + this.message = ex2.message || msg; + stacktrace = ex2.stack; + } + + if (!stacktrace) { + if (Error.hasOwnProperty('captureStackTrace')) { + // V8 + Error.captureStackTrace(this, this.constructor); + } else { + stacktrace = new Error(msg).stack; + } + } + + if (stacktrace) { + Object.defineProperty(this, 'stack', { + enumerable: false, + writable: false, + value: stacktrace + }); + } + } + + if (typeof Object.setPrototypeOf === 'function') { + Object.setPrototypeOf(JisonLexerError.prototype, Error.prototype); + } else { + JisonLexerError.prototype = Object.create(Error.prototype); + } + + JisonLexerError.prototype.constructor = JisonLexerError; + JisonLexerError.prototype.name = 'JisonLexerError'; + + var lexer = { + +// Code Generator Information Report +// --------------------------------- +// +// Options: +// +// backtracking: .................... false +// location.ranges: ................. true +// location line+column tracking: ... true +// +// +// Forwarded Parser Analysis flags: +// +// uses yyleng: ..................... false +// uses yylineno: ................... false +// uses yytext: ..................... false +// uses yylloc: ..................... false +// uses lexer values: ............... true / true +// location tracking: ............... false +// location assignment: ............. false +// +// +// Lexer Analysis flags: +// +// uses yyleng: ..................... ??? +// uses yylineno: ................... ??? +// uses yytext: ..................... ??? +// uses yylloc: ..................... ??? +// uses ParseError API: ............. ??? +// uses yyerror: .................... ??? +// uses location tracking & editing: ??? +// uses more() API: ................. ??? +// uses unput() API: ................ ??? +// uses reject() API: ............... ??? +// uses less() API: ................. ??? +// uses display APIs pastInput(), upcomingInput(), showPosition(): +// ............................. ??? +// uses describeYYLLOC() API: ....... ??? 
+// +// --------- END OF REPORT ----------- + +EOF: 1, + ERROR: 2, + + // JisonLexerError: JisonLexerError, /// <-- injected by the code generator + + // options: {}, /// <-- injected by the code generator + + // yy: ..., /// <-- injected by setInput() + + __currentRuleSet__: null, /// INTERNAL USE ONLY: internal rule set cache for the current lexer state + + __error_infos: [], /// INTERNAL USE ONLY: the set of lexErrorInfo objects created since the last cleanup + __decompressed: false, /// INTERNAL USE ONLY: mark whether the lexer instance has been 'unfolded' completely and is now ready for use + done: false, /// INTERNAL USE ONLY + _backtrack: false, /// INTERNAL USE ONLY + _input: '', /// INTERNAL USE ONLY + _more: false, /// INTERNAL USE ONLY + _signaled_error_token: false, /// INTERNAL USE ONLY + conditionStack: [], /// INTERNAL USE ONLY; managed via `pushState()`, `popState()`, `topState()` and `stateStackSize()` + match: '', /// READ-ONLY EXTERNAL ACCESS - ADVANCED USE ONLY: tracks input which has been matched so far for the lexer token under construction. `match` is identical to `yytext` except that this one still contains the matched input string after `lexer.performAction()` has been invoked, where userland code MAY have changed/replaced the `yytext` value entirely! + matched: '', /// READ-ONLY EXTERNAL ACCESS - ADVANCED USE ONLY: tracks entire input which has been matched so far + matches: false, /// READ-ONLY EXTERNAL ACCESS - ADVANCED USE ONLY: tracks RE match result for last (successful) match attempt + yytext: '', /// ADVANCED USE ONLY: tracks input which has been matched so far for the lexer token under construction; this value is transferred to the parser as the 'token value' when the parser consumes the lexer token produced through a call to the `lex()` API. + offset: 0, /// READ-ONLY EXTERNAL ACCESS - ADVANCED USE ONLY: tracks the 'cursor position' in the input string, i.e. the number of characters matched so far + yyleng: 0, /// READ-ONLY EXTERNAL ACCESS - ADVANCED USE ONLY: length of matched input for the token under construction (`yytext`) + yylineno: 0, /// READ-ONLY EXTERNAL ACCESS - ADVANCED USE ONLY: 'line number' at which the token under construction is located + yylloc: null, /// READ-ONLY EXTERNAL ACCESS - ADVANCED USE ONLY: tracks location info (lines + columns) for the token under construction + + /** + * INTERNAL USE: construct a suitable error info hash object instance for `parseError`. 
+ * + * @public + * @this {RegExpLexer} + */ + constructLexErrorInfo: function lexer_constructLexErrorInfo(msg, recoverable, show_input_position) { + msg = '' + msg; + + // heuristic to determine if the error message already contains a (partial) source code dump + // as produced by either `showPosition()` or `prettyPrintRange()`: + if (show_input_position == undefined) { + show_input_position = !(msg.indexOf('\n') > 0 && msg.indexOf('^') > 0); + } + + if (this.yylloc && show_input_position) { + if (typeof this.prettyPrintRange === 'function') { + var pretty_src = this.prettyPrintRange(this.yylloc); + + if (!/\n\s*$/.test(msg)) { + msg += '\n'; + } + + msg += '\n Erroneous area:\n' + this.prettyPrintRange(this.yylloc); + } else if (typeof this.showPosition === 'function') { + var pos_str = this.showPosition(); + + if (pos_str) { + if (msg.length && msg[msg.length - 1] !== '\n' && pos_str[0] !== '\n') { + msg += '\n' + pos_str; + } else { + msg += pos_str; + } + } + } + } + + /** @constructor */ + var pei = { + errStr: msg, + recoverable: !!recoverable, + text: this.match, // This one MAY be empty; userland code should use the `upcomingInput` API to obtain more text which follows the 'lexer cursor position'... + token: null, + line: this.yylineno, + loc: this.yylloc, + yy: this.yy, + lexer: this, + + /** + * and make sure the error info doesn't stay due to potential + * ref cycle via userland code manipulations. + * These would otherwise all be memory leak opportunities! + * + * Note that only array and object references are nuked as those + * constitute the set of elements which can produce a cyclic ref. + * The rest of the members is kept intact as they are harmless. + * + * @public + * @this {LexErrorInfo} + */ + destroy: function destructLexErrorInfo() { + // remove cyclic references added to error info: + // info.yy = null; + // info.lexer = null; + // ... + var rec = !!this.recoverable; + + for (var key in this) { + if (this.hasOwnProperty(key) && typeof key === 'object') { + this[key] = undefined; + } + } + + this.recoverable = rec; + } + }; + + // track this instance so we can `destroy()` it once we deem it superfluous and ready for garbage collection! + this.__error_infos.push(pei); + + return pei; + }, + + /** + * handler which is invoked when a lexer error occurs. + * + * @public + * @this {RegExpLexer} + */ + parseError: function lexer_parseError(str, hash, ExceptionClass) { + if (!ExceptionClass) { + ExceptionClass = this.JisonLexerError; + } + + if (this.yy) { + if (this.yy.parser && typeof this.yy.parser.parseError === 'function') { + return this.yy.parser.parseError.call(this, str, hash, ExceptionClass) || this.ERROR; + } else if (typeof this.yy.parseError === 'function') { + return this.yy.parseError.call(this, str, hash, ExceptionClass) || this.ERROR; + } + } + + throw new ExceptionClass(str, hash); + }, + + /** + * method which implements `yyerror(str, ...args)` functionality for use inside lexer actions. 
+ * + * @public + * @this {RegExpLexer} + */ + yyerror: function yyError(str /*, ...args */) { + var lineno_msg = ''; + + if (this.yylloc) { + lineno_msg = ' on line ' + (this.yylineno + 1); + } + + var p = this.constructLexErrorInfo( + 'Lexical error' + lineno_msg + ': ' + str, + this.options.lexerErrorsAreRecoverable + ); + + // Add any extra args to the hash under the name `extra_error_attributes`: + var args = Array.prototype.slice.call(arguments, 1); + + if (args.length) { + p.extra_error_attributes = args; + } + + return this.parseError(p.errStr, p, this.JisonLexerError) || this.ERROR; + }, + + /** + * final cleanup function for when we have completed lexing the input; + * make it an API so that external code can use this one once userland + * code has decided it's time to destroy any lingering lexer error + * hash object instances and the like: this function helps to clean + * up these constructs, which *may* carry cyclic references which would + * otherwise prevent the instances from being properly and timely + * garbage-collected, i.e. this function helps prevent memory leaks! + * + * @public + * @this {RegExpLexer} + */ + cleanupAfterLex: function lexer_cleanupAfterLex(do_not_nuke_errorinfos) { + // prevent lingering circular references from causing memory leaks: + this.setInput('', {}); + + // nuke the error hash info instances created during this run. + // Userland code must COPY any data/references + // in the error hash instance(s) it is more permanently interested in. + if (!do_not_nuke_errorinfos) { + for (var i = this.__error_infos.length - 1; i >= 0; i--) { + var el = this.__error_infos[i]; + + if (el && typeof el.destroy === 'function') { + el.destroy(); + } + } + + this.__error_infos.length = 0; + } + + return this; + }, + + /** + * clear the lexer token context; intended for internal use only + * + * @public + * @this {RegExpLexer} + */ + clear: function lexer_clear() { + this.yytext = ''; + this.yyleng = 0; + this.match = ''; + + // - DO NOT reset `this.matched` + this.matches = false; + + this._more = false; + this._backtrack = false; + var col = (this.yylloc ? this.yylloc.last_column : 0); + + this.yylloc = { + first_line: this.yylineno + 1, + first_column: col, + last_line: this.yylineno + 1, + last_column: col, + range: [this.offset, this.offset] + }; + }, + + /** + * resets the lexer, sets new input + * + * @public + * @this {RegExpLexer} + */ + setInput: function lexer_setInput(input, yy) { + this.yy = yy || this.yy || {}; + + // also check if we've fully initialized the lexer instance, + // including expansion work to be done to go from a loaded + // lexer to a usable lexer: + if (!this.__decompressed) { + // step 1: decompress the regex list: + var rules = this.rules; + + for (var i = 0, len = rules.length; i < len; i++) { + var rule_re = rules[i]; + + // compression: is the RE an xref to another RE slot in the rules[] table? + if (typeof rule_re === 'number') { + rules[i] = rules[rule_re]; + } + } + + // step 2: unfold the conditions[] set to make these ready for use: + var conditions = this.conditions; + + for (var k in conditions) { + var spec = conditions[k]; + var rule_ids = spec.rules; + var len = rule_ids.length; + var rule_regexes = new Array(len + 1); // slot 0 is unused; we use a 1-based index approach here to keep the hottest code in `lexer_next()` fast and simple! 
+ var rule_new_ids = new Array(len + 1); + + for (var i = 0; i < len; i++) { + var idx = rule_ids[i]; + var rule_re = rules[idx]; + rule_regexes[i + 1] = rule_re; + rule_new_ids[i + 1] = idx; + } + + spec.rules = rule_new_ids; + spec.__rule_regexes = rule_regexes; + spec.__rule_count = len; + } + + this.__decompressed = true; + } + + this._input = input || ''; + this.clear(); + this._signaled_error_token = false; + this.done = false; + this.yylineno = 0; + this.matched = ''; + this.conditionStack = ['INITIAL']; + this.__currentRuleSet__ = null; + + this.yylloc = { + first_line: 1, + first_column: 0, + last_line: 1, + last_column: 0, + range: [0, 0] + }; + + this.offset = 0; + return this; + }, + + /** + * edit the remaining input via user-specified callback. + * This can be used to forward-adjust the input-to-parse, + * e.g. inserting macro expansions and alike in the + * input which has yet to be lexed. + * The behaviour of this API contrasts the `unput()` et al + * APIs as those act on the *consumed* input, while this + * one allows one to manipulate the future, without impacting + * the current `yyloc` cursor location or any history. + * + * Use this API to help implement C-preprocessor-like + * `#include` statements, etc. + * + * The provided callback must be synchronous and is + * expected to return the edited input (string). + * + * The `cpsArg` argument value is passed to the callback + * as-is. + * + * `callback` interface: + * `function callback(input, cpsArg)` + * + * - `input` will carry the remaining-input-to-lex string + * from the lexer. + * - `cpsArg` is `cpsArg` passed into this API. + * + * The `this` reference for the callback will be set to + * reference this lexer instance so that userland code + * in the callback can easily and quickly access any lexer + * API. + * + * When the callback returns a non-string-type falsey value, + * we assume the callback did not edit the input and we + * will using the input as-is. + * + * When the callback returns a non-string-type value, it + * is converted to a string for lexing via the `"" + retval` + * operation. (See also why: http://2ality.com/2012/03/converting-to-string.html + * -- that way any returned object's `toValue()` and `toString()` + * methods will be invoked in a proper/desirable order.) + * + * @public + * @this {RegExpLexer} + */ + editRemainingInput: function lexer_editRemainingInput(callback, cpsArg) { + var rv = callback.call(this, this._input, cpsArg); + + if (typeof rv !== 'string') { + if (rv) { + this._input = '' + rv; + } + // else: keep `this._input` as is. + } else { + this._input = rv; + } + + return this; + }, + + /** + * consumes and returns one char from the input + * + * @public + * @this {RegExpLexer} + */ + input: function lexer_input() { + if (!this._input) { + //this.done = true; -- don't set `done` as we want the lex()/next() API to be able to produce one custom EOF token match after this anyhow. (lexer can match special <> tokens and perform user action code for a <> match, but only does so *once*) + return null; + } + + var ch = this._input[0]; + this.yytext += ch; + this.yyleng++; + this.offset++; + this.match += ch; + this.matched += ch; + + // Count the linenumber up when we hit the LF (or a stand-alone CR). + // On CRLF, the linenumber is incremented when you fetch the CR or the CRLF combo + // and we advance immediately past the LF as well, returning both together as if + // it was all a single 'character' only. 
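+ // Illustrative example (not generated output): given the remaining input 'a\r\nb',
+ // three successive input() calls return 'a', '\r\n' and 'b' respectively; the CRLF
+ // pair is consumed as a single unit and yylineno is incremented exactly once for it.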
+ var slice_len = 1; + + var lines = false; + + if (ch === '\n') { + lines = true; + } else if (ch === '\r') { + lines = true; + var ch2 = this._input[1]; + + if (ch2 === '\n') { + slice_len++; + ch += ch2; + this.yytext += ch2; + this.yyleng++; + this.offset++; + this.match += ch2; + this.matched += ch2; + this.yylloc.range[1]++; + } + } + + if (lines) { + this.yylineno++; + this.yylloc.last_line++; + this.yylloc.last_column = 0; + } else { + this.yylloc.last_column++; + } + + this.yylloc.range[1]++; + this._input = this._input.slice(slice_len); + return ch; + }, + + /** + * unshifts one char (or an entire string) into the input + * + * @public + * @this {RegExpLexer} + */ + unput: function lexer_unput(ch) { + var len = ch.length; + var lines = ch.split(/(?:\r\n?|\n)/g); + this._input = ch + this._input; + this.yytext = this.yytext.substr(0, this.yytext.length - len); + this.yyleng = this.yytext.length; + this.offset -= len; + this.match = this.match.substr(0, this.match.length - len); + this.matched = this.matched.substr(0, this.matched.length - len); + + if (lines.length > 1) { + this.yylineno -= lines.length - 1; + this.yylloc.last_line = this.yylineno + 1; + + // Get last entirely matched line into the `pre_lines[]` array's + // last index slot; we don't mind when other previously + // matched lines end up in the array too. + var pre = this.match; + + var pre_lines = pre.split(/(?:\r\n?|\n)/g); + + if (pre_lines.length === 1) { + pre = this.matched; + pre_lines = pre.split(/(?:\r\n?|\n)/g); + } + + this.yylloc.last_column = pre_lines[pre_lines.length - 1].length; + } else { + this.yylloc.last_column -= len; + } + + this.yylloc.range[1] = this.yylloc.range[0] + this.yyleng; + this.done = false; + return this; + }, + + /** + * cache matched text and append it on next action + * + * @public + * @this {RegExpLexer} + */ + more: function lexer_more() { + this._more = true; + return this; + }, + + /** + * signal the lexer that this rule fails to match the input, so the + * next matching rule (regex) should be tested instead. + * + * @public + * @this {RegExpLexer} + */ + reject: function lexer_reject() { + if (this.options.backtrack_lexer) { + this._backtrack = true; + } else { + // when the `parseError()` call returns, we MUST ensure that the error is registered. + // We accomplish this by signaling an 'error' token to be produced for the current + // `.lex()` run. + var lineno_msg = ''; + + if (this.yylloc) { + lineno_msg = ' on line ' + (this.yylineno + 1); + } + + var p = this.constructLexErrorInfo( + 'Lexical error' + lineno_msg + ': You can only invoke reject() in the lexer when the lexer is of the backtracking persuasion (options.backtrack_lexer = true).', + false + ); + + this._signaled_error_token = this.parseError(p.errStr, p, this.JisonLexerError) || this.ERROR; + } + + return this; + }, + + /** + * retain first n characters of the match + * + * @public + * @this {RegExpLexer} + */ + less: function lexer_less(n) { + return this.unput(this.match.slice(n)); + }, + + /** + * return (part of the) already matched input, i.e. for error + * messages. + * + * Limit the returned string length to `maxSize` (default: 20). + * + * Limit the returned string to the `maxLines` number of lines of + * input (default: 1). + * + * Negative limit values equal *unlimited*. 
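+ *
+ * Illustrative example (assumed lexer state, not generated output): when the lexer
+ * has consumed `1 + 23 * ` so far and is currently matching `456`, then
+ *
+ *     lexer.pastInput(20, 1);    // -> '1 + 23 * '
+ *
+ * i.e. the already-consumed text is returned as-is since it fits within `maxSize`.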
+ * + * @public + * @this {RegExpLexer} + */ + pastInput: function lexer_pastInput(maxSize, maxLines) { + var past = this.matched.substring(0, this.matched.length - this.match.length); + + if (maxSize < 0) + maxSize = past.length; + else if (!maxSize) + maxSize = 20; + + if (maxLines < 0) + maxLines = past.length; // can't ever have more input lines than this! + else if (!maxLines) + maxLines = 1; + + // `substr` anticipation: treat \r\n as a single character and take a little + // more than necessary so that we can still properly check against maxSize + // after we've transformed and limited the newLines in here: + past = past.substr(-maxSize * 2 - 2); + + // now that we have a significantly reduced string to process, transform the newlines + // and chop them, then limit them: + var a = past.replace(/\r\n|\r/g, '\n').split('\n'); + + a = a.slice(-maxLines); + past = a.join('\n'); + + // When, after limiting to maxLines, we still have too much to return, + // do add an ellipsis prefix... + if (past.length > maxSize) { + past = '...' + past.substr(-maxSize); + } + + return past; + }, + + /** + * return (part of the) upcoming input, i.e. for error messages. + * + * Limit the returned string length to `maxSize` (default: 20). + * + * Limit the returned string to the `maxLines` number of lines of input (default: 1). + * + * Negative limit values equal *unlimited*. + * + * > ### NOTE ### + * > + * > *"upcoming input"* is defined as the whole of the both + * > the *currently lexed* input, together with any remaining input + * > following that. *"currently lexed"* input is the input + * > already recognized by the lexer but not yet returned with + * > the lexer token. This happens when you are invoking this API + * > from inside any lexer rule action code block. + * > + * + * @public + * @this {RegExpLexer} + */ + upcomingInput: function lexer_upcomingInput(maxSize, maxLines) { + var next = this.match; + + if (maxSize < 0) + maxSize = next.length + this._input.length; + else if (!maxSize) + maxSize = 20; + + if (maxLines < 0) + maxLines = maxSize; // can't ever have more input lines than this! + else if (!maxLines) + maxLines = 1; + + // `substring` anticipation: treat \r\n as a single character and take a little + // more than necessary so that we can still properly check against maxSize + // after we've transformed and limited the newLines in here: + if (next.length < maxSize * 2 + 2) { + next += this._input.substring(0, maxSize * 2 + 2); // substring is faster on Chrome/V8 + } + + // now that we have a significantly reduced string to process, transform the newlines + // and chop them, then limit them: + var a = next.replace(/\r\n|\r/g, '\n').split('\n'); + + a = a.slice(0, maxLines); + next = a.join('\n'); + + // When, after limiting to maxLines, we still have too much to return, + // do add an ellipsis postfix... + if (next.length > maxSize) { + next = next.substring(0, maxSize) + '...'; + } + + return next; + }, + + /** + * return a string which displays the character position where the + * lexing error occurred, i.e. 
for error messages + * + * @public + * @this {RegExpLexer} + */ + showPosition: function lexer_showPosition(maxPrefix, maxPostfix) { + var pre = this.pastInput(maxPrefix).replace(/\s/g, ' '); + var c = new Array(pre.length + 1).join('-'); + return pre + this.upcomingInput(maxPostfix).replace(/\s/g, ' ') + '\n' + c + '^'; + }, + + /** + * return a string which displays the lines & columns of input which are referenced + * by the given location info range, plus a few lines of context. + * + * This function pretty-prints the indicated section of the input, with line numbers + * and everything! + * + * This function is very useful to provide highly readable error reports, while + * the location range may be specified in various flexible ways: + * + * - `loc` is the location info object which references the area which should be + * displayed and 'marked up': these lines & columns of text are marked up by `^` + * characters below each character in the entire input range. + * + * - `context_loc` is the *optional* location info object which instructs this + * pretty-printer how much *leading* context should be displayed alongside + * the area referenced by `loc`. This can help provide context for the displayed + * error, etc. + * + * When this location info is not provided, a default context of 3 lines is + * used. + * + * - `context_loc2` is another *optional* location info object, which serves + * a similar purpose to `context_loc`: it specifies the amount of *trailing* + * context lines to display in the pretty-print output. + * + * When this location info is not provided, a default context of 1 line only is + * used. + * + * Special Notes: + * + * - when the `loc`-indicated range is very large (about 5 lines or more), then + * only the first and last few lines of this block are printed while a + * `...continued...` message will be printed between them. + * + * This serves the purpose of not printing a huge amount of text when the `loc` + * range happens to be huge: this way a manageable & readable output results + * for arbitrary large ranges. + * + * - this function can display lines of input which whave not yet been lexed. + * `prettyPrintRange()` can access the entire input! + * + * @public + * @this {RegExpLexer} + */ + prettyPrintRange: function lexer_prettyPrintRange(loc, context_loc, context_loc2) { + const CONTEXT = 3; + const CONTEXT_TAIL = 1; + const MINIMUM_VISIBLE_NONEMPTY_LINE_COUNT = 2; + var input = this.matched + this._input; + var lines = input.split('\n'); + + //var show_context = (error_size < 5 || context_loc); + var l0 = Math.max(1, (context_loc ? context_loc.first_line : loc.first_line - CONTEXT)); + + var l1 = Math.max(1, (context_loc2 ? context_loc2.last_line : loc.last_line + CONTEXT_TAIL)); + var lineno_display_width = 1 + Math.log10(l1 | 1) | 0; + var ws_prefix = new Array(lineno_display_width).join(' '); + var nonempty_line_indexes = []; + + var rv = lines.slice(l0 - 1, l1 + 1).map(function injectLineNumber(line, index) { + var lno = index + l0; + var lno_pfx = (ws_prefix + lno).substr(-lineno_display_width); + var rv = lno_pfx + ': ' + line; + var errpfx = new Array(lineno_display_width + 1).join('^'); + var offset = 2 + 1; + var len = 0; + + if (lno === loc.first_line) { + offset += loc.first_column; + + len = Math.max( + 2, + ((lno === loc.last_line ? 
loc.last_column : line.length)) - loc.first_column + 1 + ); + } else if (lno === loc.last_line) { + len = Math.max(2, loc.last_column + 1); + } else if (lno > loc.first_line && lno < loc.last_line) { + len = Math.max(2, line.length + 1); + } + + if (len) { + var lead = new Array(offset).join('.'); + var mark = new Array(len).join('^'); + rv += '\n' + errpfx + lead + mark; + + if (line.trim().length > 0) { + nonempty_line_indexes.push(index); + } + } + + rv = rv.replace(/\t/g, ' '); + return rv; + }); + + // now make sure we don't print an overly large amount of error area: limit it + // to the top and bottom line count: + if (nonempty_line_indexes.length > 2 * MINIMUM_VISIBLE_NONEMPTY_LINE_COUNT) { + var clip_start = nonempty_line_indexes[MINIMUM_VISIBLE_NONEMPTY_LINE_COUNT - 1] + 1; + var clip_end = nonempty_line_indexes[nonempty_line_indexes.length - MINIMUM_VISIBLE_NONEMPTY_LINE_COUNT] - 1; + var intermediate_line = new Array(lineno_display_width + 1).join(' ') + ' (...continued...)'; + intermediate_line += '\n' + new Array(lineno_display_width + 1).join('-') + ' (---------------)'; + rv.splice(clip_start, clip_end - clip_start + 1, intermediate_line); + } + + return rv.join('\n'); + }, + + /** + * helper function, used to produce a human readable description as a string, given + * the input `yylloc` location object. + * + * Set `display_range_too` to TRUE to include the string character index position(s) + * in the description if the `yylloc.range` is available. + * + * @public + * @this {RegExpLexer} + */ + describeYYLLOC: function lexer_describe_yylloc(yylloc, display_range_too) { + var l1 = yylloc.first_line; + var l2 = yylloc.last_line; + var c1 = yylloc.first_column; + var c2 = yylloc.last_column; + var dl = l2 - l1; + var dc = c2 - c1; + var rv; + + if (dl === 0) { + rv = 'line ' + l1 + ', '; + + if (dc <= 1) { + rv += 'column ' + c1; + } else { + rv += 'columns ' + c1 + ' .. ' + c2; + } + } else { + rv = 'lines ' + l1 + '(column ' + c1 + ') .. ' + l2 + '(column ' + c2 + ')'; + } + + if (yylloc.range && display_range_too) { + var r1 = yylloc.range[0]; + var r2 = yylloc.range[1] - 1; + + if (r2 <= r1) { + rv += ' {String Offset: ' + r1 + '}'; + } else { + rv += ' {String Offset range: ' + r1 + ' .. ' + r2 + '}'; + } + } + + return rv; + }, + + /** + * test the lexed token: return FALSE when not a match, otherwise return token. + * + * `match` is supposed to be an array coming out of a regex match, i.e. `match[0]` + * contains the actually matched text string. 
+ * + * Also move the input cursor forward and update the match collectors: + * + * - `yytext` + * - `yyleng` + * - `match` + * - `matches` + * - `yylloc` + * - `offset` + * + * @public + * @this {RegExpLexer} + */ + test_match: function lexer_test_match(match, indexed_rule) { + var token, lines, backup, match_str, match_str_len; + + if (this.options.backtrack_lexer) { + // save context + backup = { + yylineno: this.yylineno, + + yylloc: { + first_line: this.yylloc.first_line, + last_line: this.yylloc.last_line, + first_column: this.yylloc.first_column, + last_column: this.yylloc.last_column, + range: this.yylloc.range.slice(0) + }, + + yytext: this.yytext, + match: this.match, + matches: this.matches, + matched: this.matched, + yyleng: this.yyleng, + offset: this.offset, + _more: this._more, + _input: this._input, + + //_signaled_error_token: this._signaled_error_token, + yy: this.yy, + + conditionStack: this.conditionStack.slice(0), + done: this.done + }; + } + + match_str = match[0]; + match_str_len = match_str.length; + + // if (match_str.indexOf('\n') !== -1 || match_str.indexOf('\r') !== -1) { + lines = match_str.split(/(?:\r\n?|\n)/g); + + if (lines.length > 1) { + this.yylineno += lines.length - 1; + this.yylloc.last_line = this.yylineno + 1; + this.yylloc.last_column = lines[lines.length - 1].length; + } else { + this.yylloc.last_column += match_str_len; + } + + // } + this.yytext += match_str; + + this.match += match_str; + this.matched += match_str; + this.matches = match; + this.yyleng = this.yytext.length; + this.yylloc.range[1] += match_str_len; + + // previous lex rules MAY have invoked the `more()` API rather than producing a token: + // those rules will already have moved this `offset` forward matching their match lengths, + // hence we must only add our own match length now: + this.offset += match_str_len; + + this._more = false; + this._backtrack = false; + this._input = this._input.slice(match_str_len); + + // calling this method: + // + // function lexer__performAction(yy, yyrulenumber, YY_START) {...} + token = this.performAction.call( + this, + this.yy, + indexed_rule, + this.conditionStack[this.conditionStack.length - 1] /* = YY_START */ + ); + + // otherwise, when the action codes are all simple return token statements: + //token = this.simpleCaseActionClusters[indexed_rule]; + + if (this.done && this._input) { + this.done = false; + } + + if (token) { + return token; + } else if (this._backtrack) { + // recover context + for (var k in backup) { + this[k] = backup[k]; + } + + this.__currentRuleSet__ = null; + return false; // rule action called reject() implying the next rule should be tested instead. + } else if (this._signaled_error_token) { + // produce one 'error' token as `.parseError()` in `reject()` + // did not guarantee a failure signal by throwing an exception! + token = this._signaled_error_token; + + this._signaled_error_token = false; + return token; + } + + return false; + }, + + /** + * return next match in input + * + * @public + * @this {RegExpLexer} + */ + next: function lexer_next() { + if (this.done) { + this.clear(); + return this.EOF; + } + + if (!this._input) { + this.done = true; + } + + var token, match, tempMatch, index; + + if (!this._more) { + this.clear(); + } + + var spec = this.__currentRuleSet__; + + if (!spec) { + // Update the ruleset cache as we apparently encountered a state change or just started lexing. 
+ // The cache is set up for fast lookup -- we assume a lexer will switch states much less often than it will + // invoke the `lex()` token-producing API and related APIs, hence caching the set for direct access helps + // speed up those activities a tiny bit. + spec = this.__currentRuleSet__ = this._currentRules(); + + // Check whether a *sane* condition has been pushed before: this makes the lexer robust against + // user-programmer bugs such as https://github.com/zaach/jison-lex/issues/19 + if (!spec || !spec.rules) { + var lineno_msg = ''; + + if (this.options.trackPosition) { + lineno_msg = ' on line ' + (this.yylineno + 1); + } + + var p = this.constructLexErrorInfo( + 'Internal lexer engine error' + lineno_msg + ': The lex grammar programmer pushed a non-existing condition name "' + this.topState() + '"; this is a fatal error and should be reported to the application programmer team!', + false + ); + + // produce one 'error' token until this situation has been resolved, most probably by parse termination! + return this.parseError(p.errStr, p, this.JisonLexerError) || this.ERROR; + } + } + + var rule_ids = spec.rules; + var regexes = spec.__rule_regexes; + var len = spec.__rule_count; + + // Note: the arrays are 1-based, while `len` itself is a valid index, + // hence the non-standard less-or-equal check in the next loop condition! + for (var i = 1; i <= len; i++) { + tempMatch = this._input.match(regexes[i]); + + if (tempMatch && (!match || tempMatch[0].length > match[0].length)) { + match = tempMatch; + index = i; + + if (this.options.backtrack_lexer) { + token = this.test_match(tempMatch, rule_ids[i]); + + if (token !== false) { + return token; + } else if (this._backtrack) { + match = undefined; + continue; // rule action called reject() implying a rule MISmatch. + } else { + // else: this is a lexer rule which consumes input without producing a token (e.g. whitespace) + return false; + } + } else if (!this.options.flex) { + break; + } + } + } + + if (match) { + token = this.test_match(match, rule_ids[index]); + + if (token !== false) { + return token; + } + + // else: this is a lexer rule which consumes input without producing a token (e.g. whitespace) + return false; + } + + if (!this._input) { + this.done = true; + this.clear(); + return this.EOF; + } else { + var lineno_msg = ''; + + if (this.options.trackPosition) { + lineno_msg = ' on line ' + (this.yylineno + 1); + } + + var p = this.constructLexErrorInfo( + 'Lexical error' + lineno_msg + ': Unrecognized text.', + this.options.lexerErrorsAreRecoverable + ); + + var pendingInput = this._input; + var activeCondition = this.topState(); + var conditionStackDepth = this.conditionStack.length; + token = this.parseError(p.errStr, p, this.JisonLexerError) || this.ERROR; + + if (token === this.ERROR) { + // we can try to recover from a lexer error that `parseError()` did not 'recover' for us + // by moving forward at least one character at a time IFF the (user-specified?) `parseError()` + // has not consumed/modified any pending input or changed state in the error handler: + if (!this.matches && // and make sure the input has been modified/consumed ... + pendingInput === this._input && // ...or the lexer state has been modified significantly enough + // to merit a non-consuming error handling action right now. 
+ activeCondition === this.topState() && conditionStackDepth === this.conditionStack.length) { + this.input(); + } + } + + return token; + } + }, + + /** + * return next match that has a token + * + * @public + * @this {RegExpLexer} + */ + lex: function lexer_lex() { + var r; + + // allow the PRE/POST handlers set/modify the return token for maximum flexibility of the generated lexer: + if (typeof this.options.pre_lex === 'function') { + r = this.options.pre_lex.call(this); + } + + while (!r) { + r = this.next(); + } + + if (typeof this.options.post_lex === 'function') { + // (also account for a userdef function which does not return any value: keep the token as is) + r = this.options.post_lex.call(this, r) || r; + } + + return r; + }, + + /** + * backwards compatible alias for `pushState()`; + * the latter is symmetrical with `popState()` and we advise to use + * those APIs in any modern lexer code, rather than `begin()`. + * + * @public + * @this {RegExpLexer} + */ + begin: function lexer_begin(condition) { + return this.pushState(condition); + }, + + /** + * activates a new lexer condition state (pushes the new lexer + * condition state onto the condition stack) + * + * @public + * @this {RegExpLexer} + */ + pushState: function lexer_pushState(condition) { + this.conditionStack.push(condition); + this.__currentRuleSet__ = null; + return this; + }, + + /** + * pop the previously active lexer condition state off the condition + * stack + * + * @public + * @this {RegExpLexer} + */ + popState: function lexer_popState() { + var n = this.conditionStack.length - 1; + + if (n > 0) { + this.__currentRuleSet__ = null; + return this.conditionStack.pop(); + } else { + return this.conditionStack[0]; + } + }, + + /** + * return the currently active lexer condition state; when an index + * argument is provided it produces the N-th previous condition state, + * if available + * + * @public + * @this {RegExpLexer} + */ + topState: function lexer_topState(n) { + n = this.conditionStack.length - 1 - Math.abs(n || 0); + + if (n >= 0) { + return this.conditionStack[n]; + } else { + return 'INITIAL'; + } + }, + + /** + * (internal) determine the lexer rule set which is active for the + * currently active lexer condition state + * + * @public + * @this {RegExpLexer} + */ + _currentRules: function lexer__currentRules() { + if (this.conditionStack.length && this.conditionStack[this.conditionStack.length - 1]) { + return this.conditions[this.conditionStack[this.conditionStack.length - 1]]; + } else { + return this.conditions['INITIAL']; + } + }, + + /** + * return the number of states currently on the stack + * + * @public + * @this {RegExpLexer} + */ + stateStackSize: function lexer_stateStackSize() { + return this.conditionStack.length; + }, + + options: { + xregexp: true, + ranges: true, + trackPosition: true, + easy_keyword_rules: true + }, + + JisonLexerError: JisonLexerError, + + performAction: function lexer__performAction(yy, yyrulenumber, YY_START) { + var yy_ = this; + switch (yyrulenumber) { + case 0: + /*! Conditions:: INITIAL */ + /*! Rule:: \s+ */ + /* skip whitespace */ + break; + + case 3: + /*! Conditions:: INITIAL */ + /*! Rule:: \[{ID}\] */ + yy_.yytext = this.matches[1]; + + return 9; + break; + + default: + return this.simpleCaseActionClusters[yyrulenumber]; + } + }, + + simpleCaseActionClusters: { + /*! Conditions:: INITIAL */ + /*! Rule:: {ID} */ + 1: 10, + + /*! Conditions:: INITIAL */ + /*! Rule:: \$end\b */ + 2: 10, + + /*! Conditions:: INITIAL */ + /*! 
Rule:: '{QUOTED_STRING_CONTENT}' */ + 4: 10, + + /*! Conditions:: INITIAL */ + /*! Rule:: "{DOUBLEQUOTED_STRING_CONTENT}" */ + 5: 10, + + /*! Conditions:: INITIAL */ + /*! Rule:: \. */ + 6: 10, + + /*! Conditions:: INITIAL */ + /*! Rule:: \( */ + 7: 4, + + /*! Conditions:: INITIAL */ + /*! Rule:: \) */ + 8: 5, + + /*! Conditions:: INITIAL */ + /*! Rule:: \* */ + 9: 6, + + /*! Conditions:: INITIAL */ + /*! Rule:: \? */ + 10: 7, + + /*! Conditions:: INITIAL */ + /*! Rule:: \| */ + 11: 3, + + /*! Conditions:: INITIAL */ + /*! Rule:: \+ */ + 12: 8, + + /*! Conditions:: INITIAL */ + /*! Rule:: $ */ + 13: 1 + }, + + rules: [ + /* 0: */ /^(?:\s+)/, + /* 1: */ new XRegExp('^(?:([\\p{Alphabetic}_](?:[\\p{Alphabetic}\\p{Number}_])*))', ''), + /* 2: */ /^(?:\$end\b)/, + /* 3: */ new XRegExp('^(?:\\[([\\p{Alphabetic}_](?:[\\p{Alphabetic}\\p{Number}_])*)\\])', ''), + /* 4: */ /^(?:'((?:\\'|\\[^']|[^'\\])*)')/, + /* 5: */ /^(?:"((?:\\"|\\[^"]|[^"\\])*)")/, + /* 6: */ /^(?:\.)/, + /* 7: */ /^(?:\()/, + /* 8: */ /^(?:\))/, + /* 9: */ /^(?:\*)/, + /* 10: */ /^(?:\?)/, + /* 11: */ /^(?:\|)/, + /* 12: */ /^(?:\+)/, + /* 13: */ /^(?:$)/ + ], + + conditions: { + 'INITIAL': { + rules: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13], + inclusive: true + } + } + }; + + return lexer; +}(); +parser$1.lexer = lexer$1; + +function Parser$1() { + this.yy = {}; +} +Parser$1.prototype = parser$1; +parser$1.Parser = Parser$1; + +function yyparse$1() { + return parser$1.parse.apply(parser$1, arguments); +} + + + +var parser$2 = { + parser: parser$1, + Parser: Parser$1, + parse: yyparse$1, + +}; + +//import assert from 'assert'; + +var devDebug = 0; + +// WARNING: this regex MUST match the regex for `ID` in ebnf-parser::bnf.l jison language lexer spec! (`ID = [{ALPHA}]{ALNUM}*`) +// +// This is the base XRegExp ID regex used in many places; this should match the ID macro definition in the EBNF/BNF parser et al as well! +const ID_REGEX_BASE = '[\\p{Alphabetic}_][\\p{Alphabetic}_\\p{Number}]*'; + +// produce a unique production symbol. +// Use this to produce rule productions from transformed EBNF which are +// guaranteed not to collide with previously generated / already existing +// rules (~ symbols). +function generateUniqueSymbol(id, postfix, opts) { + var sym = id + postfix; + if (opts.grammar[sym]) { + var i = 2; // the first occurrence won't have a number, this is already a collision, so start numbering at *2*. + do { + sym = id + postfix + i; + i++; + } while (opts.grammar[sym]); + } + return sym; +} + +function generatePushAction(handle, offset) { + var terms = handle.terms; + var rv = []; + + for (var i = 0, len = terms.length; i < len; i++) { + rv.push('$' + (i + offset)); + } + rv = rv.join(', '); + // and make sure we contain a term series unambiguously, i.e. anything more complex than + // a single term inside an EBNF check is produced as an array so we can differentiate + // between */+/? EBNF operator results and groups of tokens per individual match. + if (len > 1) { + rv = '[' + rv + ']'; + } + return rv; +} + +function transformExpression(e, opts, emit) { + var type = e[0], + value = e[1], + name = false, + has_transformed = 0; + var list, n; + + if (type === 'xalias') { + type = e[1]; + value = e[2]; + name = e[3]; + if (type) { + e = e.slice(1); + } else { + e = value; + type = e[0]; + value = e[1]; + } + if (devDebug > 3) console.log('xalias: ', e, type, value, name); + } + + if (type === 'symbol') { + n = e[1]; + if (devDebug > 2) console.log('symbol EMIT: ', n + (name ? 
'[' + name + ']' : '')); + emit(n + (name ? '[' + name + ']' : '')); + } else if (type === '+') { + if (!name) { + name = generateUniqueSymbol(opts.production, '_repetition_plus', opts); + } + if (devDebug > 2) console.log('+ EMIT name: ', name); + emit(name); + + has_transformed = 1; + + opts = optsForProduction(name, opts.grammar); + list = transformExpressionList([value], opts); + opts.grammar[name] = [ + [ + list.fragment, + '$$ = [' + generatePushAction(list, 1) + '];' + ], + [ + name + ' ' + list.fragment, + '$1.push(' + generatePushAction(list, 2) + ');\n$$ = $1;' + ] + ]; + } else if (type === '*') { + if (!name) { + name = generateUniqueSymbol(opts.production, '_repetition', opts); + } + if (devDebug > 2) console.log('* EMIT name: ', name); + emit(name); + + has_transformed = 1; + + opts = optsForProduction(name, opts.grammar); + list = transformExpressionList([value], opts); + opts.grammar[name] = [ + [ + '', + '$$ = [];' + ], + [ + name + ' ' + list.fragment, + '$1.push(' + generatePushAction(list, 2) + ');\n$$ = $1;' + ] + ]; + } else if (type === '?') { + if (!name) { + name = generateUniqueSymbol(opts.production, '_option', opts); + } + if (devDebug > 2) console.log('? EMIT name: ', name); + emit(name); + + has_transformed = 1; + + opts = optsForProduction(name, opts.grammar); + list = transformExpressionList([value], opts); + // you want to be able to check if 0 or 1 occurrences were recognized: since jison + // by default *copies* the lexer token value, i.e. `$$ = $1` is the (optional) default action, + // we will need to set the action up explicitly in case of the 0-count match: + // `$$ = undefined`. + // + // Note that we MUST return an array as the + // '1 occurrence' match CAN carry multiple terms, e.g. in constructs like + // `(T T T)?`, which would otherwise be unrecognizable from the `T*` construct. 
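+ //
+ // Illustrative expansion (assumed input, not literal generator output): for a
+ // production `elem : LBRACE attr? RBRACE ;` the `attr?` part is replaced by an
+ // auto-generated nonterminal (here it would be named `elem_option`) defined roughly as
+ //
+ //     grammar['elem_option'] = [
+ //         [ '',     '$$ = undefined;' ],   // zero occurrences
+ //         [ 'attr', '$$ = $1;'        ]    // one occurrence
+ //     ];
+ //
+ // while the original handle is rewritten to `LBRACE elem_option RBRACE`.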
+ opts.grammar[name] = [ + [ + '', + '$$ = undefined;' + ], + [ + list.fragment, + '$$ = ' + generatePushAction(list, 1) + ';' + ] + ]; + } else if (type === '()') { + if (value.length === 1 && !name) { + list = transformExpressionList(value[0], opts); + if (list.first_transformed_term_index) { + has_transformed = list.first_transformed_term_index; + } + if (devDebug > 2) console.log('group EMIT len=1: ', list); + emit(list); + } else { + if (!name) { + name = generateUniqueSymbol(opts.production, '_group', opts); + } + if (devDebug > 2) console.log('group EMIT name: ', name); + emit(name); + + has_transformed = 1; + + opts = optsForProduction(name, opts.grammar); + opts.grammar[name] = value.map(function (handle) { + var list = transformExpressionList(handle, opts); + return [ + list.fragment, + '$$ = ' + generatePushAction(list, 1) + ';' + ]; + }); + } + } + + return has_transformed; +} + +function transformExpressionList(list, opts) { + var first_transformed_term_index = false; + var terms = list.reduce(function (tot, e) { + var ci = tot.length; + + var has_transformed = transformExpression(e, opts, function (name) { + if (name.terms) { + tot.push.apply(tot, name.terms); + } else { + tot.push(name); + } + }); + + if (has_transformed) { + first_transformed_term_index = ci + has_transformed; + } + return tot; + }, []); + + return { + fragment: terms.join(' '), + terms: terms, + first_transformed_term_index: first_transformed_term_index // 1-based index + }; +} + +function optsForProduction(id, grammar) { + return { + production: id, + grammar: grammar + }; +} + +function transformProduction(id, production, grammar) { + var transform_opts = optsForProduction(id, grammar); + return production.map(function (handle) { + var action = null, + opts = null; + var i, len, n; + + if (typeof handle !== 'string') { + action = handle[1]; + opts = handle[2]; + handle = handle[0]; + } + var expressions = parser$2.parse(handle); + + if (devDebug > 1) console.log('\n================\nEBNF transform expressions:\n ', handle, opts, JSON.stringify(expressions, null, 2)); + + var list = transformExpressionList(expressions, transform_opts); + + var ret = [list.fragment]; + if (action) { + // make sure the action doesn't address any inner items. 
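+ //
+ // (Illustrative, assumed grammar: given `stmt : ( NAME '=' expr[e] )+ ';' { $$ = $e; }`,
+ // the `$e` reference is rejected below, because the `( ... )+` group has been folded
+ // into an auto-generated repetition rule and only that outer result remains addressable
+ // from this production's action code.)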
+ if (list.first_transformed_term_index) { + var rhs = list.fragment; + // seek out all names and aliases; strip out literal tokens first as those cannot serve as $names: + var alist = list.terms; // rhs.replace(/'[^']+'/g, '~').replace(/"[^"]+"/g, '~').split(' '); + // we also know at which index the first transformation occurred: + if (devDebug > 2) console.log('alist ~ rhs rule terms: ', alist, rhs); + + var alias_re = new XRegExp(`\\[${ID_REGEX_BASE}\\]`); + var term_re = new XRegExp(`^${ID_REGEX_BASE}$`); + // and collect the PERMITTED aliases: the names of the terms and all the remaining aliases + var good_aliases = {}; + var alias_cnt = {}; + var donotalias = {}; + + // WARNING: this replicates the knowledge/code of jison.js::addName() + var addName = function addNameEBNF(s, i) { + var base = s.replace(/[0-9]+$/, ''); + var dna = donotalias[base]; + + if (good_aliases[s]) { + alias_cnt[s]++; + if (!dna) { + good_aliases[s + alias_cnt[s]] = i + 1; + alias_cnt[s + alias_cnt[s]] = 1; + } + } else { + good_aliases[s] = i + 1; + alias_cnt[s] = 1; + if (!dna) { + good_aliases[s + alias_cnt[s]] = i + 1; + alias_cnt[s + alias_cnt[s]] = 1; + } + } + }; + + // WARNING: this replicates the knowledge/code of jison.js::markBasename() + var markBasename = function markBasenameEBNF(s) { + if (/[0-9]$/.test(s)) { + s = s.replace(/[0-9]+$/, ''); + donotalias[s] = true; + } + }; + + // mark both regular and aliased names, e.g., `id[alias1]` and `id1` + // + // WARNING: this replicates the knowledge/code of jison.js::markBasename()+addName() usage + for (i = 0, len = alist.length; i < len; i++) { + var term = alist[i]; + var alias = term.match(alias_re); + if (alias) { + markBasename(alias[0].substr(1, alias[0].length - 2)); + term = term.replace(alias_re, ''); + } + if (term.match(term_re)) { + markBasename(term); + } + } + // then check & register both regular and aliased names, e.g., `id[alias1]` and `id1` + for (i = 0, len = alist.length; i < len; i++) { + var term = alist[i]; + var alias = term.match(alias_re); + if (alias) { + addName(alias[0].substr(1, alias[0].length - 2), i); + term = term.replace(alias_re, ''); + } + if (term.match(term_re)) { + addName(term, i); + } + } + if (devDebug > 2) console.log('good_aliases: ', { + donotalias: donotalias, + good_aliases: good_aliases, + alias_cnt: alias_cnt, + }); + + // now scan the action for all named and numeric semantic values ($nonterminal / $1 / @1, ##1, ...) + // + // Note that `#name` are straight **static** symbol translations, which are okay as they don't + // require access to the parse stack: `#n` references can be resolved completely + // at grammar compile time. + // + var nameref_re = new XRegExp(`(?:[$@]|##)${ID_REGEX_BASE}`, 'g'); + var named_spots = nameref_re.exec(action); + var numbered_spots = action.match(/(?:[$@]|##)[0-9]+\b/g); + var max_term_index = list.terms.length; + if (devDebug > 2) console.log('ACTION named_spots: ', named_spots); + if (devDebug > 2) console.log('ACTION numbered_spots: ', numbered_spots); + + // loop through the XRegExp alias regex matches in `action` + while (named_spots) { + n = named_spots[0].replace(/^(?:[$@]|##)/, ''); + if (!good_aliases[n]) { + throw new Error('The action block references the named alias "' + n + '" ' + + 'which is not available in production "' + handle + '"; ' + + 'it probably got removed by the EBNF rule rewrite process.\n' + + 'Be reminded that you cannot reference sub-elements within EBNF */+/? 
groups, ' + + 'only the outer-most EBNF group alias will remain available at all times ' + + 'due to the EBNF-to-BNF rewrite process.'); + } + + if (alias_cnt[n] !== 1) { + throw new Error('The action block references the ambiguous named alias or term reference "' + n + '" ' + + 'which is mentioned ' + alias_cnt[n] + ' times in production "' + handle + '", implicit and explicit aliases included.\n' + + 'You should either provide unambiguous = uniquely named aliases for these terms or use numeric index references (e.g. `$3`) as a stop-gap in your action code.\n' + + 'Be reminded that you cannot reference sub-elements within EBNF */+/? groups, ' + + 'only the outer-most EBNF group alias will remain available at all times ' + + 'due to the EBNF-to-BNF rewrite process.'); + } + //assert(good_aliases[n] <= max_term_index, 'max term index'); + + named_spots = nameref_re.exec(action); + } + if (numbered_spots) { + for (i = 0, len = numbered_spots.length; i < len; i++) { + n = parseInt(numbered_spots[i].replace(/^(?:[$@]|##)/, '')); + if (n > max_term_index) { + /* @const */ var n_suffixes = [ 'st', 'nd', 'rd', 'th' ]; + throw new Error('The action block references the ' + n + n_suffixes[Math.max(0, Math.min(3, n - 1))] + ' term, ' + + 'which is not available in production "' + handle + '"; ' + + 'Be reminded that you cannot reference sub-elements within EBNF */+/? groups, ' + + 'only the outer-most EBNF group alias will remain available at all times ' + + 'due to the EBNF-to-BNF rewrite process.'); + } + } + } + } + ret.push(action); + } + if (opts) { + ret.push(opts); + } + if (devDebug > 1) console.log('\n\nEBNF tx result:\n ', JSON.stringify(list, null, 2), JSON.stringify(ret, null, 2)); + + if (ret.length === 1) { + return ret[0]; + } else { + return ret; + } + }); +} + +var ref_list; +var ref_names; + +// create a deep copy of the input, so we will keep the input constant. +function deepClone(from, sub) { + if (sub == null) { + ref_list = []; + ref_names = []; + sub = 'root'; + } + if (typeof from === 'function') return from; + if (from == null || typeof from !== 'object') return from; + if (from.constructor !== Object && from.constructor !== Array) { + return from; + } + + for (var i = 0, len = ref_list.length; i < len; i++) { + if (ref_list[i] === from) { + throw new Error('[Circular/Xref:' + ref_names[i] + ']'); // circular or cross reference + } + } + ref_list.push(from); + ref_names.push(sub); + sub += '.'; + + var to = new from.constructor(); + for (var name in from) { + to[name] = deepClone(from[name], sub + name); + } + return to; +} + +function transformGrammar(grammar) { + grammar = deepClone(grammar); + + Object.keys(grammar).forEach(function transformGrammarForKey(id) { + grammar[id] = transformProduction(id, grammar[id], grammar); + }); + + return grammar; +} + +function transform(ebnf) { + if (devDebug > 0) console.log('EBNF:\n ', JSON.stringify(ebnf, null, 2)); + var rv = transformGrammar(ebnf); + if (devDebug > 0) console.log('\n\nEBNF after transformation:\n ', JSON.stringify(rv, null, 2)); + + return rv; +} + +// hack: +var assert; + +/* parser generated by jison 0.6.1-205 */ + +/* + * Returns a Parser object of the following structure: + * + * Parser: { + * yy: {} The so-called "shared state" or rather the *source* of it; + * the real "shared state" `yy` passed around to + * the rule actions, etc. is a derivative/copy of this one, + * not a direct reference! 
+ * } + * + * Parser.prototype: { + * yy: {}, + * EOF: 1, + * TERROR: 2, + * + * trace: function(errorMessage, ...), + * + * JisonParserError: function(msg, hash), + * + * quoteName: function(name), + * Helper function which can be overridden by user code later on: put suitable + * quotes around literal IDs in a description string. + * + * originalQuoteName: function(name), + * The basic quoteName handler provided by JISON. + * `cleanupAfterParse()` will clean up and reset `quoteName()` to reference this function + * at the end of the `parse()`. + * + * describeSymbol: function(symbol), + * Return a more-or-less human-readable description of the given symbol, when + * available, or the symbol itself, serving as its own 'description' for lack + * of something better to serve up. + * + * Return NULL when the symbol is unknown to the parser. + * + * symbols_: {associative list: name ==> number}, + * terminals_: {associative list: number ==> name}, + * nonterminals: {associative list: rule-name ==> {associative list: number ==> rule-alt}}, + * terminal_descriptions_: (if there are any) {associative list: number ==> description}, + * productions_: [...], + * + * performAction: function parser__performAction(yytext, yyleng, yylineno, yyloc, yystate, yysp, yyvstack, yylstack, yystack, yysstack), + * + * The function parameters and `this` have the following value/meaning: + * - `this` : reference to the `yyval` internal object, which has members (`$` and `_$`) + * to store/reference the rule value `$$` and location info `@$`. + * + * One important thing to note about `this` a.k.a. `yyval`: every *reduce* action gets + * to see the same object via the `this` reference, i.e. if you wish to carry custom + * data from one reduce action through to the next within a single parse run, then you + * may get nasty and use `yyval` a.k.a. `this` for storing you own semi-permanent data. + * + * `this.yy` is a direct reference to the `yy` shared state object. + * + * `%parse-param`-specified additional `parse()` arguments have been added to this `yy` + * object at `parse()` start and are therefore available to the action code via the + * same named `yy.xxxx` attributes (where `xxxx` represents a identifier name from + * the %parse-param` list. + * + * - `yytext` : reference to the lexer value which belongs to the last lexer token used + * to match this rule. This is *not* the look-ahead token, but the last token + * that's actually part of this rule. + * + * Formulated another way, `yytext` is the value of the token immediately preceeding + * the current look-ahead token. + * Caveats apply for rules which don't require look-ahead, such as epsilon rules. + * + * - `yyleng` : ditto as `yytext`, only now for the lexer.yyleng value. + * + * - `yylineno`: ditto as `yytext`, only now for the lexer.yylineno value. + * + * - `yyloc` : ditto as `yytext`, only now for the lexer.yylloc lexer token location info. + * + * WARNING: since jison 0.4.18-186 this entry may be NULL/UNDEFINED instead + * of an empty object when no suitable location info can be provided. + * + * - `yystate` : the current parser state number, used internally for dispatching and + * executing the action code chunk matching the rule currently being reduced. + * + * - `yysp` : the current state stack position (a.k.a. 'stack pointer') + * + * This one comes in handy when you are going to do advanced things to the parser + * stacks, all of which are accessible from your action code (see the next entries below). 
+ * + * Also note that you can access this and other stack index values using the new double-hash + * syntax, i.e. `##$ === ##0 === yysp`, while `##1` is the stack index for all things + * related to the first rule term, just like you have `$1`, `@1` and `#1`. + * This is made available to write very advanced grammar action rules, e.g. when you want + * to investigate the parse state stack in your action code, which would, for example, + * be relevant when you wish to implement error diagnostics and reporting schemes similar + * to the work described here: + * + * + Pottier, F., 2016. Reachability and error diagnosis in LR(1) automata. + * In Journées Francophones des Languages Applicatifs. + * + * + Jeffery, C.L., 2003. Generating LR syntax error messages from examples. + * ACM Transactions on Programming Languages and Systems (TOPLAS), 25(5), pp.631–640. + * + * - `yyrulelength`: the current rule's term count, i.e. the number of entries occupied on the stack. + * + * This one comes in handy when you are going to do advanced things to the parser + * stacks, all of which are accessible from your action code (see the next entries below). + * + * - `yyvstack`: reference to the parser value stack. Also accessed via the `$1` etc. + * constructs. + * + * - `yylstack`: reference to the parser token location stack. Also accessed via + * the `@1` etc. constructs. + * + * WARNING: since jison 0.4.18-186 this array MAY contain slots which are + * UNDEFINED rather than an empty (location) object, when the lexer/parser + * action code did not provide a suitable location info object when such a + * slot was filled! + * + * - `yystack` : reference to the parser token id stack. Also accessed via the + * `#1` etc. constructs. + * + * Note: this is a bit of a **white lie** as we can statically decode any `#n` reference to + * its numeric token id value, hence that code wouldn't need the `yystack` but *you* might + * want access this array for your own purposes, such as error analysis as mentioned above! + * + * Note that this stack stores the current stack of *tokens*, that is the sequence of + * already parsed=reduced *nonterminals* (tokens representing rules) and *terminals* + * (lexer tokens *shifted* onto the stack until the rule they belong to is found and + * *reduced*. + * + * - `yysstack`: reference to the parser state stack. This one carries the internal parser + * *states* such as the one in `yystate`, which are used to represent + * the parser state machine in the *parse table*. *Very* *internal* stuff, + * what can I say? If you access this one, you're clearly doing wicked things + * + * - `...` : the extra arguments you specified in the `%parse-param` statement in your + * grammar definition file. + * + * table: [...], + * State transition table + * ---------------------- + * + * index levels are: + * - `state` --> hash table + * - `symbol` --> action (number or array) + * + * If the `action` is an array, these are the elements' meaning: + * - index [0]: 1 = shift, 2 = reduce, 3 = accept + * - index [1]: GOTO `state` + * + * If the `action` is a number, it is the GOTO `state` + * + * defaultActions: {...}, + * + * parseError: function(str, hash, ExceptionClass), + * yyError: function(str, ...), + * yyRecovering: function(), + * yyErrOk: function(), + * yyClearIn: function(), + * + * constructParseErrorInfo: function(error_message, exception_object, expected_token_set, is_recoverable), + * Helper function **which will be set up during the first invocation of the `parse()` method**. 
+ * Produces a new errorInfo 'hash object' which can be passed into `parseError()`. + * See it's use in this parser kernel in many places; example usage: + * + * var infoObj = parser.constructParseErrorInfo('fail!', null, + * parser.collect_expected_token_set(state), true); + * var retVal = parser.parseError(infoObj.errStr, infoObj, parser.JisonParserError); + * + * originalParseError: function(str, hash, ExceptionClass), + * The basic `parseError` handler provided by JISON. + * `cleanupAfterParse()` will clean up and reset `parseError()` to reference this function + * at the end of the `parse()`. + * + * options: { ... parser %options ... }, + * + * parse: function(input[, args...]), + * Parse the given `input` and return the parsed value (or `true` when none was provided by + * the root action, in which case the parser is acting as a *matcher*). + * You MAY use the additional `args...` parameters as per `%parse-param` spec of this grammar: + * these extra `args...` are added verbatim to the `yy` object reference as member variables. + * + * WARNING: + * Parser's additional `args...` parameters (via `%parse-param`) MAY conflict with + * any attributes already added to `yy` by the jison run-time; + * when such a collision is detected an exception is thrown to prevent the generated run-time + * from silently accepting this confusing and potentially hazardous situation! + * + * The lexer MAY add its own set of additional parameters (via the `%parse-param` line in + * the lexer section of the grammar spec): these will be inserted in the `yy` shared state + * object and any collision with those will be reported by the lexer via a thrown exception. + * + * cleanupAfterParse: function(resultValue, invoke_post_methods, do_not_nuke_errorinfos), + * Helper function **which will be set up during the first invocation of the `parse()` method**. + * This helper API is invoked at the end of the `parse()` call, unless an exception was thrown + * and `%options no-try-catch` has been defined for this grammar: in that case this helper MAY + * be invoked by calling user code to ensure the `post_parse` callbacks are invoked and + * the internal parser gets properly garbage collected under these particular circumstances. + * + * yyMergeLocationInfo: function(first_index, last_index, first_yylloc, last_yylloc, dont_look_back), + * Helper function **which will be set up during the first invocation of the `parse()` method**. + * This helper API can be invoked to calculate a spanning `yylloc` location info object. + * + * Note: %epsilon rules MAY specify no `first_index` and `first_yylloc`, in which case + * this function will attempt to obtain a suitable location marker by inspecting the location stack + * backwards. + * + * For more info see the documentation comment further below, immediately above this function's + * implementation. + * + * lexer: { + * yy: {...}, A reference to the so-called "shared state" `yy` once + * received via a call to the `.setInput(input, yy)` lexer API. 
+ * EOF: 1, + * ERROR: 2, + * JisonLexerError: function(msg, hash), + * parseError: function(str, hash, ExceptionClass), + * setInput: function(input, [yy]), + * input: function(), + * unput: function(str), + * more: function(), + * reject: function(), + * less: function(n), + * pastInput: function(n), + * upcomingInput: function(n), + * showPosition: function(), + * test_match: function(regex_match_array, rule_index, ...), + * next: function(...), + * lex: function(...), + * begin: function(condition), + * pushState: function(condition), + * popState: function(), + * topState: function(), + * _currentRules: function(), + * stateStackSize: function(), + * cleanupAfterLex: function() + * + * options: { ... lexer %options ... }, + * + * performAction: function(yy, yy_, $avoiding_name_collisions, YY_START, ...), + * rules: [...], + * conditions: {associative list: name ==> set}, + * } + * } + * + * + * token location info (@$, _$, etc.): { + * first_line: n, + * last_line: n, + * first_column: n, + * last_column: n, + * range: [start_number, end_number] + * (where the numbers are indexes into the input string, zero-based) + * } + * + * --- + * + * The `parseError` function receives a 'hash' object with these members for lexer and + * parser errors: + * + * { + * text: (matched text) + * token: (the produced terminal token, if any) + * token_id: (the produced terminal token numeric ID, if any) + * line: (yylineno) + * loc: (yylloc) + * } + * + * parser (grammar) errors will also provide these additional members: + * + * { + * expected: (array describing the set of expected tokens; + * may be UNDEFINED when we cannot easily produce such a set) + * state: (integer (or array when the table includes grammar collisions); + * represents the current internal state of the parser kernel. + * can, for example, be used to pass to the `collect_expected_token_set()` + * API to obtain the expected token set) + * action: (integer; represents the current internal action which will be executed) + * new_state: (integer; represents the next/planned internal state, once the current + * action has executed) + * recoverable: (boolean: TRUE when the parser MAY have an error recovery rule + * available for this particular error) + * state_stack: (array: the current parser LALR/LR internal state stack; this can be used, + * for instance, for advanced error analysis and reporting) + * value_stack: (array: the current parser LALR/LR internal `$$` value stack; this can be used, + * for instance, for advanced error analysis and reporting) + * location_stack: (array: the current parser LALR/LR internal location stack; this can be used, + * for instance, for advanced error analysis and reporting) + * yy: (object: the current parser internal "shared state" `yy` + * as is also available in the rule actions; this can be used, + * for instance, for advanced error analysis and reporting) + * lexer: (reference to the current lexer instance used by the parser) + * parser: (reference to the current parser instance) + * } + * + * while `this` will reference the current parser instance. 
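+ *
+ * A purely illustrative, non-normative override (assuming a `parser` instance built
+ * from this module and that logging to the console is acceptable):
+ *
+ *     parser.yy.parseError = function (str, hash) {
+ *         if (hash.recoverable) {
+ *             console.error('recoverable parse error:', str);
+ *         } else {
+ *             throw new Error(str + ' (around line ' + (hash.line + 1) + ')');
+ *         }
+ *     };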
+ * + * When `parseError` is invoked by the lexer, `this` will still reference the related *parser* + * instance, while these additional `hash` fields will also be provided: + * + * { + * lexer: (reference to the current lexer instance which reported the error) + * } + * + * When `parseError` is invoked by the parser due to a **JavaScript exception** being fired + * from either the parser or lexer, `this` will still reference the related *parser* + * instance, while these additional `hash` fields will also be provided: + * + * { + * exception: (reference to the exception thrown) + * } + * + * Please do note that in the latter situation, the `expected` field will be omitted as + * this type of failure is assumed not to be due to *parse errors* but rather due to user + * action code in either parser or lexer failing unexpectedly. + * + * --- + * + * You can specify parser options by setting / modifying the `.yy` object of your Parser instance. + * These options are available: + * + * ### options which are global for all parser instances + * + * Parser.pre_parse: function(yy) + * optional: you can specify a pre_parse() function in the chunk following + * the grammar, i.e. after the last `%%`. + * Parser.post_parse: function(yy, retval, parseInfo) { return retval; } + * optional: you can specify a post_parse() function in the chunk following + * the grammar, i.e. after the last `%%`. When it does not return any value, + * the parser will return the original `retval`. + * + * ### options which can be set up per parser instance + * + * yy: { + * pre_parse: function(yy) + * optional: is invoked before the parse cycle starts (and before the first + * invocation of `lex()`) but immediately after the invocation of + * `parser.pre_parse()`). + * post_parse: function(yy, retval, parseInfo) { return retval; } + * optional: is invoked when the parse terminates due to success ('accept') + * or failure (even when exceptions are thrown). + * `retval` contains the return value to be produced by `Parser.parse()`; + * this function can override the return value by returning another. + * When it does not return any value, the parser will return the original + * `retval`. + * This function is invoked immediately before `parser.post_parse()`. + * + * parseError: function(str, hash, ExceptionClass) + * optional: overrides the default `parseError` function. + * quoteName: function(name), + * optional: overrides the default `quoteName` function. + * } + * + * parser.lexer.options: { + * pre_lex: function() + * optional: is invoked before the lexer is invoked to produce another token. + * `this` refers to the Lexer object. + * post_lex: function(token) { return token; } + * optional: is invoked when the lexer has produced a token `token`; + * this function can override the returned token value by returning another. + * When it does not return any (truthy) value, the lexer will return + * the original `token`. + * `this` refers to the Lexer object. + * + * ranges: boolean + * optional: `true` ==> token location info will include a .range[] member. + * flex: boolean + * optional: `true` ==> flex-like lexing behaviour where the rules are tested + * exhaustively to find the longest match. + * backtrack_lexer: boolean + * optional: `true` ==> lexer regexes are tested in order and for invoked; + * the lexer terminates the scan when a token is returned by the action code. + * xregexp: boolean + * optional: `true` ==> lexer rule regexes are "extended regex format" requiring the + * `XRegExp` library. 
When this `%option` has not been specified at compile time, all lexer + * rule regexes have been written as standard JavaScript RegExp expressions. + * } + */ + + +// See also: +// http://stackoverflow.com/questions/1382107/whats-a-good-way-to-extend-error-in-javascript/#35881508 +// but we keep the prototype.constructor and prototype.name assignment lines too for compatibility +// with userland code which might access the derived class in a 'classic' way. +function JisonParserError(msg, hash) { + Object.defineProperty(this, 'name', { + enumerable: false, + writable: false, + value: 'JisonParserError' + }); + + if (msg == null) msg = '???'; + + Object.defineProperty(this, 'message', { + enumerable: false, + writable: true, + value: msg + }); + + this.hash = hash; + + var stacktrace; + if (hash && hash.exception instanceof Error) { + var ex2 = hash.exception; + this.message = ex2.message || msg; + stacktrace = ex2.stack; + } + if (!stacktrace) { + if (Error.hasOwnProperty('captureStackTrace')) { // V8/Chrome engine + Error.captureStackTrace(this, this.constructor); + } else { + stacktrace = (new Error(msg)).stack; + } + } + if (stacktrace) { + Object.defineProperty(this, 'stack', { + enumerable: false, + writable: false, + value: stacktrace + }); + } +} + +if (typeof Object.setPrototypeOf === 'function') { + Object.setPrototypeOf(JisonParserError.prototype, Error.prototype); +} else { + JisonParserError.prototype = Object.create(Error.prototype); +} +JisonParserError.prototype.constructor = JisonParserError; +JisonParserError.prototype.name = 'JisonParserError'; + + + + // helper: reconstruct the productions[] table + function bp(s) { + var rv = []; + var p = s.pop; + var r = s.rule; + for (var i = 0, l = p.length; i < l; i++) { + rv.push([ + p[i], + r[i] + ]); + } + return rv; + } + + + + // helper: reconstruct the defaultActions[] table + function bda(s) { + var rv = {}; + var d = s.idx; + var g = s.goto; + for (var i = 0, l = d.length; i < l; i++) { + var j = d[i]; + rv[j] = g[i]; + } + return rv; + } + + + + // helper: reconstruct the 'goto' table + function bt(s) { + var rv = []; + var d = s.len; + var y = s.symbol; + var t = s.type; + var a = s.state; + var m = s.mode; + var g = s.goto; + for (var i = 0, l = d.length; i < l; i++) { + var n = d[i]; + var q = {}; + for (var j = 0; j < n; j++) { + var z = y.shift(); + switch (t.shift()) { + case 2: + q[z] = [ + m.shift(), + g.shift() + ]; + break; + + case 0: + q[z] = a.shift(); + break; + + default: + // type === 1: accept + q[z] = [ + 3 + ]; + } + } + rv.push(q); + } + return rv; + } + + + + // helper: runlength encoding with increment step: code, length: step (default step = 0) + // `this` references an array + function s(c, l, a) { + a = a || 0; + for (var i = 0; i < l; i++) { + this.push(c); + c += a; + } + } + + // helper: duplicate sequence from *relative* offset and length. + // `this` references an array + function c(i, l) { + i = this.length - i; + for (l += i; i < l; i++) { + this.push(this[i]); + } + } + + // helper: unpack an array using helpers and data, all passed in an array argument 'a'. + function u(a) { + var rv = []; + for (var i = 0, l = a.length; i < l; i++) { + var e = a[i]; + // Is this entry a helper function? + if (typeof e === 'function') { + i++; + e.apply(rv, a[i]); + } else { + rv.push(e); + } + } + return rv; + } + + +var parser = { + // Code Generator Information Report + // --------------------------------- + // + // Options: + // + // default action mode: ............. 
classic,merge + // no try..catch: ................... false + // no default resolve on conflict: false + // on-demand look-ahead: ............ false + // error recovery token skip maximum: 3 + // yyerror in parse actions is: ..... NOT recoverable, + // yyerror in lexer actions and other non-fatal lexer are: + // .................................. NOT recoverable, + // debug grammar/output: ............ false + // has partial LR conflict upgrade: true + // rudimentary token-stack support: false + // parser table compression mode: ... 2 + // export debug tables: ............. false + // export *all* tables: ............. false + // module type: ..................... es + // parser engine type: .............. lalr + // output main() in the module: ..... true + // has user-specified main(): ....... false + // has user-specified require()/import modules for main(): + // .................................. false + // number of expected conflicts: .... 0 + // + // + // Parser Analysis flags: + // + // no significant actions (parser is a language matcher only): + // .................................. false + // uses yyleng: ..................... false + // uses yylineno: ................... false + // uses yytext: ..................... false + // uses yylloc: ..................... false + // uses ParseError API: ............. false + // uses YYERROR: .................... true + // uses YYRECOVERING: ............... false + // uses YYERROK: .................... false + // uses YYCLEARIN: .................. false + // tracks rule values: .............. true + // assigns rule values: ............. true + // uses location tracking: .......... true + // assigns location: ................ true + // uses yystack: .................... false + // uses yysstack: ................... false + // uses yysp: ....................... true + // uses yyrulelength: ............... false + // uses yyMergeLocationInfo API: .... true + // has error recovery: .............. true + // has error reporting: ............. 
true + // + // --------- END OF REPORT ----------- + +trace: function no_op_trace() {}, +JisonParserError: JisonParserError, +yy: {}, +options: { + type: "lalr", + hasPartialLrUpgradeOnConflict: true, + errorRecoveryTokenDiscardCount: 3 +}, +symbols_: { + "$accept": 0, + "$end": 1, + "%%": 14, + "(": 7, + ")": 8, + "*": 9, + "+": 11, + ":": 5, + ";": 4, + "=": 3, + "?": 10, + "ACTION": 15, + "ACTION_BODY": 43, + "ALIAS": 39, + "ARROW_ACTION": 42, + "CODE": 46, + "DEBUG": 19, + "EBNF": 20, + "EOF": 1, + "EOF_ID": 40, + "EPSILON": 38, + "ID": 24, + "IMPORT": 22, + "INCLUDE": 44, + "INIT_CODE": 23, + "INTEGER": 37, + "LEFT": 33, + "LEX_BLOCK": 17, + "NAME": 25, + "NONASSOC": 35, + "OPTIONS": 27, + "OPTIONS_END": 28, + "OPTION_STRING_VALUE": 29, + "OPTION_VALUE": 30, + "PARSER_TYPE": 32, + "PARSE_PARAM": 31, + "PATH": 45, + "PREC": 41, + "RIGHT": 34, + "START": 16, + "STRING": 26, + "TOKEN": 18, + "TOKEN_TYPE": 36, + "UNKNOWN_DECL": 21, + "action": 85, + "action_body": 86, + "action_comments_body": 87, + "action_ne": 84, + "associativity": 61, + "declaration": 51, + "declaration_list": 50, + "error": 2, + "expression": 79, + "extra_parser_module_code": 88, + "full_token_definitions": 63, + "grammar": 69, + "handle": 76, + "handle_action": 75, + "handle_list": 74, + "handle_sublist": 77, + "id": 83, + "id_list": 68, + "import_name": 53, + "import_path": 54, + "include_macro_code": 89, + "init_code_name": 52, + "module_code_chunk": 90, + "one_full_token": 64, + "operator": 60, + "option": 57, + "option_list": 56, + "optional_action_header_block": 49, + "optional_end_block": 48, + "optional_module_code_chunk": 91, + "optional_production_description": 73, + "optional_token_type": 65, + "options": 55, + "parse_params": 58, + "parser_type": 59, + "prec": 81, + "production": 71, + "production_id": 72, + "production_list": 70, + "spec": 47, + "suffix": 80, + "suffixed_expression": 78, + "symbol": 82, + "token_description": 67, + "token_list": 62, + "token_value": 66, + "{": 12, + "|": 6, + "}": 13 +}, +terminals_: { + 1: "EOF", + 2: "error", + 3: "=", + 4: ";", + 5: ":", + 6: "|", + 7: "(", + 8: ")", + 9: "*", + 10: "?", + 11: "+", + 12: "{", + 13: "}", + 14: "%%", + 15: "ACTION", + 16: "START", + 17: "LEX_BLOCK", + 18: "TOKEN", + 19: "DEBUG", + 20: "EBNF", + 21: "UNKNOWN_DECL", + 22: "IMPORT", + 23: "INIT_CODE", + 24: "ID", + 25: "NAME", + 26: "STRING", + 27: "OPTIONS", + 28: "OPTIONS_END", + 29: "OPTION_STRING_VALUE", + 30: "OPTION_VALUE", + 31: "PARSE_PARAM", + 32: "PARSER_TYPE", + 33: "LEFT", + 34: "RIGHT", + 35: "NONASSOC", + 36: "TOKEN_TYPE", + 37: "INTEGER", + 38: "EPSILON", + 39: "ALIAS", + 40: "EOF_ID", + 41: "PREC", + 42: "ARROW_ACTION", + 43: "ACTION_BODY", + 44: "INCLUDE", + 45: "PATH", + 46: "CODE" +}, +TERROR: 2, +EOF: 1, + +// internals: defined here so the object *structure* doesn't get modified by parse() et al, +// thus helping JIT compilers like Chrome V8. 
+originalQuoteName: null, +originalParseError: null, +cleanupAfterParse: null, +constructParseErrorInfo: null, +yyMergeLocationInfo: null, + +__reentrant_call_depth: 0, // INTERNAL USE ONLY +__error_infos: [], // INTERNAL USE ONLY: the set of parseErrorInfo objects created since the last cleanup +__error_recovery_infos: [], // INTERNAL USE ONLY: the set of parseErrorInfo objects created since the last cleanup + +// APIs which will be set up depending on user action code analysis: +//yyRecovering: 0, +//yyErrOk: 0, +//yyClearIn: 0, + +// Helper APIs +// ----------- + +// Helper function which can be overridden by user code later on: put suitable quotes around +// literal IDs in a description string. +quoteName: function parser_quoteName(id_str) { + return '"' + id_str + '"'; +}, + +// Return the name of the given symbol (terminal or non-terminal) as a string, when available. +// +// Return NULL when the symbol is unknown to the parser. +getSymbolName: function parser_getSymbolName(symbol) { + if (this.terminals_[symbol]) { + return this.terminals_[symbol]; + } + + // Otherwise... this might refer to a RULE token i.e. a non-terminal: see if we can dig that one up. + // + // An example of this may be where a rule's action code contains a call like this: + // + // parser.getSymbolName(#$) + // + // to obtain a human-readable name of the current grammar rule. + var s = this.symbols_; + for (var key in s) { + if (s[key] === symbol) { + return key; + } + } + return null; +}, + +// Return a more-or-less human-readable description of the given symbol, when available, +// or the symbol itself, serving as its own 'description' for lack of something better to serve up. +// +// Return NULL when the symbol is unknown to the parser. +describeSymbol: function parser_describeSymbol(symbol) { + if (symbol !== this.EOF && this.terminal_descriptions_ && this.terminal_descriptions_[symbol]) { + return this.terminal_descriptions_[symbol]; + } else if (symbol === this.EOF) { + return 'end of input'; + } + var id = this.getSymbolName(symbol); + if (id) { + return this.quoteName(id); + } + return null; +}, + +// Produce a (more or less) human-readable list of expected tokens at the point of failure. +// +// The produced list may contain token or token set descriptions instead of the tokens +// themselves to help turning this output into something that easier to read by humans +// unless `do_not_describe` parameter is set, in which case a list of the raw, *numeric*, +// expected terminals and nonterminals is produced. +// +// The returned list (array) will not contain any duplicate entries. +collect_expected_token_set: function parser_collect_expected_token_set(state, do_not_describe) { + var TERROR = this.TERROR; + var tokenset = []; + var check = {}; + // Has this (error?) state been outfitted with a custom expectations description text for human consumption? + // If so, use that one instead of the less palatable token set. + if (!do_not_describe && this.state_descriptions_ && this.state_descriptions_[state]) { + return [this.state_descriptions_[state]]; + } + for (var p in this.table[state]) { + p = +p; + if (p !== TERROR) { + var d = do_not_describe ? p : this.describeSymbol(p); + if (d && !check[d]) { + tokenset.push(d); + check[d] = true; // Mark this token description as already mentioned to prevent outputting duplicate entries. 
+ } + } + } + return tokenset; +}, +productions_: bp({ + pop: u([ + s, + [47, 3], + 48, + 48, + s, + [49, 3], + s, + [50, 3], + s, + [51, 20], + s, + [52, 3], + 53, + 53, + 54, + 54, + s, + [55, 3], + 56, + 56, + s, + [57, 6], + 58, + 58, + 59, + 59, + 60, + 60, + s, + [61, 3], + 62, + 62, + 63, + 63, + s, + [64, 3], + 65, + s, + [65, 4, 1], + 68, + 69, + 70, + 70, + s, + [71, 3], + 72, + 72, + 73, + 73, + s, + [74, 4], + s, + [75, 3], + 76, + 76, + 77, + 77, + 78, + 78, + s, + [79, 5], + s, + [80, 4], + s, + [81, 3], + 82, + 82, + 83, + s, + [84, 4], + s, + [85, 3], + s, + [86, 5], + 87, + 87, + 88, + 88, + 89, + 89, + s, + [90, 3], + 91, + 91 +]), + rule: u([ + 5, + 5, + 3, + 0, + 2, + 0, + s, + [2, 3], + c, + [4, 3], + 1, + 1, + c, + [3, 3], + s, + [1, 6], + s, + [3, 5], + s, + [2, 3], + c, + [15, 9], + c, + [11, 4], + c, + [20, 7], + s, + [2, 4], + s, + [1, 3], + 2, + 1, + 2, + 2, + c, + [15, 3], + 0, + c, + [11, 7], + c, + [36, 4], + 3, + 3, + 1, + 0, + 3, + c, + [39, 4], + c, + [80, 4], + c, + [9, 3], + c, + [39, 4], + 3, + 3, + c, + [34, 5], + c, + [40, 5], + c, + [32, 3], + s, + [1, 3], + 0, + 0, + 1, + 5, + 4, + 4, + c, + [53, 3], + c, + [85, 4], + c, + [35, 3], + 0 +]) +}), +performAction: function parser__PerformAction(yyloc, yystate /* action[1] */, yysp, yyvstack, yylstack) { + + /* this == yyval */ + + // the JS engine itself can go and remove these statements when `yy` turns out to be unused in any action code! + var yy = this.yy; + var yyparser = yy.parser; + var yylexer = yy.lexer; + + + + switch (yystate) { +case 0: + /*! Production:: $accept : spec $end */ + + // default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-): + this.$ = yyvstack[yysp - 1]; + this._$ = yylstack[yysp - 1]; + // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-) + break; + +case 1: + /*! Production:: spec : declaration_list "%%" grammar optional_end_block EOF */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 4, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = yyvstack[yysp - 4]; + if (yyvstack[yysp - 1].trim() !== '') { + yy.addDeclaration(this.$, { include: yyvstack[yysp - 1] }); + } + return extend(this.$, yyvstack[yysp - 2]); + break; + +case 2: + /*! Production:: spec : declaration_list "%%" grammar error EOF */ + + // default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-): + this.$ = yyvstack[yysp - 4]; + this._$ = yyparser.yyMergeLocationInfo(yysp - 4, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-) + + + yyparser.yyError(rmCommonWS` + Maybe you did not correctly separate trailing code from the grammar rule set with a '%%' marker on an otherwise empty line? + + Erroneous area: + ${yylexer.prettyPrintRange(yylstack[yysp - 1], yylstack[yysp - 2])} + `); + break; + +case 3: + /*! Production:: spec : declaration_list error EOF */ + + // default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-): + this.$ = yyvstack[yysp - 2]; + this._$ = yyparser.yyMergeLocationInfo(yysp - 2, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-) + + + yyparser.yyError(rmCommonWS` + Maybe you did not correctly separate the parse 'header section' (token definitions, options, lexer spec, etc.) from the grammar rule set with a '%%' on an otherwise empty line? 
+ + Erroneous area: + ${yylexer.prettyPrintRange(yylstack[yysp - 1], yylstack[yysp - 2])} + `); + break; + +case 4: + /*! Production:: optional_end_block : %epsilon */ +case 100: + /*! Production:: suffix : %epsilon */ +case 116: + /*! Production:: action : %epsilon */ +case 117: + /*! Production:: action_body : %epsilon */ +case 132: + /*! Production:: optional_module_code_chunk : %epsilon */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(null, null, null, null, true); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = ''; + break; + +case 5: + /*! Production:: optional_end_block : "%%" extra_parser_module_code */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + var rv = checkActionBlock(yyvstack[yysp], yylstack[yysp]); + if (rv) { + yyparser.yyError(rmCommonWS` + The extra parser module code section (a.k.a. 'epilogue') does not compile: ${rv} + + Erroneous area: + ${yylexer.prettyPrintRange(yylstack[yysp])} + `); + } + this.$ = yyvstack[yysp]; + break; + +case 6: + /*! Production:: optional_action_header_block : %epsilon */ +case 10: + /*! Production:: declaration_list : %epsilon */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(null, null, null, null, true); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = {}; + break; + +case 7: + /*! Production:: optional_action_header_block : optional_action_header_block ACTION */ +case 8: + /*! Production:: optional_action_header_block : optional_action_header_block include_macro_code */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = yyvstack[yysp - 1]; + var rv = checkActionBlock(yyvstack[yysp], yylstack[yysp]); + if (rv) { + yyparser.yyError(rmCommonWS` + action header code block does not compile: ${rv} + + Erroneous area: + ${yylexer.prettyPrintRange(yylstack[yysp])} + `); + } + yy.addDeclaration(this.$, { actionInclude: yyvstack[yysp] }); + break; + +case 9: + /*! Production:: declaration_list : declaration_list declaration */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = yyvstack[yysp - 1]; yy.addDeclaration(this.$, yyvstack[yysp]); + break; + +case 11: + /*! Production:: declaration_list : declaration_list error */ + + // default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-): + this.$ = yyvstack[yysp - 1]; + this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-) + + + // TODO ... + yyparser.yyError(rmCommonWS` + declaration list error? + + Erroneous area: + ${yylexer.prettyPrintRange(yylstack[yysp], yylstack[yysp - 1])} + `); + break; + +case 12: + /*! 
Production:: declaration : START id */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = {start: yyvstack[yysp]}; + break; + +case 13: + /*! Production:: declaration : LEX_BLOCK */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yylstack[yysp]; + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = {lex: {text: yyvstack[yysp], position: yylstack[yysp]}}; + break; + +case 14: + /*! Production:: declaration : operator */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yylstack[yysp]; + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = {operator: yyvstack[yysp]}; + break; + +case 15: + /*! Production:: declaration : TOKEN full_token_definitions */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = {token_list: yyvstack[yysp]}; + break; + +case 16: + /*! Production:: declaration : ACTION */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yylstack[yysp]; + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + var rv = checkActionBlock(yyvstack[yysp], yylstack[yysp]); + if (rv) { + yyparser.yyError(rmCommonWS` + action code block does not compile: ${rv} + + Erroneous area: + ${yylexer.prettyPrintRange(yylstack[yysp])} + `); + } + this.$ = {include: yyvstack[yysp]}; + break; + +case 17: + /*! Production:: declaration : include_macro_code */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yylstack[yysp]; + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + var rv = checkActionBlock(yyvstack[yysp], yylstack[yysp]); + if (rv) { + yyparser.yyError(rmCommonWS` + action header code block does not compile: ${rv} + + Erroneous area: + ${yylexer.prettyPrintRange(yylstack[yysp])} + `); + } + this.$ = {include: yyvstack[yysp]}; + break; + +case 18: + /*! Production:: declaration : parse_params */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yylstack[yysp]; + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = {parseParams: yyvstack[yysp]}; + break; + +case 19: + /*! Production:: declaration : parser_type */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yylstack[yysp]; + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = {parserType: yyvstack[yysp]}; + break; + +case 20: + /*! Production:: declaration : options */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yylstack[yysp]; + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = {options: yyvstack[yysp]}; + break; + +case 21: + /*! 
Production:: declaration : DEBUG */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yylstack[yysp]; + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = {options: [['debug', true]]}; + break; + +case 22: + /*! Production:: declaration : EBNF */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yylstack[yysp]; + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + ebnf = true; + this.$ = {options: [['ebnf', true]]}; + break; + +case 23: + /*! Production:: declaration : UNKNOWN_DECL */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yylstack[yysp]; + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = {unknownDecl: yyvstack[yysp]}; + break; + +case 24: + /*! Production:: declaration : IMPORT import_name import_path */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 2, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = {imports: {name: yyvstack[yysp - 1], path: yyvstack[yysp]}}; + break; + +case 25: + /*! Production:: declaration : IMPORT import_name error */ + + // default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-): + this.$ = yyvstack[yysp - 2]; + this._$ = yyparser.yyMergeLocationInfo(yysp - 2, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-) + + + yyparser.yyError(rmCommonWS` + You did not specify a legal file path for the '%import' initialization code statement, which must have the format: + + %import qualifier_name file_path + + Erroneous area: + ${yylexer.prettyPrintRange(yylstack[yysp], yylstack[yysp - 2])} + `); + break; + +case 26: + /*! Production:: declaration : IMPORT error import_path */ + + // default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-): + this.$ = yyvstack[yysp - 2]; + this._$ = yyparser.yyMergeLocationInfo(yysp - 2, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-) + + + yyparser.yyError(rmCommonWS` + Each '%import'-ed initialization code section must be qualified by a name, e.g. 'required' before the import path itself: + + %import qualifier_name file_path + + Erroneous area: + ${yylexer.prettyPrintRange(yylstack[yysp - 1], yylstack[yysp - 2])} + `); + break; + +case 27: + /*! Production:: declaration : INIT_CODE init_code_name action_ne */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 2, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + var rv = checkActionBlock(yyvstack[yysp], yylstack[yysp]); + if (rv) { + yyparser.yyError(rmCommonWS` + %code "${$init_code_name}" initialization section action code block does not compile: ${rv} + + Erroneous area: + ${yylexer.prettyPrintRange(yylstack[yysp], yylstack[yysp - 2])} + `); + } + this.$ = { + initCode: { + qualifier: yyvstack[yysp - 1], + include: yyvstack[yysp] + } + }; + break; + +case 28: + /*! 
Production:: declaration : INIT_CODE error action_ne */ + + // default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-): + this.$ = yyvstack[yysp - 2]; + this._$ = yyparser.yyMergeLocationInfo(yysp - 2, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-) + + + yyparser.yyError(rmCommonWS` + Each '%code' initialization code section must be qualified by a name, e.g. 'required' before the action code itself: + + %code qualifier_name {action code} + + Erroneous area: + ${yylexer.prettyPrintRange(yylstack[yysp - 1], yylstack[yysp - 2], yylstack[yysp])} + `); + break; + +case 29: + /*! Production:: declaration : START error */ + + // default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-): + this.$ = yyvstack[yysp - 1]; + this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-) + + + // TODO ... + yyparser.yyError(rmCommonWS` + %start token error? + + Erroneous area: + ${yylexer.prettyPrintRange(yylstack[yysp], yylstack[yysp - 1])} + `); + break; + +case 30: + /*! Production:: declaration : TOKEN error */ + + // default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-): + this.$ = yyvstack[yysp - 1]; + this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-) + + + // TODO ... + yyparser.yyError(rmCommonWS` + %token definition list error? + + Erroneous area: + ${yylexer.prettyPrintRange(yylstack[yysp], yylstack[yysp - 1])} + `); + break; + +case 31: + /*! Production:: declaration : IMPORT error */ + + // default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-): + this.$ = yyvstack[yysp - 1]; + this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-) + + + // TODO ... + yyparser.yyError(rmCommonWS` + %import name or source filename missing maybe? + + Erroneous area: + ${yylexer.prettyPrintRange(yylstack[yysp], yylstack[yysp - 1])} + `); + break; + +case 32: + /*! Production:: init_code_name : ID */ +case 33: + /*! Production:: init_code_name : NAME */ +case 34: + /*! Production:: init_code_name : STRING */ +case 35: + /*! Production:: import_name : ID */ +case 36: + /*! Production:: import_name : STRING */ +case 37: + /*! Production:: import_path : ID */ +case 38: + /*! Production:: import_path : STRING */ +case 67: + /*! Production:: optional_token_type : TOKEN_TYPE */ +case 68: + /*! Production:: token_value : INTEGER */ +case 69: + /*! Production:: token_description : STRING */ +case 80: + /*! Production:: optional_production_description : STRING */ +case 95: + /*! Production:: expression : ID */ +case 101: + /*! Production:: suffix : "*" */ +case 102: + /*! Production:: suffix : "?" */ +case 103: + /*! Production:: suffix : "+" */ +case 107: + /*! Production:: symbol : id */ +case 108: + /*! Production:: symbol : STRING */ +case 109: + /*! Production:: id : ID */ +case 112: + /*! Production:: action_ne : ACTION */ +case 113: + /*! Production:: action_ne : include_macro_code */ +case 114: + /*! Production:: action : action_ne */ +case 118: + /*! Production:: action_body : action_comments_body */ +case 122: + /*! Production:: action_comments_body : ACTION_BODY */ +case 124: + /*! Production:: extra_parser_module_code : optional_module_code_chunk */ +case 128: + /*! 
Production:: module_code_chunk : CODE */ +case 131: + /*! Production:: optional_module_code_chunk : module_code_chunk */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yylstack[yysp]; + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = yyvstack[yysp]; + break; + +case 39: + /*! Production:: options : OPTIONS option_list OPTIONS_END */ +case 110: + /*! Production:: action_ne : "{" action_body "}" */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 2, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = yyvstack[yysp - 1]; + break; + +case 40: + /*! Production:: options : OPTIONS error OPTIONS_END */ + + // default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-): + this.$ = yyvstack[yysp - 2]; + this._$ = yyparser.yyMergeLocationInfo(yysp - 2, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-) + + + // TODO ... + yyparser.yyError(rmCommonWS` + %options ill defined / error? + + Erroneous area: + ${yylexer.prettyPrintRange(yylstack[yysp - 1], yylstack[yysp - 2], yylstack[yysp])} + `); + break; + +case 41: + /*! Production:: options : OPTIONS error */ + + // default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-): + this.$ = yyvstack[yysp - 1]; + this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-) + + + // TODO ... + yyparser.yyError(rmCommonWS` + %options don't seem terminated? + + Erroneous area: + ${yylexer.prettyPrintRange(yylstack[yysp], yylstack[yysp - 1])} + `); + break; + +case 42: + /*! Production:: option_list : option_list option */ +case 59: + /*! Production:: token_list : token_list symbol */ +case 70: + /*! Production:: id_list : id_list id */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = yyvstack[yysp - 1]; this.$.push(yyvstack[yysp]); + break; + +case 43: + /*! Production:: option_list : option */ +case 60: + /*! Production:: token_list : symbol */ +case 71: + /*! Production:: id_list : id */ +case 83: + /*! Production:: handle_list : handle_action */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yylstack[yysp]; + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = [yyvstack[yysp]]; + break; + +case 44: + /*! Production:: option : NAME */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yylstack[yysp]; + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = [yyvstack[yysp], true]; + break; + +case 45: + /*! Production:: option : NAME "=" OPTION_STRING_VALUE */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 2, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = [yyvstack[yysp - 2], yyvstack[yysp]]; + break; + +case 46: + /*! Production:: option : NAME "=" OPTION_VALUE */ +case 47: + /*! 
Production:: option : NAME "=" NAME */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 2, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = [yyvstack[yysp - 2], parseValue(yyvstack[yysp])]; + break; + +case 48: + /*! Production:: option : NAME "=" error */ + + // default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-): + this.$ = yyvstack[yysp - 2]; + this._$ = yyparser.yyMergeLocationInfo(yysp - 2, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-) + + + // TODO ... + yyparser.yyError(rmCommonWS` + named %option value error for ${yyvstack[yysp - 2]}? + + Erroneous area: + ${yylexer.prettyPrintRange(yylstack[yysp], yylstack[yysp - 2])} + `); + break; + +case 49: + /*! Production:: option : NAME error */ + + // default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-): + this.$ = yyvstack[yysp - 1]; + this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-) + + + // TODO ... + yyparser.yyError(rmCommonWS` + named %option value assignment error? + + Erroneous area: + ${yylexer.prettyPrintRange(yylstack[yysp], yylstack[yysp - 1])} + `); + break; + +case 50: + /*! Production:: parse_params : PARSE_PARAM token_list */ +case 52: + /*! Production:: parser_type : PARSER_TYPE symbol */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = yyvstack[yysp]; + break; + +case 51: + /*! Production:: parse_params : PARSE_PARAM error */ + + // default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-): + this.$ = yyvstack[yysp - 1]; + this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-) + + + // TODO ... + yyparser.yyError(rmCommonWS` + %parse-params declaration error? + + Erroneous area: + ${yylexer.prettyPrintRange(yylstack[yysp], yylstack[yysp - 1])} + `); + break; + +case 53: + /*! Production:: parser_type : PARSER_TYPE error */ + + // default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-): + this.$ = yyvstack[yysp - 1]; + this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-) + + + // TODO ... + yyparser.yyError(rmCommonWS` + %parser-type declaration error? + + Erroneous area: + ${yylexer.prettyPrintRange(yylstack[yysp], yylstack[yysp - 1])} + `); + break; + +case 54: + /*! Production:: operator : associativity token_list */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = [yyvstack[yysp - 1]]; this.$.push.apply(this.$, yyvstack[yysp]); + break; + +case 55: + /*! 
Production:: operator : associativity error */ + + // default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-): + this.$ = yyvstack[yysp - 1]; + this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-) + + + // TODO ... + yyparser.yyError(rmCommonWS` + operator token list error in an associativity statement? + + Erroneous area: + ${yylexer.prettyPrintRange(yylstack[yysp], yylstack[yysp - 1])} + `); + break; + +case 56: + /*! Production:: associativity : LEFT */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yylstack[yysp]; + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = 'left'; + break; + +case 57: + /*! Production:: associativity : RIGHT */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yylstack[yysp]; + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = 'right'; + break; + +case 58: + /*! Production:: associativity : NONASSOC */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yylstack[yysp]; + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = 'nonassoc'; + break; + +case 61: + /*! Production:: full_token_definitions : optional_token_type id_list */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + var rv = []; + var lst = yyvstack[yysp]; + for (var i = 0, len = lst.length; i < len; i++) { + var id = lst[i]; + var m = {id: id}; + if (yyvstack[yysp - 1]) { + m.type = yyvstack[yysp - 1]; + } + rv.push(m); + } + this.$ = rv; + break; + +case 62: + /*! Production:: full_token_definitions : optional_token_type one_full_token */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + var m = yyvstack[yysp]; + if (yyvstack[yysp - 1]) { + m.type = yyvstack[yysp - 1]; + } + this.$ = [m]; + break; + +case 63: + /*! Production:: one_full_token : id token_value token_description */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 2, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = { + id: yyvstack[yysp - 2], + value: yyvstack[yysp - 1], + description: yyvstack[yysp] + }; + break; + +case 64: + /*! Production:: one_full_token : id token_description */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = { + id: yyvstack[yysp - 1], + description: yyvstack[yysp] + }; + break; + +case 65: + /*! 
Production:: one_full_token : id token_value */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = { + id: yyvstack[yysp - 1], + value: yyvstack[yysp] + }; + break; + +case 66: + /*! Production:: optional_token_type : %epsilon */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(null, null, null, null, true); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = false; + break; + +case 72: + /*! Production:: grammar : optional_action_header_block production_list */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = yyvstack[yysp - 1]; + this.$.grammar = yyvstack[yysp]; + break; + +case 73: + /*! Production:: production_list : production_list production */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = yyvstack[yysp - 1]; + if (yyvstack[yysp][0] in this.$) { + this.$[yyvstack[yysp][0]] = this.$[yyvstack[yysp][0]].concat(yyvstack[yysp][1]); + } else { + this.$[yyvstack[yysp][0]] = yyvstack[yysp][1]; + } + break; + +case 74: + /*! Production:: production_list : production */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yylstack[yysp]; + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = {}; this.$[yyvstack[yysp][0]] = yyvstack[yysp][1]; + break; + +case 75: + /*! Production:: production : production_id handle_list ";" */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 2, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = [yyvstack[yysp - 2], yyvstack[yysp - 1]]; + break; + +case 76: + /*! Production:: production : production_id error ";" */ + + // default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-): + this.$ = yyvstack[yysp - 2]; + this._$ = yyparser.yyMergeLocationInfo(yysp - 2, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-) + + + // TODO ... + yyparser.yyError(rmCommonWS` + rule production declaration error? + + Erroneous area: + ${yylexer.prettyPrintRange(yylstack[yysp - 1], yylstack[yysp - 2])} + `); + break; + +case 77: + /*! Production:: production : production_id error */ + + // default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-): + this.$ = yyvstack[yysp - 1]; + this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-) + + + // TODO ... + yyparser.yyError(rmCommonWS` + rule production declaration error: did you terminate the rule production set with a semicolon? + + Erroneous area: + ${yylexer.prettyPrintRange(yylstack[yysp], yylstack[yysp - 1])} + `); + break; + +case 78: + /*! 
Production:: production_id : id optional_production_description ":" */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 2, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = yyvstack[yysp - 2]; + + // TODO: carry rule description support into the parser generator... + break; + +case 79: + /*! Production:: production_id : id optional_production_description error */ + + // default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-): + this.$ = yyvstack[yysp - 2]; + this._$ = yyparser.yyMergeLocationInfo(yysp - 2, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-) + + + // TODO ... + yyparser.yyError(rmCommonWS` + rule id should be followed by a colon, but that one seems missing? + + Erroneous area: + ${yylexer.prettyPrintRange(yylstack[yysp], yylstack[yysp - 2])} + `); + break; + +case 81: + /*! Production:: optional_production_description : %epsilon */ + + // default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-): + this.$ = undefined; + this._$ = yyparser.yyMergeLocationInfo(null, null, null, null, true); + // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-) + break; + +case 82: + /*! Production:: handle_list : handle_list "|" handle_action */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 2, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = yyvstack[yysp - 2]; + this.$.push(yyvstack[yysp]); + break; + +case 84: + /*! Production:: handle_list : handle_list "|" error */ + + // default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-): + this.$ = yyvstack[yysp - 2]; + this._$ = yyparser.yyMergeLocationInfo(yysp - 2, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-) + + + // TODO ... + yyparser.yyError(rmCommonWS` + rule alternative production declaration error? + + Erroneous area: + ${yylexer.prettyPrintRange(yylstack[yysp], yylstack[yysp - 2])} + `); + break; + +case 85: + /*! Production:: handle_list : handle_list ":" error */ + + // default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-): + this.$ = yyvstack[yysp - 2]; + this._$ = yyparser.yyMergeLocationInfo(yysp - 2, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-) + + + // TODO ... + yyparser.yyError(rmCommonWS` + multiple alternative rule productions should be separated by a '|' pipe character, not a ':' colon! + + Erroneous area: + ${yylexer.prettyPrintRange(yylstack[yysp], yylstack[yysp - 2])} + `); + break; + +case 86: + /*! Production:: handle_action : handle prec action */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 2, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = [(yyvstack[yysp - 2].length ? 
yyvstack[yysp - 2].join(' ') : '')]; + if (yyvstack[yysp]) { + var rv = checkActionBlock(yyvstack[yysp], yylstack[yysp]); + if (rv) { + yyparser.yyError(rmCommonWS` + production rule action code block does not compile: ${rv} + + Erroneous area: + ${yylexer.prettyPrintRange(yylstack[yysp], yylstack[yysp - 2])} + `); + } + this.$.push(yyvstack[yysp]); + } + if (yyvstack[yysp - 1]) { + if (yyvstack[yysp - 2].length === 0) { + yyparser.yyError(rmCommonWS` + You cannot specify a precedence override for an epsilon (a.k.a. empty) rule! + + Erroneous area: + ${yylexer.prettyPrintRange(yylstack[yysp - 2])} + `); + } + this.$.push(yyvstack[yysp - 1]); + } + if (this.$.length === 1) { + this.$ = this.$[0]; + } + break; + +case 87: + /*! Production:: handle_action : EPSILON action */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = ['']; + if (yyvstack[yysp]) { + var rv = checkActionBlock(yyvstack[yysp], yylstack[yysp]); + if (rv) { + yyparser.yyError(rmCommonWS` + epsilon production rule action code block does not compile: ${rv} + + Erroneous area: + ${yylexer.prettyPrintRange(yylstack[yysp], yylstack[yysp - 1])} + `); + } + this.$.push(yyvstack[yysp]); + } + if (this.$.length === 1) { + this.$ = this.$[0]; + } + break; + +case 88: + /*! Production:: handle_action : EPSILON error */ + + // default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-): + this.$ = yyvstack[yysp - 1]; + this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-) + + + // TODO ... + yyparser.yyError(rmCommonWS` + %epsilon rule action declaration error? + + Erroneous area: + ${yylexer.prettyPrintRange(yylstack[yysp], yylstack[yysp - 1])} + `); + break; + +case 89: + /*! Production:: handle : handle suffixed_expression */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = yyvstack[yysp - 1]; + this.$.push(yyvstack[yysp]); + break; + +case 90: + /*! Production:: handle : %epsilon */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(null, null, null, null, true); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = []; + break; + +case 91: + /*! Production:: handle_sublist : handle_sublist "|" handle */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 2, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = yyvstack[yysp - 2]; + this.$.push(yyvstack[yysp].join(' ')); + break; + +case 92: + /*! Production:: handle_sublist : handle */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yylstack[yysp]; + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = [yyvstack[yysp].join(' ')]; + break; + +case 93: + /*! 
Production:: suffixed_expression : expression suffix ALIAS */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 2, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = yyvstack[yysp - 2] + yyvstack[yysp - 1] + "[" + yyvstack[yysp] + "]"; + break; + +case 94: + /*! Production:: suffixed_expression : expression suffix */ +case 123: + /*! Production:: action_comments_body : action_comments_body ACTION_BODY */ +case 129: + /*! Production:: module_code_chunk : module_code_chunk CODE */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = yyvstack[yysp - 1] + yyvstack[yysp]; + break; + +case 96: + /*! Production:: expression : EOF_ID */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yylstack[yysp]; + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = '$end'; + break; + +case 97: + /*! Production:: expression : STRING */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yylstack[yysp]; + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + // Re-encode the string *anyway* as it will + // be made part of the rule rhs a.k.a. production (type: *string*) again and we want + // to be able to handle all tokens, including *significant space* + // encoded as literal tokens in a grammar such as this: `rule: A ' ' B`. + this.$ = dquote(yyvstack[yysp]); + break; + +case 98: + /*! Production:: expression : "(" handle_sublist ")" */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 2, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = '(' + yyvstack[yysp - 1].join(' | ') + ')'; + break; + +case 99: + /*! Production:: expression : "(" handle_sublist error */ + + // default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-): + this.$ = yyvstack[yysp - 2]; + this._$ = yyparser.yyMergeLocationInfo(yysp - 2, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-) + + + yyparser.yyError(rmCommonWS` + Seems you did not correctly bracket a grammar rule sublist in '( ... )' brackets. + + Erroneous area: + ${yylexer.prettyPrintRange(yylstack[yysp], yylstack[yysp - 2])} + `); + break; + +case 104: + /*! Production:: prec : PREC symbol */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = { prec: yyvstack[yysp] }; + break; + +case 105: + /*! Production:: prec : PREC error */ + + // default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-): + this.$ = yyvstack[yysp - 1]; + this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-) + + + // TODO ... + yyparser.yyError(rmCommonWS` + %prec precedence override declaration error? 
+ + Erroneous precedence declaration: + ${yylexer.prettyPrintRange(yylstack[yysp], yylstack[yysp - 1])} + `); + break; + +case 106: + /*! Production:: prec : %epsilon */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(null, null, null, null, true); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = null; + break; + +case 111: + /*! Production:: action_ne : "{" action_body error */ + + // default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-): + this.$ = yyvstack[yysp - 2]; + this._$ = yyparser.yyMergeLocationInfo(yysp - 2, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-) + + + yyparser.yyError(rmCommonWS` + Seems you did not correctly bracket a parser rule action block in curly braces: '{ ... }'. + + Erroneous area: + ${yylexer.prettyPrintRange(yylstack[yysp], yylstack[yysp - 2])} + `); + break; + +case 115: + /*! Production:: action : ARROW_ACTION */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yylstack[yysp]; + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = '$$ = ' + yyvstack[yysp]; + break; + +case 119: + /*! Production:: action_body : action_body "{" action_body "}" action_comments_body */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 4, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = yyvstack[yysp - 4] + yyvstack[yysp - 3] + yyvstack[yysp - 2] + yyvstack[yysp - 1] + yyvstack[yysp]; + break; + +case 120: + /*! Production:: action_body : action_body "{" action_body "}" */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 3, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = yyvstack[yysp - 3] + yyvstack[yysp - 2] + yyvstack[yysp - 1] + yyvstack[yysp]; + break; + +case 121: + /*! Production:: action_body : action_body "{" action_body error */ + + // default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-): + this.$ = yyvstack[yysp - 3]; + this._$ = yyparser.yyMergeLocationInfo(yysp - 3, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-) + + + yyparser.yyError(rmCommonWS` + Seems you did not correctly match curly braces '{ ... }' in a parser rule action block. + + Erroneous area: + ${yylexer.prettyPrintRange(yylstack[yysp], yylstack[yysp - 2])} + `); + break; + +case 125: + /*! Production:: extra_parser_module_code : optional_module_code_chunk include_macro_code extra_parser_module_code */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 2, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = yyvstack[yysp - 2] + yyvstack[yysp - 1] + yyvstack[yysp]; + break; + +case 126: + /*! 
Production:: include_macro_code : INCLUDE PATH */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + var fileContent = fs.readFileSync(yyvstack[yysp], { encoding: 'utf-8' }); + var rv = checkActionBlock(fileContent); + if (rv) { + yyparser.yyError(rmCommonWS` + included action code file "${$PATH}" does not compile: ${rv} + + Erroneous area: + ${yylexer.prettyPrintRange(yylstack[yysp], yylstack[yysp - 1])} + `); + } + // And no, we don't support nested '%include': + this.$ = '\n// Included by Jison: ' + yyvstack[yysp] + ':\n\n' + fileContent + '\n\n// End Of Include by Jison: ' + yyvstack[yysp] + '\n\n'; + break; + +case 127: + /*! Production:: include_macro_code : INCLUDE error */ + + // default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-): + this.$ = yyvstack[yysp - 1]; + this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-) + + + yyparser.yyError(rmCommonWS` + %include MUST be followed by a valid file path. + + Erroneous path: + ` + yylexer.prettyPrintRange(yylstack[yysp], yylstack[yysp - 1])); + break; + +case 130: + /*! Production:: module_code_chunk : error */ + + // default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-): + this.$ = yyvstack[yysp]; + this._$ = yylstack[yysp]; + // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-) + + + // TODO ... + yyparser.yyError(rmCommonWS` + module code declaration error? + + Erroneous area: + ` + yylexer.prettyPrintRange(yylstack[yysp])); + break; + +case 164: // === NO_ACTION[1] :: ensures that anyone (but us) using this new state will fail dramatically! + // error recovery reduction action (action generated by jison, + // using the user-specified `%code error_recovery_reduction` %{...%} + // code chunk below. 
+ + + break; + +} +}, +table: bt({ + len: u([ + 20, + 1, + 25, + 5, + 19, + 18, + 3, + 18, + 18, + 5, + s, + [18, 8], + 4, + 5, + 6, + 2, + s, + [6, 4, -1], + 3, + 3, + 4, + 8, + 1, + 18, + 18, + 26, + c, + [18, 3], + 1, + 4, + 21, + 3, + 3, + 5, + 5, + s, + [3, 3], + 22, + 18, + 20, + 25, + 25, + 24, + 24, + 22, + s, + [18, 3], + 3, + 19, + 2, + 4, + 1, + 1, + 7, + 7, + c, + [40, 3], + 17, + 4, + 20, + 18, + 23, + s, + [18, 6], + 6, + 21, + 21, + 18, + 20, + 18, + 2, + 18, + 4, + 2, + s, + [1, 3], + s, + [3, 4], + 4, + 3, + 5, + 3, + 15, + 11, + 2, + 2, + 19, + 20, + 18, + c, + [104, 3], + 4, + 4, + s, + [2, 4], + 7, + 3, + 4, + 16, + 1, + 4, + 10, + 14, + c, + [122, 3], + 18, + 18, + 9, + s, + [3, 4], + 14, + 14, + 18, + 21, + 21, + 6, + 4, + c, + [50, 5], + 7, + 7, + s, + [15, 4], + 3, + 9, + 3, + 14, + 18, + 18, + 8, + 5, + 3, + 9, + 4 +]), + symbol: u([ + 2, + s, + [14, 10, 1], + 27, + s, + [31, 5, 1], + 44, + 47, + 50, + 1, + c, + [21, 18], + 51, + 55, + s, + [58, 4, 1], + 89, + 15, + 24, + 44, + 49, + 69, + c, + [31, 19], + c, + [18, 19], + 24, + 83, + c, + [39, 38], + 36, + 63, + 65, + c, + [41, 37], + c, + [18, 108], + 24, + 26, + 53, + 2, + 24, + 25, + 26, + 52, + c, + [9, 3], + 62, + 82, + 83, + 2, + 45, + c, + [8, 7], + 24, + 26, + c, + [5, 3], + 25, + 56, + 57, + c, + [9, 3], + c, + [3, 6], + c, + [266, 3], + 48, + c, + [275, 3], + 70, + 71, + 72, + 83, + 89, + c, + [278, 38], + 4, + 5, + 6, + 12, + s, + [14, 11, 1], + 26, + c, + [24, 6], + 37, + 42, + c, + [152, 37], + 24, + 64, + 68, + 83, + 24, + c, + [119, 3], + 54, + c, + [27, 11], + c, + [67, 8], + 44, + 54, + c, + [147, 6], + 12, + 15, + 44, + 84, + 89, + c, + [5, 8], + c, + [3, 6], + c, + [46, 20], + c, + [201, 3], + c, + [113, 28], + c, + [40, 9], + c, + [177, 23], + c, + [176, 3], + c, + [25, 24], + 1, + c, + [26, 4], + c, + [25, 11], + c, + [73, 7], + 46, + c, + [24, 24], + c, + [158, 51], + c, + [18, 25], + 25, + 28, + 57, + c, + [21, 12], + 28, + c, + [22, 8], + 2, + 3, + 25, + 28, + s, + [1, 3], + 2, + 44, + 46, + 88, + 90, + 91, + c, + [425, 3], + 24, + c, + [433, 3], + c, + [440, 3], + c, + [3, 3], + c, + [13, 4], + c, + [153, 4], + 7, + 12, + 15, + 24, + 26, + 38, + 40, + 41, + 42, + 44, + 74, + 75, + 76, + 2, + 5, + 26, + 73, + c, + [151, 12], + c, + [94, 7], + c, + [307, 38], + 37, + 44, + 66, + 67, + c, + [685, 109], + 12, + 13, + 43, + 86, + 87, + c, + [349, 14], + c, + [445, 11], + c, + [84, 46], + c, + [504, 10], + c, + [348, 19], + c, + [58, 19], + 25, + 29, + 30, + c, + [346, 5], + 1, + 44, + 89, + 1, + c, + [483, 3], + c, + [3, 6], + c, + [339, 3], + c, + [121, 3], + c, + [496, 3], + c, + [8, 5], + c, + [349, 8], + c, + [348, 4], + 78, + 79, + 81, + c, + [568, 5], + 15, + 42, + 44, + 84, + 85, + 89, + 2, + 5, + 2, + 5, + c, + [359, 19], + c, + [19, 11], + c, + [142, 8], + c, + [337, 30], + c, + [180, 26], + c, + [284, 3], + c, + [287, 4], + c, + [4, 4], + 25, + 28, + 25, + 28, + c, + [4, 4], + c, + [517, 8], + c, + [168, 6], + c, + [507, 14], + c, + [506, 3], + c, + [189, 7], + c, + [162, 8], + s, + [4, 5, 1], + c, + [190, 8], + c, + [1024, 6], + s, + [4, 9, 1], + c, + [22, 3], + s, + [39, 4, 1], + 44, + 80, + c, + [19, 18], + c, + [18, 37], + c, + [16, 3], + c, + [88, 3], + 76, + 77, + c, + [292, 6], + c, + [3, 6], + c, + [144, 14], + c, + [14, 15], + c, + [480, 39], + c, + [21, 21], + c, + [549, 6], + c, + [6, 3], + 1, + c, + [111, 12], + c, + [234, 7], + c, + [7, 7], + c, + [238, 10], + c, + [179, 11], + c, + [15, 40], + 6, + 8, + c, + [209, 7], + 78, + 79, + c, + [374, 4], + c, + [313, 14], + c, + 
[271, 43], + c, + [164, 4], + c, + [169, 4], + c, + [78, 12], + 43 +]), + type: u([ + s, + [2, 18], + 0, + 0, + 1, + c, + [21, 20], + s, + [0, 5], + c, + [10, 5], + s, + [2, 39], + c, + [40, 41], + c, + [41, 40], + s, + [2, 108], + c, + [148, 5], + c, + [239, 6], + c, + [159, 6], + c, + [253, 10], + c, + [176, 14], + c, + [36, 7], + c, + [197, 102], + c, + [103, 7], + c, + [108, 21], + c, + [21, 10], + c, + [423, 36], + c, + [373, 149], + c, + [158, 67], + c, + [57, 32], + c, + [322, 8], + c, + [98, 26], + c, + [489, 7], + c, + [721, 173], + c, + [462, 131], + c, + [130, 37], + c, + [375, 11], + c, + [818, 45], + c, + [223, 79], + c, + [124, 24], + c, + [986, 15], + c, + [38, 19], + c, + [57, 20], + c, + [157, 62], + c, + [443, 106], + c, + [106, 103], + c, + [103, 62], + c, + [1248, 16], + c, + [78, 6] +]), + state: u([ + 1, + 2, + 5, + 14, + 12, + 13, + 8, + 20, + 11, + 29, + 28, + 31, + 34, + 36, + 38, + 42, + 47, + 49, + 50, + 54, + 49, + 50, + 56, + 50, + 58, + 60, + 62, + 65, + 68, + 69, + 70, + 67, + 72, + 71, + 73, + 74, + 78, + 79, + 82, + 83, + 82, + 84, + 50, + 84, + 50, + 86, + 92, + 94, + 93, + 97, + 69, + 70, + 98, + 100, + 101, + 103, + 105, + 106, + 107, + 110, + 111, + 117, + 124, + 126, + 123, + 133, + 131, + 82, + 137, + 142, + 94, + 93, + 143, + 101, + 133, + 146, + 82, + 147, + 50, + 149, + 154, + 153, + 155, + 111, + 124, + 126, + 162, + 163, + 124, + 126 +]), + mode: u([ + s, + [2, 18], + s, + [1, 18], + c, + [21, 4], + s, + [2, 36], + c, + [42, 5], + c, + [38, 34], + c, + [77, 38], + s, + [2, 108], + s, + [1, 20], + c, + [30, 15], + c, + [134, 100], + c, + [106, 4], + c, + [335, 26], + c, + [151, 16], + c, + [376, 48], + c, + [347, 120], + c, + [63, 75], + c, + [13, 9], + c, + [23, 4], + c, + [4, 3], + c, + [587, 6], + c, + [427, 12], + c, + [9, 15], + c, + [335, 13], + c, + [389, 39], + c, + [45, 43], + c, + [509, 77], + c, + [762, 121], + c, + [129, 9], + c, + [756, 14], + c, + [334, 14], + c, + [41, 6], + c, + [367, 5], + c, + [784, 37], + c, + [208, 63], + c, + [1142, 20], + c, + [1081, 10], + c, + [487, 14], + c, + [22, 9], + c, + [151, 17], + c, + [221, 10], + c, + [803, 156], + c, + [318, 61], + c, + [216, 50], + c, + [457, 7], + c, + [455, 38], + c, + [123, 34], + c, + [1206, 8], + 1 +]), + goto: u([ + s, + [10, 18], + 4, + 3, + 10, + 6, + 7, + 9, + s, + [15, 5, 1], + 24, + 22, + 23, + 25, + 26, + 27, + 21, + s, + [6, 3], + 30, + s, + [11, 18], + s, + [9, 18], + 32, + 33, + s, + [13, 18], + s, + [14, 18], + 35, + 66, + 37, + s, + [16, 18], + s, + [17, 18], + s, + [18, 18], + s, + [19, 18], + s, + [20, 18], + s, + [21, 18], + s, + [22, 18], + s, + [23, 18], + 39, + 40, + 41, + s, + [43, 4, 1], + 48, + 33, + 51, + 53, + 52, + 55, + 33, + 51, + 57, + 33, + 51, + 59, + 61, + s, + [56, 3], + s, + [57, 3], + s, + [58, 3], + 4, + 63, + 64, + 66, + 33, + 21, + 3, + s, + [12, 18], + s, + [29, 18], + s, + [109, 26], + s, + [15, 18], + s, + [30, 18], + 33, + 67, + 75, + 76, + 77, + s, + [31, 11], + c, + [13, 9], + s, + [35, 3], + s, + [36, 3], + 80, + 81, + 21, + c, + [3, 3], + s, + [32, 3], + s, + [33, 3], + s, + [34, 3], + s, + [54, 11], + 33, + 51, + s, + [54, 7], + s, + [55, 18], + s, + [60, 20], + s, + [107, 25], + s, + [108, 25], + s, + [126, 24], + s, + [127, 24], + s, + [50, 11], + 33, + 51, + s, + [50, 7], + s, + [51, 18], + s, + [52, 18], + s, + [53, 18], + 61, + 85, + s, + [41, 12], + 87, + s, + [41, 6], + 43, + 43, + 89, + 88, + 44, + 44, + 90, + 91, + 132, + 96, + 132, + 95, + s, + [72, 3], + 33, + s, + [7, 3], + s, + [8, 3], + s, + [74, 4], + 99, + s, + 
[90, 8], + 102, + s, + [90, 4], + 81, + 81, + 104, + s, + [61, 11], + 33, + s, + [61, 7], + s, + [62, 18], + s, + [71, 12], + 109, + s, + [71, 6], + 108, + 71, + s, + [24, 18], + s, + [25, 18], + s, + [37, 18], + s, + [38, 18], + s, + [26, 18], + s, + [27, 18], + s, + [117, 3], + s, + [112, 22], + s, + [113, 21], + s, + [28, 18], + s, + [59, 20], + s, + [39, 18], + 42, + 42, + s, + [40, 18], + 116, + 115, + 113, + 114, + 49, + 49, + 1, + 2, + 5, + 124, + 21, + 131, + 131, + 118, + s, + [128, 3], + s, + [130, 3], + s, + [73, 4], + 119, + 121, + 120, + 77, + 77, + 122, + 77, + 77, + s, + [83, 3], + s, + [106, 3], + 130, + 106, + 106, + 127, + 129, + 128, + 125, + 106, + 106, + 132, + s, + [116, 3], + 80, + 81, + 134, + 21, + 136, + 135, + 80, + 80, + s, + [70, 19], + s, + [65, 11], + 109, + s, + [65, 7], + s, + [64, 18], + s, + [68, 19], + s, + [69, 18], + 139, + 140, + 138, + s, + [118, 3], + 141, + s, + [122, 4], + 45, + 45, + 46, + 46, + 47, + 47, + 48, + 48, + c, + [494, 4], + s, + [129, 3], + s, + [75, 4], + 144, + c, + [487, 13], + 145, + s, + [76, 4], + c, + [153, 7], + s, + [89, 14], + 148, + 33, + 51, + s, + [100, 6], + 150, + 151, + 152, + s, + [100, 9], + s, + [95, 18], + s, + [96, 18], + s, + [97, 18], + s, + [90, 7], + s, + [87, 3], + s, + [88, 3], + s, + [114, 3], + s, + [115, 3], + s, + [78, 14], + s, + [79, 14], + s, + [63, 18], + s, + [110, 21], + s, + [111, 21], + c, + [526, 4], + s, + [123, 4], + 125, + s, + [82, 3], + s, + [84, 3], + s, + [85, 3], + s, + [86, 3], + s, + [104, 7], + s, + [105, 7], + s, + [94, 10], + 156, + s, + [94, 4], + s, + [101, 15], + s, + [102, 15], + s, + [103, 15], + 158, + 159, + 157, + 92, + 92, + 130, + 92, + c, + [465, 3], + 161, + 140, + 160, + s, + [93, 14], + s, + [98, 18], + s, + [99, 18], + s, + [90, 7], + s, + [120, 3], + 112, + s, + [121, 3], + 91, + 91, + 130, + 91, + c, + [74, 3], + s, + [119, 3], + 141 +]) +}), +defaultActions: bda({ + idx: u([ + 0, + 3, + 5, + 7, + 8, + s, + [10, 8, 1], + 25, + 26, + 27, + s, + [30, 6, 1], + 37, + 40, + 41, + 44, + 45, + 46, + s, + [48, 6, 1], + 55, + 56, + 57, + 60, + 66, + 67, + 68, + 72, + s, + [74, 6, 1], + s, + [81, 7, 1], + s, + [89, 4, 1], + 95, + 96, + 97, + 100, + 104, + 105, + 107, + 108, + 109, + s, + [112, 5, 1], + 118, + 119, + 122, + 124, + s, + [127, 13, 1], + s, + [141, 8, 1], + 150, + 151, + 152, + s, + [156, 4, 1], + 161 +]), + goto: u([ + 10, + 6, + 9, + 13, + 14, + s, + [16, 8, 1], + 56, + 57, + 58, + 3, + 12, + 29, + 109, + 15, + 30, + 67, + 35, + 36, + 32, + 33, + 34, + 55, + 60, + 107, + 108, + 126, + 127, + 51, + 52, + 53, + 43, + 7, + 8, + 74, + 62, + 24, + 25, + 37, + 38, + 26, + 27, + 112, + 113, + 28, + 59, + 39, + 42, + 40, + 49, + 1, + 2, + 5, + 128, + 130, + 73, + 83, + 80, + 70, + 64, + 68, + 69, + 122, + s, + [45, 4, 1], + 129, + 75, + 76, + 89, + 95, + 96, + 97, + 90, + 87, + 88, + 114, + 115, + 78, + 79, + 63, + 110, + 111, + 123, + 125, + 82, + 84, + 85, + 86, + 104, + 105, + 101, + 102, + 103, + 93, + 98, + 99, + 90, + 121 +]) +}), +parseError: function parseError(str, hash, ExceptionClass) { + if (hash.recoverable && typeof this.trace === 'function') { + this.trace(str); + hash.destroy(); // destroy... well, *almost*! 
+ } else { + if (!ExceptionClass) { + ExceptionClass = this.JisonParserError; + } + throw new ExceptionClass(str, hash); + } +}, +parse: function parse(input) { + var self = this; + var stack = new Array(128); // token stack: stores token which leads to state at the same index (column storage) + var sstack = new Array(128); // state stack: stores states (column storage) + + var vstack = new Array(128); // semantic value stack + var lstack = new Array(128); // location stack + var table = this.table; + var sp = 0; // 'stack pointer': index into the stacks + var yyloc; + + var symbol = 0; + var preErrorSymbol = 0; + var lastEofErrorStateDepth = 0; + var recoveringErrorInfo = null; + var recovering = 0; // (only used when the grammar contains error recovery rules) + var TERROR = this.TERROR; + var EOF = this.EOF; + var ERROR_RECOVERY_TOKEN_DISCARD_COUNT = (this.options.errorRecoveryTokenDiscardCount | 0) || 3; + var NO_ACTION = [0, 164 /* === table.length :: ensures that anyone using this new state will fail dramatically! */]; + + var lexer; + if (this.__lexer__) { + lexer = this.__lexer__; + } else { + lexer = this.__lexer__ = Object.create(this.lexer); + } + + var sharedState_yy = { + parseError: undefined, + quoteName: undefined, + lexer: undefined, + parser: undefined, + pre_parse: undefined, + post_parse: undefined, + pre_lex: undefined, + post_lex: undefined // WARNING: must be written this way for the code expanders to work correctly in both ES5 and ES6 modes! + }; + + var ASSERT; + if (typeof assert !== 'function') { + ASSERT = function JisonAssert(cond, msg) { + if (!cond) { + throw new Error('assertion failed: ' + (msg || '***')); + } + }; + } else { + ASSERT = assert; + } + + this.yyGetSharedState = function yyGetSharedState() { + return sharedState_yy; + }; + + + this.yyGetErrorInfoTrack = function yyGetErrorInfoTrack() { + return recoveringErrorInfo; + }; + + + // shallow clone objects, straight copy of simple `src` values + // e.g. `lexer.yytext` MAY be a complex value object, + // rather than a simple string/value. + function shallow_copy(src) { + if (typeof src === 'object') { + var dst = {}; + for (var k in src) { + if (Object.prototype.hasOwnProperty.call(src, k)) { + dst[k] = src[k]; + } + } + return dst; + } + return src; + } + function shallow_copy_noclobber(dst, src) { + for (var k in src) { + if (typeof dst[k] === 'undefined' && Object.prototype.hasOwnProperty.call(src, k)) { + dst[k] = src[k]; + } + } + } + function copy_yylloc(loc) { + var rv = shallow_copy(loc); + if (rv && rv.range) { + rv.range = rv.range.slice(0); + } + return rv; + } + + // copy state + shallow_copy_noclobber(sharedState_yy, this.yy); + + sharedState_yy.lexer = lexer; + sharedState_yy.parser = this; + + + + + + // *Always* setup `yyError`, `YYRECOVERING`, `yyErrOk` and `yyClearIn` functions as it is paramount + // to have *their* closure match ours -- if we only set them up once, + // any subsequent `parse()` runs will fail in very obscure ways when + // these functions are invoked in the user action code block(s) as + // their closure will still refer to the `parse()` instance which set + // them up. Hence we MUST set them up at the start of every `parse()` run! + if (this.yyError) { + this.yyError = function yyError(str /*, ...args */) { + + + + + + + + + + + + var error_rule_depth = (this.options.parserErrorsAreRecoverable ? 
locateNearestErrorRecoveryRule(state) : -1); + var expected = this.collect_expected_token_set(state); + var hash = this.constructParseErrorInfo(str, null, expected, (error_rule_depth >= 0)); + // append to the old one? + if (recoveringErrorInfo) { + var esp = recoveringErrorInfo.info_stack_pointer; + + recoveringErrorInfo.symbol_stack[esp] = symbol; + var v = this.shallowCopyErrorInfo(hash); + v.yyError = true; + v.errorRuleDepth = error_rule_depth; + v.recovering = recovering; + // v.stackSampleLength = error_rule_depth + EXTRA_STACK_SAMPLE_DEPTH; + + recoveringErrorInfo.value_stack[esp] = v; + recoveringErrorInfo.location_stack[esp] = copy_yylloc(lexer.yylloc); + recoveringErrorInfo.state_stack[esp] = newState || NO_ACTION[1]; + + ++esp; + recoveringErrorInfo.info_stack_pointer = esp; + } else { + recoveringErrorInfo = this.shallowCopyErrorInfo(hash); + recoveringErrorInfo.yyError = true; + recoveringErrorInfo.errorRuleDepth = error_rule_depth; + recoveringErrorInfo.recovering = recovering; + } + + + // Add any extra args to the hash under the name `extra_error_attributes`: + var args = Array.prototype.slice.call(arguments, 1); + if (args.length) { + hash.extra_error_attributes = args; + } + + var r = this.parseError(str, hash, this.JisonParserError); + return r; + }; + } + + + + + + + + // Does the shared state override the default `parseError` that already comes with this instance? + if (typeof sharedState_yy.parseError === 'function') { + this.parseError = function parseErrorAlt(str, hash, ExceptionClass) { + if (!ExceptionClass) { + ExceptionClass = this.JisonParserError; + } + return sharedState_yy.parseError.call(this, str, hash, ExceptionClass); + }; + } else { + this.parseError = this.originalParseError; + } + + // Does the shared state override the default `quoteName` that already comes with this instance? + if (typeof sharedState_yy.quoteName === 'function') { + this.quoteName = function quoteNameAlt(id_str) { + return sharedState_yy.quoteName.call(this, id_str); + }; + } else { + this.quoteName = this.originalQuoteName; + } + + // set up the cleanup function; make it an API so that external code can re-use this one in case of + // calamities or when the `%options no-try-catch` option has been specified for the grammar, in which + // case this parse() API method doesn't come with a `finally { ... }` block any more! + // + // NOTE: as this API uses parse() as a closure, it MUST be set again on every parse() invocation, + // or else your `sharedState`, etc. references will be *wrong*! + this.cleanupAfterParse = function parser_cleanupAfterParse(resultValue, invoke_post_methods, do_not_nuke_errorinfos) { + var rv; + + if (invoke_post_methods) { + var hash; + + if (sharedState_yy.post_parse || this.post_parse) { + // create an error hash info instance: we re-use this API in a **non-error situation** + // as this one delivers all parser internals ready for access by userland code. + hash = this.constructParseErrorInfo(null /* no error! */, null /* no exception! 
*/, null, false); + } + + if (sharedState_yy.post_parse) { + rv = sharedState_yy.post_parse.call(this, sharedState_yy, resultValue, hash); + if (typeof rv !== 'undefined') resultValue = rv; + } + if (this.post_parse) { + rv = this.post_parse.call(this, sharedState_yy, resultValue, hash); + if (typeof rv !== 'undefined') resultValue = rv; + } + + // cleanup: + if (hash && hash.destroy) { + hash.destroy(); + } + } + + if (this.__reentrant_call_depth > 1) return resultValue; // do not (yet) kill the sharedState when this is a reentrant run. + + // clean up the lingering lexer structures as well: + if (lexer.cleanupAfterLex) { + lexer.cleanupAfterLex(do_not_nuke_errorinfos); + } + + // prevent lingering circular references from causing memory leaks: + if (sharedState_yy) { + sharedState_yy.lexer = undefined; + sharedState_yy.parser = undefined; + if (lexer.yy === sharedState_yy) { + lexer.yy = undefined; + } + } + sharedState_yy = undefined; + this.parseError = this.originalParseError; + this.quoteName = this.originalQuoteName; + + // nuke the vstack[] array at least as that one will still reference obsoleted user values. + // To be safe, we nuke the other internal stack columns as well... + stack.length = 0; // fastest way to nuke an array without overly bothering the GC + sstack.length = 0; + lstack.length = 0; + vstack.length = 0; + sp = 0; + + // nuke the error hash info instances created during this run. + // Userland code must COPY any data/references + // in the error hash instance(s) it is more permanently interested in. + if (!do_not_nuke_errorinfos) { + for (var i = this.__error_infos.length - 1; i >= 0; i--) { + var el = this.__error_infos[i]; + if (el && typeof el.destroy === 'function') { + el.destroy(); + } + } + this.__error_infos.length = 0; + + + for (var i = this.__error_recovery_infos.length - 1; i >= 0; i--) { + var el = this.__error_recovery_infos[i]; + if (el && typeof el.destroy === 'function') { + el.destroy(); + } + } + this.__error_recovery_infos.length = 0; + + if (recoveringErrorInfo && typeof recoveringErrorInfo.destroy === 'function') { + recoveringErrorInfo.destroy(); + recoveringErrorInfo = undefined; + } + + + } + + return resultValue; + }; + + // merge yylloc info into a new yylloc instance. + // + // `first_index` and `last_index` MAY be UNDEFINED/NULL or these are indexes into the `lstack[]` location stack array. + // + // `first_yylloc` and `last_yylloc` MAY be UNDEFINED/NULL or explicit (custom or regular) `yylloc` instances, in which + // case these override the corresponding first/last indexes. + // + // `dont_look_back` is an optional flag (default: FALSE), which instructs this merge operation NOT to search + // through the parse location stack for a location, which would otherwise be used to construct the new (epsilon!) + // yylloc info. + // + // Note: epsilon rule's yylloc situation is detected by passing both `first_index` and `first_yylloc` as UNDEFINED/NULL. 
+ this.yyMergeLocationInfo = function parser_yyMergeLocationInfo(first_index, last_index, first_yylloc, last_yylloc, dont_look_back) { + var i1 = first_index | 0, + i2 = last_index | 0; + var l1 = first_yylloc, + l2 = last_yylloc; + var rv; + + // rules: + // - first/last yylloc entries override first/last indexes + + if (!l1) { + if (first_index != null) { + for (var i = i1; i <= i2; i++) { + l1 = lstack[i]; + if (l1) { + break; + } + } + } + } + + if (!l2) { + if (last_index != null) { + for (var i = i2; i >= i1; i--) { + l2 = lstack[i]; + if (l2) { + break; + } + } + } + } + + // - detect if an epsilon rule is being processed and act accordingly: + if (!l1 && first_index == null) { + // epsilon rule span merger. With optional look-ahead in l2. + if (!dont_look_back) { + for (var i = (i1 || sp) - 1; i >= 0; i--) { + l1 = lstack[i]; + if (l1) { + break; + } + } + } + if (!l1) { + if (!l2) { + // when we still don't have any valid yylloc info, we're looking at an epsilon rule + // without look-ahead and no preceding terms and/or `dont_look_back` set: + // in that case we ca do nothing but return NULL/UNDEFINED: + return undefined; + } else { + // shallow-copy L2: after all, we MAY be looking + // at unconventional yylloc info objects... + rv = shallow_copy(l2); + if (rv.range) { + // shallow copy the yylloc ranges info to prevent us from modifying the original arguments' entries: + rv.range = rv.range.slice(0); + } + return rv; + } + } else { + // shallow-copy L1, then adjust first col/row 1 column past the end. + rv = shallow_copy(l1); + rv.first_line = rv.last_line; + rv.first_column = rv.last_column; + if (rv.range) { + // shallow copy the yylloc ranges info to prevent us from modifying the original arguments' entries: + rv.range = rv.range.slice(0); + rv.range[0] = rv.range[1]; + } + + if (l2) { + // shallow-mixin L2, then adjust last col/row accordingly. + shallow_copy_noclobber(rv, l2); + rv.last_line = l2.last_line; + rv.last_column = l2.last_column; + if (rv.range && l2.range) { + rv.range[1] = l2.range[1]; + } + } + return rv; + } + } + + if (!l1) { + l1 = l2; + l2 = null; + } + if (!l1) { + return undefined; + } + + // shallow-copy L1|L2, before we try to adjust the yylloc values: after all, we MAY be looking + // at unconventional yylloc info objects... + rv = shallow_copy(l1); + + // first_line: ..., + // first_column: ..., + // last_line: ..., + // last_column: ..., + if (rv.range) { + // shallow copy the yylloc ranges info to prevent us from modifying the original arguments' entries: + rv.range = rv.range.slice(0); + } + + if (l2) { + shallow_copy_noclobber(rv, l2); + rv.last_line = l2.last_line; + rv.last_column = l2.last_column; + if (rv.range && l2.range) { + rv.range[1] = l2.range[1]; + } + } + + return rv; + }; + + // NOTE: as this API uses parse() as a closure, it MUST be set again on every parse() invocation, + // or else your `lexer`, `sharedState`, etc. references will be *wrong*! 
+ this.constructParseErrorInfo = function parser_constructParseErrorInfo(msg, ex, expected, recoverable) { + var pei = { + errStr: msg, + exception: ex, + text: lexer.match, + value: lexer.yytext, + token: this.describeSymbol(symbol) || symbol, + token_id: symbol, + line: lexer.yylineno, + loc: copy_yylloc(lexer.yylloc), + expected: expected, + recoverable: recoverable, + state: state, + action: action, + new_state: newState, + symbol_stack: stack, + state_stack: sstack, + value_stack: vstack, + location_stack: lstack, + stack_pointer: sp, + yy: sharedState_yy, + lexer: lexer, + parser: this, + + // and make sure the error info doesn't stay due to potential + // ref cycle via userland code manipulations. + // These would otherwise all be memory leak opportunities! + // + // Note that only array and object references are nuked as those + // constitute the set of elements which can produce a cyclic ref. + // The rest of the members is kept intact as they are harmless. + destroy: function destructParseErrorInfo() { + // remove cyclic references added to error info: + // info.yy = null; + // info.lexer = null; + // info.value = null; + // info.value_stack = null; + // ... + var rec = !!this.recoverable; + for (var key in this) { + if (this.hasOwnProperty(key) && typeof key === 'object') { + this[key] = undefined; + } + } + this.recoverable = rec; + } + }; + // track this instance so we can `destroy()` it once we deem it superfluous and ready for garbage collection! + this.__error_infos.push(pei); + return pei; + }; + + // clone some parts of the (possibly enhanced!) errorInfo object + // to give them some persistence. + this.shallowCopyErrorInfo = function parser_shallowCopyErrorInfo(p) { + var rv = shallow_copy(p); + + // remove the large parts which can only cause cyclic references + // and are otherwise available from the parser kernel anyway. + delete rv.sharedState_yy; + delete rv.parser; + delete rv.lexer; + + // lexer.yytext MAY be a complex value object, rather than a simple string/value: + rv.value = shallow_copy(rv.value); + + // yylloc info: + rv.loc = copy_yylloc(rv.loc); + + // the 'expected' set won't be modified, so no need to clone it: + //rv.expected = rv.expected.slice(0); + + //symbol stack is a simple array: + rv.symbol_stack = rv.symbol_stack.slice(0); + // ditto for state stack: + rv.state_stack = rv.state_stack.slice(0); + // clone the yylloc's in the location stack?: + rv.location_stack = rv.location_stack.map(copy_yylloc); + // and the value stack may carry both simple and complex values: + // shallow-copy the latter. + rv.value_stack = rv.value_stack.map(shallow_copy); + + // and we don't bother with the sharedState_yy reference: + //delete rv.yy; + + // now we prepare for tracking the COMBINE actions + // in the error recovery code path: + // + // as we want to keep the maximum error info context, we + // *scan* the state stack to find the first *empty* slot. + // This position will surely be AT OR ABOVE the current + // stack pointer, but we want to keep the 'used but discarded' + // part of the parse stacks *intact* as those slots carry + // error context that may be useful when you want to produce + // very detailed error diagnostic reports. + // + // ### Purpose of each stack pointer: + // + // - stack_pointer: points at the top of the parse stack + // **as it existed at the time of the error + // occurrence, i.e. 
at the time the stack + // snapshot was taken and copied into the + // errorInfo object.** + // - base_pointer: the bottom of the **empty part** of the + // stack, i.e. **the start of the rest of + // the stack space /above/ the existing + // parse stack. This section will be filled + // by the error recovery process as it + // travels the parse state machine to + // arrive at the resolving error recovery rule.** + // - info_stack_pointer: + // this stack pointer points to the **top of + // the error ecovery tracking stack space**, i.e. + // this stack pointer takes up the role of + // the `stack_pointer` for the error recovery + // process. Any mutations in the **parse stack** + // are **copy-appended** to this part of the + // stack space, keeping the bottom part of the + // stack (the 'snapshot' part where the parse + // state at the time of error occurrence was kept) + // intact. + // - root_failure_pointer: + // copy of the `stack_pointer`... + // + for (var i = rv.stack_pointer; typeof rv.state_stack[i] !== 'undefined'; i++) { + // empty + } + rv.base_pointer = i; + rv.info_stack_pointer = i; + + rv.root_failure_pointer = rv.stack_pointer; + + // track this instance so we can `destroy()` it once we deem it superfluous and ready for garbage collection! + this.__error_recovery_infos.push(rv); + + return rv; + }; + + function lex() { + var token = lexer.lex(); + // if token isn't its numeric value, convert + if (typeof token !== 'number') { + token = self.symbols_[token] || token; + } + + return token || EOF; + } + + + var state, action, r, t; + var yyval = { + $: true, + _$: undefined, + yy: sharedState_yy + }; + var p; + var yyrulelen; + var this_production; + var newState; + var retval = false; + + + // Return the rule stack depth where the nearest error rule can be found. + // Return -1 when no error recovery rule was found. + function locateNearestErrorRecoveryRule(state) { + var stack_probe = sp - 1; + var depth = 0; + + // try to recover from error + for (;;) { + // check for error recovery rule in this state + + + + + + + + + + var t = table[state][TERROR] || NO_ACTION; + if (t[0]) { + // We need to make sure we're not cycling forever: + // once we hit EOF, even when we `yyerrok()` an error, we must + // prevent the core from running forever, + // e.g. when parent rules are still expecting certain input to + // follow after this, for example when you handle an error inside a set + // of braces which are matched by a parent rule in your grammar. + // + // Hence we require that every error handling/recovery attempt + // *after we've hit EOF* has a diminishing state stack: this means + // we will ultimately have unwound the state stack entirely and thus + // terminate the parse in a controlled fashion even when we have + // very complex error/recovery code interplay in the core + user + // action code blocks: + + + + + + + + + + if (symbol === EOF) { + if (!lastEofErrorStateDepth) { + lastEofErrorStateDepth = sp - 1 - depth; + } else if (lastEofErrorStateDepth <= sp - 1 - depth) { + + + + + + + + + + --stack_probe; // popStack(1): [symbol, action] + state = sstack[stack_probe]; + ++depth; + continue; + } + } + return depth; + } + if (state === 0 /* $accept rule */ || stack_probe < 1) { + + + + + + + + + + return -1; // No suitable error recovery rule available. 
+ } + --stack_probe; // popStack(1): [symbol, action] + state = sstack[stack_probe]; + ++depth; + } + } + + + try { + this.__reentrant_call_depth++; + + lexer.setInput(input, sharedState_yy); + + yyloc = lexer.yylloc; + lstack[sp] = yyloc; + vstack[sp] = null; + sstack[sp] = 0; + stack[sp] = 0; + ++sp; + + + + + + if (this.pre_parse) { + this.pre_parse.call(this, sharedState_yy); + } + if (sharedState_yy.pre_parse) { + sharedState_yy.pre_parse.call(this, sharedState_yy); + } + + newState = sstack[sp - 1]; + for (;;) { + // retrieve state number from top of stack + state = newState; // sstack[sp - 1]; + + // use default actions if available + if (this.defaultActions[state]) { + action = 2; + newState = this.defaultActions[state]; + } else { + // The single `==` condition below covers both these `===` comparisons in a single + // operation: + // + // if (symbol === null || typeof symbol === 'undefined') ... + if (!symbol) { + symbol = lex(); + } + // read action for current state and first input + t = (table[state] && table[state][symbol]) || NO_ACTION; + newState = t[1]; + action = t[0]; + + + + + + + + + + + + // handle parse error + if (!action) { + // first see if there's any chance at hitting an error recovery rule: + var error_rule_depth = locateNearestErrorRecoveryRule(state); + var errStr = null; + var errSymbolDescr = (this.describeSymbol(symbol) || symbol); + var expected = this.collect_expected_token_set(state); + + if (!recovering) { + // Report error + if (typeof lexer.yylineno === 'number') { + errStr = 'Parse error on line ' + (lexer.yylineno + 1) + ': '; + } else { + errStr = 'Parse error: '; + } + + if (typeof lexer.showPosition === 'function') { + errStr += '\n' + lexer.showPosition(79 - 10, 10) + '\n'; + } + if (expected.length) { + errStr += 'Expecting ' + expected.join(', ') + ', got unexpected ' + errSymbolDescr; + } else { + errStr += 'Unexpected ' + errSymbolDescr; + } + + p = this.constructParseErrorInfo(errStr, null, expected, (error_rule_depth >= 0)); + + // cleanup the old one before we start the new error info track: + if (recoveringErrorInfo && typeof recoveringErrorInfo.destroy === 'function') { + recoveringErrorInfo.destroy(); + } + recoveringErrorInfo = this.shallowCopyErrorInfo(p); + + r = this.parseError(p.errStr, p, this.JisonParserError); + + + + + + + + + + // Protect against overly blunt userland `parseError` code which *sets* + // the `recoverable` flag without properly checking first: + // we always terminate the parse when there's no recovery rule available anyhow! + if (!p.recoverable || error_rule_depth < 0) { + retval = r; + break; + } else { + // TODO: allow parseError callback to edit symbol and or state at the start of the error recovery process... 
+ } + } + + + + + + + + + + + var esp = recoveringErrorInfo.info_stack_pointer; + + // just recovered from another error + if (recovering === ERROR_RECOVERY_TOKEN_DISCARD_COUNT && error_rule_depth >= 0) { + // SHIFT current lookahead and grab another + recoveringErrorInfo.symbol_stack[esp] = symbol; + recoveringErrorInfo.value_stack[esp] = shallow_copy(lexer.yytext); + recoveringErrorInfo.location_stack[esp] = copy_yylloc(lexer.yylloc); + recoveringErrorInfo.state_stack[esp] = newState; // push state + ++esp; + + // Pick up the lexer details for the current symbol as that one is not 'look-ahead' any more: + + + + yyloc = lexer.yylloc; + + preErrorSymbol = 0; + symbol = lex(); + + + + + + + + + + } + + // try to recover from error + if (error_rule_depth < 0) { + ASSERT(recovering > 0); + recoveringErrorInfo.info_stack_pointer = esp; + + // barf a fatal hairball when we're out of look-ahead symbols and none hit a match + // while we are still busy recovering from another error: + var po = this.__error_infos[this.__error_infos.length - 1]; + if (!po) { + p = this.constructParseErrorInfo('Parsing halted while starting to recover from another error.', null, expected, false); + } else { + p = this.constructParseErrorInfo('Parsing halted while starting to recover from another error. Previous error which resulted in this fatal result: ' + po.errStr, null, expected, false); + p.extra_error_attributes = po; + } + retval = this.parseError(p.errStr, p, this.JisonParserError); + break; + } + + preErrorSymbol = (symbol === TERROR ? 0 : symbol); // save the lookahead token + symbol = TERROR; // insert generic error symbol as new lookahead + + const EXTRA_STACK_SAMPLE_DEPTH = 3; + + // REDUCE/COMBINE the pushed terms/tokens to a new ERROR token: + recoveringErrorInfo.symbol_stack[esp] = preErrorSymbol; + if (errStr) { + recoveringErrorInfo.value_stack[esp] = { + yytext: shallow_copy(lexer.yytext), + errorRuleDepth: error_rule_depth, + errorStr: errStr, + errorSymbolDescr: errSymbolDescr, + expectedStr: expected, + stackSampleLength: error_rule_depth + EXTRA_STACK_SAMPLE_DEPTH + }; + + + + + + + + + + } else { + recoveringErrorInfo.value_stack[esp] = { + yytext: shallow_copy(lexer.yytext), + errorRuleDepth: error_rule_depth, + stackSampleLength: error_rule_depth + EXTRA_STACK_SAMPLE_DEPTH + }; + } + recoveringErrorInfo.location_stack[esp] = copy_yylloc(lexer.yylloc); + recoveringErrorInfo.state_stack[esp] = newState || NO_ACTION[1]; + + ++esp; + recoveringErrorInfo.info_stack_pointer = esp; + + yyval.$ = recoveringErrorInfo; + yyval._$ = undefined; + + yyrulelen = error_rule_depth; + + + + + + + + + + r = this.performAction.call(yyval, yyloc, NO_ACTION[1], sp - 1, vstack, lstack); + + if (typeof r !== 'undefined') { + retval = r; + break; + } + + // pop off stack + sp -= yyrulelen; + + // and move the top entries + discarded part of the parse stacks onto the error info stack: + for (var idx = sp - EXTRA_STACK_SAMPLE_DEPTH, top = idx + yyrulelen; idx < top; idx++, esp++) { + recoveringErrorInfo.symbol_stack[esp] = stack[idx]; + recoveringErrorInfo.value_stack[esp] = shallow_copy(vstack[idx]); + recoveringErrorInfo.location_stack[esp] = copy_yylloc(lstack[idx]); + recoveringErrorInfo.state_stack[esp] = sstack[idx]; + } + + recoveringErrorInfo.symbol_stack[esp] = TERROR; + recoveringErrorInfo.value_stack[esp] = shallow_copy(yyval.$); + recoveringErrorInfo.location_stack[esp] = copy_yylloc(yyval._$); + + // goto new state = table[STATE][NONTERMINAL] + newState = sstack[sp - 1]; + + if 
(this.defaultActions[newState]) { + recoveringErrorInfo.state_stack[esp] = this.defaultActions[newState]; + } else { + t = (table[newState] && table[newState][symbol]) || NO_ACTION; + recoveringErrorInfo.state_stack[esp] = t[1]; + } + + ++esp; + recoveringErrorInfo.info_stack_pointer = esp; + + // allow N (default: 3) real symbols to be shifted before reporting a new error + recovering = ERROR_RECOVERY_TOKEN_DISCARD_COUNT; + + + + + + + + + + + // Now duplicate the standard parse machine here, at least its initial + // couple of rounds until the TERROR symbol is **pushed onto the parse stack**, + // as we wish to push something special then! + + + // Run the state machine in this copy of the parser state machine + // until we *either* consume the error symbol (and its related information) + // *or* we run into another error while recovering from this one + // *or* we execute a `reduce` action which outputs a final parse + // result (yes, that MAY happen!)... + + ASSERT(recoveringErrorInfo); + ASSERT(symbol === TERROR); + while (symbol) { + // retrieve state number from top of stack + state = newState; // sstack[sp - 1]; + + // use default actions if available + if (this.defaultActions[state]) { + action = 2; + newState = this.defaultActions[state]; + } else { + // read action for current state and first input + t = (table[state] && table[state][symbol]) || NO_ACTION; + newState = t[1]; + action = t[0]; + + + + + + + + + + + // encountered another parse error? If so, break out to main loop + // and take it from there! + if (!action) { + newState = state; + break; + } + } + + + + + + + + + + + switch (action) { + // catch misc. parse failures: + default: + // this shouldn't happen, unless resolve defaults are off + if (action instanceof Array) { + p = this.constructParseErrorInfo('Parse Error: multiple actions possible at state: ' + state + ', token: ' + symbol, null, null, false); + retval = this.parseError(p.errStr, p, this.JisonParserError); + // signal end of error recovery loop AND end of outer parse loop + action = 3; + break; + } + // Another case of better safe than sorry: in case state transitions come out of another error recovery process + // or a buggy LUT (LookUp Table): + p = this.constructParseErrorInfo('Parsing halted. 
No viable error recovery approach available due to internal system failure.', null, null, false); + retval = this.parseError(p.errStr, p, this.JisonParserError); + // signal end of error recovery loop AND end of outer parse loop + action = 3; + break; + + // shift: + case 1: + stack[sp] = symbol; + //vstack[sp] = lexer.yytext; + ASSERT(recoveringErrorInfo); + vstack[sp] = recoveringErrorInfo; + //lstack[sp] = copy_yylloc(lexer.yylloc); + lstack[sp] = this.yyMergeLocationInfo(null, null, recoveringErrorInfo.loc, lexer.yylloc, true); + sstack[sp] = newState; // push state + ++sp; + symbol = 0; + if (!preErrorSymbol) { // normal execution / no error + // Pick up the lexer details for the current symbol as that one is not 'look-ahead' any more: + + + + yyloc = lexer.yylloc; + + if (recovering > 0) { + recovering--; + + + + + + + + + + } + } else { + // error just occurred, resume old lookahead f/ before error, *unless* that drops us straight back into error mode: + symbol = preErrorSymbol; + preErrorSymbol = 0; + + + + + + + + + + // read action for current state and first input + t = (table[newState] && table[newState][symbol]) || NO_ACTION; + if (!t[0] || symbol === TERROR) { + // forget about that symbol and move forward: this wasn't a 'forgot to insert' error type where + // (simple) stuff might have been missing before the token which caused the error we're + // recovering from now... + // + // Also check if the LookAhead symbol isn't the ERROR token we set as part of the error + // recovery, for then this we would we idling (cycling) on the error forever. + // Yes, this does not take into account the possibility that the *lexer* may have + // produced a *new* TERROR token all by itself, but that would be a very peculiar grammar! + + + + + + + + + + symbol = 0; + } + } + + // once we have pushed the special ERROR token value, we're done in this inner loop! + break; + + // reduce: + case 2: + this_production = this.productions_[newState - 1]; // `this.productions_[]` is zero-based indexed while states start from 1 upwards... + yyrulelen = this_production[1]; + + + + + + + + + + + r = this.performAction.call(yyval, yyloc, newState, sp - 1, vstack, lstack); + + if (typeof r !== 'undefined') { + // signal end of error recovery loop AND end of outer parse loop + action = 3; + retval = r; + break; + } + + // pop off stack + sp -= yyrulelen; + + // don't overwrite the `symbol` variable: use a local var to speed things up: + var ntsymbol = this_production[0]; // push nonterminal (reduce) + stack[sp] = ntsymbol; + vstack[sp] = yyval.$; + lstack[sp] = yyval._$; + // goto new state = table[STATE][NONTERMINAL] + newState = table[sstack[sp - 1]][ntsymbol]; + sstack[sp] = newState; + ++sp; + + + + + + + + + + continue; + + // accept: + case 3: + retval = true; + // Return the `$accept` rule's `$$` result, if available. + // + // Also note that JISON always adds this top-most `$accept` rule (with implicit, + // default, action): + // + // $accept: $end + // %{ $$ = $1; @$ = @1; %} + // + // which, combined with the parse kernel's `$accept` state behaviour coded below, + // will produce the `$$` value output of the rule as the parse result, + // IFF that result is *not* `undefined`. (See also the parser kernel code.) 
+ // + // In code: + // + // %{ + // @$ = @1; // if location tracking support is included + // if (typeof $1 !== 'undefined') + // return $1; + // else + // return true; // the default parse result if the rule actions don't produce anything + // %} + sp--; + if (typeof vstack[sp] !== 'undefined') { + retval = vstack[sp]; + } + break; + } + + // break out of loop: we accept or fail with error + break; + } + + // should we also break out of the regular/outer parse loop, + // i.e. did the parser already produce a parse result in here?! + if (action === 3) { + break; + } + continue; + } + + + } + + + + + + + + + + + switch (action) { + // catch misc. parse failures: + default: + // this shouldn't happen, unless resolve defaults are off + if (action instanceof Array) { + p = this.constructParseErrorInfo('Parse Error: multiple actions possible at state: ' + state + ', token: ' + symbol, null, null, false); + retval = this.parseError(p.errStr, p, this.JisonParserError); + break; + } + // Another case of better safe than sorry: in case state transitions come out of another error recovery process + // or a buggy LUT (LookUp Table): + p = this.constructParseErrorInfo('Parsing halted. No viable error recovery approach available due to internal system failure.', null, null, false); + retval = this.parseError(p.errStr, p, this.JisonParserError); + break; + + // shift: + case 1: + stack[sp] = symbol; + vstack[sp] = lexer.yytext; + lstack[sp] = copy_yylloc(lexer.yylloc); + sstack[sp] = newState; // push state + + ++sp; + symbol = 0; + ASSERT(preErrorSymbol === 0); + if (!preErrorSymbol) { // normal execution / no error + // Pick up the lexer details for the current symbol as that one is not 'look-ahead' any more: + + + + yyloc = lexer.yylloc; + + if (recovering > 0) { + recovering--; + + + + + + + + + + } + } else { + // error just occurred, resume old lookahead f/ before error, *unless* that drops us straight back into error mode: + symbol = preErrorSymbol; + preErrorSymbol = 0; + + + + + + + + + + // read action for current state and first input + t = (table[newState] && table[newState][symbol]) || NO_ACTION; + if (!t[0] || symbol === TERROR) { + // forget about that symbol and move forward: this wasn't a 'forgot to insert' error type where + // (simple) stuff might have been missing before the token which caused the error we're + // recovering from now... + // + // Also check if the LookAhead symbol isn't the ERROR token we set as part of the error + // recovery, for then this we would we idling (cycling) on the error forever. + // Yes, this does not take into account the possibility that the *lexer* may have + // produced a *new* TERROR token all by itself, but that would be a very peculiar grammar! + + + + + + + + + + symbol = 0; + } + } + + continue; + + // reduce: + case 2: + this_production = this.productions_[newState - 1]; // `this.productions_[]` is zero-based indexed while states start from 1 upwards... 
+ yyrulelen = this_production[1]; + + + + + + + + + + + r = this.performAction.call(yyval, yyloc, newState, sp - 1, vstack, lstack); + + if (typeof r !== 'undefined') { + retval = r; + break; + } + + // pop off stack + sp -= yyrulelen; + + // don't overwrite the `symbol` variable: use a local var to speed things up: + var ntsymbol = this_production[0]; // push nonterminal (reduce) + stack[sp] = ntsymbol; + vstack[sp] = yyval.$; + lstack[sp] = yyval._$; + // goto new state = table[STATE][NONTERMINAL] + newState = table[sstack[sp - 1]][ntsymbol]; + sstack[sp] = newState; + ++sp; + + + + + + + + + + continue; + + // accept: + case 3: + retval = true; + // Return the `$accept` rule's `$$` result, if available. + // + // Also note that JISON always adds this top-most `$accept` rule (with implicit, + // default, action): + // + // $accept: $end + // %{ $$ = $1; @$ = @1; %} + // + // which, combined with the parse kernel's `$accept` state behaviour coded below, + // will produce the `$$` value output of the rule as the parse result, + // IFF that result is *not* `undefined`. (See also the parser kernel code.) + // + // In code: + // + // %{ + // @$ = @1; // if location tracking support is included + // if (typeof $1 !== 'undefined') + // return $1; + // else + // return true; // the default parse result if the rule actions don't produce anything + // %} + sp--; + if (typeof vstack[sp] !== 'undefined') { + retval = vstack[sp]; + } + + break; + } + + // break out of loop: we accept or fail with error + break; + } + } catch (ex) { + // report exceptions through the parseError callback too, but keep the exception intact + // if it is a known parser or lexer error which has been thrown by parseError() already: + if (ex instanceof this.JisonParserError) { + throw ex; + } + else if (lexer && typeof lexer.JisonLexerError === 'function' && ex instanceof lexer.JisonLexerError) { + throw ex; + } + else { + p = this.constructParseErrorInfo('Parsing aborted due to exception.', ex, null, false); + retval = this.parseError(p.errStr, p, this.JisonParserError); + } + } finally { + retval = this.cleanupAfterParse(retval, true, true); + this.__reentrant_call_depth--; + } // /finally + + return retval; +}, +yyError: 1 +}; +parser.originalParseError = parser.parseError; +parser.originalQuoteName = parser.quoteName; + +var ebnf = false; + + + +var rmCommonWS = helpers.rmCommonWS; +var dquote = helpers.dquote; +var checkActionBlock = helpers.checkActionBlock; + + +// transform ebnf to bnf if necessary +function extend(json, grammar) { + if (ebnf) { + json.ebnf = grammar.grammar; // keep the original source EBNF around for possible pretty-printing & AST exports. + json.bnf = transform(grammar.grammar); + } + else { + json.bnf = grammar.grammar; + } + if (grammar.actionInclude) { + json.actionInclude = grammar.actionInclude; + } + return json; +} + +// convert string value to number or boolean value, when possible +// (and when this is more or less obviously the intent) +// otherwise produce the string itself as value. +function parseValue(v) { + if (v === 'false') { + return false; + } + if (v === 'true') { + return true; + } + // http://stackoverflow.com/questions/175739/is-there-a-built-in-way-in-javascript-to-check-if-a-string-is-a-valid-number + // Note that the `v` check ensures that we do not convert `undefined`, `null` and `''` (empty string!) 
+ if (v && !isNaN(v)) { + var rv = +v; + if (isFinite(rv)) { + return rv; + } + } + return v; +} + + +parser.warn = function p_warn() { + console.warn.apply(console, arguments); +}; + +parser.log = function p_log() { + console.log.apply(console, arguments); +}; +/* lexer generated by jison-lex 0.6.1-205 */ + +/* + * Returns a Lexer object of the following structure: + * + * Lexer: { + * yy: {} The so-called "shared state" or rather the *source* of it; + * the real "shared state" `yy` passed around to + * the rule actions, etc. is a direct reference! + * + * This "shared context" object was passed to the lexer by way of + * the `lexer.setInput(str, yy)` API before you may use it. + * + * This "shared context" object is passed to the lexer action code in `performAction()` + * so userland code in the lexer actions may communicate with the outside world + * and/or other lexer rules' actions in more or less complex ways. + * + * } + * + * Lexer.prototype: { + * EOF: 1, + * ERROR: 2, + * + * yy: The overall "shared context" object reference. + * + * JisonLexerError: function(msg, hash), + * + * performAction: function lexer__performAction(yy, yyrulenumber, YY_START), + * + * The function parameters and `this` have the following value/meaning: + * - `this` : reference to the `lexer` instance. + * `yy_` is an alias for `this` lexer instance reference used internally. + * + * - `yy` : a reference to the `yy` "shared state" object which was passed to the lexer + * by way of the `lexer.setInput(str, yy)` API before. + * + * Note: + * The extra arguments you specified in the `%parse-param` statement in your + * **parser** grammar definition file are passed to the lexer via this object + * reference as member variables. + * + * - `yyrulenumber` : index of the matched lexer rule (regex), used internally. + * + * - `YY_START`: the current lexer "start condition" state. + * + * parseError: function(str, hash, ExceptionClass), + * + * constructLexErrorInfo: function(error_message, is_recoverable), + * Helper function. + * Produces a new errorInfo 'hash object' which can be passed into `parseError()`. + * See it's use in this lexer kernel in many places; example usage: + * + * var infoObj = lexer.constructParseErrorInfo('fail!', true); + * var retVal = lexer.parseError(infoObj.errStr, infoObj, lexer.JisonLexerError); + * + * options: { ... lexer %options ... }, + * + * lex: function(), + * Produce one token of lexed input, which was passed in earlier via the `lexer.setInput()` API. + * You MAY use the additional `args...` parameters as per `%parse-param` spec of the **lexer** grammar: + * these extra `args...` are added verbatim to the `yy` object reference as member variables. + * + * WARNING: + * Lexer's additional `args...` parameters (via lexer's `%parse-param`) MAY conflict with + * any attributes already added to `yy` by the **parser** or the jison run-time; + * when such a collision is detected an exception is thrown to prevent the generated run-time + * from silently accepting this confusing and potentially hazardous situation! + * + * cleanupAfterLex: function(do_not_nuke_errorinfos), + * Helper function. + * + * This helper API is invoked when the **parse process** has completed: it is the responsibility + * of the **parser** (or the calling userland code) to invoke this method once cleanup is desired. + * + * This helper may be invoked by user code to ensure the internal lexer gets properly garbage collected. 
+ * + * setInput: function(input, [yy]), + * + * + * input: function(), + * + * + * unput: function(str), + * + * + * more: function(), + * + * + * reject: function(), + * + * + * less: function(n), + * + * + * pastInput: function(n), + * + * + * upcomingInput: function(n), + * + * + * showPosition: function(), + * + * + * test_match: function(regex_match_array, rule_index), + * + * + * next: function(), + * + * + * begin: function(condition), + * + * + * pushState: function(condition), + * + * + * popState: function(), + * + * + * topState: function(), + * + * + * _currentRules: function(), + * + * + * stateStackSize: function(), + * + * + * performAction: function(yy, yy_, yyrulenumber, YY_START), + * + * + * rules: [...], + * + * + * conditions: {associative list: name ==> set}, + * } + * + * + * token location info (`yylloc`): { + * first_line: n, + * last_line: n, + * first_column: n, + * last_column: n, + * range: [start_number, end_number] + * (where the numbers are indexes into the input string, zero-based) + * } + * + * --- + * + * The `parseError` function receives a 'hash' object with these members for lexer errors: + * + * { + * text: (matched text) + * token: (the produced terminal token, if any) + * token_id: (the produced terminal token numeric ID, if any) + * line: (yylineno) + * loc: (yylloc) + * recoverable: (boolean: TRUE when the parser MAY have an error recovery rule + * available for this particular error) + * yy: (object: the current parser internal "shared state" `yy` + * as is also available in the rule actions; this can be used, + * for instance, for advanced error analysis and reporting) + * lexer: (reference to the current lexer instance used by the parser) + * } + * + * while `this` will reference the current lexer instance. + * + * When `parseError` is invoked by the lexer, the default implementation will + * attempt to invoke `yy.parser.parseError()`; when this callback is not provided + * it will try to invoke `yy.parseError()` instead. When that callback is also not + * provided, a `JisonLexerError` exception will be thrown containing the error + * message and `hash`, as constructed by the `constructLexErrorInfo()` API. + * + * Note that the lexer's `JisonLexerError` error class is passed via the + * `ExceptionClass` argument, which is invoked to construct the exception + * instance to be thrown, so technically `parseError` will throw the object + * produced by the `new ExceptionClass(str, hash)` JavaScript expression. + * + * --- + * + * You can specify lexer options by setting / modifying the `.options` object of your Lexer instance. + * These options are available: + * + * (Options are permanent.) + * + * yy: { + * parseError: function(str, hash, ExceptionClass) + * optional: overrides the default `parseError` function. + * } + * + * lexer.options: { + * pre_lex: function() + * optional: is invoked before the lexer is invoked to produce another token. + * `this` refers to the Lexer object. + * post_lex: function(token) { return token; } + * optional: is invoked when the lexer has produced a token `token`; + * this function can override the returned token value by returning another. + * When it does not return any (truthy) value, the lexer will return + * the original `token`. + * `this` refers to the Lexer object. + * + * WARNING: the next set of options are not meant to be changed. They echo the abilities of + * the lexer as per when it was compiled! + * + * ranges: boolean + * optional: `true` ==> token location info will include a .range[] member. 
+ * flex: boolean + * optional: `true` ==> flex-like lexing behaviour where the rules are tested + * exhaustively to find the longest match. + * backtrack_lexer: boolean + * optional: `true` ==> lexer regexes are tested in order and for invoked; + * the lexer terminates the scan when a token is returned by the action code. + * xregexp: boolean + * optional: `true` ==> lexer rule regexes are "extended regex format" requiring the + * `XRegExp` library. When this %option has not been specified at compile time, all lexer + * rule regexes have been written as standard JavaScript RegExp expressions. + * } + */ + + +var lexer = function() { + /** + * See also: + * http://stackoverflow.com/questions/1382107/whats-a-good-way-to-extend-error-in-javascript/#35881508 + * but we keep the prototype.constructor and prototype.name assignment lines too for compatibility + * with userland code which might access the derived class in a 'classic' way. + * + * @public + * @constructor + * @nocollapse + */ + function JisonLexerError(msg, hash) { + Object.defineProperty(this, 'name', { + enumerable: false, + writable: false, + value: 'JisonLexerError' + }); + + if (msg == null) + msg = '???'; + + Object.defineProperty(this, 'message', { + enumerable: false, + writable: true, + value: msg + }); + + this.hash = hash; + var stacktrace; + + if (hash && hash.exception instanceof Error) { + var ex2 = hash.exception; + this.message = ex2.message || msg; + stacktrace = ex2.stack; + } + + if (!stacktrace) { + if (Error.hasOwnProperty('captureStackTrace')) { + // V8 + Error.captureStackTrace(this, this.constructor); + } else { + stacktrace = new Error(msg).stack; + } + } + + if (stacktrace) { + Object.defineProperty(this, 'stack', { + enumerable: false, + writable: false, + value: stacktrace + }); + } + } + + if (typeof Object.setPrototypeOf === 'function') { + Object.setPrototypeOf(JisonLexerError.prototype, Error.prototype); + } else { + JisonLexerError.prototype = Object.create(Error.prototype); + } + + JisonLexerError.prototype.constructor = JisonLexerError; + JisonLexerError.prototype.name = 'JisonLexerError'; + + var lexer = { + +// Code Generator Information Report +// --------------------------------- +// +// Options: +// +// backtracking: .................... false +// location.ranges: ................. true +// location line+column tracking: ... true +// +// +// Forwarded Parser Analysis flags: +// +// uses yyleng: ..................... false +// uses yylineno: ................... false +// uses yytext: ..................... false +// uses yylloc: ..................... false +// uses lexer values: ............... true / true +// location tracking: ............... true +// location assignment: ............. true +// +// +// Lexer Analysis flags: +// +// uses yyleng: ..................... ??? +// uses yylineno: ................... ??? +// uses yytext: ..................... ??? +// uses yylloc: ..................... ??? +// uses ParseError API: ............. ??? +// uses yyerror: .................... ??? +// uses location tracking & editing: ??? +// uses more() API: ................. ??? +// uses unput() API: ................ ??? +// uses reject() API: ............... ??? +// uses less() API: ................. ??? +// uses display APIs pastInput(), upcomingInput(), showPosition(): +// ............................. ??? +// uses describeYYLLOC() API: ....... ??? 
+// +// --------- END OF REPORT ----------- + +EOF: 1, + ERROR: 2, + + // JisonLexerError: JisonLexerError, /// <-- injected by the code generator + + // options: {}, /// <-- injected by the code generator + + // yy: ..., /// <-- injected by setInput() + + __currentRuleSet__: null, /// INTERNAL USE ONLY: internal rule set cache for the current lexer state + + __error_infos: [], /// INTERNAL USE ONLY: the set of lexErrorInfo objects created since the last cleanup + __decompressed: false, /// INTERNAL USE ONLY: mark whether the lexer instance has been 'unfolded' completely and is now ready for use + done: false, /// INTERNAL USE ONLY + _backtrack: false, /// INTERNAL USE ONLY + _input: '', /// INTERNAL USE ONLY + _more: false, /// INTERNAL USE ONLY + _signaled_error_token: false, /// INTERNAL USE ONLY + conditionStack: [], /// INTERNAL USE ONLY; managed via `pushState()`, `popState()`, `topState()` and `stateStackSize()` + match: '', /// READ-ONLY EXTERNAL ACCESS - ADVANCED USE ONLY: tracks input which has been matched so far for the lexer token under construction. `match` is identical to `yytext` except that this one still contains the matched input string after `lexer.performAction()` has been invoked, where userland code MAY have changed/replaced the `yytext` value entirely! + matched: '', /// READ-ONLY EXTERNAL ACCESS - ADVANCED USE ONLY: tracks entire input which has been matched so far + matches: false, /// READ-ONLY EXTERNAL ACCESS - ADVANCED USE ONLY: tracks RE match result for last (successful) match attempt + yytext: '', /// ADVANCED USE ONLY: tracks input which has been matched so far for the lexer token under construction; this value is transferred to the parser as the 'token value' when the parser consumes the lexer token produced through a call to the `lex()` API. + offset: 0, /// READ-ONLY EXTERNAL ACCESS - ADVANCED USE ONLY: tracks the 'cursor position' in the input string, i.e. the number of characters matched so far + yyleng: 0, /// READ-ONLY EXTERNAL ACCESS - ADVANCED USE ONLY: length of matched input for the token under construction (`yytext`) + yylineno: 0, /// READ-ONLY EXTERNAL ACCESS - ADVANCED USE ONLY: 'line number' at which the token under construction is located + yylloc: null, /// READ-ONLY EXTERNAL ACCESS - ADVANCED USE ONLY: tracks location info (lines + columns) for the token under construction + + /** + * INTERNAL USE: construct a suitable error info hash object instance for `parseError`. 
+ * + * @public + * @this {RegExpLexer} + */ + constructLexErrorInfo: function lexer_constructLexErrorInfo(msg, recoverable, show_input_position) { + msg = '' + msg; + + // heuristic to determine if the error message already contains a (partial) source code dump + // as produced by either `showPosition()` or `prettyPrintRange()`: + if (show_input_position == undefined) { + show_input_position = !(msg.indexOf('\n') > 0 && msg.indexOf('^') > 0); + } + + if (this.yylloc && show_input_position) { + if (typeof this.prettyPrintRange === 'function') { + var pretty_src = this.prettyPrintRange(this.yylloc); + + if (!/\n\s*$/.test(msg)) { + msg += '\n'; + } + + msg += '\n Erroneous area:\n' + this.prettyPrintRange(this.yylloc); + } else if (typeof this.showPosition === 'function') { + var pos_str = this.showPosition(); + + if (pos_str) { + if (msg.length && msg[msg.length - 1] !== '\n' && pos_str[0] !== '\n') { + msg += '\n' + pos_str; + } else { + msg += pos_str; + } + } + } + } + + /** @constructor */ + var pei = { + errStr: msg, + recoverable: !!recoverable, + text: this.match, // This one MAY be empty; userland code should use the `upcomingInput` API to obtain more text which follows the 'lexer cursor position'... + token: null, + line: this.yylineno, + loc: this.yylloc, + yy: this.yy, + lexer: this, + + /** + * and make sure the error info doesn't stay due to potential + * ref cycle via userland code manipulations. + * These would otherwise all be memory leak opportunities! + * + * Note that only array and object references are nuked as those + * constitute the set of elements which can produce a cyclic ref. + * The rest of the members is kept intact as they are harmless. + * + * @public + * @this {LexErrorInfo} + */ + destroy: function destructLexErrorInfo() { + // remove cyclic references added to error info: + // info.yy = null; + // info.lexer = null; + // ... + var rec = !!this.recoverable; + + for (var key in this) { + if (this.hasOwnProperty(key) && typeof key === 'object') { + this[key] = undefined; + } + } + + this.recoverable = rec; + } + }; + + // track this instance so we can `destroy()` it once we deem it superfluous and ready for garbage collection! + this.__error_infos.push(pei); + + return pei; + }, + + /** + * handler which is invoked when a lexer error occurs. + * + * @public + * @this {RegExpLexer} + */ + parseError: function lexer_parseError(str, hash, ExceptionClass) { + if (!ExceptionClass) { + ExceptionClass = this.JisonLexerError; + } + + if (this.yy) { + if (this.yy.parser && typeof this.yy.parser.parseError === 'function') { + return this.yy.parser.parseError.call(this, str, hash, ExceptionClass) || this.ERROR; + } else if (typeof this.yy.parseError === 'function') { + return this.yy.parseError.call(this, str, hash, ExceptionClass) || this.ERROR; + } + } + + throw new ExceptionClass(str, hash); + }, + + /** + * method which implements `yyerror(str, ...args)` functionality for use inside lexer actions. 
+ * + * @public + * @this {RegExpLexer} + */ + yyerror: function yyError(str /*, ...args */) { + var lineno_msg = ''; + + if (this.yylloc) { + lineno_msg = ' on line ' + (this.yylineno + 1); + } + + var p = this.constructLexErrorInfo( + 'Lexical error' + lineno_msg + ': ' + str, + this.options.lexerErrorsAreRecoverable + ); + + // Add any extra args to the hash under the name `extra_error_attributes`: + var args = Array.prototype.slice.call(arguments, 1); + + if (args.length) { + p.extra_error_attributes = args; + } + + return this.parseError(p.errStr, p, this.JisonLexerError) || this.ERROR; + }, + + /** + * final cleanup function for when we have completed lexing the input; + * make it an API so that external code can use this one once userland + * code has decided it's time to destroy any lingering lexer error + * hash object instances and the like: this function helps to clean + * up these constructs, which *may* carry cyclic references which would + * otherwise prevent the instances from being properly and timely + * garbage-collected, i.e. this function helps prevent memory leaks! + * + * @public + * @this {RegExpLexer} + */ + cleanupAfterLex: function lexer_cleanupAfterLex(do_not_nuke_errorinfos) { + // prevent lingering circular references from causing memory leaks: + this.setInput('', {}); + + // nuke the error hash info instances created during this run. + // Userland code must COPY any data/references + // in the error hash instance(s) it is more permanently interested in. + if (!do_not_nuke_errorinfos) { + for (var i = this.__error_infos.length - 1; i >= 0; i--) { + var el = this.__error_infos[i]; + + if (el && typeof el.destroy === 'function') { + el.destroy(); + } + } + + this.__error_infos.length = 0; + } + + return this; + }, + + /** + * clear the lexer token context; intended for internal use only + * + * @public + * @this {RegExpLexer} + */ + clear: function lexer_clear() { + this.yytext = ''; + this.yyleng = 0; + this.match = ''; + + // - DO NOT reset `this.matched` + this.matches = false; + + this._more = false; + this._backtrack = false; + var col = (this.yylloc ? this.yylloc.last_column : 0); + + this.yylloc = { + first_line: this.yylineno + 1, + first_column: col, + last_line: this.yylineno + 1, + last_column: col, + range: [this.offset, this.offset] + }; + }, + + /** + * resets the lexer, sets new input + * + * @public + * @this {RegExpLexer} + */ + setInput: function lexer_setInput(input, yy) { + this.yy = yy || this.yy || {}; + + // also check if we've fully initialized the lexer instance, + // including expansion work to be done to go from a loaded + // lexer to a usable lexer: + if (!this.__decompressed) { + // step 1: decompress the regex list: + var rules = this.rules; + + for (var i = 0, len = rules.length; i < len; i++) { + var rule_re = rules[i]; + + // compression: is the RE an xref to another RE slot in the rules[] table? + if (typeof rule_re === 'number') { + rules[i] = rules[rule_re]; + } + } + + // step 2: unfold the conditions[] set to make these ready for use: + var conditions = this.conditions; + + for (var k in conditions) { + var spec = conditions[k]; + var rule_ids = spec.rules; + var len = rule_ids.length; + var rule_regexes = new Array(len + 1); // slot 0 is unused; we use a 1-based index approach here to keep the hottest code in `lexer_next()` fast and simple! 
+ var rule_new_ids = new Array(len + 1); + + for (var i = 0; i < len; i++) { + var idx = rule_ids[i]; + var rule_re = rules[idx]; + rule_regexes[i + 1] = rule_re; + rule_new_ids[i + 1] = idx; + } + + spec.rules = rule_new_ids; + spec.__rule_regexes = rule_regexes; + spec.__rule_count = len; + } + + this.__decompressed = true; + } + + this._input = input || ''; + this.clear(); + this._signaled_error_token = false; + this.done = false; + this.yylineno = 0; + this.matched = ''; + this.conditionStack = ['INITIAL']; + this.__currentRuleSet__ = null; + + this.yylloc = { + first_line: 1, + first_column: 0, + last_line: 1, + last_column: 0, + range: [0, 0] + }; + + this.offset = 0; + return this; + }, + + /** + * edit the remaining input via user-specified callback. + * This can be used to forward-adjust the input-to-parse, + * e.g. inserting macro expansions and alike in the + * input which has yet to be lexed. + * The behaviour of this API contrasts the `unput()` et al + * APIs as those act on the *consumed* input, while this + * one allows one to manipulate the future, without impacting + * the current `yyloc` cursor location or any history. + * + * Use this API to help implement C-preprocessor-like + * `#include` statements, etc. + * + * The provided callback must be synchronous and is + * expected to return the edited input (string). + * + * The `cpsArg` argument value is passed to the callback + * as-is. + * + * `callback` interface: + * `function callback(input, cpsArg)` + * + * - `input` will carry the remaining-input-to-lex string + * from the lexer. + * - `cpsArg` is `cpsArg` passed into this API. + * + * The `this` reference for the callback will be set to + * reference this lexer instance so that userland code + * in the callback can easily and quickly access any lexer + * API. + * + * When the callback returns a non-string-type falsey value, + * we assume the callback did not edit the input and we + * will using the input as-is. + * + * When the callback returns a non-string-type value, it + * is converted to a string for lexing via the `"" + retval` + * operation. (See also why: http://2ality.com/2012/03/converting-to-string.html + * -- that way any returned object's `toValue()` and `toString()` + * methods will be invoked in a proper/desirable order.) + * + * @public + * @this {RegExpLexer} + */ + editRemainingInput: function lexer_editRemainingInput(callback, cpsArg) { + var rv = callback.call(this, this._input, cpsArg); + + if (typeof rv !== 'string') { + if (rv) { + this._input = '' + rv; + } + // else: keep `this._input` as is. + } else { + this._input = rv; + } + + return this; + }, + + /** + * consumes and returns one char from the input + * + * @public + * @this {RegExpLexer} + */ + input: function lexer_input() { + if (!this._input) { + //this.done = true; -- don't set `done` as we want the lex()/next() API to be able to produce one custom EOF token match after this anyhow. (lexer can match special <> tokens and perform user action code for a <> match, but only does so *once*) + return null; + } + + var ch = this._input[0]; + this.yytext += ch; + this.yyleng++; + this.offset++; + this.match += ch; + this.matched += ch; + + // Count the linenumber up when we hit the LF (or a stand-alone CR). + // On CRLF, the linenumber is incremented when you fetch the CR or the CRLF combo + // and we advance immediately past the LF as well, returning both together as if + // it was all a single 'character' only. 
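// For illustration only (not part of the generated bundle): a sketch of the
// `editRemainingInput()` API documented above, used for C-preprocessor-style macro
// expansion of the not-yet-lexed input. `expandMacros` is a hypothetical user-supplied
// helper, not something provided by this module:
//
//     lexer.editRemainingInput(function (remaining, macroTable) {
//         // must be synchronous and must return the edited input string;
//         // only the upcoming input is touched -- yylloc and history stay intact
//         return expandMacros(remaining, macroTable);   // hypothetical helper
//     }, { FOO: '42' });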
+ var slice_len = 1; + + var lines = false; + + if (ch === '\n') { + lines = true; + } else if (ch === '\r') { + lines = true; + var ch2 = this._input[1]; + + if (ch2 === '\n') { + slice_len++; + ch += ch2; + this.yytext += ch2; + this.yyleng++; + this.offset++; + this.match += ch2; + this.matched += ch2; + this.yylloc.range[1]++; + } + } + + if (lines) { + this.yylineno++; + this.yylloc.last_line++; + this.yylloc.last_column = 0; + } else { + this.yylloc.last_column++; + } + + this.yylloc.range[1]++; + this._input = this._input.slice(slice_len); + return ch; + }, + + /** + * unshifts one char (or an entire string) into the input + * + * @public + * @this {RegExpLexer} + */ + unput: function lexer_unput(ch) { + var len = ch.length; + var lines = ch.split(/(?:\r\n?|\n)/g); + this._input = ch + this._input; + this.yytext = this.yytext.substr(0, this.yytext.length - len); + this.yyleng = this.yytext.length; + this.offset -= len; + this.match = this.match.substr(0, this.match.length - len); + this.matched = this.matched.substr(0, this.matched.length - len); + + if (lines.length > 1) { + this.yylineno -= lines.length - 1; + this.yylloc.last_line = this.yylineno + 1; + + // Get last entirely matched line into the `pre_lines[]` array's + // last index slot; we don't mind when other previously + // matched lines end up in the array too. + var pre = this.match; + + var pre_lines = pre.split(/(?:\r\n?|\n)/g); + + if (pre_lines.length === 1) { + pre = this.matched; + pre_lines = pre.split(/(?:\r\n?|\n)/g); + } + + this.yylloc.last_column = pre_lines[pre_lines.length - 1].length; + } else { + this.yylloc.last_column -= len; + } + + this.yylloc.range[1] = this.yylloc.range[0] + this.yyleng; + this.done = false; + return this; + }, + + /** + * cache matched text and append it on next action + * + * @public + * @this {RegExpLexer} + */ + more: function lexer_more() { + this._more = true; + return this; + }, + + /** + * signal the lexer that this rule fails to match the input, so the + * next matching rule (regex) should be tested instead. + * + * @public + * @this {RegExpLexer} + */ + reject: function lexer_reject() { + if (this.options.backtrack_lexer) { + this._backtrack = true; + } else { + // when the `parseError()` call returns, we MUST ensure that the error is registered. + // We accomplish this by signaling an 'error' token to be produced for the current + // `.lex()` run. + var lineno_msg = ''; + + if (this.yylloc) { + lineno_msg = ' on line ' + (this.yylineno + 1); + } + + var p = this.constructLexErrorInfo( + 'Lexical error' + lineno_msg + ': You can only invoke reject() in the lexer when the lexer is of the backtracking persuasion (options.backtrack_lexer = true).', + false + ); + + this._signaled_error_token = this.parseError(p.errStr, p, this.JisonLexerError) || this.ERROR; + } + + return this; + }, + + /** + * retain first n characters of the match + * + * @public + * @this {RegExpLexer} + */ + less: function lexer_less(n) { + return this.unput(this.match.slice(n)); + }, + + /** + * return (part of the) already matched input, i.e. for error + * messages. + * + * Limit the returned string length to `maxSize` (default: 20). + * + * Limit the returned string to the `maxLines` number of lines of + * input (default: 1). + * + * Negative limit values equal *unlimited*. 
+ * + * @public + * @this {RegExpLexer} + */ + pastInput: function lexer_pastInput(maxSize, maxLines) { + var past = this.matched.substring(0, this.matched.length - this.match.length); + + if (maxSize < 0) + maxSize = past.length; + else if (!maxSize) + maxSize = 20; + + if (maxLines < 0) + maxLines = past.length; // can't ever have more input lines than this! + else if (!maxLines) + maxLines = 1; + + // `substr` anticipation: treat \r\n as a single character and take a little + // more than necessary so that we can still properly check against maxSize + // after we've transformed and limited the newLines in here: + past = past.substr(-maxSize * 2 - 2); + + // now that we have a significantly reduced string to process, transform the newlines + // and chop them, then limit them: + var a = past.replace(/\r\n|\r/g, '\n').split('\n'); + + a = a.slice(-maxLines); + past = a.join('\n'); + + // When, after limiting to maxLines, we still have too much to return, + // do add an ellipsis prefix... + if (past.length > maxSize) { + past = '...' + past.substr(-maxSize); + } + + return past; + }, + + /** + * return (part of the) upcoming input, i.e. for error messages. + * + * Limit the returned string length to `maxSize` (default: 20). + * + * Limit the returned string to the `maxLines` number of lines of input (default: 1). + * + * Negative limit values equal *unlimited*. + * + * > ### NOTE ### + * > + * > *"upcoming input"* is defined as the whole of the both + * > the *currently lexed* input, together with any remaining input + * > following that. *"currently lexed"* input is the input + * > already recognized by the lexer but not yet returned with + * > the lexer token. This happens when you are invoking this API + * > from inside any lexer rule action code block. + * > + * + * @public + * @this {RegExpLexer} + */ + upcomingInput: function lexer_upcomingInput(maxSize, maxLines) { + var next = this.match; + + if (maxSize < 0) + maxSize = next.length + this._input.length; + else if (!maxSize) + maxSize = 20; + + if (maxLines < 0) + maxLines = maxSize; // can't ever have more input lines than this! + else if (!maxLines) + maxLines = 1; + + // `substring` anticipation: treat \r\n as a single character and take a little + // more than necessary so that we can still properly check against maxSize + // after we've transformed and limited the newLines in here: + if (next.length < maxSize * 2 + 2) { + next += this._input.substring(0, maxSize * 2 + 2); // substring is faster on Chrome/V8 + } + + // now that we have a significantly reduced string to process, transform the newlines + // and chop them, then limit them: + var a = next.replace(/\r\n|\r/g, '\n').split('\n'); + + a = a.slice(0, maxLines); + next = a.join('\n'); + + // When, after limiting to maxLines, we still have too much to return, + // do add an ellipsis postfix... + if (next.length > maxSize) { + next = next.substring(0, maxSize) + '...'; + } + + return next; + }, + + /** + * return a string which displays the character position where the + * lexing error occurred, i.e. 
for error messages + * + * @public + * @this {RegExpLexer} + */ + showPosition: function lexer_showPosition(maxPrefix, maxPostfix) { + var pre = this.pastInput(maxPrefix).replace(/\s/g, ' '); + var c = new Array(pre.length + 1).join('-'); + return pre + this.upcomingInput(maxPostfix).replace(/\s/g, ' ') + '\n' + c + '^'; + }, + + /** + * return a string which displays the lines & columns of input which are referenced + * by the given location info range, plus a few lines of context. + * + * This function pretty-prints the indicated section of the input, with line numbers + * and everything! + * + * This function is very useful to provide highly readable error reports, while + * the location range may be specified in various flexible ways: + * + * - `loc` is the location info object which references the area which should be + * displayed and 'marked up': these lines & columns of text are marked up by `^` + * characters below each character in the entire input range. + * + * - `context_loc` is the *optional* location info object which instructs this + * pretty-printer how much *leading* context should be displayed alongside + * the area referenced by `loc`. This can help provide context for the displayed + * error, etc. + * + * When this location info is not provided, a default context of 3 lines is + * used. + * + * - `context_loc2` is another *optional* location info object, which serves + * a similar purpose to `context_loc`: it specifies the amount of *trailing* + * context lines to display in the pretty-print output. + * + * When this location info is not provided, a default context of 1 line only is + * used. + * + * Special Notes: + * + * - when the `loc`-indicated range is very large (about 5 lines or more), then + * only the first and last few lines of this block are printed while a + * `...continued...` message will be printed between them. + * + * This serves the purpose of not printing a huge amount of text when the `loc` + * range happens to be huge: this way a manageable & readable output results + * for arbitrary large ranges. + * + * - this function can display lines of input which whave not yet been lexed. + * `prettyPrintRange()` can access the entire input! + * + * @public + * @this {RegExpLexer} + */ + prettyPrintRange: function lexer_prettyPrintRange(loc, context_loc, context_loc2) { + const CONTEXT = 3; + const CONTEXT_TAIL = 1; + const MINIMUM_VISIBLE_NONEMPTY_LINE_COUNT = 2; + var input = this.matched + this._input; + var lines = input.split('\n'); + + //var show_context = (error_size < 5 || context_loc); + var l0 = Math.max(1, (context_loc ? context_loc.first_line : loc.first_line - CONTEXT)); + + var l1 = Math.max(1, (context_loc2 ? context_loc2.last_line : loc.last_line + CONTEXT_TAIL)); + var lineno_display_width = 1 + Math.log10(l1 | 1) | 0; + var ws_prefix = new Array(lineno_display_width).join(' '); + var nonempty_line_indexes = []; + + var rv = lines.slice(l0 - 1, l1 + 1).map(function injectLineNumber(line, index) { + var lno = index + l0; + var lno_pfx = (ws_prefix + lno).substr(-lineno_display_width); + var rv = lno_pfx + ': ' + line; + var errpfx = new Array(lineno_display_width + 1).join('^'); + var offset = 2 + 1; + var len = 0; + + if (lno === loc.first_line) { + offset += loc.first_column; + + len = Math.max( + 2, + ((lno === loc.last_line ? 
loc.last_column : line.length)) - loc.first_column + 1 + ); + } else if (lno === loc.last_line) { + len = Math.max(2, loc.last_column + 1); + } else if (lno > loc.first_line && lno < loc.last_line) { + len = Math.max(2, line.length + 1); + } + + if (len) { + var lead = new Array(offset).join('.'); + var mark = new Array(len).join('^'); + rv += '\n' + errpfx + lead + mark; + + if (line.trim().length > 0) { + nonempty_line_indexes.push(index); + } + } + + rv = rv.replace(/\t/g, ' '); + return rv; + }); + + // now make sure we don't print an overly large amount of error area: limit it + // to the top and bottom line count: + if (nonempty_line_indexes.length > 2 * MINIMUM_VISIBLE_NONEMPTY_LINE_COUNT) { + var clip_start = nonempty_line_indexes[MINIMUM_VISIBLE_NONEMPTY_LINE_COUNT - 1] + 1; + var clip_end = nonempty_line_indexes[nonempty_line_indexes.length - MINIMUM_VISIBLE_NONEMPTY_LINE_COUNT] - 1; + var intermediate_line = new Array(lineno_display_width + 1).join(' ') + ' (...continued...)'; + intermediate_line += '\n' + new Array(lineno_display_width + 1).join('-') + ' (---------------)'; + rv.splice(clip_start, clip_end - clip_start + 1, intermediate_line); + } + + return rv.join('\n'); + }, + + /** + * helper function, used to produce a human readable description as a string, given + * the input `yylloc` location object. + * + * Set `display_range_too` to TRUE to include the string character index position(s) + * in the description if the `yylloc.range` is available. + * + * @public + * @this {RegExpLexer} + */ + describeYYLLOC: function lexer_describe_yylloc(yylloc, display_range_too) { + var l1 = yylloc.first_line; + var l2 = yylloc.last_line; + var c1 = yylloc.first_column; + var c2 = yylloc.last_column; + var dl = l2 - l1; + var dc = c2 - c1; + var rv; + + if (dl === 0) { + rv = 'line ' + l1 + ', '; + + if (dc <= 1) { + rv += 'column ' + c1; + } else { + rv += 'columns ' + c1 + ' .. ' + c2; + } + } else { + rv = 'lines ' + l1 + '(column ' + c1 + ') .. ' + l2 + '(column ' + c2 + ')'; + } + + if (yylloc.range && display_range_too) { + var r1 = yylloc.range[0]; + var r2 = yylloc.range[1] - 1; + + if (r2 <= r1) { + rv += ' {String Offset: ' + r1 + '}'; + } else { + rv += ' {String Offset range: ' + r1 + ' .. ' + r2 + '}'; + } + } + + return rv; + }, + + /** + * test the lexed token: return FALSE when not a match, otherwise return token. + * + * `match` is supposed to be an array coming out of a regex match, i.e. `match[0]` + * contains the actually matched text string. 
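// For illustration only (not part of the generated bundle): worked examples for the
// diagnostic helpers above; the input values are made up and the expected results follow
// directly from the code shown:
//
//     lexer.describeYYLLOC({ first_line: 3, last_line: 3,
//                            first_column: 5, last_column: 9,
//                            range: [42, 46] }, true);
//     // -> 'line 3, columns 5 .. 9 {String Offset range: 42 .. 45}'
//
//     // showPosition() glues pastInput() and upcomingInput() together and underlines
//     // the point where the lexer is currently matching, e.g. when pastInput() yields
//     // '%token FOO' and upcomingInput() yields ' BAR':
//     lexer.showPosition();
//     // -> '%token FOO BAR\n----------^'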
+ * + * Also move the input cursor forward and update the match collectors: + * + * - `yytext` + * - `yyleng` + * - `match` + * - `matches` + * - `yylloc` + * - `offset` + * + * @public + * @this {RegExpLexer} + */ + test_match: function lexer_test_match(match, indexed_rule) { + var token, lines, backup, match_str, match_str_len; + + if (this.options.backtrack_lexer) { + // save context + backup = { + yylineno: this.yylineno, + + yylloc: { + first_line: this.yylloc.first_line, + last_line: this.yylloc.last_line, + first_column: this.yylloc.first_column, + last_column: this.yylloc.last_column, + range: this.yylloc.range.slice(0) + }, + + yytext: this.yytext, + match: this.match, + matches: this.matches, + matched: this.matched, + yyleng: this.yyleng, + offset: this.offset, + _more: this._more, + _input: this._input, + + //_signaled_error_token: this._signaled_error_token, + yy: this.yy, + + conditionStack: this.conditionStack.slice(0), + done: this.done + }; + } + + match_str = match[0]; + match_str_len = match_str.length; + + // if (match_str.indexOf('\n') !== -1 || match_str.indexOf('\r') !== -1) { + lines = match_str.split(/(?:\r\n?|\n)/g); + + if (lines.length > 1) { + this.yylineno += lines.length - 1; + this.yylloc.last_line = this.yylineno + 1; + this.yylloc.last_column = lines[lines.length - 1].length; + } else { + this.yylloc.last_column += match_str_len; + } + + // } + this.yytext += match_str; + + this.match += match_str; + this.matched += match_str; + this.matches = match; + this.yyleng = this.yytext.length; + this.yylloc.range[1] += match_str_len; + + // previous lex rules MAY have invoked the `more()` API rather than producing a token: + // those rules will already have moved this `offset` forward matching their match lengths, + // hence we must only add our own match length now: + this.offset += match_str_len; + + this._more = false; + this._backtrack = false; + this._input = this._input.slice(match_str_len); + + // calling this method: + // + // function lexer__performAction(yy, yyrulenumber, YY_START) {...} + token = this.performAction.call( + this, + this.yy, + indexed_rule, + this.conditionStack[this.conditionStack.length - 1] /* = YY_START */ + ); + + // otherwise, when the action codes are all simple return token statements: + //token = this.simpleCaseActionClusters[indexed_rule]; + + if (this.done && this._input) { + this.done = false; + } + + if (token) { + return token; + } else if (this._backtrack) { + // recover context + for (var k in backup) { + this[k] = backup[k]; + } + + this.__currentRuleSet__ = null; + return false; // rule action called reject() implying the next rule should be tested instead. + } else if (this._signaled_error_token) { + // produce one 'error' token as `.parseError()` in `reject()` + // did not guarantee a failure signal by throwing an exception! + token = this._signaled_error_token; + + this._signaled_error_token = false; + return token; + } + + return false; + }, + + /** + * return next match in input + * + * @public + * @this {RegExpLexer} + */ + next: function lexer_next() { + if (this.done) { + this.clear(); + return this.EOF; + } + + if (!this._input) { + this.done = true; + } + + var token, match, tempMatch, index; + + if (!this._more) { + this.clear(); + } + + var spec = this.__currentRuleSet__; + + if (!spec) { + // Update the ruleset cache as we apparently encountered a state change or just started lexing. 
+ // The cache is set up for fast lookup -- we assume a lexer will switch states much less often than it will + // invoke the `lex()` token-producing API and related APIs, hence caching the set for direct access helps + // speed up those activities a tiny bit. + spec = this.__currentRuleSet__ = this._currentRules(); + + // Check whether a *sane* condition has been pushed before: this makes the lexer robust against + // user-programmer bugs such as https://github.com/zaach/jison-lex/issues/19 + if (!spec || !spec.rules) { + var lineno_msg = ''; + + if (this.options.trackPosition) { + lineno_msg = ' on line ' + (this.yylineno + 1); + } + + var p = this.constructLexErrorInfo( + 'Internal lexer engine error' + lineno_msg + ': The lex grammar programmer pushed a non-existing condition name "' + this.topState() + '"; this is a fatal error and should be reported to the application programmer team!', + false + ); + + // produce one 'error' token until this situation has been resolved, most probably by parse termination! + return this.parseError(p.errStr, p, this.JisonLexerError) || this.ERROR; + } + } + + var rule_ids = spec.rules; + var regexes = spec.__rule_regexes; + var len = spec.__rule_count; + + // Note: the arrays are 1-based, while `len` itself is a valid index, + // hence the non-standard less-or-equal check in the next loop condition! + for (var i = 1; i <= len; i++) { + tempMatch = this._input.match(regexes[i]); + + if (tempMatch && (!match || tempMatch[0].length > match[0].length)) { + match = tempMatch; + index = i; + + if (this.options.backtrack_lexer) { + token = this.test_match(tempMatch, rule_ids[i]); + + if (token !== false) { + return token; + } else if (this._backtrack) { + match = undefined; + continue; // rule action called reject() implying a rule MISmatch. + } else { + // else: this is a lexer rule which consumes input without producing a token (e.g. whitespace) + return false; + } + } else if (!this.options.flex) { + break; + } + } + } + + if (match) { + token = this.test_match(match, rule_ids[index]); + + if (token !== false) { + return token; + } + + // else: this is a lexer rule which consumes input without producing a token (e.g. whitespace) + return false; + } + + if (!this._input) { + this.done = true; + this.clear(); + return this.EOF; + } else { + var lineno_msg = ''; + + if (this.options.trackPosition) { + lineno_msg = ' on line ' + (this.yylineno + 1); + } + + var p = this.constructLexErrorInfo( + 'Lexical error' + lineno_msg + ': Unrecognized text.', + this.options.lexerErrorsAreRecoverable + ); + + var pendingInput = this._input; + var activeCondition = this.topState(); + var conditionStackDepth = this.conditionStack.length; + token = this.parseError(p.errStr, p, this.JisonLexerError) || this.ERROR; + + if (token === this.ERROR) { + // we can try to recover from a lexer error that `parseError()` did not 'recover' for us + // by moving forward at least one character at a time IFF the (user-specified?) `parseError()` + // has not consumed/modified any pending input or changed state in the error handler: + if (!this.matches && // and make sure the input has been modified/consumed ... + pendingInput === this._input && // ...or the lexer state has been modified significantly enough + // to merit a non-consuming error handling action right now. 
+ activeCondition === this.topState() && conditionStackDepth === this.conditionStack.length) { + this.input(); + } + } + + return token; + } + }, + + /** + * return next match that has a token + * + * @public + * @this {RegExpLexer} + */ + lex: function lexer_lex() { + var r; + + // allow the PRE/POST handlers set/modify the return token for maximum flexibility of the generated lexer: + if (typeof this.options.pre_lex === 'function') { + r = this.options.pre_lex.call(this); + } + + while (!r) { + r = this.next(); + } + + if (typeof this.options.post_lex === 'function') { + // (also account for a userdef function which does not return any value: keep the token as is) + r = this.options.post_lex.call(this, r) || r; + } + + return r; + }, + + /** + * backwards compatible alias for `pushState()`; + * the latter is symmetrical with `popState()` and we advise to use + * those APIs in any modern lexer code, rather than `begin()`. + * + * @public + * @this {RegExpLexer} + */ + begin: function lexer_begin(condition) { + return this.pushState(condition); + }, + + /** + * activates a new lexer condition state (pushes the new lexer + * condition state onto the condition stack) + * + * @public + * @this {RegExpLexer} + */ + pushState: function lexer_pushState(condition) { + this.conditionStack.push(condition); + this.__currentRuleSet__ = null; + return this; + }, + + /** + * pop the previously active lexer condition state off the condition + * stack + * + * @public + * @this {RegExpLexer} + */ + popState: function lexer_popState() { + var n = this.conditionStack.length - 1; + + if (n > 0) { + this.__currentRuleSet__ = null; + return this.conditionStack.pop(); + } else { + return this.conditionStack[0]; + } + }, + + /** + * return the currently active lexer condition state; when an index + * argument is provided it produces the N-th previous condition state, + * if available + * + * @public + * @this {RegExpLexer} + */ + topState: function lexer_topState(n) { + n = this.conditionStack.length - 1 - Math.abs(n || 0); + + if (n >= 0) { + return this.conditionStack[n]; + } else { + return 'INITIAL'; + } + }, + + /** + * (internal) determine the lexer rule set which is active for the + * currently active lexer condition state + * + * @public + * @this {RegExpLexer} + */ + _currentRules: function lexer__currentRules() { + if (this.conditionStack.length && this.conditionStack[this.conditionStack.length - 1]) { + return this.conditions[this.conditionStack[this.conditionStack.length - 1]]; + } else { + return this.conditions['INITIAL']; + } + }, + + /** + * return the number of states currently on the stack + * + * @public + * @this {RegExpLexer} + */ + stateStackSize: function lexer_stateStackSize() { + return this.conditionStack.length; + }, + + options: { + xregexp: true, + ranges: true, + trackPosition: true, + parseActionsUseYYMERGELOCATIONINFO: true, + easy_keyword_rules: true + }, + + JisonLexerError: JisonLexerError, + + performAction: function lexer__performAction(yy, yyrulenumber, YY_START) { + var yy_ = this; + switch (yyrulenumber) { + case 2: + /*! Conditions:: action */ + /*! Rule:: \/[^ /]*?['"{}][^ ]*?\/ */ + return 43; // regexp with braces or quotes (and no spaces) + + break; + + case 7: + /*! Conditions:: action */ + /*! Rule:: \{ */ + yy.depth++; + + return 12; + break; + + case 8: + /*! Conditions:: action */ + /*! Rule:: \} */ + if (yy.depth === 0) { + this.popState(); + } else { + yy.depth--; + } + + return 13; + break; + + case 9: + /*! Conditions:: token */ + /*! 
Rule:: {BR} */ + this.popState(); + + break; + + case 10: + /*! Conditions:: token */ + /*! Rule:: %% */ + this.popState(); + + break; + + case 11: + /*! Conditions:: token */ + /*! Rule:: ; */ + this.popState(); + + break; + + case 12: + /*! Conditions:: bnf ebnf */ + /*! Rule:: %% */ + this.pushState('code'); + + return 14; + break; + + case 25: + /*! Conditions:: options */ + /*! Rule:: = */ + this.pushState('option_values'); + + return 3; + break; + + case 26: + /*! Conditions:: option_values */ + /*! Rule:: "{DOUBLEQUOTED_STRING_CONTENT}" */ + yy_.yytext = unescQuote(this.matches[1], /\\"/g); + + this.popState(); + return 29; // value is always a string type + break; + + case 27: + /*! Conditions:: option_values */ + /*! Rule:: '{QUOTED_STRING_CONTENT}' */ + yy_.yytext = unescQuote(this.matches[1], /\\'/g); + + this.popState(); + return 29; // value is always a string type + break; + + case 28: + /*! Conditions:: option_values */ + /*! Rule:: `{ES2017_STRING_CONTENT}` */ + yy_.yytext = unescQuote(this.matches[1], /\\`/g); + + this.popState(); + return 29; // value is always a string type + break; + + case 29: + /*! Conditions:: INITIAL ebnf bnf token path options option_values */ + /*! Rule:: \/\/[^\r\n]* */ + /* skip single-line comment */ + break; + + case 30: + /*! Conditions:: INITIAL ebnf bnf token path options option_values */ + /*! Rule:: \/\*[^]*?\*\/ */ + /* skip multi-line comment */ + break; + + case 31: + /*! Conditions:: option_values */ + /*! Rule:: [^\s\r\n]+ */ + this.popState(); + + return 30; + break; + + case 32: + /*! Conditions:: options */ + /*! Rule:: {BR}{WS}+(?=\S) */ + /* skip leading whitespace on the next line of input, when followed by more options */ + break; + + case 33: + /*! Conditions:: options */ + /*! Rule:: {BR} */ + this.popState(); + + return 28; + break; + + case 34: + /*! Conditions:: options option_values */ + /*! Rule:: {WS}+ */ + /* skip whitespace */ + break; + + case 35: + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: {WS}+ */ + /* skip whitespace */ + break; + + case 36: + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: {BR}+ */ + /* skip newlines */ + break; + + case 37: + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: \[{ID}\] */ + yy_.yytext = this.matches[1]; + + return 39; + break; + + case 42: + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: "{DOUBLEQUOTED_STRING_CONTENT}" */ + yy_.yytext = unescQuote(this.matches[1], /\\"/g); + + return 26; + break; + + case 43: + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: '{QUOTED_STRING_CONTENT}' */ + yy_.yytext = unescQuote(this.matches[1], /\\'/g); + + return 26; + break; + + case 48: + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: %% */ + this.pushState((yy.ebnf ? 'ebnf' : 'bnf')); + + return 14; + break; + + case 49: + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: %ebnf\b */ + yy.ebnf = true; + + return 20; + break; + + case 57: + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: %token\b */ + this.pushState('token'); + + return 18; + break; + + case 59: + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: %options\b */ + this.pushState('options'); + + return 27; + break; + + case 60: + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: %lex{LEX_CONTENT}\/lex\b */ + // remove the %lex../lex wrapper and return the pure lex section: + yy_.yytext = this.matches[1]; + + return 17; + break; + + case 63: + /*! Conditions:: INITIAL ebnf bnf code */ + /*! 
Rule:: %include\b */ + this.pushState('path'); + + return 44; + break; + + case 64: + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: %{NAME}([^\r\n]*) */ + /* ignore unrecognized decl */ + this.warn(rmCommonWS` + EBNF: ignoring unsupported parser option ${dquote(yy_.yytext)} + while lexing in ${dquote(this.topState())} state. + + Erroneous area: + ` + this.prettyPrintRange(yy_.yylloc)); + + yy_.yytext = [ + this.matches[1], // {NAME} + this.matches[2].trim() // optional value/parameters + ]; + + return 21; + break; + + case 65: + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: <{ID}> */ + yy_.yytext = this.matches[1]; + + return 36; + break; + + case 66: + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: \{\{([^]*?)\}\} */ + yy_.yytext = this.matches[1].replace(/\}\\\}/g, '}}'); // unescape any literal '}\}' that exists within the action code block + + return 15; + break; + + case 67: + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: %\{([^]*?)%\} */ + yy_.yytext = this.matches[1].replace(/%\\\}/g, '%}'); // unescape any literal '%\}' that exists within the action code block + + return 15; + break; + + case 68: + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: \{ */ + yy.depth = 0; + + this.pushState('action'); + return 12; + break; + + case 69: + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: ->.* */ + yy_.yytext = yy_.yytext.substr(2, yy_.yyleng - 2).trim(); + + return 42; + break; + + case 70: + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: →.* */ + yy_.yytext = yy_.yytext.substr(1, yy_.yyleng - 1).trim(); + + return 42; + break; + + case 71: + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: =>.* */ + yy_.yytext = yy_.yytext.substr(2, yy_.yyleng - 2).trim(); + + return 42; + break; + + case 72: + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: {HEX_NUMBER} */ + yy_.yytext = parseInt(yy_.yytext, 16); + + return 37; + break; + + case 73: + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: {DECIMAL_NUMBER}(?![xX0-9a-fA-F]) */ + yy_.yytext = parseInt(yy_.yytext, 10); + + return 37; + break; + + case 75: + /*! Conditions:: code */ + /*! Rule:: [^\r\n]+ */ + return 46; // the bit of CODE just before EOF... + + break; + + case 76: + /*! Conditions:: path */ + /*! Rule:: {BR} */ + this.popState(); + + this.unput(yy_.yytext); + break; + + case 77: + /*! Conditions:: path */ + /*! Rule:: "{DOUBLEQUOTED_STRING_CONTENT}" */ + yy_.yytext = unescQuote(this.matches[1]); + + this.popState(); + return 45; + break; + + case 78: + /*! Conditions:: path */ + /*! Rule:: '{QUOTED_STRING_CONTENT}' */ + yy_.yytext = unescQuote(this.matches[1]); + + this.popState(); + return 45; + break; + + case 79: + /*! Conditions:: path */ + /*! Rule:: {WS}+ */ + // skip whitespace in the line + break; + + case 80: + /*! Conditions:: path */ + /*! Rule:: [^\s\r\n]+ */ + this.popState(); + + return 45; + break; + + case 81: + /*! Conditions:: action */ + /*! Rule:: " */ + yy_.yyerror(rmCommonWS` + unterminated string constant in lexer rule action block. + + Erroneous area: + ` + this.prettyPrintRange(yy_.yylloc)); + + return 2; + break; + + case 82: + /*! Conditions:: action */ + /*! Rule:: ' */ + yy_.yyerror(rmCommonWS` + unterminated string constant in lexer rule action block. + + Erroneous area: + ` + this.prettyPrintRange(yy_.yylloc)); + + return 2; + break; + + case 83: + /*! Conditions:: action */ + /*! Rule:: ` */ + yy_.yyerror(rmCommonWS` + unterminated string constant in lexer rule action block. 
+ + Erroneous area: + ` + this.prettyPrintRange(yy_.yylloc)); + + return 2; + break; + + case 84: + /*! Conditions:: option_values */ + /*! Rule:: " */ + yy_.yyerror(rmCommonWS` + unterminated string constant in %options entry. + + Erroneous area: + ` + this.prettyPrintRange(yy_.yylloc)); + + return 2; + break; + + case 85: + /*! Conditions:: option_values */ + /*! Rule:: ' */ + yy_.yyerror(rmCommonWS` + unterminated string constant in %options entry. + + Erroneous area: + ` + this.prettyPrintRange(yy_.yylloc)); + + return 2; + break; + + case 86: + /*! Conditions:: option_values */ + /*! Rule:: ` */ + yy_.yyerror(rmCommonWS` + unterminated string constant in %options entry. + + Erroneous area: + ` + this.prettyPrintRange(yy_.yylloc)); + + return 2; + break; + + case 87: + /*! Conditions:: * */ + /*! Rule:: " */ + var rules = (this.topState() === 'macro' ? 'macro\'s' : this.topState()); + + yy_.yyerror(rmCommonWS` + unterminated string constant encountered while lexing + ${rules}. + + Erroneous area: + ` + this.prettyPrintRange(yy_.yylloc)); + + return 2; + break; + + case 88: + /*! Conditions:: * */ + /*! Rule:: ' */ + var rules = (this.topState() === 'macro' ? 'macro\'s' : this.topState()); + + yy_.yyerror(rmCommonWS` + unterminated string constant encountered while lexing + ${rules}. + + Erroneous area: + ` + this.prettyPrintRange(yy_.yylloc)); + + return 2; + break; + + case 89: + /*! Conditions:: * */ + /*! Rule:: ` */ + var rules = (this.topState() === 'macro' ? 'macro\'s' : this.topState()); + + yy_.yyerror(rmCommonWS` + unterminated string constant encountered while lexing + ${rules}. + + Erroneous area: + ` + this.prettyPrintRange(yy_.yylloc)); + + return 2; + break; + + case 90: + /*! Conditions:: * */ + /*! Rule:: . */ + /* b0rk on bad characters */ + yy_.yyerror(rmCommonWS` + unsupported parser input: ${dquote(yy_.yytext)} + while lexing in ${dquote(this.topState())} state. + + Erroneous area: + ` + this.prettyPrintRange(yy_.yylloc)); + + break; + + default: + return this.simpleCaseActionClusters[yyrulenumber]; + } + }, + + simpleCaseActionClusters: { + /*! Conditions:: action */ + /*! Rule:: \/\*[^]*?\*\/ */ + 0: 43, + + /*! Conditions:: action */ + /*! Rule:: \/\/[^\r\n]* */ + 1: 43, + + /*! Conditions:: action */ + /*! Rule:: "{DOUBLEQUOTED_STRING_CONTENT}" */ + 3: 43, + + /*! Conditions:: action */ + /*! Rule:: '{QUOTED_STRING_CONTENT}' */ + 4: 43, + + /*! Conditions:: action */ + /*! Rule:: [/"'][^{}/"']+ */ + 5: 43, + + /*! Conditions:: action */ + /*! Rule:: [^{}/"']+ */ + 6: 43, + + /*! Conditions:: bnf ebnf */ + /*! Rule:: %empty\b */ + 13: 38, + + /*! Conditions:: bnf ebnf */ + /*! Rule:: %epsilon\b */ + 14: 38, + + /*! Conditions:: bnf ebnf */ + /*! Rule:: \u0190 */ + 15: 38, + + /*! Conditions:: bnf ebnf */ + /*! Rule:: \u025B */ + 16: 38, + + /*! Conditions:: bnf ebnf */ + /*! Rule:: \u03B5 */ + 17: 38, + + /*! Conditions:: bnf ebnf */ + /*! Rule:: \u03F5 */ + 18: 38, + + /*! Conditions:: ebnf */ + /*! Rule:: \( */ + 19: 7, + + /*! Conditions:: ebnf */ + /*! Rule:: \) */ + 20: 8, + + /*! Conditions:: ebnf */ + /*! Rule:: \* */ + 21: 9, + + /*! Conditions:: ebnf */ + /*! Rule:: \? */ + 22: 10, + + /*! Conditions:: ebnf */ + /*! Rule:: \+ */ + 23: 11, + + /*! Conditions:: options */ + /*! Rule:: {NAME} */ + 24: 25, + + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: {ID} */ + 38: 24, + + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: {NAME} */ + 39: 25, + + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: \$end\b */ + 40: 40, + + /*! 
Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: \$eof\b */ + 41: 40, + + /*! Conditions:: token */ + /*! Rule:: [^\s\r\n]+ */ + 44: 'TOKEN_WORD', + + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: : */ + 45: 5, + + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: ; */ + 46: 4, + + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: \| */ + 47: 6, + + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: %debug\b */ + 50: 19, + + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: %parser-type\b */ + 51: 32, + + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: %prec\b */ + 52: 41, + + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: %start\b */ + 53: 16, + + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: %left\b */ + 54: 33, + + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: %right\b */ + 55: 34, + + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: %nonassoc\b */ + 56: 35, + + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: %parse-param\b */ + 58: 31, + + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: %code\b */ + 61: 23, + + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: %import\b */ + 62: 22, + + /*! Conditions:: code */ + /*! Rule:: [^\r\n]*(\r|\n)+ */ + 74: 46, + + /*! Conditions:: * */ + /*! Rule:: $ */ + 91: 1 + }, + + rules: [ + /* 0: */ new XRegExp('^(?:\\/\\*[^]*?\\*\\/)', ''), + /* 1: */ /^(?:\/\/[^\r\n]*)/, + /* 2: */ /^(?:\/[^ \/]*?['"{}][^ ]*?\/)/, + /* 3: */ /^(?:"((?:\\"|\\[^"]|[^\n\r"\\])*)")/, + /* 4: */ /^(?:'((?:\\'|\\[^']|[^\n\r'\\])*)')/, + /* 5: */ /^(?:[\/"'][^{}\/"']+)/, + /* 6: */ /^(?:[^{}\/"']+)/, + /* 7: */ /^(?:\{)/, + /* 8: */ /^(?:\})/, + /* 9: */ /^(?:(\r\n|\n|\r))/, + /* 10: */ /^(?:%%)/, + /* 11: */ /^(?:;)/, + /* 12: */ /^(?:%%)/, + /* 13: */ /^(?:%empty\b)/, + /* 14: */ /^(?:%epsilon\b)/, + /* 15: */ /^(?:\u0190)/, + /* 16: */ /^(?:\u025B)/, + /* 17: */ /^(?:\u03B5)/, + /* 18: */ /^(?:\u03F5)/, + /* 19: */ /^(?:\()/, + /* 20: */ /^(?:\))/, + /* 21: */ /^(?:\*)/, + /* 22: */ /^(?:\?)/, + /* 23: */ /^(?:\+)/, + /* 24: */ new XRegExp( + '^(?:([\\p{Alphabetic}_](?:[\\p{Alphabetic}\\p{Number}\\-_]*(?:[\\p{Alphabetic}\\p{Number}_]))?))', + '' + ), + /* 25: */ /^(?:=)/, + /* 26: */ /^(?:"((?:\\"|\\[^"]|[^\n\r"\\])*)")/, + /* 27: */ /^(?:'((?:\\'|\\[^']|[^\n\r'\\])*)')/, + /* 28: */ /^(?:`((?:\\`|\\[^`]|[^\\`])*)`)/, + /* 29: */ /^(?:\/\/[^\r\n]*)/, + /* 30: */ new XRegExp('^(?:\\/\\*[^]*?\\*\\/)', ''), + /* 31: */ /^(?:\S+)/, + /* 32: */ /^(?:(\r\n|\n|\r)([^\S\n\r])+(?=\S))/, + /* 33: */ /^(?:(\r\n|\n|\r))/, + /* 34: */ /^(?:([^\S\n\r])+)/, + /* 35: */ /^(?:([^\S\n\r])+)/, + /* 36: */ /^(?:(\r\n|\n|\r)+)/, + /* 37: */ new XRegExp('^(?:\\[([\\p{Alphabetic}_](?:[\\p{Alphabetic}\\p{Number}_])*)\\])', ''), + /* 38: */ new XRegExp('^(?:([\\p{Alphabetic}_](?:[\\p{Alphabetic}\\p{Number}_])*))', ''), + /* 39: */ new XRegExp( + '^(?:([\\p{Alphabetic}_](?:[\\p{Alphabetic}\\p{Number}\\-_]*(?:[\\p{Alphabetic}\\p{Number}_]))?))', + '' + ), + /* 40: */ /^(?:\$end\b)/, + /* 41: */ /^(?:\$eof\b)/, + /* 42: */ /^(?:"((?:\\"|\\[^"]|[^\n\r"\\])*)")/, + /* 43: */ /^(?:'((?:\\'|\\[^']|[^\n\r'\\])*)')/, + /* 44: */ /^(?:\S+)/, + /* 45: */ /^(?::)/, + /* 46: */ /^(?:;)/, + /* 47: */ /^(?:\|)/, + /* 48: */ /^(?:%%)/, + /* 49: */ /^(?:%ebnf\b)/, + /* 50: */ /^(?:%debug\b)/, + /* 51: */ /^(?:%parser-type\b)/, + /* 52: */ /^(?:%prec\b)/, + /* 53: */ /^(?:%start\b)/, + /* 54: */ /^(?:%left\b)/, + /* 55: */ /^(?:%right\b)/, + /* 56: */ /^(?:%nonassoc\b)/, + /* 57: */ /^(?:%token\b)/, + /* 58: */ 
/^(?:%parse-param\b)/, + /* 59: */ /^(?:%options\b)/, + /* 60: */ new XRegExp( + '^(?:%lex((?:[^\\S\\n\\r])*(?:(?:\\r\\n|\\n|\\r)[^]*?)?(?:\\r\\n|\\n|\\r)(?:[^\\S\\n\\r])*)\\/lex\\b)', + '' + ), + /* 61: */ /^(?:%code\b)/, + /* 62: */ /^(?:%import\b)/, + /* 63: */ /^(?:%include\b)/, + /* 64: */ new XRegExp( + '^(?:%([\\p{Alphabetic}_](?:[\\p{Alphabetic}\\p{Number}\\-_]*(?:[\\p{Alphabetic}\\p{Number}_]))?)([^\\n\\r]*))', + '' + ), + /* 65: */ new XRegExp('^(?:<([\\p{Alphabetic}_](?:[\\p{Alphabetic}\\p{Number}_])*)>)', ''), + /* 66: */ new XRegExp('^(?:\\{\\{([^]*?)\\}\\})', ''), + /* 67: */ new XRegExp('^(?:%\\{([^]*?)%\\})', ''), + /* 68: */ /^(?:\{)/, + /* 69: */ /^(?:->.*)/, + /* 70: */ /^(?:→.*)/, + /* 71: */ /^(?:=>.*)/, + /* 72: */ /^(?:(0[Xx][\dA-Fa-f]+))/, + /* 73: */ /^(?:([1-9]\d*)(?![\dA-FXa-fx]))/, + /* 74: */ /^(?:[^\r\n]*(\r|\n)+)/, + /* 75: */ /^(?:[^\r\n]+)/, + /* 76: */ /^(?:(\r\n|\n|\r))/, + /* 77: */ /^(?:"((?:\\"|\\[^"]|[^\n\r"\\])*)")/, + /* 78: */ /^(?:'((?:\\'|\\[^']|[^\n\r'\\])*)')/, + /* 79: */ /^(?:([^\S\n\r])+)/, + /* 80: */ /^(?:\S+)/, + /* 81: */ /^(?:")/, + /* 82: */ /^(?:')/, + /* 83: */ /^(?:`)/, + /* 84: */ /^(?:")/, + /* 85: */ /^(?:')/, + /* 86: */ /^(?:`)/, + /* 87: */ /^(?:")/, + /* 88: */ /^(?:')/, + /* 89: */ /^(?:`)/, + /* 90: */ /^(?:.)/, + /* 91: */ /^(?:$)/ + ], + + conditions: { + 'action': { + rules: [0, 1, 2, 3, 4, 5, 6, 7, 8, 81, 82, 83, 87, 88, 89, 90, 91], + inclusive: false + }, + + 'code': { + rules: [63, 74, 75, 87, 88, 89, 90, 91], + inclusive: false + }, + + 'path': { + rules: [29, 30, 76, 77, 78, 79, 80, 87, 88, 89, 90, 91], + inclusive: false + }, + + 'options': { + rules: [24, 25, 29, 30, 32, 33, 34, 87, 88, 89, 90, 91], + inclusive: false + }, + + 'option_values': { + rules: [26, 27, 28, 29, 30, 31, 34, 84, 85, 86, 87, 88, 89, 90, 91], + inclusive: false + }, + + 'token': { + rules: [ + 9, + 10, + 11, + 29, + 30, + 35, + 36, + 37, + 38, + 39, + 40, + 41, + 42, + 43, + 44, + 45, + 46, + 47, + 48, + 49, + 50, + 51, + 52, + 53, + 54, + 55, + 56, + 57, + 58, + 59, + 60, + 61, + 62, + 64, + 65, + 66, + 67, + 68, + 69, + 70, + 71, + 72, + 73, + 87, + 88, + 89, + 90, + 91 + ], + + inclusive: true + }, + + 'bnf': { + rules: [ + 12, + 13, + 14, + 15, + 16, + 17, + 18, + 29, + 30, + 35, + 36, + 37, + 38, + 39, + 40, + 41, + 42, + 43, + 45, + 46, + 47, + 48, + 49, + 50, + 51, + 52, + 53, + 54, + 55, + 56, + 57, + 58, + 59, + 60, + 61, + 62, + 63, + 64, + 65, + 66, + 67, + 68, + 69, + 70, + 71, + 72, + 73, + 87, + 88, + 89, + 90, + 91 + ], + + inclusive: true + }, + + 'ebnf': { + rules: [ + 12, + 13, + 14, + 15, + 16, + 17, + 18, + 19, + 20, + 21, + 22, + 23, + 29, + 30, + 35, + 36, + 37, + 38, + 39, + 40, + 41, + 42, + 43, + 45, + 46, + 47, + 48, + 49, + 50, + 51, + 52, + 53, + 54, + 55, + 56, + 57, + 58, + 59, + 60, + 61, + 62, + 63, + 64, + 65, + 66, + 67, + 68, + 69, + 70, + 71, + 72, + 73, + 87, + 88, + 89, + 90, + 91 + ], + + inclusive: true + }, + + 'INITIAL': { + rules: [ + 29, + 30, + 35, + 36, + 37, + 38, + 39, + 40, + 41, + 42, + 43, + 45, + 46, + 47, + 48, + 49, + 50, + 51, + 52, + 53, + 54, + 55, + 56, + 57, + 58, + 59, + 60, + 61, + 62, + 63, + 64, + 65, + 66, + 67, + 68, + 69, + 70, + 71, + 72, + 73, + 87, + 88, + 89, + 90, + 91 + ], + + inclusive: true + } + } + }; + + var rmCommonWS = helpers.rmCommonWS; + var dquote = helpers.dquote; + + function unescQuote(str) { + str = '' + str; + var a = str.split('\\\\'); + + a = a.map(function(s) { + return s.replace(/\\'/g, '\'').replace(/\\"/g, '"'); + }); + + str = a.join('\\\\'); + 
return str; + } + + lexer.warn = function l_warn() { + if (this.yy && this.yy.parser && typeof this.yy.parser.warn === 'function') { + return this.yy.parser.warn.apply(this, arguments); + } else { + console.warn.apply(console, arguments); + } + }; + + lexer.log = function l_log() { + if (this.yy && this.yy.parser && typeof this.yy.parser.log === 'function') { + return this.yy.parser.log.apply(this, arguments); + } else { + console.log.apply(console, arguments); + } + }; + + return lexer; +}(); +parser.lexer = lexer; + +function Parser() { + this.yy = {}; +} +Parser.prototype = parser; +parser.Parser = Parser; + +function yyparse() { + return parser.parse.apply(parser, arguments); +} + + + +var bnf = { + parser, + Parser, + parse: yyparse, + +}; + +var version = '0.6.1-205'; // require('./package.json').version; + +function parse(grammar) { + return bnf.parser.parse(grammar); +} + +// adds a declaration to the grammar +bnf.parser.yy.addDeclaration = function bnfAddDeclaration(grammar, decl) { + if (decl.start) { + grammar.start = decl.start; + } else if (decl.lex) { + grammar.lex = parseLex(decl.lex.text, decl.lex.position); + } else if (decl.operator) { + if (!grammar.operators) grammar.operators = []; + grammar.operators.push(decl.operator); + } else if (decl.token) { + if (!grammar.extra_tokens) grammar.extra_tokens = []; + grammar.extra_tokens.push(decl.token); + } else if (decl.token_list) { + if (!grammar.extra_tokens) grammar.extra_tokens = []; + decl.token_list.forEach(function (tok) { + grammar.extra_tokens.push(tok); + }); + } else if (decl.parseParams) { + if (!grammar.parseParams) grammar.parseParams = []; + grammar.parseParams = grammar.parseParams.concat(decl.parseParams); + } else if (decl.parserType) { + if (!grammar.options) grammar.options = {}; + grammar.options.type = decl.parserType; + } else if (decl.include) { + if (!grammar.moduleInclude) grammar.moduleInclude = ''; + grammar.moduleInclude += decl.include; + } else if (decl.options) { + if (!grammar.options) grammar.options = {}; + // last occurrence of `%options` wins: + for (var i = 0; i < decl.options.length; i++) { + grammar.options[decl.options[i][0]] = decl.options[i][1]; + } + } else if (decl.unknownDecl) { + if (!grammar.unknownDecls) grammar.unknownDecls = []; + grammar.unknownDecls.push(decl.unknownDecl); + } else if (decl.imports) { + if (!grammar.imports) grammar.imports = []; + grammar.imports.push(decl.imports); + } else if (decl.actionInclude) { + if (!grammar.actionInclude) { + grammar.actionInclude = ''; + } + grammar.actionInclude += decl.actionInclude; + } else if (decl.initCode) { + if (!grammar.moduleInit) { + grammar.moduleInit = []; + } + grammar.moduleInit.push(decl.initCode); // {qualifier: , include: } + } +}; + +// parse an embedded lex section +function parseLex(text, position) { + text = text.replace(/(?:^%lex)|(?:\/lex$)/g, ''); + // We want the lex input to start at the given 'position', if any, + // so that error reports will produce a line number and character index + // which matches the original input file: + position = position || {}; + position.range = position.range || []; + var l = position.first_line | 0; + var c = position.range[0] | 0; + var prelude = ''; + if (l > 1) { + prelude += (new Array(l)).join('\n'); + c -= prelude.length; + } + if (c > 3) { + prelude = '// ' + (new Array(c - 3)).join('.') + prelude; + } + return jisonlex.parse(prelude + text); +} + +const ebnf_parser = { + transform +}; + +var ebnfParser = { + parse, + + transform, + + // assistant exports for 
debugging/testing: + bnf_parser: bnf, + ebnf_parser, + bnf_lexer: jisonlex, + + version, +}; + +export default ebnfParser; diff --git a/dist/ebnf-parser-umd-es5.js b/dist/ebnf-parser-umd-es5.js new file mode 100644 index 0000000..701840d --- /dev/null +++ b/dist/ebnf-parser-umd-es5.js @@ -0,0 +1,8836 @@ +'use strict'; + +var _typeof = typeof Symbol === "function" && typeof Symbol.iterator === "symbol" ? function (obj) { return typeof obj; } : function (obj) { return obj && typeof Symbol === "function" && obj.constructor === Symbol && obj !== Symbol.prototype ? "symbol" : typeof obj; }; + +var _templateObject = _taggedTemplateLiteral(['\n Maybe you did not correctly separate trailing code from the grammar rule set with a \'%%\' marker on an otherwise empty line?\n \n Erroneous area:\n ', '\n '], ['\n Maybe you did not correctly separate trailing code from the grammar rule set with a \'%%\' marker on an otherwise empty line?\n \n Erroneous area:\n ', '\n ']), + _templateObject2 = _taggedTemplateLiteral(['\n Maybe you did not correctly separate the parse \'header section\' (token definitions, options, lexer spec, etc.) from the grammar rule set with a \'%%\' on an otherwise empty line?\n \n Erroneous area:\n ', '\n '], ['\n Maybe you did not correctly separate the parse \'header section\' (token definitions, options, lexer spec, etc.) from the grammar rule set with a \'%%\' on an otherwise empty line?\n \n Erroneous area:\n ', '\n ']), + _templateObject3 = _taggedTemplateLiteral(['\n The extra parser module code section (a.k.a. \'epilogue\') does not compile: ', '\n \n Erroneous area:\n ', '\n '], ['\n The extra parser module code section (a.k.a. \'epilogue\') does not compile: ', '\n \n Erroneous area:\n ', '\n ']), + _templateObject4 = _taggedTemplateLiteral(['\n action header code block does not compile: ', '\n \n Erroneous area:\n ', '\n '], ['\n action header code block does not compile: ', '\n \n Erroneous area:\n ', '\n ']), + _templateObject5 = _taggedTemplateLiteral(['\n declaration list error?\n \n Erroneous area:\n ', '\n '], ['\n declaration list error?\n \n Erroneous area:\n ', '\n ']), + _templateObject6 = _taggedTemplateLiteral(['\n action code block does not compile: ', '\n \n Erroneous area:\n ', '\n '], ['\n action code block does not compile: ', '\n \n Erroneous area:\n ', '\n ']), + _templateObject7 = _taggedTemplateLiteral(['\n You did not specify a legal file path for the \'%import\' initialization code statement, which must have the format:\n \n %import qualifier_name file_path\n \n Erroneous area:\n ', '\n '], ['\n You did not specify a legal file path for the \'%import\' initialization code statement, which must have the format:\n \n %import qualifier_name file_path\n \n Erroneous area:\n ', '\n ']), + _templateObject8 = _taggedTemplateLiteral(['\n Each \'%import\'-ed initialization code section must be qualified by a name, e.g. \'required\' before the import path itself:\n \n %import qualifier_name file_path\n \n Erroneous area:\n ', '\n '], ['\n Each \'%import\'-ed initialization code section must be qualified by a name, e.g. 
\'required\' before the import path itself:\n \n %import qualifier_name file_path\n \n Erroneous area:\n ', '\n ']), + _templateObject9 = _taggedTemplateLiteral(['\n %code "', '" initialization section action code block does not compile: ', '\n \n Erroneous area:\n ', '\n '], ['\n %code "', '" initialization section action code block does not compile: ', '\n \n Erroneous area:\n ', '\n ']), + _templateObject10 = _taggedTemplateLiteral(['\n Each \'%code\' initialization code section must be qualified by a name, e.g. \'required\' before the action code itself:\n \n %code qualifier_name {action code}\n \n Erroneous area:\n ', '\n '], ['\n Each \'%code\' initialization code section must be qualified by a name, e.g. \'required\' before the action code itself:\n \n %code qualifier_name {action code}\n \n Erroneous area:\n ', '\n ']), + _templateObject11 = _taggedTemplateLiteral(['\n %start token error?\n \n Erroneous area:\n ', '\n '], ['\n %start token error?\n \n Erroneous area:\n ', '\n ']), + _templateObject12 = _taggedTemplateLiteral(['\n %token definition list error?\n \n Erroneous area:\n ', '\n '], ['\n %token definition list error?\n \n Erroneous area:\n ', '\n ']), + _templateObject13 = _taggedTemplateLiteral(['\n %import name or source filename missing maybe?\n \n Erroneous area:\n ', '\n '], ['\n %import name or source filename missing maybe?\n \n Erroneous area:\n ', '\n ']), + _templateObject14 = _taggedTemplateLiteral(['\n %options ill defined / error?\n \n Erroneous area:\n ', '\n '], ['\n %options ill defined / error?\n \n Erroneous area:\n ', '\n ']), + _templateObject15 = _taggedTemplateLiteral(['\n %options don\'t seem terminated?\n \n Erroneous area:\n ', '\n '], ['\n %options don\'t seem terminated?\n \n Erroneous area:\n ', '\n ']), + _templateObject16 = _taggedTemplateLiteral(['\n named %option value error for ', '?\n \n Erroneous area:\n ', '\n '], ['\n named %option value error for ', '?\n \n Erroneous area:\n ', '\n ']), + _templateObject17 = _taggedTemplateLiteral(['\n named %option value assignment error?\n \n Erroneous area:\n ', '\n '], ['\n named %option value assignment error?\n \n Erroneous area:\n ', '\n ']), + _templateObject18 = _taggedTemplateLiteral(['\n %parse-params declaration error?\n \n Erroneous area:\n ', '\n '], ['\n %parse-params declaration error?\n \n Erroneous area:\n ', '\n ']), + _templateObject19 = _taggedTemplateLiteral(['\n %parser-type declaration error?\n \n Erroneous area:\n ', '\n '], ['\n %parser-type declaration error?\n \n Erroneous area:\n ', '\n ']), + _templateObject20 = _taggedTemplateLiteral(['\n operator token list error in an associativity statement?\n \n Erroneous area:\n ', '\n '], ['\n operator token list error in an associativity statement?\n \n Erroneous area:\n ', '\n ']), + _templateObject21 = _taggedTemplateLiteral(['\n rule production declaration error?\n \n Erroneous area:\n ', '\n '], ['\n rule production declaration error?\n \n Erroneous area:\n ', '\n ']), + _templateObject22 = _taggedTemplateLiteral(['\n rule production declaration error: did you terminate the rule production set with a semicolon?\n \n Erroneous area:\n ', '\n '], ['\n rule production declaration error: did you terminate the rule production set with a semicolon?\n \n Erroneous area:\n ', '\n ']), + _templateObject23 = _taggedTemplateLiteral(['\n rule id should be followed by a colon, but that one seems missing?\n \n Erroneous area:\n ', '\n '], ['\n rule id should be followed by a colon, but that one seems missing?\n \n Erroneous area:\n ', '\n 
']), + _templateObject24 = _taggedTemplateLiteral(['\n rule alternative production declaration error?\n \n Erroneous area:\n ', '\n '], ['\n rule alternative production declaration error?\n \n Erroneous area:\n ', '\n ']), + _templateObject25 = _taggedTemplateLiteral(['\n multiple alternative rule productions should be separated by a \'|\' pipe character, not a \':\' colon!\n \n Erroneous area:\n ', '\n '], ['\n multiple alternative rule productions should be separated by a \'|\' pipe character, not a \':\' colon!\n \n Erroneous area:\n ', '\n ']), + _templateObject26 = _taggedTemplateLiteral(['\n production rule action code block does not compile: ', '\n \n Erroneous area:\n ', '\n '], ['\n production rule action code block does not compile: ', '\n \n Erroneous area:\n ', '\n ']), + _templateObject27 = _taggedTemplateLiteral(['\n You cannot specify a precedence override for an epsilon (a.k.a. empty) rule!\n \n Erroneous area:\n ', '\n '], ['\n You cannot specify a precedence override for an epsilon (a.k.a. empty) rule!\n \n Erroneous area:\n ', '\n ']), + _templateObject28 = _taggedTemplateLiteral(['\n epsilon production rule action code block does not compile: ', '\n \n Erroneous area:\n ', '\n '], ['\n epsilon production rule action code block does not compile: ', '\n \n Erroneous area:\n ', '\n ']), + _templateObject29 = _taggedTemplateLiteral(['\n %epsilon rule action declaration error?\n \n Erroneous area:\n ', '\n '], ['\n %epsilon rule action declaration error?\n \n Erroneous area:\n ', '\n ']), + _templateObject30 = _taggedTemplateLiteral(['\n Seems you did not correctly bracket a grammar rule sublist in \'( ... )\' brackets.\n \n Erroneous area:\n ', '\n '], ['\n Seems you did not correctly bracket a grammar rule sublist in \'( ... )\' brackets.\n \n Erroneous area:\n ', '\n ']), + _templateObject31 = _taggedTemplateLiteral(['\n %prec precedence override declaration error?\n \n Erroneous precedence declaration:\n ', '\n '], ['\n %prec precedence override declaration error?\n \n Erroneous precedence declaration:\n ', '\n ']), + _templateObject32 = _taggedTemplateLiteral(['\n Seems you did not correctly bracket a parser rule action block in curly braces: \'{ ... }\'.\n \n Erroneous area:\n ', '\n '], ['\n Seems you did not correctly bracket a parser rule action block in curly braces: \'{ ... }\'.\n \n Erroneous area:\n ', '\n ']), + _templateObject33 = _taggedTemplateLiteral(['\n Seems you did not correctly match curly braces \'{ ... }\' in a parser rule action block.\n \n Erroneous area:\n ', '\n '], ['\n Seems you did not correctly match curly braces \'{ ... 
}\' in a parser rule action block.\n \n Erroneous area:\n ', '\n ']), + _templateObject34 = _taggedTemplateLiteral(['\n included action code file "', '" does not compile: ', '\n \n Erroneous area:\n ', '\n '], ['\n included action code file "', '" does not compile: ', '\n \n Erroneous area:\n ', '\n ']), + _templateObject35 = _taggedTemplateLiteral(['\n %include MUST be followed by a valid file path.\n \n Erroneous path:\n '], ['\n %include MUST be followed by a valid file path.\n \n Erroneous path:\n ']), + _templateObject36 = _taggedTemplateLiteral(['\n module code declaration error?\n \n Erroneous area:\n '], ['\n module code declaration error?\n \n Erroneous area:\n ']), + _templateObject37 = _taggedTemplateLiteral(['\n EBNF: ignoring unsupported parser option ', '\n while lexing in ', ' state.\n\n Erroneous area:\n '], ['\n EBNF: ignoring unsupported parser option ', '\n while lexing in ', ' state.\n\n Erroneous area:\n ']), + _templateObject38 = _taggedTemplateLiteral(['\n unterminated string constant in lexer rule action block.\n\n Erroneous area:\n '], ['\n unterminated string constant in lexer rule action block.\n\n Erroneous area:\n ']), + _templateObject39 = _taggedTemplateLiteral(['\n unterminated string constant in %options entry.\n\n Erroneous area:\n '], ['\n unterminated string constant in %options entry.\n\n Erroneous area:\n ']), + _templateObject40 = _taggedTemplateLiteral(['\n unterminated string constant encountered while lexing\n ', '.\n\n Erroneous area:\n '], ['\n unterminated string constant encountered while lexing\n ', '.\n\n Erroneous area:\n ']), + _templateObject41 = _taggedTemplateLiteral(['\n unsupported parser input: ', '\n while lexing in ', ' state.\n \n Erroneous area:\n '], ['\n unsupported parser input: ', '\n while lexing in ', ' state.\n \n Erroneous area:\n ']); + +function _taggedTemplateLiteral(strings, raw) { return Object.freeze(Object.defineProperties(strings, { raw: { value: Object.freeze(raw) } })); } + +(function (global, factory) { + (typeof exports === 'undefined' ? 'undefined' : _typeof(exports)) === 'object' && typeof module !== 'undefined' ? module.exports = factory(require('@gerhobbelt/xregexp'), require('jison-helpers-lib'), require('fs'), require('@gerhobbelt/lex-parser')) : typeof define === 'function' && define.amd ? define(['@gerhobbelt/xregexp', 'jison-helpers-lib', 'fs', '@gerhobbelt/lex-parser'], factory) : global['ebnf-parser'] = factory(global.XRegExp, global.helpers, global.fs, global.jisonlex); +})(undefined, function (XRegExp, helpers, fs, jisonlex) { + 'use strict'; + + XRegExp = XRegExp && XRegExp.hasOwnProperty('default') ? XRegExp['default'] : XRegExp; + helpers = helpers && helpers.hasOwnProperty('default') ? helpers['default'] : helpers; + fs = fs && fs.hasOwnProperty('default') ? fs['default'] : fs; + jisonlex = jisonlex && jisonlex.hasOwnProperty('default') ? jisonlex['default'] : jisonlex; + + /* parser generated by jison 0.6.1-205 */ + + /* + * Returns a Parser object of the following structure: + * + * Parser: { + * yy: {} The so-called "shared state" or rather the *source* of it; + * the real "shared state" `yy` passed around to + * the rule actions, etc. is a derivative/copy of this one, + * not a direct reference! 
+ * } + * + * Parser.prototype: { + * yy: {}, + * EOF: 1, + * TERROR: 2, + * + * trace: function(errorMessage, ...), + * + * JisonParserError: function(msg, hash), + * + * quoteName: function(name), + * Helper function which can be overridden by user code later on: put suitable + * quotes around literal IDs in a description string. + * + * originalQuoteName: function(name), + * The basic quoteName handler provided by JISON. + * `cleanupAfterParse()` will clean up and reset `quoteName()` to reference this function + * at the end of the `parse()`. + * + * describeSymbol: function(symbol), + * Return a more-or-less human-readable description of the given symbol, when + * available, or the symbol itself, serving as its own 'description' for lack + * of something better to serve up. + * + * Return NULL when the symbol is unknown to the parser. + * + * symbols_: {associative list: name ==> number}, + * terminals_: {associative list: number ==> name}, + * nonterminals: {associative list: rule-name ==> {associative list: number ==> rule-alt}}, + * terminal_descriptions_: (if there are any) {associative list: number ==> description}, + * productions_: [...], + * + * performAction: function parser__performAction(yytext, yyleng, yylineno, yyloc, yystate, yysp, yyvstack, yylstack, yystack, yysstack), + * + * The function parameters and `this` have the following value/meaning: + * - `this` : reference to the `yyval` internal object, which has members (`$` and `_$`) + * to store/reference the rule value `$$` and location info `@$`. + * + * One important thing to note about `this` a.k.a. `yyval`: every *reduce* action gets + * to see the same object via the `this` reference, i.e. if you wish to carry custom + * data from one reduce action through to the next within a single parse run, then you + * may get nasty and use `yyval` a.k.a. `this` for storing you own semi-permanent data. + * + * `this.yy` is a direct reference to the `yy` shared state object. + * + * `%parse-param`-specified additional `parse()` arguments have been added to this `yy` + * object at `parse()` start and are therefore available to the action code via the + * same named `yy.xxxx` attributes (where `xxxx` represents a identifier name from + * the %parse-param` list. + * + * - `yytext` : reference to the lexer value which belongs to the last lexer token used + * to match this rule. This is *not* the look-ahead token, but the last token + * that's actually part of this rule. + * + * Formulated another way, `yytext` is the value of the token immediately preceeding + * the current look-ahead token. + * Caveats apply for rules which don't require look-ahead, such as epsilon rules. + * + * - `yyleng` : ditto as `yytext`, only now for the lexer.yyleng value. + * + * - `yylineno`: ditto as `yytext`, only now for the lexer.yylineno value. + * + * - `yyloc` : ditto as `yytext`, only now for the lexer.yylloc lexer token location info. + * + * WARNING: since jison 0.4.18-186 this entry may be NULL/UNDEFINED instead + * of an empty object when no suitable location info can be provided. + * + * - `yystate` : the current parser state number, used internally for dispatching and + * executing the action code chunk matching the rule currently being reduced. + * + * - `yysp` : the current state stack position (a.k.a. 'stack pointer') + * + * This one comes in handy when you are going to do advanced things to the parser + * stacks, all of which are accessible from your action code (see the next entries below). 
+ * + * Also note that you can access this and other stack index values using the new double-hash + * syntax, i.e. `##$ === ##0 === yysp`, while `##1` is the stack index for all things + * related to the first rule term, just like you have `$1`, `@1` and `#1`. + * This is made available to write very advanced grammar action rules, e.g. when you want + * to investigate the parse state stack in your action code, which would, for example, + * be relevant when you wish to implement error diagnostics and reporting schemes similar + * to the work described here: + * + * + Pottier, F., 2016. Reachability and error diagnosis in LR(1) automata. + * In Journées Francophones des Languages Applicatifs. + * + * + Jeffery, C.L., 2003. Generating LR syntax error messages from examples. + * ACM Transactions on Programming Languages and Systems (TOPLAS), 25(5), pp.631–640. + * + * - `yyrulelength`: the current rule's term count, i.e. the number of entries occupied on the stack. + * + * This one comes in handy when you are going to do advanced things to the parser + * stacks, all of which are accessible from your action code (see the next entries below). + * + * - `yyvstack`: reference to the parser value stack. Also accessed via the `$1` etc. + * constructs. + * + * - `yylstack`: reference to the parser token location stack. Also accessed via + * the `@1` etc. constructs. + * + * WARNING: since jison 0.4.18-186 this array MAY contain slots which are + * UNDEFINED rather than an empty (location) object, when the lexer/parser + * action code did not provide a suitable location info object when such a + * slot was filled! + * + * - `yystack` : reference to the parser token id stack. Also accessed via the + * `#1` etc. constructs. + * + * Note: this is a bit of a **white lie** as we can statically decode any `#n` reference to + * its numeric token id value, hence that code wouldn't need the `yystack` but *you* might + * want access this array for your own purposes, such as error analysis as mentioned above! + * + * Note that this stack stores the current stack of *tokens*, that is the sequence of + * already parsed=reduced *nonterminals* (tokens representing rules) and *terminals* + * (lexer tokens *shifted* onto the stack until the rule they belong to is found and + * *reduced*. + * + * - `yysstack`: reference to the parser state stack. This one carries the internal parser + * *states* such as the one in `yystate`, which are used to represent + * the parser state machine in the *parse table*. *Very* *internal* stuff, + * what can I say? If you access this one, you're clearly doing wicked things + * + * - `...` : the extra arguments you specified in the `%parse-param` statement in your + * grammar definition file. + * + * table: [...], + * State transition table + * ---------------------- + * + * index levels are: + * - `state` --> hash table + * - `symbol` --> action (number or array) + * + * If the `action` is an array, these are the elements' meaning: + * - index [0]: 1 = shift, 2 = reduce, 3 = accept + * - index [1]: GOTO `state` + * + * If the `action` is a number, it is the GOTO `state` + * + * defaultActions: {...}, + * + * parseError: function(str, hash, ExceptionClass), + * yyError: function(str, ...), + * yyRecovering: function(), + * yyErrOk: function(), + * yyClearIn: function(), + * + * constructParseErrorInfo: function(error_message, exception_object, expected_token_set, is_recoverable), + * Helper function **which will be set up during the first invocation of the `parse()` method**. 
+ * Produces a new errorInfo 'hash object' which can be passed into `parseError()`. + * See it's use in this parser kernel in many places; example usage: + * + * var infoObj = parser.constructParseErrorInfo('fail!', null, + * parser.collect_expected_token_set(state), true); + * var retVal = parser.parseError(infoObj.errStr, infoObj, parser.JisonParserError); + * + * originalParseError: function(str, hash, ExceptionClass), + * The basic `parseError` handler provided by JISON. + * `cleanupAfterParse()` will clean up and reset `parseError()` to reference this function + * at the end of the `parse()`. + * + * options: { ... parser %options ... }, + * + * parse: function(input[, args...]), + * Parse the given `input` and return the parsed value (or `true` when none was provided by + * the root action, in which case the parser is acting as a *matcher*). + * You MAY use the additional `args...` parameters as per `%parse-param` spec of this grammar: + * these extra `args...` are added verbatim to the `yy` object reference as member variables. + * + * WARNING: + * Parser's additional `args...` parameters (via `%parse-param`) MAY conflict with + * any attributes already added to `yy` by the jison run-time; + * when such a collision is detected an exception is thrown to prevent the generated run-time + * from silently accepting this confusing and potentially hazardous situation! + * + * The lexer MAY add its own set of additional parameters (via the `%parse-param` line in + * the lexer section of the grammar spec): these will be inserted in the `yy` shared state + * object and any collision with those will be reported by the lexer via a thrown exception. + * + * cleanupAfterParse: function(resultValue, invoke_post_methods, do_not_nuke_errorinfos), + * Helper function **which will be set up during the first invocation of the `parse()` method**. + * This helper API is invoked at the end of the `parse()` call, unless an exception was thrown + * and `%options no-try-catch` has been defined for this grammar: in that case this helper MAY + * be invoked by calling user code to ensure the `post_parse` callbacks are invoked and + * the internal parser gets properly garbage collected under these particular circumstances. + * + * yyMergeLocationInfo: function(first_index, last_index, first_yylloc, last_yylloc, dont_look_back), + * Helper function **which will be set up during the first invocation of the `parse()` method**. + * This helper API can be invoked to calculate a spanning `yylloc` location info object. + * + * Note: %epsilon rules MAY specify no `first_index` and `first_yylloc`, in which case + * this function will attempt to obtain a suitable location marker by inspecting the location stack + * backwards. + * + * For more info see the documentation comment further below, immediately above this function's + * implementation. + * + * lexer: { + * yy: {...}, A reference to the so-called "shared state" `yy` once + * received via a call to the `.setInput(input, yy)` lexer API. 
+ * EOF: 1, + * ERROR: 2, + * JisonLexerError: function(msg, hash), + * parseError: function(str, hash, ExceptionClass), + * setInput: function(input, [yy]), + * input: function(), + * unput: function(str), + * more: function(), + * reject: function(), + * less: function(n), + * pastInput: function(n), + * upcomingInput: function(n), + * showPosition: function(), + * test_match: function(regex_match_array, rule_index, ...), + * next: function(...), + * lex: function(...), + * begin: function(condition), + * pushState: function(condition), + * popState: function(), + * topState: function(), + * _currentRules: function(), + * stateStackSize: function(), + * cleanupAfterLex: function() + * + * options: { ... lexer %options ... }, + * + * performAction: function(yy, yy_, $avoiding_name_collisions, YY_START, ...), + * rules: [...], + * conditions: {associative list: name ==> set}, + * } + * } + * + * + * token location info (@$, _$, etc.): { + * first_line: n, + * last_line: n, + * first_column: n, + * last_column: n, + * range: [start_number, end_number] + * (where the numbers are indexes into the input string, zero-based) + * } + * + * --- + * + * The `parseError` function receives a 'hash' object with these members for lexer and + * parser errors: + * + * { + * text: (matched text) + * token: (the produced terminal token, if any) + * token_id: (the produced terminal token numeric ID, if any) + * line: (yylineno) + * loc: (yylloc) + * } + * + * parser (grammar) errors will also provide these additional members: + * + * { + * expected: (array describing the set of expected tokens; + * may be UNDEFINED when we cannot easily produce such a set) + * state: (integer (or array when the table includes grammar collisions); + * represents the current internal state of the parser kernel. + * can, for example, be used to pass to the `collect_expected_token_set()` + * API to obtain the expected token set) + * action: (integer; represents the current internal action which will be executed) + * new_state: (integer; represents the next/planned internal state, once the current + * action has executed) + * recoverable: (boolean: TRUE when the parser MAY have an error recovery rule + * available for this particular error) + * state_stack: (array: the current parser LALR/LR internal state stack; this can be used, + * for instance, for advanced error analysis and reporting) + * value_stack: (array: the current parser LALR/LR internal `$$` value stack; this can be used, + * for instance, for advanced error analysis and reporting) + * location_stack: (array: the current parser LALR/LR internal location stack; this can be used, + * for instance, for advanced error analysis and reporting) + * yy: (object: the current parser internal "shared state" `yy` + * as is also available in the rule actions; this can be used, + * for instance, for advanced error analysis and reporting) + * lexer: (reference to the current lexer instance used by the parser) + * parser: (reference to the current parser instance) + * } + * + * while `this` will reference the current parser instance. 
+ * + * When `parseError` is invoked by the lexer, `this` will still reference the related *parser* + * instance, while these additional `hash` fields will also be provided: + * + * { + * lexer: (reference to the current lexer instance which reported the error) + * } + * + * When `parseError` is invoked by the parser due to a **JavaScript exception** being fired + * from either the parser or lexer, `this` will still reference the related *parser* + * instance, while these additional `hash` fields will also be provided: + * + * { + * exception: (reference to the exception thrown) + * } + * + * Please do note that in the latter situation, the `expected` field will be omitted as + * this type of failure is assumed not to be due to *parse errors* but rather due to user + * action code in either parser or lexer failing unexpectedly. + * + * --- + * + * You can specify parser options by setting / modifying the `.yy` object of your Parser instance. + * These options are available: + * + * ### options which are global for all parser instances + * + * Parser.pre_parse: function(yy) + * optional: you can specify a pre_parse() function in the chunk following + * the grammar, i.e. after the last `%%`. + * Parser.post_parse: function(yy, retval, parseInfo) { return retval; } + * optional: you can specify a post_parse() function in the chunk following + * the grammar, i.e. after the last `%%`. When it does not return any value, + * the parser will return the original `retval`. + * + * ### options which can be set up per parser instance + * + * yy: { + * pre_parse: function(yy) + * optional: is invoked before the parse cycle starts (and before the first + * invocation of `lex()`) but immediately after the invocation of + * `parser.pre_parse()`). + * post_parse: function(yy, retval, parseInfo) { return retval; } + * optional: is invoked when the parse terminates due to success ('accept') + * or failure (even when exceptions are thrown). + * `retval` contains the return value to be produced by `Parser.parse()`; + * this function can override the return value by returning another. + * When it does not return any value, the parser will return the original + * `retval`. + * This function is invoked immediately before `parser.post_parse()`. + * + * parseError: function(str, hash, ExceptionClass) + * optional: overrides the default `parseError` function. + * quoteName: function(name), + * optional: overrides the default `quoteName` function. + * } + * + * parser.lexer.options: { + * pre_lex: function() + * optional: is invoked before the lexer is invoked to produce another token. + * `this` refers to the Lexer object. + * post_lex: function(token) { return token; } + * optional: is invoked when the lexer has produced a token `token`; + * this function can override the returned token value by returning another. + * When it does not return any (truthy) value, the lexer will return + * the original `token`. + * `this` refers to the Lexer object. + * + * ranges: boolean + * optional: `true` ==> token location info will include a .range[] member. + * flex: boolean + * optional: `true` ==> flex-like lexing behaviour where the rules are tested + * exhaustively to find the longest match. + * backtrack_lexer: boolean + * optional: `true` ==> lexer regexes are tested in order and for invoked; + * the lexer terminates the scan when a token is returned by the action code. + * xregexp: boolean + * optional: `true` ==> lexer rule regexes are "extended regex format" requiring the + * `XRegExp` library. 
When this `%option` has not been specified at compile time, all lexer + * rule regexes have been written as standard JavaScript RegExp expressions. + * } + */ + + // See also: + // http://stackoverflow.com/questions/1382107/whats-a-good-way-to-extend-error-in-javascript/#35881508 + // but we keep the prototype.constructor and prototype.name assignment lines too for compatibility + // with userland code which might access the derived class in a 'classic' way. + function JisonParserError$1(msg, hash) { + Object.defineProperty(this, 'name', { + enumerable: false, + writable: false, + value: 'JisonParserError' + }); + + if (msg == null) msg = '???'; + + Object.defineProperty(this, 'message', { + enumerable: false, + writable: true, + value: msg + }); + + this.hash = hash; + + var stacktrace; + if (hash && hash.exception instanceof Error) { + var ex2 = hash.exception; + this.message = ex2.message || msg; + stacktrace = ex2.stack; + } + if (!stacktrace) { + if (Error.hasOwnProperty('captureStackTrace')) { + // V8/Chrome engine + Error.captureStackTrace(this, this.constructor); + } else { + stacktrace = new Error(msg).stack; + } + } + if (stacktrace) { + Object.defineProperty(this, 'stack', { + enumerable: false, + writable: false, + value: stacktrace + }); + } + } + + if (typeof Object.setPrototypeOf === 'function') { + Object.setPrototypeOf(JisonParserError$1.prototype, Error.prototype); + } else { + JisonParserError$1.prototype = Object.create(Error.prototype); + } + JisonParserError$1.prototype.constructor = JisonParserError$1; + JisonParserError$1.prototype.name = 'JisonParserError'; + + // helper: reconstruct the productions[] table + function bp$1(s) { + var rv = []; + var p = s.pop; + var r = s.rule; + for (var i = 0, l = p.length; i < l; i++) { + rv.push([p[i], r[i]]); + } + return rv; + } + + // helper: reconstruct the 'goto' table + function bt$1(s) { + var rv = []; + var d = s.len; + var y = s.symbol; + var t = s.type; + var a = s.state; + var m = s.mode; + var g = s.goto; + for (var i = 0, l = d.length; i < l; i++) { + var n = d[i]; + var q = {}; + for (var j = 0; j < n; j++) { + var z = y.shift(); + switch (t.shift()) { + case 2: + q[z] = [m.shift(), g.shift()]; + break; + + case 0: + q[z] = a.shift(); + break; + + default: + // type === 1: accept + q[z] = [3]; + } + } + rv.push(q); + } + return rv; + } + + // helper: runlength encoding with increment step: code, length: step (default step = 0) + // `this` references an array + function s$1(c, l, a) { + a = a || 0; + for (var i = 0; i < l; i++) { + this.push(c); + c += a; + } + } + + // helper: duplicate sequence from *relative* offset and length. + // `this` references an array + function c$1(i, l) { + i = this.length - i; + for (l += i; i < l; i++) { + this.push(this[i]); + } + } + + // helper: unpack an array using helpers and data, all passed in an array argument 'a'. + function u$1(a) { + var rv = []; + for (var i = 0, l = a.length; i < l; i++) { + var e = a[i]; + // Is this entry a helper function? + if (typeof e === 'function') { + i++; + e.apply(rv, a[i]); + } else { + rv.push(e); + } + } + return rv; + } + + var parser$1 = { + // Code Generator Information Report + // --------------------------------- + // + // Options: + // + // default action mode: ............. classic,merge + // no try..catch: ................... false + // no default resolve on conflict: false + // on-demand look-ahead: ............ false + // error recovery token skip maximum: 3 + // yyerror in parse actions is: ..... 
NOT recoverable, + // yyerror in lexer actions and other non-fatal lexer are: + // .................................. NOT recoverable, + // debug grammar/output: ............ false + // has partial LR conflict upgrade: true + // rudimentary token-stack support: false + // parser table compression mode: ... 2 + // export debug tables: ............. false + // export *all* tables: ............. false + // module type: ..................... es + // parser engine type: .............. lalr + // output main() in the module: ..... true + // has user-specified main(): ....... false + // has user-specified require()/import modules for main(): + // .................................. false + // number of expected conflicts: .... 0 + // + // + // Parser Analysis flags: + // + // no significant actions (parser is a language matcher only): + // .................................. false + // uses yyleng: ..................... false + // uses yylineno: ................... false + // uses yytext: ..................... false + // uses yylloc: ..................... false + // uses ParseError API: ............. false + // uses YYERROR: .................... false + // uses YYRECOVERING: ............... false + // uses YYERROK: .................... false + // uses YYCLEARIN: .................. false + // tracks rule values: .............. true + // assigns rule values: ............. true + // uses location tracking: .......... false + // assigns location: ................ false + // uses yystack: .................... false + // uses yysstack: ................... false + // uses yysp: ....................... true + // uses yyrulelength: ............... false + // uses yyMergeLocationInfo API: .... false + // has error recovery: .............. false + // has error reporting: ............. false + // + // --------- END OF REPORT ----------- + + trace: function no_op_trace() {}, + JisonParserError: JisonParserError$1, + yy: {}, + options: { + type: "lalr", + hasPartialLrUpgradeOnConflict: true, + errorRecoveryTokenDiscardCount: 3 + }, + symbols_: { + "$accept": 0, + "$end": 1, + "(": 4, + ")": 5, + "*": 6, + "+": 8, + "?": 7, + "ALIAS": 9, + "EOF": 1, + "SYMBOL": 10, + "error": 2, + "expression": 16, + "handle": 13, + "handle_list": 12, + "production": 11, + "rule": 14, + "suffix": 17, + "suffixed_expression": 15, + "|": 3 + }, + terminals_: { + 1: "EOF", + 2: "error", + 3: "|", + 4: "(", + 5: ")", + 6: "*", + 7: "?", + 8: "+", + 9: "ALIAS", + 10: "SYMBOL" + }, + TERROR: 2, + EOF: 1, + + // internals: defined here so the object *structure* doesn't get modified by parse() et al, + // thus helping JIT compilers like Chrome V8. + originalQuoteName: null, + originalParseError: null, + cleanupAfterParse: null, + constructParseErrorInfo: null, + yyMergeLocationInfo: null, + + __reentrant_call_depth: 0, // INTERNAL USE ONLY + __error_infos: [], // INTERNAL USE ONLY: the set of parseErrorInfo objects created since the last cleanup + __error_recovery_infos: [], // INTERNAL USE ONLY: the set of parseErrorInfo objects created since the last cleanup + + // APIs which will be set up depending on user action code analysis: + //yyRecovering: 0, + //yyErrOk: 0, + //yyClearIn: 0, + + // Helper APIs + // ----------- + + // Helper function which can be overridden by user code later on: put suitable quotes around + // literal IDs in a description string. + quoteName: function parser_quoteName(id_str) { + return '"' + id_str + '"'; + }, + + // Return the name of the given symbol (terminal or non-terminal) as a string, when available. 
+ // + // Return NULL when the symbol is unknown to the parser. + getSymbolName: function parser_getSymbolName(symbol) { + if (this.terminals_[symbol]) { + return this.terminals_[symbol]; + } + + // Otherwise... this might refer to a RULE token i.e. a non-terminal: see if we can dig that one up. + // + // An example of this may be where a rule's action code contains a call like this: + // + // parser.getSymbolName(#$) + // + // to obtain a human-readable name of the current grammar rule. + var s = this.symbols_; + for (var key in s) { + if (s[key] === symbol) { + return key; + } + } + return null; + }, + + // Return a more-or-less human-readable description of the given symbol, when available, + // or the symbol itself, serving as its own 'description' for lack of something better to serve up. + // + // Return NULL when the symbol is unknown to the parser. + describeSymbol: function parser_describeSymbol(symbol) { + if (symbol !== this.EOF && this.terminal_descriptions_ && this.terminal_descriptions_[symbol]) { + return this.terminal_descriptions_[symbol]; + } else if (symbol === this.EOF) { + return 'end of input'; + } + var id = this.getSymbolName(symbol); + if (id) { + return this.quoteName(id); + } + return null; + }, + + // Produce a (more or less) human-readable list of expected tokens at the point of failure. + // + // The produced list may contain token or token set descriptions instead of the tokens + // themselves to help turning this output into something that easier to read by humans + // unless `do_not_describe` parameter is set, in which case a list of the raw, *numeric*, + // expected terminals and nonterminals is produced. + // + // The returned list (array) will not contain any duplicate entries. + collect_expected_token_set: function parser_collect_expected_token_set(state, do_not_describe) { + var TERROR = this.TERROR; + var tokenset = []; + var check = {}; + // Has this (error?) state been outfitted with a custom expectations description text for human consumption? + // If so, use that one instead of the less palatable token set. + if (!do_not_describe && this.state_descriptions_ && this.state_descriptions_[state]) { + return [this.state_descriptions_[state]]; + } + for (var p in this.table[state]) { + p = +p; + if (p !== TERROR) { + var d = do_not_describe ? p : this.describeSymbol(p); + if (d && !check[d]) { + tokenset.push(d); + check[d] = true; // Mark this token description as already mentioned to prevent outputting duplicate entries. + } + } + } + return tokenset; + }, + productions_: bp$1({ + pop: u$1([11, 12, 12, 13, 13, 14, 14, 15, 15, 16, 16, s$1, [17, 4]]), + rule: u$1([2, 1, 3, 0, 1, 1, 2, 3, c$1, [8, 6], 1]) + }), + performAction: function parser__PerformAction(yystate /* action[1] */, yysp, yyvstack) { + + /* this == yyval */ + + // the JS engine itself can go and remove these statements when `yy` turns out to be unused in any action code! + switch (yystate) { + case 0: + /*! Production:: $accept : production $end */ + + // default action (generated by JISON mode classic/merge :: VT,VA,-,-,-,-,-,-): + this.$ = yyvstack[yysp - 1]; + // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,-,-,-,-) + break; + + case 1: + /*! Production:: production : handle EOF */ + + // default action (generated by JISON mode classic/merge :: VT,VA,-,-,-,-,-,-): + this.$ = yyvstack[yysp - 1]; + // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,-,-,-,-) + + + return yyvstack[yysp - 1]; + break; + + case 2: + /*! 
Production:: handle_list : handle */ + case 6: + /*! Production:: rule : suffixed_expression */ + + this.$ = [yyvstack[yysp]]; + break; + + case 3: + /*! Production:: handle_list : handle_list "|" handle */ + + yyvstack[yysp - 2].push(yyvstack[yysp]); + this.$ = yyvstack[yysp - 2]; + break; + + case 4: + /*! Production:: handle : %epsilon */ + + this.$ = []; + break; + + case 5: + /*! Production:: handle : rule */ + case 13: + /*! Production:: suffix : "*" */ + case 14: + /*! Production:: suffix : "?" */ + case 15: + /*! Production:: suffix : "+" */ + + this.$ = yyvstack[yysp]; + break; + + case 7: + /*! Production:: rule : rule suffixed_expression */ + + yyvstack[yysp - 1].push(yyvstack[yysp]); + this.$ = yyvstack[yysp - 1]; + break; + + case 8: + /*! Production:: suffixed_expression : expression suffix ALIAS */ + + this.$ = ['xalias', yyvstack[yysp - 1], yyvstack[yysp - 2], yyvstack[yysp]]; + break; + + case 9: + /*! Production:: suffixed_expression : expression suffix */ + + if (yyvstack[yysp]) { + this.$ = [yyvstack[yysp], yyvstack[yysp - 1]]; + } else { + this.$ = yyvstack[yysp - 1]; + } + break; + + case 10: + /*! Production:: expression : SYMBOL */ + + this.$ = ['symbol', yyvstack[yysp]]; + break; + + case 11: + /*! Production:: expression : "(" handle_list ")" */ + + this.$ = ['()', yyvstack[yysp - 1]]; + break; + + case 12: + /*! Production:: suffix : %epsilon */ + + this.$ = undefined; + break; + + } + }, + table: bt$1({ + len: u$1([8, 1, 1, 7, 0, 10, 0, 9, 0, 0, 6, s$1, [0, 3], 2, s$1, [0, 3], 8, 0]), + symbol: u$1([1, 4, 10, 11, s$1, [13, 4, 1], s$1, [1, 3], 3, 4, 5, 10, c$1, [9, 3], s$1, [3, 8, 1], 17, c$1, [16, 4], s$1, [12, 5, 1], c$1, [19, 4], 9, 10, 3, 5, c$1, [17, 4], c$1, [16, 4]]), + type: u$1([s$1, [2, 3], s$1, [0, 5], 1, s$1, [2, 6], 0, 0, s$1, [2, 9], c$1, [10, 5], s$1, [0, 5], s$1, [2, 12], s$1, [0, 4]]), + state: u$1([s$1, [1, 5, 1], 9, 5, 10, 14, 15, c$1, [8, 3], 19, c$1, [4, 3]]), + mode: u$1([2, s$1, [1, 3], 2, 2, 1, 2, c$1, [5, 3], c$1, [7, 3], c$1, [12, 4], c$1, [13, 9], c$1, [15, 3], c$1, [5, 4]]), + goto: u$1([4, 7, 6, 8, 5, 5, 7, 5, 6, s$1, [12, 4], 11, 12, 13, 12, 12, 4, 7, 4, 6, s$1, [9, 4], 16, 9, 18, 17, c$1, [12, 4]]) + }), + defaultActions: { + 4: 6, + 6: 10, + 8: 1, + 9: 7, + 11: 13, + 12: 14, + 13: 15, + 15: 2, + 16: 8, + 17: 11, + 19: 3 + }, + parseError: function parseError(str, hash, ExceptionClass) { + if (hash.recoverable && typeof this.trace === 'function') { + this.trace(str); + hash.destroy(); // destroy... well, *almost*! + } else { + if (!ExceptionClass) { + ExceptionClass = this.JisonParserError; + } + throw new ExceptionClass(str, hash); + } + }, + parse: function parse(input) { + var self = this; + var stack = new Array(128); // token stack: stores token which leads to state at the same index (column storage) + var sstack = new Array(128); // state stack: stores states (column storage) + + var vstack = new Array(128); // semantic value stack + + var table = this.table; + var sp = 0; // 'stack pointer': index into the stacks + + + var symbol = 0; + + var EOF = this.EOF; + var NO_ACTION = [0, 20 /* === table.length :: ensures that anyone using this new state will fail dramatically! 
*/]; + + var lexer; + if (this.__lexer__) { + lexer = this.__lexer__; + } else { + lexer = this.__lexer__ = Object.create(this.lexer); + } + + var sharedState_yy = { + parseError: undefined, + quoteName: undefined, + lexer: undefined, + parser: undefined, + pre_parse: undefined, + post_parse: undefined, + pre_lex: undefined, + post_lex: undefined // WARNING: must be written this way for the code expanders to work correctly in both ES5 and ES6 modes! + }; + + this.yyGetSharedState = function yyGetSharedState() { + return sharedState_yy; + }; + + function shallow_copy_noclobber(dst, src) { + for (var k in src) { + if (typeof dst[k] === 'undefined' && Object.prototype.hasOwnProperty.call(src, k)) { + dst[k] = src[k]; + } + } + } + + // copy state + shallow_copy_noclobber(sharedState_yy, this.yy); + + sharedState_yy.lexer = lexer; + sharedState_yy.parser = this; + + // Does the shared state override the default `parseError` that already comes with this instance? + if (typeof sharedState_yy.parseError === 'function') { + this.parseError = function parseErrorAlt(str, hash, ExceptionClass) { + if (!ExceptionClass) { + ExceptionClass = this.JisonParserError; + } + return sharedState_yy.parseError.call(this, str, hash, ExceptionClass); + }; + } else { + this.parseError = this.originalParseError; + } + + // Does the shared state override the default `quoteName` that already comes with this instance? + if (typeof sharedState_yy.quoteName === 'function') { + this.quoteName = function quoteNameAlt(id_str) { + return sharedState_yy.quoteName.call(this, id_str); + }; + } else { + this.quoteName = this.originalQuoteName; + } + + // set up the cleanup function; make it an API so that external code can re-use this one in case of + // calamities or when the `%options no-try-catch` option has been specified for the grammar, in which + // case this parse() API method doesn't come with a `finally { ... }` block any more! + // + // NOTE: as this API uses parse() as a closure, it MUST be set again on every parse() invocation, + // or else your `sharedState`, etc. references will be *wrong*! + this.cleanupAfterParse = function parser_cleanupAfterParse(resultValue, invoke_post_methods, do_not_nuke_errorinfos) { + var rv; + + if (invoke_post_methods) { + var hash; + + if (sharedState_yy.post_parse || this.post_parse) { + // create an error hash info instance: we re-use this API in a **non-error situation** + // as this one delivers all parser internals ready for access by userland code. + hash = this.constructParseErrorInfo(null /* no error! */, null /* no exception! */, null, false); + } + + if (sharedState_yy.post_parse) { + rv = sharedState_yy.post_parse.call(this, sharedState_yy, resultValue, hash); + if (typeof rv !== 'undefined') resultValue = rv; + } + if (this.post_parse) { + rv = this.post_parse.call(this, sharedState_yy, resultValue, hash); + if (typeof rv !== 'undefined') resultValue = rv; + } + + // cleanup: + if (hash && hash.destroy) { + hash.destroy(); + } + } + + if (this.__reentrant_call_depth > 1) return resultValue; // do not (yet) kill the sharedState when this is a reentrant run. 
+ + // clean up the lingering lexer structures as well: + if (lexer.cleanupAfterLex) { + lexer.cleanupAfterLex(do_not_nuke_errorinfos); + } + + // prevent lingering circular references from causing memory leaks: + if (sharedState_yy) { + sharedState_yy.lexer = undefined; + sharedState_yy.parser = undefined; + if (lexer.yy === sharedState_yy) { + lexer.yy = undefined; + } + } + sharedState_yy = undefined; + this.parseError = this.originalParseError; + this.quoteName = this.originalQuoteName; + + // nuke the vstack[] array at least as that one will still reference obsoleted user values. + // To be safe, we nuke the other internal stack columns as well... + stack.length = 0; // fastest way to nuke an array without overly bothering the GC + sstack.length = 0; + + vstack.length = 0; + sp = 0; + + // nuke the error hash info instances created during this run. + // Userland code must COPY any data/references + // in the error hash instance(s) it is more permanently interested in. + if (!do_not_nuke_errorinfos) { + for (var i = this.__error_infos.length - 1; i >= 0; i--) { + var el = this.__error_infos[i]; + if (el && typeof el.destroy === 'function') { + el.destroy(); + } + } + this.__error_infos.length = 0; + } + + return resultValue; + }; + + // NOTE: as this API uses parse() as a closure, it MUST be set again on every parse() invocation, + // or else your `lexer`, `sharedState`, etc. references will be *wrong*! + this.constructParseErrorInfo = function parser_constructParseErrorInfo(msg, ex, expected, recoverable) { + var pei = { + errStr: msg, + exception: ex, + text: lexer.match, + value: lexer.yytext, + token: this.describeSymbol(symbol) || symbol, + token_id: symbol, + line: lexer.yylineno, + + expected: expected, + recoverable: recoverable, + state: state, + action: action, + new_state: newState, + symbol_stack: stack, + state_stack: sstack, + value_stack: vstack, + + stack_pointer: sp, + yy: sharedState_yy, + lexer: lexer, + parser: this, + + // and make sure the error info doesn't stay due to potential + // ref cycle via userland code manipulations. + // These would otherwise all be memory leak opportunities! + // + // Note that only array and object references are nuked as those + // constitute the set of elements which can produce a cyclic ref. + // The rest of the members is kept intact as they are harmless. + destroy: function destructParseErrorInfo() { + // remove cyclic references added to error info: + // info.yy = null; + // info.lexer = null; + // info.value = null; + // info.value_stack = null; + // ... + var rec = !!this.recoverable; + for (var key in this) { + if (this.hasOwnProperty(key) && (typeof key === 'undefined' ? 'undefined' : _typeof(key)) === 'object') { + this[key] = undefined; + } + } + this.recoverable = rec; + } + }; + // track this instance so we can `destroy()` it once we deem it superfluous and ready for garbage collection! 
+ this.__error_infos.push(pei); + return pei; + }; + + function lex() { + var token = lexer.lex(); + // if token isn't its numeric value, convert + if (typeof token !== 'number') { + token = self.symbols_[token] || token; + } + + return token || EOF; + } + + var state, action, r, t; + var yyval = { + $: true, + _$: undefined, + yy: sharedState_yy + }; + var p; + var yyrulelen; + var this_production; + var newState; + var retval = false; + + try { + this.__reentrant_call_depth++; + + lexer.setInput(input, sharedState_yy); + + vstack[sp] = null; + sstack[sp] = 0; + stack[sp] = 0; + ++sp; + + if (this.pre_parse) { + this.pre_parse.call(this, sharedState_yy); + } + if (sharedState_yy.pre_parse) { + sharedState_yy.pre_parse.call(this, sharedState_yy); + } + + newState = sstack[sp - 1]; + for (;;) { + // retrieve state number from top of stack + state = newState; // sstack[sp - 1]; + + // use default actions if available + if (this.defaultActions[state]) { + action = 2; + newState = this.defaultActions[state]; + } else { + // The single `==` condition below covers both these `===` comparisons in a single + // operation: + // + // if (symbol === null || typeof symbol === 'undefined') ... + if (!symbol) { + symbol = lex(); + } + // read action for current state and first input + t = table[state] && table[state][symbol] || NO_ACTION; + newState = t[1]; + action = t[0]; + + // handle parse error + if (!action) { + var errStr; + var errSymbolDescr = this.describeSymbol(symbol) || symbol; + var expected = this.collect_expected_token_set(state); + + // Report error + if (typeof lexer.yylineno === 'number') { + errStr = 'Parse error on line ' + (lexer.yylineno + 1) + ': '; + } else { + errStr = 'Parse error: '; + } + if (typeof lexer.showPosition === 'function') { + errStr += '\n' + lexer.showPosition(79 - 10, 10) + '\n'; + } + if (expected.length) { + errStr += 'Expecting ' + expected.join(', ') + ', got unexpected ' + errSymbolDescr; + } else { + errStr += 'Unexpected ' + errSymbolDescr; + } + // we cannot recover from the error! + p = this.constructParseErrorInfo(errStr, null, expected, false); + retval = this.parseError(p.errStr, p, this.JisonParserError); + break; + } + } + + switch (action) { + // catch misc. parse failures: + default: + // this shouldn't happen, unless resolve defaults are off + if (action instanceof Array) { + p = this.constructParseErrorInfo('Parse Error: multiple actions possible at state: ' + state + ', token: ' + symbol, null, null, false); + retval = this.parseError(p.errStr, p, this.JisonParserError); + break; + } + // Another case of better safe than sorry: in case state transitions come out of another error recovery process + // or a buggy LUT (LookUp Table): + p = this.constructParseErrorInfo('Parsing halted. No viable error recovery approach available due to internal system failure.', null, null, false); + retval = this.parseError(p.errStr, p, this.JisonParserError); + break; + + // shift: + case 1: + stack[sp] = symbol; + vstack[sp] = lexer.yytext; + + sstack[sp] = newState; // push state + + ++sp; + symbol = 0; + + // Pick up the lexer details for the current symbol as that one is not 'look-ahead' any more: + + + continue; + + // reduce: + case 2: + this_production = this.productions_[newState - 1]; // `this.productions_[]` is zero-based indexed while states start from 1 upwards... 
+ yyrulelen = this_production[1]; + + r = this.performAction.call(yyval, newState, sp - 1, vstack); + + if (typeof r !== 'undefined') { + retval = r; + break; + } + + // pop off stack + sp -= yyrulelen; + + // don't overwrite the `symbol` variable: use a local var to speed things up: + var ntsymbol = this_production[0]; // push nonterminal (reduce) + stack[sp] = ntsymbol; + vstack[sp] = yyval.$; + + // goto new state = table[STATE][NONTERMINAL] + newState = table[sstack[sp - 1]][ntsymbol]; + sstack[sp] = newState; + ++sp; + + continue; + + // accept: + case 3: + retval = true; + // Return the `$accept` rule's `$$` result, if available. + // + // Also note that JISON always adds this top-most `$accept` rule (with implicit, + // default, action): + // + // $accept: $end + // %{ $$ = $1; @$ = @1; %} + // + // which, combined with the parse kernel's `$accept` state behaviour coded below, + // will produce the `$$` value output of the rule as the parse result, + // IFF that result is *not* `undefined`. (See also the parser kernel code.) + // + // In code: + // + // %{ + // @$ = @1; // if location tracking support is included + // if (typeof $1 !== 'undefined') + // return $1; + // else + // return true; // the default parse result if the rule actions don't produce anything + // %} + sp--; + if (typeof vstack[sp] !== 'undefined') { + retval = vstack[sp]; + } + + break; + } + + // break out of loop: we accept or fail with error + break; + } + } catch (ex) { + // report exceptions through the parseError callback too, but keep the exception intact + // if it is a known parser or lexer error which has been thrown by parseError() already: + if (ex instanceof this.JisonParserError) { + throw ex; + } else if (lexer && typeof lexer.JisonLexerError === 'function' && ex instanceof lexer.JisonLexerError) { + throw ex; + } else { + p = this.constructParseErrorInfo('Parsing aborted due to exception.', ex, null, false); + retval = this.parseError(p.errStr, p, this.JisonParserError); + } + } finally { + retval = this.cleanupAfterParse(retval, true, true); + this.__reentrant_call_depth--; + } // /finally + + return retval; + } + }; + parser$1.originalParseError = parser$1.parseError; + parser$1.originalQuoteName = parser$1.quoteName; + + /* lexer generated by jison-lex 0.6.1-205 */ + + /* + * Returns a Lexer object of the following structure: + * + * Lexer: { + * yy: {} The so-called "shared state" or rather the *source* of it; + * the real "shared state" `yy` passed around to + * the rule actions, etc. is a direct reference! + * + * This "shared context" object was passed to the lexer by way of + * the `lexer.setInput(str, yy)` API before you may use it. + * + * This "shared context" object is passed to the lexer action code in `performAction()` + * so userland code in the lexer actions may communicate with the outside world + * and/or other lexer rules' actions in more or less complex ways. + * + * } + * + * Lexer.prototype: { + * EOF: 1, + * ERROR: 2, + * + * yy: The overall "shared context" object reference. + * + * JisonLexerError: function(msg, hash), + * + * performAction: function lexer__performAction(yy, yyrulenumber, YY_START), + * + * The function parameters and `this` have the following value/meaning: + * - `this` : reference to the `lexer` instance. + * `yy_` is an alias for `this` lexer instance reference used internally. + * + * - `yy` : a reference to the `yy` "shared state" object which was passed to the lexer + * by way of the `lexer.setInput(str, yy)` API before. 
+ * + * Note: + * The extra arguments you specified in the `%parse-param` statement in your + * **parser** grammar definition file are passed to the lexer via this object + * reference as member variables. + * + * - `yyrulenumber` : index of the matched lexer rule (regex), used internally. + * + * - `YY_START`: the current lexer "start condition" state. + * + * parseError: function(str, hash, ExceptionClass), + * + * constructLexErrorInfo: function(error_message, is_recoverable), + * Helper function. + * Produces a new errorInfo 'hash object' which can be passed into `parseError()`. + * See it's use in this lexer kernel in many places; example usage: + * + * var infoObj = lexer.constructParseErrorInfo('fail!', true); + * var retVal = lexer.parseError(infoObj.errStr, infoObj, lexer.JisonLexerError); + * + * options: { ... lexer %options ... }, + * + * lex: function(), + * Produce one token of lexed input, which was passed in earlier via the `lexer.setInput()` API. + * You MAY use the additional `args...` parameters as per `%parse-param` spec of the **lexer** grammar: + * these extra `args...` are added verbatim to the `yy` object reference as member variables. + * + * WARNING: + * Lexer's additional `args...` parameters (via lexer's `%parse-param`) MAY conflict with + * any attributes already added to `yy` by the **parser** or the jison run-time; + * when such a collision is detected an exception is thrown to prevent the generated run-time + * from silently accepting this confusing and potentially hazardous situation! + * + * cleanupAfterLex: function(do_not_nuke_errorinfos), + * Helper function. + * + * This helper API is invoked when the **parse process** has completed: it is the responsibility + * of the **parser** (or the calling userland code) to invoke this method once cleanup is desired. + * + * This helper may be invoked by user code to ensure the internal lexer gets properly garbage collected. 
+ * + * setInput: function(input, [yy]), + * + * + * input: function(), + * + * + * unput: function(str), + * + * + * more: function(), + * + * + * reject: function(), + * + * + * less: function(n), + * + * + * pastInput: function(n), + * + * + * upcomingInput: function(n), + * + * + * showPosition: function(), + * + * + * test_match: function(regex_match_array, rule_index), + * + * + * next: function(), + * + * + * begin: function(condition), + * + * + * pushState: function(condition), + * + * + * popState: function(), + * + * + * topState: function(), + * + * + * _currentRules: function(), + * + * + * stateStackSize: function(), + * + * + * performAction: function(yy, yy_, yyrulenumber, YY_START), + * + * + * rules: [...], + * + * + * conditions: {associative list: name ==> set}, + * } + * + * + * token location info (`yylloc`): { + * first_line: n, + * last_line: n, + * first_column: n, + * last_column: n, + * range: [start_number, end_number] + * (where the numbers are indexes into the input string, zero-based) + * } + * + * --- + * + * The `parseError` function receives a 'hash' object with these members for lexer errors: + * + * { + * text: (matched text) + * token: (the produced terminal token, if any) + * token_id: (the produced terminal token numeric ID, if any) + * line: (yylineno) + * loc: (yylloc) + * recoverable: (boolean: TRUE when the parser MAY have an error recovery rule + * available for this particular error) + * yy: (object: the current parser internal "shared state" `yy` + * as is also available in the rule actions; this can be used, + * for instance, for advanced error analysis and reporting) + * lexer: (reference to the current lexer instance used by the parser) + * } + * + * while `this` will reference the current lexer instance. + * + * When `parseError` is invoked by the lexer, the default implementation will + * attempt to invoke `yy.parser.parseError()`; when this callback is not provided + * it will try to invoke `yy.parseError()` instead. When that callback is also not + * provided, a `JisonLexerError` exception will be thrown containing the error + * message and `hash`, as constructed by the `constructLexErrorInfo()` API. + * + * Note that the lexer's `JisonLexerError` error class is passed via the + * `ExceptionClass` argument, which is invoked to construct the exception + * instance to be thrown, so technically `parseError` will throw the object + * produced by the `new ExceptionClass(str, hash)` JavaScript expression. + * + * --- + * + * You can specify lexer options by setting / modifying the `.options` object of your Lexer instance. + * These options are available: + * + * (Options are permanent.) + * + * yy: { + * parseError: function(str, hash, ExceptionClass) + * optional: overrides the default `parseError` function. + * } + * + * lexer.options: { + * pre_lex: function() + * optional: is invoked before the lexer is invoked to produce another token. + * `this` refers to the Lexer object. + * post_lex: function(token) { return token; } + * optional: is invoked when the lexer has produced a token `token`; + * this function can override the returned token value by returning another. + * When it does not return any (truthy) value, the lexer will return + * the original `token`. + * `this` refers to the Lexer object. + * + * WARNING: the next set of options are not meant to be changed. They echo the abilities of + * the lexer as per when it was compiled! + * + * ranges: boolean + * optional: `true` ==> token location info will include a .range[] member. 
+ * flex: boolean + * optional: `true` ==> flex-like lexing behaviour where the rules are tested + * exhaustively to find the longest match. + * backtrack_lexer: boolean + * optional: `true` ==> lexer regexes are tested in order and for invoked; + * the lexer terminates the scan when a token is returned by the action code. + * xregexp: boolean + * optional: `true` ==> lexer rule regexes are "extended regex format" requiring the + * `XRegExp` library. When this %option has not been specified at compile time, all lexer + * rule regexes have been written as standard JavaScript RegExp expressions. + * } + */ + + var lexer$1 = function () { + /** + * See also: + * http://stackoverflow.com/questions/1382107/whats-a-good-way-to-extend-error-in-javascript/#35881508 + * but we keep the prototype.constructor and prototype.name assignment lines too for compatibility + * with userland code which might access the derived class in a 'classic' way. + * + * @public + * @constructor + * @nocollapse + */ + function JisonLexerError(msg, hash) { + Object.defineProperty(this, 'name', { + enumerable: false, + writable: false, + value: 'JisonLexerError' + }); + + if (msg == null) msg = '???'; + + Object.defineProperty(this, 'message', { + enumerable: false, + writable: true, + value: msg + }); + + this.hash = hash; + var stacktrace; + + if (hash && hash.exception instanceof Error) { + var ex2 = hash.exception; + this.message = ex2.message || msg; + stacktrace = ex2.stack; + } + + if (!stacktrace) { + if (Error.hasOwnProperty('captureStackTrace')) { + // V8 + Error.captureStackTrace(this, this.constructor); + } else { + stacktrace = new Error(msg).stack; + } + } + + if (stacktrace) { + Object.defineProperty(this, 'stack', { + enumerable: false, + writable: false, + value: stacktrace + }); + } + } + + if (typeof Object.setPrototypeOf === 'function') { + Object.setPrototypeOf(JisonLexerError.prototype, Error.prototype); + } else { + JisonLexerError.prototype = Object.create(Error.prototype); + } + + JisonLexerError.prototype.constructor = JisonLexerError; + JisonLexerError.prototype.name = 'JisonLexerError'; + + var lexer = { + + // Code Generator Information Report + // --------------------------------- + // + // Options: + // + // backtracking: .................... false + // location.ranges: ................. true + // location line+column tracking: ... true + // + // + // Forwarded Parser Analysis flags: + // + // uses yyleng: ..................... false + // uses yylineno: ................... false + // uses yytext: ..................... false + // uses yylloc: ..................... false + // uses lexer values: ............... true / true + // location tracking: ............... false + // location assignment: ............. false + // + // + // Lexer Analysis flags: + // + // uses yyleng: ..................... ??? + // uses yylineno: ................... ??? + // uses yytext: ..................... ??? + // uses yylloc: ..................... ??? + // uses ParseError API: ............. ??? + // uses yyerror: .................... ??? + // uses location tracking & editing: ??? + // uses more() API: ................. ??? + // uses unput() API: ................ ??? + // uses reject() API: ............... ??? + // uses less() API: ................. ??? + // uses display APIs pastInput(), upcomingInput(), showPosition(): + // ............................. ??? + // uses describeYYLLOC() API: ....... ??? 
+ // + // --------- END OF REPORT ----------- + + EOF: 1, + ERROR: 2, + + // JisonLexerError: JisonLexerError, /// <-- injected by the code generator + + // options: {}, /// <-- injected by the code generator + + // yy: ..., /// <-- injected by setInput() + + __currentRuleSet__: null, /// INTERNAL USE ONLY: internal rule set cache for the current lexer state + + __error_infos: [], /// INTERNAL USE ONLY: the set of lexErrorInfo objects created since the last cleanup + __decompressed: false, /// INTERNAL USE ONLY: mark whether the lexer instance has been 'unfolded' completely and is now ready for use + done: false, /// INTERNAL USE ONLY + _backtrack: false, /// INTERNAL USE ONLY + _input: '', /// INTERNAL USE ONLY + _more: false, /// INTERNAL USE ONLY + _signaled_error_token: false, /// INTERNAL USE ONLY + conditionStack: [], /// INTERNAL USE ONLY; managed via `pushState()`, `popState()`, `topState()` and `stateStackSize()` + match: '', /// READ-ONLY EXTERNAL ACCESS - ADVANCED USE ONLY: tracks input which has been matched so far for the lexer token under construction. `match` is identical to `yytext` except that this one still contains the matched input string after `lexer.performAction()` has been invoked, where userland code MAY have changed/replaced the `yytext` value entirely! + matched: '', /// READ-ONLY EXTERNAL ACCESS - ADVANCED USE ONLY: tracks entire input which has been matched so far + matches: false, /// READ-ONLY EXTERNAL ACCESS - ADVANCED USE ONLY: tracks RE match result for last (successful) match attempt + yytext: '', /// ADVANCED USE ONLY: tracks input which has been matched so far for the lexer token under construction; this value is transferred to the parser as the 'token value' when the parser consumes the lexer token produced through a call to the `lex()` API. + offset: 0, /// READ-ONLY EXTERNAL ACCESS - ADVANCED USE ONLY: tracks the 'cursor position' in the input string, i.e. the number of characters matched so far + yyleng: 0, /// READ-ONLY EXTERNAL ACCESS - ADVANCED USE ONLY: length of matched input for the token under construction (`yytext`) + yylineno: 0, /// READ-ONLY EXTERNAL ACCESS - ADVANCED USE ONLY: 'line number' at which the token under construction is located + yylloc: null, /// READ-ONLY EXTERNAL ACCESS - ADVANCED USE ONLY: tracks location info (lines + columns) for the token under construction + + /** + * INTERNAL USE: construct a suitable error info hash object instance for `parseError`. 
+ *
+ * @public
+ * @this {RegExpLexer}
+ */
+    constructLexErrorInfo: function lexer_constructLexErrorInfo(msg, recoverable, show_input_position) {
+        msg = '' + msg;
+
+        // heuristic to determine if the error message already contains a (partial) source code dump
+        // as produced by either `showPosition()` or `prettyPrintRange()`:
+        if (show_input_position == undefined) {
+            show_input_position = !(msg.indexOf('\n') > 0 && msg.indexOf('^') > 0);
+        }
+
+        if (this.yylloc && show_input_position) {
+            if (typeof this.prettyPrintRange === 'function') {
+                var pretty_src = this.prettyPrintRange(this.yylloc);
+
+                if (!/\n\s*$/.test(msg)) {
+                    msg += '\n';
+                }
+
+                msg += '\n Erroneous area:\n' + pretty_src;
+            } else if (typeof this.showPosition === 'function') {
+                var pos_str = this.showPosition();
+
+                if (pos_str) {
+                    if (msg.length && msg[msg.length - 1] !== '\n' && pos_str[0] !== '\n') {
+                        msg += '\n' + pos_str;
+                    } else {
+                        msg += pos_str;
+                    }
+                }
+            }
+        }
+
+        /** @constructor */
+        var pei = {
+            errStr: msg,
+            recoverable: !!recoverable,
+            text: this.match, // This one MAY be empty; userland code should use the `upcomingInput` API to obtain more text which follows the 'lexer cursor position'...
+            token: null,
+            line: this.yylineno,
+            loc: this.yylloc,
+            yy: this.yy,
+            lexer: this,
+
+            /**
+             * and make sure the error info doesn't stay due to potential
+             * ref cycle via userland code manipulations.
+             * These would otherwise all be memory leak opportunities!
+             *
+             * Note that only array and object references are nuked as those
+             * constitute the set of elements which can produce a cyclic ref.
+             * The rest of the members are kept intact as they are harmless.
+             *
+             * @public
+             * @this {LexErrorInfo}
+             */
+            destroy: function destructLexErrorInfo() {
+                // remove cyclic references added to error info:
+                // info.yy = null;
+                // info.lexer = null;
+                // ...
+                var rec = !!this.recoverable;
+
+                for (var key in this) {
+                    // nuke the *value* when it is an object/array reference (`key` itself is always a string):
+                    if (this.hasOwnProperty(key) && _typeof(this[key]) === 'object') {
+                        this[key] = undefined;
+                    }
+                }
+
+                this.recoverable = rec;
+            }
+        };
+
+        // track this instance so we can `destroy()` it once we deem it superfluous and ready for garbage collection!
+        this.__error_infos.push(pei);
+
+        return pei;
+    },
+
+    /**
+     * handler which is invoked when a lexer error occurs.
+     *
+     * @public
+     * @this {RegExpLexer}
+     */
+    parseError: function lexer_parseError(str, hash, ExceptionClass) {
+        if (!ExceptionClass) {
+            ExceptionClass = this.JisonLexerError;
+        }
+
+        if (this.yy) {
+            if (this.yy.parser && typeof this.yy.parser.parseError === 'function') {
+                return this.yy.parser.parseError.call(this, str, hash, ExceptionClass) || this.ERROR;
+            } else if (typeof this.yy.parseError === 'function') {
+                return this.yy.parseError.call(this, str, hash, ExceptionClass) || this.ERROR;
+            }
+        }
+
+        throw new ExceptionClass(str, hash);
+    },
+
+    /**
+     * method which implements `yyerror(str, ...args)` functionality for use inside lexer actions.
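+     *
+     * A hedged usage sketch (hypothetical code inside a lexer rule action, where
+     * `this` is the lexer instance and `isKnownName()` is a made-up userland helper):
+     *
+     *     if (!isKnownName(this.yytext)) {
+     *         // report the problem through `parseError()`; the return value is the ERROR
+     *         // token unless the installed `parseError()` handler produces another token:
+     *         return this.yyerror('unknown name: ' + this.yytext);
+     *     }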
+ * + * @public + * @this {RegExpLexer} + */ + yyerror: function yyError(str /*, ...args */) { + var lineno_msg = ''; + + if (this.yylloc) { + lineno_msg = ' on line ' + (this.yylineno + 1); + } + + var p = this.constructLexErrorInfo('Lexical error' + lineno_msg + ': ' + str, this.options.lexerErrorsAreRecoverable); + + // Add any extra args to the hash under the name `extra_error_attributes`: + var args = Array.prototype.slice.call(arguments, 1); + + if (args.length) { + p.extra_error_attributes = args; + } + + return this.parseError(p.errStr, p, this.JisonLexerError) || this.ERROR; + }, + + /** + * final cleanup function for when we have completed lexing the input; + * make it an API so that external code can use this one once userland + * code has decided it's time to destroy any lingering lexer error + * hash object instances and the like: this function helps to clean + * up these constructs, which *may* carry cyclic references which would + * otherwise prevent the instances from being properly and timely + * garbage-collected, i.e. this function helps prevent memory leaks! + * + * @public + * @this {RegExpLexer} + */ + cleanupAfterLex: function lexer_cleanupAfterLex(do_not_nuke_errorinfos) { + // prevent lingering circular references from causing memory leaks: + this.setInput('', {}); + + // nuke the error hash info instances created during this run. + // Userland code must COPY any data/references + // in the error hash instance(s) it is more permanently interested in. + if (!do_not_nuke_errorinfos) { + for (var i = this.__error_infos.length - 1; i >= 0; i--) { + var el = this.__error_infos[i]; + + if (el && typeof el.destroy === 'function') { + el.destroy(); + } + } + + this.__error_infos.length = 0; + } + + return this; + }, + + /** + * clear the lexer token context; intended for internal use only + * + * @public + * @this {RegExpLexer} + */ + clear: function lexer_clear() { + this.yytext = ''; + this.yyleng = 0; + this.match = ''; + + // - DO NOT reset `this.matched` + this.matches = false; + + this._more = false; + this._backtrack = false; + var col = this.yylloc ? this.yylloc.last_column : 0; + + this.yylloc = { + first_line: this.yylineno + 1, + first_column: col, + last_line: this.yylineno + 1, + last_column: col, + range: [this.offset, this.offset] + }; + }, + + /** + * resets the lexer, sets new input + * + * @public + * @this {RegExpLexer} + */ + setInput: function lexer_setInput(input, yy) { + this.yy = yy || this.yy || {}; + + // also check if we've fully initialized the lexer instance, + // including expansion work to be done to go from a loaded + // lexer to a usable lexer: + if (!this.__decompressed) { + // step 1: decompress the regex list: + var rules = this.rules; + + for (var i = 0, len = rules.length; i < len; i++) { + var rule_re = rules[i]; + + // compression: is the RE an xref to another RE slot in the rules[] table? + if (typeof rule_re === 'number') { + rules[i] = rules[rule_re]; + } + } + + // step 2: unfold the conditions[] set to make these ready for use: + var conditions = this.conditions; + + for (var k in conditions) { + var spec = conditions[k]; + var rule_ids = spec.rules; + var len = rule_ids.length; + var rule_regexes = new Array(len + 1); // slot 0 is unused; we use a 1-based index approach here to keep the hottest code in `lexer_next()` fast and simple! 
+ var rule_new_ids = new Array(len + 1); + + for (var i = 0; i < len; i++) { + var idx = rule_ids[i]; + var rule_re = rules[idx]; + rule_regexes[i + 1] = rule_re; + rule_new_ids[i + 1] = idx; + } + + spec.rules = rule_new_ids; + spec.__rule_regexes = rule_regexes; + spec.__rule_count = len; + } + + this.__decompressed = true; + } + + this._input = input || ''; + this.clear(); + this._signaled_error_token = false; + this.done = false; + this.yylineno = 0; + this.matched = ''; + this.conditionStack = ['INITIAL']; + this.__currentRuleSet__ = null; + + this.yylloc = { + first_line: 1, + first_column: 0, + last_line: 1, + last_column: 0, + range: [0, 0] + }; + + this.offset = 0; + return this; + }, + + /** + * edit the remaining input via user-specified callback. + * This can be used to forward-adjust the input-to-parse, + * e.g. inserting macro expansions and alike in the + * input which has yet to be lexed. + * The behaviour of this API contrasts the `unput()` et al + * APIs as those act on the *consumed* input, while this + * one allows one to manipulate the future, without impacting + * the current `yyloc` cursor location or any history. + * + * Use this API to help implement C-preprocessor-like + * `#include` statements, etc. + * + * The provided callback must be synchronous and is + * expected to return the edited input (string). + * + * The `cpsArg` argument value is passed to the callback + * as-is. + * + * `callback` interface: + * `function callback(input, cpsArg)` + * + * - `input` will carry the remaining-input-to-lex string + * from the lexer. + * - `cpsArg` is `cpsArg` passed into this API. + * + * The `this` reference for the callback will be set to + * reference this lexer instance so that userland code + * in the callback can easily and quickly access any lexer + * API. + * + * When the callback returns a non-string-type falsey value, + * we assume the callback did not edit the input and we + * will using the input as-is. + * + * When the callback returns a non-string-type value, it + * is converted to a string for lexing via the `"" + retval` + * operation. (See also why: http://2ality.com/2012/03/converting-to-string.html + * -- that way any returned object's `toValue()` and `toString()` + * methods will be invoked in a proper/desirable order.) + * + * @public + * @this {RegExpLexer} + */ + editRemainingInput: function lexer_editRemainingInput(callback, cpsArg) { + var rv = callback.call(this, this._input, cpsArg); + + if (typeof rv !== 'string') { + if (rv) { + this._input = '' + rv; + } + // else: keep `this._input` as is. + } else { + this._input = rv; + } + + return this; + }, + + /** + * consumes and returns one char from the input + * + * @public + * @this {RegExpLexer} + */ + input: function lexer_input() { + if (!this._input) { + //this.done = true; -- don't set `done` as we want the lex()/next() API to be able to produce one custom EOF token match after this anyhow. (lexer can match special <> tokens and perform user action code for a <> match, but only does so *once*) + return null; + } + + var ch = this._input[0]; + this.yytext += ch; + this.yyleng++; + this.offset++; + this.match += ch; + this.matched += ch; + + // Count the linenumber up when we hit the LF (or a stand-alone CR). + // On CRLF, the linenumber is incremented when you fetch the CR or the CRLF combo + // and we advance immediately past the LF as well, returning both together as if + // it was all a single 'character' only. 
+ var slice_len = 1; + + var lines = false; + + if (ch === '\n') { + lines = true; + } else if (ch === '\r') { + lines = true; + var ch2 = this._input[1]; + + if (ch2 === '\n') { + slice_len++; + ch += ch2; + this.yytext += ch2; + this.yyleng++; + this.offset++; + this.match += ch2; + this.matched += ch2; + this.yylloc.range[1]++; + } + } + + if (lines) { + this.yylineno++; + this.yylloc.last_line++; + this.yylloc.last_column = 0; + } else { + this.yylloc.last_column++; + } + + this.yylloc.range[1]++; + this._input = this._input.slice(slice_len); + return ch; + }, + + /** + * unshifts one char (or an entire string) into the input + * + * @public + * @this {RegExpLexer} + */ + unput: function lexer_unput(ch) { + var len = ch.length; + var lines = ch.split(/(?:\r\n?|\n)/g); + this._input = ch + this._input; + this.yytext = this.yytext.substr(0, this.yytext.length - len); + this.yyleng = this.yytext.length; + this.offset -= len; + this.match = this.match.substr(0, this.match.length - len); + this.matched = this.matched.substr(0, this.matched.length - len); + + if (lines.length > 1) { + this.yylineno -= lines.length - 1; + this.yylloc.last_line = this.yylineno + 1; + + // Get last entirely matched line into the `pre_lines[]` array's + // last index slot; we don't mind when other previously + // matched lines end up in the array too. + var pre = this.match; + + var pre_lines = pre.split(/(?:\r\n?|\n)/g); + + if (pre_lines.length === 1) { + pre = this.matched; + pre_lines = pre.split(/(?:\r\n?|\n)/g); + } + + this.yylloc.last_column = pre_lines[pre_lines.length - 1].length; + } else { + this.yylloc.last_column -= len; + } + + this.yylloc.range[1] = this.yylloc.range[0] + this.yyleng; + this.done = false; + return this; + }, + + /** + * cache matched text and append it on next action + * + * @public + * @this {RegExpLexer} + */ + more: function lexer_more() { + this._more = true; + return this; + }, + + /** + * signal the lexer that this rule fails to match the input, so the + * next matching rule (regex) should be tested instead. + * + * @public + * @this {RegExpLexer} + */ + reject: function lexer_reject() { + if (this.options.backtrack_lexer) { + this._backtrack = true; + } else { + // when the `parseError()` call returns, we MUST ensure that the error is registered. + // We accomplish this by signaling an 'error' token to be produced for the current + // `.lex()` run. + var lineno_msg = ''; + + if (this.yylloc) { + lineno_msg = ' on line ' + (this.yylineno + 1); + } + + var p = this.constructLexErrorInfo('Lexical error' + lineno_msg + ': You can only invoke reject() in the lexer when the lexer is of the backtracking persuasion (options.backtrack_lexer = true).', false); + + this._signaled_error_token = this.parseError(p.errStr, p, this.JisonLexerError) || this.ERROR; + } + + return this; + }, + + /** + * retain first n characters of the match + * + * @public + * @this {RegExpLexer} + */ + less: function lexer_less(n) { + return this.unput(this.match.slice(n)); + }, + + /** + * return (part of the) already matched input, i.e. for error + * messages. + * + * Limit the returned string length to `maxSize` (default: 20). + * + * Limit the returned string to the `maxLines` number of lines of + * input (default: 1). + * + * Negative limit values equal *unlimited*. 
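+ *
+ * A hedged diagnostics sketch (hypothetical userland code, run e.g. after `lex()`
+ * produced the ERROR token):
+ *
+ *     console.error('lexer stopped around:\n' +
+ *         lexer.pastInput(20, 1) + ' <<HERE>> ' + lexer.upcomingInput(20, 1));
+ *     // or, as a one-shot display with a marker pointing at the error position:
+ *     console.error(lexer.showPosition());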
+ * + * @public + * @this {RegExpLexer} + */ + pastInput: function lexer_pastInput(maxSize, maxLines) { + var past = this.matched.substring(0, this.matched.length - this.match.length); + + if (maxSize < 0) maxSize = past.length;else if (!maxSize) maxSize = 20; + + if (maxLines < 0) maxLines = past.length; // can't ever have more input lines than this! + else if (!maxLines) maxLines = 1; + + // `substr` anticipation: treat \r\n as a single character and take a little + // more than necessary so that we can still properly check against maxSize + // after we've transformed and limited the newLines in here: + past = past.substr(-maxSize * 2 - 2); + + // now that we have a significantly reduced string to process, transform the newlines + // and chop them, then limit them: + var a = past.replace(/\r\n|\r/g, '\n').split('\n'); + + a = a.slice(-maxLines); + past = a.join('\n'); + + // When, after limiting to maxLines, we still have too much to return, + // do add an ellipsis prefix... + if (past.length > maxSize) { + past = '...' + past.substr(-maxSize); + } + + return past; + }, + + /** + * return (part of the) upcoming input, i.e. for error messages. + * + * Limit the returned string length to `maxSize` (default: 20). + * + * Limit the returned string to the `maxLines` number of lines of input (default: 1). + * + * Negative limit values equal *unlimited*. + * + * > ### NOTE ### + * > + * > *"upcoming input"* is defined as the whole of the both + * > the *currently lexed* input, together with any remaining input + * > following that. *"currently lexed"* input is the input + * > already recognized by the lexer but not yet returned with + * > the lexer token. This happens when you are invoking this API + * > from inside any lexer rule action code block. + * > + * + * @public + * @this {RegExpLexer} + */ + upcomingInput: function lexer_upcomingInput(maxSize, maxLines) { + var next = this.match; + + if (maxSize < 0) maxSize = next.length + this._input.length;else if (!maxSize) maxSize = 20; + + if (maxLines < 0) maxLines = maxSize; // can't ever have more input lines than this! + else if (!maxLines) maxLines = 1; + + // `substring` anticipation: treat \r\n as a single character and take a little + // more than necessary so that we can still properly check against maxSize + // after we've transformed and limited the newLines in here: + if (next.length < maxSize * 2 + 2) { + next += this._input.substring(0, maxSize * 2 + 2); // substring is faster on Chrome/V8 + } + + // now that we have a significantly reduced string to process, transform the newlines + // and chop them, then limit them: + var a = next.replace(/\r\n|\r/g, '\n').split('\n'); + + a = a.slice(0, maxLines); + next = a.join('\n'); + + // When, after limiting to maxLines, we still have too much to return, + // do add an ellipsis postfix... + if (next.length > maxSize) { + next = next.substring(0, maxSize) + '...'; + } + + return next; + }, + + /** + * return a string which displays the character position where the + * lexing error occurred, i.e. for error messages + * + * @public + * @this {RegExpLexer} + */ + showPosition: function lexer_showPosition(maxPrefix, maxPostfix) { + var pre = this.pastInput(maxPrefix).replace(/\s/g, ' '); + var c = new Array(pre.length + 1).join('-'); + return pre + this.upcomingInput(maxPostfix).replace(/\s/g, ' ') + '\n' + c + '^'; + }, + + /** + * return a string which displays the lines & columns of input which are referenced + * by the given location info range, plus a few lines of context. 
+ *
+ * This function pretty-prints the indicated section of the input, with line numbers
+ * and everything!
+ *
+ * This function is very useful to provide highly readable error reports, while
+ * the location range may be specified in various flexible ways:
+ *
+ * - `loc` is the location info object which references the area which should be
+ *   displayed and 'marked up': these lines & columns of text are marked up by `^`
+ *   characters below each character in the entire input range.
+ *
+ * - `context_loc` is the *optional* location info object which instructs this
+ *   pretty-printer how much *leading* context should be displayed alongside
+ *   the area referenced by `loc`. This can help provide context for the displayed
+ *   error, etc.
+ *
+ *   When this location info is not provided, a default context of 3 lines is
+ *   used.
+ *
+ * - `context_loc2` is another *optional* location info object, which serves
+ *   a similar purpose to `context_loc`: it specifies the amount of *trailing*
+ *   context lines to display in the pretty-print output.
+ *
+ *   When this location info is not provided, a default context of 1 line only is
+ *   used.
+ *
+ * Special Notes:
+ *
+ * - when the `loc`-indicated range is very large (about 5 lines or more), then
+ *   only the first and last few lines of this block are printed while a
+ *   `...continued...` message will be printed between them.
+ *
+ *   This serves the purpose of not printing a huge amount of text when the `loc`
+ *   range happens to be huge: this way a manageable & readable output results
+ *   for arbitrarily large ranges.
+ *
+ * - this function can display lines of input which have not yet been lexed.
+ *   `prettyPrintRange()` can access the entire input!
+ *
+ * @public
+ * @this {RegExpLexer}
+ */
+    prettyPrintRange: function lexer_prettyPrintRange(loc, context_loc, context_loc2) {
+        var CONTEXT = 3;
+        var CONTEXT_TAIL = 1;
+        var MINIMUM_VISIBLE_NONEMPTY_LINE_COUNT = 2;
+        var input = this.matched + this._input;
+        var lines = input.split('\n');
+
+        //var show_context = (error_size < 5 || context_loc);
+        var l0 = Math.max(1, context_loc ? context_loc.first_line : loc.first_line - CONTEXT);
+
+        var l1 = Math.max(1, context_loc2 ? context_loc2.last_line : loc.last_line + CONTEXT_TAIL);
+        var lineno_display_width = 1 + Math.log10(l1 | 1) | 0;
+        var ws_prefix = new Array(lineno_display_width).join(' ');
+        var nonempty_line_indexes = [];
+
+        var rv = lines.slice(l0 - 1, l1 + 1).map(function injectLineNumber(line, index) {
+            var lno = index + l0;
+            var lno_pfx = (ws_prefix + lno).substr(-lineno_display_width);
+            var rv = lno_pfx + ': ' + line;
+            var errpfx = new Array(lineno_display_width + 1).join('^');
+            var offset = 2 + 1;
+            var len = 0;
+
+            if (lno === loc.first_line) {
+                offset += loc.first_column;
+
+                len = Math.max(2, (lno === loc.last_line ?
loc.last_column : line.length) - loc.first_column + 1); + } else if (lno === loc.last_line) { + len = Math.max(2, loc.last_column + 1); + } else if (lno > loc.first_line && lno < loc.last_line) { + len = Math.max(2, line.length + 1); + } + + if (len) { + var lead = new Array(offset).join('.'); + var mark = new Array(len).join('^'); + rv += '\n' + errpfx + lead + mark; + + if (line.trim().length > 0) { + nonempty_line_indexes.push(index); + } + } + + rv = rv.replace(/\t/g, ' '); + return rv; + }); + + // now make sure we don't print an overly large amount of error area: limit it + // to the top and bottom line count: + if (nonempty_line_indexes.length > 2 * MINIMUM_VISIBLE_NONEMPTY_LINE_COUNT) { + var clip_start = nonempty_line_indexes[MINIMUM_VISIBLE_NONEMPTY_LINE_COUNT - 1] + 1; + var clip_end = nonempty_line_indexes[nonempty_line_indexes.length - MINIMUM_VISIBLE_NONEMPTY_LINE_COUNT] - 1; + var intermediate_line = new Array(lineno_display_width + 1).join(' ') + ' (...continued...)'; + intermediate_line += '\n' + new Array(lineno_display_width + 1).join('-') + ' (---------------)'; + rv.splice(clip_start, clip_end - clip_start + 1, intermediate_line); + } + + return rv.join('\n'); + }, + + /** + * helper function, used to produce a human readable description as a string, given + * the input `yylloc` location object. + * + * Set `display_range_too` to TRUE to include the string character index position(s) + * in the description if the `yylloc.range` is available. + * + * @public + * @this {RegExpLexer} + */ + describeYYLLOC: function lexer_describe_yylloc(yylloc, display_range_too) { + var l1 = yylloc.first_line; + var l2 = yylloc.last_line; + var c1 = yylloc.first_column; + var c2 = yylloc.last_column; + var dl = l2 - l1; + var dc = c2 - c1; + var rv; + + if (dl === 0) { + rv = 'line ' + l1 + ', '; + + if (dc <= 1) { + rv += 'column ' + c1; + } else { + rv += 'columns ' + c1 + ' .. ' + c2; + } + } else { + rv = 'lines ' + l1 + '(column ' + c1 + ') .. ' + l2 + '(column ' + c2 + ')'; + } + + if (yylloc.range && display_range_too) { + var r1 = yylloc.range[0]; + var r2 = yylloc.range[1] - 1; + + if (r2 <= r1) { + rv += ' {String Offset: ' + r1 + '}'; + } else { + rv += ' {String Offset range: ' + r1 + ' .. ' + r2 + '}'; + } + } + + return rv; + }, + + /** + * test the lexed token: return FALSE when not a match, otherwise return token. + * + * `match` is supposed to be an array coming out of a regex match, i.e. `match[0]` + * contains the actually matched text string. 
+ * + * Also move the input cursor forward and update the match collectors: + * + * - `yytext` + * - `yyleng` + * - `match` + * - `matches` + * - `yylloc` + * - `offset` + * + * @public + * @this {RegExpLexer} + */ + test_match: function lexer_test_match(match, indexed_rule) { + var token, lines, backup, match_str, match_str_len; + + if (this.options.backtrack_lexer) { + // save context + backup = { + yylineno: this.yylineno, + + yylloc: { + first_line: this.yylloc.first_line, + last_line: this.yylloc.last_line, + first_column: this.yylloc.first_column, + last_column: this.yylloc.last_column, + range: this.yylloc.range.slice(0) + }, + + yytext: this.yytext, + match: this.match, + matches: this.matches, + matched: this.matched, + yyleng: this.yyleng, + offset: this.offset, + _more: this._more, + _input: this._input, + + //_signaled_error_token: this._signaled_error_token, + yy: this.yy, + + conditionStack: this.conditionStack.slice(0), + done: this.done + }; + } + + match_str = match[0]; + match_str_len = match_str.length; + + // if (match_str.indexOf('\n') !== -1 || match_str.indexOf('\r') !== -1) { + lines = match_str.split(/(?:\r\n?|\n)/g); + + if (lines.length > 1) { + this.yylineno += lines.length - 1; + this.yylloc.last_line = this.yylineno + 1; + this.yylloc.last_column = lines[lines.length - 1].length; + } else { + this.yylloc.last_column += match_str_len; + } + + // } + this.yytext += match_str; + + this.match += match_str; + this.matched += match_str; + this.matches = match; + this.yyleng = this.yytext.length; + this.yylloc.range[1] += match_str_len; + + // previous lex rules MAY have invoked the `more()` API rather than producing a token: + // those rules will already have moved this `offset` forward matching their match lengths, + // hence we must only add our own match length now: + this.offset += match_str_len; + + this._more = false; + this._backtrack = false; + this._input = this._input.slice(match_str_len); + + // calling this method: + // + // function lexer__performAction(yy, yyrulenumber, YY_START) {...} + token = this.performAction.call(this, this.yy, indexed_rule, this.conditionStack[this.conditionStack.length - 1] /* = YY_START */ + ); + + // otherwise, when the action codes are all simple return token statements: + //token = this.simpleCaseActionClusters[indexed_rule]; + + if (this.done && this._input) { + this.done = false; + } + + if (token) { + return token; + } else if (this._backtrack) { + // recover context + for (var k in backup) { + this[k] = backup[k]; + } + + this.__currentRuleSet__ = null; + return false; // rule action called reject() implying the next rule should be tested instead. + } else if (this._signaled_error_token) { + // produce one 'error' token as `.parseError()` in `reject()` + // did not guarantee a failure signal by throwing an exception! + token = this._signaled_error_token; + + this._signaled_error_token = false; + return token; + } + + return false; + }, + + /** + * return next match in input + * + * @public + * @this {RegExpLexer} + */ + next: function lexer_next() { + if (this.done) { + this.clear(); + return this.EOF; + } + + if (!this._input) { + this.done = true; + } + + var token, match, tempMatch, index; + + if (!this._more) { + this.clear(); + } + + var spec = this.__currentRuleSet__; + + if (!spec) { + // Update the ruleset cache as we apparently encountered a state change or just started lexing. 
+ // The cache is set up for fast lookup -- we assume a lexer will switch states much less often than it will + // invoke the `lex()` token-producing API and related APIs, hence caching the set for direct access helps + // speed up those activities a tiny bit. + spec = this.__currentRuleSet__ = this._currentRules(); + + // Check whether a *sane* condition has been pushed before: this makes the lexer robust against + // user-programmer bugs such as https://github.com/zaach/jison-lex/issues/19 + if (!spec || !spec.rules) { + var lineno_msg = ''; + + if (this.options.trackPosition) { + lineno_msg = ' on line ' + (this.yylineno + 1); + } + + var p = this.constructLexErrorInfo('Internal lexer engine error' + lineno_msg + ': The lex grammar programmer pushed a non-existing condition name "' + this.topState() + '"; this is a fatal error and should be reported to the application programmer team!', false); + + // produce one 'error' token until this situation has been resolved, most probably by parse termination! + return this.parseError(p.errStr, p, this.JisonLexerError) || this.ERROR; + } + } + + var rule_ids = spec.rules; + var regexes = spec.__rule_regexes; + var len = spec.__rule_count; + + // Note: the arrays are 1-based, while `len` itself is a valid index, + // hence the non-standard less-or-equal check in the next loop condition! + for (var i = 1; i <= len; i++) { + tempMatch = this._input.match(regexes[i]); + + if (tempMatch && (!match || tempMatch[0].length > match[0].length)) { + match = tempMatch; + index = i; + + if (this.options.backtrack_lexer) { + token = this.test_match(tempMatch, rule_ids[i]); + + if (token !== false) { + return token; + } else if (this._backtrack) { + match = undefined; + continue; // rule action called reject() implying a rule MISmatch. + } else { + // else: this is a lexer rule which consumes input without producing a token (e.g. whitespace) + return false; + } + } else if (!this.options.flex) { + break; + } + } + } + + if (match) { + token = this.test_match(match, rule_ids[index]); + + if (token !== false) { + return token; + } + + // else: this is a lexer rule which consumes input without producing a token (e.g. whitespace) + return false; + } + + if (!this._input) { + this.done = true; + this.clear(); + return this.EOF; + } else { + var lineno_msg = ''; + + if (this.options.trackPosition) { + lineno_msg = ' on line ' + (this.yylineno + 1); + } + + var p = this.constructLexErrorInfo('Lexical error' + lineno_msg + ': Unrecognized text.', this.options.lexerErrorsAreRecoverable); + + var pendingInput = this._input; + var activeCondition = this.topState(); + var conditionStackDepth = this.conditionStack.length; + token = this.parseError(p.errStr, p, this.JisonLexerError) || this.ERROR; + + if (token === this.ERROR) { + // we can try to recover from a lexer error that `parseError()` did not 'recover' for us + // by moving forward at least one character at a time IFF the (user-specified?) `parseError()` + // has not consumed/modified any pending input or changed state in the error handler: + if (!this.matches && // and make sure the input has been modified/consumed ... + pendingInput === this._input && // ...or the lexer state has been modified significantly enough + // to merit a non-consuming error handling action right now. 
+ activeCondition === this.topState() && conditionStackDepth === this.conditionStack.length) { + this.input(); + } + } + + return token; + } + }, + + /** + * return next match that has a token + * + * @public + * @this {RegExpLexer} + */ + lex: function lexer_lex() { + var r; + + // allow the PRE/POST handlers set/modify the return token for maximum flexibility of the generated lexer: + if (typeof this.options.pre_lex === 'function') { + r = this.options.pre_lex.call(this); + } + + while (!r) { + r = this.next(); + } + + if (typeof this.options.post_lex === 'function') { + // (also account for a userdef function which does not return any value: keep the token as is) + r = this.options.post_lex.call(this, r) || r; + } + + return r; + }, + + /** + * backwards compatible alias for `pushState()`; + * the latter is symmetrical with `popState()` and we advise to use + * those APIs in any modern lexer code, rather than `begin()`. + * + * @public + * @this {RegExpLexer} + */ + begin: function lexer_begin(condition) { + return this.pushState(condition); + }, + + /** + * activates a new lexer condition state (pushes the new lexer + * condition state onto the condition stack) + * + * @public + * @this {RegExpLexer} + */ + pushState: function lexer_pushState(condition) { + this.conditionStack.push(condition); + this.__currentRuleSet__ = null; + return this; + }, + + /** + * pop the previously active lexer condition state off the condition + * stack + * + * @public + * @this {RegExpLexer} + */ + popState: function lexer_popState() { + var n = this.conditionStack.length - 1; + + if (n > 0) { + this.__currentRuleSet__ = null; + return this.conditionStack.pop(); + } else { + return this.conditionStack[0]; + } + }, + + /** + * return the currently active lexer condition state; when an index + * argument is provided it produces the N-th previous condition state, + * if available + * + * @public + * @this {RegExpLexer} + */ + topState: function lexer_topState(n) { + n = this.conditionStack.length - 1 - Math.abs(n || 0); + + if (n >= 0) { + return this.conditionStack[n]; + } else { + return 'INITIAL'; + } + }, + + /** + * (internal) determine the lexer rule set which is active for the + * currently active lexer condition state + * + * @public + * @this {RegExpLexer} + */ + _currentRules: function lexer__currentRules() { + if (this.conditionStack.length && this.conditionStack[this.conditionStack.length - 1]) { + return this.conditions[this.conditionStack[this.conditionStack.length - 1]]; + } else { + return this.conditions['INITIAL']; + } + }, + + /** + * return the number of states currently on the stack + * + * @public + * @this {RegExpLexer} + */ + stateStackSize: function lexer_stateStackSize() { + return this.conditionStack.length; + }, + + options: { + xregexp: true, + ranges: true, + trackPosition: true, + easy_keyword_rules: true + }, + + JisonLexerError: JisonLexerError, + + performAction: function lexer__performAction(yy, yyrulenumber, YY_START) { + var yy_ = this; + switch (yyrulenumber) { + case 0: + /*! Conditions:: INITIAL */ + /*! Rule:: \s+ */ + /* skip whitespace */ + break; + + case 3: + /*! Conditions:: INITIAL */ + /*! Rule:: \[{ID}\] */ + yy_.yytext = this.matches[1]; + + return 9; + break; + + default: + return this.simpleCaseActionClusters[yyrulenumber]; + } + }, + + simpleCaseActionClusters: { + /*! Conditions:: INITIAL */ + /*! Rule:: {ID} */ + 1: 10, + + /*! Conditions:: INITIAL */ + /*! Rule:: \$end\b */ + 2: 10, + + /*! Conditions:: INITIAL */ + /*! 
Rule:: '{QUOTED_STRING_CONTENT}' */ + 4: 10, + + /*! Conditions:: INITIAL */ + /*! Rule:: "{DOUBLEQUOTED_STRING_CONTENT}" */ + 5: 10, + + /*! Conditions:: INITIAL */ + /*! Rule:: \. */ + 6: 10, + + /*! Conditions:: INITIAL */ + /*! Rule:: \( */ + 7: 4, + + /*! Conditions:: INITIAL */ + /*! Rule:: \) */ + 8: 5, + + /*! Conditions:: INITIAL */ + /*! Rule:: \* */ + 9: 6, + + /*! Conditions:: INITIAL */ + /*! Rule:: \? */ + 10: 7, + + /*! Conditions:: INITIAL */ + /*! Rule:: \| */ + 11: 3, + + /*! Conditions:: INITIAL */ + /*! Rule:: \+ */ + 12: 8, + + /*! Conditions:: INITIAL */ + /*! Rule:: $ */ + 13: 1 + }, + + rules: [ + /* 0: *//^(?:\s+)/, + /* 1: */new XRegExp('^(?:([\\p{Alphabetic}_](?:[\\p{Alphabetic}\\p{Number}_])*))', ''), + /* 2: *//^(?:\$end\b)/, + /* 3: */new XRegExp('^(?:\\[([\\p{Alphabetic}_](?:[\\p{Alphabetic}\\p{Number}_])*)\\])', ''), + /* 4: *//^(?:'((?:\\'|\\[^']|[^'\\])*)')/, + /* 5: *//^(?:"((?:\\"|\\[^"]|[^"\\])*)")/, + /* 6: *//^(?:\.)/, + /* 7: *//^(?:\()/, + /* 8: *//^(?:\))/, + /* 9: *//^(?:\*)/, + /* 10: *//^(?:\?)/, + /* 11: *//^(?:\|)/, + /* 12: *//^(?:\+)/, + /* 13: *//^(?:$)/], + + conditions: { + 'INITIAL': { + rules: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13], + inclusive: true + } + } + }; + + return lexer; + }(); + parser$1.lexer = lexer$1; + + function Parser$1() { + this.yy = {}; + } + Parser$1.prototype = parser$1; + parser$1.Parser = Parser$1; + + function yyparse$1() { + return parser$1.parse.apply(parser$1, arguments); + } + + var parser$2 = { + parser: parser$1, + Parser: Parser$1, + parse: yyparse$1 + + }; + + //import assert from 'assert'; + + var devDebug = 0; + + // WARNING: this regex MUST match the regex for `ID` in ebnf-parser::bnf.l jison language lexer spec! (`ID = [{ALPHA}]{ALNUM}*`) + // + // This is the base XRegExp ID regex used in many places; this should match the ID macro definition in the EBNF/BNF parser et al as well! + var ID_REGEX_BASE = '[\\p{Alphabetic}_][\\p{Alphabetic}_\\p{Number}]*'; + + // produce a unique production symbol. + // Use this to produce rule productions from transformed EBNF which are + // guaranteed not to collide with previously generated / already existing + // rules (~ symbols). + function generateUniqueSymbol(id, postfix, opts) { + var sym = id + postfix; + if (opts.grammar[sym]) { + var i = 2; // the first occurrence won't have a number, this is already a collision, so start numbering at *2*. + do { + sym = id + postfix + i; + i++; + } while (opts.grammar[sym]); + } + return sym; + } + + function generatePushAction(handle, offset) { + var terms = handle.terms; + var rv = []; + + for (var i = 0, len = terms.length; i < len; i++) { + rv.push('$' + (i + offset)); + } + rv = rv.join(', '); + // and make sure we contain a term series unambiguously, i.e. anything more complex than + // a single term inside an EBNF check is produced as an array so we can differentiate + // between */+/? EBNF operator results and groups of tokens per individual match. + if (len > 1) { + rv = '[' + rv + ']'; + } + return rv; + } + + function transformExpression(e, opts, emit) { + var type = e[0], + value = e[1], + name = false, + has_transformed = 0; + var list, n; + + if (type === 'xalias') { + type = e[1]; + value = e[2]; + name = e[3]; + if (type) { + e = e.slice(1); + } else { + e = value; + type = e[0]; + value = e[1]; + } + if (devDebug > 3) console.log('xalias: ', e, type, value, name); + } + + if (type === 'symbol') { + n = e[1]; + if (devDebug > 2) console.log('symbol EMIT: ', n + (name ? 
'[' + name + ']' : '')); + emit(n + (name ? '[' + name + ']' : '')); + } else if (type === '+') { + if (!name) { + name = generateUniqueSymbol(opts.production, '_repetition_plus', opts); + } + if (devDebug > 2) console.log('+ EMIT name: ', name); + emit(name); + + has_transformed = 1; + + opts = optsForProduction(name, opts.grammar); + list = transformExpressionList([value], opts); + opts.grammar[name] = [[list.fragment, '$$ = [' + generatePushAction(list, 1) + '];'], [name + ' ' + list.fragment, '$1.push(' + generatePushAction(list, 2) + ');\n$$ = $1;']]; + } else if (type === '*') { + if (!name) { + name = generateUniqueSymbol(opts.production, '_repetition', opts); + } + if (devDebug > 2) console.log('* EMIT name: ', name); + emit(name); + + has_transformed = 1; + + opts = optsForProduction(name, opts.grammar); + list = transformExpressionList([value], opts); + opts.grammar[name] = [['', '$$ = [];'], [name + ' ' + list.fragment, '$1.push(' + generatePushAction(list, 2) + ');\n$$ = $1;']]; + } else if (type === '?') { + if (!name) { + name = generateUniqueSymbol(opts.production, '_option', opts); + } + if (devDebug > 2) console.log('? EMIT name: ', name); + emit(name); + + has_transformed = 1; + + opts = optsForProduction(name, opts.grammar); + list = transformExpressionList([value], opts); + // you want to be able to check if 0 or 1 occurrences were recognized: since jison + // by default *copies* the lexer token value, i.e. `$$ = $1` is the (optional) default action, + // we will need to set the action up explicitly in case of the 0-count match: + // `$$ = undefined`. + // + // Note that we MUST return an array as the + // '1 occurrence' match CAN carry multiple terms, e.g. in constructs like + // `(T T T)?`, which would otherwise be unrecognizable from the `T*` construct. 
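+        //
+        // For illustration (hypothetical grammar input): a production such as
+        //     x : (A B)? C ;
+        // is rewritten into roughly
+        //     x_option :       { $$ = undefined; }
+        //              | A B   { $$ = [$1, $2]; } ;
+        //     x : x_option C ;
+        // so the action code of `x` can test `$1 === undefined` to detect the 0-occurrence match.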
+ opts.grammar[name] = [['', '$$ = undefined;'], [list.fragment, '$$ = ' + generatePushAction(list, 1) + ';']]; + } else if (type === '()') { + if (value.length === 1 && !name) { + list = transformExpressionList(value[0], opts); + if (list.first_transformed_term_index) { + has_transformed = list.first_transformed_term_index; + } + if (devDebug > 2) console.log('group EMIT len=1: ', list); + emit(list); + } else { + if (!name) { + name = generateUniqueSymbol(opts.production, '_group', opts); + } + if (devDebug > 2) console.log('group EMIT name: ', name); + emit(name); + + has_transformed = 1; + + opts = optsForProduction(name, opts.grammar); + opts.grammar[name] = value.map(function (handle) { + var list = transformExpressionList(handle, opts); + return [list.fragment, '$$ = ' + generatePushAction(list, 1) + ';']; + }); + } + } + + return has_transformed; + } + + function transformExpressionList(list, opts) { + var first_transformed_term_index = false; + var terms = list.reduce(function (tot, e) { + var ci = tot.length; + + var has_transformed = transformExpression(e, opts, function (name) { + if (name.terms) { + tot.push.apply(tot, name.terms); + } else { + tot.push(name); + } + }); + + if (has_transformed) { + first_transformed_term_index = ci + has_transformed; + } + return tot; + }, []); + + return { + fragment: terms.join(' '), + terms: terms, + first_transformed_term_index: first_transformed_term_index // 1-based index + }; + } + + function optsForProduction(id, grammar) { + return { + production: id, + grammar: grammar + }; + } + + function transformProduction(id, production, grammar) { + var transform_opts = optsForProduction(id, grammar); + return production.map(function (handle) { + var action = null, + opts = null; + var i, len, n; + + if (typeof handle !== 'string') { + action = handle[1]; + opts = handle[2]; + handle = handle[0]; + } + var expressions = parser$2.parse(handle); + + if (devDebug > 1) console.log('\n================\nEBNF transform expressions:\n ', handle, opts, JSON.stringify(expressions, null, 2)); + + var list = transformExpressionList(expressions, transform_opts); + + var ret = [list.fragment]; + if (action) { + // make sure the action doesn't address any inner items. 
+ if (list.first_transformed_term_index) { + var rhs = list.fragment; + // seek out all names and aliases; strip out literal tokens first as those cannot serve as $names: + var alist = list.terms; // rhs.replace(/'[^']+'/g, '~').replace(/"[^"]+"/g, '~').split(' '); + // we also know at which index the first transformation occurred: + if (devDebug > 2) console.log('alist ~ rhs rule terms: ', alist, rhs); + + var alias_re = new XRegExp('\\[' + ID_REGEX_BASE + '\\]'); + var term_re = new XRegExp('^' + ID_REGEX_BASE + '$'); + // and collect the PERMITTED aliases: the names of the terms and all the remaining aliases + var good_aliases = {}; + var alias_cnt = {}; + var donotalias = {}; + + // WARNING: this replicates the knowledge/code of jison.js::addName() + var addName = function addNameEBNF(s, i) { + var base = s.replace(/[0-9]+$/, ''); + var dna = donotalias[base]; + + if (good_aliases[s]) { + alias_cnt[s]++; + if (!dna) { + good_aliases[s + alias_cnt[s]] = i + 1; + alias_cnt[s + alias_cnt[s]] = 1; + } + } else { + good_aliases[s] = i + 1; + alias_cnt[s] = 1; + if (!dna) { + good_aliases[s + alias_cnt[s]] = i + 1; + alias_cnt[s + alias_cnt[s]] = 1; + } + } + }; + + // WARNING: this replicates the knowledge/code of jison.js::markBasename() + var markBasename = function markBasenameEBNF(s) { + if (/[0-9]$/.test(s)) { + s = s.replace(/[0-9]+$/, ''); + donotalias[s] = true; + } + }; + + // mark both regular and aliased names, e.g., `id[alias1]` and `id1` + // + // WARNING: this replicates the knowledge/code of jison.js::markBasename()+addName() usage + for (i = 0, len = alist.length; i < len; i++) { + var term = alist[i]; + var alias = term.match(alias_re); + if (alias) { + markBasename(alias[0].substr(1, alias[0].length - 2)); + term = term.replace(alias_re, ''); + } + if (term.match(term_re)) { + markBasename(term); + } + } + // then check & register both regular and aliased names, e.g., `id[alias1]` and `id1` + for (i = 0, len = alist.length; i < len; i++) { + var term = alist[i]; + var alias = term.match(alias_re); + if (alias) { + addName(alias[0].substr(1, alias[0].length - 2), i); + term = term.replace(alias_re, ''); + } + if (term.match(term_re)) { + addName(term, i); + } + } + if (devDebug > 2) console.log('good_aliases: ', { + donotalias: donotalias, + good_aliases: good_aliases, + alias_cnt: alias_cnt + }); + + // now scan the action for all named and numeric semantic values ($nonterminal / $1 / @1, ##1, ...) + // + // Note that `#name` are straight **static** symbol translations, which are okay as they don't + // require access to the parse stack: `#n` references can be resolved completely + // at grammar compile time. + // + var nameref_re = new XRegExp('(?:[$@]|##)' + ID_REGEX_BASE, 'g'); + var named_spots = nameref_re.exec(action); + var numbered_spots = action.match(/(?:[$@]|##)[0-9]+\b/g); + var max_term_index = list.terms.length; + if (devDebug > 2) console.log('ACTION named_spots: ', named_spots); + if (devDebug > 2) console.log('ACTION numbered_spots: ', numbered_spots); + + // loop through the XRegExp alias regex matches in `action` + while (named_spots) { + n = named_spots[0].replace(/^(?:[$@]|##)/, ''); + if (!good_aliases[n]) { + throw new Error('The action block references the named alias "' + n + '" ' + 'which is not available in production "' + handle + '"; ' + 'it probably got removed by the EBNF rule rewrite process.\n' + 'Be reminded that you cannot reference sub-elements within EBNF */+/? 
groups, ' + 'only the outer-most EBNF group alias will remain available at all times ' + 'due to the EBNF-to-BNF rewrite process.'); + } + + if (alias_cnt[n] !== 1) { + throw new Error('The action block references the ambiguous named alias or term reference "' + n + '" ' + 'which is mentioned ' + alias_cnt[n] + ' times in production "' + handle + '", implicit and explicit aliases included.\n' + 'You should either provide unambiguous = uniquely named aliases for these terms or use numeric index references (e.g. `$3`) as a stop-gap in your action code.\n' + 'Be reminded that you cannot reference sub-elements within EBNF */+/? groups, ' + 'only the outer-most EBNF group alias will remain available at all times ' + 'due to the EBNF-to-BNF rewrite process.'); + } + //assert(good_aliases[n] <= max_term_index, 'max term index'); + + named_spots = nameref_re.exec(action); + } + if (numbered_spots) { + for (i = 0, len = numbered_spots.length; i < len; i++) { + n = parseInt(numbered_spots[i].replace(/^(?:[$@]|##)/, '')); + if (n > max_term_index) { + /* @const */var n_suffixes = ['st', 'nd', 'rd', 'th']; + throw new Error('The action block references the ' + n + n_suffixes[Math.max(0, Math.min(3, n - 1))] + ' term, ' + 'which is not available in production "' + handle + '"; ' + 'Be reminded that you cannot reference sub-elements within EBNF */+/? groups, ' + 'only the outer-most EBNF group alias will remain available at all times ' + 'due to the EBNF-to-BNF rewrite process.'); + } + } + } + } + ret.push(action); + } + if (opts) { + ret.push(opts); + } + if (devDebug > 1) console.log('\n\nEBNF tx result:\n ', JSON.stringify(list, null, 2), JSON.stringify(ret, null, 2)); + + if (ret.length === 1) { + return ret[0]; + } else { + return ret; + } + }); + } + + var ref_list; + var ref_names; + + // create a deep copy of the input, so we will keep the input constant. + function deepClone(from, sub) { + if (sub == null) { + ref_list = []; + ref_names = []; + sub = 'root'; + } + if (typeof from === 'function') return from; + if (from == null || (typeof from === 'undefined' ? 'undefined' : _typeof(from)) !== 'object') return from; + if (from.constructor !== Object && from.constructor !== Array) { + return from; + } + + for (var i = 0, len = ref_list.length; i < len; i++) { + if (ref_list[i] === from) { + throw new Error('[Circular/Xref:' + ref_names[i] + ']'); // circular or cross reference + } + } + ref_list.push(from); + ref_names.push(sub); + sub += '.'; + + var to = new from.constructor(); + for (var name in from) { + to[name] = deepClone(from[name], sub + name); + } + return to; + } + + function transformGrammar(grammar) { + grammar = deepClone(grammar); + + Object.keys(grammar).forEach(function transformGrammarForKey(id) { + grammar[id] = transformProduction(id, grammar[id], grammar); + }); + + return grammar; + } + + function transform(ebnf) { + if (devDebug > 0) console.log('EBNF:\n ', JSON.stringify(ebnf, null, 2)); + var rv = transformGrammar(ebnf); + if (devDebug > 0) console.log('\n\nEBNF after transformation:\n ', JSON.stringify(rv, null, 2)); + + return rv; + } + + // hack: + var assert; + + /* parser generated by jison 0.6.1-205 */ + + /* + * Returns a Parser object of the following structure: + * + * Parser: { + * yy: {} The so-called "shared state" or rather the *source* of it; + * the real "shared state" `yy` passed around to + * the rule actions, etc. is a derivative/copy of this one, + * not a direct reference! 
+ * }
+ *
+ * Parser.prototype: {
+ *   yy: {},
+ *   EOF: 1,
+ *   TERROR: 2,
+ *
+ *   trace: function(errorMessage, ...),
+ *
+ *   JisonParserError: function(msg, hash),
+ *
+ *   quoteName: function(name),
+ *       Helper function which can be overridden by user code later on: put suitable
+ *       quotes around literal IDs in a description string.
+ *
+ *   originalQuoteName: function(name),
+ *       The basic quoteName handler provided by JISON.
+ *       `cleanupAfterParse()` will clean up and reset `quoteName()` to reference this function
+ *       at the end of the `parse()`.
+ *
+ *   describeSymbol: function(symbol),
+ *       Return a more-or-less human-readable description of the given symbol, when
+ *       available, or the symbol itself, serving as its own 'description' for lack
+ *       of something better to serve up.
+ *
+ *       Return NULL when the symbol is unknown to the parser.
+ *
+ *   symbols_: {associative list: name ==> number},
+ *   terminals_: {associative list: number ==> name},
+ *   nonterminals: {associative list: rule-name ==> {associative list: number ==> rule-alt}},
+ *   terminal_descriptions_: (if there are any) {associative list: number ==> description},
+ *   productions_: [...],
+ *
+ *   performAction: function parser__performAction(yytext, yyleng, yylineno, yyloc, yystate, yysp, yyvstack, yylstack, yystack, yysstack),
+ *
+ *       The function parameters and `this` have the following value/meaning:
+ *       - `this`    : reference to the `yyval` internal object, which has members (`$` and `_$`)
+ *                     to store/reference the rule value `$$` and location info `@$`.
+ *
+ *         One important thing to note about `this` a.k.a. `yyval`: every *reduce* action gets
+ *         to see the same object via the `this` reference, i.e. if you wish to carry custom
+ *         data from one reduce action through to the next within a single parse run, then you
+ *         may get nasty and use `yyval` a.k.a. `this` for storing your own semi-permanent data.
+ *
+ *         `this.yy` is a direct reference to the `yy` shared state object.
+ *
+ *         `%parse-param`-specified additional `parse()` arguments have been added to this `yy`
+ *         object at `parse()` start and are therefore available to the action code via the
+ *         same named `yy.xxxx` attributes (where `xxxx` represents an identifier name from
+ *         the `%parse-param` list).
+ *
+ *       - `yytext` : reference to the lexer value which belongs to the last lexer token used
+ *                    to match this rule. This is *not* the look-ahead token, but the last token
+ *                    that's actually part of this rule.
+ *
+ *         Formulated another way, `yytext` is the value of the token immediately preceding
+ *         the current look-ahead token.
+ *         Caveats apply for rules which don't require look-ahead, such as epsilon rules.
+ *
+ *       - `yyleng` : ditto as `yytext`, only now for the lexer.yyleng value.
+ *
+ *       - `yylineno`: ditto as `yytext`, only now for the lexer.yylineno value.
+ *
+ *       - `yyloc`  : ditto as `yytext`, only now for the lexer.yylloc lexer token location info.
+ *
+ *         WARNING: since jison 0.4.18-186 this entry may be NULL/UNDEFINED instead
+ *         of an empty object when no suitable location info can be provided.
+ *
+ *       - `yystate`: the current parser state number, used internally for dispatching and
+ *                    executing the action code chunk matching the rule currently being reduced.
+ *
+ *       - `yysp`   : the current state stack position (a.k.a. 'stack pointer')
+ *
+ *         This one comes in handy when you are going to do advanced things to the parser
+ *         stacks, all of which are accessible from your action code (see the next entries below).
+ * + * Also note that you can access this and other stack index values using the new double-hash + * syntax, i.e. `##$ === ##0 === yysp`, while `##1` is the stack index for all things + * related to the first rule term, just like you have `$1`, `@1` and `#1`. + * This is made available to write very advanced grammar action rules, e.g. when you want + * to investigate the parse state stack in your action code, which would, for example, + * be relevant when you wish to implement error diagnostics and reporting schemes similar + * to the work described here: + * + * + Pottier, F., 2016. Reachability and error diagnosis in LR(1) automata. + * In Journées Francophones des Languages Applicatifs. + * + * + Jeffery, C.L., 2003. Generating LR syntax error messages from examples. + * ACM Transactions on Programming Languages and Systems (TOPLAS), 25(5), pp.631–640. + * + * - `yyrulelength`: the current rule's term count, i.e. the number of entries occupied on the stack. + * + * This one comes in handy when you are going to do advanced things to the parser + * stacks, all of which are accessible from your action code (see the next entries below). + * + * - `yyvstack`: reference to the parser value stack. Also accessed via the `$1` etc. + * constructs. + * + * - `yylstack`: reference to the parser token location stack. Also accessed via + * the `@1` etc. constructs. + * + * WARNING: since jison 0.4.18-186 this array MAY contain slots which are + * UNDEFINED rather than an empty (location) object, when the lexer/parser + * action code did not provide a suitable location info object when such a + * slot was filled! + * + * - `yystack` : reference to the parser token id stack. Also accessed via the + * `#1` etc. constructs. + * + * Note: this is a bit of a **white lie** as we can statically decode any `#n` reference to + * its numeric token id value, hence that code wouldn't need the `yystack` but *you* might + * want access this array for your own purposes, such as error analysis as mentioned above! + * + * Note that this stack stores the current stack of *tokens*, that is the sequence of + * already parsed=reduced *nonterminals* (tokens representing rules) and *terminals* + * (lexer tokens *shifted* onto the stack until the rule they belong to is found and + * *reduced*. + * + * - `yysstack`: reference to the parser state stack. This one carries the internal parser + * *states* such as the one in `yystate`, which are used to represent + * the parser state machine in the *parse table*. *Very* *internal* stuff, + * what can I say? If you access this one, you're clearly doing wicked things + * + * - `...` : the extra arguments you specified in the `%parse-param` statement in your + * grammar definition file. + * + * table: [...], + * State transition table + * ---------------------- + * + * index levels are: + * - `state` --> hash table + * - `symbol` --> action (number or array) + * + * If the `action` is an array, these are the elements' meaning: + * - index [0]: 1 = shift, 2 = reduce, 3 = accept + * - index [1]: GOTO `state` + * + * If the `action` is a number, it is the GOTO `state` + * + * defaultActions: {...}, + * + * parseError: function(str, hash, ExceptionClass), + * yyError: function(str, ...), + * yyRecovering: function(), + * yyErrOk: function(), + * yyClearIn: function(), + * + * constructParseErrorInfo: function(error_message, exception_object, expected_token_set, is_recoverable), + * Helper function **which will be set up during the first invocation of the `parse()` method**. 
+ * Produces a new errorInfo 'hash object' which can be passed into `parseError()`. + * See its use in this parser kernel in many places; example usage: + * + * var infoObj = parser.constructParseErrorInfo('fail!', null, + * parser.collect_expected_token_set(state), true); + * var retVal = parser.parseError(infoObj.errStr, infoObj, parser.JisonParserError); + * + * originalParseError: function(str, hash, ExceptionClass), + * The basic `parseError` handler provided by JISON. + * `cleanupAfterParse()` will clean up and reset `parseError()` to reference this function + * at the end of the `parse()`. + * + * options: { ... parser %options ... }, + * + * parse: function(input[, args...]), + * Parse the given `input` and return the parsed value (or `true` when none was provided by + * the root action, in which case the parser is acting as a *matcher*). + * You MAY use the additional `args...` parameters as per `%parse-param` spec of this grammar: + * these extra `args...` are added verbatim to the `yy` object reference as member variables. + * + * WARNING: + * Parser's additional `args...` parameters (via `%parse-param`) MAY conflict with + * any attributes already added to `yy` by the jison run-time; + * when such a collision is detected an exception is thrown to prevent the generated run-time + * from silently accepting this confusing and potentially hazardous situation! + * + * The lexer MAY add its own set of additional parameters (via the `%parse-param` line in + * the lexer section of the grammar spec): these will be inserted in the `yy` shared state + * object and any collision with those will be reported by the lexer via a thrown exception. + * + * cleanupAfterParse: function(resultValue, invoke_post_methods, do_not_nuke_errorinfos), + * Helper function **which will be set up during the first invocation of the `parse()` method**. + * This helper API is invoked at the end of the `parse()` call, unless an exception was thrown + * and `%options no-try-catch` has been defined for this grammar: in that case this helper MAY + * be invoked by calling user code to ensure the `post_parse` callbacks are invoked and + * the internal parser gets properly garbage collected under these particular circumstances. + * + * yyMergeLocationInfo: function(first_index, last_index, first_yylloc, last_yylloc, dont_look_back), + * Helper function **which will be set up during the first invocation of the `parse()` method**. + * This helper API can be invoked to calculate a spanning `yylloc` location info object. + * + * Note: %epsilon rules MAY specify no `first_index` and `first_yylloc`, in which case + * this function will attempt to obtain a suitable location marker by inspecting the location stack + * backwards. + * + * For more info see the documentation comment further below, immediately above this function's + * implementation. + * + * lexer: { + * yy: {...}, A reference to the so-called "shared state" `yy` once + * received via a call to the `.setInput(input, yy)` lexer API.
+ * EOF: 1, + * ERROR: 2, + * JisonLexerError: function(msg, hash), + * parseError: function(str, hash, ExceptionClass), + * setInput: function(input, [yy]), + * input: function(), + * unput: function(str), + * more: function(), + * reject: function(), + * less: function(n), + * pastInput: function(n), + * upcomingInput: function(n), + * showPosition: function(), + * test_match: function(regex_match_array, rule_index, ...), + * next: function(...), + * lex: function(...), + * begin: function(condition), + * pushState: function(condition), + * popState: function(), + * topState: function(), + * _currentRules: function(), + * stateStackSize: function(), + * cleanupAfterLex: function() + * + * options: { ... lexer %options ... }, + * + * performAction: function(yy, yy_, $avoiding_name_collisions, YY_START, ...), + * rules: [...], + * conditions: {associative list: name ==> set}, + * } + * } + * + * + * token location info (@$, _$, etc.): { + * first_line: n, + * last_line: n, + * first_column: n, + * last_column: n, + * range: [start_number, end_number] + * (where the numbers are indexes into the input string, zero-based) + * } + * + * --- + * + * The `parseError` function receives a 'hash' object with these members for lexer and + * parser errors: + * + * { + * text: (matched text) + * token: (the produced terminal token, if any) + * token_id: (the produced terminal token numeric ID, if any) + * line: (yylineno) + * loc: (yylloc) + * } + * + * parser (grammar) errors will also provide these additional members: + * + * { + * expected: (array describing the set of expected tokens; + * may be UNDEFINED when we cannot easily produce such a set) + * state: (integer (or array when the table includes grammar collisions); + * represents the current internal state of the parser kernel. + * can, for example, be used to pass to the `collect_expected_token_set()` + * API to obtain the expected token set) + * action: (integer; represents the current internal action which will be executed) + * new_state: (integer; represents the next/planned internal state, once the current + * action has executed) + * recoverable: (boolean: TRUE when the parser MAY have an error recovery rule + * available for this particular error) + * state_stack: (array: the current parser LALR/LR internal state stack; this can be used, + * for instance, for advanced error analysis and reporting) + * value_stack: (array: the current parser LALR/LR internal `$$` value stack; this can be used, + * for instance, for advanced error analysis and reporting) + * location_stack: (array: the current parser LALR/LR internal location stack; this can be used, + * for instance, for advanced error analysis and reporting) + * yy: (object: the current parser internal "shared state" `yy` + * as is also available in the rule actions; this can be used, + * for instance, for advanced error analysis and reporting) + * lexer: (reference to the current lexer instance used by the parser) + * parser: (reference to the current parser instance) + * } + * + * while `this` will reference the current parser instance. 
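+ * + * As a purely illustrative sketch (not part of the generated kernel itself), a user-supplied `parseError` override installed via `parser.yy.parseError` (see the per-instance options further below) might inspect a few of the `hash` members listed above: + * + * parser.yy.parseError = function (str, hash, ExceptionClass) { + * console.error(str, '-- expected:', hash.expected, 'at line', hash.line); + * if (!hash.recoverable) { + * throw new ExceptionClass(str, hash); + * } + * }; + * + * Note that `hash.expected` may be UNDEFINED, as described above.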
+ * + * When `parseError` is invoked by the lexer, `this` will still reference the related *parser* + * instance, while these additional `hash` fields will also be provided: + * + * { + * lexer: (reference to the current lexer instance which reported the error) + * } + * + * When `parseError` is invoked by the parser due to a **JavaScript exception** being fired + * from either the parser or lexer, `this` will still reference the related *parser* + * instance, while these additional `hash` fields will also be provided: + * + * { + * exception: (reference to the exception thrown) + * } + * + * Please do note that in the latter situation, the `expected` field will be omitted as + * this type of failure is assumed not to be due to *parse errors* but rather due to user + * action code in either parser or lexer failing unexpectedly. + * + * --- + * + * You can specify parser options by setting / modifying the `.yy` object of your Parser instance. + * These options are available: + * + * ### options which are global for all parser instances + * + * Parser.pre_parse: function(yy) + * optional: you can specify a pre_parse() function in the chunk following + * the grammar, i.e. after the last `%%`. + * Parser.post_parse: function(yy, retval, parseInfo) { return retval; } + * optional: you can specify a post_parse() function in the chunk following + * the grammar, i.e. after the last `%%`. When it does not return any value, + * the parser will return the original `retval`. + * + * ### options which can be set up per parser instance + * + * yy: { + * pre_parse: function(yy) + * optional: is invoked before the parse cycle starts (and before the first + * invocation of `lex()`) but immediately after the invocation of + * `parser.pre_parse()`. + * post_parse: function(yy, retval, parseInfo) { return retval; } + * optional: is invoked when the parse terminates due to success ('accept') + * or failure (even when exceptions are thrown). + * `retval` contains the return value to be produced by `Parser.parse()`; + * this function can override the return value by returning another. + * When it does not return any value, the parser will return the original + * `retval`. + * This function is invoked immediately before `parser.post_parse()`. + * + * parseError: function(str, hash, ExceptionClass) + * optional: overrides the default `parseError` function. + * quoteName: function(name), + * optional: overrides the default `quoteName` function. + * } + * + * parser.lexer.options: { + * pre_lex: function() + * optional: is invoked before the lexer is invoked to produce another token. + * `this` refers to the Lexer object. + * post_lex: function(token) { return token; } + * optional: is invoked when the lexer has produced a token `token`; + * this function can override the returned token value by returning another. + * When it does not return any (truthy) value, the lexer will return + * the original `token`. + * `this` refers to the Lexer object. + * + * ranges: boolean + * optional: `true` ==> token location info will include a .range[] member. + * flex: boolean + * optional: `true` ==> flex-like lexing behaviour where the rules are tested + * exhaustively to find the longest match. + * backtrack_lexer: boolean + * optional: `true` ==> lexer regexes are tested in order and for each matched regex + * the action code is invoked; the lexer terminates the scan when a token is returned by the action code. + * xregexp: boolean + * optional: `true` ==> lexer rule regexes are "extended regex format" requiring the + * `XRegExp` library.
When this `%option` has not been specified at compile time, all lexer + * rule regexes have been written as standard JavaScript RegExp expressions. + * } + */ + + // See also: + // http://stackoverflow.com/questions/1382107/whats-a-good-way-to-extend-error-in-javascript/#35881508 + // but we keep the prototype.constructor and prototype.name assignment lines too for compatibility + // with userland code which might access the derived class in a 'classic' way. + function JisonParserError(msg, hash) { + Object.defineProperty(this, 'name', { + enumerable: false, + writable: false, + value: 'JisonParserError' + }); + + if (msg == null) msg = '???'; + + Object.defineProperty(this, 'message', { + enumerable: false, + writable: true, + value: msg + }); + + this.hash = hash; + + var stacktrace; + if (hash && hash.exception instanceof Error) { + var ex2 = hash.exception; + this.message = ex2.message || msg; + stacktrace = ex2.stack; + } + if (!stacktrace) { + if (Error.hasOwnProperty('captureStackTrace')) { + // V8/Chrome engine + Error.captureStackTrace(this, this.constructor); + } else { + stacktrace = new Error(msg).stack; + } + } + if (stacktrace) { + Object.defineProperty(this, 'stack', { + enumerable: false, + writable: false, + value: stacktrace + }); + } + } + + if (typeof Object.setPrototypeOf === 'function') { + Object.setPrototypeOf(JisonParserError.prototype, Error.prototype); + } else { + JisonParserError.prototype = Object.create(Error.prototype); + } + JisonParserError.prototype.constructor = JisonParserError; + JisonParserError.prototype.name = 'JisonParserError'; + + // helper: reconstruct the productions[] table + function bp(s) { + var rv = []; + var p = s.pop; + var r = s.rule; + for (var i = 0, l = p.length; i < l; i++) { + rv.push([p[i], r[i]]); + } + return rv; + } + + // helper: reconstruct the defaultActions[] table + function bda(s) { + var rv = {}; + var d = s.idx; + var g = s.goto; + for (var i = 0, l = d.length; i < l; i++) { + var j = d[i]; + rv[j] = g[i]; + } + return rv; + } + + // helper: reconstruct the 'goto' table + function bt(s) { + var rv = []; + var d = s.len; + var y = s.symbol; + var t = s.type; + var a = s.state; + var m = s.mode; + var g = s.goto; + for (var i = 0, l = d.length; i < l; i++) { + var n = d[i]; + var q = {}; + for (var j = 0; j < n; j++) { + var z = y.shift(); + switch (t.shift()) { + case 2: + q[z] = [m.shift(), g.shift()]; + break; + + case 0: + q[z] = a.shift(); + break; + + default: + // type === 1: accept + q[z] = [3]; + } + } + rv.push(q); + } + return rv; + } + + // helper: runlength encoding with increment step: code, length: step (default step = 0) + // `this` references an array + function s(c, l, a) { + a = a || 0; + for (var i = 0; i < l; i++) { + this.push(c); + c += a; + } + } + + // helper: duplicate sequence from *relative* offset and length. + // `this` references an array + function c(i, l) { + i = this.length - i; + for (l += i; i < l; i++) { + this.push(this[i]); + } + } + + // helper: unpack an array using helpers and data, all passed in an array argument 'a'. + function u(a) { + var rv = []; + for (var i = 0, l = a.length; i < l; i++) { + var e = a[i]; + // Is this entry a helper function? + if (typeof e === 'function') { + i++; + e.apply(rv, a[i]); + } else { + rv.push(e); + } + } + return rv; + } + + var parser = { + // Code Generator Information Report + // --------------------------------- + // + // Options: + // + // default action mode: ............. classic,merge + // no try..catch: ................... 
false + // no default resolve on conflict: false + // on-demand look-ahead: ............ false + // error recovery token skip maximum: 3 + // yyerror in parse actions is: ..... NOT recoverable, + // yyerror in lexer actions and other non-fatal lexer errors are: + // .................................. NOT recoverable, + // debug grammar/output: ............ false + // has partial LR conflict upgrade: true + // rudimentary token-stack support: false + // parser table compression mode: ... 2 + // export debug tables: ............. false + // export *all* tables: ............. false + // module type: ..................... es + // parser engine type: .............. lalr + // output main() in the module: ..... true + // has user-specified main(): ....... false + // has user-specified require()/import modules for main(): + // .................................. false + // number of expected conflicts: .... 0 + // + // + // Parser Analysis flags: + // + // no significant actions (parser is a language matcher only): + // .................................. false + // uses yyleng: ..................... false + // uses yylineno: ................... false + // uses yytext: ..................... false + // uses yylloc: ..................... false + // uses ParseError API: ............. false + // uses YYERROR: .................... true + // uses YYRECOVERING: ............... false + // uses YYERROK: .................... false + // uses YYCLEARIN: .................. false + // tracks rule values: .............. true + // assigns rule values: ............. true + // uses location tracking: .......... true + // assigns location: ................ true + // uses yystack: .................... false + // uses yysstack: ................... false + // uses yysp: ....................... true + // uses yyrulelength: ............... false + // uses yyMergeLocationInfo API: .... true + // has error recovery: .............. true + // has error reporting: .............
true + // + // --------- END OF REPORT ----------- + + trace: function no_op_trace() {}, + JisonParserError: JisonParserError, + yy: {}, + options: { + type: "lalr", + hasPartialLrUpgradeOnConflict: true, + errorRecoveryTokenDiscardCount: 3 + }, + symbols_: { + "$accept": 0, + "$end": 1, + "%%": 14, + "(": 7, + ")": 8, + "*": 9, + "+": 11, + ":": 5, + ";": 4, + "=": 3, + "?": 10, + "ACTION": 15, + "ACTION_BODY": 43, + "ALIAS": 39, + "ARROW_ACTION": 42, + "CODE": 46, + "DEBUG": 19, + "EBNF": 20, + "EOF": 1, + "EOF_ID": 40, + "EPSILON": 38, + "ID": 24, + "IMPORT": 22, + "INCLUDE": 44, + "INIT_CODE": 23, + "INTEGER": 37, + "LEFT": 33, + "LEX_BLOCK": 17, + "NAME": 25, + "NONASSOC": 35, + "OPTIONS": 27, + "OPTIONS_END": 28, + "OPTION_STRING_VALUE": 29, + "OPTION_VALUE": 30, + "PARSER_TYPE": 32, + "PARSE_PARAM": 31, + "PATH": 45, + "PREC": 41, + "RIGHT": 34, + "START": 16, + "STRING": 26, + "TOKEN": 18, + "TOKEN_TYPE": 36, + "UNKNOWN_DECL": 21, + "action": 85, + "action_body": 86, + "action_comments_body": 87, + "action_ne": 84, + "associativity": 61, + "declaration": 51, + "declaration_list": 50, + "error": 2, + "expression": 79, + "extra_parser_module_code": 88, + "full_token_definitions": 63, + "grammar": 69, + "handle": 76, + "handle_action": 75, + "handle_list": 74, + "handle_sublist": 77, + "id": 83, + "id_list": 68, + "import_name": 53, + "import_path": 54, + "include_macro_code": 89, + "init_code_name": 52, + "module_code_chunk": 90, + "one_full_token": 64, + "operator": 60, + "option": 57, + "option_list": 56, + "optional_action_header_block": 49, + "optional_end_block": 48, + "optional_module_code_chunk": 91, + "optional_production_description": 73, + "optional_token_type": 65, + "options": 55, + "parse_params": 58, + "parser_type": 59, + "prec": 81, + "production": 71, + "production_id": 72, + "production_list": 70, + "spec": 47, + "suffix": 80, + "suffixed_expression": 78, + "symbol": 82, + "token_description": 67, + "token_list": 62, + "token_value": 66, + "{": 12, + "|": 6, + "}": 13 + }, + terminals_: { + 1: "EOF", + 2: "error", + 3: "=", + 4: ";", + 5: ":", + 6: "|", + 7: "(", + 8: ")", + 9: "*", + 10: "?", + 11: "+", + 12: "{", + 13: "}", + 14: "%%", + 15: "ACTION", + 16: "START", + 17: "LEX_BLOCK", + 18: "TOKEN", + 19: "DEBUG", + 20: "EBNF", + 21: "UNKNOWN_DECL", + 22: "IMPORT", + 23: "INIT_CODE", + 24: "ID", + 25: "NAME", + 26: "STRING", + 27: "OPTIONS", + 28: "OPTIONS_END", + 29: "OPTION_STRING_VALUE", + 30: "OPTION_VALUE", + 31: "PARSE_PARAM", + 32: "PARSER_TYPE", + 33: "LEFT", + 34: "RIGHT", + 35: "NONASSOC", + 36: "TOKEN_TYPE", + 37: "INTEGER", + 38: "EPSILON", + 39: "ALIAS", + 40: "EOF_ID", + 41: "PREC", + 42: "ARROW_ACTION", + 43: "ACTION_BODY", + 44: "INCLUDE", + 45: "PATH", + 46: "CODE" + }, + TERROR: 2, + EOF: 1, + + // internals: defined here so the object *structure* doesn't get modified by parse() et al, + // thus helping JIT compilers like Chrome V8. 
+ originalQuoteName: null, + originalParseError: null, + cleanupAfterParse: null, + constructParseErrorInfo: null, + yyMergeLocationInfo: null, + + __reentrant_call_depth: 0, // INTERNAL USE ONLY + __error_infos: [], // INTERNAL USE ONLY: the set of parseErrorInfo objects created since the last cleanup + __error_recovery_infos: [], // INTERNAL USE ONLY: the set of parseErrorInfo objects created since the last cleanup + + // APIs which will be set up depending on user action code analysis: + //yyRecovering: 0, + //yyErrOk: 0, + //yyClearIn: 0, + + // Helper APIs + // ----------- + + // Helper function which can be overridden by user code later on: put suitable quotes around + // literal IDs in a description string. + quoteName: function parser_quoteName(id_str) { + return '"' + id_str + '"'; + }, + + // Return the name of the given symbol (terminal or non-terminal) as a string, when available. + // + // Return NULL when the symbol is unknown to the parser. + getSymbolName: function parser_getSymbolName(symbol) { + if (this.terminals_[symbol]) { + return this.terminals_[symbol]; + } + + // Otherwise... this might refer to a RULE token i.e. a non-terminal: see if we can dig that one up. + // + // An example of this may be where a rule's action code contains a call like this: + // + // parser.getSymbolName(#$) + // + // to obtain a human-readable name of the current grammar rule. + var s = this.symbols_; + for (var key in s) { + if (s[key] === symbol) { + return key; + } + } + return null; + }, + + // Return a more-or-less human-readable description of the given symbol, when available, + // or the symbol itself, serving as its own 'description' for lack of something better to serve up. + // + // Return NULL when the symbol is unknown to the parser. + describeSymbol: function parser_describeSymbol(symbol) { + if (symbol !== this.EOF && this.terminal_descriptions_ && this.terminal_descriptions_[symbol]) { + return this.terminal_descriptions_[symbol]; + } else if (symbol === this.EOF) { + return 'end of input'; + } + var id = this.getSymbolName(symbol); + if (id) { + return this.quoteName(id); + } + return null; + }, + + // Produce a (more or less) human-readable list of expected tokens at the point of failure. + // + // The produced list may contain token or token set descriptions instead of the tokens + // themselves to help turn this output into something that is easier to read by humans, + // unless the `do_not_describe` parameter is set, in which case a list of the raw, *numeric*, + // expected terminals and nonterminals is produced. + // + // The returned list (array) will not contain any duplicate entries. + collect_expected_token_set: function parser_collect_expected_token_set(state, do_not_describe) { + var TERROR = this.TERROR; + var tokenset = []; + var check = {}; + // Has this (error?) state been outfitted with a custom expectations description text for human consumption? + // If so, use that one instead of the less palatable token set. + if (!do_not_describe && this.state_descriptions_ && this.state_descriptions_[state]) { + return [this.state_descriptions_[state]]; + } + for (var p in this.table[state]) { + p = +p; + if (p !== TERROR) { + var d = do_not_describe ? p : this.describeSymbol(p); + if (d && !check[d]) { + tokenset.push(d); + check[d] = true; // Mark this token description as already mentioned to prevent outputting duplicate entries.
+ } + } + } + return tokenset; + }, + productions_: bp({ + pop: u([s, [47, 3], 48, 48, s, [49, 3], s, [50, 3], s, [51, 20], s, [52, 3], 53, 53, 54, 54, s, [55, 3], 56, 56, s, [57, 6], 58, 58, 59, 59, 60, 60, s, [61, 3], 62, 62, 63, 63, s, [64, 3], 65, s, [65, 4, 1], 68, 69, 70, 70, s, [71, 3], 72, 72, 73, 73, s, [74, 4], s, [75, 3], 76, 76, 77, 77, 78, 78, s, [79, 5], s, [80, 4], s, [81, 3], 82, 82, 83, s, [84, 4], s, [85, 3], s, [86, 5], 87, 87, 88, 88, 89, 89, s, [90, 3], 91, 91]), + rule: u([5, 5, 3, 0, 2, 0, s, [2, 3], c, [4, 3], 1, 1, c, [3, 3], s, [1, 6], s, [3, 5], s, [2, 3], c, [15, 9], c, [11, 4], c, [20, 7], s, [2, 4], s, [1, 3], 2, 1, 2, 2, c, [15, 3], 0, c, [11, 7], c, [36, 4], 3, 3, 1, 0, 3, c, [39, 4], c, [80, 4], c, [9, 3], c, [39, 4], 3, 3, c, [34, 5], c, [40, 5], c, [32, 3], s, [1, 3], 0, 0, 1, 5, 4, 4, c, [53, 3], c, [85, 4], c, [35, 3], 0]) + }), + performAction: function parser__PerformAction(yyloc, yystate /* action[1] */, yysp, yyvstack, yylstack) { + + /* this == yyval */ + + // the JS engine itself can go and remove these statements when `yy` turns out to be unused in any action code! + var yy = this.yy; + var yyparser = yy.parser; + var yylexer = yy.lexer; + + switch (yystate) { + case 0: + /*! Production:: $accept : spec $end */ + + // default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-): + this.$ = yyvstack[yysp - 1]; + this._$ = yylstack[yysp - 1]; + // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-) + break; + + case 1: + /*! Production:: spec : declaration_list "%%" grammar optional_end_block EOF */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 4, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = yyvstack[yysp - 4]; + if (yyvstack[yysp - 1].trim() !== '') { + yy.addDeclaration(this.$, { include: yyvstack[yysp - 1] }); + } + return extend(this.$, yyvstack[yysp - 2]); + break; + + case 2: + /*! Production:: spec : declaration_list "%%" grammar error EOF */ + + // default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-): + this.$ = yyvstack[yysp - 4]; + this._$ = yyparser.yyMergeLocationInfo(yysp - 4, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-) + + + yyparser.yyError(rmCommonWS(_templateObject, yylexer.prettyPrintRange(yylstack[yysp - 1], yylstack[yysp - 2]))); + break; + + case 3: + /*! Production:: spec : declaration_list error EOF */ + + // default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-): + this.$ = yyvstack[yysp - 2]; + this._$ = yyparser.yyMergeLocationInfo(yysp - 2, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-) + + + yyparser.yyError(rmCommonWS(_templateObject2, yylexer.prettyPrintRange(yylstack[yysp - 1], yylstack[yysp - 2]))); + break; + + case 4: + /*! Production:: optional_end_block : %epsilon */ + case 100: + /*! Production:: suffix : %epsilon */ + case 116: + /*! Production:: action : %epsilon */ + case 117: + /*! Production:: action_body : %epsilon */ + case 132: + /*! 
Production:: optional_module_code_chunk : %epsilon */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(null, null, null, null, true); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = ''; + break; + + case 5: + /*! Production:: optional_end_block : "%%" extra_parser_module_code */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + var rv = checkActionBlock(yyvstack[yysp], yylstack[yysp]); + if (rv) { + yyparser.yyError(rmCommonWS(_templateObject3, rv, yylexer.prettyPrintRange(yylstack[yysp]))); + } + this.$ = yyvstack[yysp]; + break; + + case 6: + /*! Production:: optional_action_header_block : %epsilon */ + case 10: + /*! Production:: declaration_list : %epsilon */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(null, null, null, null, true); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = {}; + break; + + case 7: + /*! Production:: optional_action_header_block : optional_action_header_block ACTION */ + case 8: + /*! Production:: optional_action_header_block : optional_action_header_block include_macro_code */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = yyvstack[yysp - 1]; + var rv = checkActionBlock(yyvstack[yysp], yylstack[yysp]); + if (rv) { + yyparser.yyError(rmCommonWS(_templateObject4, rv, yylexer.prettyPrintRange(yylstack[yysp]))); + } + yy.addDeclaration(this.$, { actionInclude: yyvstack[yysp] }); + break; + + case 9: + /*! Production:: declaration_list : declaration_list declaration */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = yyvstack[yysp - 1];yy.addDeclaration(this.$, yyvstack[yysp]); + break; + + case 11: + /*! Production:: declaration_list : declaration_list error */ + + // default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-): + this.$ = yyvstack[yysp - 1]; + this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-) + + + // TODO ... + yyparser.yyError(rmCommonWS(_templateObject5, yylexer.prettyPrintRange(yylstack[yysp], yylstack[yysp - 1]))); + break; + + case 12: + /*! Production:: declaration : START id */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = { start: yyvstack[yysp] }; + break; + + case 13: + /*! 
Production:: declaration : LEX_BLOCK */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yylstack[yysp]; + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = { lex: { text: yyvstack[yysp], position: yylstack[yysp] } }; + break; + + case 14: + /*! Production:: declaration : operator */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yylstack[yysp]; + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = { operator: yyvstack[yysp] }; + break; + + case 15: + /*! Production:: declaration : TOKEN full_token_definitions */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = { token_list: yyvstack[yysp] }; + break; + + case 16: + /*! Production:: declaration : ACTION */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yylstack[yysp]; + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + var rv = checkActionBlock(yyvstack[yysp], yylstack[yysp]); + if (rv) { + yyparser.yyError(rmCommonWS(_templateObject6, rv, yylexer.prettyPrintRange(yylstack[yysp]))); + } + this.$ = { include: yyvstack[yysp] }; + break; + + case 17: + /*! Production:: declaration : include_macro_code */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yylstack[yysp]; + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + var rv = checkActionBlock(yyvstack[yysp], yylstack[yysp]); + if (rv) { + yyparser.yyError(rmCommonWS(_templateObject4, rv, yylexer.prettyPrintRange(yylstack[yysp]))); + } + this.$ = { include: yyvstack[yysp] }; + break; + + case 18: + /*! Production:: declaration : parse_params */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yylstack[yysp]; + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = { parseParams: yyvstack[yysp] }; + break; + + case 19: + /*! Production:: declaration : parser_type */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yylstack[yysp]; + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = { parserType: yyvstack[yysp] }; + break; + + case 20: + /*! Production:: declaration : options */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yylstack[yysp]; + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = { options: yyvstack[yysp] }; + break; + + case 21: + /*! Production:: declaration : DEBUG */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yylstack[yysp]; + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = { options: [['debug', true]] }; + break; + + case 22: + /*! 
Production:: declaration : EBNF */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yylstack[yysp]; + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + ebnf = true; + this.$ = { options: [['ebnf', true]] }; + break; + + case 23: + /*! Production:: declaration : UNKNOWN_DECL */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yylstack[yysp]; + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = { unknownDecl: yyvstack[yysp] }; + break; + + case 24: + /*! Production:: declaration : IMPORT import_name import_path */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 2, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = { imports: { name: yyvstack[yysp - 1], path: yyvstack[yysp] } }; + break; + + case 25: + /*! Production:: declaration : IMPORT import_name error */ + + // default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-): + this.$ = yyvstack[yysp - 2]; + this._$ = yyparser.yyMergeLocationInfo(yysp - 2, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-) + + + yyparser.yyError(rmCommonWS(_templateObject7, yylexer.prettyPrintRange(yylstack[yysp], yylstack[yysp - 2]))); + break; + + case 26: + /*! Production:: declaration : IMPORT error import_path */ + + // default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-): + this.$ = yyvstack[yysp - 2]; + this._$ = yyparser.yyMergeLocationInfo(yysp - 2, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-) + + + yyparser.yyError(rmCommonWS(_templateObject8, yylexer.prettyPrintRange(yylstack[yysp - 1], yylstack[yysp - 2]))); + break; + + case 27: + /*! Production:: declaration : INIT_CODE init_code_name action_ne */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 2, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + var rv = checkActionBlock(yyvstack[yysp], yylstack[yysp]); + if (rv) { + yyparser.yyError(rmCommonWS(_templateObject9, $init_code_name, rv, yylexer.prettyPrintRange(yylstack[yysp], yylstack[yysp - 2]))); + } + this.$ = { + initCode: { + qualifier: yyvstack[yysp - 1], + include: yyvstack[yysp] + } + }; + break; + + case 28: + /*! Production:: declaration : INIT_CODE error action_ne */ + + // default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-): + this.$ = yyvstack[yysp - 2]; + this._$ = yyparser.yyMergeLocationInfo(yysp - 2, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-) + + + yyparser.yyError(rmCommonWS(_templateObject10, yylexer.prettyPrintRange(yylstack[yysp - 1], yylstack[yysp - 2], yylstack[yysp]))); + break; + + case 29: + /*! Production:: declaration : START error */ + + // default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-): + this.$ = yyvstack[yysp - 1]; + this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-) + + + // TODO ... 
+ yyparser.yyError(rmCommonWS(_templateObject11, yylexer.prettyPrintRange(yylstack[yysp], yylstack[yysp - 1]))); + break; + + case 30: + /*! Production:: declaration : TOKEN error */ + + // default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-): + this.$ = yyvstack[yysp - 1]; + this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-) + + + // TODO ... + yyparser.yyError(rmCommonWS(_templateObject12, yylexer.prettyPrintRange(yylstack[yysp], yylstack[yysp - 1]))); + break; + + case 31: + /*! Production:: declaration : IMPORT error */ + + // default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-): + this.$ = yyvstack[yysp - 1]; + this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-) + + + // TODO ... + yyparser.yyError(rmCommonWS(_templateObject13, yylexer.prettyPrintRange(yylstack[yysp], yylstack[yysp - 1]))); + break; + + case 32: + /*! Production:: init_code_name : ID */ + case 33: + /*! Production:: init_code_name : NAME */ + case 34: + /*! Production:: init_code_name : STRING */ + case 35: + /*! Production:: import_name : ID */ + case 36: + /*! Production:: import_name : STRING */ + case 37: + /*! Production:: import_path : ID */ + case 38: + /*! Production:: import_path : STRING */ + case 67: + /*! Production:: optional_token_type : TOKEN_TYPE */ + case 68: + /*! Production:: token_value : INTEGER */ + case 69: + /*! Production:: token_description : STRING */ + case 80: + /*! Production:: optional_production_description : STRING */ + case 95: + /*! Production:: expression : ID */ + case 101: + /*! Production:: suffix : "*" */ + case 102: + /*! Production:: suffix : "?" */ + case 103: + /*! Production:: suffix : "+" */ + case 107: + /*! Production:: symbol : id */ + case 108: + /*! Production:: symbol : STRING */ + case 109: + /*! Production:: id : ID */ + case 112: + /*! Production:: action_ne : ACTION */ + case 113: + /*! Production:: action_ne : include_macro_code */ + case 114: + /*! Production:: action : action_ne */ + case 118: + /*! Production:: action_body : action_comments_body */ + case 122: + /*! Production:: action_comments_body : ACTION_BODY */ + case 124: + /*! Production:: extra_parser_module_code : optional_module_code_chunk */ + case 128: + /*! Production:: module_code_chunk : CODE */ + case 131: + /*! Production:: optional_module_code_chunk : module_code_chunk */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yylstack[yysp]; + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = yyvstack[yysp]; + break; + + case 39: + /*! Production:: options : OPTIONS option_list OPTIONS_END */ + case 110: + /*! Production:: action_ne : "{" action_body "}" */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 2, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = yyvstack[yysp - 1]; + break; + + case 40: + /*! 
Production:: options : OPTIONS error OPTIONS_END */ + + // default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-): + this.$ = yyvstack[yysp - 2]; + this._$ = yyparser.yyMergeLocationInfo(yysp - 2, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-) + + + // TODO ... + yyparser.yyError(rmCommonWS(_templateObject14, yylexer.prettyPrintRange(yylstack[yysp - 1], yylstack[yysp - 2], yylstack[yysp]))); + break; + + case 41: + /*! Production:: options : OPTIONS error */ + + // default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-): + this.$ = yyvstack[yysp - 1]; + this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-) + + + // TODO ... + yyparser.yyError(rmCommonWS(_templateObject15, yylexer.prettyPrintRange(yylstack[yysp], yylstack[yysp - 1]))); + break; + + case 42: + /*! Production:: option_list : option_list option */ + case 59: + /*! Production:: token_list : token_list symbol */ + case 70: + /*! Production:: id_list : id_list id */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = yyvstack[yysp - 1];this.$.push(yyvstack[yysp]); + break; + + case 43: + /*! Production:: option_list : option */ + case 60: + /*! Production:: token_list : symbol */ + case 71: + /*! Production:: id_list : id */ + case 83: + /*! Production:: handle_list : handle_action */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yylstack[yysp]; + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = [yyvstack[yysp]]; + break; + + case 44: + /*! Production:: option : NAME */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yylstack[yysp]; + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = [yyvstack[yysp], true]; + break; + + case 45: + /*! Production:: option : NAME "=" OPTION_STRING_VALUE */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 2, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = [yyvstack[yysp - 2], yyvstack[yysp]]; + break; + + case 46: + /*! Production:: option : NAME "=" OPTION_VALUE */ + case 47: + /*! Production:: option : NAME "=" NAME */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 2, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = [yyvstack[yysp - 2], parseValue(yyvstack[yysp])]; + break; + + case 48: + /*! Production:: option : NAME "=" error */ + + // default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-): + this.$ = yyvstack[yysp - 2]; + this._$ = yyparser.yyMergeLocationInfo(yysp - 2, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-) + + + // TODO ... + yyparser.yyError(rmCommonWS(_templateObject16, yyvstack[yysp - 2], yylexer.prettyPrintRange(yylstack[yysp], yylstack[yysp - 2]))); + break; + + case 49: + /*! 
Production:: option : NAME error */ + + // default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-): + this.$ = yyvstack[yysp - 1]; + this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-) + + + // TODO ... + yyparser.yyError(rmCommonWS(_templateObject17, yylexer.prettyPrintRange(yylstack[yysp], yylstack[yysp - 1]))); + break; + + case 50: + /*! Production:: parse_params : PARSE_PARAM token_list */ + case 52: + /*! Production:: parser_type : PARSER_TYPE symbol */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = yyvstack[yysp]; + break; + + case 51: + /*! Production:: parse_params : PARSE_PARAM error */ + + // default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-): + this.$ = yyvstack[yysp - 1]; + this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-) + + + // TODO ... + yyparser.yyError(rmCommonWS(_templateObject18, yylexer.prettyPrintRange(yylstack[yysp], yylstack[yysp - 1]))); + break; + + case 53: + /*! Production:: parser_type : PARSER_TYPE error */ + + // default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-): + this.$ = yyvstack[yysp - 1]; + this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-) + + + // TODO ... + yyparser.yyError(rmCommonWS(_templateObject19, yylexer.prettyPrintRange(yylstack[yysp], yylstack[yysp - 1]))); + break; + + case 54: + /*! Production:: operator : associativity token_list */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = [yyvstack[yysp - 1]];this.$.push.apply(this.$, yyvstack[yysp]); + break; + + case 55: + /*! Production:: operator : associativity error */ + + // default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-): + this.$ = yyvstack[yysp - 1]; + this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-) + + + // TODO ... + yyparser.yyError(rmCommonWS(_templateObject20, yylexer.prettyPrintRange(yylstack[yysp], yylstack[yysp - 1]))); + break; + + case 56: + /*! Production:: associativity : LEFT */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yylstack[yysp]; + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = 'left'; + break; + + case 57: + /*! Production:: associativity : RIGHT */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yylstack[yysp]; + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = 'right'; + break; + + case 58: + /*! 
Production:: associativity : NONASSOC */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yylstack[yysp]; + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = 'nonassoc'; + break; + + case 61: + /*! Production:: full_token_definitions : optional_token_type id_list */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + var rv = []; + var lst = yyvstack[yysp]; + for (var i = 0, len = lst.length; i < len; i++) { + var id = lst[i]; + var m = { id: id }; + if (yyvstack[yysp - 1]) { + m.type = yyvstack[yysp - 1]; + } + rv.push(m); + } + this.$ = rv; + break; + + case 62: + /*! Production:: full_token_definitions : optional_token_type one_full_token */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + var m = yyvstack[yysp]; + if (yyvstack[yysp - 1]) { + m.type = yyvstack[yysp - 1]; + } + this.$ = [m]; + break; + + case 63: + /*! Production:: one_full_token : id token_value token_description */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 2, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = { + id: yyvstack[yysp - 2], + value: yyvstack[yysp - 1], + description: yyvstack[yysp] + }; + break; + + case 64: + /*! Production:: one_full_token : id token_description */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = { + id: yyvstack[yysp - 1], + description: yyvstack[yysp] + }; + break; + + case 65: + /*! Production:: one_full_token : id token_value */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = { + id: yyvstack[yysp - 1], + value: yyvstack[yysp] + }; + break; + + case 66: + /*! Production:: optional_token_type : %epsilon */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(null, null, null, null, true); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = false; + break; + + case 72: + /*! Production:: grammar : optional_action_header_block production_list */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = yyvstack[yysp - 1]; + this.$.grammar = yyvstack[yysp]; + break; + + case 73: + /*! 
Production:: production_list : production_list production */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = yyvstack[yysp - 1]; + if (yyvstack[yysp][0] in this.$) { + this.$[yyvstack[yysp][0]] = this.$[yyvstack[yysp][0]].concat(yyvstack[yysp][1]); + } else { + this.$[yyvstack[yysp][0]] = yyvstack[yysp][1]; + } + break; + + case 74: + /*! Production:: production_list : production */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yylstack[yysp]; + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = {};this.$[yyvstack[yysp][0]] = yyvstack[yysp][1]; + break; + + case 75: + /*! Production:: production : production_id handle_list ";" */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 2, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = [yyvstack[yysp - 2], yyvstack[yysp - 1]]; + break; + + case 76: + /*! Production:: production : production_id error ";" */ + + // default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-): + this.$ = yyvstack[yysp - 2]; + this._$ = yyparser.yyMergeLocationInfo(yysp - 2, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-) + + + // TODO ... + yyparser.yyError(rmCommonWS(_templateObject21, yylexer.prettyPrintRange(yylstack[yysp - 1], yylstack[yysp - 2]))); + break; + + case 77: + /*! Production:: production : production_id error */ + + // default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-): + this.$ = yyvstack[yysp - 1]; + this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-) + + + // TODO ... + yyparser.yyError(rmCommonWS(_templateObject22, yylexer.prettyPrintRange(yylstack[yysp], yylstack[yysp - 1]))); + break; + + case 78: + /*! Production:: production_id : id optional_production_description ":" */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 2, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = yyvstack[yysp - 2]; + + // TODO: carry rule description support into the parser generator... + break; + + case 79: + /*! Production:: production_id : id optional_production_description error */ + + // default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-): + this.$ = yyvstack[yysp - 2]; + this._$ = yyparser.yyMergeLocationInfo(yysp - 2, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-) + + + // TODO ... + yyparser.yyError(rmCommonWS(_templateObject23, yylexer.prettyPrintRange(yylstack[yysp], yylstack[yysp - 2]))); + break; + + case 81: + /*! Production:: optional_production_description : %epsilon */ + + // default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-): + this.$ = undefined; + this._$ = yyparser.yyMergeLocationInfo(null, null, null, null, true); + // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-) + break; + + case 82: + /*! 
Production:: handle_list : handle_list "|" handle_action */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 2, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = yyvstack[yysp - 2]; + this.$.push(yyvstack[yysp]); + break; + + case 84: + /*! Production:: handle_list : handle_list "|" error */ + + // default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-): + this.$ = yyvstack[yysp - 2]; + this._$ = yyparser.yyMergeLocationInfo(yysp - 2, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-) + + + // TODO ... + yyparser.yyError(rmCommonWS(_templateObject24, yylexer.prettyPrintRange(yylstack[yysp], yylstack[yysp - 2]))); + break; + + case 85: + /*! Production:: handle_list : handle_list ":" error */ + + // default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-): + this.$ = yyvstack[yysp - 2]; + this._$ = yyparser.yyMergeLocationInfo(yysp - 2, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-) + + + // TODO ... + yyparser.yyError(rmCommonWS(_templateObject25, yylexer.prettyPrintRange(yylstack[yysp], yylstack[yysp - 2]))); + break; + + case 86: + /*! Production:: handle_action : handle prec action */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 2, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = [yyvstack[yysp - 2].length ? yyvstack[yysp - 2].join(' ') : '']; + if (yyvstack[yysp]) { + var rv = checkActionBlock(yyvstack[yysp], yylstack[yysp]); + if (rv) { + yyparser.yyError(rmCommonWS(_templateObject26, rv, yylexer.prettyPrintRange(yylstack[yysp], yylstack[yysp - 2]))); + } + this.$.push(yyvstack[yysp]); + } + if (yyvstack[yysp - 1]) { + if (yyvstack[yysp - 2].length === 0) { + yyparser.yyError(rmCommonWS(_templateObject27, yylexer.prettyPrintRange(yylstack[yysp - 2]))); + } + this.$.push(yyvstack[yysp - 1]); + } + if (this.$.length === 1) { + this.$ = this.$[0]; + } + break; + + case 87: + /*! Production:: handle_action : EPSILON action */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = ['']; + if (yyvstack[yysp]) { + var rv = checkActionBlock(yyvstack[yysp], yylstack[yysp]); + if (rv) { + yyparser.yyError(rmCommonWS(_templateObject28, rv, yylexer.prettyPrintRange(yylstack[yysp], yylstack[yysp - 1]))); + } + this.$.push(yyvstack[yysp]); + } + if (this.$.length === 1) { + this.$ = this.$[0]; + } + break; + + case 88: + /*! Production:: handle_action : EPSILON error */ + + // default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-): + this.$ = yyvstack[yysp - 1]; + this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-) + + + // TODO ... + yyparser.yyError(rmCommonWS(_templateObject29, yylexer.prettyPrintRange(yylstack[yysp], yylstack[yysp - 1]))); + break; + + case 89: + /*! 
Production:: handle : handle suffixed_expression */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = yyvstack[yysp - 1]; + this.$.push(yyvstack[yysp]); + break; + + case 90: + /*! Production:: handle : %epsilon */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(null, null, null, null, true); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = []; + break; + + case 91: + /*! Production:: handle_sublist : handle_sublist "|" handle */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 2, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = yyvstack[yysp - 2]; + this.$.push(yyvstack[yysp].join(' ')); + break; + + case 92: + /*! Production:: handle_sublist : handle */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yylstack[yysp]; + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = [yyvstack[yysp].join(' ')]; + break; + + case 93: + /*! Production:: suffixed_expression : expression suffix ALIAS */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 2, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = yyvstack[yysp - 2] + yyvstack[yysp - 1] + "[" + yyvstack[yysp] + "]"; + break; + + case 94: + /*! Production:: suffixed_expression : expression suffix */ + case 123: + /*! Production:: action_comments_body : action_comments_body ACTION_BODY */ + case 129: + /*! Production:: module_code_chunk : module_code_chunk CODE */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = yyvstack[yysp - 1] + yyvstack[yysp]; + break; + + case 96: + /*! Production:: expression : EOF_ID */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yylstack[yysp]; + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = '$end'; + break; + + case 97: + /*! Production:: expression : STRING */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yylstack[yysp]; + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + // Re-encode the string *anyway* as it will + // be made part of the rule rhs a.k.a. production (type: *string*) again and we want + // to be able to handle all tokens, including *significant space* + // encoded as literal tokens in a grammar such as this: `rule: A ' ' B`. + this.$ = dquote(yyvstack[yysp]); + break; + + case 98: + /*! 
Production:: expression : "(" handle_sublist ")" */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 2, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = '(' + yyvstack[yysp - 1].join(' | ') + ')'; + break; + + case 99: + /*! Production:: expression : "(" handle_sublist error */ + + // default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-): + this.$ = yyvstack[yysp - 2]; + this._$ = yyparser.yyMergeLocationInfo(yysp - 2, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-) + + + yyparser.yyError(rmCommonWS(_templateObject30, yylexer.prettyPrintRange(yylstack[yysp], yylstack[yysp - 2]))); + break; + + case 104: + /*! Production:: prec : PREC symbol */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = { prec: yyvstack[yysp] }; + break; + + case 105: + /*! Production:: prec : PREC error */ + + // default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-): + this.$ = yyvstack[yysp - 1]; + this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-) + + + // TODO ... + yyparser.yyError(rmCommonWS(_templateObject31, yylexer.prettyPrintRange(yylstack[yysp], yylstack[yysp - 1]))); + break; + + case 106: + /*! Production:: prec : %epsilon */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(null, null, null, null, true); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = null; + break; + + case 111: + /*! Production:: action_ne : "{" action_body error */ + + // default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-): + this.$ = yyvstack[yysp - 2]; + this._$ = yyparser.yyMergeLocationInfo(yysp - 2, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-) + + + yyparser.yyError(rmCommonWS(_templateObject32, yylexer.prettyPrintRange(yylstack[yysp], yylstack[yysp - 2]))); + break; + + case 115: + /*! Production:: action : ARROW_ACTION */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yylstack[yysp]; + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = '$$ = ' + yyvstack[yysp]; + break; + + case 119: + /*! Production:: action_body : action_body "{" action_body "}" action_comments_body */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 4, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = yyvstack[yysp - 4] + yyvstack[yysp - 3] + yyvstack[yysp - 2] + yyvstack[yysp - 1] + yyvstack[yysp]; + break; + + case 120: + /*! 
Production:: action_body : action_body "{" action_body "}" */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 3, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = yyvstack[yysp - 3] + yyvstack[yysp - 2] + yyvstack[yysp - 1] + yyvstack[yysp]; + break; + + case 121: + /*! Production:: action_body : action_body "{" action_body error */ + + // default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-): + this.$ = yyvstack[yysp - 3]; + this._$ = yyparser.yyMergeLocationInfo(yysp - 3, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-) + + + yyparser.yyError(rmCommonWS(_templateObject33, yylexer.prettyPrintRange(yylstack[yysp], yylstack[yysp - 2]))); + break; + + case 125: + /*! Production:: extra_parser_module_code : optional_module_code_chunk include_macro_code extra_parser_module_code */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 2, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = yyvstack[yysp - 2] + yyvstack[yysp - 1] + yyvstack[yysp]; + break; + + case 126: + /*! Production:: include_macro_code : INCLUDE PATH */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + var fileContent = fs.readFileSync(yyvstack[yysp], { encoding: 'utf-8' }); + var rv = checkActionBlock(fileContent); + if (rv) { + yyparser.yyError(rmCommonWS(_templateObject34, $PATH, rv, yylexer.prettyPrintRange(yylstack[yysp], yylstack[yysp - 1]))); + } + // And no, we don't support nested '%include': + this.$ = '\n// Included by Jison: ' + yyvstack[yysp] + ':\n\n' + fileContent + '\n\n// End Of Include by Jison: ' + yyvstack[yysp] + '\n\n'; + break; + + case 127: + /*! Production:: include_macro_code : INCLUDE error */ + + // default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-): + this.$ = yyvstack[yysp - 1]; + this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-) + + + yyparser.yyError(rmCommonWS(_templateObject35) + yylexer.prettyPrintRange(yylstack[yysp], yylstack[yysp - 1])); + break; + + case 130: + /*! Production:: module_code_chunk : error */ + + // default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-): + this.$ = yyvstack[yysp]; + this._$ = yylstack[yysp]; + // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-) + + + // TODO ... + yyparser.yyError(rmCommonWS(_templateObject36) + yylexer.prettyPrintRange(yylstack[yysp])); + break; + + case 164: + // === NO_ACTION[1] :: ensures that anyone (but us) using this new state will fail dramatically! + // error recovery reduction action (action generated by jison, + // using the user-specified `%code error_recovery_reduction` %{...%} + // code chunk below. 
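// --- Editor's sketch (not part of the generated patch) ----------------------
// The `include_macro_code : INCLUDE PATH` action above (case 126) expands a
// `%include` directive by reading the referenced file, validating it as an
// action block, and splicing it between begin/end markers. A minimal
// standalone sketch of that step follows; `validateBlock` is a stand-in for
// the grammar's own `checkActionBlock()` helper, so its exact contract is an
// assumption here. Kept commented out so it cannot interfere with the
// surrounding generated `performAction()` switch.
//
//   var fs = require('fs');
//
//   function expandInclude(path, validateBlock) {
//     var fileContent = fs.readFileSync(path, { encoding: 'utf-8' });
//     var problem = validateBlock ? validateBlock(fileContent) : null;
//     if (problem) {
//       throw new Error('%include "' + path + '" is not a valid action block: ' + problem);
//     }
//     // nested '%include' directives are deliberately NOT expanded (see case 126 above)
//     return '\n// Included by Jison: ' + path + ':\n\n' +
//            fileContent +
//            '\n\n// End Of Include by Jison: ' + path + '\n\n';
//   }
// -----------------------------------------------------------------------------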
+ + + break; + + } + }, + table: bt({ + len: u([20, 1, 25, 5, 19, 18, 3, 18, 18, 5, s, [18, 8], 4, 5, 6, 2, s, [6, 4, -1], 3, 3, 4, 8, 1, 18, 18, 26, c, [18, 3], 1, 4, 21, 3, 3, 5, 5, s, [3, 3], 22, 18, 20, 25, 25, 24, 24, 22, s, [18, 3], 3, 19, 2, 4, 1, 1, 7, 7, c, [40, 3], 17, 4, 20, 18, 23, s, [18, 6], 6, 21, 21, 18, 20, 18, 2, 18, 4, 2, s, [1, 3], s, [3, 4], 4, 3, 5, 3, 15, 11, 2, 2, 19, 20, 18, c, [104, 3], 4, 4, s, [2, 4], 7, 3, 4, 16, 1, 4, 10, 14, c, [122, 3], 18, 18, 9, s, [3, 4], 14, 14, 18, 21, 21, 6, 4, c, [50, 5], 7, 7, s, [15, 4], 3, 9, 3, 14, 18, 18, 8, 5, 3, 9, 4]), + symbol: u([2, s, [14, 10, 1], 27, s, [31, 5, 1], 44, 47, 50, 1, c, [21, 18], 51, 55, s, [58, 4, 1], 89, 15, 24, 44, 49, 69, c, [31, 19], c, [18, 19], 24, 83, c, [39, 38], 36, 63, 65, c, [41, 37], c, [18, 108], 24, 26, 53, 2, 24, 25, 26, 52, c, [9, 3], 62, 82, 83, 2, 45, c, [8, 7], 24, 26, c, [5, 3], 25, 56, 57, c, [9, 3], c, [3, 6], c, [266, 3], 48, c, [275, 3], 70, 71, 72, 83, 89, c, [278, 38], 4, 5, 6, 12, s, [14, 11, 1], 26, c, [24, 6], 37, 42, c, [152, 37], 24, 64, 68, 83, 24, c, [119, 3], 54, c, [27, 11], c, [67, 8], 44, 54, c, [147, 6], 12, 15, 44, 84, 89, c, [5, 8], c, [3, 6], c, [46, 20], c, [201, 3], c, [113, 28], c, [40, 9], c, [177, 23], c, [176, 3], c, [25, 24], 1, c, [26, 4], c, [25, 11], c, [73, 7], 46, c, [24, 24], c, [158, 51], c, [18, 25], 25, 28, 57, c, [21, 12], 28, c, [22, 8], 2, 3, 25, 28, s, [1, 3], 2, 44, 46, 88, 90, 91, c, [425, 3], 24, c, [433, 3], c, [440, 3], c, [3, 3], c, [13, 4], c, [153, 4], 7, 12, 15, 24, 26, 38, 40, 41, 42, 44, 74, 75, 76, 2, 5, 26, 73, c, [151, 12], c, [94, 7], c, [307, 38], 37, 44, 66, 67, c, [685, 109], 12, 13, 43, 86, 87, c, [349, 14], c, [445, 11], c, [84, 46], c, [504, 10], c, [348, 19], c, [58, 19], 25, 29, 30, c, [346, 5], 1, 44, 89, 1, c, [483, 3], c, [3, 6], c, [339, 3], c, [121, 3], c, [496, 3], c, [8, 5], c, [349, 8], c, [348, 4], 78, 79, 81, c, [568, 5], 15, 42, 44, 84, 85, 89, 2, 5, 2, 5, c, [359, 19], c, [19, 11], c, [142, 8], c, [337, 30], c, [180, 26], c, [284, 3], c, [287, 4], c, [4, 4], 25, 28, 25, 28, c, [4, 4], c, [517, 8], c, [168, 6], c, [507, 14], c, [506, 3], c, [189, 7], c, [162, 8], s, [4, 5, 1], c, [190, 8], c, [1024, 6], s, [4, 9, 1], c, [22, 3], s, [39, 4, 1], 44, 80, c, [19, 18], c, [18, 37], c, [16, 3], c, [88, 3], 76, 77, c, [292, 6], c, [3, 6], c, [144, 14], c, [14, 15], c, [480, 39], c, [21, 21], c, [549, 6], c, [6, 3], 1, c, [111, 12], c, [234, 7], c, [7, 7], c, [238, 10], c, [179, 11], c, [15, 40], 6, 8, c, [209, 7], 78, 79, c, [374, 4], c, [313, 14], c, [271, 43], c, [164, 4], c, [169, 4], c, [78, 12], 43]), + type: u([s, [2, 18], 0, 0, 1, c, [21, 20], s, [0, 5], c, [10, 5], s, [2, 39], c, [40, 41], c, [41, 40], s, [2, 108], c, [148, 5], c, [239, 6], c, [159, 6], c, [253, 10], c, [176, 14], c, [36, 7], c, [197, 102], c, [103, 7], c, [108, 21], c, [21, 10], c, [423, 36], c, [373, 149], c, [158, 67], c, [57, 32], c, [322, 8], c, [98, 26], c, [489, 7], c, [721, 173], c, [462, 131], c, [130, 37], c, [375, 11], c, [818, 45], c, [223, 79], c, [124, 24], c, [986, 15], c, [38, 19], c, [57, 20], c, [157, 62], c, [443, 106], c, [106, 103], c, [103, 62], c, [1248, 16], c, [78, 6]]), + state: u([1, 2, 5, 14, 12, 13, 8, 20, 11, 29, 28, 31, 34, 36, 38, 42, 47, 49, 50, 54, 49, 50, 56, 50, 58, 60, 62, 65, 68, 69, 70, 67, 72, 71, 73, 74, 78, 79, 82, 83, 82, 84, 50, 84, 50, 86, 92, 94, 93, 97, 69, 70, 98, 100, 101, 103, 105, 106, 107, 110, 111, 117, 124, 126, 123, 133, 131, 82, 137, 142, 94, 93, 143, 101, 133, 146, 82, 147, 50, 149, 154, 153, 
155, 111, 124, 126, 162, 163, 124, 126]), + mode: u([s, [2, 18], s, [1, 18], c, [21, 4], s, [2, 36], c, [42, 5], c, [38, 34], c, [77, 38], s, [2, 108], s, [1, 20], c, [30, 15], c, [134, 100], c, [106, 4], c, [335, 26], c, [151, 16], c, [376, 48], c, [347, 120], c, [63, 75], c, [13, 9], c, [23, 4], c, [4, 3], c, [587, 6], c, [427, 12], c, [9, 15], c, [335, 13], c, [389, 39], c, [45, 43], c, [509, 77], c, [762, 121], c, [129, 9], c, [756, 14], c, [334, 14], c, [41, 6], c, [367, 5], c, [784, 37], c, [208, 63], c, [1142, 20], c, [1081, 10], c, [487, 14], c, [22, 9], c, [151, 17], c, [221, 10], c, [803, 156], c, [318, 61], c, [216, 50], c, [457, 7], c, [455, 38], c, [123, 34], c, [1206, 8], 1]), + goto: u([s, [10, 18], 4, 3, 10, 6, 7, 9, s, [15, 5, 1], 24, 22, 23, 25, 26, 27, 21, s, [6, 3], 30, s, [11, 18], s, [9, 18], 32, 33, s, [13, 18], s, [14, 18], 35, 66, 37, s, [16, 18], s, [17, 18], s, [18, 18], s, [19, 18], s, [20, 18], s, [21, 18], s, [22, 18], s, [23, 18], 39, 40, 41, s, [43, 4, 1], 48, 33, 51, 53, 52, 55, 33, 51, 57, 33, 51, 59, 61, s, [56, 3], s, [57, 3], s, [58, 3], 4, 63, 64, 66, 33, 21, 3, s, [12, 18], s, [29, 18], s, [109, 26], s, [15, 18], s, [30, 18], 33, 67, 75, 76, 77, s, [31, 11], c, [13, 9], s, [35, 3], s, [36, 3], 80, 81, 21, c, [3, 3], s, [32, 3], s, [33, 3], s, [34, 3], s, [54, 11], 33, 51, s, [54, 7], s, [55, 18], s, [60, 20], s, [107, 25], s, [108, 25], s, [126, 24], s, [127, 24], s, [50, 11], 33, 51, s, [50, 7], s, [51, 18], s, [52, 18], s, [53, 18], 61, 85, s, [41, 12], 87, s, [41, 6], 43, 43, 89, 88, 44, 44, 90, 91, 132, 96, 132, 95, s, [72, 3], 33, s, [7, 3], s, [8, 3], s, [74, 4], 99, s, [90, 8], 102, s, [90, 4], 81, 81, 104, s, [61, 11], 33, s, [61, 7], s, [62, 18], s, [71, 12], 109, s, [71, 6], 108, 71, s, [24, 18], s, [25, 18], s, [37, 18], s, [38, 18], s, [26, 18], s, [27, 18], s, [117, 3], s, [112, 22], s, [113, 21], s, [28, 18], s, [59, 20], s, [39, 18], 42, 42, s, [40, 18], 116, 115, 113, 114, 49, 49, 1, 2, 5, 124, 21, 131, 131, 118, s, [128, 3], s, [130, 3], s, [73, 4], 119, 121, 120, 77, 77, 122, 77, 77, s, [83, 3], s, [106, 3], 130, 106, 106, 127, 129, 128, 125, 106, 106, 132, s, [116, 3], 80, 81, 134, 21, 136, 135, 80, 80, s, [70, 19], s, [65, 11], 109, s, [65, 7], s, [64, 18], s, [68, 19], s, [69, 18], 139, 140, 138, s, [118, 3], 141, s, [122, 4], 45, 45, 46, 46, 47, 47, 48, 48, c, [494, 4], s, [129, 3], s, [75, 4], 144, c, [487, 13], 145, s, [76, 4], c, [153, 7], s, [89, 14], 148, 33, 51, s, [100, 6], 150, 151, 152, s, [100, 9], s, [95, 18], s, [96, 18], s, [97, 18], s, [90, 7], s, [87, 3], s, [88, 3], s, [114, 3], s, [115, 3], s, [78, 14], s, [79, 14], s, [63, 18], s, [110, 21], s, [111, 21], c, [526, 4], s, [123, 4], 125, s, [82, 3], s, [84, 3], s, [85, 3], s, [86, 3], s, [104, 7], s, [105, 7], s, [94, 10], 156, s, [94, 4], s, [101, 15], s, [102, 15], s, [103, 15], 158, 159, 157, 92, 92, 130, 92, c, [465, 3], 161, 140, 160, s, [93, 14], s, [98, 18], s, [99, 18], s, [90, 7], s, [120, 3], 112, s, [121, 3], 91, 91, 130, 91, c, [74, 3], s, [119, 3], 141]) + }), + defaultActions: bda({ + idx: u([0, 3, 5, 7, 8, s, [10, 8, 1], 25, 26, 27, s, [30, 6, 1], 37, 40, 41, 44, 45, 46, s, [48, 6, 1], 55, 56, 57, 60, 66, 67, 68, 72, s, [74, 6, 1], s, [81, 7, 1], s, [89, 4, 1], 95, 96, 97, 100, 104, 105, 107, 108, 109, s, [112, 5, 1], 118, 119, 122, 124, s, [127, 13, 1], s, [141, 8, 1], 150, 151, 152, s, [156, 4, 1], 161]), + goto: u([10, 6, 9, 13, 14, s, [16, 8, 1], 56, 57, 58, 3, 12, 29, 109, 15, 30, 67, 35, 36, 32, 33, 34, 55, 60, 107, 108, 126, 127, 51, 52, 53, 
43, 7, 8, 74, 62, 24, 25, 37, 38, 26, 27, 112, 113, 28, 59, 39, 42, 40, 49, 1, 2, 5, 128, 130, 73, 83, 80, 70, 64, 68, 69, 122, s, [45, 4, 1], 129, 75, 76, 89, 95, 96, 97, 90, 87, 88, 114, 115, 78, 79, 63, 110, 111, 123, 125, 82, 84, 85, 86, 104, 105, 101, 102, 103, 93, 98, 99, 90, 121]) + }), + parseError: function parseError(str, hash, ExceptionClass) { + if (hash.recoverable && typeof this.trace === 'function') { + this.trace(str); + hash.destroy(); // destroy... well, *almost*! + } else { + if (!ExceptionClass) { + ExceptionClass = this.JisonParserError; + } + throw new ExceptionClass(str, hash); + } + }, + parse: function parse(input) { + var self = this; + var stack = new Array(128); // token stack: stores token which leads to state at the same index (column storage) + var sstack = new Array(128); // state stack: stores states (column storage) + + var vstack = new Array(128); // semantic value stack + var lstack = new Array(128); // location stack + var table = this.table; + var sp = 0; // 'stack pointer': index into the stacks + var yyloc; + + var symbol = 0; + var preErrorSymbol = 0; + var lastEofErrorStateDepth = 0; + var recoveringErrorInfo = null; + var recovering = 0; // (only used when the grammar contains error recovery rules) + var TERROR = this.TERROR; + var EOF = this.EOF; + var ERROR_RECOVERY_TOKEN_DISCARD_COUNT = this.options.errorRecoveryTokenDiscardCount | 0 || 3; + var NO_ACTION = [0, 164 /* === table.length :: ensures that anyone using this new state will fail dramatically! */]; + + var lexer; + if (this.__lexer__) { + lexer = this.__lexer__; + } else { + lexer = this.__lexer__ = Object.create(this.lexer); + } + + var sharedState_yy = { + parseError: undefined, + quoteName: undefined, + lexer: undefined, + parser: undefined, + pre_parse: undefined, + post_parse: undefined, + pre_lex: undefined, + post_lex: undefined // WARNING: must be written this way for the code expanders to work correctly in both ES5 and ES6 modes! + }; + + var ASSERT; + if (typeof assert !== 'function') { + ASSERT = function JisonAssert(cond, msg) { + if (!cond) { + throw new Error('assertion failed: ' + (msg || '***')); + } + }; + } else { + ASSERT = assert; + } + + this.yyGetSharedState = function yyGetSharedState() { + return sharedState_yy; + }; + + this.yyGetErrorInfoTrack = function yyGetErrorInfoTrack() { + return recoveringErrorInfo; + }; + + // shallow clone objects, straight copy of simple `src` values + // e.g. `lexer.yytext` MAY be a complex value object, + // rather than a simple string/value. + function shallow_copy(src) { + if ((typeof src === 'undefined' ? 
'undefined' : _typeof(src)) === 'object') { + var dst = {}; + for (var k in src) { + if (Object.prototype.hasOwnProperty.call(src, k)) { + dst[k] = src[k]; + } + } + return dst; + } + return src; + } + function shallow_copy_noclobber(dst, src) { + for (var k in src) { + if (typeof dst[k] === 'undefined' && Object.prototype.hasOwnProperty.call(src, k)) { + dst[k] = src[k]; + } + } + } + function copy_yylloc(loc) { + var rv = shallow_copy(loc); + if (rv && rv.range) { + rv.range = rv.range.slice(0); + } + return rv; + } + + // copy state + shallow_copy_noclobber(sharedState_yy, this.yy); + + sharedState_yy.lexer = lexer; + sharedState_yy.parser = this; + + // *Always* setup `yyError`, `YYRECOVERING`, `yyErrOk` and `yyClearIn` functions as it is paramount + // to have *their* closure match ours -- if we only set them up once, + // any subsequent `parse()` runs will fail in very obscure ways when + // these functions are invoked in the user action code block(s) as + // their closure will still refer to the `parse()` instance which set + // them up. Hence we MUST set them up at the start of every `parse()` run! + if (this.yyError) { + this.yyError = function yyError(str /*, ...args */) { + + var error_rule_depth = this.options.parserErrorsAreRecoverable ? locateNearestErrorRecoveryRule(state) : -1; + var expected = this.collect_expected_token_set(state); + var hash = this.constructParseErrorInfo(str, null, expected, error_rule_depth >= 0); + // append to the old one? + if (recoveringErrorInfo) { + var esp = recoveringErrorInfo.info_stack_pointer; + + recoveringErrorInfo.symbol_stack[esp] = symbol; + var v = this.shallowCopyErrorInfo(hash); + v.yyError = true; + v.errorRuleDepth = error_rule_depth; + v.recovering = recovering; + // v.stackSampleLength = error_rule_depth + EXTRA_STACK_SAMPLE_DEPTH; + + recoveringErrorInfo.value_stack[esp] = v; + recoveringErrorInfo.location_stack[esp] = copy_yylloc(lexer.yylloc); + recoveringErrorInfo.state_stack[esp] = newState || NO_ACTION[1]; + + ++esp; + recoveringErrorInfo.info_stack_pointer = esp; + } else { + recoveringErrorInfo = this.shallowCopyErrorInfo(hash); + recoveringErrorInfo.yyError = true; + recoveringErrorInfo.errorRuleDepth = error_rule_depth; + recoveringErrorInfo.recovering = recovering; + } + + // Add any extra args to the hash under the name `extra_error_attributes`: + var args = Array.prototype.slice.call(arguments, 1); + if (args.length) { + hash.extra_error_attributes = args; + } + + var r = this.parseError(str, hash, this.JisonParserError); + return r; + }; + } + + // Does the shared state override the default `parseError` that already comes with this instance? + if (typeof sharedState_yy.parseError === 'function') { + this.parseError = function parseErrorAlt(str, hash, ExceptionClass) { + if (!ExceptionClass) { + ExceptionClass = this.JisonParserError; + } + return sharedState_yy.parseError.call(this, str, hash, ExceptionClass); + }; + } else { + this.parseError = this.originalParseError; + } + + // Does the shared state override the default `quoteName` that already comes with this instance? 
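// --- Editor's sketch (not part of the generated patch) ----------------------
// The two blocks around this point re-bind `parseError` and `quoteName` on
// every parse() run so that overrides supplied through the shared state
// (`parser.yy`) take effect. A hedged usage sketch, assuming the common jison
// convention of configuring the generated parser via its `yy` member before
// calling `parse()`:
//
//   parser.yy = {
//     parseError: function (str, hash, ExceptionClass) {
//       // `hash.recoverable` signals whether an error recovery rule is in reach.
//       if (hash.recoverable) {
//         console.warn('recoverable parse problem: ' + str);
//         hash.destroy();                      // release internal references, like the default does
//         return;
//       }
//       throw new (ExceptionClass || Error)(str, hash);
//     },
//     quoteName: function (id_str) {
//       return JSON.stringify(id_str);         // e.g. render token names with double quotes
//     }
//   };
// -----------------------------------------------------------------------------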
+ if (typeof sharedState_yy.quoteName === 'function') { + this.quoteName = function quoteNameAlt(id_str) { + return sharedState_yy.quoteName.call(this, id_str); + }; + } else { + this.quoteName = this.originalQuoteName; + } + + // set up the cleanup function; make it an API so that external code can re-use this one in case of + // calamities or when the `%options no-try-catch` option has been specified for the grammar, in which + // case this parse() API method doesn't come with a `finally { ... }` block any more! + // + // NOTE: as this API uses parse() as a closure, it MUST be set again on every parse() invocation, + // or else your `sharedState`, etc. references will be *wrong*! + this.cleanupAfterParse = function parser_cleanupAfterParse(resultValue, invoke_post_methods, do_not_nuke_errorinfos) { + var rv; + + if (invoke_post_methods) { + var hash; + + if (sharedState_yy.post_parse || this.post_parse) { + // create an error hash info instance: we re-use this API in a **non-error situation** + // as this one delivers all parser internals ready for access by userland code. + hash = this.constructParseErrorInfo(null /* no error! */, null /* no exception! */, null, false); + } + + if (sharedState_yy.post_parse) { + rv = sharedState_yy.post_parse.call(this, sharedState_yy, resultValue, hash); + if (typeof rv !== 'undefined') resultValue = rv; + } + if (this.post_parse) { + rv = this.post_parse.call(this, sharedState_yy, resultValue, hash); + if (typeof rv !== 'undefined') resultValue = rv; + } + + // cleanup: + if (hash && hash.destroy) { + hash.destroy(); + } + } + + if (this.__reentrant_call_depth > 1) return resultValue; // do not (yet) kill the sharedState when this is a reentrant run. + + // clean up the lingering lexer structures as well: + if (lexer.cleanupAfterLex) { + lexer.cleanupAfterLex(do_not_nuke_errorinfos); + } + + // prevent lingering circular references from causing memory leaks: + if (sharedState_yy) { + sharedState_yy.lexer = undefined; + sharedState_yy.parser = undefined; + if (lexer.yy === sharedState_yy) { + lexer.yy = undefined; + } + } + sharedState_yy = undefined; + this.parseError = this.originalParseError; + this.quoteName = this.originalQuoteName; + + // nuke the vstack[] array at least as that one will still reference obsoleted user values. + // To be safe, we nuke the other internal stack columns as well... + stack.length = 0; // fastest way to nuke an array without overly bothering the GC + sstack.length = 0; + lstack.length = 0; + vstack.length = 0; + sp = 0; + + // nuke the error hash info instances created during this run. + // Userland code must COPY any data/references + // in the error hash instance(s) it is more permanently interested in. + if (!do_not_nuke_errorinfos) { + for (var i = this.__error_infos.length - 1; i >= 0; i--) { + var el = this.__error_infos[i]; + if (el && typeof el.destroy === 'function') { + el.destroy(); + } + } + this.__error_infos.length = 0; + + for (var i = this.__error_recovery_infos.length - 1; i >= 0; i--) { + var el = this.__error_recovery_infos[i]; + if (el && typeof el.destroy === 'function') { + el.destroy(); + } + } + this.__error_recovery_infos.length = 0; + + if (recoveringErrorInfo && typeof recoveringErrorInfo.destroy === 'function') { + recoveringErrorInfo.destroy(); + recoveringErrorInfo = undefined; + } + } + + return resultValue; + }; + + // merge yylloc info into a new yylloc instance. + // + // `first_index` and `last_index` MAY be UNDEFINED/NULL or these are indexes into the `lstack[]` location stack array. 
+ // + // `first_yylloc` and `last_yylloc` MAY be UNDEFINED/NULL or explicit (custom or regular) `yylloc` instances, in which + // case these override the corresponding first/last indexes. + // + // `dont_look_back` is an optional flag (default: FALSE), which instructs this merge operation NOT to search + // through the parse location stack for a location, which would otherwise be used to construct the new (epsilon!) + // yylloc info. + // + // Note: epsilon rule's yylloc situation is detected by passing both `first_index` and `first_yylloc` as UNDEFINED/NULL. + this.yyMergeLocationInfo = function parser_yyMergeLocationInfo(first_index, last_index, first_yylloc, last_yylloc, dont_look_back) { + var i1 = first_index | 0, + i2 = last_index | 0; + var l1 = first_yylloc, + l2 = last_yylloc; + var rv; + + // rules: + // - first/last yylloc entries override first/last indexes + + if (!l1) { + if (first_index != null) { + for (var i = i1; i <= i2; i++) { + l1 = lstack[i]; + if (l1) { + break; + } + } + } + } + + if (!l2) { + if (last_index != null) { + for (var i = i2; i >= i1; i--) { + l2 = lstack[i]; + if (l2) { + break; + } + } + } + } + + // - detect if an epsilon rule is being processed and act accordingly: + if (!l1 && first_index == null) { + // epsilon rule span merger. With optional look-ahead in l2. + if (!dont_look_back) { + for (var i = (i1 || sp) - 1; i >= 0; i--) { + l1 = lstack[i]; + if (l1) { + break; + } + } + } + if (!l1) { + if (!l2) { + // when we still don't have any valid yylloc info, we're looking at an epsilon rule + // without look-ahead and no preceding terms and/or `dont_look_back` set: + // in that case we ca do nothing but return NULL/UNDEFINED: + return undefined; + } else { + // shallow-copy L2: after all, we MAY be looking + // at unconventional yylloc info objects... + rv = shallow_copy(l2); + if (rv.range) { + // shallow copy the yylloc ranges info to prevent us from modifying the original arguments' entries: + rv.range = rv.range.slice(0); + } + return rv; + } + } else { + // shallow-copy L1, then adjust first col/row 1 column past the end. + rv = shallow_copy(l1); + rv.first_line = rv.last_line; + rv.first_column = rv.last_column; + if (rv.range) { + // shallow copy the yylloc ranges info to prevent us from modifying the original arguments' entries: + rv.range = rv.range.slice(0); + rv.range[0] = rv.range[1]; + } + + if (l2) { + // shallow-mixin L2, then adjust last col/row accordingly. + shallow_copy_noclobber(rv, l2); + rv.last_line = l2.last_line; + rv.last_column = l2.last_column; + if (rv.range && l2.range) { + rv.range[1] = l2.range[1]; + } + } + return rv; + } + } + + if (!l1) { + l1 = l2; + l2 = null; + } + if (!l1) { + return undefined; + } + + // shallow-copy L1|L2, before we try to adjust the yylloc values: after all, we MAY be looking + // at unconventional yylloc info objects... + rv = shallow_copy(l1); + + // first_line: ..., + // first_column: ..., + // last_line: ..., + // last_column: ..., + if (rv.range) { + // shallow copy the yylloc ranges info to prevent us from modifying the original arguments' entries: + rv.range = rv.range.slice(0); + } + + if (l2) { + shallow_copy_noclobber(rv, l2); + rv.last_line = l2.last_line; + rv.last_column = l2.last_column; + if (rv.range && l2.range) { + rv.range[1] = l2.range[1]; + } + } + + return rv; + }; + + // NOTE: as this API uses parse() as a closure, it MUST be set again on every parse() invocation, + // or else your `lexer`, `sharedState`, etc. references will be *wrong*! 
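// --- Editor's sketch (not part of the generated patch) ----------------------
// For the common case where both locations are known, yyMergeLocationInfo()
// boils down to "start of the merged span from the first yylloc, end from the
// last one, ranges joined". A simplified, self-contained illustration of that
// contract (the real API additionally accepts lstack[] indexes, detects
// epsilon rules and tolerates missing range info):
//
//   function mergeSpans(l1, l2) {
//     return {
//       first_line:   l1.first_line,
//       first_column: l1.first_column,
//       last_line:    l2.last_line,
//       last_column:  l2.last_column,
//       range: [l1.range[0], l2.range[1]]
//     };
//   }
//
//   mergeSpans(
//     { first_line: 1, first_column: 0, last_line: 1, last_column: 4, range: [0, 4] },
//     { first_line: 2, first_column: 2, last_line: 2, last_column: 7, range: [10, 15] }
//   );
//   // => { first_line: 1, first_column: 0, last_line: 2, last_column: 7, range: [0, 15] }
// -----------------------------------------------------------------------------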
+ this.constructParseErrorInfo = function parser_constructParseErrorInfo(msg, ex, expected, recoverable) { + var pei = { + errStr: msg, + exception: ex, + text: lexer.match, + value: lexer.yytext, + token: this.describeSymbol(symbol) || symbol, + token_id: symbol, + line: lexer.yylineno, + loc: copy_yylloc(lexer.yylloc), + expected: expected, + recoverable: recoverable, + state: state, + action: action, + new_state: newState, + symbol_stack: stack, + state_stack: sstack, + value_stack: vstack, + location_stack: lstack, + stack_pointer: sp, + yy: sharedState_yy, + lexer: lexer, + parser: this, + + // and make sure the error info doesn't stay due to potential + // ref cycle via userland code manipulations. + // These would otherwise all be memory leak opportunities! + // + // Note that only array and object references are nuked as those + // constitute the set of elements which can produce a cyclic ref. + // The rest of the members is kept intact as they are harmless. + destroy: function destructParseErrorInfo() { + // remove cyclic references added to error info: + // info.yy = null; + // info.lexer = null; + // info.value = null; + // info.value_stack = null; + // ... + var rec = !!this.recoverable; + for (var key in this) { + if (this.hasOwnProperty(key) && (typeof key === 'undefined' ? 'undefined' : _typeof(key)) === 'object') { + this[key] = undefined; + } + } + this.recoverable = rec; + } + }; + // track this instance so we can `destroy()` it once we deem it superfluous and ready for garbage collection! + this.__error_infos.push(pei); + return pei; + }; + + // clone some parts of the (possibly enhanced!) errorInfo object + // to give them some persistence. + this.shallowCopyErrorInfo = function parser_shallowCopyErrorInfo(p) { + var rv = shallow_copy(p); + + // remove the large parts which can only cause cyclic references + // and are otherwise available from the parser kernel anyway. + delete rv.sharedState_yy; + delete rv.parser; + delete rv.lexer; + + // lexer.yytext MAY be a complex value object, rather than a simple string/value: + rv.value = shallow_copy(rv.value); + + // yylloc info: + rv.loc = copy_yylloc(rv.loc); + + // the 'expected' set won't be modified, so no need to clone it: + //rv.expected = rv.expected.slice(0); + + //symbol stack is a simple array: + rv.symbol_stack = rv.symbol_stack.slice(0); + // ditto for state stack: + rv.state_stack = rv.state_stack.slice(0); + // clone the yylloc's in the location stack?: + rv.location_stack = rv.location_stack.map(copy_yylloc); + // and the value stack may carry both simple and complex values: + // shallow-copy the latter. + rv.value_stack = rv.value_stack.map(shallow_copy); + + // and we don't bother with the sharedState_yy reference: + //delete rv.yy; + + // now we prepare for tracking the COMBINE actions + // in the error recovery code path: + // + // as we want to keep the maximum error info context, we + // *scan* the state stack to find the first *empty* slot. + // This position will surely be AT OR ABOVE the current + // stack pointer, but we want to keep the 'used but discarded' + // part of the parse stacks *intact* as those slots carry + // error context that may be useful when you want to produce + // very detailed error diagnostic reports. + // + // ### Purpose of each stack pointer: + // + // - stack_pointer: points at the top of the parse stack + // **as it existed at the time of the error + // occurrence, i.e. 
at the time the stack + // snapshot was taken and copied into the + // errorInfo object.** + // - base_pointer: the bottom of the **empty part** of the + // stack, i.e. **the start of the rest of + // the stack space /above/ the existing + // parse stack. This section will be filled + // by the error recovery process as it + // travels the parse state machine to + // arrive at the resolving error recovery rule.** + // - info_stack_pointer: + // this stack pointer points to the **top of + // the error ecovery tracking stack space**, i.e. + // this stack pointer takes up the role of + // the `stack_pointer` for the error recovery + // process. Any mutations in the **parse stack** + // are **copy-appended** to this part of the + // stack space, keeping the bottom part of the + // stack (the 'snapshot' part where the parse + // state at the time of error occurrence was kept) + // intact. + // - root_failure_pointer: + // copy of the `stack_pointer`... + // + for (var i = rv.stack_pointer; typeof rv.state_stack[i] !== 'undefined'; i++) { + // empty + } + rv.base_pointer = i; + rv.info_stack_pointer = i; + + rv.root_failure_pointer = rv.stack_pointer; + + // track this instance so we can `destroy()` it once we deem it superfluous and ready for garbage collection! + this.__error_recovery_infos.push(rv); + + return rv; + }; + + function lex() { + var token = lexer.lex(); + // if token isn't its numeric value, convert + if (typeof token !== 'number') { + token = self.symbols_[token] || token; + } + + return token || EOF; + } + + var state, action, r, t; + var yyval = { + $: true, + _$: undefined, + yy: sharedState_yy + }; + var p; + var yyrulelen; + var this_production; + var newState; + var retval = false; + + // Return the rule stack depth where the nearest error rule can be found. + // Return -1 when no error recovery rule was found. + function locateNearestErrorRecoveryRule(state) { + var stack_probe = sp - 1; + var depth = 0; + + // try to recover from error + for (;;) { + // check for error recovery rule in this state + + + var t = table[state][TERROR] || NO_ACTION; + if (t[0]) { + // We need to make sure we're not cycling forever: + // once we hit EOF, even when we `yyerrok()` an error, we must + // prevent the core from running forever, + // e.g. when parent rules are still expecting certain input to + // follow after this, for example when you handle an error inside a set + // of braces which are matched by a parent rule in your grammar. + // + // Hence we require that every error handling/recovery attempt + // *after we've hit EOF* has a diminishing state stack: this means + // we will ultimately have unwound the state stack entirely and thus + // terminate the parse in a controlled fashion even when we have + // very complex error/recovery code interplay in the core + user + // action code blocks: + + + if (symbol === EOF) { + if (!lastEofErrorStateDepth) { + lastEofErrorStateDepth = sp - 1 - depth; + } else if (lastEofErrorStateDepth <= sp - 1 - depth) { + + --stack_probe; // popStack(1): [symbol, action] + state = sstack[stack_probe]; + ++depth; + continue; + } + } + return depth; + } + if (state === 0 /* $accept rule */ || stack_probe < 1) { + + return -1; // No suitable error recovery rule available. 
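// --- Editor's sketch (not part of the generated patch) ----------------------
// The loop above walks down the state stack looking for the nearest state
// whose parse-table row has an entry for the `error` token. Stripped of the
// EOF loop-protection documented in the comments above, the core idea is
// roughly:
//
//   function nearestErrorRuleDepth(table, sstack, sp, TERROR) {
//     var depth = 0;
//     for (var probe = sp - 1; probe >= 1; probe--, depth++) {
//       var state = sstack[probe];
//       var t = table[state] && table[state][TERROR];
//       if (t && t[0]) {
//         return depth;                              // this state can act on `error`
//       }
//       if (state === 0) break;                      // $accept state: nothing left to unwind
//     }
//     return -1;                                     // no error recovery rule reachable
//   }
// -----------------------------------------------------------------------------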
+ } + --stack_probe; // popStack(1): [symbol, action] + state = sstack[stack_probe]; + ++depth; + } + } + + try { + this.__reentrant_call_depth++; + + lexer.setInput(input, sharedState_yy); + + yyloc = lexer.yylloc; + lstack[sp] = yyloc; + vstack[sp] = null; + sstack[sp] = 0; + stack[sp] = 0; + ++sp; + + if (this.pre_parse) { + this.pre_parse.call(this, sharedState_yy); + } + if (sharedState_yy.pre_parse) { + sharedState_yy.pre_parse.call(this, sharedState_yy); + } + + newState = sstack[sp - 1]; + for (;;) { + // retrieve state number from top of stack + state = newState; // sstack[sp - 1]; + + // use default actions if available + if (this.defaultActions[state]) { + action = 2; + newState = this.defaultActions[state]; + } else { + // The single `==` condition below covers both these `===` comparisons in a single + // operation: + // + // if (symbol === null || typeof symbol === 'undefined') ... + if (!symbol) { + symbol = lex(); + } + // read action for current state and first input + t = table[state] && table[state][symbol] || NO_ACTION; + newState = t[1]; + action = t[0]; + + // handle parse error + if (!action) { + // first see if there's any chance at hitting an error recovery rule: + var error_rule_depth = locateNearestErrorRecoveryRule(state); + var errStr = null; + var errSymbolDescr = this.describeSymbol(symbol) || symbol; + var expected = this.collect_expected_token_set(state); + + if (!recovering) { + // Report error + if (typeof lexer.yylineno === 'number') { + errStr = 'Parse error on line ' + (lexer.yylineno + 1) + ': '; + } else { + errStr = 'Parse error: '; + } + + if (typeof lexer.showPosition === 'function') { + errStr += '\n' + lexer.showPosition(79 - 10, 10) + '\n'; + } + if (expected.length) { + errStr += 'Expecting ' + expected.join(', ') + ', got unexpected ' + errSymbolDescr; + } else { + errStr += 'Unexpected ' + errSymbolDescr; + } + + p = this.constructParseErrorInfo(errStr, null, expected, error_rule_depth >= 0); + + // cleanup the old one before we start the new error info track: + if (recoveringErrorInfo && typeof recoveringErrorInfo.destroy === 'function') { + recoveringErrorInfo.destroy(); + } + recoveringErrorInfo = this.shallowCopyErrorInfo(p); + + r = this.parseError(p.errStr, p, this.JisonParserError); + + // Protect against overly blunt userland `parseError` code which *sets* + // the `recoverable` flag without properly checking first: + // we always terminate the parse when there's no recovery rule available anyhow! + if (!p.recoverable || error_rule_depth < 0) { + retval = r; + break; + } else { + // TODO: allow parseError callback to edit symbol and or state at the start of the error recovery process... 
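// --- Editor's sketch (not part of the generated patch) ----------------------
// The recovery code below uses ERROR_RECOVERY_TOKEN_DISCARD_COUNT, initialised
// above as `this.options.errorRecoveryTokenDiscardCount | 0 || 3`, to decide
// how many real symbols must be shifted before another error may be reported.
// Since that option is re-read at the start of every parse() run, the window
// can in principle be widened beforehand; treat this as an illustration of the
// knob, not as documented public API, and `grammarSource` is just a
// placeholder for whatever input you feed the parser:
//
//   parser.options.errorRecoveryTokenDiscardCount = 5;   // kernel default: 3
//   var result = parser.parse(grammarSource);
// -----------------------------------------------------------------------------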
+ } + } + + var esp = recoveringErrorInfo.info_stack_pointer; + + // just recovered from another error + if (recovering === ERROR_RECOVERY_TOKEN_DISCARD_COUNT && error_rule_depth >= 0) { + // SHIFT current lookahead and grab another + recoveringErrorInfo.symbol_stack[esp] = symbol; + recoveringErrorInfo.value_stack[esp] = shallow_copy(lexer.yytext); + recoveringErrorInfo.location_stack[esp] = copy_yylloc(lexer.yylloc); + recoveringErrorInfo.state_stack[esp] = newState; // push state + ++esp; + + // Pick up the lexer details for the current symbol as that one is not 'look-ahead' any more: + + + yyloc = lexer.yylloc; + + preErrorSymbol = 0; + symbol = lex(); + } + + // try to recover from error + if (error_rule_depth < 0) { + ASSERT(recovering > 0); + recoveringErrorInfo.info_stack_pointer = esp; + + // barf a fatal hairball when we're out of look-ahead symbols and none hit a match + // while we are still busy recovering from another error: + var po = this.__error_infos[this.__error_infos.length - 1]; + if (!po) { + p = this.constructParseErrorInfo('Parsing halted while starting to recover from another error.', null, expected, false); + } else { + p = this.constructParseErrorInfo('Parsing halted while starting to recover from another error. Previous error which resulted in this fatal result: ' + po.errStr, null, expected, false); + p.extra_error_attributes = po; + } + retval = this.parseError(p.errStr, p, this.JisonParserError); + break; + } + + preErrorSymbol = symbol === TERROR ? 0 : symbol; // save the lookahead token + symbol = TERROR; // insert generic error symbol as new lookahead + + var EXTRA_STACK_SAMPLE_DEPTH = 3; + + // REDUCE/COMBINE the pushed terms/tokens to a new ERROR token: + recoveringErrorInfo.symbol_stack[esp] = preErrorSymbol; + if (errStr) { + recoveringErrorInfo.value_stack[esp] = { + yytext: shallow_copy(lexer.yytext), + errorRuleDepth: error_rule_depth, + errorStr: errStr, + errorSymbolDescr: errSymbolDescr, + expectedStr: expected, + stackSampleLength: error_rule_depth + EXTRA_STACK_SAMPLE_DEPTH + }; + } else { + recoveringErrorInfo.value_stack[esp] = { + yytext: shallow_copy(lexer.yytext), + errorRuleDepth: error_rule_depth, + stackSampleLength: error_rule_depth + EXTRA_STACK_SAMPLE_DEPTH + }; + } + recoveringErrorInfo.location_stack[esp] = copy_yylloc(lexer.yylloc); + recoveringErrorInfo.state_stack[esp] = newState || NO_ACTION[1]; + + ++esp; + recoveringErrorInfo.info_stack_pointer = esp; + + yyval.$ = recoveringErrorInfo; + yyval._$ = undefined; + + yyrulelen = error_rule_depth; + + r = this.performAction.call(yyval, yyloc, NO_ACTION[1], sp - 1, vstack, lstack); + + if (typeof r !== 'undefined') { + retval = r; + break; + } + + // pop off stack + sp -= yyrulelen; + + // and move the top entries + discarded part of the parse stacks onto the error info stack: + for (var idx = sp - EXTRA_STACK_SAMPLE_DEPTH, top = idx + yyrulelen; idx < top; idx++, esp++) { + recoveringErrorInfo.symbol_stack[esp] = stack[idx]; + recoveringErrorInfo.value_stack[esp] = shallow_copy(vstack[idx]); + recoveringErrorInfo.location_stack[esp] = copy_yylloc(lstack[idx]); + recoveringErrorInfo.state_stack[esp] = sstack[idx]; + } + + recoveringErrorInfo.symbol_stack[esp] = TERROR; + recoveringErrorInfo.value_stack[esp] = shallow_copy(yyval.$); + recoveringErrorInfo.location_stack[esp] = copy_yylloc(yyval._$); + + // goto new state = table[STATE][NONTERMINAL] + newState = sstack[sp - 1]; + + if (this.defaultActions[newState]) { + recoveringErrorInfo.state_stack[esp] = 
this.defaultActions[newState]; + } else { + t = table[newState] && table[newState][symbol] || NO_ACTION; + recoveringErrorInfo.state_stack[esp] = t[1]; + } + + ++esp; + recoveringErrorInfo.info_stack_pointer = esp; + + // allow N (default: 3) real symbols to be shifted before reporting a new error + recovering = ERROR_RECOVERY_TOKEN_DISCARD_COUNT; + + // Now duplicate the standard parse machine here, at least its initial + // couple of rounds until the TERROR symbol is **pushed onto the parse stack**, + // as we wish to push something special then! + + + // Run the state machine in this copy of the parser state machine + // until we *either* consume the error symbol (and its related information) + // *or* we run into another error while recovering from this one + // *or* we execute a `reduce` action which outputs a final parse + // result (yes, that MAY happen!)... + + ASSERT(recoveringErrorInfo); + ASSERT(symbol === TERROR); + while (symbol) { + // retrieve state number from top of stack + state = newState; // sstack[sp - 1]; + + // use default actions if available + if (this.defaultActions[state]) { + action = 2; + newState = this.defaultActions[state]; + } else { + // read action for current state and first input + t = table[state] && table[state][symbol] || NO_ACTION; + newState = t[1]; + action = t[0]; + + // encountered another parse error? If so, break out to main loop + // and take it from there! + if (!action) { + newState = state; + break; + } + } + + switch (action) { + // catch misc. parse failures: + default: + // this shouldn't happen, unless resolve defaults are off + if (action instanceof Array) { + p = this.constructParseErrorInfo('Parse Error: multiple actions possible at state: ' + state + ', token: ' + symbol, null, null, false); + retval = this.parseError(p.errStr, p, this.JisonParserError); + // signal end of error recovery loop AND end of outer parse loop + action = 3; + break; + } + // Another case of better safe than sorry: in case state transitions come out of another error recovery process + // or a buggy LUT (LookUp Table): + p = this.constructParseErrorInfo('Parsing halted. No viable error recovery approach available due to internal system failure.', null, null, false); + retval = this.parseError(p.errStr, p, this.JisonParserError); + // signal end of error recovery loop AND end of outer parse loop + action = 3; + break; + + // shift: + case 1: + stack[sp] = symbol; + //vstack[sp] = lexer.yytext; + ASSERT(recoveringErrorInfo); + vstack[sp] = recoveringErrorInfo; + //lstack[sp] = copy_yylloc(lexer.yylloc); + lstack[sp] = this.yyMergeLocationInfo(null, null, recoveringErrorInfo.loc, lexer.yylloc, true); + sstack[sp] = newState; // push state + ++sp; + symbol = 0; + if (!preErrorSymbol) { + // normal execution / no error + // Pick up the lexer details for the current symbol as that one is not 'look-ahead' any more: + + + yyloc = lexer.yylloc; + + if (recovering > 0) { + recovering--; + } + } else { + // error just occurred, resume old lookahead f/ before error, *unless* that drops us straight back into error mode: + symbol = preErrorSymbol; + preErrorSymbol = 0; + + // read action for current state and first input + t = table[newState] && table[newState][symbol] || NO_ACTION; + if (!t[0] || symbol === TERROR) { + // forget about that symbol and move forward: this wasn't a 'forgot to insert' error type where + // (simple) stuff might have been missing before the token which caused the error we're + // recovering from now... 
+ // + // Also check if the LookAhead symbol isn't the ERROR token we set as part of the error + // recovery, for then this we would we idling (cycling) on the error forever. + // Yes, this does not take into account the possibility that the *lexer* may have + // produced a *new* TERROR token all by itself, but that would be a very peculiar grammar! + + + symbol = 0; + } + } + + // once we have pushed the special ERROR token value, we're done in this inner loop! + break; + + // reduce: + case 2: + this_production = this.productions_[newState - 1]; // `this.productions_[]` is zero-based indexed while states start from 1 upwards... + yyrulelen = this_production[1]; + + r = this.performAction.call(yyval, yyloc, newState, sp - 1, vstack, lstack); + + if (typeof r !== 'undefined') { + // signal end of error recovery loop AND end of outer parse loop + action = 3; + retval = r; + break; + } + + // pop off stack + sp -= yyrulelen; + + // don't overwrite the `symbol` variable: use a local var to speed things up: + var ntsymbol = this_production[0]; // push nonterminal (reduce) + stack[sp] = ntsymbol; + vstack[sp] = yyval.$; + lstack[sp] = yyval._$; + // goto new state = table[STATE][NONTERMINAL] + newState = table[sstack[sp - 1]][ntsymbol]; + sstack[sp] = newState; + ++sp; + + continue; + + // accept: + case 3: + retval = true; + // Return the `$accept` rule's `$$` result, if available. + // + // Also note that JISON always adds this top-most `$accept` rule (with implicit, + // default, action): + // + // $accept: $end + // %{ $$ = $1; @$ = @1; %} + // + // which, combined with the parse kernel's `$accept` state behaviour coded below, + // will produce the `$$` value output of the rule as the parse result, + // IFF that result is *not* `undefined`. (See also the parser kernel code.) + // + // In code: + // + // %{ + // @$ = @1; // if location tracking support is included + // if (typeof $1 !== 'undefined') + // return $1; + // else + // return true; // the default parse result if the rule actions don't produce anything + // %} + sp--; + if (typeof vstack[sp] !== 'undefined') { + retval = vstack[sp]; + } + break; + } + + // break out of loop: we accept or fail with error + break; + } + + // should we also break out of the regular/outer parse loop, + // i.e. did the parser already produce a parse result in here?! + if (action === 3) { + break; + } + continue; + } + } + + switch (action) { + // catch misc. parse failures: + default: + // this shouldn't happen, unless resolve defaults are off + if (action instanceof Array) { + p = this.constructParseErrorInfo('Parse Error: multiple actions possible at state: ' + state + ', token: ' + symbol, null, null, false); + retval = this.parseError(p.errStr, p, this.JisonParserError); + break; + } + // Another case of better safe than sorry: in case state transitions come out of another error recovery process + // or a buggy LUT (LookUp Table): + p = this.constructParseErrorInfo('Parsing halted. 
No viable error recovery approach available due to internal system failure.', null, null, false); + retval = this.parseError(p.errStr, p, this.JisonParserError); + break; + + // shift: + case 1: + stack[sp] = symbol; + vstack[sp] = lexer.yytext; + lstack[sp] = copy_yylloc(lexer.yylloc); + sstack[sp] = newState; // push state + + ++sp; + symbol = 0; + ASSERT(preErrorSymbol === 0); + if (!preErrorSymbol) { + // normal execution / no error + // Pick up the lexer details for the current symbol as that one is not 'look-ahead' any more: + + + yyloc = lexer.yylloc; + + if (recovering > 0) { + recovering--; + } + } else { + // error just occurred, resume old lookahead f/ before error, *unless* that drops us straight back into error mode: + symbol = preErrorSymbol; + preErrorSymbol = 0; + + // read action for current state and first input + t = table[newState] && table[newState][symbol] || NO_ACTION; + if (!t[0] || symbol === TERROR) { + // forget about that symbol and move forward: this wasn't a 'forgot to insert' error type where + // (simple) stuff might have been missing before the token which caused the error we're + // recovering from now... + // + // Also check if the LookAhead symbol isn't the ERROR token we set as part of the error + // recovery, for then this we would we idling (cycling) on the error forever. + // Yes, this does not take into account the possibility that the *lexer* may have + // produced a *new* TERROR token all by itself, but that would be a very peculiar grammar! + + + symbol = 0; + } + } + + continue; + + // reduce: + case 2: + this_production = this.productions_[newState - 1]; // `this.productions_[]` is zero-based indexed while states start from 1 upwards... + yyrulelen = this_production[1]; + + r = this.performAction.call(yyval, yyloc, newState, sp - 1, vstack, lstack); + + if (typeof r !== 'undefined') { + retval = r; + break; + } + + // pop off stack + sp -= yyrulelen; + + // don't overwrite the `symbol` variable: use a local var to speed things up: + var ntsymbol = this_production[0]; // push nonterminal (reduce) + stack[sp] = ntsymbol; + vstack[sp] = yyval.$; + lstack[sp] = yyval._$; + // goto new state = table[STATE][NONTERMINAL] + newState = table[sstack[sp - 1]][ntsymbol]; + sstack[sp] = newState; + ++sp; + + continue; + + // accept: + case 3: + retval = true; + // Return the `$accept` rule's `$$` result, if available. + // + // Also note that JISON always adds this top-most `$accept` rule (with implicit, + // default, action): + // + // $accept: $end + // %{ $$ = $1; @$ = @1; %} + // + // which, combined with the parse kernel's `$accept` state behaviour coded below, + // will produce the `$$` value output of the rule as the parse result, + // IFF that result is *not* `undefined`. (See also the parser kernel code.) 
+ // + // In code: + // + // %{ + // @$ = @1; // if location tracking support is included + // if (typeof $1 !== 'undefined') + // return $1; + // else + // return true; // the default parse result if the rule actions don't produce anything + // %} + sp--; + if (typeof vstack[sp] !== 'undefined') { + retval = vstack[sp]; + } + + break; + } + + // break out of loop: we accept or fail with error + break; + } + } catch (ex) { + // report exceptions through the parseError callback too, but keep the exception intact + // if it is a known parser or lexer error which has been thrown by parseError() already: + if (ex instanceof this.JisonParserError) { + throw ex; + } else if (lexer && typeof lexer.JisonLexerError === 'function' && ex instanceof lexer.JisonLexerError) { + throw ex; + } else { + p = this.constructParseErrorInfo('Parsing aborted due to exception.', ex, null, false); + retval = this.parseError(p.errStr, p, this.JisonParserError); + } + } finally { + retval = this.cleanupAfterParse(retval, true, true); + this.__reentrant_call_depth--; + } // /finally + + return retval; + }, + yyError: 1 + }; + parser.originalParseError = parser.parseError; + parser.originalQuoteName = parser.quoteName; + + var ebnf = false; + + var rmCommonWS = helpers.rmCommonWS; + var dquote = helpers.dquote; + var checkActionBlock = helpers.checkActionBlock; + + // transform ebnf to bnf if necessary + function extend(json, grammar) { + if (ebnf) { + json.ebnf = grammar.grammar; // keep the original source EBNF around for possible pretty-printing & AST exports. + json.bnf = transform(grammar.grammar); + } else { + json.bnf = grammar.grammar; + } + if (grammar.actionInclude) { + json.actionInclude = grammar.actionInclude; + } + return json; + } + + // convert string value to number or boolean value, when possible + // (and when this is more or less obviously the intent) + // otherwise produce the string itself as value. + function parseValue(v) { + if (v === 'false') { + return false; + } + if (v === 'true') { + return true; + } + // http://stackoverflow.com/questions/175739/is-there-a-built-in-way-in-javascript-to-check-if-a-string-is-a-valid-number + // Note that the `v` check ensures that we do not convert `undefined`, `null` and `''` (empty string!) + if (v && !isNaN(v)) { + var rv = +v; + if (isFinite(rv)) { + return rv; + } + } + return v; + } + + parser.warn = function p_warn() { + console.warn.apply(console, arguments); + }; + + parser.log = function p_log() { + console.log.apply(console, arguments); + }; + /* lexer generated by jison-lex 0.6.1-205 */ + + /* + * Returns a Lexer object of the following structure: + * + * Lexer: { + * yy: {} The so-called "shared state" or rather the *source* of it; + * the real "shared state" `yy` passed around to + * the rule actions, etc. is a direct reference! + * + * This "shared context" object was passed to the lexer by way of + * the `lexer.setInput(str, yy)` API before you may use it. + * + * This "shared context" object is passed to the lexer action code in `performAction()` + * so userland code in the lexer actions may communicate with the outside world + * and/or other lexer rules' actions in more or less complex ways. + * + * } + * + * Lexer.prototype: { + * EOF: 1, + * ERROR: 2, + * + * yy: The overall "shared context" object reference. 
+ * + * JisonLexerError: function(msg, hash), + * + * performAction: function lexer__performAction(yy, yyrulenumber, YY_START), + * + * The function parameters and `this` have the following value/meaning: + * - `this` : reference to the `lexer` instance. + * `yy_` is an alias for `this` lexer instance reference used internally. + * + * - `yy` : a reference to the `yy` "shared state" object which was passed to the lexer + * by way of the `lexer.setInput(str, yy)` API before. + * + * Note: + * The extra arguments you specified in the `%parse-param` statement in your + * **parser** grammar definition file are passed to the lexer via this object + * reference as member variables. + * + * - `yyrulenumber` : index of the matched lexer rule (regex), used internally. + * + * - `YY_START`: the current lexer "start condition" state. + * + * parseError: function(str, hash, ExceptionClass), + * + * constructLexErrorInfo: function(error_message, is_recoverable), + * Helper function. + * Produces a new errorInfo 'hash object' which can be passed into `parseError()`. + * See it's use in this lexer kernel in many places; example usage: + * + * var infoObj = lexer.constructParseErrorInfo('fail!', true); + * var retVal = lexer.parseError(infoObj.errStr, infoObj, lexer.JisonLexerError); + * + * options: { ... lexer %options ... }, + * + * lex: function(), + * Produce one token of lexed input, which was passed in earlier via the `lexer.setInput()` API. + * You MAY use the additional `args...` parameters as per `%parse-param` spec of the **lexer** grammar: + * these extra `args...` are added verbatim to the `yy` object reference as member variables. + * + * WARNING: + * Lexer's additional `args...` parameters (via lexer's `%parse-param`) MAY conflict with + * any attributes already added to `yy` by the **parser** or the jison run-time; + * when such a collision is detected an exception is thrown to prevent the generated run-time + * from silently accepting this confusing and potentially hazardous situation! + * + * cleanupAfterLex: function(do_not_nuke_errorinfos), + * Helper function. + * + * This helper API is invoked when the **parse process** has completed: it is the responsibility + * of the **parser** (or the calling userland code) to invoke this method once cleanup is desired. + * + * This helper may be invoked by user code to ensure the internal lexer gets properly garbage collected. 
+ * + * setInput: function(input, [yy]), + * + * + * input: function(), + * + * + * unput: function(str), + * + * + * more: function(), + * + * + * reject: function(), + * + * + * less: function(n), + * + * + * pastInput: function(n), + * + * + * upcomingInput: function(n), + * + * + * showPosition: function(), + * + * + * test_match: function(regex_match_array, rule_index), + * + * + * next: function(), + * + * + * begin: function(condition), + * + * + * pushState: function(condition), + * + * + * popState: function(), + * + * + * topState: function(), + * + * + * _currentRules: function(), + * + * + * stateStackSize: function(), + * + * + * performAction: function(yy, yy_, yyrulenumber, YY_START), + * + * + * rules: [...], + * + * + * conditions: {associative list: name ==> set}, + * } + * + * + * token location info (`yylloc`): { + * first_line: n, + * last_line: n, + * first_column: n, + * last_column: n, + * range: [start_number, end_number] + * (where the numbers are indexes into the input string, zero-based) + * } + * + * --- + * + * The `parseError` function receives a 'hash' object with these members for lexer errors: + * + * { + * text: (matched text) + * token: (the produced terminal token, if any) + * token_id: (the produced terminal token numeric ID, if any) + * line: (yylineno) + * loc: (yylloc) + * recoverable: (boolean: TRUE when the parser MAY have an error recovery rule + * available for this particular error) + * yy: (object: the current parser internal "shared state" `yy` + * as is also available in the rule actions; this can be used, + * for instance, for advanced error analysis and reporting) + * lexer: (reference to the current lexer instance used by the parser) + * } + * + * while `this` will reference the current lexer instance. + * + * When `parseError` is invoked by the lexer, the default implementation will + * attempt to invoke `yy.parser.parseError()`; when this callback is not provided + * it will try to invoke `yy.parseError()` instead. When that callback is also not + * provided, a `JisonLexerError` exception will be thrown containing the error + * message and `hash`, as constructed by the `constructLexErrorInfo()` API. + * + * Note that the lexer's `JisonLexerError` error class is passed via the + * `ExceptionClass` argument, which is invoked to construct the exception + * instance to be thrown, so technically `parseError` will throw the object + * produced by the `new ExceptionClass(str, hash)` JavaScript expression. + * + * --- + * + * You can specify lexer options by setting / modifying the `.options` object of your Lexer instance. + * These options are available: + * + * (Options are permanent.) + * + * yy: { + * parseError: function(str, hash, ExceptionClass) + * optional: overrides the default `parseError` function. + * } + * + * lexer.options: { + * pre_lex: function() + * optional: is invoked before the lexer is invoked to produce another token. + * `this` refers to the Lexer object. + * post_lex: function(token) { return token; } + * optional: is invoked when the lexer has produced a token `token`; + * this function can override the returned token value by returning another. + * When it does not return any (truthy) value, the lexer will return + * the original `token`. + * `this` refers to the Lexer object. + * + * WARNING: the next set of options are not meant to be changed. They echo the abilities of + * the lexer as per when it was compiled! + * + * ranges: boolean + * optional: `true` ==> token location info will include a .range[] member. 
+ * flex: boolean + * optional: `true` ==> flex-like lexing behaviour where the rules are tested + * exhaustively to find the longest match. + * backtrack_lexer: boolean + * optional: `true` ==> lexer regexes are tested in order and for invoked; + * the lexer terminates the scan when a token is returned by the action code. + * xregexp: boolean + * optional: `true` ==> lexer rule regexes are "extended regex format" requiring the + * `XRegExp` library. When this %option has not been specified at compile time, all lexer + * rule regexes have been written as standard JavaScript RegExp expressions. + * } + */ + + var lexer = function () { + /** + * See also: + * http://stackoverflow.com/questions/1382107/whats-a-good-way-to-extend-error-in-javascript/#35881508 + * but we keep the prototype.constructor and prototype.name assignment lines too for compatibility + * with userland code which might access the derived class in a 'classic' way. + * + * @public + * @constructor + * @nocollapse + */ + function JisonLexerError(msg, hash) { + Object.defineProperty(this, 'name', { + enumerable: false, + writable: false, + value: 'JisonLexerError' + }); + + if (msg == null) msg = '???'; + + Object.defineProperty(this, 'message', { + enumerable: false, + writable: true, + value: msg + }); + + this.hash = hash; + var stacktrace; + + if (hash && hash.exception instanceof Error) { + var ex2 = hash.exception; + this.message = ex2.message || msg; + stacktrace = ex2.stack; + } + + if (!stacktrace) { + if (Error.hasOwnProperty('captureStackTrace')) { + // V8 + Error.captureStackTrace(this, this.constructor); + } else { + stacktrace = new Error(msg).stack; + } + } + + if (stacktrace) { + Object.defineProperty(this, 'stack', { + enumerable: false, + writable: false, + value: stacktrace + }); + } + } + + if (typeof Object.setPrototypeOf === 'function') { + Object.setPrototypeOf(JisonLexerError.prototype, Error.prototype); + } else { + JisonLexerError.prototype = Object.create(Error.prototype); + } + + JisonLexerError.prototype.constructor = JisonLexerError; + JisonLexerError.prototype.name = 'JisonLexerError'; + + var lexer = { + + // Code Generator Information Report + // --------------------------------- + // + // Options: + // + // backtracking: .................... false + // location.ranges: ................. true + // location line+column tracking: ... true + // + // + // Forwarded Parser Analysis flags: + // + // uses yyleng: ..................... false + // uses yylineno: ................... false + // uses yytext: ..................... false + // uses yylloc: ..................... false + // uses lexer values: ............... true / true + // location tracking: ............... true + // location assignment: ............. true + // + // + // Lexer Analysis flags: + // + // uses yyleng: ..................... ??? + // uses yylineno: ................... ??? + // uses yytext: ..................... ??? + // uses yylloc: ..................... ??? + // uses ParseError API: ............. ??? + // uses yyerror: .................... ??? + // uses location tracking & editing: ??? + // uses more() API: ................. ??? + // uses unput() API: ................ ??? + // uses reject() API: ............... ??? + // uses less() API: ................. ??? + // uses display APIs pastInput(), upcomingInput(), showPosition(): + // ............................. ??? + // uses describeYYLLOC() API: ....... ??? 
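A hedged sketch of wiring the callbacks and options documented in the comment block above; the handler bodies and input are invented, and `lexer` refers to the generated lexer instance as obtained in the earlier sketch:

```js
lexer.options.pre_lex = function () {
  // invoked before each token is produced; `this` is the lexer instance
};
lexer.options.post_lex = function (token) {
  // may replace the produced token; returning a falsy value keeps `token` as-is
  return token;
};

lexer.setInput('%token FOO\n%%\nrule : FOO ;\n', {
  parseError: function (msg, hash /*, ExceptionClass */) {
    console.error(msg, hash.loc);
    // returning nothing makes the lexer produce its ERROR token instead of throwing
  }
});

try {
  lexer.lex();
} catch (e) {
  // only reached when neither yy.parser.parseError nor yy.parseError handled the problem
  if (e instanceof lexer.JisonLexerError) {
    console.error('lexer bailed out:', e.message, e.hash);
  } else {
    throw e;
  }
}
```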
+ // + // --------- END OF REPORT ----------- + + EOF: 1, + ERROR: 2, + + // JisonLexerError: JisonLexerError, /// <-- injected by the code generator + + // options: {}, /// <-- injected by the code generator + + // yy: ..., /// <-- injected by setInput() + + __currentRuleSet__: null, /// INTERNAL USE ONLY: internal rule set cache for the current lexer state + + __error_infos: [], /// INTERNAL USE ONLY: the set of lexErrorInfo objects created since the last cleanup + __decompressed: false, /// INTERNAL USE ONLY: mark whether the lexer instance has been 'unfolded' completely and is now ready for use + done: false, /// INTERNAL USE ONLY + _backtrack: false, /// INTERNAL USE ONLY + _input: '', /// INTERNAL USE ONLY + _more: false, /// INTERNAL USE ONLY + _signaled_error_token: false, /// INTERNAL USE ONLY + conditionStack: [], /// INTERNAL USE ONLY; managed via `pushState()`, `popState()`, `topState()` and `stateStackSize()` + match: '', /// READ-ONLY EXTERNAL ACCESS - ADVANCED USE ONLY: tracks input which has been matched so far for the lexer token under construction. `match` is identical to `yytext` except that this one still contains the matched input string after `lexer.performAction()` has been invoked, where userland code MAY have changed/replaced the `yytext` value entirely! + matched: '', /// READ-ONLY EXTERNAL ACCESS - ADVANCED USE ONLY: tracks entire input which has been matched so far + matches: false, /// READ-ONLY EXTERNAL ACCESS - ADVANCED USE ONLY: tracks RE match result for last (successful) match attempt + yytext: '', /// ADVANCED USE ONLY: tracks input which has been matched so far for the lexer token under construction; this value is transferred to the parser as the 'token value' when the parser consumes the lexer token produced through a call to the `lex()` API. + offset: 0, /// READ-ONLY EXTERNAL ACCESS - ADVANCED USE ONLY: tracks the 'cursor position' in the input string, i.e. the number of characters matched so far + yyleng: 0, /// READ-ONLY EXTERNAL ACCESS - ADVANCED USE ONLY: length of matched input for the token under construction (`yytext`) + yylineno: 0, /// READ-ONLY EXTERNAL ACCESS - ADVANCED USE ONLY: 'line number' at which the token under construction is located + yylloc: null, /// READ-ONLY EXTERNAL ACCESS - ADVANCED USE ONLY: tracks location info (lines + columns) for the token under construction + + /** + * INTERNAL USE: construct a suitable error info hash object instance for `parseError`. 
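The read-only bookkeeping fields listed above can be inspected right after a token has been produced; a small illustration (`lexer` as in the first sketch):

```js
var token = lexer.lex();
console.log(token, {
  text:    lexer.yytext,    // the value handed to the parser for this token
  length:  lexer.yyleng,
  line:    lexer.yylineno,  // zero-based line number
  loc:     lexer.yylloc,    // { first_line, first_column, last_line, last_column, range }
  offset:  lexer.offset,    // number of input characters consumed so far
  matched: lexer.matched    // everything matched since setInput()
});
```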
+ * + * @public + * @this {RegExpLexer} + */ + constructLexErrorInfo: function lexer_constructLexErrorInfo(msg, recoverable, show_input_position) { + msg = '' + msg; + + // heuristic to determine if the error message already contains a (partial) source code dump + // as produced by either `showPosition()` or `prettyPrintRange()`: + if (show_input_position == undefined) { + show_input_position = !(msg.indexOf('\n') > 0 && msg.indexOf('^') > 0); + } + + if (this.yylloc && show_input_position) { + if (typeof this.prettyPrintRange === 'function') { + var pretty_src = this.prettyPrintRange(this.yylloc); + + if (!/\n\s*$/.test(msg)) { + msg += '\n'; + } + + msg += '\n Erroneous area:\n' + this.prettyPrintRange(this.yylloc); + } else if (typeof this.showPosition === 'function') { + var pos_str = this.showPosition(); + + if (pos_str) { + if (msg.length && msg[msg.length - 1] !== '\n' && pos_str[0] !== '\n') { + msg += '\n' + pos_str; + } else { + msg += pos_str; + } + } + } + } + + /** @constructor */ + var pei = { + errStr: msg, + recoverable: !!recoverable, + text: this.match, // This one MAY be empty; userland code should use the `upcomingInput` API to obtain more text which follows the 'lexer cursor position'... + token: null, + line: this.yylineno, + loc: this.yylloc, + yy: this.yy, + lexer: this, + + /** + * and make sure the error info doesn't stay due to potential + * ref cycle via userland code manipulations. + * These would otherwise all be memory leak opportunities! + * + * Note that only array and object references are nuked as those + * constitute the set of elements which can produce a cyclic ref. + * The rest of the members is kept intact as they are harmless. + * + * @public + * @this {LexErrorInfo} + */ + destroy: function destructLexErrorInfo() { + // remove cyclic references added to error info: + // info.yy = null; + // info.lexer = null; + // ... + var rec = !!this.recoverable; + + for (var key in this) { + if (this.hasOwnProperty(key) && (typeof key === 'undefined' ? 'undefined' : _typeof(key)) === 'object') { + this[key] = undefined; + } + } + + this.recoverable = rec; + } + }; + + // track this instance so we can `destroy()` it once we deem it superfluous and ready for garbage collection! + this.__error_infos.push(pei); + + return pei; + }, + + /** + * handler which is invoked when a lexer error occurs. + * + * @public + * @this {RegExpLexer} + */ + parseError: function lexer_parseError(str, hash, ExceptionClass) { + if (!ExceptionClass) { + ExceptionClass = this.JisonLexerError; + } + + if (this.yy) { + if (this.yy.parser && typeof this.yy.parser.parseError === 'function') { + return this.yy.parser.parseError.call(this, str, hash, ExceptionClass) || this.ERROR; + } else if (typeof this.yy.parseError === 'function') { + return this.yy.parseError.call(this, str, hash, ExceptionClass) || this.ERROR; + } + } + + throw new ExceptionClass(str, hash); + }, + + /** + * method which implements `yyerror(str, ...args)` functionality for use inside lexer actions. 
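The two helpers above combine as in this sketch, which mirrors what `yyerror()` further below does internally; the message text is invented and `this` is assumed to be the lexer (e.g. inside a rule action):

```js
var info = this.constructLexErrorInfo('unsupported construct', /* recoverable: */ true);
var tok = this.parseError(info.errStr, info, this.JisonLexerError) || this.ERROR;

// once userland code has copied whatever it needs from `info`, it may release the
// object early instead of waiting for cleanupAfterLex():
info.destroy();
```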
+ * + * @public + * @this {RegExpLexer} + */ + yyerror: function yyError(str /*, ...args */) { + var lineno_msg = ''; + + if (this.yylloc) { + lineno_msg = ' on line ' + (this.yylineno + 1); + } + + var p = this.constructLexErrorInfo('Lexical error' + lineno_msg + ': ' + str, this.options.lexerErrorsAreRecoverable); + + // Add any extra args to the hash under the name `extra_error_attributes`: + var args = Array.prototype.slice.call(arguments, 1); + + if (args.length) { + p.extra_error_attributes = args; + } + + return this.parseError(p.errStr, p, this.JisonLexerError) || this.ERROR; + }, + + /** + * final cleanup function for when we have completed lexing the input; + * make it an API so that external code can use this one once userland + * code has decided it's time to destroy any lingering lexer error + * hash object instances and the like: this function helps to clean + * up these constructs, which *may* carry cyclic references which would + * otherwise prevent the instances from being properly and timely + * garbage-collected, i.e. this function helps prevent memory leaks! + * + * @public + * @this {RegExpLexer} + */ + cleanupAfterLex: function lexer_cleanupAfterLex(do_not_nuke_errorinfos) { + // prevent lingering circular references from causing memory leaks: + this.setInput('', {}); + + // nuke the error hash info instances created during this run. + // Userland code must COPY any data/references + // in the error hash instance(s) it is more permanently interested in. + if (!do_not_nuke_errorinfos) { + for (var i = this.__error_infos.length - 1; i >= 0; i--) { + var el = this.__error_infos[i]; + + if (el && typeof el.destroy === 'function') { + el.destroy(); + } + } + + this.__error_infos.length = 0; + } + + return this; + }, + + /** + * clear the lexer token context; intended for internal use only + * + * @public + * @this {RegExpLexer} + */ + clear: function lexer_clear() { + this.yytext = ''; + this.yyleng = 0; + this.match = ''; + + // - DO NOT reset `this.matched` + this.matches = false; + + this._more = false; + this._backtrack = false; + var col = this.yylloc ? this.yylloc.last_column : 0; + + this.yylloc = { + first_line: this.yylineno + 1, + first_column: col, + last_line: this.yylineno + 1, + last_column: col, + range: [this.offset, this.offset] + }; + }, + + /** + * resets the lexer, sets new input + * + * @public + * @this {RegExpLexer} + */ + setInput: function lexer_setInput(input, yy) { + this.yy = yy || this.yy || {}; + + // also check if we've fully initialized the lexer instance, + // including expansion work to be done to go from a loaded + // lexer to a usable lexer: + if (!this.__decompressed) { + // step 1: decompress the regex list: + var rules = this.rules; + + for (var i = 0, len = rules.length; i < len; i++) { + var rule_re = rules[i]; + + // compression: is the RE an xref to another RE slot in the rules[] table? + if (typeof rule_re === 'number') { + rules[i] = rules[rule_re]; + } + } + + // step 2: unfold the conditions[] set to make these ready for use: + var conditions = this.conditions; + + for (var k in conditions) { + var spec = conditions[k]; + var rule_ids = spec.rules; + var len = rule_ids.length; + var rule_regexes = new Array(len + 1); // slot 0 is unused; we use a 1-based index approach here to keep the hottest code in `lexer_next()` fast and simple! 
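Inside a lexer rule action, `yyerror()` is the usual entry point; extra arguments are attached to the error hash as `extra_error_attributes`. A hypothetical action body:

```js
// hypothetical rule action: report the problem; the returned value is whatever
// parseError() produced, or the lexer's ERROR token as a fallback
return this.yyerror('unterminated string constant', { opened_at: this.yylloc });
```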
+ var rule_new_ids = new Array(len + 1); + + for (var i = 0; i < len; i++) { + var idx = rule_ids[i]; + var rule_re = rules[idx]; + rule_regexes[i + 1] = rule_re; + rule_new_ids[i + 1] = idx; + } + + spec.rules = rule_new_ids; + spec.__rule_regexes = rule_regexes; + spec.__rule_count = len; + } + + this.__decompressed = true; + } + + this._input = input || ''; + this.clear(); + this._signaled_error_token = false; + this.done = false; + this.yylineno = 0; + this.matched = ''; + this.conditionStack = ['INITIAL']; + this.__currentRuleSet__ = null; + + this.yylloc = { + first_line: 1, + first_column: 0, + last_line: 1, + last_column: 0, + range: [0, 0] + }; + + this.offset = 0; + return this; + }, + + /** + * edit the remaining input via user-specified callback. + * This can be used to forward-adjust the input-to-parse, + * e.g. inserting macro expansions and alike in the + * input which has yet to be lexed. + * The behaviour of this API contrasts the `unput()` et al + * APIs as those act on the *consumed* input, while this + * one allows one to manipulate the future, without impacting + * the current `yyloc` cursor location or any history. + * + * Use this API to help implement C-preprocessor-like + * `#include` statements, etc. + * + * The provided callback must be synchronous and is + * expected to return the edited input (string). + * + * The `cpsArg` argument value is passed to the callback + * as-is. + * + * `callback` interface: + * `function callback(input, cpsArg)` + * + * - `input` will carry the remaining-input-to-lex string + * from the lexer. + * - `cpsArg` is `cpsArg` passed into this API. + * + * The `this` reference for the callback will be set to + * reference this lexer instance so that userland code + * in the callback can easily and quickly access any lexer + * API. + * + * When the callback returns a non-string-type falsey value, + * we assume the callback did not edit the input and we + * will using the input as-is. + * + * When the callback returns a non-string-type value, it + * is converted to a string for lexing via the `"" + retval` + * operation. (See also why: http://2ality.com/2012/03/converting-to-string.html + * -- that way any returned object's `toValue()` and `toString()` + * methods will be invoked in a proper/desirable order.) + * + * @public + * @this {RegExpLexer} + */ + editRemainingInput: function lexer_editRemainingInput(callback, cpsArg) { + var rv = callback.call(this, this._input, cpsArg); + + if (typeof rv !== 'string') { + if (rv) { + this._input = '' + rv; + } + // else: keep `this._input` as is. + } else { + this._input = rv; + } + + return this; + }, + + /** + * consumes and returns one char from the input + * + * @public + * @this {RegExpLexer} + */ + input: function lexer_input() { + if (!this._input) { + //this.done = true; -- don't set `done` as we want the lex()/next() API to be able to produce one custom EOF token match after this anyhow. (lexer can match special <> tokens and perform user action code for a <> match, but only does so *once*) + return null; + } + + var ch = this._input[0]; + this.yytext += ch; + this.yyleng++; + this.offset++; + this.match += ch; + this.matched += ch; + + // Count the linenumber up when we hit the LF (or a stand-alone CR). + // On CRLF, the linenumber is incremented when you fetch the CR or the CRLF combo + // and we advance immediately past the LF as well, returning both together as if + // it was all a single 'character' only. 
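A sketch of the macro-expansion use case described for `editRemainingInput()` above; the macro table and the replacement pattern are invented, and `lexer` is the generated lexer instance as before:

```js
var macros = { INTLIT: '[0-9]+' };               // invented lookup table

lexer.editRemainingInput(function expandMacros(remainingInput, table) {
  // `this` is the lexer; return the edited remaining input as a string
  return remainingInput.replace(/\b([A-Z]+)\b/g, function (m, name) {
    return table.hasOwnProperty(name) ? table[name] : m;
  });
}, macros);
```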
+ var slice_len = 1; + + var lines = false; + + if (ch === '\n') { + lines = true; + } else if (ch === '\r') { + lines = true; + var ch2 = this._input[1]; + + if (ch2 === '\n') { + slice_len++; + ch += ch2; + this.yytext += ch2; + this.yyleng++; + this.offset++; + this.match += ch2; + this.matched += ch2; + this.yylloc.range[1]++; + } + } + + if (lines) { + this.yylineno++; + this.yylloc.last_line++; + this.yylloc.last_column = 0; + } else { + this.yylloc.last_column++; + } + + this.yylloc.range[1]++; + this._input = this._input.slice(slice_len); + return ch; + }, + + /** + * unshifts one char (or an entire string) into the input + * + * @public + * @this {RegExpLexer} + */ + unput: function lexer_unput(ch) { + var len = ch.length; + var lines = ch.split(/(?:\r\n?|\n)/g); + this._input = ch + this._input; + this.yytext = this.yytext.substr(0, this.yytext.length - len); + this.yyleng = this.yytext.length; + this.offset -= len; + this.match = this.match.substr(0, this.match.length - len); + this.matched = this.matched.substr(0, this.matched.length - len); + + if (lines.length > 1) { + this.yylineno -= lines.length - 1; + this.yylloc.last_line = this.yylineno + 1; + + // Get last entirely matched line into the `pre_lines[]` array's + // last index slot; we don't mind when other previously + // matched lines end up in the array too. + var pre = this.match; + + var pre_lines = pre.split(/(?:\r\n?|\n)/g); + + if (pre_lines.length === 1) { + pre = this.matched; + pre_lines = pre.split(/(?:\r\n?|\n)/g); + } + + this.yylloc.last_column = pre_lines[pre_lines.length - 1].length; + } else { + this.yylloc.last_column -= len; + } + + this.yylloc.range[1] = this.yylloc.range[0] + this.yyleng; + this.done = false; + return this; + }, + + /** + * cache matched text and append it on next action + * + * @public + * @this {RegExpLexer} + */ + more: function lexer_more() { + this._more = true; + return this; + }, + + /** + * signal the lexer that this rule fails to match the input, so the + * next matching rule (regex) should be tested instead. + * + * @public + * @this {RegExpLexer} + */ + reject: function lexer_reject() { + if (this.options.backtrack_lexer) { + this._backtrack = true; + } else { + // when the `parseError()` call returns, we MUST ensure that the error is registered. + // We accomplish this by signaling an 'error' token to be produced for the current + // `.lex()` run. + var lineno_msg = ''; + + if (this.yylloc) { + lineno_msg = ' on line ' + (this.yylineno + 1); + } + + var p = this.constructLexErrorInfo('Lexical error' + lineno_msg + ': You can only invoke reject() in the lexer when the lexer is of the backtracking persuasion (options.backtrack_lexer = true).', false); + + this._signaled_error_token = this.parseError(p.errStr, p, this.JisonLexerError) || this.ERROR; + } + + return this; + }, + + /** + * retain first n characters of the match + * + * @public + * @this {RegExpLexer} + */ + less: function lexer_less(n) { + return this.unput(this.match.slice(n)); + }, + + /** + * return (part of the) already matched input, i.e. for error + * messages. + * + * Limit the returned string length to `maxSize` (default: 20). + * + * Limit the returned string to the `maxLines` number of lines of + * input (default: 1). + * + * Negative limit values equal *unlimited*. 
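The match-manipulation APIs above are intended for rule actions; a hypothetical action which consumes only the first character of the current match:

```js
// hypothetical rule action: keep the opening brace, push the rest back onto the input
this.less(1);          // shorthand for this.unput(this.match.slice(1))
return 12;             // the token id this grammar uses for '{' (see case 68 below)
```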
+ * + * @public + * @this {RegExpLexer} + */ + pastInput: function lexer_pastInput(maxSize, maxLines) { + var past = this.matched.substring(0, this.matched.length - this.match.length); + + if (maxSize < 0) maxSize = past.length;else if (!maxSize) maxSize = 20; + + if (maxLines < 0) maxLines = past.length; // can't ever have more input lines than this! + else if (!maxLines) maxLines = 1; + + // `substr` anticipation: treat \r\n as a single character and take a little + // more than necessary so that we can still properly check against maxSize + // after we've transformed and limited the newLines in here: + past = past.substr(-maxSize * 2 - 2); + + // now that we have a significantly reduced string to process, transform the newlines + // and chop them, then limit them: + var a = past.replace(/\r\n|\r/g, '\n').split('\n'); + + a = a.slice(-maxLines); + past = a.join('\n'); + + // When, after limiting to maxLines, we still have too much to return, + // do add an ellipsis prefix... + if (past.length > maxSize) { + past = '...' + past.substr(-maxSize); + } + + return past; + }, + + /** + * return (part of the) upcoming input, i.e. for error messages. + * + * Limit the returned string length to `maxSize` (default: 20). + * + * Limit the returned string to the `maxLines` number of lines of input (default: 1). + * + * Negative limit values equal *unlimited*. + * + * > ### NOTE ### + * > + * > *"upcoming input"* is defined as the whole of the both + * > the *currently lexed* input, together with any remaining input + * > following that. *"currently lexed"* input is the input + * > already recognized by the lexer but not yet returned with + * > the lexer token. This happens when you are invoking this API + * > from inside any lexer rule action code block. + * > + * + * @public + * @this {RegExpLexer} + */ + upcomingInput: function lexer_upcomingInput(maxSize, maxLines) { + var next = this.match; + + if (maxSize < 0) maxSize = next.length + this._input.length;else if (!maxSize) maxSize = 20; + + if (maxLines < 0) maxLines = maxSize; // can't ever have more input lines than this! + else if (!maxLines) maxLines = 1; + + // `substring` anticipation: treat \r\n as a single character and take a little + // more than necessary so that we can still properly check against maxSize + // after we've transformed and limited the newLines in here: + if (next.length < maxSize * 2 + 2) { + next += this._input.substring(0, maxSize * 2 + 2); // substring is faster on Chrome/V8 + } + + // now that we have a significantly reduced string to process, transform the newlines + // and chop them, then limit them: + var a = next.replace(/\r\n|\r/g, '\n').split('\n'); + + a = a.slice(0, maxLines); + next = a.join('\n'); + + // When, after limiting to maxLines, we still have too much to return, + // do add an ellipsis postfix... + if (next.length > maxSize) { + next = next.substring(0, maxSize) + '...'; + } + + return next; + }, + + /** + * return a string which displays the character position where the + * lexing error occurred, i.e. for error messages + * + * @public + * @this {RegExpLexer} + */ + showPosition: function lexer_showPosition(maxPrefix, maxPostfix) { + var pre = this.pastInput(maxPrefix).replace(/\s/g, ' '); + var c = new Array(pre.length + 1).join('-'); + return pre + this.upcomingInput(maxPostfix).replace(/\s/g, ' ') + '\n' + c + '^'; + }, + + /** + * return a string which displays the lines & columns of input which are referenced + * by the given location info range, plus a few lines of context. 
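These display helpers are what the default error paths use; a sketch of a hand-rolled message (`lexer` as before):

```js
var msg = 'Unexpected input on line ' + (lexer.yylineno + 1) + ':\n' +
          lexer.showPosition(40 /* maxPrefix */, 40 /* maxPostfix */);
// showPosition() renders something like:
//    ...already lexed text upcoming text...
//    -----------------------^
console.warn(msg);
```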
+ * + * This function pretty-prints the indicated section of the input, with line numbers + * and everything! + * + * This function is very useful to provide highly readable error reports, while + * the location range may be specified in various flexible ways: + * + * - `loc` is the location info object which references the area which should be + * displayed and 'marked up': these lines & columns of text are marked up by `^` + * characters below each character in the entire input range. + * + * - `context_loc` is the *optional* location info object which instructs this + * pretty-printer how much *leading* context should be displayed alongside + * the area referenced by `loc`. This can help provide context for the displayed + * error, etc. + * + * When this location info is not provided, a default context of 3 lines is + * used. + * + * - `context_loc2` is another *optional* location info object, which serves + * a similar purpose to `context_loc`: it specifies the amount of *trailing* + * context lines to display in the pretty-print output. + * + * When this location info is not provided, a default context of 1 line only is + * used. + * + * Special Notes: + * + * - when the `loc`-indicated range is very large (about 5 lines or more), then + * only the first and last few lines of this block are printed while a + * `...continued...` message will be printed between them. + * + * This serves the purpose of not printing a huge amount of text when the `loc` + * range happens to be huge: this way a manageable & readable output results + * for arbitrary large ranges. + * + * - this function can display lines of input which whave not yet been lexed. + * `prettyPrintRange()` can access the entire input! + * + * @public + * @this {RegExpLexer} + */ + prettyPrintRange: function lexer_prettyPrintRange(loc, context_loc, context_loc2) { + var CONTEXT = 3; + var CONTEXT_TAIL = 1; + var MINIMUM_VISIBLE_NONEMPTY_LINE_COUNT = 2; + var input = this.matched + this._input; + var lines = input.split('\n'); + + //var show_context = (error_size < 5 || context_loc); + var l0 = Math.max(1, context_loc ? context_loc.first_line : loc.first_line - CONTEXT); + + var l1 = Math.max(1, context_loc2 ? context_loc2.last_line : loc.last_line + CONTEXT_TAIL); + var lineno_display_width = 1 + Math.log10(l1 | 1) | 0; + var ws_prefix = new Array(lineno_display_width).join(' '); + var nonempty_line_indexes = []; + + var rv = lines.slice(l0 - 1, l1 + 1).map(function injectLineNumber(line, index) { + var lno = index + l0; + var lno_pfx = (ws_prefix + lno).substr(-lineno_display_width); + var rv = lno_pfx + ': ' + line; + var errpfx = new Array(lineno_display_width + 1).join('^'); + var offset = 2 + 1; + var len = 0; + + if (lno === loc.first_line) { + offset += loc.first_column; + + len = Math.max(2, (lno === loc.last_line ? 
loc.last_column : line.length) - loc.first_column + 1); + } else if (lno === loc.last_line) { + len = Math.max(2, loc.last_column + 1); + } else if (lno > loc.first_line && lno < loc.last_line) { + len = Math.max(2, line.length + 1); + } + + if (len) { + var lead = new Array(offset).join('.'); + var mark = new Array(len).join('^'); + rv += '\n' + errpfx + lead + mark; + + if (line.trim().length > 0) { + nonempty_line_indexes.push(index); + } + } + + rv = rv.replace(/\t/g, ' '); + return rv; + }); + + // now make sure we don't print an overly large amount of error area: limit it + // to the top and bottom line count: + if (nonempty_line_indexes.length > 2 * MINIMUM_VISIBLE_NONEMPTY_LINE_COUNT) { + var clip_start = nonempty_line_indexes[MINIMUM_VISIBLE_NONEMPTY_LINE_COUNT - 1] + 1; + var clip_end = nonempty_line_indexes[nonempty_line_indexes.length - MINIMUM_VISIBLE_NONEMPTY_LINE_COUNT] - 1; + var intermediate_line = new Array(lineno_display_width + 1).join(' ') + ' (...continued...)'; + intermediate_line += '\n' + new Array(lineno_display_width + 1).join('-') + ' (---------------)'; + rv.splice(clip_start, clip_end - clip_start + 1, intermediate_line); + } + + return rv.join('\n'); + }, + + /** + * helper function, used to produce a human readable description as a string, given + * the input `yylloc` location object. + * + * Set `display_range_too` to TRUE to include the string character index position(s) + * in the description if the `yylloc.range` is available. + * + * @public + * @this {RegExpLexer} + */ + describeYYLLOC: function lexer_describe_yylloc(yylloc, display_range_too) { + var l1 = yylloc.first_line; + var l2 = yylloc.last_line; + var c1 = yylloc.first_column; + var c2 = yylloc.last_column; + var dl = l2 - l1; + var dc = c2 - c1; + var rv; + + if (dl === 0) { + rv = 'line ' + l1 + ', '; + + if (dc <= 1) { + rv += 'column ' + c1; + } else { + rv += 'columns ' + c1 + ' .. ' + c2; + } + } else { + rv = 'lines ' + l1 + '(column ' + c1 + ') .. ' + l2 + '(column ' + c2 + ')'; + } + + if (yylloc.range && display_range_too) { + var r1 = yylloc.range[0]; + var r2 = yylloc.range[1] - 1; + + if (r2 <= r1) { + rv += ' {String Offset: ' + r1 + '}'; + } else { + rv += ' {String Offset range: ' + r1 + ' .. ' + r2 + '}'; + } + } + + return rv; + }, + + /** + * test the lexed token: return FALSE when not a match, otherwise return token. + * + * `match` is supposed to be an array coming out of a regex match, i.e. `match[0]` + * contains the actually matched text string. 
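A worked example for `describeYYLLOC()`; the location values are illustrative:

```js
lexer.describeYYLLOC({
  first_line: 2, first_column: 4,
  last_line: 2,  last_column: 9,
  range: [10, 15]
}, true);
// -> 'line 2, columns 4 .. 9 {String Offset range: 10 .. 14}'
```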
+ * + * Also move the input cursor forward and update the match collectors: + * + * - `yytext` + * - `yyleng` + * - `match` + * - `matches` + * - `yylloc` + * - `offset` + * + * @public + * @this {RegExpLexer} + */ + test_match: function lexer_test_match(match, indexed_rule) { + var token, lines, backup, match_str, match_str_len; + + if (this.options.backtrack_lexer) { + // save context + backup = { + yylineno: this.yylineno, + + yylloc: { + first_line: this.yylloc.first_line, + last_line: this.yylloc.last_line, + first_column: this.yylloc.first_column, + last_column: this.yylloc.last_column, + range: this.yylloc.range.slice(0) + }, + + yytext: this.yytext, + match: this.match, + matches: this.matches, + matched: this.matched, + yyleng: this.yyleng, + offset: this.offset, + _more: this._more, + _input: this._input, + + //_signaled_error_token: this._signaled_error_token, + yy: this.yy, + + conditionStack: this.conditionStack.slice(0), + done: this.done + }; + } + + match_str = match[0]; + match_str_len = match_str.length; + + // if (match_str.indexOf('\n') !== -1 || match_str.indexOf('\r') !== -1) { + lines = match_str.split(/(?:\r\n?|\n)/g); + + if (lines.length > 1) { + this.yylineno += lines.length - 1; + this.yylloc.last_line = this.yylineno + 1; + this.yylloc.last_column = lines[lines.length - 1].length; + } else { + this.yylloc.last_column += match_str_len; + } + + // } + this.yytext += match_str; + + this.match += match_str; + this.matched += match_str; + this.matches = match; + this.yyleng = this.yytext.length; + this.yylloc.range[1] += match_str_len; + + // previous lex rules MAY have invoked the `more()` API rather than producing a token: + // those rules will already have moved this `offset` forward matching their match lengths, + // hence we must only add our own match length now: + this.offset += match_str_len; + + this._more = false; + this._backtrack = false; + this._input = this._input.slice(match_str_len); + + // calling this method: + // + // function lexer__performAction(yy, yyrulenumber, YY_START) {...} + token = this.performAction.call(this, this.yy, indexed_rule, this.conditionStack[this.conditionStack.length - 1] /* = YY_START */ + ); + + // otherwise, when the action codes are all simple return token statements: + //token = this.simpleCaseActionClusters[indexed_rule]; + + if (this.done && this._input) { + this.done = false; + } + + if (token) { + return token; + } else if (this._backtrack) { + // recover context + for (var k in backup) { + this[k] = backup[k]; + } + + this.__currentRuleSet__ = null; + return false; // rule action called reject() implying the next rule should be tested instead. + } else if (this._signaled_error_token) { + // produce one 'error' token as `.parseError()` in `reject()` + // did not guarantee a failure signal by throwing an exception! + token = this._signaled_error_token; + + this._signaled_error_token = false; + return token; + } + + return false; + }, + + /** + * return next match in input + * + * @public + * @this {RegExpLexer} + */ + next: function lexer_next() { + if (this.done) { + this.clear(); + return this.EOF; + } + + if (!this._input) { + this.done = true; + } + + var token, match, tempMatch, index; + + if (!this._more) { + this.clear(); + } + + var spec = this.__currentRuleSet__; + + if (!spec) { + // Update the ruleset cache as we apparently encountered a state change or just started lexing. 
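Note that `reject()` (documented earlier) only cooperates with this backup/restore machinery when the lexer was compiled with `%options backtrack_lexer`, which is not the case for this generated instance (see its `options` object further below). For a backtracking lexer, a rule action could look like the following sketch; `looksLikeKeyword` is an invented helper:

```js
// hypothetical rule action in a backtracking lexer (%options backtrack_lexer)
function looksLikeKeyword(s) { return /^[a-z]+$/.test(s); }   // invented predicate

if (!looksLikeKeyword(this.yytext)) {
  this.reject();   // test_match() restores the saved context; next() tries the next rule
  return;          // do not return a token after reject()
}
return 'KEYWORD';
```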
+ // The cache is set up for fast lookup -- we assume a lexer will switch states much less often than it will + // invoke the `lex()` token-producing API and related APIs, hence caching the set for direct access helps + // speed up those activities a tiny bit. + spec = this.__currentRuleSet__ = this._currentRules(); + + // Check whether a *sane* condition has been pushed before: this makes the lexer robust against + // user-programmer bugs such as https://github.com/zaach/jison-lex/issues/19 + if (!spec || !spec.rules) { + var lineno_msg = ''; + + if (this.options.trackPosition) { + lineno_msg = ' on line ' + (this.yylineno + 1); + } + + var p = this.constructLexErrorInfo('Internal lexer engine error' + lineno_msg + ': The lex grammar programmer pushed a non-existing condition name "' + this.topState() + '"; this is a fatal error and should be reported to the application programmer team!', false); + + // produce one 'error' token until this situation has been resolved, most probably by parse termination! + return this.parseError(p.errStr, p, this.JisonLexerError) || this.ERROR; + } + } + + var rule_ids = spec.rules; + var regexes = spec.__rule_regexes; + var len = spec.__rule_count; + + // Note: the arrays are 1-based, while `len` itself is a valid index, + // hence the non-standard less-or-equal check in the next loop condition! + for (var i = 1; i <= len; i++) { + tempMatch = this._input.match(regexes[i]); + + if (tempMatch && (!match || tempMatch[0].length > match[0].length)) { + match = tempMatch; + index = i; + + if (this.options.backtrack_lexer) { + token = this.test_match(tempMatch, rule_ids[i]); + + if (token !== false) { + return token; + } else if (this._backtrack) { + match = undefined; + continue; // rule action called reject() implying a rule MISmatch. + } else { + // else: this is a lexer rule which consumes input without producing a token (e.g. whitespace) + return false; + } + } else if (!this.options.flex) { + break; + } + } + } + + if (match) { + token = this.test_match(match, rule_ids[index]); + + if (token !== false) { + return token; + } + + // else: this is a lexer rule which consumes input without producing a token (e.g. whitespace) + return false; + } + + if (!this._input) { + this.done = true; + this.clear(); + return this.EOF; + } else { + var lineno_msg = ''; + + if (this.options.trackPosition) { + lineno_msg = ' on line ' + (this.yylineno + 1); + } + + var p = this.constructLexErrorInfo('Lexical error' + lineno_msg + ': Unrecognized text.', this.options.lexerErrorsAreRecoverable); + + var pendingInput = this._input; + var activeCondition = this.topState(); + var conditionStackDepth = this.conditionStack.length; + token = this.parseError(p.errStr, p, this.JisonLexerError) || this.ERROR; + + if (token === this.ERROR) { + // we can try to recover from a lexer error that `parseError()` did not 'recover' for us + // by moving forward at least one character at a time IFF the (user-specified?) `parseError()` + // has not consumed/modified any pending input or changed state in the error handler: + if (!this.matches && // and make sure the input has been modified/consumed ... + pendingInput === this._input && // ...or the lexer state has been modified significantly enough + // to merit a non-consuming error handling action right now. 
+ activeCondition === this.topState() && conditionStackDepth === this.conditionStack.length) { + this.input(); + } + } + + return token; + } + }, + + /** + * return next match that has a token + * + * @public + * @this {RegExpLexer} + */ + lex: function lexer_lex() { + var r; + + // allow the PRE/POST handlers set/modify the return token for maximum flexibility of the generated lexer: + if (typeof this.options.pre_lex === 'function') { + r = this.options.pre_lex.call(this); + } + + while (!r) { + r = this.next(); + } + + if (typeof this.options.post_lex === 'function') { + // (also account for a userdef function which does not return any value: keep the token as is) + r = this.options.post_lex.call(this, r) || r; + } + + return r; + }, + + /** + * backwards compatible alias for `pushState()`; + * the latter is symmetrical with `popState()` and we advise to use + * those APIs in any modern lexer code, rather than `begin()`. + * + * @public + * @this {RegExpLexer} + */ + begin: function lexer_begin(condition) { + return this.pushState(condition); + }, + + /** + * activates a new lexer condition state (pushes the new lexer + * condition state onto the condition stack) + * + * @public + * @this {RegExpLexer} + */ + pushState: function lexer_pushState(condition) { + this.conditionStack.push(condition); + this.__currentRuleSet__ = null; + return this; + }, + + /** + * pop the previously active lexer condition state off the condition + * stack + * + * @public + * @this {RegExpLexer} + */ + popState: function lexer_popState() { + var n = this.conditionStack.length - 1; + + if (n > 0) { + this.__currentRuleSet__ = null; + return this.conditionStack.pop(); + } else { + return this.conditionStack[0]; + } + }, + + /** + * return the currently active lexer condition state; when an index + * argument is provided it produces the N-th previous condition state, + * if available + * + * @public + * @this {RegExpLexer} + */ + topState: function lexer_topState(n) { + n = this.conditionStack.length - 1 - Math.abs(n || 0); + + if (n >= 0) { + return this.conditionStack[n]; + } else { + return 'INITIAL'; + } + }, + + /** + * (internal) determine the lexer rule set which is active for the + * currently active lexer condition state + * + * @public + * @this {RegExpLexer} + */ + _currentRules: function lexer__currentRules() { + if (this.conditionStack.length && this.conditionStack[this.conditionStack.length - 1]) { + return this.conditions[this.conditionStack[this.conditionStack.length - 1]]; + } else { + return this.conditions['INITIAL']; + } + }, + + /** + * return the number of states currently on the stack + * + * @public + * @this {RegExpLexer} + */ + stateStackSize: function lexer_stateStackSize() { + return this.conditionStack.length; + }, + + options: { + xregexp: true, + ranges: true, + trackPosition: true, + parseActionsUseYYMERGELOCATIONINFO: true, + easy_keyword_rules: true + }, + + JisonLexerError: JisonLexerError, + + performAction: function lexer__performAction(yy, yyrulenumber, YY_START) { + var yy_ = this; + switch (yyrulenumber) { + case 2: + /*! Conditions:: action */ + /*! Rule:: \/[^ /]*?['"{}][^ ]*?\/ */ + return 43; // regexp with braces or quotes (and no spaces) + + break; + + case 7: + /*! Conditions:: action */ + /*! Rule:: \{ */ + yy.depth++; + + return 12; + break; + + case 8: + /*! Conditions:: action */ + /*! Rule:: \} */ + if (yy.depth === 0) { + this.popState(); + } else { + yy.depth--; + } + + return 13; + break; + + case 9: + /*! Conditions:: token */ + /*! 
Rule:: {BR} */ + this.popState(); + + break; + + case 10: + /*! Conditions:: token */ + /*! Rule:: %% */ + this.popState(); + + break; + + case 11: + /*! Conditions:: token */ + /*! Rule:: ; */ + this.popState(); + + break; + + case 12: + /*! Conditions:: bnf ebnf */ + /*! Rule:: %% */ + this.pushState('code'); + + return 14; + break; + + case 25: + /*! Conditions:: options */ + /*! Rule:: = */ + this.pushState('option_values'); + + return 3; + break; + + case 26: + /*! Conditions:: option_values */ + /*! Rule:: "{DOUBLEQUOTED_STRING_CONTENT}" */ + yy_.yytext = unescQuote(this.matches[1], /\\"/g); + + this.popState(); + return 29; // value is always a string type + break; + + case 27: + /*! Conditions:: option_values */ + /*! Rule:: '{QUOTED_STRING_CONTENT}' */ + yy_.yytext = unescQuote(this.matches[1], /\\'/g); + + this.popState(); + return 29; // value is always a string type + break; + + case 28: + /*! Conditions:: option_values */ + /*! Rule:: `{ES2017_STRING_CONTENT}` */ + yy_.yytext = unescQuote(this.matches[1], /\\`/g); + + this.popState(); + return 29; // value is always a string type + break; + + case 29: + /*! Conditions:: INITIAL ebnf bnf token path options option_values */ + /*! Rule:: \/\/[^\r\n]* */ + /* skip single-line comment */ + break; + + case 30: + /*! Conditions:: INITIAL ebnf bnf token path options option_values */ + /*! Rule:: \/\*[^]*?\*\/ */ + /* skip multi-line comment */ + break; + + case 31: + /*! Conditions:: option_values */ + /*! Rule:: [^\s\r\n]+ */ + this.popState(); + + return 30; + break; + + case 32: + /*! Conditions:: options */ + /*! Rule:: {BR}{WS}+(?=\S) */ + /* skip leading whitespace on the next line of input, when followed by more options */ + break; + + case 33: + /*! Conditions:: options */ + /*! Rule:: {BR} */ + this.popState(); + + return 28; + break; + + case 34: + /*! Conditions:: options option_values */ + /*! Rule:: {WS}+ */ + /* skip whitespace */ + break; + + case 35: + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: {WS}+ */ + /* skip whitespace */ + break; + + case 36: + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: {BR}+ */ + /* skip newlines */ + break; + + case 37: + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: \[{ID}\] */ + yy_.yytext = this.matches[1]; + + return 39; + break; + + case 42: + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: "{DOUBLEQUOTED_STRING_CONTENT}" */ + yy_.yytext = unescQuote(this.matches[1], /\\"/g); + + return 26; + break; + + case 43: + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: '{QUOTED_STRING_CONTENT}' */ + yy_.yytext = unescQuote(this.matches[1], /\\'/g); + + return 26; + break; + + case 48: + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: %% */ + this.pushState(yy.ebnf ? 'ebnf' : 'bnf'); + + return 14; + break; + + case 49: + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: %ebnf\b */ + yy.ebnf = true; + + return 20; + break; + + case 57: + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: %token\b */ + this.pushState('token'); + + return 18; + break; + + case 59: + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: %options\b */ + this.pushState('options'); + + return 27; + break; + + case 60: + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: %lex{LEX_CONTENT}\/lex\b */ + // remove the %lex../lex wrapper and return the pure lex section: + yy_.yytext = this.matches[1]; + + return 17; + break; + + case 63: + /*! Conditions:: INITIAL ebnf bnf code */ + /*! 
Rule:: %include\b */ + this.pushState('path'); + + return 44; + break; + + case 64: + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: %{NAME}([^\r\n]*) */ + /* ignore unrecognized decl */ + this.warn(rmCommonWS(_templateObject37, dquote(yy_.yytext), dquote(this.topState())) + this.prettyPrintRange(yy_.yylloc)); + + yy_.yytext = [this.matches[1], // {NAME} + this.matches[2].trim() // optional value/parameters + ]; + + return 21; + break; + + case 65: + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: <{ID}> */ + yy_.yytext = this.matches[1]; + + return 36; + break; + + case 66: + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: \{\{([^]*?)\}\} */ + yy_.yytext = this.matches[1].replace(/\}\\\}/g, '}}'); // unescape any literal '}\}' that exists within the action code block + + return 15; + break; + + case 67: + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: %\{([^]*?)%\} */ + yy_.yytext = this.matches[1].replace(/%\\\}/g, '%}'); // unescape any literal '%\}' that exists within the action code block + + return 15; + break; + + case 68: + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: \{ */ + yy.depth = 0; + + this.pushState('action'); + return 12; + break; + + case 69: + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: ->.* */ + yy_.yytext = yy_.yytext.substr(2, yy_.yyleng - 2).trim(); + + return 42; + break; + + case 70: + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: →.* */ + yy_.yytext = yy_.yytext.substr(1, yy_.yyleng - 1).trim(); + + return 42; + break; + + case 71: + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: =>.* */ + yy_.yytext = yy_.yytext.substr(2, yy_.yyleng - 2).trim(); + + return 42; + break; + + case 72: + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: {HEX_NUMBER} */ + yy_.yytext = parseInt(yy_.yytext, 16); + + return 37; + break; + + case 73: + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: {DECIMAL_NUMBER}(?![xX0-9a-fA-F]) */ + yy_.yytext = parseInt(yy_.yytext, 10); + + return 37; + break; + + case 75: + /*! Conditions:: code */ + /*! Rule:: [^\r\n]+ */ + return 46; // the bit of CODE just before EOF... + + break; + + case 76: + /*! Conditions:: path */ + /*! Rule:: {BR} */ + this.popState(); + + this.unput(yy_.yytext); + break; + + case 77: + /*! Conditions:: path */ + /*! Rule:: "{DOUBLEQUOTED_STRING_CONTENT}" */ + yy_.yytext = unescQuote(this.matches[1]); + + this.popState(); + return 45; + break; + + case 78: + /*! Conditions:: path */ + /*! Rule:: '{QUOTED_STRING_CONTENT}' */ + yy_.yytext = unescQuote(this.matches[1]); + + this.popState(); + return 45; + break; + + case 79: + /*! Conditions:: path */ + /*! Rule:: {WS}+ */ + // skip whitespace in the line + break; + + case 80: + /*! Conditions:: path */ + /*! Rule:: [^\s\r\n]+ */ + this.popState(); + + return 45; + break; + + case 81: + /*! Conditions:: action */ + /*! Rule:: " */ + yy_.yyerror(rmCommonWS(_templateObject38) + this.prettyPrintRange(yy_.yylloc)); + + return 2; + break; + + case 82: + /*! Conditions:: action */ + /*! Rule:: ' */ + yy_.yyerror(rmCommonWS(_templateObject38) + this.prettyPrintRange(yy_.yylloc)); + + return 2; + break; + + case 83: + /*! Conditions:: action */ + /*! Rule:: ` */ + yy_.yyerror(rmCommonWS(_templateObject38) + this.prettyPrintRange(yy_.yylloc)); + + return 2; + break; + + case 84: + /*! Conditions:: option_values */ + /*! Rule:: " */ + yy_.yyerror(rmCommonWS(_templateObject39) + this.prettyPrintRange(yy_.yylloc)); + + return 2; + break; + + case 85: + /*! Conditions:: option_values */ + /*! 
Rule:: ' */ + yy_.yyerror(rmCommonWS(_templateObject39) + this.prettyPrintRange(yy_.yylloc)); + + return 2; + break; + + case 86: + /*! Conditions:: option_values */ + /*! Rule:: ` */ + yy_.yyerror(rmCommonWS(_templateObject39) + this.prettyPrintRange(yy_.yylloc)); + + return 2; + break; + + case 87: + /*! Conditions:: * */ + /*! Rule:: " */ + var rules = this.topState() === 'macro' ? 'macro\'s' : this.topState(); + + yy_.yyerror(rmCommonWS(_templateObject40, rules) + this.prettyPrintRange(yy_.yylloc)); + + return 2; + break; + + case 88: + /*! Conditions:: * */ + /*! Rule:: ' */ + var rules = this.topState() === 'macro' ? 'macro\'s' : this.topState(); + + yy_.yyerror(rmCommonWS(_templateObject40, rules) + this.prettyPrintRange(yy_.yylloc)); + + return 2; + break; + + case 89: + /*! Conditions:: * */ + /*! Rule:: ` */ + var rules = this.topState() === 'macro' ? 'macro\'s' : this.topState(); + + yy_.yyerror(rmCommonWS(_templateObject40, rules) + this.prettyPrintRange(yy_.yylloc)); + + return 2; + break; + + case 90: + /*! Conditions:: * */ + /*! Rule:: . */ + /* b0rk on bad characters */ + yy_.yyerror(rmCommonWS(_templateObject41, dquote(yy_.yytext), dquote(this.topState())) + this.prettyPrintRange(yy_.yylloc)); + + break; + + default: + return this.simpleCaseActionClusters[yyrulenumber]; + } + }, + + simpleCaseActionClusters: { + /*! Conditions:: action */ + /*! Rule:: \/\*[^]*?\*\/ */ + 0: 43, + + /*! Conditions:: action */ + /*! Rule:: \/\/[^\r\n]* */ + 1: 43, + + /*! Conditions:: action */ + /*! Rule:: "{DOUBLEQUOTED_STRING_CONTENT}" */ + 3: 43, + + /*! Conditions:: action */ + /*! Rule:: '{QUOTED_STRING_CONTENT}' */ + 4: 43, + + /*! Conditions:: action */ + /*! Rule:: [/"'][^{}/"']+ */ + 5: 43, + + /*! Conditions:: action */ + /*! Rule:: [^{}/"']+ */ + 6: 43, + + /*! Conditions:: bnf ebnf */ + /*! Rule:: %empty\b */ + 13: 38, + + /*! Conditions:: bnf ebnf */ + /*! Rule:: %epsilon\b */ + 14: 38, + + /*! Conditions:: bnf ebnf */ + /*! Rule:: \u0190 */ + 15: 38, + + /*! Conditions:: bnf ebnf */ + /*! Rule:: \u025B */ + 16: 38, + + /*! Conditions:: bnf ebnf */ + /*! Rule:: \u03B5 */ + 17: 38, + + /*! Conditions:: bnf ebnf */ + /*! Rule:: \u03F5 */ + 18: 38, + + /*! Conditions:: ebnf */ + /*! Rule:: \( */ + 19: 7, + + /*! Conditions:: ebnf */ + /*! Rule:: \) */ + 20: 8, + + /*! Conditions:: ebnf */ + /*! Rule:: \* */ + 21: 9, + + /*! Conditions:: ebnf */ + /*! Rule:: \? */ + 22: 10, + + /*! Conditions:: ebnf */ + /*! Rule:: \+ */ + 23: 11, + + /*! Conditions:: options */ + /*! Rule:: {NAME} */ + 24: 25, + + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: {ID} */ + 38: 24, + + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: {NAME} */ + 39: 25, + + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: \$end\b */ + 40: 40, + + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: \$eof\b */ + 41: 40, + + /*! Conditions:: token */ + /*! Rule:: [^\s\r\n]+ */ + 44: 'TOKEN_WORD', + + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: : */ + 45: 5, + + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: ; */ + 46: 4, + + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: \| */ + 47: 6, + + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: %debug\b */ + 50: 19, + + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: %parser-type\b */ + 51: 32, + + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: %prec\b */ + 52: 41, + + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: %start\b */ + 53: 16, + + /*! Conditions:: token bnf ebnf INITIAL */ + /*! 
Rule:: %left\b */ + 54: 33, + + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: %right\b */ + 55: 34, + + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: %nonassoc\b */ + 56: 35, + + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: %parse-param\b */ + 58: 31, + + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: %code\b */ + 61: 23, + + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: %import\b */ + 62: 22, + + /*! Conditions:: code */ + /*! Rule:: [^\r\n]*(\r|\n)+ */ + 74: 46, + + /*! Conditions:: * */ + /*! Rule:: $ */ + 91: 1 + }, + + rules: [ + /* 0: */new XRegExp('^(?:\\/\\*[^]*?\\*\\/)', ''), + /* 1: *//^(?:\/\/[^\r\n]*)/, + /* 2: *//^(?:\/[^ \/]*?['"{}][^ ]*?\/)/, + /* 3: *//^(?:"((?:\\"|\\[^"]|[^\n\r"\\])*)")/, + /* 4: *//^(?:'((?:\\'|\\[^']|[^\n\r'\\])*)')/, + /* 5: *//^(?:[\/"'][^{}\/"']+)/, + /* 6: *//^(?:[^{}\/"']+)/, + /* 7: *//^(?:\{)/, + /* 8: *//^(?:\})/, + /* 9: *//^(?:(\r\n|\n|\r))/, + /* 10: *//^(?:%%)/, + /* 11: *//^(?:;)/, + /* 12: *//^(?:%%)/, + /* 13: *//^(?:%empty\b)/, + /* 14: *//^(?:%epsilon\b)/, + /* 15: *//^(?:\u0190)/, + /* 16: *//^(?:\u025B)/, + /* 17: *//^(?:\u03B5)/, + /* 18: *//^(?:\u03F5)/, + /* 19: *//^(?:\()/, + /* 20: *//^(?:\))/, + /* 21: *//^(?:\*)/, + /* 22: *//^(?:\?)/, + /* 23: *//^(?:\+)/, + /* 24: */new XRegExp('^(?:([\\p{Alphabetic}_](?:[\\p{Alphabetic}\\p{Number}\\-_]*(?:[\\p{Alphabetic}\\p{Number}_]))?))', ''), + /* 25: *//^(?:=)/, + /* 26: *//^(?:"((?:\\"|\\[^"]|[^\n\r"\\])*)")/, + /* 27: *//^(?:'((?:\\'|\\[^']|[^\n\r'\\])*)')/, + /* 28: *//^(?:`((?:\\`|\\[^`]|[^\\`])*)`)/, + /* 29: *//^(?:\/\/[^\r\n]*)/, + /* 30: */new XRegExp('^(?:\\/\\*[^]*?\\*\\/)', ''), + /* 31: *//^(?:\S+)/, + /* 32: *//^(?:(\r\n|\n|\r)([^\S\n\r])+(?=\S))/, + /* 33: *//^(?:(\r\n|\n|\r))/, + /* 34: *//^(?:([^\S\n\r])+)/, + /* 35: *//^(?:([^\S\n\r])+)/, + /* 36: *//^(?:(\r\n|\n|\r)+)/, + /* 37: */new XRegExp('^(?:\\[([\\p{Alphabetic}_](?:[\\p{Alphabetic}\\p{Number}_])*)\\])', ''), + /* 38: */new XRegExp('^(?:([\\p{Alphabetic}_](?:[\\p{Alphabetic}\\p{Number}_])*))', ''), + /* 39: */new XRegExp('^(?:([\\p{Alphabetic}_](?:[\\p{Alphabetic}\\p{Number}\\-_]*(?:[\\p{Alphabetic}\\p{Number}_]))?))', ''), + /* 40: *//^(?:\$end\b)/, + /* 41: *//^(?:\$eof\b)/, + /* 42: *//^(?:"((?:\\"|\\[^"]|[^\n\r"\\])*)")/, + /* 43: *//^(?:'((?:\\'|\\[^']|[^\n\r'\\])*)')/, + /* 44: *//^(?:\S+)/, + /* 45: *//^(?::)/, + /* 46: *//^(?:;)/, + /* 47: *//^(?:\|)/, + /* 48: *//^(?:%%)/, + /* 49: *//^(?:%ebnf\b)/, + /* 50: *//^(?:%debug\b)/, + /* 51: *//^(?:%parser-type\b)/, + /* 52: *//^(?:%prec\b)/, + /* 53: *//^(?:%start\b)/, + /* 54: *//^(?:%left\b)/, + /* 55: *//^(?:%right\b)/, + /* 56: *//^(?:%nonassoc\b)/, + /* 57: *//^(?:%token\b)/, + /* 58: *//^(?:%parse-param\b)/, + /* 59: *//^(?:%options\b)/, + /* 60: */new XRegExp('^(?:%lex((?:[^\\S\\n\\r])*(?:(?:\\r\\n|\\n|\\r)[^]*?)?(?:\\r\\n|\\n|\\r)(?:[^\\S\\n\\r])*)\\/lex\\b)', ''), + /* 61: *//^(?:%code\b)/, + /* 62: *//^(?:%import\b)/, + /* 63: *//^(?:%include\b)/, + /* 64: */new XRegExp('^(?:%([\\p{Alphabetic}_](?:[\\p{Alphabetic}\\p{Number}\\-_]*(?:[\\p{Alphabetic}\\p{Number}_]))?)([^\\n\\r]*))', ''), + /* 65: */new XRegExp('^(?:<([\\p{Alphabetic}_](?:[\\p{Alphabetic}\\p{Number}_])*)>)', ''), + /* 66: */new XRegExp('^(?:\\{\\{([^]*?)\\}\\})', ''), + /* 67: */new XRegExp('^(?:%\\{([^]*?)%\\})', ''), + /* 68: *//^(?:\{)/, + /* 69: *//^(?:->.*)/, + /* 70: *//^(?:→.*)/, + /* 71: *//^(?:=>.*)/, + /* 72: *//^(?:(0[Xx][\dA-Fa-f]+))/, + /* 73: *//^(?:([1-9]\d*)(?![\dA-FXa-fx]))/, + /* 74: *//^(?:[^\r\n]*(\r|\n)+)/, + /* 75: 
*//^(?:[^\r\n]+)/, + /* 76: *//^(?:(\r\n|\n|\r))/, + /* 77: *//^(?:"((?:\\"|\\[^"]|[^\n\r"\\])*)")/, + /* 78: *//^(?:'((?:\\'|\\[^']|[^\n\r'\\])*)')/, + /* 79: *//^(?:([^\S\n\r])+)/, + /* 80: *//^(?:\S+)/, + /* 81: *//^(?:")/, + /* 82: *//^(?:')/, + /* 83: *//^(?:`)/, + /* 84: *//^(?:")/, + /* 85: *//^(?:')/, + /* 86: *//^(?:`)/, + /* 87: *//^(?:")/, + /* 88: *//^(?:')/, + /* 89: *//^(?:`)/, + /* 90: *//^(?:.)/, + /* 91: *//^(?:$)/], + + conditions: { + 'action': { + rules: [0, 1, 2, 3, 4, 5, 6, 7, 8, 81, 82, 83, 87, 88, 89, 90, 91], + inclusive: false + }, + + 'code': { + rules: [63, 74, 75, 87, 88, 89, 90, 91], + inclusive: false + }, + + 'path': { + rules: [29, 30, 76, 77, 78, 79, 80, 87, 88, 89, 90, 91], + inclusive: false + }, + + 'options': { + rules: [24, 25, 29, 30, 32, 33, 34, 87, 88, 89, 90, 91], + inclusive: false + }, + + 'option_values': { + rules: [26, 27, 28, 29, 30, 31, 34, 84, 85, 86, 87, 88, 89, 90, 91], + inclusive: false + }, + + 'token': { + rules: [9, 10, 11, 29, 30, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 87, 88, 89, 90, 91], + + inclusive: true + }, + + 'bnf': { + rules: [12, 13, 14, 15, 16, 17, 18, 29, 30, 35, 36, 37, 38, 39, 40, 41, 42, 43, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 87, 88, 89, 90, 91], + + inclusive: true + }, + + 'ebnf': { + rules: [12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 29, 30, 35, 36, 37, 38, 39, 40, 41, 42, 43, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 87, 88, 89, 90, 91], + + inclusive: true + }, + + 'INITIAL': { + rules: [29, 30, 35, 36, 37, 38, 39, 40, 41, 42, 43, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 87, 88, 89, 90, 91], + + inclusive: true + } + } + }; + + var rmCommonWS = helpers.rmCommonWS; + var dquote = helpers.dquote; + + function unescQuote(str) { + str = '' + str; + var a = str.split('\\\\'); + + a = a.map(function (s) { + return s.replace(/\\'/g, '\'').replace(/\\"/g, '"'); + }); + + str = a.join('\\\\'); + return str; + } + + lexer.warn = function l_warn() { + if (this.yy && this.yy.parser && typeof this.yy.parser.warn === 'function') { + return this.yy.parser.warn.apply(this, arguments); + } else { + console.warn.apply(console, arguments); + } + }; + + lexer.log = function l_log() { + if (this.yy && this.yy.parser && typeof this.yy.parser.log === 'function') { + return this.yy.parser.log.apply(this, arguments); + } else { + console.log.apply(console, arguments); + } + }; + + return lexer; + }(); + parser.lexer = lexer; + + function Parser() { + this.yy = {}; + } + Parser.prototype = parser; + parser.Parser = Parser; + + function yyparse() { + return parser.parse.apply(parser, arguments); + } + + var bnf = { + parser: parser, + Parser: Parser, + parse: yyparse + + }; + + var version = '0.6.1-205'; // require('./package.json').version; + + function parse(grammar) { + return bnf.parser.parse(grammar); + } + + // adds a declaration to the grammar + bnf.parser.yy.addDeclaration = function bnfAddDeclaration(grammar, decl) { + if (decl.start) { + grammar.start = decl.start; + } else if (decl.lex) { + grammar.lex = parseLex(decl.lex.text, decl.lex.position); + } else if (decl.operator) { + if (!grammar.operators) grammar.operators = []; + grammar.operators.push(decl.operator); + } else if 
(decl.token) { + if (!grammar.extra_tokens) grammar.extra_tokens = []; + grammar.extra_tokens.push(decl.token); + } else if (decl.token_list) { + if (!grammar.extra_tokens) grammar.extra_tokens = []; + decl.token_list.forEach(function (tok) { + grammar.extra_tokens.push(tok); + }); + } else if (decl.parseParams) { + if (!grammar.parseParams) grammar.parseParams = []; + grammar.parseParams = grammar.parseParams.concat(decl.parseParams); + } else if (decl.parserType) { + if (!grammar.options) grammar.options = {}; + grammar.options.type = decl.parserType; + } else if (decl.include) { + if (!grammar.moduleInclude) grammar.moduleInclude = ''; + grammar.moduleInclude += decl.include; + } else if (decl.options) { + if (!grammar.options) grammar.options = {}; + // last occurrence of `%options` wins: + for (var i = 0; i < decl.options.length; i++) { + grammar.options[decl.options[i][0]] = decl.options[i][1]; + } + } else if (decl.unknownDecl) { + if (!grammar.unknownDecls) grammar.unknownDecls = []; + grammar.unknownDecls.push(decl.unknownDecl); + } else if (decl.imports) { + if (!grammar.imports) grammar.imports = []; + grammar.imports.push(decl.imports); + } else if (decl.actionInclude) { + if (!grammar.actionInclude) { + grammar.actionInclude = ''; + } + grammar.actionInclude += decl.actionInclude; + } else if (decl.initCode) { + if (!grammar.moduleInit) { + grammar.moduleInit = []; + } + grammar.moduleInit.push(decl.initCode); // {qualifier: , include: } + } + }; + + // parse an embedded lex section + function parseLex(text, position) { + text = text.replace(/(?:^%lex)|(?:\/lex$)/g, ''); + // We want the lex input to start at the given 'position', if any, + // so that error reports will produce a line number and character index + // which matches the original input file: + position = position || {}; + position.range = position.range || []; + var l = position.first_line | 0; + var c = position.range[0] | 0; + var prelude = ''; + if (l > 1) { + prelude += new Array(l).join('\n'); + c -= prelude.length; + } + if (c > 3) { + prelude = '// ' + new Array(c - 3).join('.') + prelude; + } + return jisonlex.parse(prelude + text); + } + + var ebnf_parser = { + transform: transform + }; + + var ebnfParser = { + parse: parse, + + transform: transform, + + // assistant exports for debugging/testing: + bnf_parser: bnf, + ebnf_parser: ebnf_parser, + bnf_lexer: jisonlex, + + version: version + }; + + return ebnfParser; +}); diff --git a/dist/ebnf-parser-umd.js b/dist/ebnf-parser-umd.js new file mode 100644 index 0000000..bfbc284 --- /dev/null +++ b/dist/ebnf-parser-umd.js @@ -0,0 +1,11541 @@ +(function (global, factory) { + typeof exports === 'object' && typeof module !== 'undefined' ? module.exports = factory(require('@gerhobbelt/xregexp'), require('jison-helpers-lib'), require('fs'), require('@gerhobbelt/lex-parser')) : + typeof define === 'function' && define.amd ? define(['@gerhobbelt/xregexp', 'jison-helpers-lib', 'fs', '@gerhobbelt/lex-parser'], factory) : + (global['ebnf-parser'] = factory(global.XRegExp,global.helpers,global.fs,global.jisonlex)); +}(this, (function (XRegExp,helpers,fs,jisonlex) { 'use strict'; + +XRegExp = XRegExp && XRegExp.hasOwnProperty('default') ? XRegExp['default'] : XRegExp; +helpers = helpers && helpers.hasOwnProperty('default') ? helpers['default'] : helpers; +fs = fs && fs.hasOwnProperty('default') ? fs['default'] : fs; +jisonlex = jisonlex && jisonlex.hasOwnProperty('default') ? 
jisonlex['default'] : jisonlex; + +/* parser generated by jison 0.6.1-205 */ + +/* + * Returns a Parser object of the following structure: + * + * Parser: { + * yy: {} The so-called "shared state" or rather the *source* of it; + * the real "shared state" `yy` passed around to + * the rule actions, etc. is a derivative/copy of this one, + * not a direct reference! + * } + * + * Parser.prototype: { + * yy: {}, + * EOF: 1, + * TERROR: 2, + * + * trace: function(errorMessage, ...), + * + * JisonParserError: function(msg, hash), + * + * quoteName: function(name), + * Helper function which can be overridden by user code later on: put suitable + * quotes around literal IDs in a description string. + * + * originalQuoteName: function(name), + * The basic quoteName handler provided by JISON. + * `cleanupAfterParse()` will clean up and reset `quoteName()` to reference this function + * at the end of the `parse()`. + * + * describeSymbol: function(symbol), + * Return a more-or-less human-readable description of the given symbol, when + * available, or the symbol itself, serving as its own 'description' for lack + * of something better to serve up. + * + * Return NULL when the symbol is unknown to the parser. + * + * symbols_: {associative list: name ==> number}, + * terminals_: {associative list: number ==> name}, + * nonterminals: {associative list: rule-name ==> {associative list: number ==> rule-alt}}, + * terminal_descriptions_: (if there are any) {associative list: number ==> description}, + * productions_: [...], + * + * performAction: function parser__performAction(yytext, yyleng, yylineno, yyloc, yystate, yysp, yyvstack, yylstack, yystack, yysstack), + * + * The function parameters and `this` have the following value/meaning: + * - `this` : reference to the `yyval` internal object, which has members (`$` and `_$`) + * to store/reference the rule value `$$` and location info `@$`. + * + * One important thing to note about `this` a.k.a. `yyval`: every *reduce* action gets + * to see the same object via the `this` reference, i.e. if you wish to carry custom + * data from one reduce action through to the next within a single parse run, then you + * may get nasty and use `yyval` a.k.a. `this` for storing you own semi-permanent data. + * + * `this.yy` is a direct reference to the `yy` shared state object. + * + * `%parse-param`-specified additional `parse()` arguments have been added to this `yy` + * object at `parse()` start and are therefore available to the action code via the + * same named `yy.xxxx` attributes (where `xxxx` represents a identifier name from + * the %parse-param` list. + * + * - `yytext` : reference to the lexer value which belongs to the last lexer token used + * to match this rule. This is *not* the look-ahead token, but the last token + * that's actually part of this rule. + * + * Formulated another way, `yytext` is the value of the token immediately preceeding + * the current look-ahead token. + * Caveats apply for rules which don't require look-ahead, such as epsilon rules. + * + * - `yyleng` : ditto as `yytext`, only now for the lexer.yyleng value. + * + * - `yylineno`: ditto as `yytext`, only now for the lexer.yylineno value. + * + * - `yyloc` : ditto as `yytext`, only now for the lexer.yylloc lexer token location info. + * + * WARNING: since jison 0.4.18-186 this entry may be NULL/UNDEFINED instead + * of an empty object when no suitable location info can be provided. 
+ * + * - `yystate` : the current parser state number, used internally for dispatching and + * executing the action code chunk matching the rule currently being reduced. + * + * - `yysp` : the current state stack position (a.k.a. 'stack pointer') + * + * This one comes in handy when you are going to do advanced things to the parser + * stacks, all of which are accessible from your action code (see the next entries below). + * + * Also note that you can access this and other stack index values using the new double-hash + * syntax, i.e. `##$ === ##0 === yysp`, while `##1` is the stack index for all things + * related to the first rule term, just like you have `$1`, `@1` and `#1`. + * This is made available to write very advanced grammar action rules, e.g. when you want + * to investigate the parse state stack in your action code, which would, for example, + * be relevant when you wish to implement error diagnostics and reporting schemes similar + * to the work described here: + * + * + Pottier, F., 2016. Reachability and error diagnosis in LR(1) automata. + * In Journées Francophones des Languages Applicatifs. + * + * + Jeffery, C.L., 2003. Generating LR syntax error messages from examples. + * ACM Transactions on Programming Languages and Systems (TOPLAS), 25(5), pp.631–640. + * + * - `yyrulelength`: the current rule's term count, i.e. the number of entries occupied on the stack. + * + * This one comes in handy when you are going to do advanced things to the parser + * stacks, all of which are accessible from your action code (see the next entries below). + * + * - `yyvstack`: reference to the parser value stack. Also accessed via the `$1` etc. + * constructs. + * + * - `yylstack`: reference to the parser token location stack. Also accessed via + * the `@1` etc. constructs. + * + * WARNING: since jison 0.4.18-186 this array MAY contain slots which are + * UNDEFINED rather than an empty (location) object, when the lexer/parser + * action code did not provide a suitable location info object when such a + * slot was filled! + * + * - `yystack` : reference to the parser token id stack. Also accessed via the + * `#1` etc. constructs. + * + * Note: this is a bit of a **white lie** as we can statically decode any `#n` reference to + * its numeric token id value, hence that code wouldn't need the `yystack` but *you* might + * want access this array for your own purposes, such as error analysis as mentioned above! + * + * Note that this stack stores the current stack of *tokens*, that is the sequence of + * already parsed=reduced *nonterminals* (tokens representing rules) and *terminals* + * (lexer tokens *shifted* onto the stack until the rule they belong to is found and + * *reduced*. + * + * - `yysstack`: reference to the parser state stack. This one carries the internal parser + * *states* such as the one in `yystate`, which are used to represent + * the parser state machine in the *parse table*. *Very* *internal* stuff, + * what can I say? If you access this one, you're clearly doing wicked things + * + * - `...` : the extra arguments you specified in the `%parse-param` statement in your + * grammar definition file. 
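 *
 *    Illustrative note (not part of the generated kernel docs; the rule and action
 *    below are made-up examples, not productions of this grammar): for a 3-term
 *    rule, the classic `$n` references map onto the arguments above like this:
 *
 *        // grammar:   exp : exp '+' term   { $$ = $1 + $3; }
 *        // generated action body (classic/merge mode):
 *        this.$ = yyvstack[yysp - 2] + yyvstack[yysp];
 *        // i.e. `$1` === yyvstack[yysp - 2], `$3` === yyvstack[yysp],
 *        // while `$$` is written through `this.$` (a.k.a. `yyval.$`).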
+ * + * table: [...], + * State transition table + * ---------------------- + * + * index levels are: + * - `state` --> hash table + * - `symbol` --> action (number or array) + * + * If the `action` is an array, these are the elements' meaning: + * - index [0]: 1 = shift, 2 = reduce, 3 = accept + * - index [1]: GOTO `state` + * + * If the `action` is a number, it is the GOTO `state` + * + * defaultActions: {...}, + * + * parseError: function(str, hash, ExceptionClass), + * yyError: function(str, ...), + * yyRecovering: function(), + * yyErrOk: function(), + * yyClearIn: function(), + * + * constructParseErrorInfo: function(error_message, exception_object, expected_token_set, is_recoverable), + * Helper function **which will be set up during the first invocation of the `parse()` method**. + * Produces a new errorInfo 'hash object' which can be passed into `parseError()`. + * See it's use in this parser kernel in many places; example usage: + * + * var infoObj = parser.constructParseErrorInfo('fail!', null, + * parser.collect_expected_token_set(state), true); + * var retVal = parser.parseError(infoObj.errStr, infoObj, parser.JisonParserError); + * + * originalParseError: function(str, hash, ExceptionClass), + * The basic `parseError` handler provided by JISON. + * `cleanupAfterParse()` will clean up and reset `parseError()` to reference this function + * at the end of the `parse()`. + * + * options: { ... parser %options ... }, + * + * parse: function(input[, args...]), + * Parse the given `input` and return the parsed value (or `true` when none was provided by + * the root action, in which case the parser is acting as a *matcher*). + * You MAY use the additional `args...` parameters as per `%parse-param` spec of this grammar: + * these extra `args...` are added verbatim to the `yy` object reference as member variables. + * + * WARNING: + * Parser's additional `args...` parameters (via `%parse-param`) MAY conflict with + * any attributes already added to `yy` by the jison run-time; + * when such a collision is detected an exception is thrown to prevent the generated run-time + * from silently accepting this confusing and potentially hazardous situation! + * + * The lexer MAY add its own set of additional parameters (via the `%parse-param` line in + * the lexer section of the grammar spec): these will be inserted in the `yy` shared state + * object and any collision with those will be reported by the lexer via a thrown exception. + * + * cleanupAfterParse: function(resultValue, invoke_post_methods, do_not_nuke_errorinfos), + * Helper function **which will be set up during the first invocation of the `parse()` method**. + * This helper API is invoked at the end of the `parse()` call, unless an exception was thrown + * and `%options no-try-catch` has been defined for this grammar: in that case this helper MAY + * be invoked by calling user code to ensure the `post_parse` callbacks are invoked and + * the internal parser gets properly garbage collected under these particular circumstances. + * + * yyMergeLocationInfo: function(first_index, last_index, first_yylloc, last_yylloc, dont_look_back), + * Helper function **which will be set up during the first invocation of the `parse()` method**. + * This helper API can be invoked to calculate a spanning `yylloc` location info object. + * + * Note: %epsilon rules MAY specify no `first_index` and `first_yylloc`, in which case + * this function will attempt to obtain a suitable location marker by inspecting the location stack + * backwards. 
+ * + * For more info see the documentation comment further below, immediately above this function's + * implementation. + * + * lexer: { + * yy: {...}, A reference to the so-called "shared state" `yy` once + * received via a call to the `.setInput(input, yy)` lexer API. + * EOF: 1, + * ERROR: 2, + * JisonLexerError: function(msg, hash), + * parseError: function(str, hash, ExceptionClass), + * setInput: function(input, [yy]), + * input: function(), + * unput: function(str), + * more: function(), + * reject: function(), + * less: function(n), + * pastInput: function(n), + * upcomingInput: function(n), + * showPosition: function(), + * test_match: function(regex_match_array, rule_index, ...), + * next: function(...), + * lex: function(...), + * begin: function(condition), + * pushState: function(condition), + * popState: function(), + * topState: function(), + * _currentRules: function(), + * stateStackSize: function(), + * cleanupAfterLex: function() + * + * options: { ... lexer %options ... }, + * + * performAction: function(yy, yy_, $avoiding_name_collisions, YY_START, ...), + * rules: [...], + * conditions: {associative list: name ==> set}, + * } + * } + * + * + * token location info (@$, _$, etc.): { + * first_line: n, + * last_line: n, + * first_column: n, + * last_column: n, + * range: [start_number, end_number] + * (where the numbers are indexes into the input string, zero-based) + * } + * + * --- + * + * The `parseError` function receives a 'hash' object with these members for lexer and + * parser errors: + * + * { + * text: (matched text) + * token: (the produced terminal token, if any) + * token_id: (the produced terminal token numeric ID, if any) + * line: (yylineno) + * loc: (yylloc) + * } + * + * parser (grammar) errors will also provide these additional members: + * + * { + * expected: (array describing the set of expected tokens; + * may be UNDEFINED when we cannot easily produce such a set) + * state: (integer (or array when the table includes grammar collisions); + * represents the current internal state of the parser kernel. + * can, for example, be used to pass to the `collect_expected_token_set()` + * API to obtain the expected token set) + * action: (integer; represents the current internal action which will be executed) + * new_state: (integer; represents the next/planned internal state, once the current + * action has executed) + * recoverable: (boolean: TRUE when the parser MAY have an error recovery rule + * available for this particular error) + * state_stack: (array: the current parser LALR/LR internal state stack; this can be used, + * for instance, for advanced error analysis and reporting) + * value_stack: (array: the current parser LALR/LR internal `$$` value stack; this can be used, + * for instance, for advanced error analysis and reporting) + * location_stack: (array: the current parser LALR/LR internal location stack; this can be used, + * for instance, for advanced error analysis and reporting) + * yy: (object: the current parser internal "shared state" `yy` + * as is also available in the rule actions; this can be used, + * for instance, for advanced error analysis and reporting) + * lexer: (reference to the current lexer instance used by the parser) + * parser: (reference to the current parser instance) + * } + * + * while `this` will reference the current parser instance. 
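 *
 * Illustrative sketch (not part of the generated docs; `myParser` and
 * `grammarSourceText` are placeholder names): a userland `parseError` override
 * installed through the instance's `yy` shared state, reading a few of the
 * hash members listed above:
 *
 *     var myParser = new Parser();
 *     myParser.yy.parseError = function (str, hash, ExceptionClass) {
 *         // `hash.expected` may be UNDEFINED, see above
 *         console.error(str, 'got:', hash.token, 'expected:', hash.expected);
 *         if (!hash.recoverable) {
 *             throw new ExceptionClass(str, hash);
 *         }
 *     };
 *     myParser.parse(grammarSourceText);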
+ * + * When `parseError` is invoked by the lexer, `this` will still reference the related *parser* + * instance, while these additional `hash` fields will also be provided: + * + * { + * lexer: (reference to the current lexer instance which reported the error) + * } + * + * When `parseError` is invoked by the parser due to a **JavaScript exception** being fired + * from either the parser or lexer, `this` will still reference the related *parser* + * instance, while these additional `hash` fields will also be provided: + * + * { + * exception: (reference to the exception thrown) + * } + * + * Please do note that in the latter situation, the `expected` field will be omitted as + * this type of failure is assumed not to be due to *parse errors* but rather due to user + * action code in either parser or lexer failing unexpectedly. + * + * --- + * + * You can specify parser options by setting / modifying the `.yy` object of your Parser instance. + * These options are available: + * + * ### options which are global for all parser instances + * + * Parser.pre_parse: function(yy) + * optional: you can specify a pre_parse() function in the chunk following + * the grammar, i.e. after the last `%%`. + * Parser.post_parse: function(yy, retval, parseInfo) { return retval; } + * optional: you can specify a post_parse() function in the chunk following + * the grammar, i.e. after the last `%%`. When it does not return any value, + * the parser will return the original `retval`. + * + * ### options which can be set up per parser instance + * + * yy: { + * pre_parse: function(yy) + * optional: is invoked before the parse cycle starts (and before the first + * invocation of `lex()`) but immediately after the invocation of + * `parser.pre_parse()`). + * post_parse: function(yy, retval, parseInfo) { return retval; } + * optional: is invoked when the parse terminates due to success ('accept') + * or failure (even when exceptions are thrown). + * `retval` contains the return value to be produced by `Parser.parse()`; + * this function can override the return value by returning another. + * When it does not return any value, the parser will return the original + * `retval`. + * This function is invoked immediately before `parser.post_parse()`. + * + * parseError: function(str, hash, ExceptionClass) + * optional: overrides the default `parseError` function. + * quoteName: function(name), + * optional: overrides the default `quoteName` function. + * } + * + * parser.lexer.options: { + * pre_lex: function() + * optional: is invoked before the lexer is invoked to produce another token. + * `this` refers to the Lexer object. + * post_lex: function(token) { return token; } + * optional: is invoked when the lexer has produced a token `token`; + * this function can override the returned token value by returning another. + * When it does not return any (truthy) value, the lexer will return + * the original `token`. + * `this` refers to the Lexer object. + * + * ranges: boolean + * optional: `true` ==> token location info will include a .range[] member. + * flex: boolean + * optional: `true` ==> flex-like lexing behaviour where the rules are tested + * exhaustively to find the longest match. + * backtrack_lexer: boolean + * optional: `true` ==> lexer regexes are tested in order and for invoked; + * the lexer terminates the scan when a token is returned by the action code. + * xregexp: boolean + * optional: `true` ==> lexer rule regexes are "extended regex format" requiring the + * `XRegExp` library. 
When this `%option` has not been specified at compile time, all lexer + * rule regexes have been written as standard JavaScript RegExp expressions. + * } + */ + + +// See also: +// http://stackoverflow.com/questions/1382107/whats-a-good-way-to-extend-error-in-javascript/#35881508 +// but we keep the prototype.constructor and prototype.name assignment lines too for compatibility +// with userland code which might access the derived class in a 'classic' way. +function JisonParserError$1(msg, hash) { + Object.defineProperty(this, 'name', { + enumerable: false, + writable: false, + value: 'JisonParserError' + }); + + if (msg == null) msg = '???'; + + Object.defineProperty(this, 'message', { + enumerable: false, + writable: true, + value: msg + }); + + this.hash = hash; + + var stacktrace; + if (hash && hash.exception instanceof Error) { + var ex2 = hash.exception; + this.message = ex2.message || msg; + stacktrace = ex2.stack; + } + if (!stacktrace) { + if (Error.hasOwnProperty('captureStackTrace')) { // V8/Chrome engine + Error.captureStackTrace(this, this.constructor); + } else { + stacktrace = (new Error(msg)).stack; + } + } + if (stacktrace) { + Object.defineProperty(this, 'stack', { + enumerable: false, + writable: false, + value: stacktrace + }); + } +} + +if (typeof Object.setPrototypeOf === 'function') { + Object.setPrototypeOf(JisonParserError$1.prototype, Error.prototype); +} else { + JisonParserError$1.prototype = Object.create(Error.prototype); +} +JisonParserError$1.prototype.constructor = JisonParserError$1; +JisonParserError$1.prototype.name = 'JisonParserError'; + + + + // helper: reconstruct the productions[] table + function bp$1(s) { + var rv = []; + var p = s.pop; + var r = s.rule; + for (var i = 0, l = p.length; i < l; i++) { + rv.push([ + p[i], + r[i] + ]); + } + return rv; + } + + + + + + // helper: reconstruct the 'goto' table + function bt$1(s) { + var rv = []; + var d = s.len; + var y = s.symbol; + var t = s.type; + var a = s.state; + var m = s.mode; + var g = s.goto; + for (var i = 0, l = d.length; i < l; i++) { + var n = d[i]; + var q = {}; + for (var j = 0; j < n; j++) { + var z = y.shift(); + switch (t.shift()) { + case 2: + q[z] = [ + m.shift(), + g.shift() + ]; + break; + + case 0: + q[z] = a.shift(); + break; + + default: + // type === 1: accept + q[z] = [ + 3 + ]; + } + } + rv.push(q); + } + return rv; + } + + + + // helper: runlength encoding with increment step: code, length: step (default step = 0) + // `this` references an array + function s$1(c, l, a) { + a = a || 0; + for (var i = 0; i < l; i++) { + this.push(c); + c += a; + } + } + + // helper: duplicate sequence from *relative* offset and length. + // `this` references an array + function c$1(i, l) { + i = this.length - i; + for (l += i; i < l; i++) { + this.push(this[i]); + } + } + + // helper: unpack an array using helpers and data, all passed in an array argument 'a'. + function u$1(a) { + var rv = []; + for (var i = 0, l = a.length; i < l; i++) { + var e = a[i]; + // Is this entry a helper function? + if (typeof e === 'function') { + i++; + e.apply(rv, a[i]); + } else { + rv.push(e); + } + } + return rv; + } + + +var parser$1 = { + // Code Generator Information Report + // --------------------------------- + // + // Options: + // + // default action mode: ............. classic,merge + // no try..catch: ................... false + // no default resolve on conflict: false + // on-demand look-ahead: ............ false + // error recovery token skip maximum: 3 + // yyerror in parse actions is: ..... 
NOT recoverable, + // yyerror in lexer actions and other non-fatal lexer are: + // .................................. NOT recoverable, + // debug grammar/output: ............ false + // has partial LR conflict upgrade: true + // rudimentary token-stack support: false + // parser table compression mode: ... 2 + // export debug tables: ............. false + // export *all* tables: ............. false + // module type: ..................... es + // parser engine type: .............. lalr + // output main() in the module: ..... true + // has user-specified main(): ....... false + // has user-specified require()/import modules for main(): + // .................................. false + // number of expected conflicts: .... 0 + // + // + // Parser Analysis flags: + // + // no significant actions (parser is a language matcher only): + // .................................. false + // uses yyleng: ..................... false + // uses yylineno: ................... false + // uses yytext: ..................... false + // uses yylloc: ..................... false + // uses ParseError API: ............. false + // uses YYERROR: .................... false + // uses YYRECOVERING: ............... false + // uses YYERROK: .................... false + // uses YYCLEARIN: .................. false + // tracks rule values: .............. true + // assigns rule values: ............. true + // uses location tracking: .......... false + // assigns location: ................ false + // uses yystack: .................... false + // uses yysstack: ................... false + // uses yysp: ....................... true + // uses yyrulelength: ............... false + // uses yyMergeLocationInfo API: .... false + // has error recovery: .............. false + // has error reporting: ............. false + // + // --------- END OF REPORT ----------- + +trace: function no_op_trace() {}, +JisonParserError: JisonParserError$1, +yy: {}, +options: { + type: "lalr", + hasPartialLrUpgradeOnConflict: true, + errorRecoveryTokenDiscardCount: 3 +}, +symbols_: { + "$accept": 0, + "$end": 1, + "(": 4, + ")": 5, + "*": 6, + "+": 8, + "?": 7, + "ALIAS": 9, + "EOF": 1, + "SYMBOL": 10, + "error": 2, + "expression": 16, + "handle": 13, + "handle_list": 12, + "production": 11, + "rule": 14, + "suffix": 17, + "suffixed_expression": 15, + "|": 3 +}, +terminals_: { + 1: "EOF", + 2: "error", + 3: "|", + 4: "(", + 5: ")", + 6: "*", + 7: "?", + 8: "+", + 9: "ALIAS", + 10: "SYMBOL" +}, +TERROR: 2, +EOF: 1, + +// internals: defined here so the object *structure* doesn't get modified by parse() et al, +// thus helping JIT compilers like Chrome V8. +originalQuoteName: null, +originalParseError: null, +cleanupAfterParse: null, +constructParseErrorInfo: null, +yyMergeLocationInfo: null, + +__reentrant_call_depth: 0, // INTERNAL USE ONLY +__error_infos: [], // INTERNAL USE ONLY: the set of parseErrorInfo objects created since the last cleanup +__error_recovery_infos: [], // INTERNAL USE ONLY: the set of parseErrorInfo objects created since the last cleanup + +// APIs which will be set up depending on user action code analysis: +//yyRecovering: 0, +//yyErrOk: 0, +//yyClearIn: 0, + +// Helper APIs +// ----------- + +// Helper function which can be overridden by user code later on: put suitable quotes around +// literal IDs in a description string. +quoteName: function parser_quoteName(id_str) { + return '"' + id_str + '"'; +}, + +// Return the name of the given symbol (terminal or non-terminal) as a string, when available. 
+// +// Return NULL when the symbol is unknown to the parser. +getSymbolName: function parser_getSymbolName(symbol) { + if (this.terminals_[symbol]) { + return this.terminals_[symbol]; + } + + // Otherwise... this might refer to a RULE token i.e. a non-terminal: see if we can dig that one up. + // + // An example of this may be where a rule's action code contains a call like this: + // + // parser.getSymbolName(#$) + // + // to obtain a human-readable name of the current grammar rule. + var s = this.symbols_; + for (var key in s) { + if (s[key] === symbol) { + return key; + } + } + return null; +}, + +// Return a more-or-less human-readable description of the given symbol, when available, +// or the symbol itself, serving as its own 'description' for lack of something better to serve up. +// +// Return NULL when the symbol is unknown to the parser. +describeSymbol: function parser_describeSymbol(symbol) { + if (symbol !== this.EOF && this.terminal_descriptions_ && this.terminal_descriptions_[symbol]) { + return this.terminal_descriptions_[symbol]; + } else if (symbol === this.EOF) { + return 'end of input'; + } + var id = this.getSymbolName(symbol); + if (id) { + return this.quoteName(id); + } + return null; +}, + +// Produce a (more or less) human-readable list of expected tokens at the point of failure. +// +// The produced list may contain token or token set descriptions instead of the tokens +// themselves to help turning this output into something that easier to read by humans +// unless `do_not_describe` parameter is set, in which case a list of the raw, *numeric*, +// expected terminals and nonterminals is produced. +// +// The returned list (array) will not contain any duplicate entries. +collect_expected_token_set: function parser_collect_expected_token_set(state, do_not_describe) { + var TERROR = this.TERROR; + var tokenset = []; + var check = {}; + // Has this (error?) state been outfitted with a custom expectations description text for human consumption? + // If so, use that one instead of the less palatable token set. + if (!do_not_describe && this.state_descriptions_ && this.state_descriptions_[state]) { + return [this.state_descriptions_[state]]; + } + for (var p in this.table[state]) { + p = +p; + if (p !== TERROR) { + var d = do_not_describe ? p : this.describeSymbol(p); + if (d && !check[d]) { + tokenset.push(d); + check[d] = true; // Mark this token description as already mentioned to prevent outputting duplicate entries. + } + } + } + return tokenset; +}, +productions_: bp$1({ + pop: u$1([ + 11, + 12, + 12, + 13, + 13, + 14, + 14, + 15, + 15, + 16, + 16, + s$1, + [17, 4] +]), + rule: u$1([ + 2, + 1, + 3, + 0, + 1, + 1, + 2, + 3, + c$1, + [8, 6], + 1 +]) +}), +performAction: function parser__PerformAction(yystate /* action[1] */, yysp, yyvstack) { + + /* this == yyval */ + + // the JS engine itself can go and remove these statements when `yy` turns out to be unused in any action code! + switch (yystate) { +case 0: + /*! Production:: $accept : production $end */ + + // default action (generated by JISON mode classic/merge :: VT,VA,-,-,-,-,-,-): + this.$ = yyvstack[yysp - 1]; + // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,-,-,-,-) + break; + +case 1: + /*! 
Production:: production : handle EOF */ + + // default action (generated by JISON mode classic/merge :: VT,VA,-,-,-,-,-,-): + this.$ = yyvstack[yysp - 1]; + // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,-,-,-,-) + + + return yyvstack[yysp - 1]; + break; + +case 2: + /*! Production:: handle_list : handle */ +case 6: + /*! Production:: rule : suffixed_expression */ + + this.$ = [yyvstack[yysp]]; + break; + +case 3: + /*! Production:: handle_list : handle_list "|" handle */ + + yyvstack[yysp - 2].push(yyvstack[yysp]); + this.$ = yyvstack[yysp - 2]; + break; + +case 4: + /*! Production:: handle : %epsilon */ + + this.$ = []; + break; + +case 5: + /*! Production:: handle : rule */ +case 13: + /*! Production:: suffix : "*" */ +case 14: + /*! Production:: suffix : "?" */ +case 15: + /*! Production:: suffix : "+" */ + + this.$ = yyvstack[yysp]; + break; + +case 7: + /*! Production:: rule : rule suffixed_expression */ + + yyvstack[yysp - 1].push(yyvstack[yysp]); + this.$ = yyvstack[yysp - 1]; + break; + +case 8: + /*! Production:: suffixed_expression : expression suffix ALIAS */ + + this.$ = ['xalias', yyvstack[yysp - 1], yyvstack[yysp - 2], yyvstack[yysp]]; + break; + +case 9: + /*! Production:: suffixed_expression : expression suffix */ + + if (yyvstack[yysp]) { + this.$ = [yyvstack[yysp], yyvstack[yysp - 1]]; + } else { + this.$ = yyvstack[yysp - 1]; + } + break; + +case 10: + /*! Production:: expression : SYMBOL */ + + this.$ = ['symbol', yyvstack[yysp]]; + break; + +case 11: + /*! Production:: expression : "(" handle_list ")" */ + + this.$ = ['()', yyvstack[yysp - 1]]; + break; + +case 12: + /*! Production:: suffix : %epsilon */ + + this.$ = undefined; + break; + +} +}, +table: bt$1({ + len: u$1([ + 8, + 1, + 1, + 7, + 0, + 10, + 0, + 9, + 0, + 0, + 6, + s$1, + [0, 3], + 2, + s$1, + [0, 3], + 8, + 0 +]), + symbol: u$1([ + 1, + 4, + 10, + 11, + s$1, + [13, 4, 1], + s$1, + [1, 3], + 3, + 4, + 5, + 10, + c$1, + [9, 3], + s$1, + [3, 8, 1], + 17, + c$1, + [16, 4], + s$1, + [12, 5, 1], + c$1, + [19, 4], + 9, + 10, + 3, + 5, + c$1, + [17, 4], + c$1, + [16, 4] +]), + type: u$1([ + s$1, + [2, 3], + s$1, + [0, 5], + 1, + s$1, + [2, 6], + 0, + 0, + s$1, + [2, 9], + c$1, + [10, 5], + s$1, + [0, 5], + s$1, + [2, 12], + s$1, + [0, 4] +]), + state: u$1([ + s$1, + [1, 5, 1], + 9, + 5, + 10, + 14, + 15, + c$1, + [8, 3], + 19, + c$1, + [4, 3] +]), + mode: u$1([ + 2, + s$1, + [1, 3], + 2, + 2, + 1, + 2, + c$1, + [5, 3], + c$1, + [7, 3], + c$1, + [12, 4], + c$1, + [13, 9], + c$1, + [15, 3], + c$1, + [5, 4] +]), + goto: u$1([ + 4, + 7, + 6, + 8, + 5, + 5, + 7, + 5, + 6, + s$1, + [12, 4], + 11, + 12, + 13, + 12, + 12, + 4, + 7, + 4, + 6, + s$1, + [9, 4], + 16, + 9, + 18, + 17, + c$1, + [12, 4] +]) +}), +defaultActions: { + 4: 6, + 6: 10, + 8: 1, + 9: 7, + 11: 13, + 12: 14, + 13: 15, + 15: 2, + 16: 8, + 17: 11, + 19: 3 +}, +parseError: function parseError(str, hash, ExceptionClass) { + if (hash.recoverable && typeof this.trace === 'function') { + this.trace(str); + hash.destroy(); // destroy... well, *almost*! 
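        // recoverable errors end here: they are only routed through trace() and
        // their hash is released; everything else falls through to the `throw` below.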
+ } else { + if (!ExceptionClass) { + ExceptionClass = this.JisonParserError; + } + throw new ExceptionClass(str, hash); + } +}, +parse: function parse(input) { + var self = this; + var stack = new Array(128); // token stack: stores token which leads to state at the same index (column storage) + var sstack = new Array(128); // state stack: stores states (column storage) + + var vstack = new Array(128); // semantic value stack + + var table = this.table; + var sp = 0; // 'stack pointer': index into the stacks + + + var symbol = 0; + + + + var EOF = this.EOF; + var NO_ACTION = [0, 20 /* === table.length :: ensures that anyone using this new state will fail dramatically! */]; + + var lexer; + if (this.__lexer__) { + lexer = this.__lexer__; + } else { + lexer = this.__lexer__ = Object.create(this.lexer); + } + + var sharedState_yy = { + parseError: undefined, + quoteName: undefined, + lexer: undefined, + parser: undefined, + pre_parse: undefined, + post_parse: undefined, + pre_lex: undefined, + post_lex: undefined // WARNING: must be written this way for the code expanders to work correctly in both ES5 and ES6 modes! + }; + + this.yyGetSharedState = function yyGetSharedState() { + return sharedState_yy; + }; + + + + + + + + + function shallow_copy_noclobber(dst, src) { + for (var k in src) { + if (typeof dst[k] === 'undefined' && Object.prototype.hasOwnProperty.call(src, k)) { + dst[k] = src[k]; + } + } + } + + // copy state + shallow_copy_noclobber(sharedState_yy, this.yy); + + sharedState_yy.lexer = lexer; + sharedState_yy.parser = this; + + + + + + + // Does the shared state override the default `parseError` that already comes with this instance? + if (typeof sharedState_yy.parseError === 'function') { + this.parseError = function parseErrorAlt(str, hash, ExceptionClass) { + if (!ExceptionClass) { + ExceptionClass = this.JisonParserError; + } + return sharedState_yy.parseError.call(this, str, hash, ExceptionClass); + }; + } else { + this.parseError = this.originalParseError; + } + + // Does the shared state override the default `quoteName` that already comes with this instance? + if (typeof sharedState_yy.quoteName === 'function') { + this.quoteName = function quoteNameAlt(id_str) { + return sharedState_yy.quoteName.call(this, id_str); + }; + } else { + this.quoteName = this.originalQuoteName; + } + + // set up the cleanup function; make it an API so that external code can re-use this one in case of + // calamities or when the `%options no-try-catch` option has been specified for the grammar, in which + // case this parse() API method doesn't come with a `finally { ... }` block any more! + // + // NOTE: as this API uses parse() as a closure, it MUST be set again on every parse() invocation, + // or else your `sharedState`, etc. references will be *wrong*! + this.cleanupAfterParse = function parser_cleanupAfterParse(resultValue, invoke_post_methods, do_not_nuke_errorinfos) { + var rv; + + if (invoke_post_methods) { + var hash; + + if (sharedState_yy.post_parse || this.post_parse) { + // create an error hash info instance: we re-use this API in a **non-error situation** + // as this one delivers all parser internals ready for access by userland code. + hash = this.constructParseErrorInfo(null /* no error! */, null /* no exception! 
*/, null, false); + } + + if (sharedState_yy.post_parse) { + rv = sharedState_yy.post_parse.call(this, sharedState_yy, resultValue, hash); + if (typeof rv !== 'undefined') resultValue = rv; + } + if (this.post_parse) { + rv = this.post_parse.call(this, sharedState_yy, resultValue, hash); + if (typeof rv !== 'undefined') resultValue = rv; + } + + // cleanup: + if (hash && hash.destroy) { + hash.destroy(); + } + } + + if (this.__reentrant_call_depth > 1) return resultValue; // do not (yet) kill the sharedState when this is a reentrant run. + + // clean up the lingering lexer structures as well: + if (lexer.cleanupAfterLex) { + lexer.cleanupAfterLex(do_not_nuke_errorinfos); + } + + // prevent lingering circular references from causing memory leaks: + if (sharedState_yy) { + sharedState_yy.lexer = undefined; + sharedState_yy.parser = undefined; + if (lexer.yy === sharedState_yy) { + lexer.yy = undefined; + } + } + sharedState_yy = undefined; + this.parseError = this.originalParseError; + this.quoteName = this.originalQuoteName; + + // nuke the vstack[] array at least as that one will still reference obsoleted user values. + // To be safe, we nuke the other internal stack columns as well... + stack.length = 0; // fastest way to nuke an array without overly bothering the GC + sstack.length = 0; + + vstack.length = 0; + sp = 0; + + // nuke the error hash info instances created during this run. + // Userland code must COPY any data/references + // in the error hash instance(s) it is more permanently interested in. + if (!do_not_nuke_errorinfos) { + for (var i = this.__error_infos.length - 1; i >= 0; i--) { + var el = this.__error_infos[i]; + if (el && typeof el.destroy === 'function') { + el.destroy(); + } + } + this.__error_infos.length = 0; + + + } + + return resultValue; + }; + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + // NOTE: as this API uses parse() as a closure, it MUST be set again on every parse() invocation, + // or else your `lexer`, `sharedState`, etc. references will be *wrong*! + this.constructParseErrorInfo = function parser_constructParseErrorInfo(msg, ex, expected, recoverable) { + var pei = { + errStr: msg, + exception: ex, + text: lexer.match, + value: lexer.yytext, + token: this.describeSymbol(symbol) || symbol, + token_id: symbol, + line: lexer.yylineno, + + expected: expected, + recoverable: recoverable, + state: state, + action: action, + new_state: newState, + symbol_stack: stack, + state_stack: sstack, + value_stack: vstack, + + stack_pointer: sp, + yy: sharedState_yy, + lexer: lexer, + parser: this, + + // and make sure the error info doesn't stay due to potential + // ref cycle via userland code manipulations. + // These would otherwise all be memory leak opportunities! + // + // Note that only array and object references are nuked as those + // constitute the set of elements which can produce a cyclic ref. + // The rest of the members is kept intact as they are harmless. + destroy: function destructParseErrorInfo() { + // remove cyclic references added to error info: + // info.yy = null; + // info.lexer = null; + // info.value = null; + // info.value_stack = null; + // ... 
+ var rec = !!this.recoverable; + for (var key in this) { + if (this.hasOwnProperty(key) && typeof key === 'object') { + this[key] = undefined; + } + } + this.recoverable = rec; + } + }; + // track this instance so we can `destroy()` it once we deem it superfluous and ready for garbage collection! + this.__error_infos.push(pei); + return pei; + }; + + + + + + + + + + + + + + function lex() { + var token = lexer.lex(); + // if token isn't its numeric value, convert + if (typeof token !== 'number') { + token = self.symbols_[token] || token; + } + + return token || EOF; + } + + + var state, action, r, t; + var yyval = { + $: true, + _$: undefined, + yy: sharedState_yy + }; + var p; + var yyrulelen; + var this_production; + var newState; + var retval = false; + + + try { + this.__reentrant_call_depth++; + + lexer.setInput(input, sharedState_yy); + + + + vstack[sp] = null; + sstack[sp] = 0; + stack[sp] = 0; + ++sp; + + + + + + if (this.pre_parse) { + this.pre_parse.call(this, sharedState_yy); + } + if (sharedState_yy.pre_parse) { + sharedState_yy.pre_parse.call(this, sharedState_yy); + } + + newState = sstack[sp - 1]; + for (;;) { + // retrieve state number from top of stack + state = newState; // sstack[sp - 1]; + + // use default actions if available + if (this.defaultActions[state]) { + action = 2; + newState = this.defaultActions[state]; + } else { + // The single `==` condition below covers both these `===` comparisons in a single + // operation: + // + // if (symbol === null || typeof symbol === 'undefined') ... + if (!symbol) { + symbol = lex(); + } + // read action for current state and first input + t = (table[state] && table[state][symbol]) || NO_ACTION; + newState = t[1]; + action = t[0]; + + + + + + + + + + + + // handle parse error + if (!action) { + var errStr; + var errSymbolDescr = (this.describeSymbol(symbol) || symbol); + var expected = this.collect_expected_token_set(state); + + // Report error + if (typeof lexer.yylineno === 'number') { + errStr = 'Parse error on line ' + (lexer.yylineno + 1) + ': '; + } else { + errStr = 'Parse error: '; + } + if (typeof lexer.showPosition === 'function') { + errStr += '\n' + lexer.showPosition(79 - 10, 10) + '\n'; + } + if (expected.length) { + errStr += 'Expecting ' + expected.join(', ') + ', got unexpected ' + errSymbolDescr; + } else { + errStr += 'Unexpected ' + errSymbolDescr; + } + // we cannot recover from the error! + p = this.constructParseErrorInfo(errStr, null, expected, false); + retval = this.parseError(p.errStr, p, this.JisonParserError); + break; + } + + + } + + + + + + + + + + + switch (action) { + // catch misc. parse failures: + default: + // this shouldn't happen, unless resolve defaults are off + if (action instanceof Array) { + p = this.constructParseErrorInfo('Parse Error: multiple actions possible at state: ' + state + ', token: ' + symbol, null, null, false); + retval = this.parseError(p.errStr, p, this.JisonParserError); + break; + } + // Another case of better safe than sorry: in case state transitions come out of another error recovery process + // or a buggy LUT (LookUp Table): + p = this.constructParseErrorInfo('Parsing halted. 
No viable error recovery approach available due to internal system failure.', null, null, false); + retval = this.parseError(p.errStr, p, this.JisonParserError); + break; + + // shift: + case 1: + stack[sp] = symbol; + vstack[sp] = lexer.yytext; + + sstack[sp] = newState; // push state + + ++sp; + symbol = 0; + + + // Pick up the lexer details for the current symbol as that one is not 'look-ahead' any more: + + + + + + + + + + + + + + + + continue; + + // reduce: + case 2: + this_production = this.productions_[newState - 1]; // `this.productions_[]` is zero-based indexed while states start from 1 upwards... + yyrulelen = this_production[1]; + + + + + + + + + + + r = this.performAction.call(yyval, newState, sp - 1, vstack); + + if (typeof r !== 'undefined') { + retval = r; + break; + } + + // pop off stack + sp -= yyrulelen; + + // don't overwrite the `symbol` variable: use a local var to speed things up: + var ntsymbol = this_production[0]; // push nonterminal (reduce) + stack[sp] = ntsymbol; + vstack[sp] = yyval.$; + + // goto new state = table[STATE][NONTERMINAL] + newState = table[sstack[sp - 1]][ntsymbol]; + sstack[sp] = newState; + ++sp; + + + + + + + + + + continue; + + // accept: + case 3: + retval = true; + // Return the `$accept` rule's `$$` result, if available. + // + // Also note that JISON always adds this top-most `$accept` rule (with implicit, + // default, action): + // + // $accept: $end + // %{ $$ = $1; @$ = @1; %} + // + // which, combined with the parse kernel's `$accept` state behaviour coded below, + // will produce the `$$` value output of the rule as the parse result, + // IFF that result is *not* `undefined`. (See also the parser kernel code.) + // + // In code: + // + // %{ + // @$ = @1; // if location tracking support is included + // if (typeof $1 !== 'undefined') + // return $1; + // else + // return true; // the default parse result if the rule actions don't produce anything + // %} + sp--; + if (typeof vstack[sp] !== 'undefined') { + retval = vstack[sp]; + } + + break; + } + + // break out of loop: we accept or fail with error + break; + } + } catch (ex) { + // report exceptions through the parseError callback too, but keep the exception intact + // if it is a known parser or lexer error which has been thrown by parseError() already: + if (ex instanceof this.JisonParserError) { + throw ex; + } + else if (lexer && typeof lexer.JisonLexerError === 'function' && ex instanceof lexer.JisonLexerError) { + throw ex; + } + else { + p = this.constructParseErrorInfo('Parsing aborted due to exception.', ex, null, false); + retval = this.parseError(p.errStr, p, this.JisonParserError); + } + } finally { + retval = this.cleanupAfterParse(retval, true, true); + this.__reentrant_call_depth--; + } // /finally + + return retval; +} +}; +parser$1.originalParseError = parser$1.parseError; +parser$1.originalQuoteName = parser$1.quoteName; + + +/* lexer generated by jison-lex 0.6.1-205 */ + +/* + * Returns a Lexer object of the following structure: + * + * Lexer: { + * yy: {} The so-called "shared state" or rather the *source* of it; + * the real "shared state" `yy` passed around to + * the rule actions, etc. is a direct reference! + * + * This "shared context" object was passed to the lexer by way of + * the `lexer.setInput(str, yy)` API before you may use it. 
+ * + * This "shared context" object is passed to the lexer action code in `performAction()` + * so userland code in the lexer actions may communicate with the outside world + * and/or other lexer rules' actions in more or less complex ways. + * + * } + * + * Lexer.prototype: { + * EOF: 1, + * ERROR: 2, + * + * yy: The overall "shared context" object reference. + * + * JisonLexerError: function(msg, hash), + * + * performAction: function lexer__performAction(yy, yyrulenumber, YY_START), + * + * The function parameters and `this` have the following value/meaning: + * - `this` : reference to the `lexer` instance. + * `yy_` is an alias for `this` lexer instance reference used internally. + * + * - `yy` : a reference to the `yy` "shared state" object which was passed to the lexer + * by way of the `lexer.setInput(str, yy)` API before. + * + * Note: + * The extra arguments you specified in the `%parse-param` statement in your + * **parser** grammar definition file are passed to the lexer via this object + * reference as member variables. + * + * - `yyrulenumber` : index of the matched lexer rule (regex), used internally. + * + * - `YY_START`: the current lexer "start condition" state. + * + * parseError: function(str, hash, ExceptionClass), + * + * constructLexErrorInfo: function(error_message, is_recoverable), + * Helper function. + * Produces a new errorInfo 'hash object' which can be passed into `parseError()`. + * See it's use in this lexer kernel in many places; example usage: + * + * var infoObj = lexer.constructParseErrorInfo('fail!', true); + * var retVal = lexer.parseError(infoObj.errStr, infoObj, lexer.JisonLexerError); + * + * options: { ... lexer %options ... }, + * + * lex: function(), + * Produce one token of lexed input, which was passed in earlier via the `lexer.setInput()` API. + * You MAY use the additional `args...` parameters as per `%parse-param` spec of the **lexer** grammar: + * these extra `args...` are added verbatim to the `yy` object reference as member variables. + * + * WARNING: + * Lexer's additional `args...` parameters (via lexer's `%parse-param`) MAY conflict with + * any attributes already added to `yy` by the **parser** or the jison run-time; + * when such a collision is detected an exception is thrown to prevent the generated run-time + * from silently accepting this confusing and potentially hazardous situation! + * + * cleanupAfterLex: function(do_not_nuke_errorinfos), + * Helper function. + * + * This helper API is invoked when the **parse process** has completed: it is the responsibility + * of the **parser** (or the calling userland code) to invoke this method once cleanup is desired. + * + * This helper may be invoked by user code to ensure the internal lexer gets properly garbage collected. 
+ * + * setInput: function(input, [yy]), + * + * + * input: function(), + * + * + * unput: function(str), + * + * + * more: function(), + * + * + * reject: function(), + * + * + * less: function(n), + * + * + * pastInput: function(n), + * + * + * upcomingInput: function(n), + * + * + * showPosition: function(), + * + * + * test_match: function(regex_match_array, rule_index), + * + * + * next: function(), + * + * + * begin: function(condition), + * + * + * pushState: function(condition), + * + * + * popState: function(), + * + * + * topState: function(), + * + * + * _currentRules: function(), + * + * + * stateStackSize: function(), + * + * + * performAction: function(yy, yy_, yyrulenumber, YY_START), + * + * + * rules: [...], + * + * + * conditions: {associative list: name ==> set}, + * } + * + * + * token location info (`yylloc`): { + * first_line: n, + * last_line: n, + * first_column: n, + * last_column: n, + * range: [start_number, end_number] + * (where the numbers are indexes into the input string, zero-based) + * } + * + * --- + * + * The `parseError` function receives a 'hash' object with these members for lexer errors: + * + * { + * text: (matched text) + * token: (the produced terminal token, if any) + * token_id: (the produced terminal token numeric ID, if any) + * line: (yylineno) + * loc: (yylloc) + * recoverable: (boolean: TRUE when the parser MAY have an error recovery rule + * available for this particular error) + * yy: (object: the current parser internal "shared state" `yy` + * as is also available in the rule actions; this can be used, + * for instance, for advanced error analysis and reporting) + * lexer: (reference to the current lexer instance used by the parser) + * } + * + * while `this` will reference the current lexer instance. + * + * When `parseError` is invoked by the lexer, the default implementation will + * attempt to invoke `yy.parser.parseError()`; when this callback is not provided + * it will try to invoke `yy.parseError()` instead. When that callback is also not + * provided, a `JisonLexerError` exception will be thrown containing the error + * message and `hash`, as constructed by the `constructLexErrorInfo()` API. + * + * Note that the lexer's `JisonLexerError` error class is passed via the + * `ExceptionClass` argument, which is invoked to construct the exception + * instance to be thrown, so technically `parseError` will throw the object + * produced by the `new ExceptionClass(str, hash)` JavaScript expression. + * + * --- + * + * You can specify lexer options by setting / modifying the `.options` object of your Lexer instance. + * These options are available: + * + * (Options are permanent.) + * + * yy: { + * parseError: function(str, hash, ExceptionClass) + * optional: overrides the default `parseError` function. + * } + * + * lexer.options: { + * pre_lex: function() + * optional: is invoked before the lexer is invoked to produce another token. + * `this` refers to the Lexer object. + * post_lex: function(token) { return token; } + * optional: is invoked when the lexer has produced a token `token`; + * this function can override the returned token value by returning another. + * When it does not return any (truthy) value, the lexer will return + * the original `token`. + * `this` refers to the Lexer object. + * + * WARNING: the next set of options are not meant to be changed. They echo the abilities of + * the lexer as per when it was compiled! + * + * ranges: boolean + * optional: `true` ==> token location info will include a .range[] member. 
+ * flex: boolean + * optional: `true` ==> flex-like lexing behaviour where the rules are tested + * exhaustively to find the longest match. + * backtrack_lexer: boolean + * optional: `true` ==> lexer regexes are tested in order and for invoked; + * the lexer terminates the scan when a token is returned by the action code. + * xregexp: boolean + * optional: `true` ==> lexer rule regexes are "extended regex format" requiring the + * `XRegExp` library. When this %option has not been specified at compile time, all lexer + * rule regexes have been written as standard JavaScript RegExp expressions. + * } + */ + + +var lexer$1 = function() { + /** + * See also: + * http://stackoverflow.com/questions/1382107/whats-a-good-way-to-extend-error-in-javascript/#35881508 + * but we keep the prototype.constructor and prototype.name assignment lines too for compatibility + * with userland code which might access the derived class in a 'classic' way. + * + * @public + * @constructor + * @nocollapse + */ + function JisonLexerError(msg, hash) { + Object.defineProperty(this, 'name', { + enumerable: false, + writable: false, + value: 'JisonLexerError' + }); + + if (msg == null) + msg = '???'; + + Object.defineProperty(this, 'message', { + enumerable: false, + writable: true, + value: msg + }); + + this.hash = hash; + var stacktrace; + + if (hash && hash.exception instanceof Error) { + var ex2 = hash.exception; + this.message = ex2.message || msg; + stacktrace = ex2.stack; + } + + if (!stacktrace) { + if (Error.hasOwnProperty('captureStackTrace')) { + // V8 + Error.captureStackTrace(this, this.constructor); + } else { + stacktrace = new Error(msg).stack; + } + } + + if (stacktrace) { + Object.defineProperty(this, 'stack', { + enumerable: false, + writable: false, + value: stacktrace + }); + } + } + + if (typeof Object.setPrototypeOf === 'function') { + Object.setPrototypeOf(JisonLexerError.prototype, Error.prototype); + } else { + JisonLexerError.prototype = Object.create(Error.prototype); + } + + JisonLexerError.prototype.constructor = JisonLexerError; + JisonLexerError.prototype.name = 'JisonLexerError'; + + var lexer = { + +// Code Generator Information Report +// --------------------------------- +// +// Options: +// +// backtracking: .................... false +// location.ranges: ................. true +// location line+column tracking: ... true +// +// +// Forwarded Parser Analysis flags: +// +// uses yyleng: ..................... false +// uses yylineno: ................... false +// uses yytext: ..................... false +// uses yylloc: ..................... false +// uses lexer values: ............... true / true +// location tracking: ............... false +// location assignment: ............. false +// +// +// Lexer Analysis flags: +// +// uses yyleng: ..................... ??? +// uses yylineno: ................... ??? +// uses yytext: ..................... ??? +// uses yylloc: ..................... ??? +// uses ParseError API: ............. ??? +// uses yyerror: .................... ??? +// uses location tracking & editing: ??? +// uses more() API: ................. ??? +// uses unput() API: ................ ??? +// uses reject() API: ............... ??? +// uses less() API: ................. ??? +// uses display APIs pastInput(), upcomingInput(), showPosition(): +// ............................. ??? +// uses describeYYLLOC() API: ....... ??? 
+// +// --------- END OF REPORT ----------- + +EOF: 1, + ERROR: 2, + + // JisonLexerError: JisonLexerError, /// <-- injected by the code generator + + // options: {}, /// <-- injected by the code generator + + // yy: ..., /// <-- injected by setInput() + + __currentRuleSet__: null, /// INTERNAL USE ONLY: internal rule set cache for the current lexer state + + __error_infos: [], /// INTERNAL USE ONLY: the set of lexErrorInfo objects created since the last cleanup + __decompressed: false, /// INTERNAL USE ONLY: mark whether the lexer instance has been 'unfolded' completely and is now ready for use + done: false, /// INTERNAL USE ONLY + _backtrack: false, /// INTERNAL USE ONLY + _input: '', /// INTERNAL USE ONLY + _more: false, /// INTERNAL USE ONLY + _signaled_error_token: false, /// INTERNAL USE ONLY + conditionStack: [], /// INTERNAL USE ONLY; managed via `pushState()`, `popState()`, `topState()` and `stateStackSize()` + match: '', /// READ-ONLY EXTERNAL ACCESS - ADVANCED USE ONLY: tracks input which has been matched so far for the lexer token under construction. `match` is identical to `yytext` except that this one still contains the matched input string after `lexer.performAction()` has been invoked, where userland code MAY have changed/replaced the `yytext` value entirely! + matched: '', /// READ-ONLY EXTERNAL ACCESS - ADVANCED USE ONLY: tracks entire input which has been matched so far + matches: false, /// READ-ONLY EXTERNAL ACCESS - ADVANCED USE ONLY: tracks RE match result for last (successful) match attempt + yytext: '', /// ADVANCED USE ONLY: tracks input which has been matched so far for the lexer token under construction; this value is transferred to the parser as the 'token value' when the parser consumes the lexer token produced through a call to the `lex()` API. + offset: 0, /// READ-ONLY EXTERNAL ACCESS - ADVANCED USE ONLY: tracks the 'cursor position' in the input string, i.e. the number of characters matched so far + yyleng: 0, /// READ-ONLY EXTERNAL ACCESS - ADVANCED USE ONLY: length of matched input for the token under construction (`yytext`) + yylineno: 0, /// READ-ONLY EXTERNAL ACCESS - ADVANCED USE ONLY: 'line number' at which the token under construction is located + yylloc: null, /// READ-ONLY EXTERNAL ACCESS - ADVANCED USE ONLY: tracks location info (lines + columns) for the token under construction + + /** + * INTERNAL USE: construct a suitable error info hash object instance for `parseError`. 
+ * + * @public + * @this {RegExpLexer} + */ + constructLexErrorInfo: function lexer_constructLexErrorInfo(msg, recoverable, show_input_position) { + msg = '' + msg; + + // heuristic to determine if the error message already contains a (partial) source code dump + // as produced by either `showPosition()` or `prettyPrintRange()`: + if (show_input_position == undefined) { + show_input_position = !(msg.indexOf('\n') > 0 && msg.indexOf('^') > 0); + } + + if (this.yylloc && show_input_position) { + if (typeof this.prettyPrintRange === 'function') { + var pretty_src = this.prettyPrintRange(this.yylloc); + + if (!/\n\s*$/.test(msg)) { + msg += '\n'; + } + + msg += '\n Erroneous area:\n' + this.prettyPrintRange(this.yylloc); + } else if (typeof this.showPosition === 'function') { + var pos_str = this.showPosition(); + + if (pos_str) { + if (msg.length && msg[msg.length - 1] !== '\n' && pos_str[0] !== '\n') { + msg += '\n' + pos_str; + } else { + msg += pos_str; + } + } + } + } + + /** @constructor */ + var pei = { + errStr: msg, + recoverable: !!recoverable, + text: this.match, // This one MAY be empty; userland code should use the `upcomingInput` API to obtain more text which follows the 'lexer cursor position'... + token: null, + line: this.yylineno, + loc: this.yylloc, + yy: this.yy, + lexer: this, + + /** + * and make sure the error info doesn't stay due to potential + * ref cycle via userland code manipulations. + * These would otherwise all be memory leak opportunities! + * + * Note that only array and object references are nuked as those + * constitute the set of elements which can produce a cyclic ref. + * The rest of the members is kept intact as they are harmless. + * + * @public + * @this {LexErrorInfo} + */ + destroy: function destructLexErrorInfo() { + // remove cyclic references added to error info: + // info.yy = null; + // info.lexer = null; + // ... + var rec = !!this.recoverable; + + for (var key in this) { + if (this.hasOwnProperty(key) && typeof key === 'object') { + this[key] = undefined; + } + } + + this.recoverable = rec; + } + }; + + // track this instance so we can `destroy()` it once we deem it superfluous and ready for garbage collection! + this.__error_infos.push(pei); + + return pei; + }, + + /** + * handler which is invoked when a lexer error occurs. + * + * @public + * @this {RegExpLexer} + */ + parseError: function lexer_parseError(str, hash, ExceptionClass) { + if (!ExceptionClass) { + ExceptionClass = this.JisonLexerError; + } + + if (this.yy) { + if (this.yy.parser && typeof this.yy.parser.parseError === 'function') { + return this.yy.parser.parseError.call(this, str, hash, ExceptionClass) || this.ERROR; + } else if (typeof this.yy.parseError === 'function') { + return this.yy.parseError.call(this, str, hash, ExceptionClass) || this.ERROR; + } + } + + throw new ExceptionClass(str, hash); + }, + + /** + * method which implements `yyerror(str, ...args)` functionality for use inside lexer actions. 
+ * + * @public + * @this {RegExpLexer} + */ + yyerror: function yyError(str /*, ...args */) { + var lineno_msg = ''; + + if (this.yylloc) { + lineno_msg = ' on line ' + (this.yylineno + 1); + } + + var p = this.constructLexErrorInfo( + 'Lexical error' + lineno_msg + ': ' + str, + this.options.lexerErrorsAreRecoverable + ); + + // Add any extra args to the hash under the name `extra_error_attributes`: + var args = Array.prototype.slice.call(arguments, 1); + + if (args.length) { + p.extra_error_attributes = args; + } + + return this.parseError(p.errStr, p, this.JisonLexerError) || this.ERROR; + }, + + /** + * final cleanup function for when we have completed lexing the input; + * make it an API so that external code can use this one once userland + * code has decided it's time to destroy any lingering lexer error + * hash object instances and the like: this function helps to clean + * up these constructs, which *may* carry cyclic references which would + * otherwise prevent the instances from being properly and timely + * garbage-collected, i.e. this function helps prevent memory leaks! + * + * @public + * @this {RegExpLexer} + */ + cleanupAfterLex: function lexer_cleanupAfterLex(do_not_nuke_errorinfos) { + // prevent lingering circular references from causing memory leaks: + this.setInput('', {}); + + // nuke the error hash info instances created during this run. + // Userland code must COPY any data/references + // in the error hash instance(s) it is more permanently interested in. + if (!do_not_nuke_errorinfos) { + for (var i = this.__error_infos.length - 1; i >= 0; i--) { + var el = this.__error_infos[i]; + + if (el && typeof el.destroy === 'function') { + el.destroy(); + } + } + + this.__error_infos.length = 0; + } + + return this; + }, + + /** + * clear the lexer token context; intended for internal use only + * + * @public + * @this {RegExpLexer} + */ + clear: function lexer_clear() { + this.yytext = ''; + this.yyleng = 0; + this.match = ''; + + // - DO NOT reset `this.matched` + this.matches = false; + + this._more = false; + this._backtrack = false; + var col = (this.yylloc ? this.yylloc.last_column : 0); + + this.yylloc = { + first_line: this.yylineno + 1, + first_column: col, + last_line: this.yylineno + 1, + last_column: col, + range: [this.offset, this.offset] + }; + }, + + /** + * resets the lexer, sets new input + * + * @public + * @this {RegExpLexer} + */ + setInput: function lexer_setInput(input, yy) { + this.yy = yy || this.yy || {}; + + // also check if we've fully initialized the lexer instance, + // including expansion work to be done to go from a loaded + // lexer to a usable lexer: + if (!this.__decompressed) { + // step 1: decompress the regex list: + var rules = this.rules; + + for (var i = 0, len = rules.length; i < len; i++) { + var rule_re = rules[i]; + + // compression: is the RE an xref to another RE slot in the rules[] table? + if (typeof rule_re === 'number') { + rules[i] = rules[rule_re]; + } + } + + // step 2: unfold the conditions[] set to make these ready for use: + var conditions = this.conditions; + + for (var k in conditions) { + var spec = conditions[k]; + var rule_ids = spec.rules; + var len = rule_ids.length; + var rule_regexes = new Array(len + 1); // slot 0 is unused; we use a 1-based index approach here to keep the hottest code in `lexer_next()` fast and simple! 
+                var rule_new_ids = new Array(len + 1);
+
+                for (var i = 0; i < len; i++) {
+                    var idx = rule_ids[i];
+                    var rule_re = rules[idx];
+                    rule_regexes[i + 1] = rule_re;
+                    rule_new_ids[i + 1] = idx;
+                }
+
+                spec.rules = rule_new_ids;
+                spec.__rule_regexes = rule_regexes;
+                spec.__rule_count = len;
+            }
+
+            this.__decompressed = true;
+        }
+
+        this._input = input || '';
+        this.clear();
+        this._signaled_error_token = false;
+        this.done = false;
+        this.yylineno = 0;
+        this.matched = '';
+        this.conditionStack = ['INITIAL'];
+        this.__currentRuleSet__ = null;
+
+        this.yylloc = {
+            first_line: 1,
+            first_column: 0,
+            last_line: 1,
+            last_column: 0,
+            range: [0, 0]
+        };
+
+        this.offset = 0;
+        return this;
+    },
+
+    /**
+     * edit the remaining input via user-specified callback.
+     * This can be used to forward-adjust the input-to-parse,
+     * e.g. inserting macro expansions and the like in the
+     * input which has yet to be lexed.
+     * The behaviour of this API contrasts with the `unput()` et al
+     * APIs as those act on the *consumed* input, while this
+     * one allows one to manipulate the future, without impacting
+     * the current `yyloc` cursor location or any history.
+     *
+     * Use this API to help implement C-preprocessor-like
+     * `#include` statements, etc.
+     *
+     * The provided callback must be synchronous and is
+     * expected to return the edited input (string).
+     *
+     * The `cpsArg` argument value is passed to the callback
+     * as-is.
+     *
+     * `callback` interface:
+     * `function callback(input, cpsArg)`
+     *
+     * - `input` will carry the remaining-input-to-lex string
+     *   from the lexer.
+     * - `cpsArg` is `cpsArg` passed into this API.
+     *
+     * The `this` reference for the callback will be set to
+     * reference this lexer instance so that userland code
+     * in the callback can easily and quickly access any lexer
+     * API.
+     *
+     * When the callback returns a non-string-type falsey value,
+     * we assume the callback did not edit the input and we
+     * will use the input as-is.
+     *
+     * When the callback returns a non-string-type value, it
+     * is converted to a string for lexing via the `"" + retval`
+     * operation. (See also why: http://2ality.com/2012/03/converting-to-string.html
+     * -- that way any returned object's `valueOf()` and `toString()`
+     * methods will be invoked in a proper/desirable order.)
+     *
+     * @public
+     * @this {RegExpLexer}
+     */
+    editRemainingInput: function lexer_editRemainingInput(callback, cpsArg) {
+        var rv = callback.call(this, this._input, cpsArg);
+
+        if (typeof rv !== 'string') {
+            if (rv) {
+                this._input = '' + rv;
+            }
+            // else: keep `this._input` as is.
+        } else {
+            this._input = rv;
+        }
+
+        return this;
+    },
+
+    /**
+     * consumes and returns one char from the input
+     *
+     * @public
+     * @this {RegExpLexer}
+     */
+    input: function lexer_input() {
+        if (!this._input) {
+            //this.done = true;    -- don't set `done` as we want the lex()/next() API to be able to produce one custom EOF token match after this anyhow. (lexer can match special <<EOF>> tokens and perform user action code for a <<EOF>> match, but only does so *once*)
+            return null;
+        }
+
+        var ch = this._input[0];
+        this.yytext += ch;
+        this.yyleng++;
+        this.offset++;
+        this.match += ch;
+        this.matched += ch;
+
+        // Count the linenumber up when we hit the LF (or a stand-alone CR).
+        // On CRLF, the linenumber is incremented when you fetch the CR or the CRLF combo
+        // and we advance immediately past the LF as well, returning both together as if
+        // it was all a single 'character' only.
+ var slice_len = 1; + + var lines = false; + + if (ch === '\n') { + lines = true; + } else if (ch === '\r') { + lines = true; + var ch2 = this._input[1]; + + if (ch2 === '\n') { + slice_len++; + ch += ch2; + this.yytext += ch2; + this.yyleng++; + this.offset++; + this.match += ch2; + this.matched += ch2; + this.yylloc.range[1]++; + } + } + + if (lines) { + this.yylineno++; + this.yylloc.last_line++; + this.yylloc.last_column = 0; + } else { + this.yylloc.last_column++; + } + + this.yylloc.range[1]++; + this._input = this._input.slice(slice_len); + return ch; + }, + + /** + * unshifts one char (or an entire string) into the input + * + * @public + * @this {RegExpLexer} + */ + unput: function lexer_unput(ch) { + var len = ch.length; + var lines = ch.split(/(?:\r\n?|\n)/g); + this._input = ch + this._input; + this.yytext = this.yytext.substr(0, this.yytext.length - len); + this.yyleng = this.yytext.length; + this.offset -= len; + this.match = this.match.substr(0, this.match.length - len); + this.matched = this.matched.substr(0, this.matched.length - len); + + if (lines.length > 1) { + this.yylineno -= lines.length - 1; + this.yylloc.last_line = this.yylineno + 1; + + // Get last entirely matched line into the `pre_lines[]` array's + // last index slot; we don't mind when other previously + // matched lines end up in the array too. + var pre = this.match; + + var pre_lines = pre.split(/(?:\r\n?|\n)/g); + + if (pre_lines.length === 1) { + pre = this.matched; + pre_lines = pre.split(/(?:\r\n?|\n)/g); + } + + this.yylloc.last_column = pre_lines[pre_lines.length - 1].length; + } else { + this.yylloc.last_column -= len; + } + + this.yylloc.range[1] = this.yylloc.range[0] + this.yyleng; + this.done = false; + return this; + }, + + /** + * cache matched text and append it on next action + * + * @public + * @this {RegExpLexer} + */ + more: function lexer_more() { + this._more = true; + return this; + }, + + /** + * signal the lexer that this rule fails to match the input, so the + * next matching rule (regex) should be tested instead. + * + * @public + * @this {RegExpLexer} + */ + reject: function lexer_reject() { + if (this.options.backtrack_lexer) { + this._backtrack = true; + } else { + // when the `parseError()` call returns, we MUST ensure that the error is registered. + // We accomplish this by signaling an 'error' token to be produced for the current + // `.lex()` run. + var lineno_msg = ''; + + if (this.yylloc) { + lineno_msg = ' on line ' + (this.yylineno + 1); + } + + var p = this.constructLexErrorInfo( + 'Lexical error' + lineno_msg + ': You can only invoke reject() in the lexer when the lexer is of the backtracking persuasion (options.backtrack_lexer = true).', + false + ); + + this._signaled_error_token = this.parseError(p.errStr, p, this.JisonLexerError) || this.ERROR; + } + + return this; + }, + + /** + * retain first n characters of the match + * + * @public + * @this {RegExpLexer} + */ + less: function lexer_less(n) { + return this.unput(this.match.slice(n)); + }, + + /** + * return (part of the) already matched input, i.e. for error + * messages. + * + * Limit the returned string length to `maxSize` (default: 20). + * + * Limit the returned string to the `maxLines` number of lines of + * input (default: 1). + * + * Negative limit values equal *unlimited*. 
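+     *
+     * A minimal sketch (the figures are illustrative): when 40 characters of single-line
+     * input have already been consumed, a call such as
+     *
+     *     lexer.pastInput(10, 1);
+     *
+     * returns only the last 10 of those characters, prefixed with a `...` ellipsis to signal
+     * that the snippet was clipped.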
+ * + * @public + * @this {RegExpLexer} + */ + pastInput: function lexer_pastInput(maxSize, maxLines) { + var past = this.matched.substring(0, this.matched.length - this.match.length); + + if (maxSize < 0) + maxSize = past.length; + else if (!maxSize) + maxSize = 20; + + if (maxLines < 0) + maxLines = past.length; // can't ever have more input lines than this! + else if (!maxLines) + maxLines = 1; + + // `substr` anticipation: treat \r\n as a single character and take a little + // more than necessary so that we can still properly check against maxSize + // after we've transformed and limited the newLines in here: + past = past.substr(-maxSize * 2 - 2); + + // now that we have a significantly reduced string to process, transform the newlines + // and chop them, then limit them: + var a = past.replace(/\r\n|\r/g, '\n').split('\n'); + + a = a.slice(-maxLines); + past = a.join('\n'); + + // When, after limiting to maxLines, we still have too much to return, + // do add an ellipsis prefix... + if (past.length > maxSize) { + past = '...' + past.substr(-maxSize); + } + + return past; + }, + + /** + * return (part of the) upcoming input, i.e. for error messages. + * + * Limit the returned string length to `maxSize` (default: 20). + * + * Limit the returned string to the `maxLines` number of lines of input (default: 1). + * + * Negative limit values equal *unlimited*. + * + * > ### NOTE ### + * > + * > *"upcoming input"* is defined as the whole of the both + * > the *currently lexed* input, together with any remaining input + * > following that. *"currently lexed"* input is the input + * > already recognized by the lexer but not yet returned with + * > the lexer token. This happens when you are invoking this API + * > from inside any lexer rule action code block. + * > + * + * @public + * @this {RegExpLexer} + */ + upcomingInput: function lexer_upcomingInput(maxSize, maxLines) { + var next = this.match; + + if (maxSize < 0) + maxSize = next.length + this._input.length; + else if (!maxSize) + maxSize = 20; + + if (maxLines < 0) + maxLines = maxSize; // can't ever have more input lines than this! + else if (!maxLines) + maxLines = 1; + + // `substring` anticipation: treat \r\n as a single character and take a little + // more than necessary so that we can still properly check against maxSize + // after we've transformed and limited the newLines in here: + if (next.length < maxSize * 2 + 2) { + next += this._input.substring(0, maxSize * 2 + 2); // substring is faster on Chrome/V8 + } + + // now that we have a significantly reduced string to process, transform the newlines + // and chop them, then limit them: + var a = next.replace(/\r\n|\r/g, '\n').split('\n'); + + a = a.slice(0, maxLines); + next = a.join('\n'); + + // When, after limiting to maxLines, we still have too much to return, + // do add an ellipsis postfix... + if (next.length > maxSize) { + next = next.substring(0, maxSize) + '...'; + } + + return next; + }, + + /** + * return a string which displays the character position where the + * lexing error occurred, i.e. 
for error messages
+     *
+     * @public
+     * @this {RegExpLexer}
+     */
+    showPosition: function lexer_showPosition(maxPrefix, maxPostfix) {
+        var pre = this.pastInput(maxPrefix).replace(/\s/g, ' ');
+        var c = new Array(pre.length + 1).join('-');
+        return pre + this.upcomingInput(maxPostfix).replace(/\s/g, ' ') + '\n' + c + '^';
+    },
+
+    /**
+     * return a string which displays the lines & columns of input which are referenced
+     * by the given location info range, plus a few lines of context.
+     *
+     * This function pretty-prints the indicated section of the input, with line numbers
+     * and everything!
+     *
+     * This function is very useful to provide highly readable error reports, while
+     * the location range may be specified in various flexible ways:
+     *
+     * - `loc` is the location info object which references the area which should be
+     *   displayed and 'marked up': these lines & columns of text are marked up by `^`
+     *   characters below each character in the entire input range.
+     *
+     * - `context_loc` is the *optional* location info object which instructs this
+     *   pretty-printer how much *leading* context should be displayed alongside
+     *   the area referenced by `loc`. This can help provide context for the displayed
+     *   error, etc.
+     *
+     *   When this location info is not provided, a default context of 3 lines is
+     *   used.
+     *
+     * - `context_loc2` is another *optional* location info object, which serves
+     *   a similar purpose to `context_loc`: it specifies the amount of *trailing*
+     *   context lines to display in the pretty-print output.
+     *
+     *   When this location info is not provided, a default context of 1 line only is
+     *   used.
+     *
+     * Special Notes:
+     *
+     * - when the `loc`-indicated range is very large (about 5 lines or more), then
+     *   only the first and last few lines of this block are printed while a
+     *   `...continued...` message will be printed between them.
+     *
+     *   This serves the purpose of not printing a huge amount of text when the `loc`
+     *   range happens to be huge: this way a manageable & readable output results
+     *   for arbitrarily large ranges.
+     *
+     * - this function can display lines of input which have not yet been lexed.
+     *   `prettyPrintRange()` can access the entire input!
+     *
+     * @public
+     * @this {RegExpLexer}
+     */
+    prettyPrintRange: function lexer_prettyPrintRange(loc, context_loc, context_loc2) {
+        const CONTEXT = 3;
+        const CONTEXT_TAIL = 1;
+        const MINIMUM_VISIBLE_NONEMPTY_LINE_COUNT = 2;
+        var input = this.matched + this._input;
+        var lines = input.split('\n');
+
+        //var show_context = (error_size < 5 || context_loc);
+        var l0 = Math.max(1, (context_loc ? context_loc.first_line : loc.first_line - CONTEXT));
+
+        var l1 = Math.max(1, (context_loc2 ? context_loc2.last_line : loc.last_line + CONTEXT_TAIL));
+        var lineno_display_width = 1 + Math.log10(l1 | 1) | 0;
+        var ws_prefix = new Array(lineno_display_width).join(' ');
+        var nonempty_line_indexes = [];
+
+        var rv = lines.slice(l0 - 1, l1 + 1).map(function injectLineNumber(line, index) {
+            var lno = index + l0;
+            var lno_pfx = (ws_prefix + lno).substr(-lineno_display_width);
+            var rv = lno_pfx + ': ' + line;
+            var errpfx = new Array(lineno_display_width + 1).join('^');
+            var offset = 2 + 1;
+            var len = 0;
+
+            if (lno === loc.first_line) {
+                offset += loc.first_column;
+
+                len = Math.max(
+                    2,
+                    ((lno === loc.last_line ?
loc.last_column : line.length)) - loc.first_column + 1 + ); + } else if (lno === loc.last_line) { + len = Math.max(2, loc.last_column + 1); + } else if (lno > loc.first_line && lno < loc.last_line) { + len = Math.max(2, line.length + 1); + } + + if (len) { + var lead = new Array(offset).join('.'); + var mark = new Array(len).join('^'); + rv += '\n' + errpfx + lead + mark; + + if (line.trim().length > 0) { + nonempty_line_indexes.push(index); + } + } + + rv = rv.replace(/\t/g, ' '); + return rv; + }); + + // now make sure we don't print an overly large amount of error area: limit it + // to the top and bottom line count: + if (nonempty_line_indexes.length > 2 * MINIMUM_VISIBLE_NONEMPTY_LINE_COUNT) { + var clip_start = nonempty_line_indexes[MINIMUM_VISIBLE_NONEMPTY_LINE_COUNT - 1] + 1; + var clip_end = nonempty_line_indexes[nonempty_line_indexes.length - MINIMUM_VISIBLE_NONEMPTY_LINE_COUNT] - 1; + var intermediate_line = new Array(lineno_display_width + 1).join(' ') + ' (...continued...)'; + intermediate_line += '\n' + new Array(lineno_display_width + 1).join('-') + ' (---------------)'; + rv.splice(clip_start, clip_end - clip_start + 1, intermediate_line); + } + + return rv.join('\n'); + }, + + /** + * helper function, used to produce a human readable description as a string, given + * the input `yylloc` location object. + * + * Set `display_range_too` to TRUE to include the string character index position(s) + * in the description if the `yylloc.range` is available. + * + * @public + * @this {RegExpLexer} + */ + describeYYLLOC: function lexer_describe_yylloc(yylloc, display_range_too) { + var l1 = yylloc.first_line; + var l2 = yylloc.last_line; + var c1 = yylloc.first_column; + var c2 = yylloc.last_column; + var dl = l2 - l1; + var dc = c2 - c1; + var rv; + + if (dl === 0) { + rv = 'line ' + l1 + ', '; + + if (dc <= 1) { + rv += 'column ' + c1; + } else { + rv += 'columns ' + c1 + ' .. ' + c2; + } + } else { + rv = 'lines ' + l1 + '(column ' + c1 + ') .. ' + l2 + '(column ' + c2 + ')'; + } + + if (yylloc.range && display_range_too) { + var r1 = yylloc.range[0]; + var r2 = yylloc.range[1] - 1; + + if (r2 <= r1) { + rv += ' {String Offset: ' + r1 + '}'; + } else { + rv += ' {String Offset range: ' + r1 + ' .. ' + r2 + '}'; + } + } + + return rv; + }, + + /** + * test the lexed token: return FALSE when not a match, otherwise return token. + * + * `match` is supposed to be an array coming out of a regex match, i.e. `match[0]` + * contains the actually matched text string. 
+ * + * Also move the input cursor forward and update the match collectors: + * + * - `yytext` + * - `yyleng` + * - `match` + * - `matches` + * - `yylloc` + * - `offset` + * + * @public + * @this {RegExpLexer} + */ + test_match: function lexer_test_match(match, indexed_rule) { + var token, lines, backup, match_str, match_str_len; + + if (this.options.backtrack_lexer) { + // save context + backup = { + yylineno: this.yylineno, + + yylloc: { + first_line: this.yylloc.first_line, + last_line: this.yylloc.last_line, + first_column: this.yylloc.first_column, + last_column: this.yylloc.last_column, + range: this.yylloc.range.slice(0) + }, + + yytext: this.yytext, + match: this.match, + matches: this.matches, + matched: this.matched, + yyleng: this.yyleng, + offset: this.offset, + _more: this._more, + _input: this._input, + + //_signaled_error_token: this._signaled_error_token, + yy: this.yy, + + conditionStack: this.conditionStack.slice(0), + done: this.done + }; + } + + match_str = match[0]; + match_str_len = match_str.length; + + // if (match_str.indexOf('\n') !== -1 || match_str.indexOf('\r') !== -1) { + lines = match_str.split(/(?:\r\n?|\n)/g); + + if (lines.length > 1) { + this.yylineno += lines.length - 1; + this.yylloc.last_line = this.yylineno + 1; + this.yylloc.last_column = lines[lines.length - 1].length; + } else { + this.yylloc.last_column += match_str_len; + } + + // } + this.yytext += match_str; + + this.match += match_str; + this.matched += match_str; + this.matches = match; + this.yyleng = this.yytext.length; + this.yylloc.range[1] += match_str_len; + + // previous lex rules MAY have invoked the `more()` API rather than producing a token: + // those rules will already have moved this `offset` forward matching their match lengths, + // hence we must only add our own match length now: + this.offset += match_str_len; + + this._more = false; + this._backtrack = false; + this._input = this._input.slice(match_str_len); + + // calling this method: + // + // function lexer__performAction(yy, yyrulenumber, YY_START) {...} + token = this.performAction.call( + this, + this.yy, + indexed_rule, + this.conditionStack[this.conditionStack.length - 1] /* = YY_START */ + ); + + // otherwise, when the action codes are all simple return token statements: + //token = this.simpleCaseActionClusters[indexed_rule]; + + if (this.done && this._input) { + this.done = false; + } + + if (token) { + return token; + } else if (this._backtrack) { + // recover context + for (var k in backup) { + this[k] = backup[k]; + } + + this.__currentRuleSet__ = null; + return false; // rule action called reject() implying the next rule should be tested instead. + } else if (this._signaled_error_token) { + // produce one 'error' token as `.parseError()` in `reject()` + // did not guarantee a failure signal by throwing an exception! + token = this._signaled_error_token; + + this._signaled_error_token = false; + return token; + } + + return false; + }, + + /** + * return next match in input + * + * @public + * @this {RegExpLexer} + */ + next: function lexer_next() { + if (this.done) { + this.clear(); + return this.EOF; + } + + if (!this._input) { + this.done = true; + } + + var token, match, tempMatch, index; + + if (!this._more) { + this.clear(); + } + + var spec = this.__currentRuleSet__; + + if (!spec) { + // Update the ruleset cache as we apparently encountered a state change or just started lexing. 
+ // The cache is set up for fast lookup -- we assume a lexer will switch states much less often than it will + // invoke the `lex()` token-producing API and related APIs, hence caching the set for direct access helps + // speed up those activities a tiny bit. + spec = this.__currentRuleSet__ = this._currentRules(); + + // Check whether a *sane* condition has been pushed before: this makes the lexer robust against + // user-programmer bugs such as https://github.com/zaach/jison-lex/issues/19 + if (!spec || !spec.rules) { + var lineno_msg = ''; + + if (this.options.trackPosition) { + lineno_msg = ' on line ' + (this.yylineno + 1); + } + + var p = this.constructLexErrorInfo( + 'Internal lexer engine error' + lineno_msg + ': The lex grammar programmer pushed a non-existing condition name "' + this.topState() + '"; this is a fatal error and should be reported to the application programmer team!', + false + ); + + // produce one 'error' token until this situation has been resolved, most probably by parse termination! + return this.parseError(p.errStr, p, this.JisonLexerError) || this.ERROR; + } + } + + var rule_ids = spec.rules; + var regexes = spec.__rule_regexes; + var len = spec.__rule_count; + + // Note: the arrays are 1-based, while `len` itself is a valid index, + // hence the non-standard less-or-equal check in the next loop condition! + for (var i = 1; i <= len; i++) { + tempMatch = this._input.match(regexes[i]); + + if (tempMatch && (!match || tempMatch[0].length > match[0].length)) { + match = tempMatch; + index = i; + + if (this.options.backtrack_lexer) { + token = this.test_match(tempMatch, rule_ids[i]); + + if (token !== false) { + return token; + } else if (this._backtrack) { + match = undefined; + continue; // rule action called reject() implying a rule MISmatch. + } else { + // else: this is a lexer rule which consumes input without producing a token (e.g. whitespace) + return false; + } + } else if (!this.options.flex) { + break; + } + } + } + + if (match) { + token = this.test_match(match, rule_ids[index]); + + if (token !== false) { + return token; + } + + // else: this is a lexer rule which consumes input without producing a token (e.g. whitespace) + return false; + } + + if (!this._input) { + this.done = true; + this.clear(); + return this.EOF; + } else { + var lineno_msg = ''; + + if (this.options.trackPosition) { + lineno_msg = ' on line ' + (this.yylineno + 1); + } + + var p = this.constructLexErrorInfo( + 'Lexical error' + lineno_msg + ': Unrecognized text.', + this.options.lexerErrorsAreRecoverable + ); + + var pendingInput = this._input; + var activeCondition = this.topState(); + var conditionStackDepth = this.conditionStack.length; + token = this.parseError(p.errStr, p, this.JisonLexerError) || this.ERROR; + + if (token === this.ERROR) { + // we can try to recover from a lexer error that `parseError()` did not 'recover' for us + // by moving forward at least one character at a time IFF the (user-specified?) `parseError()` + // has not consumed/modified any pending input or changed state in the error handler: + if (!this.matches && // and make sure the input has been modified/consumed ... + pendingInput === this._input && // ...or the lexer state has been modified significantly enough + // to merit a non-consuming error handling action right now. 
+ activeCondition === this.topState() && conditionStackDepth === this.conditionStack.length) { + this.input(); + } + } + + return token; + } + }, + + /** + * return next match that has a token + * + * @public + * @this {RegExpLexer} + */ + lex: function lexer_lex() { + var r; + + // allow the PRE/POST handlers set/modify the return token for maximum flexibility of the generated lexer: + if (typeof this.options.pre_lex === 'function') { + r = this.options.pre_lex.call(this); + } + + while (!r) { + r = this.next(); + } + + if (typeof this.options.post_lex === 'function') { + // (also account for a userdef function which does not return any value: keep the token as is) + r = this.options.post_lex.call(this, r) || r; + } + + return r; + }, + + /** + * backwards compatible alias for `pushState()`; + * the latter is symmetrical with `popState()` and we advise to use + * those APIs in any modern lexer code, rather than `begin()`. + * + * @public + * @this {RegExpLexer} + */ + begin: function lexer_begin(condition) { + return this.pushState(condition); + }, + + /** + * activates a new lexer condition state (pushes the new lexer + * condition state onto the condition stack) + * + * @public + * @this {RegExpLexer} + */ + pushState: function lexer_pushState(condition) { + this.conditionStack.push(condition); + this.__currentRuleSet__ = null; + return this; + }, + + /** + * pop the previously active lexer condition state off the condition + * stack + * + * @public + * @this {RegExpLexer} + */ + popState: function lexer_popState() { + var n = this.conditionStack.length - 1; + + if (n > 0) { + this.__currentRuleSet__ = null; + return this.conditionStack.pop(); + } else { + return this.conditionStack[0]; + } + }, + + /** + * return the currently active lexer condition state; when an index + * argument is provided it produces the N-th previous condition state, + * if available + * + * @public + * @this {RegExpLexer} + */ + topState: function lexer_topState(n) { + n = this.conditionStack.length - 1 - Math.abs(n || 0); + + if (n >= 0) { + return this.conditionStack[n]; + } else { + return 'INITIAL'; + } + }, + + /** + * (internal) determine the lexer rule set which is active for the + * currently active lexer condition state + * + * @public + * @this {RegExpLexer} + */ + _currentRules: function lexer__currentRules() { + if (this.conditionStack.length && this.conditionStack[this.conditionStack.length - 1]) { + return this.conditions[this.conditionStack[this.conditionStack.length - 1]]; + } else { + return this.conditions['INITIAL']; + } + }, + + /** + * return the number of states currently on the stack + * + * @public + * @this {RegExpLexer} + */ + stateStackSize: function lexer_stateStackSize() { + return this.conditionStack.length; + }, + + options: { + xregexp: true, + ranges: true, + trackPosition: true, + easy_keyword_rules: true + }, + + JisonLexerError: JisonLexerError, + + performAction: function lexer__performAction(yy, yyrulenumber, YY_START) { + var yy_ = this; + switch (yyrulenumber) { + case 0: + /*! Conditions:: INITIAL */ + /*! Rule:: \s+ */ + /* skip whitespace */ + break; + + case 3: + /*! Conditions:: INITIAL */ + /*! Rule:: \[{ID}\] */ + yy_.yytext = this.matches[1]; + + return 9; + break; + + default: + return this.simpleCaseActionClusters[yyrulenumber]; + } + }, + + simpleCaseActionClusters: { + /*! Conditions:: INITIAL */ + /*! Rule:: {ID} */ + 1: 10, + + /*! Conditions:: INITIAL */ + /*! Rule:: \$end\b */ + 2: 10, + + /*! Conditions:: INITIAL */ + /*! 
Rule:: '{QUOTED_STRING_CONTENT}' */ + 4: 10, + + /*! Conditions:: INITIAL */ + /*! Rule:: "{DOUBLEQUOTED_STRING_CONTENT}" */ + 5: 10, + + /*! Conditions:: INITIAL */ + /*! Rule:: \. */ + 6: 10, + + /*! Conditions:: INITIAL */ + /*! Rule:: \( */ + 7: 4, + + /*! Conditions:: INITIAL */ + /*! Rule:: \) */ + 8: 5, + + /*! Conditions:: INITIAL */ + /*! Rule:: \* */ + 9: 6, + + /*! Conditions:: INITIAL */ + /*! Rule:: \? */ + 10: 7, + + /*! Conditions:: INITIAL */ + /*! Rule:: \| */ + 11: 3, + + /*! Conditions:: INITIAL */ + /*! Rule:: \+ */ + 12: 8, + + /*! Conditions:: INITIAL */ + /*! Rule:: $ */ + 13: 1 + }, + + rules: [ + /* 0: */ /^(?:\s+)/, + /* 1: */ new XRegExp('^(?:([\\p{Alphabetic}_](?:[\\p{Alphabetic}\\p{Number}_])*))', ''), + /* 2: */ /^(?:\$end\b)/, + /* 3: */ new XRegExp('^(?:\\[([\\p{Alphabetic}_](?:[\\p{Alphabetic}\\p{Number}_])*)\\])', ''), + /* 4: */ /^(?:'((?:\\'|\\[^']|[^'\\])*)')/, + /* 5: */ /^(?:"((?:\\"|\\[^"]|[^"\\])*)")/, + /* 6: */ /^(?:\.)/, + /* 7: */ /^(?:\()/, + /* 8: */ /^(?:\))/, + /* 9: */ /^(?:\*)/, + /* 10: */ /^(?:\?)/, + /* 11: */ /^(?:\|)/, + /* 12: */ /^(?:\+)/, + /* 13: */ /^(?:$)/ + ], + + conditions: { + 'INITIAL': { + rules: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13], + inclusive: true + } + } + }; + + return lexer; +}(); +parser$1.lexer = lexer$1; + +function Parser$1() { + this.yy = {}; +} +Parser$1.prototype = parser$1; +parser$1.Parser = Parser$1; + +function yyparse$1() { + return parser$1.parse.apply(parser$1, arguments); +} + + + +var parser$2 = { + parser: parser$1, + Parser: Parser$1, + parse: yyparse$1, + +}; + +//import assert from 'assert'; + +var devDebug = 0; + +// WARNING: this regex MUST match the regex for `ID` in ebnf-parser::bnf.l jison language lexer spec! (`ID = [{ALPHA}]{ALNUM}*`) +// +// This is the base XRegExp ID regex used in many places; this should match the ID macro definition in the EBNF/BNF parser et al as well! +const ID_REGEX_BASE = '[\\p{Alphabetic}_][\\p{Alphabetic}_\\p{Number}]*'; + +// produce a unique production symbol. +// Use this to produce rule productions from transformed EBNF which are +// guaranteed not to collide with previously generated / already existing +// rules (~ symbols). +function generateUniqueSymbol(id, postfix, opts) { + var sym = id + postfix; + if (opts.grammar[sym]) { + var i = 2; // the first occurrence won't have a number, this is already a collision, so start numbering at *2*. + do { + sym = id + postfix + i; + i++; + } while (opts.grammar[sym]); + } + return sym; +} + +function generatePushAction(handle, offset) { + var terms = handle.terms; + var rv = []; + + for (var i = 0, len = terms.length; i < len; i++) { + rv.push('$' + (i + offset)); + } + rv = rv.join(', '); + // and make sure we contain a term series unambiguously, i.e. anything more complex than + // a single term inside an EBNF check is produced as an array so we can differentiate + // between */+/? EBNF operator results and groups of tokens per individual match. + if (len > 1) { + rv = '[' + rv + ']'; + } + return rv; +} + +function transformExpression(e, opts, emit) { + var type = e[0], + value = e[1], + name = false, + has_transformed = 0; + var list, n; + + if (type === 'xalias') { + type = e[1]; + value = e[2]; + name = e[3]; + if (type) { + e = e.slice(1); + } else { + e = value; + type = e[0]; + value = e[1]; + } + if (devDebug > 3) console.log('xalias: ', e, type, value, name); + } + + if (type === 'symbol') { + n = e[1]; + if (devDebug > 2) console.log('symbol EMIT: ', n + (name ? 
'[' + name + ']' : '')); + emit(n + (name ? '[' + name + ']' : '')); + } else if (type === '+') { + if (!name) { + name = generateUniqueSymbol(opts.production, '_repetition_plus', opts); + } + if (devDebug > 2) console.log('+ EMIT name: ', name); + emit(name); + + has_transformed = 1; + + opts = optsForProduction(name, opts.grammar); + list = transformExpressionList([value], opts); + opts.grammar[name] = [ + [ + list.fragment, + '$$ = [' + generatePushAction(list, 1) + '];' + ], + [ + name + ' ' + list.fragment, + '$1.push(' + generatePushAction(list, 2) + ');\n$$ = $1;' + ] + ]; + } else if (type === '*') { + if (!name) { + name = generateUniqueSymbol(opts.production, '_repetition', opts); + } + if (devDebug > 2) console.log('* EMIT name: ', name); + emit(name); + + has_transformed = 1; + + opts = optsForProduction(name, opts.grammar); + list = transformExpressionList([value], opts); + opts.grammar[name] = [ + [ + '', + '$$ = [];' + ], + [ + name + ' ' + list.fragment, + '$1.push(' + generatePushAction(list, 2) + ');\n$$ = $1;' + ] + ]; + } else if (type === '?') { + if (!name) { + name = generateUniqueSymbol(opts.production, '_option', opts); + } + if (devDebug > 2) console.log('? EMIT name: ', name); + emit(name); + + has_transformed = 1; + + opts = optsForProduction(name, opts.grammar); + list = transformExpressionList([value], opts); + // you want to be able to check if 0 or 1 occurrences were recognized: since jison + // by default *copies* the lexer token value, i.e. `$$ = $1` is the (optional) default action, + // we will need to set the action up explicitly in case of the 0-count match: + // `$$ = undefined`. + // + // Note that we MUST return an array as the + // '1 occurrence' match CAN carry multiple terms, e.g. in constructs like + // `(T T T)?`, which would otherwise be unrecognizable from the `T*` construct. 
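+            //
+            // A rough sketch of the rewrite performed here (rule and symbol names are
+            // illustrative; the real name is produced by `generateUniqueSymbol()` above):
+            // an EBNF rule such as
+            //
+            //     stmt : FOO BAR? ;
+            //
+            // is rewritten into plain BNF along these lines:
+            //
+            //     stmt_option : /* empty */    { $$ = undefined; }
+            //                 | BAR            { $$ = $1; }
+            //                 ;
+            //     stmt        : FOO stmt_option ;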
+ opts.grammar[name] = [ + [ + '', + '$$ = undefined;' + ], + [ + list.fragment, + '$$ = ' + generatePushAction(list, 1) + ';' + ] + ]; + } else if (type === '()') { + if (value.length === 1 && !name) { + list = transformExpressionList(value[0], opts); + if (list.first_transformed_term_index) { + has_transformed = list.first_transformed_term_index; + } + if (devDebug > 2) console.log('group EMIT len=1: ', list); + emit(list); + } else { + if (!name) { + name = generateUniqueSymbol(opts.production, '_group', opts); + } + if (devDebug > 2) console.log('group EMIT name: ', name); + emit(name); + + has_transformed = 1; + + opts = optsForProduction(name, opts.grammar); + opts.grammar[name] = value.map(function (handle) { + var list = transformExpressionList(handle, opts); + return [ + list.fragment, + '$$ = ' + generatePushAction(list, 1) + ';' + ]; + }); + } + } + + return has_transformed; +} + +function transformExpressionList(list, opts) { + var first_transformed_term_index = false; + var terms = list.reduce(function (tot, e) { + var ci = tot.length; + + var has_transformed = transformExpression(e, opts, function (name) { + if (name.terms) { + tot.push.apply(tot, name.terms); + } else { + tot.push(name); + } + }); + + if (has_transformed) { + first_transformed_term_index = ci + has_transformed; + } + return tot; + }, []); + + return { + fragment: terms.join(' '), + terms: terms, + first_transformed_term_index: first_transformed_term_index // 1-based index + }; +} + +function optsForProduction(id, grammar) { + return { + production: id, + grammar: grammar + }; +} + +function transformProduction(id, production, grammar) { + var transform_opts = optsForProduction(id, grammar); + return production.map(function (handle) { + var action = null, + opts = null; + var i, len, n; + + if (typeof handle !== 'string') { + action = handle[1]; + opts = handle[2]; + handle = handle[0]; + } + var expressions = parser$2.parse(handle); + + if (devDebug > 1) console.log('\n================\nEBNF transform expressions:\n ', handle, opts, JSON.stringify(expressions, null, 2)); + + var list = transformExpressionList(expressions, transform_opts); + + var ret = [list.fragment]; + if (action) { + // make sure the action doesn't address any inner items. 
+ if (list.first_transformed_term_index) { + var rhs = list.fragment; + // seek out all names and aliases; strip out literal tokens first as those cannot serve as $names: + var alist = list.terms; // rhs.replace(/'[^']+'/g, '~').replace(/"[^"]+"/g, '~').split(' '); + // we also know at which index the first transformation occurred: + if (devDebug > 2) console.log('alist ~ rhs rule terms: ', alist, rhs); + + var alias_re = new XRegExp(`\\[${ID_REGEX_BASE}\\]`); + var term_re = new XRegExp(`^${ID_REGEX_BASE}$`); + // and collect the PERMITTED aliases: the names of the terms and all the remaining aliases + var good_aliases = {}; + var alias_cnt = {}; + var donotalias = {}; + + // WARNING: this replicates the knowledge/code of jison.js::addName() + var addName = function addNameEBNF(s, i) { + var base = s.replace(/[0-9]+$/, ''); + var dna = donotalias[base]; + + if (good_aliases[s]) { + alias_cnt[s]++; + if (!dna) { + good_aliases[s + alias_cnt[s]] = i + 1; + alias_cnt[s + alias_cnt[s]] = 1; + } + } else { + good_aliases[s] = i + 1; + alias_cnt[s] = 1; + if (!dna) { + good_aliases[s + alias_cnt[s]] = i + 1; + alias_cnt[s + alias_cnt[s]] = 1; + } + } + }; + + // WARNING: this replicates the knowledge/code of jison.js::markBasename() + var markBasename = function markBasenameEBNF(s) { + if (/[0-9]$/.test(s)) { + s = s.replace(/[0-9]+$/, ''); + donotalias[s] = true; + } + }; + + // mark both regular and aliased names, e.g., `id[alias1]` and `id1` + // + // WARNING: this replicates the knowledge/code of jison.js::markBasename()+addName() usage + for (i = 0, len = alist.length; i < len; i++) { + var term = alist[i]; + var alias = term.match(alias_re); + if (alias) { + markBasename(alias[0].substr(1, alias[0].length - 2)); + term = term.replace(alias_re, ''); + } + if (term.match(term_re)) { + markBasename(term); + } + } + // then check & register both regular and aliased names, e.g., `id[alias1]` and `id1` + for (i = 0, len = alist.length; i < len; i++) { + var term = alist[i]; + var alias = term.match(alias_re); + if (alias) { + addName(alias[0].substr(1, alias[0].length - 2), i); + term = term.replace(alias_re, ''); + } + if (term.match(term_re)) { + addName(term, i); + } + } + if (devDebug > 2) console.log('good_aliases: ', { + donotalias: donotalias, + good_aliases: good_aliases, + alias_cnt: alias_cnt, + }); + + // now scan the action for all named and numeric semantic values ($nonterminal / $1 / @1, ##1, ...) + // + // Note that `#name` are straight **static** symbol translations, which are okay as they don't + // require access to the parse stack: `#n` references can be resolved completely + // at grammar compile time. + // + var nameref_re = new XRegExp(`(?:[$@]|##)${ID_REGEX_BASE}`, 'g'); + var named_spots = nameref_re.exec(action); + var numbered_spots = action.match(/(?:[$@]|##)[0-9]+\b/g); + var max_term_index = list.terms.length; + if (devDebug > 2) console.log('ACTION named_spots: ', named_spots); + if (devDebug > 2) console.log('ACTION numbered_spots: ', numbered_spots); + + // loop through the XRegExp alias regex matches in `action` + while (named_spots) { + n = named_spots[0].replace(/^(?:[$@]|##)/, ''); + if (!good_aliases[n]) { + throw new Error('The action block references the named alias "' + n + '" ' + + 'which is not available in production "' + handle + '"; ' + + 'it probably got removed by the EBNF rule rewrite process.\n' + + 'Be reminded that you cannot reference sub-elements within EBNF */+/? 
groups, ' + + 'only the outer-most EBNF group alias will remain available at all times ' + + 'due to the EBNF-to-BNF rewrite process.'); + } + + if (alias_cnt[n] !== 1) { + throw new Error('The action block references the ambiguous named alias or term reference "' + n + '" ' + + 'which is mentioned ' + alias_cnt[n] + ' times in production "' + handle + '", implicit and explicit aliases included.\n' + + 'You should either provide unambiguous = uniquely named aliases for these terms or use numeric index references (e.g. `$3`) as a stop-gap in your action code.\n' + + 'Be reminded that you cannot reference sub-elements within EBNF */+/? groups, ' + + 'only the outer-most EBNF group alias will remain available at all times ' + + 'due to the EBNF-to-BNF rewrite process.'); + } + //assert(good_aliases[n] <= max_term_index, 'max term index'); + + named_spots = nameref_re.exec(action); + } + if (numbered_spots) { + for (i = 0, len = numbered_spots.length; i < len; i++) { + n = parseInt(numbered_spots[i].replace(/^(?:[$@]|##)/, '')); + if (n > max_term_index) { + /* @const */ var n_suffixes = [ 'st', 'nd', 'rd', 'th' ]; + throw new Error('The action block references the ' + n + n_suffixes[Math.max(0, Math.min(3, n - 1))] + ' term, ' + + 'which is not available in production "' + handle + '"; ' + + 'Be reminded that you cannot reference sub-elements within EBNF */+/? groups, ' + + 'only the outer-most EBNF group alias will remain available at all times ' + + 'due to the EBNF-to-BNF rewrite process.'); + } + } + } + } + ret.push(action); + } + if (opts) { + ret.push(opts); + } + if (devDebug > 1) console.log('\n\nEBNF tx result:\n ', JSON.stringify(list, null, 2), JSON.stringify(ret, null, 2)); + + if (ret.length === 1) { + return ret[0]; + } else { + return ret; + } + }); +} + +var ref_list; +var ref_names; + +// create a deep copy of the input, so we will keep the input constant. +function deepClone(from, sub) { + if (sub == null) { + ref_list = []; + ref_names = []; + sub = 'root'; + } + if (typeof from === 'function') return from; + if (from == null || typeof from !== 'object') return from; + if (from.constructor !== Object && from.constructor !== Array) { + return from; + } + + for (var i = 0, len = ref_list.length; i < len; i++) { + if (ref_list[i] === from) { + throw new Error('[Circular/Xref:' + ref_names[i] + ']'); // circular or cross reference + } + } + ref_list.push(from); + ref_names.push(sub); + sub += '.'; + + var to = new from.constructor(); + for (var name in from) { + to[name] = deepClone(from[name], sub + name); + } + return to; +} + +function transformGrammar(grammar) { + grammar = deepClone(grammar); + + Object.keys(grammar).forEach(function transformGrammarForKey(id) { + grammar[id] = transformProduction(id, grammar[id], grammar); + }); + + return grammar; +} + +function transform(ebnf) { + if (devDebug > 0) console.log('EBNF:\n ', JSON.stringify(ebnf, null, 2)); + var rv = transformGrammar(ebnf); + if (devDebug > 0) console.log('\n\nEBNF after transformation:\n ', JSON.stringify(rv, null, 2)); + + return rv; +} + +// hack: +var assert; + +/* parser generated by jison 0.6.1-205 */ + +/* + * Returns a Parser object of the following structure: + * + * Parser: { + * yy: {} The so-called "shared state" or rather the *source* of it; + * the real "shared state" `yy` passed around to + * the rule actions, etc. is a derivative/copy of this one, + * not a direct reference! 
+ * } + * + * Parser.prototype: { + * yy: {}, + * EOF: 1, + * TERROR: 2, + * + * trace: function(errorMessage, ...), + * + * JisonParserError: function(msg, hash), + * + * quoteName: function(name), + * Helper function which can be overridden by user code later on: put suitable + * quotes around literal IDs in a description string. + * + * originalQuoteName: function(name), + * The basic quoteName handler provided by JISON. + * `cleanupAfterParse()` will clean up and reset `quoteName()` to reference this function + * at the end of the `parse()`. + * + * describeSymbol: function(symbol), + * Return a more-or-less human-readable description of the given symbol, when + * available, or the symbol itself, serving as its own 'description' for lack + * of something better to serve up. + * + * Return NULL when the symbol is unknown to the parser. + * + * symbols_: {associative list: name ==> number}, + * terminals_: {associative list: number ==> name}, + * nonterminals: {associative list: rule-name ==> {associative list: number ==> rule-alt}}, + * terminal_descriptions_: (if there are any) {associative list: number ==> description}, + * productions_: [...], + * + * performAction: function parser__performAction(yytext, yyleng, yylineno, yyloc, yystate, yysp, yyvstack, yylstack, yystack, yysstack), + * + * The function parameters and `this` have the following value/meaning: + * - `this` : reference to the `yyval` internal object, which has members (`$` and `_$`) + * to store/reference the rule value `$$` and location info `@$`. + * + * One important thing to note about `this` a.k.a. `yyval`: every *reduce* action gets + * to see the same object via the `this` reference, i.e. if you wish to carry custom + * data from one reduce action through to the next within a single parse run, then you + * may get nasty and use `yyval` a.k.a. `this` for storing you own semi-permanent data. + * + * `this.yy` is a direct reference to the `yy` shared state object. + * + * `%parse-param`-specified additional `parse()` arguments have been added to this `yy` + * object at `parse()` start and are therefore available to the action code via the + * same named `yy.xxxx` attributes (where `xxxx` represents a identifier name from + * the %parse-param` list. + * + * - `yytext` : reference to the lexer value which belongs to the last lexer token used + * to match this rule. This is *not* the look-ahead token, but the last token + * that's actually part of this rule. + * + * Formulated another way, `yytext` is the value of the token immediately preceeding + * the current look-ahead token. + * Caveats apply for rules which don't require look-ahead, such as epsilon rules. + * + * - `yyleng` : ditto as `yytext`, only now for the lexer.yyleng value. + * + * - `yylineno`: ditto as `yytext`, only now for the lexer.yylineno value. + * + * - `yyloc` : ditto as `yytext`, only now for the lexer.yylloc lexer token location info. + * + * WARNING: since jison 0.4.18-186 this entry may be NULL/UNDEFINED instead + * of an empty object when no suitable location info can be provided. + * + * - `yystate` : the current parser state number, used internally for dispatching and + * executing the action code chunk matching the rule currently being reduced. + * + * - `yysp` : the current state stack position (a.k.a. 'stack pointer') + * + * This one comes in handy when you are going to do advanced things to the parser + * stacks, all of which are accessible from your action code (see the next entries below). 
+ * + * Also note that you can access this and other stack index values using the new double-hash + * syntax, i.e. `##$ === ##0 === yysp`, while `##1` is the stack index for all things + * related to the first rule term, just like you have `$1`, `@1` and `#1`. + * This is made available to write very advanced grammar action rules, e.g. when you want + * to investigate the parse state stack in your action code, which would, for example, + * be relevant when you wish to implement error diagnostics and reporting schemes similar + * to the work described here: + * + * + Pottier, F., 2016. Reachability and error diagnosis in LR(1) automata. + * In Journées Francophones des Languages Applicatifs. + * + * + Jeffery, C.L., 2003. Generating LR syntax error messages from examples. + * ACM Transactions on Programming Languages and Systems (TOPLAS), 25(5), pp.631–640. + * + * - `yyrulelength`: the current rule's term count, i.e. the number of entries occupied on the stack. + * + * This one comes in handy when you are going to do advanced things to the parser + * stacks, all of which are accessible from your action code (see the next entries below). + * + * - `yyvstack`: reference to the parser value stack. Also accessed via the `$1` etc. + * constructs. + * + * - `yylstack`: reference to the parser token location stack. Also accessed via + * the `@1` etc. constructs. + * + * WARNING: since jison 0.4.18-186 this array MAY contain slots which are + * UNDEFINED rather than an empty (location) object, when the lexer/parser + * action code did not provide a suitable location info object when such a + * slot was filled! + * + * - `yystack` : reference to the parser token id stack. Also accessed via the + * `#1` etc. constructs. + * + * Note: this is a bit of a **white lie** as we can statically decode any `#n` reference to + * its numeric token id value, hence that code wouldn't need the `yystack` but *you* might + * want access this array for your own purposes, such as error analysis as mentioned above! + * + * Note that this stack stores the current stack of *tokens*, that is the sequence of + * already parsed=reduced *nonterminals* (tokens representing rules) and *terminals* + * (lexer tokens *shifted* onto the stack until the rule they belong to is found and + * *reduced*. + * + * - `yysstack`: reference to the parser state stack. This one carries the internal parser + * *states* such as the one in `yystate`, which are used to represent + * the parser state machine in the *parse table*. *Very* *internal* stuff, + * what can I say? If you access this one, you're clearly doing wicked things + * + * - `...` : the extra arguments you specified in the `%parse-param` statement in your + * grammar definition file. + * + * table: [...], + * State transition table + * ---------------------- + * + * index levels are: + * - `state` --> hash table + * - `symbol` --> action (number or array) + * + * If the `action` is an array, these are the elements' meaning: + * - index [0]: 1 = shift, 2 = reduce, 3 = accept + * - index [1]: GOTO `state` + * + * If the `action` is a number, it is the GOTO `state` + * + * defaultActions: {...}, + * + * parseError: function(str, hash, ExceptionClass), + * yyError: function(str, ...), + * yyRecovering: function(), + * yyErrOk: function(), + * yyClearIn: function(), + * + * constructParseErrorInfo: function(error_message, exception_object, expected_token_set, is_recoverable), + * Helper function **which will be set up during the first invocation of the `parse()` method**. 
+ * Produces a new errorInfo 'hash object' which can be passed into `parseError()`. + * See it's use in this parser kernel in many places; example usage: + * + * var infoObj = parser.constructParseErrorInfo('fail!', null, + * parser.collect_expected_token_set(state), true); + * var retVal = parser.parseError(infoObj.errStr, infoObj, parser.JisonParserError); + * + * originalParseError: function(str, hash, ExceptionClass), + * The basic `parseError` handler provided by JISON. + * `cleanupAfterParse()` will clean up and reset `parseError()` to reference this function + * at the end of the `parse()`. + * + * options: { ... parser %options ... }, + * + * parse: function(input[, args...]), + * Parse the given `input` and return the parsed value (or `true` when none was provided by + * the root action, in which case the parser is acting as a *matcher*). + * You MAY use the additional `args...` parameters as per `%parse-param` spec of this grammar: + * these extra `args...` are added verbatim to the `yy` object reference as member variables. + * + * WARNING: + * Parser's additional `args...` parameters (via `%parse-param`) MAY conflict with + * any attributes already added to `yy` by the jison run-time; + * when such a collision is detected an exception is thrown to prevent the generated run-time + * from silently accepting this confusing and potentially hazardous situation! + * + * The lexer MAY add its own set of additional parameters (via the `%parse-param` line in + * the lexer section of the grammar spec): these will be inserted in the `yy` shared state + * object and any collision with those will be reported by the lexer via a thrown exception. + * + * cleanupAfterParse: function(resultValue, invoke_post_methods, do_not_nuke_errorinfos), + * Helper function **which will be set up during the first invocation of the `parse()` method**. + * This helper API is invoked at the end of the `parse()` call, unless an exception was thrown + * and `%options no-try-catch` has been defined for this grammar: in that case this helper MAY + * be invoked by calling user code to ensure the `post_parse` callbacks are invoked and + * the internal parser gets properly garbage collected under these particular circumstances. + * + * yyMergeLocationInfo: function(first_index, last_index, first_yylloc, last_yylloc, dont_look_back), + * Helper function **which will be set up during the first invocation of the `parse()` method**. + * This helper API can be invoked to calculate a spanning `yylloc` location info object. + * + * Note: %epsilon rules MAY specify no `first_index` and `first_yylloc`, in which case + * this function will attempt to obtain a suitable location marker by inspecting the location stack + * backwards. + * + * For more info see the documentation comment further below, immediately above this function's + * implementation. + * + * lexer: { + * yy: {...}, A reference to the so-called "shared state" `yy` once + * received via a call to the `.setInput(input, yy)` lexer API. 
+ * EOF: 1, + * ERROR: 2, + * JisonLexerError: function(msg, hash), + * parseError: function(str, hash, ExceptionClass), + * setInput: function(input, [yy]), + * input: function(), + * unput: function(str), + * more: function(), + * reject: function(), + * less: function(n), + * pastInput: function(n), + * upcomingInput: function(n), + * showPosition: function(), + * test_match: function(regex_match_array, rule_index, ...), + * next: function(...), + * lex: function(...), + * begin: function(condition), + * pushState: function(condition), + * popState: function(), + * topState: function(), + * _currentRules: function(), + * stateStackSize: function(), + * cleanupAfterLex: function() + * + * options: { ... lexer %options ... }, + * + * performAction: function(yy, yy_, $avoiding_name_collisions, YY_START, ...), + * rules: [...], + * conditions: {associative list: name ==> set}, + * } + * } + * + * + * token location info (@$, _$, etc.): { + * first_line: n, + * last_line: n, + * first_column: n, + * last_column: n, + * range: [start_number, end_number] + * (where the numbers are indexes into the input string, zero-based) + * } + * + * --- + * + * The `parseError` function receives a 'hash' object with these members for lexer and + * parser errors: + * + * { + * text: (matched text) + * token: (the produced terminal token, if any) + * token_id: (the produced terminal token numeric ID, if any) + * line: (yylineno) + * loc: (yylloc) + * } + * + * parser (grammar) errors will also provide these additional members: + * + * { + * expected: (array describing the set of expected tokens; + * may be UNDEFINED when we cannot easily produce such a set) + * state: (integer (or array when the table includes grammar collisions); + * represents the current internal state of the parser kernel. + * can, for example, be used to pass to the `collect_expected_token_set()` + * API to obtain the expected token set) + * action: (integer; represents the current internal action which will be executed) + * new_state: (integer; represents the next/planned internal state, once the current + * action has executed) + * recoverable: (boolean: TRUE when the parser MAY have an error recovery rule + * available for this particular error) + * state_stack: (array: the current parser LALR/LR internal state stack; this can be used, + * for instance, for advanced error analysis and reporting) + * value_stack: (array: the current parser LALR/LR internal `$$` value stack; this can be used, + * for instance, for advanced error analysis and reporting) + * location_stack: (array: the current parser LALR/LR internal location stack; this can be used, + * for instance, for advanced error analysis and reporting) + * yy: (object: the current parser internal "shared state" `yy` + * as is also available in the rule actions; this can be used, + * for instance, for advanced error analysis and reporting) + * lexer: (reference to the current lexer instance used by the parser) + * parser: (reference to the current parser instance) + * } + * + * while `this` will reference the current parser instance. 
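+ *
+ *  A minimal sketch of a userland override which tolerates recoverable errors (the names
+ *  and the logging choice are illustrative only):
+ *
+ *      parser.yy.parseError = function (str, hash) {
+ *          if (hash.recoverable) {
+ *              this.trace(str);      // log it and let the grammar's error recovery rules run
+ *          } else {
+ *              throw new this.JisonParserError(str, hash);
+ *          }
+ *      };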
+ * + * When `parseError` is invoked by the lexer, `this` will still reference the related *parser* + * instance, while these additional `hash` fields will also be provided: + * + * { + * lexer: (reference to the current lexer instance which reported the error) + * } + * + * When `parseError` is invoked by the parser due to a **JavaScript exception** being fired + * from either the parser or lexer, `this` will still reference the related *parser* + * instance, while these additional `hash` fields will also be provided: + * + * { + * exception: (reference to the exception thrown) + * } + * + * Please do note that in the latter situation, the `expected` field will be omitted as + * this type of failure is assumed not to be due to *parse errors* but rather due to user + * action code in either parser or lexer failing unexpectedly. + * + * --- + * + * You can specify parser options by setting / modifying the `.yy` object of your Parser instance. + * These options are available: + * + * ### options which are global for all parser instances + * + * Parser.pre_parse: function(yy) + * optional: you can specify a pre_parse() function in the chunk following + * the grammar, i.e. after the last `%%`. + * Parser.post_parse: function(yy, retval, parseInfo) { return retval; } + * optional: you can specify a post_parse() function in the chunk following + * the grammar, i.e. after the last `%%`. When it does not return any value, + * the parser will return the original `retval`. + * + * ### options which can be set up per parser instance + * + * yy: { + * pre_parse: function(yy) + * optional: is invoked before the parse cycle starts (and before the first + * invocation of `lex()`) but immediately after the invocation of + * `parser.pre_parse()`. + * post_parse: function(yy, retval, parseInfo) { return retval; } + * optional: is invoked when the parse terminates due to success ('accept') + * or failure (even when exceptions are thrown). + * `retval` contains the return value to be produced by `Parser.parse()`; + * this function can override the return value by returning another. + * When it does not return any value, the parser will return the original + * `retval`. + * This function is invoked immediately before `parser.post_parse()`. + * + * parseError: function(str, hash, ExceptionClass) + * optional: overrides the default `parseError` function. + * quoteName: function(name), + * optional: overrides the default `quoteName` function. + * } + * + * parser.lexer.options: { + * pre_lex: function() + * optional: is invoked before the lexer is invoked to produce another token. + * `this` refers to the Lexer object. + * post_lex: function(token) { return token; } + * optional: is invoked when the lexer has produced a token `token`; + * this function can override the returned token value by returning another. + * When it does not return any (truthy) value, the lexer will return + * the original `token`. + * `this` refers to the Lexer object. + * + * ranges: boolean + * optional: `true` ==> token location info will include a .range[] member. + * flex: boolean + * optional: `true` ==> flex-like lexing behaviour where the rules are tested + * exhaustively to find the longest match. + * backtrack_lexer: boolean + * optional: `true` ==> lexer regexes are tested in order and the action code is invoked + * for each matching regex; the lexer terminates the scan when a token is returned by the action code. + * xregexp: boolean + * optional: `true` ==> lexer rule regexes are "extended regex format" requiring the + * `XRegExp` library.
When this `%option` has not been specified at compile time, all lexer + * rule regexes have been written as standard JavaScript RegExp expressions. + * } + */ + + +// See also: +// http://stackoverflow.com/questions/1382107/whats-a-good-way-to-extend-error-in-javascript/#35881508 +// but we keep the prototype.constructor and prototype.name assignment lines too for compatibility +// with userland code which might access the derived class in a 'classic' way. +function JisonParserError(msg, hash) { + Object.defineProperty(this, 'name', { + enumerable: false, + writable: false, + value: 'JisonParserError' + }); + + if (msg == null) msg = '???'; + + Object.defineProperty(this, 'message', { + enumerable: false, + writable: true, + value: msg + }); + + this.hash = hash; + + var stacktrace; + if (hash && hash.exception instanceof Error) { + var ex2 = hash.exception; + this.message = ex2.message || msg; + stacktrace = ex2.stack; + } + if (!stacktrace) { + if (Error.hasOwnProperty('captureStackTrace')) { // V8/Chrome engine + Error.captureStackTrace(this, this.constructor); + } else { + stacktrace = (new Error(msg)).stack; + } + } + if (stacktrace) { + Object.defineProperty(this, 'stack', { + enumerable: false, + writable: false, + value: stacktrace + }); + } +} + +if (typeof Object.setPrototypeOf === 'function') { + Object.setPrototypeOf(JisonParserError.prototype, Error.prototype); +} else { + JisonParserError.prototype = Object.create(Error.prototype); +} +JisonParserError.prototype.constructor = JisonParserError; +JisonParserError.prototype.name = 'JisonParserError'; + + + + // helper: reconstruct the productions[] table + function bp(s) { + var rv = []; + var p = s.pop; + var r = s.rule; + for (var i = 0, l = p.length; i < l; i++) { + rv.push([ + p[i], + r[i] + ]); + } + return rv; + } + + + + // helper: reconstruct the defaultActions[] table + function bda(s) { + var rv = {}; + var d = s.idx; + var g = s.goto; + for (var i = 0, l = d.length; i < l; i++) { + var j = d[i]; + rv[j] = g[i]; + } + return rv; + } + + + + // helper: reconstruct the 'goto' table + function bt(s) { + var rv = []; + var d = s.len; + var y = s.symbol; + var t = s.type; + var a = s.state; + var m = s.mode; + var g = s.goto; + for (var i = 0, l = d.length; i < l; i++) { + var n = d[i]; + var q = {}; + for (var j = 0; j < n; j++) { + var z = y.shift(); + switch (t.shift()) { + case 2: + q[z] = [ + m.shift(), + g.shift() + ]; + break; + + case 0: + q[z] = a.shift(); + break; + + default: + // type === 1: accept + q[z] = [ + 3 + ]; + } + } + rv.push(q); + } + return rv; + } + + + + // helper: runlength encoding with increment step: code, length: step (default step = 0) + // `this` references an array + function s(c, l, a) { + a = a || 0; + for (var i = 0; i < l; i++) { + this.push(c); + c += a; + } + } + + // helper: duplicate sequence from *relative* offset and length. + // `this` references an array + function c(i, l) { + i = this.length - i; + for (l += i; i < l; i++) { + this.push(this[i]); + } + } + + // helper: unpack an array using helpers and data, all passed in an array argument 'a'. + function u(a) { + var rv = []; + for (var i = 0, l = a.length; i < l; i++) { + var e = a[i]; + // Is this entry a helper function? + if (typeof e === 'function') { + i++; + e.apply(rv, a[i]); + } else { + rv.push(e); + } + } + return rv; + } + + +var parser = { + // Code Generator Information Report + // --------------------------------- + // + // Options: + // + // default action mode: ............. 
classic,merge + // no try..catch: ................... false + // no default resolve on conflict: false + // on-demand look-ahead: ............ false + // error recovery token skip maximum: 3 + // yyerror in parse actions is: ..... NOT recoverable, + // yyerror in lexer actions and other non-fatal lexer are: + // .................................. NOT recoverable, + // debug grammar/output: ............ false + // has partial LR conflict upgrade: true + // rudimentary token-stack support: false + // parser table compression mode: ... 2 + // export debug tables: ............. false + // export *all* tables: ............. false + // module type: ..................... es + // parser engine type: .............. lalr + // output main() in the module: ..... true + // has user-specified main(): ....... false + // has user-specified require()/import modules for main(): + // .................................. false + // number of expected conflicts: .... 0 + // + // + // Parser Analysis flags: + // + // no significant actions (parser is a language matcher only): + // .................................. false + // uses yyleng: ..................... false + // uses yylineno: ................... false + // uses yytext: ..................... false + // uses yylloc: ..................... false + // uses ParseError API: ............. false + // uses YYERROR: .................... true + // uses YYRECOVERING: ............... false + // uses YYERROK: .................... false + // uses YYCLEARIN: .................. false + // tracks rule values: .............. true + // assigns rule values: ............. true + // uses location tracking: .......... true + // assigns location: ................ true + // uses yystack: .................... false + // uses yysstack: ................... false + // uses yysp: ....................... true + // uses yyrulelength: ............... false + // uses yyMergeLocationInfo API: .... true + // has error recovery: .............. true + // has error reporting: ............. 
true + // + // --------- END OF REPORT ----------- + +trace: function no_op_trace() {}, +JisonParserError: JisonParserError, +yy: {}, +options: { + type: "lalr", + hasPartialLrUpgradeOnConflict: true, + errorRecoveryTokenDiscardCount: 3 +}, +symbols_: { + "$accept": 0, + "$end": 1, + "%%": 14, + "(": 7, + ")": 8, + "*": 9, + "+": 11, + ":": 5, + ";": 4, + "=": 3, + "?": 10, + "ACTION": 15, + "ACTION_BODY": 43, + "ALIAS": 39, + "ARROW_ACTION": 42, + "CODE": 46, + "DEBUG": 19, + "EBNF": 20, + "EOF": 1, + "EOF_ID": 40, + "EPSILON": 38, + "ID": 24, + "IMPORT": 22, + "INCLUDE": 44, + "INIT_CODE": 23, + "INTEGER": 37, + "LEFT": 33, + "LEX_BLOCK": 17, + "NAME": 25, + "NONASSOC": 35, + "OPTIONS": 27, + "OPTIONS_END": 28, + "OPTION_STRING_VALUE": 29, + "OPTION_VALUE": 30, + "PARSER_TYPE": 32, + "PARSE_PARAM": 31, + "PATH": 45, + "PREC": 41, + "RIGHT": 34, + "START": 16, + "STRING": 26, + "TOKEN": 18, + "TOKEN_TYPE": 36, + "UNKNOWN_DECL": 21, + "action": 85, + "action_body": 86, + "action_comments_body": 87, + "action_ne": 84, + "associativity": 61, + "declaration": 51, + "declaration_list": 50, + "error": 2, + "expression": 79, + "extra_parser_module_code": 88, + "full_token_definitions": 63, + "grammar": 69, + "handle": 76, + "handle_action": 75, + "handle_list": 74, + "handle_sublist": 77, + "id": 83, + "id_list": 68, + "import_name": 53, + "import_path": 54, + "include_macro_code": 89, + "init_code_name": 52, + "module_code_chunk": 90, + "one_full_token": 64, + "operator": 60, + "option": 57, + "option_list": 56, + "optional_action_header_block": 49, + "optional_end_block": 48, + "optional_module_code_chunk": 91, + "optional_production_description": 73, + "optional_token_type": 65, + "options": 55, + "parse_params": 58, + "parser_type": 59, + "prec": 81, + "production": 71, + "production_id": 72, + "production_list": 70, + "spec": 47, + "suffix": 80, + "suffixed_expression": 78, + "symbol": 82, + "token_description": 67, + "token_list": 62, + "token_value": 66, + "{": 12, + "|": 6, + "}": 13 +}, +terminals_: { + 1: "EOF", + 2: "error", + 3: "=", + 4: ";", + 5: ":", + 6: "|", + 7: "(", + 8: ")", + 9: "*", + 10: "?", + 11: "+", + 12: "{", + 13: "}", + 14: "%%", + 15: "ACTION", + 16: "START", + 17: "LEX_BLOCK", + 18: "TOKEN", + 19: "DEBUG", + 20: "EBNF", + 21: "UNKNOWN_DECL", + 22: "IMPORT", + 23: "INIT_CODE", + 24: "ID", + 25: "NAME", + 26: "STRING", + 27: "OPTIONS", + 28: "OPTIONS_END", + 29: "OPTION_STRING_VALUE", + 30: "OPTION_VALUE", + 31: "PARSE_PARAM", + 32: "PARSER_TYPE", + 33: "LEFT", + 34: "RIGHT", + 35: "NONASSOC", + 36: "TOKEN_TYPE", + 37: "INTEGER", + 38: "EPSILON", + 39: "ALIAS", + 40: "EOF_ID", + 41: "PREC", + 42: "ARROW_ACTION", + 43: "ACTION_BODY", + 44: "INCLUDE", + 45: "PATH", + 46: "CODE" +}, +TERROR: 2, +EOF: 1, + +// internals: defined here so the object *structure* doesn't get modified by parse() et al, +// thus helping JIT compilers like Chrome V8. 
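+// (Editorial note: per the API comment at the top of this file, the `null` helper members
+// below -- `cleanupAfterParse`, `constructParseErrorInfo`, `yyMergeLocationInfo` -- are set up
+// during the first invocation of `parse()`; `cleanupAfterParse()` also resets `parseError()`
+// back to `originalParseError` when parsing ends.)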
+originalQuoteName: null, +originalParseError: null, +cleanupAfterParse: null, +constructParseErrorInfo: null, +yyMergeLocationInfo: null, + +__reentrant_call_depth: 0, // INTERNAL USE ONLY +__error_infos: [], // INTERNAL USE ONLY: the set of parseErrorInfo objects created since the last cleanup +__error_recovery_infos: [], // INTERNAL USE ONLY: the set of parseErrorInfo objects created since the last cleanup + +// APIs which will be set up depending on user action code analysis: +//yyRecovering: 0, +//yyErrOk: 0, +//yyClearIn: 0, + +// Helper APIs +// ----------- + +// Helper function which can be overridden by user code later on: put suitable quotes around +// literal IDs in a description string. +quoteName: function parser_quoteName(id_str) { + return '"' + id_str + '"'; +}, + +// Return the name of the given symbol (terminal or non-terminal) as a string, when available. +// +// Return NULL when the symbol is unknown to the parser. +getSymbolName: function parser_getSymbolName(symbol) { + if (this.terminals_[symbol]) { + return this.terminals_[symbol]; + } + + // Otherwise... this might refer to a RULE token i.e. a non-terminal: see if we can dig that one up. + // + // An example of this may be where a rule's action code contains a call like this: + // + // parser.getSymbolName(#$) + // + // to obtain a human-readable name of the current grammar rule. + var s = this.symbols_; + for (var key in s) { + if (s[key] === symbol) { + return key; + } + } + return null; +}, + +// Return a more-or-less human-readable description of the given symbol, when available, +// or the symbol itself, serving as its own 'description' for lack of something better to serve up. +// +// Return NULL when the symbol is unknown to the parser. +describeSymbol: function parser_describeSymbol(symbol) { + if (symbol !== this.EOF && this.terminal_descriptions_ && this.terminal_descriptions_[symbol]) { + return this.terminal_descriptions_[symbol]; + } else if (symbol === this.EOF) { + return 'end of input'; + } + var id = this.getSymbolName(symbol); + if (id) { + return this.quoteName(id); + } + return null; +}, + +// Produce a (more or less) human-readable list of expected tokens at the point of failure. +// +// The produced list may contain token or token set descriptions instead of the tokens +// themselves to help turning this output into something that easier to read by humans +// unless `do_not_describe` parameter is set, in which case a list of the raw, *numeric*, +// expected terminals and nonterminals is produced. +// +// The returned list (array) will not contain any duplicate entries. +collect_expected_token_set: function parser_collect_expected_token_set(state, do_not_describe) { + var TERROR = this.TERROR; + var tokenset = []; + var check = {}; + // Has this (error?) state been outfitted with a custom expectations description text for human consumption? + // If so, use that one instead of the less palatable token set. + if (!do_not_describe && this.state_descriptions_ && this.state_descriptions_[state]) { + return [this.state_descriptions_[state]]; + } + for (var p in this.table[state]) { + p = +p; + if (p !== TERROR) { + var d = do_not_describe ? p : this.describeSymbol(p); + if (d && !check[d]) { + tokenset.push(d); + check[d] = true; // Mark this token description as already mentioned to prevent outputting duplicate entries. 
+ } + } + } + return tokenset; +}, +productions_: bp({ + pop: u([ + s, + [47, 3], + 48, + 48, + s, + [49, 3], + s, + [50, 3], + s, + [51, 20], + s, + [52, 3], + 53, + 53, + 54, + 54, + s, + [55, 3], + 56, + 56, + s, + [57, 6], + 58, + 58, + 59, + 59, + 60, + 60, + s, + [61, 3], + 62, + 62, + 63, + 63, + s, + [64, 3], + 65, + s, + [65, 4, 1], + 68, + 69, + 70, + 70, + s, + [71, 3], + 72, + 72, + 73, + 73, + s, + [74, 4], + s, + [75, 3], + 76, + 76, + 77, + 77, + 78, + 78, + s, + [79, 5], + s, + [80, 4], + s, + [81, 3], + 82, + 82, + 83, + s, + [84, 4], + s, + [85, 3], + s, + [86, 5], + 87, + 87, + 88, + 88, + 89, + 89, + s, + [90, 3], + 91, + 91 +]), + rule: u([ + 5, + 5, + 3, + 0, + 2, + 0, + s, + [2, 3], + c, + [4, 3], + 1, + 1, + c, + [3, 3], + s, + [1, 6], + s, + [3, 5], + s, + [2, 3], + c, + [15, 9], + c, + [11, 4], + c, + [20, 7], + s, + [2, 4], + s, + [1, 3], + 2, + 1, + 2, + 2, + c, + [15, 3], + 0, + c, + [11, 7], + c, + [36, 4], + 3, + 3, + 1, + 0, + 3, + c, + [39, 4], + c, + [80, 4], + c, + [9, 3], + c, + [39, 4], + 3, + 3, + c, + [34, 5], + c, + [40, 5], + c, + [32, 3], + s, + [1, 3], + 0, + 0, + 1, + 5, + 4, + 4, + c, + [53, 3], + c, + [85, 4], + c, + [35, 3], + 0 +]) +}), +performAction: function parser__PerformAction(yyloc, yystate /* action[1] */, yysp, yyvstack, yylstack) { + + /* this == yyval */ + + // the JS engine itself can go and remove these statements when `yy` turns out to be unused in any action code! + var yy = this.yy; + var yyparser = yy.parser; + var yylexer = yy.lexer; + + + + switch (yystate) { +case 0: + /*! Production:: $accept : spec $end */ + + // default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-): + this.$ = yyvstack[yysp - 1]; + this._$ = yylstack[yysp - 1]; + // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-) + break; + +case 1: + /*! Production:: spec : declaration_list "%%" grammar optional_end_block EOF */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 4, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = yyvstack[yysp - 4]; + if (yyvstack[yysp - 1].trim() !== '') { + yy.addDeclaration(this.$, { include: yyvstack[yysp - 1] }); + } + return extend(this.$, yyvstack[yysp - 2]); + break; + +case 2: + /*! Production:: spec : declaration_list "%%" grammar error EOF */ + + // default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-): + this.$ = yyvstack[yysp - 4]; + this._$ = yyparser.yyMergeLocationInfo(yysp - 4, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-) + + + yyparser.yyError(rmCommonWS` + Maybe you did not correctly separate trailing code from the grammar rule set with a '%%' marker on an otherwise empty line? + + Erroneous area: + ${yylexer.prettyPrintRange(yylstack[yysp - 1], yylstack[yysp - 2])} + `); + break; + +case 3: + /*! Production:: spec : declaration_list error EOF */ + + // default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-): + this.$ = yyvstack[yysp - 2]; + this._$ = yyparser.yyMergeLocationInfo(yysp - 2, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-) + + + yyparser.yyError(rmCommonWS` + Maybe you did not correctly separate the parse 'header section' (token definitions, options, lexer spec, etc.) from the grammar rule set with a '%%' on an otherwise empty line? 
+ + Erroneous area: + ${yylexer.prettyPrintRange(yylstack[yysp - 1], yylstack[yysp - 2])} + `); + break; + +case 4: + /*! Production:: optional_end_block : %epsilon */ +case 100: + /*! Production:: suffix : %epsilon */ +case 116: + /*! Production:: action : %epsilon */ +case 117: + /*! Production:: action_body : %epsilon */ +case 132: + /*! Production:: optional_module_code_chunk : %epsilon */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(null, null, null, null, true); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = ''; + break; + +case 5: + /*! Production:: optional_end_block : "%%" extra_parser_module_code */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + var rv = checkActionBlock(yyvstack[yysp], yylstack[yysp]); + if (rv) { + yyparser.yyError(rmCommonWS` + The extra parser module code section (a.k.a. 'epilogue') does not compile: ${rv} + + Erroneous area: + ${yylexer.prettyPrintRange(yylstack[yysp])} + `); + } + this.$ = yyvstack[yysp]; + break; + +case 6: + /*! Production:: optional_action_header_block : %epsilon */ +case 10: + /*! Production:: declaration_list : %epsilon */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(null, null, null, null, true); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = {}; + break; + +case 7: + /*! Production:: optional_action_header_block : optional_action_header_block ACTION */ +case 8: + /*! Production:: optional_action_header_block : optional_action_header_block include_macro_code */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = yyvstack[yysp - 1]; + var rv = checkActionBlock(yyvstack[yysp], yylstack[yysp]); + if (rv) { + yyparser.yyError(rmCommonWS` + action header code block does not compile: ${rv} + + Erroneous area: + ${yylexer.prettyPrintRange(yylstack[yysp])} + `); + } + yy.addDeclaration(this.$, { actionInclude: yyvstack[yysp] }); + break; + +case 9: + /*! Production:: declaration_list : declaration_list declaration */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = yyvstack[yysp - 1]; yy.addDeclaration(this.$, yyvstack[yysp]); + break; + +case 11: + /*! Production:: declaration_list : declaration_list error */ + + // default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-): + this.$ = yyvstack[yysp - 1]; + this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-) + + + // TODO ... + yyparser.yyError(rmCommonWS` + declaration list error? + + Erroneous area: + ${yylexer.prettyPrintRange(yylstack[yysp], yylstack[yysp - 1])} + `); + break; + +case 12: + /*! 
Production:: declaration : START id */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = {start: yyvstack[yysp]}; + break; + +case 13: + /*! Production:: declaration : LEX_BLOCK */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yylstack[yysp]; + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = {lex: {text: yyvstack[yysp], position: yylstack[yysp]}}; + break; + +case 14: + /*! Production:: declaration : operator */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yylstack[yysp]; + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = {operator: yyvstack[yysp]}; + break; + +case 15: + /*! Production:: declaration : TOKEN full_token_definitions */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = {token_list: yyvstack[yysp]}; + break; + +case 16: + /*! Production:: declaration : ACTION */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yylstack[yysp]; + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + var rv = checkActionBlock(yyvstack[yysp], yylstack[yysp]); + if (rv) { + yyparser.yyError(rmCommonWS` + action code block does not compile: ${rv} + + Erroneous area: + ${yylexer.prettyPrintRange(yylstack[yysp])} + `); + } + this.$ = {include: yyvstack[yysp]}; + break; + +case 17: + /*! Production:: declaration : include_macro_code */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yylstack[yysp]; + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + var rv = checkActionBlock(yyvstack[yysp], yylstack[yysp]); + if (rv) { + yyparser.yyError(rmCommonWS` + action header code block does not compile: ${rv} + + Erroneous area: + ${yylexer.prettyPrintRange(yylstack[yysp])} + `); + } + this.$ = {include: yyvstack[yysp]}; + break; + +case 18: + /*! Production:: declaration : parse_params */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yylstack[yysp]; + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = {parseParams: yyvstack[yysp]}; + break; + +case 19: + /*! Production:: declaration : parser_type */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yylstack[yysp]; + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = {parserType: yyvstack[yysp]}; + break; + +case 20: + /*! Production:: declaration : options */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yylstack[yysp]; + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = {options: yyvstack[yysp]}; + break; + +case 21: + /*! 
Production:: declaration : DEBUG */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yylstack[yysp]; + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = {options: [['debug', true]]}; + break; + +case 22: + /*! Production:: declaration : EBNF */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yylstack[yysp]; + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + ebnf = true; + this.$ = {options: [['ebnf', true]]}; + break; + +case 23: + /*! Production:: declaration : UNKNOWN_DECL */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yylstack[yysp]; + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = {unknownDecl: yyvstack[yysp]}; + break; + +case 24: + /*! Production:: declaration : IMPORT import_name import_path */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 2, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = {imports: {name: yyvstack[yysp - 1], path: yyvstack[yysp]}}; + break; + +case 25: + /*! Production:: declaration : IMPORT import_name error */ + + // default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-): + this.$ = yyvstack[yysp - 2]; + this._$ = yyparser.yyMergeLocationInfo(yysp - 2, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-) + + + yyparser.yyError(rmCommonWS` + You did not specify a legal file path for the '%import' initialization code statement, which must have the format: + + %import qualifier_name file_path + + Erroneous area: + ${yylexer.prettyPrintRange(yylstack[yysp], yylstack[yysp - 2])} + `); + break; + +case 26: + /*! Production:: declaration : IMPORT error import_path */ + + // default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-): + this.$ = yyvstack[yysp - 2]; + this._$ = yyparser.yyMergeLocationInfo(yysp - 2, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-) + + + yyparser.yyError(rmCommonWS` + Each '%import'-ed initialization code section must be qualified by a name, e.g. 'required' before the import path itself: + + %import qualifier_name file_path + + Erroneous area: + ${yylexer.prettyPrintRange(yylstack[yysp - 1], yylstack[yysp - 2])} + `); + break; + +case 27: + /*! Production:: declaration : INIT_CODE init_code_name action_ne */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 2, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + var rv = checkActionBlock(yyvstack[yysp], yylstack[yysp]); + if (rv) { + yyparser.yyError(rmCommonWS` + %code "${$init_code_name}" initialization section action code block does not compile: ${rv} + + Erroneous area: + ${yylexer.prettyPrintRange(yylstack[yysp], yylstack[yysp - 2])} + `); + } + this.$ = { + initCode: { + qualifier: yyvstack[yysp - 1], + include: yyvstack[yysp] + } + }; + break; + +case 28: + /*! 
Production:: declaration : INIT_CODE error action_ne */ + + // default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-): + this.$ = yyvstack[yysp - 2]; + this._$ = yyparser.yyMergeLocationInfo(yysp - 2, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-) + + + yyparser.yyError(rmCommonWS` + Each '%code' initialization code section must be qualified by a name, e.g. 'required' before the action code itself: + + %code qualifier_name {action code} + + Erroneous area: + ${yylexer.prettyPrintRange(yylstack[yysp - 1], yylstack[yysp - 2], yylstack[yysp])} + `); + break; + +case 29: + /*! Production:: declaration : START error */ + + // default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-): + this.$ = yyvstack[yysp - 1]; + this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-) + + + // TODO ... + yyparser.yyError(rmCommonWS` + %start token error? + + Erroneous area: + ${yylexer.prettyPrintRange(yylstack[yysp], yylstack[yysp - 1])} + `); + break; + +case 30: + /*! Production:: declaration : TOKEN error */ + + // default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-): + this.$ = yyvstack[yysp - 1]; + this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-) + + + // TODO ... + yyparser.yyError(rmCommonWS` + %token definition list error? + + Erroneous area: + ${yylexer.prettyPrintRange(yylstack[yysp], yylstack[yysp - 1])} + `); + break; + +case 31: + /*! Production:: declaration : IMPORT error */ + + // default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-): + this.$ = yyvstack[yysp - 1]; + this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-) + + + // TODO ... + yyparser.yyError(rmCommonWS` + %import name or source filename missing maybe? + + Erroneous area: + ${yylexer.prettyPrintRange(yylstack[yysp], yylstack[yysp - 1])} + `); + break; + +case 32: + /*! Production:: init_code_name : ID */ +case 33: + /*! Production:: init_code_name : NAME */ +case 34: + /*! Production:: init_code_name : STRING */ +case 35: + /*! Production:: import_name : ID */ +case 36: + /*! Production:: import_name : STRING */ +case 37: + /*! Production:: import_path : ID */ +case 38: + /*! Production:: import_path : STRING */ +case 67: + /*! Production:: optional_token_type : TOKEN_TYPE */ +case 68: + /*! Production:: token_value : INTEGER */ +case 69: + /*! Production:: token_description : STRING */ +case 80: + /*! Production:: optional_production_description : STRING */ +case 95: + /*! Production:: expression : ID */ +case 101: + /*! Production:: suffix : "*" */ +case 102: + /*! Production:: suffix : "?" */ +case 103: + /*! Production:: suffix : "+" */ +case 107: + /*! Production:: symbol : id */ +case 108: + /*! Production:: symbol : STRING */ +case 109: + /*! Production:: id : ID */ +case 112: + /*! Production:: action_ne : ACTION */ +case 113: + /*! Production:: action_ne : include_macro_code */ +case 114: + /*! Production:: action : action_ne */ +case 118: + /*! Production:: action_body : action_comments_body */ +case 122: + /*! Production:: action_comments_body : ACTION_BODY */ +case 124: + /*! Production:: extra_parser_module_code : optional_module_code_chunk */ +case 128: + /*! 
Production:: module_code_chunk : CODE */ +case 131: + /*! Production:: optional_module_code_chunk : module_code_chunk */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yylstack[yysp]; + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = yyvstack[yysp]; + break; + +case 39: + /*! Production:: options : OPTIONS option_list OPTIONS_END */ +case 110: + /*! Production:: action_ne : "{" action_body "}" */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 2, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = yyvstack[yysp - 1]; + break; + +case 40: + /*! Production:: options : OPTIONS error OPTIONS_END */ + + // default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-): + this.$ = yyvstack[yysp - 2]; + this._$ = yyparser.yyMergeLocationInfo(yysp - 2, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-) + + + // TODO ... + yyparser.yyError(rmCommonWS` + %options ill defined / error? + + Erroneous area: + ${yylexer.prettyPrintRange(yylstack[yysp - 1], yylstack[yysp - 2], yylstack[yysp])} + `); + break; + +case 41: + /*! Production:: options : OPTIONS error */ + + // default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-): + this.$ = yyvstack[yysp - 1]; + this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-) + + + // TODO ... + yyparser.yyError(rmCommonWS` + %options don't seem terminated? + + Erroneous area: + ${yylexer.prettyPrintRange(yylstack[yysp], yylstack[yysp - 1])} + `); + break; + +case 42: + /*! Production:: option_list : option_list option */ +case 59: + /*! Production:: token_list : token_list symbol */ +case 70: + /*! Production:: id_list : id_list id */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = yyvstack[yysp - 1]; this.$.push(yyvstack[yysp]); + break; + +case 43: + /*! Production:: option_list : option */ +case 60: + /*! Production:: token_list : symbol */ +case 71: + /*! Production:: id_list : id */ +case 83: + /*! Production:: handle_list : handle_action */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yylstack[yysp]; + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = [yyvstack[yysp]]; + break; + +case 44: + /*! Production:: option : NAME */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yylstack[yysp]; + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = [yyvstack[yysp], true]; + break; + +case 45: + /*! Production:: option : NAME "=" OPTION_STRING_VALUE */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 2, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = [yyvstack[yysp - 2], yyvstack[yysp]]; + break; + +case 46: + /*! Production:: option : NAME "=" OPTION_VALUE */ +case 47: + /*! 
Production:: option : NAME "=" NAME */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 2, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = [yyvstack[yysp - 2], parseValue(yyvstack[yysp])]; + break; + +case 48: + /*! Production:: option : NAME "=" error */ + + // default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-): + this.$ = yyvstack[yysp - 2]; + this._$ = yyparser.yyMergeLocationInfo(yysp - 2, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-) + + + // TODO ... + yyparser.yyError(rmCommonWS` + named %option value error for ${yyvstack[yysp - 2]}? + + Erroneous area: + ${yylexer.prettyPrintRange(yylstack[yysp], yylstack[yysp - 2])} + `); + break; + +case 49: + /*! Production:: option : NAME error */ + + // default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-): + this.$ = yyvstack[yysp - 1]; + this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-) + + + // TODO ... + yyparser.yyError(rmCommonWS` + named %option value assignment error? + + Erroneous area: + ${yylexer.prettyPrintRange(yylstack[yysp], yylstack[yysp - 1])} + `); + break; + +case 50: + /*! Production:: parse_params : PARSE_PARAM token_list */ +case 52: + /*! Production:: parser_type : PARSER_TYPE symbol */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = yyvstack[yysp]; + break; + +case 51: + /*! Production:: parse_params : PARSE_PARAM error */ + + // default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-): + this.$ = yyvstack[yysp - 1]; + this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-) + + + // TODO ... + yyparser.yyError(rmCommonWS` + %parse-params declaration error? + + Erroneous area: + ${yylexer.prettyPrintRange(yylstack[yysp], yylstack[yysp - 1])} + `); + break; + +case 53: + /*! Production:: parser_type : PARSER_TYPE error */ + + // default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-): + this.$ = yyvstack[yysp - 1]; + this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-) + + + // TODO ... + yyparser.yyError(rmCommonWS` + %parser-type declaration error? + + Erroneous area: + ${yylexer.prettyPrintRange(yylstack[yysp], yylstack[yysp - 1])} + `); + break; + +case 54: + /*! Production:: operator : associativity token_list */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = [yyvstack[yysp - 1]]; this.$.push.apply(this.$, yyvstack[yysp]); + break; + +case 55: + /*! 
Production:: operator : associativity error */ + + // default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-): + this.$ = yyvstack[yysp - 1]; + this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-) + + + // TODO ... + yyparser.yyError(rmCommonWS` + operator token list error in an associativity statement? + + Erroneous area: + ${yylexer.prettyPrintRange(yylstack[yysp], yylstack[yysp - 1])} + `); + break; + +case 56: + /*! Production:: associativity : LEFT */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yylstack[yysp]; + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = 'left'; + break; + +case 57: + /*! Production:: associativity : RIGHT */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yylstack[yysp]; + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = 'right'; + break; + +case 58: + /*! Production:: associativity : NONASSOC */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yylstack[yysp]; + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = 'nonassoc'; + break; + +case 61: + /*! Production:: full_token_definitions : optional_token_type id_list */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + var rv = []; + var lst = yyvstack[yysp]; + for (var i = 0, len = lst.length; i < len; i++) { + var id = lst[i]; + var m = {id: id}; + if (yyvstack[yysp - 1]) { + m.type = yyvstack[yysp - 1]; + } + rv.push(m); + } + this.$ = rv; + break; + +case 62: + /*! Production:: full_token_definitions : optional_token_type one_full_token */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + var m = yyvstack[yysp]; + if (yyvstack[yysp - 1]) { + m.type = yyvstack[yysp - 1]; + } + this.$ = [m]; + break; + +case 63: + /*! Production:: one_full_token : id token_value token_description */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 2, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = { + id: yyvstack[yysp - 2], + value: yyvstack[yysp - 1], + description: yyvstack[yysp] + }; + break; + +case 64: + /*! Production:: one_full_token : id token_description */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = { + id: yyvstack[yysp - 1], + description: yyvstack[yysp] + }; + break; + +case 65: + /*! 
Production:: one_full_token : id token_value */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = { + id: yyvstack[yysp - 1], + value: yyvstack[yysp] + }; + break; + +case 66: + /*! Production:: optional_token_type : %epsilon */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(null, null, null, null, true); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = false; + break; + +case 72: + /*! Production:: grammar : optional_action_header_block production_list */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = yyvstack[yysp - 1]; + this.$.grammar = yyvstack[yysp]; + break; + +case 73: + /*! Production:: production_list : production_list production */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = yyvstack[yysp - 1]; + if (yyvstack[yysp][0] in this.$) { + this.$[yyvstack[yysp][0]] = this.$[yyvstack[yysp][0]].concat(yyvstack[yysp][1]); + } else { + this.$[yyvstack[yysp][0]] = yyvstack[yysp][1]; + } + break; + +case 74: + /*! Production:: production_list : production */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yylstack[yysp]; + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = {}; this.$[yyvstack[yysp][0]] = yyvstack[yysp][1]; + break; + +case 75: + /*! Production:: production : production_id handle_list ";" */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 2, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = [yyvstack[yysp - 2], yyvstack[yysp - 1]]; + break; + +case 76: + /*! Production:: production : production_id error ";" */ + + // default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-): + this.$ = yyvstack[yysp - 2]; + this._$ = yyparser.yyMergeLocationInfo(yysp - 2, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-) + + + // TODO ... + yyparser.yyError(rmCommonWS` + rule production declaration error? + + Erroneous area: + ${yylexer.prettyPrintRange(yylstack[yysp - 1], yylstack[yysp - 2])} + `); + break; + +case 77: + /*! Production:: production : production_id error */ + + // default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-): + this.$ = yyvstack[yysp - 1]; + this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-) + + + // TODO ... + yyparser.yyError(rmCommonWS` + rule production declaration error: did you terminate the rule production set with a semicolon? + + Erroneous area: + ${yylexer.prettyPrintRange(yylstack[yysp], yylstack[yysp - 1])} + `); + break; + +case 78: + /*! 
Production:: production_id : id optional_production_description ":" */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 2, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = yyvstack[yysp - 2]; + + // TODO: carry rule description support into the parser generator... + break; + +case 79: + /*! Production:: production_id : id optional_production_description error */ + + // default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-): + this.$ = yyvstack[yysp - 2]; + this._$ = yyparser.yyMergeLocationInfo(yysp - 2, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-) + + + // TODO ... + yyparser.yyError(rmCommonWS` + rule id should be followed by a colon, but that one seems missing? + + Erroneous area: + ${yylexer.prettyPrintRange(yylstack[yysp], yylstack[yysp - 2])} + `); + break; + +case 81: + /*! Production:: optional_production_description : %epsilon */ + + // default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-): + this.$ = undefined; + this._$ = yyparser.yyMergeLocationInfo(null, null, null, null, true); + // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-) + break; + +case 82: + /*! Production:: handle_list : handle_list "|" handle_action */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 2, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = yyvstack[yysp - 2]; + this.$.push(yyvstack[yysp]); + break; + +case 84: + /*! Production:: handle_list : handle_list "|" error */ + + // default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-): + this.$ = yyvstack[yysp - 2]; + this._$ = yyparser.yyMergeLocationInfo(yysp - 2, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-) + + + // TODO ... + yyparser.yyError(rmCommonWS` + rule alternative production declaration error? + + Erroneous area: + ${yylexer.prettyPrintRange(yylstack[yysp], yylstack[yysp - 2])} + `); + break; + +case 85: + /*! Production:: handle_list : handle_list ":" error */ + + // default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-): + this.$ = yyvstack[yysp - 2]; + this._$ = yyparser.yyMergeLocationInfo(yysp - 2, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-) + + + // TODO ... + yyparser.yyError(rmCommonWS` + multiple alternative rule productions should be separated by a '|' pipe character, not a ':' colon! + + Erroneous area: + ${yylexer.prettyPrintRange(yylstack[yysp], yylstack[yysp - 2])} + `); + break; + +case 86: + /*! Production:: handle_action : handle prec action */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 2, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = [(yyvstack[yysp - 2].length ? 
yyvstack[yysp - 2].join(' ') : '')]; + if (yyvstack[yysp]) { + var rv = checkActionBlock(yyvstack[yysp], yylstack[yysp]); + if (rv) { + yyparser.yyError(rmCommonWS` + production rule action code block does not compile: ${rv} + + Erroneous area: + ${yylexer.prettyPrintRange(yylstack[yysp], yylstack[yysp - 2])} + `); + } + this.$.push(yyvstack[yysp]); + } + if (yyvstack[yysp - 1]) { + if (yyvstack[yysp - 2].length === 0) { + yyparser.yyError(rmCommonWS` + You cannot specify a precedence override for an epsilon (a.k.a. empty) rule! + + Erroneous area: + ${yylexer.prettyPrintRange(yylstack[yysp - 2])} + `); + } + this.$.push(yyvstack[yysp - 1]); + } + if (this.$.length === 1) { + this.$ = this.$[0]; + } + break; + +case 87: + /*! Production:: handle_action : EPSILON action */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = ['']; + if (yyvstack[yysp]) { + var rv = checkActionBlock(yyvstack[yysp], yylstack[yysp]); + if (rv) { + yyparser.yyError(rmCommonWS` + epsilon production rule action code block does not compile: ${rv} + + Erroneous area: + ${yylexer.prettyPrintRange(yylstack[yysp], yylstack[yysp - 1])} + `); + } + this.$.push(yyvstack[yysp]); + } + if (this.$.length === 1) { + this.$ = this.$[0]; + } + break; + +case 88: + /*! Production:: handle_action : EPSILON error */ + + // default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-): + this.$ = yyvstack[yysp - 1]; + this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-) + + + // TODO ... + yyparser.yyError(rmCommonWS` + %epsilon rule action declaration error? + + Erroneous area: + ${yylexer.prettyPrintRange(yylstack[yysp], yylstack[yysp - 1])} + `); + break; + +case 89: + /*! Production:: handle : handle suffixed_expression */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = yyvstack[yysp - 1]; + this.$.push(yyvstack[yysp]); + break; + +case 90: + /*! Production:: handle : %epsilon */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(null, null, null, null, true); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = []; + break; + +case 91: + /*! Production:: handle_sublist : handle_sublist "|" handle */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 2, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = yyvstack[yysp - 2]; + this.$.push(yyvstack[yysp].join(' ')); + break; + +case 92: + /*! Production:: handle_sublist : handle */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yylstack[yysp]; + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = [yyvstack[yysp].join(' ')]; + break; + +case 93: + /*! 
Production:: suffixed_expression : expression suffix ALIAS */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 2, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = yyvstack[yysp - 2] + yyvstack[yysp - 1] + "[" + yyvstack[yysp] + "]"; + break; + +case 94: + /*! Production:: suffixed_expression : expression suffix */ +case 123: + /*! Production:: action_comments_body : action_comments_body ACTION_BODY */ +case 129: + /*! Production:: module_code_chunk : module_code_chunk CODE */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = yyvstack[yysp - 1] + yyvstack[yysp]; + break; + +case 96: + /*! Production:: expression : EOF_ID */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yylstack[yysp]; + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = '$end'; + break; + +case 97: + /*! Production:: expression : STRING */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yylstack[yysp]; + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + // Re-encode the string *anyway* as it will + // be made part of the rule rhs a.k.a. production (type: *string*) again and we want + // to be able to handle all tokens, including *significant space* + // encoded as literal tokens in a grammar such as this: `rule: A ' ' B`. + this.$ = dquote(yyvstack[yysp]); + break; + +case 98: + /*! Production:: expression : "(" handle_sublist ")" */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 2, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = '(' + yyvstack[yysp - 1].join(' | ') + ')'; + break; + +case 99: + /*! Production:: expression : "(" handle_sublist error */ + + // default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-): + this.$ = yyvstack[yysp - 2]; + this._$ = yyparser.yyMergeLocationInfo(yysp - 2, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-) + + + yyparser.yyError(rmCommonWS` + Seems you did not correctly bracket a grammar rule sublist in '( ... )' brackets. + + Erroneous area: + ${yylexer.prettyPrintRange(yylstack[yysp], yylstack[yysp - 2])} + `); + break; + +case 104: + /*! Production:: prec : PREC symbol */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = { prec: yyvstack[yysp] }; + break; + +case 105: + /*! Production:: prec : PREC error */ + + // default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-): + this.$ = yyvstack[yysp - 1]; + this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-) + + + // TODO ... + yyparser.yyError(rmCommonWS` + %prec precedence override declaration error? 
+ + Erroneous precedence declaration: + ${yylexer.prettyPrintRange(yylstack[yysp], yylstack[yysp - 1])} + `); + break; + +case 106: + /*! Production:: prec : %epsilon */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(null, null, null, null, true); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = null; + break; + +case 111: + /*! Production:: action_ne : "{" action_body error */ + + // default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-): + this.$ = yyvstack[yysp - 2]; + this._$ = yyparser.yyMergeLocationInfo(yysp - 2, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-) + + + yyparser.yyError(rmCommonWS` + Seems you did not correctly bracket a parser rule action block in curly braces: '{ ... }'. + + Erroneous area: + ${yylexer.prettyPrintRange(yylstack[yysp], yylstack[yysp - 2])} + `); + break; + +case 115: + /*! Production:: action : ARROW_ACTION */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yylstack[yysp]; + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = '$$ = ' + yyvstack[yysp]; + break; + +case 119: + /*! Production:: action_body : action_body "{" action_body "}" action_comments_body */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 4, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = yyvstack[yysp - 4] + yyvstack[yysp - 3] + yyvstack[yysp - 2] + yyvstack[yysp - 1] + yyvstack[yysp]; + break; + +case 120: + /*! Production:: action_body : action_body "{" action_body "}" */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 3, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = yyvstack[yysp - 3] + yyvstack[yysp - 2] + yyvstack[yysp - 1] + yyvstack[yysp]; + break; + +case 121: + /*! Production:: action_body : action_body "{" action_body error */ + + // default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-): + this.$ = yyvstack[yysp - 3]; + this._$ = yyparser.yyMergeLocationInfo(yysp - 3, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-) + + + yyparser.yyError(rmCommonWS` + Seems you did not correctly match curly braces '{ ... }' in a parser rule action block. + + Erroneous area: + ${yylexer.prettyPrintRange(yylstack[yysp], yylstack[yysp - 2])} + `); + break; + +case 125: + /*! Production:: extra_parser_module_code : optional_module_code_chunk include_macro_code extra_parser_module_code */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 2, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = yyvstack[yysp - 2] + yyvstack[yysp - 1] + yyvstack[yysp]; + break; + +case 126: + /*! 
Production:: include_macro_code : INCLUDE PATH */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + var fileContent = fs.readFileSync(yyvstack[yysp], { encoding: 'utf-8' }); + var rv = checkActionBlock(fileContent); + if (rv) { + yyparser.yyError(rmCommonWS` + included action code file "${$PATH}" does not compile: ${rv} + + Erroneous area: + ${yylexer.prettyPrintRange(yylstack[yysp], yylstack[yysp - 1])} + `); + } + // And no, we don't support nested '%include': + this.$ = '\n// Included by Jison: ' + yyvstack[yysp] + ':\n\n' + fileContent + '\n\n// End Of Include by Jison: ' + yyvstack[yysp] + '\n\n'; + break; + +case 127: + /*! Production:: include_macro_code : INCLUDE error */ + + // default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-): + this.$ = yyvstack[yysp - 1]; + this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-) + + + yyparser.yyError(rmCommonWS` + %include MUST be followed by a valid file path. + + Erroneous path: + ` + yylexer.prettyPrintRange(yylstack[yysp], yylstack[yysp - 1])); + break; + +case 130: + /*! Production:: module_code_chunk : error */ + + // default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-): + this.$ = yyvstack[yysp]; + this._$ = yylstack[yysp]; + // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-) + + + // TODO ... + yyparser.yyError(rmCommonWS` + module code declaration error? + + Erroneous area: + ` + yylexer.prettyPrintRange(yylstack[yysp])); + break; + +case 164: // === NO_ACTION[1] :: ensures that anyone (but us) using this new state will fail dramatically! + // error recovery reduction action (action generated by jison, + // using the user-specified `%code error_recovery_reduction` %{...%} + // code chunk below. 
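
Aside: the `%include PATH` action just above (case 126) is one of the few rule actions here that does real work: it reads the referenced file, sanity-checks it as an action-code block (via `helpers.checkActionBlock`, reporting failures through `yyError()`), and splices the content into the output between marker comments; nested `%include` directives are not expanded. A simplified sketch of that behaviour, with `expandInclude` being a hypothetical helper name used only for illustration:

```js
// Simplified sketch of the `%include PATH` expansion performed by case 126 above.
// The real action additionally validates the file with checkActionBlock() and
// reports problems via yyparser.yyError(); nested '%include' is not supported.
var fs = require('fs');

function expandInclude(path) {
  var fileContent = fs.readFileSync(path, { encoding: 'utf-8' });
  return '\n// Included by Jison: ' + path + ':\n\n' +
         fileContent +
         '\n\n// End Of Include by Jison: ' + path + '\n\n';
}

// e.g. expandInclude('./prologue.js') returns the file wrapped in marker comments.
```
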
+ + + break; + +} +}, +table: bt({ + len: u([ + 20, + 1, + 25, + 5, + 19, + 18, + 3, + 18, + 18, + 5, + s, + [18, 8], + 4, + 5, + 6, + 2, + s, + [6, 4, -1], + 3, + 3, + 4, + 8, + 1, + 18, + 18, + 26, + c, + [18, 3], + 1, + 4, + 21, + 3, + 3, + 5, + 5, + s, + [3, 3], + 22, + 18, + 20, + 25, + 25, + 24, + 24, + 22, + s, + [18, 3], + 3, + 19, + 2, + 4, + 1, + 1, + 7, + 7, + c, + [40, 3], + 17, + 4, + 20, + 18, + 23, + s, + [18, 6], + 6, + 21, + 21, + 18, + 20, + 18, + 2, + 18, + 4, + 2, + s, + [1, 3], + s, + [3, 4], + 4, + 3, + 5, + 3, + 15, + 11, + 2, + 2, + 19, + 20, + 18, + c, + [104, 3], + 4, + 4, + s, + [2, 4], + 7, + 3, + 4, + 16, + 1, + 4, + 10, + 14, + c, + [122, 3], + 18, + 18, + 9, + s, + [3, 4], + 14, + 14, + 18, + 21, + 21, + 6, + 4, + c, + [50, 5], + 7, + 7, + s, + [15, 4], + 3, + 9, + 3, + 14, + 18, + 18, + 8, + 5, + 3, + 9, + 4 +]), + symbol: u([ + 2, + s, + [14, 10, 1], + 27, + s, + [31, 5, 1], + 44, + 47, + 50, + 1, + c, + [21, 18], + 51, + 55, + s, + [58, 4, 1], + 89, + 15, + 24, + 44, + 49, + 69, + c, + [31, 19], + c, + [18, 19], + 24, + 83, + c, + [39, 38], + 36, + 63, + 65, + c, + [41, 37], + c, + [18, 108], + 24, + 26, + 53, + 2, + 24, + 25, + 26, + 52, + c, + [9, 3], + 62, + 82, + 83, + 2, + 45, + c, + [8, 7], + 24, + 26, + c, + [5, 3], + 25, + 56, + 57, + c, + [9, 3], + c, + [3, 6], + c, + [266, 3], + 48, + c, + [275, 3], + 70, + 71, + 72, + 83, + 89, + c, + [278, 38], + 4, + 5, + 6, + 12, + s, + [14, 11, 1], + 26, + c, + [24, 6], + 37, + 42, + c, + [152, 37], + 24, + 64, + 68, + 83, + 24, + c, + [119, 3], + 54, + c, + [27, 11], + c, + [67, 8], + 44, + 54, + c, + [147, 6], + 12, + 15, + 44, + 84, + 89, + c, + [5, 8], + c, + [3, 6], + c, + [46, 20], + c, + [201, 3], + c, + [113, 28], + c, + [40, 9], + c, + [177, 23], + c, + [176, 3], + c, + [25, 24], + 1, + c, + [26, 4], + c, + [25, 11], + c, + [73, 7], + 46, + c, + [24, 24], + c, + [158, 51], + c, + [18, 25], + 25, + 28, + 57, + c, + [21, 12], + 28, + c, + [22, 8], + 2, + 3, + 25, + 28, + s, + [1, 3], + 2, + 44, + 46, + 88, + 90, + 91, + c, + [425, 3], + 24, + c, + [433, 3], + c, + [440, 3], + c, + [3, 3], + c, + [13, 4], + c, + [153, 4], + 7, + 12, + 15, + 24, + 26, + 38, + 40, + 41, + 42, + 44, + 74, + 75, + 76, + 2, + 5, + 26, + 73, + c, + [151, 12], + c, + [94, 7], + c, + [307, 38], + 37, + 44, + 66, + 67, + c, + [685, 109], + 12, + 13, + 43, + 86, + 87, + c, + [349, 14], + c, + [445, 11], + c, + [84, 46], + c, + [504, 10], + c, + [348, 19], + c, + [58, 19], + 25, + 29, + 30, + c, + [346, 5], + 1, + 44, + 89, + 1, + c, + [483, 3], + c, + [3, 6], + c, + [339, 3], + c, + [121, 3], + c, + [496, 3], + c, + [8, 5], + c, + [349, 8], + c, + [348, 4], + 78, + 79, + 81, + c, + [568, 5], + 15, + 42, + 44, + 84, + 85, + 89, + 2, + 5, + 2, + 5, + c, + [359, 19], + c, + [19, 11], + c, + [142, 8], + c, + [337, 30], + c, + [180, 26], + c, + [284, 3], + c, + [287, 4], + c, + [4, 4], + 25, + 28, + 25, + 28, + c, + [4, 4], + c, + [517, 8], + c, + [168, 6], + c, + [507, 14], + c, + [506, 3], + c, + [189, 7], + c, + [162, 8], + s, + [4, 5, 1], + c, + [190, 8], + c, + [1024, 6], + s, + [4, 9, 1], + c, + [22, 3], + s, + [39, 4, 1], + 44, + 80, + c, + [19, 18], + c, + [18, 37], + c, + [16, 3], + c, + [88, 3], + 76, + 77, + c, + [292, 6], + c, + [3, 6], + c, + [144, 14], + c, + [14, 15], + c, + [480, 39], + c, + [21, 21], + c, + [549, 6], + c, + [6, 3], + 1, + c, + [111, 12], + c, + [234, 7], + c, + [7, 7], + c, + [238, 10], + c, + [179, 11], + c, + [15, 40], + 6, + 8, + c, + [209, 7], + 78, + 79, + c, + [374, 4], + c, + [313, 14], + c, + 
[271, 43], + c, + [164, 4], + c, + [169, 4], + c, + [78, 12], + 43 +]), + type: u([ + s, + [2, 18], + 0, + 0, + 1, + c, + [21, 20], + s, + [0, 5], + c, + [10, 5], + s, + [2, 39], + c, + [40, 41], + c, + [41, 40], + s, + [2, 108], + c, + [148, 5], + c, + [239, 6], + c, + [159, 6], + c, + [253, 10], + c, + [176, 14], + c, + [36, 7], + c, + [197, 102], + c, + [103, 7], + c, + [108, 21], + c, + [21, 10], + c, + [423, 36], + c, + [373, 149], + c, + [158, 67], + c, + [57, 32], + c, + [322, 8], + c, + [98, 26], + c, + [489, 7], + c, + [721, 173], + c, + [462, 131], + c, + [130, 37], + c, + [375, 11], + c, + [818, 45], + c, + [223, 79], + c, + [124, 24], + c, + [986, 15], + c, + [38, 19], + c, + [57, 20], + c, + [157, 62], + c, + [443, 106], + c, + [106, 103], + c, + [103, 62], + c, + [1248, 16], + c, + [78, 6] +]), + state: u([ + 1, + 2, + 5, + 14, + 12, + 13, + 8, + 20, + 11, + 29, + 28, + 31, + 34, + 36, + 38, + 42, + 47, + 49, + 50, + 54, + 49, + 50, + 56, + 50, + 58, + 60, + 62, + 65, + 68, + 69, + 70, + 67, + 72, + 71, + 73, + 74, + 78, + 79, + 82, + 83, + 82, + 84, + 50, + 84, + 50, + 86, + 92, + 94, + 93, + 97, + 69, + 70, + 98, + 100, + 101, + 103, + 105, + 106, + 107, + 110, + 111, + 117, + 124, + 126, + 123, + 133, + 131, + 82, + 137, + 142, + 94, + 93, + 143, + 101, + 133, + 146, + 82, + 147, + 50, + 149, + 154, + 153, + 155, + 111, + 124, + 126, + 162, + 163, + 124, + 126 +]), + mode: u([ + s, + [2, 18], + s, + [1, 18], + c, + [21, 4], + s, + [2, 36], + c, + [42, 5], + c, + [38, 34], + c, + [77, 38], + s, + [2, 108], + s, + [1, 20], + c, + [30, 15], + c, + [134, 100], + c, + [106, 4], + c, + [335, 26], + c, + [151, 16], + c, + [376, 48], + c, + [347, 120], + c, + [63, 75], + c, + [13, 9], + c, + [23, 4], + c, + [4, 3], + c, + [587, 6], + c, + [427, 12], + c, + [9, 15], + c, + [335, 13], + c, + [389, 39], + c, + [45, 43], + c, + [509, 77], + c, + [762, 121], + c, + [129, 9], + c, + [756, 14], + c, + [334, 14], + c, + [41, 6], + c, + [367, 5], + c, + [784, 37], + c, + [208, 63], + c, + [1142, 20], + c, + [1081, 10], + c, + [487, 14], + c, + [22, 9], + c, + [151, 17], + c, + [221, 10], + c, + [803, 156], + c, + [318, 61], + c, + [216, 50], + c, + [457, 7], + c, + [455, 38], + c, + [123, 34], + c, + [1206, 8], + 1 +]), + goto: u([ + s, + [10, 18], + 4, + 3, + 10, + 6, + 7, + 9, + s, + [15, 5, 1], + 24, + 22, + 23, + 25, + 26, + 27, + 21, + s, + [6, 3], + 30, + s, + [11, 18], + s, + [9, 18], + 32, + 33, + s, + [13, 18], + s, + [14, 18], + 35, + 66, + 37, + s, + [16, 18], + s, + [17, 18], + s, + [18, 18], + s, + [19, 18], + s, + [20, 18], + s, + [21, 18], + s, + [22, 18], + s, + [23, 18], + 39, + 40, + 41, + s, + [43, 4, 1], + 48, + 33, + 51, + 53, + 52, + 55, + 33, + 51, + 57, + 33, + 51, + 59, + 61, + s, + [56, 3], + s, + [57, 3], + s, + [58, 3], + 4, + 63, + 64, + 66, + 33, + 21, + 3, + s, + [12, 18], + s, + [29, 18], + s, + [109, 26], + s, + [15, 18], + s, + [30, 18], + 33, + 67, + 75, + 76, + 77, + s, + [31, 11], + c, + [13, 9], + s, + [35, 3], + s, + [36, 3], + 80, + 81, + 21, + c, + [3, 3], + s, + [32, 3], + s, + [33, 3], + s, + [34, 3], + s, + [54, 11], + 33, + 51, + s, + [54, 7], + s, + [55, 18], + s, + [60, 20], + s, + [107, 25], + s, + [108, 25], + s, + [126, 24], + s, + [127, 24], + s, + [50, 11], + 33, + 51, + s, + [50, 7], + s, + [51, 18], + s, + [52, 18], + s, + [53, 18], + 61, + 85, + s, + [41, 12], + 87, + s, + [41, 6], + 43, + 43, + 89, + 88, + 44, + 44, + 90, + 91, + 132, + 96, + 132, + 95, + s, + [72, 3], + 33, + s, + [7, 3], + s, + [8, 3], + s, + [74, 4], + 99, + s, + 
[90, 8], + 102, + s, + [90, 4], + 81, + 81, + 104, + s, + [61, 11], + 33, + s, + [61, 7], + s, + [62, 18], + s, + [71, 12], + 109, + s, + [71, 6], + 108, + 71, + s, + [24, 18], + s, + [25, 18], + s, + [37, 18], + s, + [38, 18], + s, + [26, 18], + s, + [27, 18], + s, + [117, 3], + s, + [112, 22], + s, + [113, 21], + s, + [28, 18], + s, + [59, 20], + s, + [39, 18], + 42, + 42, + s, + [40, 18], + 116, + 115, + 113, + 114, + 49, + 49, + 1, + 2, + 5, + 124, + 21, + 131, + 131, + 118, + s, + [128, 3], + s, + [130, 3], + s, + [73, 4], + 119, + 121, + 120, + 77, + 77, + 122, + 77, + 77, + s, + [83, 3], + s, + [106, 3], + 130, + 106, + 106, + 127, + 129, + 128, + 125, + 106, + 106, + 132, + s, + [116, 3], + 80, + 81, + 134, + 21, + 136, + 135, + 80, + 80, + s, + [70, 19], + s, + [65, 11], + 109, + s, + [65, 7], + s, + [64, 18], + s, + [68, 19], + s, + [69, 18], + 139, + 140, + 138, + s, + [118, 3], + 141, + s, + [122, 4], + 45, + 45, + 46, + 46, + 47, + 47, + 48, + 48, + c, + [494, 4], + s, + [129, 3], + s, + [75, 4], + 144, + c, + [487, 13], + 145, + s, + [76, 4], + c, + [153, 7], + s, + [89, 14], + 148, + 33, + 51, + s, + [100, 6], + 150, + 151, + 152, + s, + [100, 9], + s, + [95, 18], + s, + [96, 18], + s, + [97, 18], + s, + [90, 7], + s, + [87, 3], + s, + [88, 3], + s, + [114, 3], + s, + [115, 3], + s, + [78, 14], + s, + [79, 14], + s, + [63, 18], + s, + [110, 21], + s, + [111, 21], + c, + [526, 4], + s, + [123, 4], + 125, + s, + [82, 3], + s, + [84, 3], + s, + [85, 3], + s, + [86, 3], + s, + [104, 7], + s, + [105, 7], + s, + [94, 10], + 156, + s, + [94, 4], + s, + [101, 15], + s, + [102, 15], + s, + [103, 15], + 158, + 159, + 157, + 92, + 92, + 130, + 92, + c, + [465, 3], + 161, + 140, + 160, + s, + [93, 14], + s, + [98, 18], + s, + [99, 18], + s, + [90, 7], + s, + [120, 3], + 112, + s, + [121, 3], + 91, + 91, + 130, + 91, + c, + [74, 3], + s, + [119, 3], + 141 +]) +}), +defaultActions: bda({ + idx: u([ + 0, + 3, + 5, + 7, + 8, + s, + [10, 8, 1], + 25, + 26, + 27, + s, + [30, 6, 1], + 37, + 40, + 41, + 44, + 45, + 46, + s, + [48, 6, 1], + 55, + 56, + 57, + 60, + 66, + 67, + 68, + 72, + s, + [74, 6, 1], + s, + [81, 7, 1], + s, + [89, 4, 1], + 95, + 96, + 97, + 100, + 104, + 105, + 107, + 108, + 109, + s, + [112, 5, 1], + 118, + 119, + 122, + 124, + s, + [127, 13, 1], + s, + [141, 8, 1], + 150, + 151, + 152, + s, + [156, 4, 1], + 161 +]), + goto: u([ + 10, + 6, + 9, + 13, + 14, + s, + [16, 8, 1], + 56, + 57, + 58, + 3, + 12, + 29, + 109, + 15, + 30, + 67, + 35, + 36, + 32, + 33, + 34, + 55, + 60, + 107, + 108, + 126, + 127, + 51, + 52, + 53, + 43, + 7, + 8, + 74, + 62, + 24, + 25, + 37, + 38, + 26, + 27, + 112, + 113, + 28, + 59, + 39, + 42, + 40, + 49, + 1, + 2, + 5, + 128, + 130, + 73, + 83, + 80, + 70, + 64, + 68, + 69, + 122, + s, + [45, 4, 1], + 129, + 75, + 76, + 89, + 95, + 96, + 97, + 90, + 87, + 88, + 114, + 115, + 78, + 79, + 63, + 110, + 111, + 123, + 125, + 82, + 84, + 85, + 86, + 104, + 105, + 101, + 102, + 103, + 93, + 98, + 99, + 90, + 121 +]) +}), +parseError: function parseError(str, hash, ExceptionClass) { + if (hash.recoverable && typeof this.trace === 'function') { + this.trace(str); + hash.destroy(); // destroy... well, *almost*! 
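
A usage note on the default `parseError` just above: recoverable errors are handed to `this.trace()` (when available) and their info hash destroyed, while everything else is thrown as a `JisonParserError`. A minimal, hedged sketch of catching that at the call site, assuming `parser` and a grammar source string `grammarSource` are in scope:

```js
// Non-recoverable parse errors surface as JisonParserError (see the
// `throw new ExceptionClass(str, hash)` branch), so callers can tell
// grammar problems apart from ordinary runtime exceptions.
var result;
try {
  result = parser.parse(grammarSource);
} catch (ex) {
  if (ex instanceof parser.JisonParserError) {
    // ex.message carries the formatted error report; an info hash may
    // accompany it depending on how the error was constructed.
    console.error('grammar error:', ex.message);
  } else {
    throw ex;   // not a parse problem: re-throw untouched
  }
}
```
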
+ } else { + if (!ExceptionClass) { + ExceptionClass = this.JisonParserError; + } + throw new ExceptionClass(str, hash); + } +}, +parse: function parse(input) { + var self = this; + var stack = new Array(128); // token stack: stores token which leads to state at the same index (column storage) + var sstack = new Array(128); // state stack: stores states (column storage) + + var vstack = new Array(128); // semantic value stack + var lstack = new Array(128); // location stack + var table = this.table; + var sp = 0; // 'stack pointer': index into the stacks + var yyloc; + + var symbol = 0; + var preErrorSymbol = 0; + var lastEofErrorStateDepth = 0; + var recoveringErrorInfo = null; + var recovering = 0; // (only used when the grammar contains error recovery rules) + var TERROR = this.TERROR; + var EOF = this.EOF; + var ERROR_RECOVERY_TOKEN_DISCARD_COUNT = (this.options.errorRecoveryTokenDiscardCount | 0) || 3; + var NO_ACTION = [0, 164 /* === table.length :: ensures that anyone using this new state will fail dramatically! */]; + + var lexer; + if (this.__lexer__) { + lexer = this.__lexer__; + } else { + lexer = this.__lexer__ = Object.create(this.lexer); + } + + var sharedState_yy = { + parseError: undefined, + quoteName: undefined, + lexer: undefined, + parser: undefined, + pre_parse: undefined, + post_parse: undefined, + pre_lex: undefined, + post_lex: undefined // WARNING: must be written this way for the code expanders to work correctly in both ES5 and ES6 modes! + }; + + var ASSERT; + if (typeof assert !== 'function') { + ASSERT = function JisonAssert(cond, msg) { + if (!cond) { + throw new Error('assertion failed: ' + (msg || '***')); + } + }; + } else { + ASSERT = assert; + } + + this.yyGetSharedState = function yyGetSharedState() { + return sharedState_yy; + }; + + + this.yyGetErrorInfoTrack = function yyGetErrorInfoTrack() { + return recoveringErrorInfo; + }; + + + // shallow clone objects, straight copy of simple `src` values + // e.g. `lexer.yytext` MAY be a complex value object, + // rather than a simple string/value. + function shallow_copy(src) { + if (typeof src === 'object') { + var dst = {}; + for (var k in src) { + if (Object.prototype.hasOwnProperty.call(src, k)) { + dst[k] = src[k]; + } + } + return dst; + } + return src; + } + function shallow_copy_noclobber(dst, src) { + for (var k in src) { + if (typeof dst[k] === 'undefined' && Object.prototype.hasOwnProperty.call(src, k)) { + dst[k] = src[k]; + } + } + } + function copy_yylloc(loc) { + var rv = shallow_copy(loc); + if (rv && rv.range) { + rv.range = rv.range.slice(0); + } + return rv; + } + + // copy state + shallow_copy_noclobber(sharedState_yy, this.yy); + + sharedState_yy.lexer = lexer; + sharedState_yy.parser = this; + + + + + + // *Always* setup `yyError`, `YYRECOVERING`, `yyErrOk` and `yyClearIn` functions as it is paramount + // to have *their* closure match ours -- if we only set them up once, + // any subsequent `parse()` runs will fail in very obscure ways when + // these functions are invoked in the user action code block(s) as + // their closure will still refer to the `parse()` instance which set + // them up. Hence we MUST set them up at the start of every `parse()` run! + if (this.yyError) { + this.yyError = function yyError(str /*, ...args */) { + + + + + + + + + + + + var error_rule_depth = (this.options.parserErrorsAreRecoverable ? 
locateNearestErrorRecoveryRule(state) : -1); + var expected = this.collect_expected_token_set(state); + var hash = this.constructParseErrorInfo(str, null, expected, (error_rule_depth >= 0)); + // append to the old one? + if (recoveringErrorInfo) { + var esp = recoveringErrorInfo.info_stack_pointer; + + recoveringErrorInfo.symbol_stack[esp] = symbol; + var v = this.shallowCopyErrorInfo(hash); + v.yyError = true; + v.errorRuleDepth = error_rule_depth; + v.recovering = recovering; + // v.stackSampleLength = error_rule_depth + EXTRA_STACK_SAMPLE_DEPTH; + + recoveringErrorInfo.value_stack[esp] = v; + recoveringErrorInfo.location_stack[esp] = copy_yylloc(lexer.yylloc); + recoveringErrorInfo.state_stack[esp] = newState || NO_ACTION[1]; + + ++esp; + recoveringErrorInfo.info_stack_pointer = esp; + } else { + recoveringErrorInfo = this.shallowCopyErrorInfo(hash); + recoveringErrorInfo.yyError = true; + recoveringErrorInfo.errorRuleDepth = error_rule_depth; + recoveringErrorInfo.recovering = recovering; + } + + + // Add any extra args to the hash under the name `extra_error_attributes`: + var args = Array.prototype.slice.call(arguments, 1); + if (args.length) { + hash.extra_error_attributes = args; + } + + var r = this.parseError(str, hash, this.JisonParserError); + return r; + }; + } + + + + + + + + // Does the shared state override the default `parseError` that already comes with this instance? + if (typeof sharedState_yy.parseError === 'function') { + this.parseError = function parseErrorAlt(str, hash, ExceptionClass) { + if (!ExceptionClass) { + ExceptionClass = this.JisonParserError; + } + return sharedState_yy.parseError.call(this, str, hash, ExceptionClass); + }; + } else { + this.parseError = this.originalParseError; + } + + // Does the shared state override the default `quoteName` that already comes with this instance? + if (typeof sharedState_yy.quoteName === 'function') { + this.quoteName = function quoteNameAlt(id_str) { + return sharedState_yy.quoteName.call(this, id_str); + }; + } else { + this.quoteName = this.originalQuoteName; + } + + // set up the cleanup function; make it an API so that external code can re-use this one in case of + // calamities or when the `%options no-try-catch` option has been specified for the grammar, in which + // case this parse() API method doesn't come with a `finally { ... }` block any more! + // + // NOTE: as this API uses parse() as a closure, it MUST be set again on every parse() invocation, + // or else your `sharedState`, etc. references will be *wrong*! + this.cleanupAfterParse = function parser_cleanupAfterParse(resultValue, invoke_post_methods, do_not_nuke_errorinfos) { + var rv; + + if (invoke_post_methods) { + var hash; + + if (sharedState_yy.post_parse || this.post_parse) { + // create an error hash info instance: we re-use this API in a **non-error situation** + // as this one delivers all parser internals ready for access by userland code. + hash = this.constructParseErrorInfo(null /* no error! */, null /* no exception! 
*/, null, false); + } + + if (sharedState_yy.post_parse) { + rv = sharedState_yy.post_parse.call(this, sharedState_yy, resultValue, hash); + if (typeof rv !== 'undefined') resultValue = rv; + } + if (this.post_parse) { + rv = this.post_parse.call(this, sharedState_yy, resultValue, hash); + if (typeof rv !== 'undefined') resultValue = rv; + } + + // cleanup: + if (hash && hash.destroy) { + hash.destroy(); + } + } + + if (this.__reentrant_call_depth > 1) return resultValue; // do not (yet) kill the sharedState when this is a reentrant run. + + // clean up the lingering lexer structures as well: + if (lexer.cleanupAfterLex) { + lexer.cleanupAfterLex(do_not_nuke_errorinfos); + } + + // prevent lingering circular references from causing memory leaks: + if (sharedState_yy) { + sharedState_yy.lexer = undefined; + sharedState_yy.parser = undefined; + if (lexer.yy === sharedState_yy) { + lexer.yy = undefined; + } + } + sharedState_yy = undefined; + this.parseError = this.originalParseError; + this.quoteName = this.originalQuoteName; + + // nuke the vstack[] array at least as that one will still reference obsoleted user values. + // To be safe, we nuke the other internal stack columns as well... + stack.length = 0; // fastest way to nuke an array without overly bothering the GC + sstack.length = 0; + lstack.length = 0; + vstack.length = 0; + sp = 0; + + // nuke the error hash info instances created during this run. + // Userland code must COPY any data/references + // in the error hash instance(s) it is more permanently interested in. + if (!do_not_nuke_errorinfos) { + for (var i = this.__error_infos.length - 1; i >= 0; i--) { + var el = this.__error_infos[i]; + if (el && typeof el.destroy === 'function') { + el.destroy(); + } + } + this.__error_infos.length = 0; + + + for (var i = this.__error_recovery_infos.length - 1; i >= 0; i--) { + var el = this.__error_recovery_infos[i]; + if (el && typeof el.destroy === 'function') { + el.destroy(); + } + } + this.__error_recovery_infos.length = 0; + + if (recoveringErrorInfo && typeof recoveringErrorInfo.destroy === 'function') { + recoveringErrorInfo.destroy(); + recoveringErrorInfo = undefined; + } + + + } + + return resultValue; + }; + + // merge yylloc info into a new yylloc instance. + // + // `first_index` and `last_index` MAY be UNDEFINED/NULL or these are indexes into the `lstack[]` location stack array. + // + // `first_yylloc` and `last_yylloc` MAY be UNDEFINED/NULL or explicit (custom or regular) `yylloc` instances, in which + // case these override the corresponding first/last indexes. + // + // `dont_look_back` is an optional flag (default: FALSE), which instructs this merge operation NOT to search + // through the parse location stack for a location, which would otherwise be used to construct the new (epsilon!) + // yylloc info. + // + // Note: epsilon rule's yylloc situation is detected by passing both `first_index` and `first_yylloc` as UNDEFINED/NULL. 
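
The merge rules documented above boil down to: take the start of the first location and the end of the last. Before the implementation itself, here is a simplified sketch of that common (non-epsilon) case, using the `yylloc` shape described later in this file (`first_line`, `first_column`, `last_line`, `last_column`, `range`); `mergeSpans` is an illustrative stand-in, not the kernel routine, which also handles missing entries, epsilon rules, and custom `yylloc` objects:

```js
// Simplified sketch of what yyMergeLocationInfo() produces when both
// locations are present: a span starting where the first one starts
// and ending where the last one ends.
function mergeSpans(l1, l2) {
  return {
    first_line:   l1.first_line,
    first_column: l1.first_column,
    last_line:    l2.last_line,
    last_column:  l2.last_column,
    range: [l1.range[0], l2.range[1]]
  };
}

var a = { first_line: 1, first_column: 0, last_line: 1, last_column: 4, range: [0, 4] };
var b = { first_line: 1, first_column: 5, last_line: 2, last_column: 3, range: [5, 12] };
console.log(mergeSpans(a, b));
// => { first_line: 1, first_column: 0, last_line: 2, last_column: 3, range: [ 0, 12 ] }
```
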
+ this.yyMergeLocationInfo = function parser_yyMergeLocationInfo(first_index, last_index, first_yylloc, last_yylloc, dont_look_back) { + var i1 = first_index | 0, + i2 = last_index | 0; + var l1 = first_yylloc, + l2 = last_yylloc; + var rv; + + // rules: + // - first/last yylloc entries override first/last indexes + + if (!l1) { + if (first_index != null) { + for (var i = i1; i <= i2; i++) { + l1 = lstack[i]; + if (l1) { + break; + } + } + } + } + + if (!l2) { + if (last_index != null) { + for (var i = i2; i >= i1; i--) { + l2 = lstack[i]; + if (l2) { + break; + } + } + } + } + + // - detect if an epsilon rule is being processed and act accordingly: + if (!l1 && first_index == null) { + // epsilon rule span merger. With optional look-ahead in l2. + if (!dont_look_back) { + for (var i = (i1 || sp) - 1; i >= 0; i--) { + l1 = lstack[i]; + if (l1) { + break; + } + } + } + if (!l1) { + if (!l2) { + // when we still don't have any valid yylloc info, we're looking at an epsilon rule + // without look-ahead and no preceding terms and/or `dont_look_back` set: + // in that case we ca do nothing but return NULL/UNDEFINED: + return undefined; + } else { + // shallow-copy L2: after all, we MAY be looking + // at unconventional yylloc info objects... + rv = shallow_copy(l2); + if (rv.range) { + // shallow copy the yylloc ranges info to prevent us from modifying the original arguments' entries: + rv.range = rv.range.slice(0); + } + return rv; + } + } else { + // shallow-copy L1, then adjust first col/row 1 column past the end. + rv = shallow_copy(l1); + rv.first_line = rv.last_line; + rv.first_column = rv.last_column; + if (rv.range) { + // shallow copy the yylloc ranges info to prevent us from modifying the original arguments' entries: + rv.range = rv.range.slice(0); + rv.range[0] = rv.range[1]; + } + + if (l2) { + // shallow-mixin L2, then adjust last col/row accordingly. + shallow_copy_noclobber(rv, l2); + rv.last_line = l2.last_line; + rv.last_column = l2.last_column; + if (rv.range && l2.range) { + rv.range[1] = l2.range[1]; + } + } + return rv; + } + } + + if (!l1) { + l1 = l2; + l2 = null; + } + if (!l1) { + return undefined; + } + + // shallow-copy L1|L2, before we try to adjust the yylloc values: after all, we MAY be looking + // at unconventional yylloc info objects... + rv = shallow_copy(l1); + + // first_line: ..., + // first_column: ..., + // last_line: ..., + // last_column: ..., + if (rv.range) { + // shallow copy the yylloc ranges info to prevent us from modifying the original arguments' entries: + rv.range = rv.range.slice(0); + } + + if (l2) { + shallow_copy_noclobber(rv, l2); + rv.last_line = l2.last_line; + rv.last_column = l2.last_column; + if (rv.range && l2.range) { + rv.range[1] = l2.range[1]; + } + } + + return rv; + }; + + // NOTE: as this API uses parse() as a closure, it MUST be set again on every parse() invocation, + // or else your `lexer`, `sharedState`, etc. references will be *wrong*! 
+ this.constructParseErrorInfo = function parser_constructParseErrorInfo(msg, ex, expected, recoverable) { + var pei = { + errStr: msg, + exception: ex, + text: lexer.match, + value: lexer.yytext, + token: this.describeSymbol(symbol) || symbol, + token_id: symbol, + line: lexer.yylineno, + loc: copy_yylloc(lexer.yylloc), + expected: expected, + recoverable: recoverable, + state: state, + action: action, + new_state: newState, + symbol_stack: stack, + state_stack: sstack, + value_stack: vstack, + location_stack: lstack, + stack_pointer: sp, + yy: sharedState_yy, + lexer: lexer, + parser: this, + + // and make sure the error info doesn't stay due to potential + // ref cycle via userland code manipulations. + // These would otherwise all be memory leak opportunities! + // + // Note that only array and object references are nuked as those + // constitute the set of elements which can produce a cyclic ref. + // The rest of the members is kept intact as they are harmless. + destroy: function destructParseErrorInfo() { + // remove cyclic references added to error info: + // info.yy = null; + // info.lexer = null; + // info.value = null; + // info.value_stack = null; + // ... + var rec = !!this.recoverable; + for (var key in this) { + if (this.hasOwnProperty(key) && typeof key === 'object') { + this[key] = undefined; + } + } + this.recoverable = rec; + } + }; + // track this instance so we can `destroy()` it once we deem it superfluous and ready for garbage collection! + this.__error_infos.push(pei); + return pei; + }; + + // clone some parts of the (possibly enhanced!) errorInfo object + // to give them some persistence. + this.shallowCopyErrorInfo = function parser_shallowCopyErrorInfo(p) { + var rv = shallow_copy(p); + + // remove the large parts which can only cause cyclic references + // and are otherwise available from the parser kernel anyway. + delete rv.sharedState_yy; + delete rv.parser; + delete rv.lexer; + + // lexer.yytext MAY be a complex value object, rather than a simple string/value: + rv.value = shallow_copy(rv.value); + + // yylloc info: + rv.loc = copy_yylloc(rv.loc); + + // the 'expected' set won't be modified, so no need to clone it: + //rv.expected = rv.expected.slice(0); + + //symbol stack is a simple array: + rv.symbol_stack = rv.symbol_stack.slice(0); + // ditto for state stack: + rv.state_stack = rv.state_stack.slice(0); + // clone the yylloc's in the location stack?: + rv.location_stack = rv.location_stack.map(copy_yylloc); + // and the value stack may carry both simple and complex values: + // shallow-copy the latter. + rv.value_stack = rv.value_stack.map(shallow_copy); + + // and we don't bother with the sharedState_yy reference: + //delete rv.yy; + + // now we prepare for tracking the COMBINE actions + // in the error recovery code path: + // + // as we want to keep the maximum error info context, we + // *scan* the state stack to find the first *empty* slot. + // This position will surely be AT OR ABOVE the current + // stack pointer, but we want to keep the 'used but discarded' + // part of the parse stacks *intact* as those slots carry + // error context that may be useful when you want to produce + // very detailed error diagnostic reports. + // + // ### Purpose of each stack pointer: + // + // - stack_pointer: points at the top of the parse stack + // **as it existed at the time of the error + // occurrence, i.e. 
at the time the stack + // snapshot was taken and copied into the + // errorInfo object.** + // - base_pointer: the bottom of the **empty part** of the + // stack, i.e. **the start of the rest of + // the stack space /above/ the existing + // parse stack. This section will be filled + // by the error recovery process as it + // travels the parse state machine to + // arrive at the resolving error recovery rule.** + // - info_stack_pointer: + // this stack pointer points to the **top of + // the error ecovery tracking stack space**, i.e. + // this stack pointer takes up the role of + // the `stack_pointer` for the error recovery + // process. Any mutations in the **parse stack** + // are **copy-appended** to this part of the + // stack space, keeping the bottom part of the + // stack (the 'snapshot' part where the parse + // state at the time of error occurrence was kept) + // intact. + // - root_failure_pointer: + // copy of the `stack_pointer`... + // + for (var i = rv.stack_pointer; typeof rv.state_stack[i] !== 'undefined'; i++) { + // empty + } + rv.base_pointer = i; + rv.info_stack_pointer = i; + + rv.root_failure_pointer = rv.stack_pointer; + + // track this instance so we can `destroy()` it once we deem it superfluous and ready for garbage collection! + this.__error_recovery_infos.push(rv); + + return rv; + }; + + function lex() { + var token = lexer.lex(); + // if token isn't its numeric value, convert + if (typeof token !== 'number') { + token = self.symbols_[token] || token; + } + + return token || EOF; + } + + + var state, action, r, t; + var yyval = { + $: true, + _$: undefined, + yy: sharedState_yy + }; + var p; + var yyrulelen; + var this_production; + var newState; + var retval = false; + + + // Return the rule stack depth where the nearest error rule can be found. + // Return -1 when no error recovery rule was found. + function locateNearestErrorRecoveryRule(state) { + var stack_probe = sp - 1; + var depth = 0; + + // try to recover from error + for (;;) { + // check for error recovery rule in this state + + + + + + + + + + var t = table[state][TERROR] || NO_ACTION; + if (t[0]) { + // We need to make sure we're not cycling forever: + // once we hit EOF, even when we `yyerrok()` an error, we must + // prevent the core from running forever, + // e.g. when parent rules are still expecting certain input to + // follow after this, for example when you handle an error inside a set + // of braces which are matched by a parent rule in your grammar. + // + // Hence we require that every error handling/recovery attempt + // *after we've hit EOF* has a diminishing state stack: this means + // we will ultimately have unwound the state stack entirely and thus + // terminate the parse in a controlled fashion even when we have + // very complex error/recovery code interplay in the core + user + // action code blocks: + + + + + + + + + + if (symbol === EOF) { + if (!lastEofErrorStateDepth) { + lastEofErrorStateDepth = sp - 1 - depth; + } else if (lastEofErrorStateDepth <= sp - 1 - depth) { + + + + + + + + + + --stack_probe; // popStack(1): [symbol, action] + state = sstack[stack_probe]; + ++depth; + continue; + } + } + return depth; + } + if (state === 0 /* $accept rule */ || stack_probe < 1) { + + + + + + + + + + return -1; // No suitable error recovery rule available. 
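
For reference, the search performed by `locateNearestErrorRecoveryRule()` can be summarized by the following simplified sketch: walk down the state stack and count how many entries must be popped before reaching a state that can shift the special `error` token. The function name and parameter list below are illustrative only; the real routine above additionally checks that the table entry encodes an actual action and guards against endless recovery cycles once EOF has been hit:

```js
// Simplified sketch of the error-recovery-rule search.
function findErrorRecoveryDepth(table, sstack, sp, TERROR) {
  var stackProbe = sp - 1;
  var depth = 0;
  for (;;) {
    var state = sstack[stackProbe];
    if (table[state] && table[state][TERROR]) {
      return depth;          // this state has a transition on `error`
    }
    if (state === 0 /* $accept */ || stackProbe < 1) {
      return -1;             // no recovery rule anywhere below us
    }
    --stackProbe;            // pop one stack entry and keep looking
    ++depth;
  }
}
```
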
+ } + --stack_probe; // popStack(1): [symbol, action] + state = sstack[stack_probe]; + ++depth; + } + } + + + try { + this.__reentrant_call_depth++; + + lexer.setInput(input, sharedState_yy); + + yyloc = lexer.yylloc; + lstack[sp] = yyloc; + vstack[sp] = null; + sstack[sp] = 0; + stack[sp] = 0; + ++sp; + + + + + + if (this.pre_parse) { + this.pre_parse.call(this, sharedState_yy); + } + if (sharedState_yy.pre_parse) { + sharedState_yy.pre_parse.call(this, sharedState_yy); + } + + newState = sstack[sp - 1]; + for (;;) { + // retrieve state number from top of stack + state = newState; // sstack[sp - 1]; + + // use default actions if available + if (this.defaultActions[state]) { + action = 2; + newState = this.defaultActions[state]; + } else { + // The single `==` condition below covers both these `===` comparisons in a single + // operation: + // + // if (symbol === null || typeof symbol === 'undefined') ... + if (!symbol) { + symbol = lex(); + } + // read action for current state and first input + t = (table[state] && table[state][symbol]) || NO_ACTION; + newState = t[1]; + action = t[0]; + + + + + + + + + + + + // handle parse error + if (!action) { + // first see if there's any chance at hitting an error recovery rule: + var error_rule_depth = locateNearestErrorRecoveryRule(state); + var errStr = null; + var errSymbolDescr = (this.describeSymbol(symbol) || symbol); + var expected = this.collect_expected_token_set(state); + + if (!recovering) { + // Report error + if (typeof lexer.yylineno === 'number') { + errStr = 'Parse error on line ' + (lexer.yylineno + 1) + ': '; + } else { + errStr = 'Parse error: '; + } + + if (typeof lexer.showPosition === 'function') { + errStr += '\n' + lexer.showPosition(79 - 10, 10) + '\n'; + } + if (expected.length) { + errStr += 'Expecting ' + expected.join(', ') + ', got unexpected ' + errSymbolDescr; + } else { + errStr += 'Unexpected ' + errSymbolDescr; + } + + p = this.constructParseErrorInfo(errStr, null, expected, (error_rule_depth >= 0)); + + // cleanup the old one before we start the new error info track: + if (recoveringErrorInfo && typeof recoveringErrorInfo.destroy === 'function') { + recoveringErrorInfo.destroy(); + } + recoveringErrorInfo = this.shallowCopyErrorInfo(p); + + r = this.parseError(p.errStr, p, this.JisonParserError); + + + + + + + + + + // Protect against overly blunt userland `parseError` code which *sets* + // the `recoverable` flag without properly checking first: + // we always terminate the parse when there's no recovery rule available anyhow! + if (!p.recoverable || error_rule_depth < 0) { + retval = r; + break; + } else { + // TODO: allow parseError callback to edit symbol and or state at the start of the error recovery process... 
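
Because the error path above funnels everything through `this.parseError()`, and the kernel installs `sharedState_yy.parseError` as an override when one is present (see the check earlier in `parse()`), callers can intercept parse errors without touching the generated code. A hedged sketch, assuming `parser` is this module's parser instance and that its shared-state seed object `parser.yy` may need to be created first:

```js
// Route parse errors through a custom handler picked up from the shared `yy` state.
parser.yy = parser.yy || {};
parser.yy.parseError = function (str, hash, ExceptionClass) {
  console.error(str);
  if (!hash.recoverable) {
    // mirror the default behaviour for fatal errors: throw the parser's error class
    throw new (ExceptionClass || parser.JisonParserError)(str, hash);
  }
  // returning normally lets the error-recovery machinery above continue
};
```
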
+ } + } + + + + + + + + + + + var esp = recoveringErrorInfo.info_stack_pointer; + + // just recovered from another error + if (recovering === ERROR_RECOVERY_TOKEN_DISCARD_COUNT && error_rule_depth >= 0) { + // SHIFT current lookahead and grab another + recoveringErrorInfo.symbol_stack[esp] = symbol; + recoveringErrorInfo.value_stack[esp] = shallow_copy(lexer.yytext); + recoveringErrorInfo.location_stack[esp] = copy_yylloc(lexer.yylloc); + recoveringErrorInfo.state_stack[esp] = newState; // push state + ++esp; + + // Pick up the lexer details for the current symbol as that one is not 'look-ahead' any more: + + + + yyloc = lexer.yylloc; + + preErrorSymbol = 0; + symbol = lex(); + + + + + + + + + + } + + // try to recover from error + if (error_rule_depth < 0) { + ASSERT(recovering > 0); + recoveringErrorInfo.info_stack_pointer = esp; + + // barf a fatal hairball when we're out of look-ahead symbols and none hit a match + // while we are still busy recovering from another error: + var po = this.__error_infos[this.__error_infos.length - 1]; + if (!po) { + p = this.constructParseErrorInfo('Parsing halted while starting to recover from another error.', null, expected, false); + } else { + p = this.constructParseErrorInfo('Parsing halted while starting to recover from another error. Previous error which resulted in this fatal result: ' + po.errStr, null, expected, false); + p.extra_error_attributes = po; + } + retval = this.parseError(p.errStr, p, this.JisonParserError); + break; + } + + preErrorSymbol = (symbol === TERROR ? 0 : symbol); // save the lookahead token + symbol = TERROR; // insert generic error symbol as new lookahead + + const EXTRA_STACK_SAMPLE_DEPTH = 3; + + // REDUCE/COMBINE the pushed terms/tokens to a new ERROR token: + recoveringErrorInfo.symbol_stack[esp] = preErrorSymbol; + if (errStr) { + recoveringErrorInfo.value_stack[esp] = { + yytext: shallow_copy(lexer.yytext), + errorRuleDepth: error_rule_depth, + errorStr: errStr, + errorSymbolDescr: errSymbolDescr, + expectedStr: expected, + stackSampleLength: error_rule_depth + EXTRA_STACK_SAMPLE_DEPTH + }; + + + + + + + + + + } else { + recoveringErrorInfo.value_stack[esp] = { + yytext: shallow_copy(lexer.yytext), + errorRuleDepth: error_rule_depth, + stackSampleLength: error_rule_depth + EXTRA_STACK_SAMPLE_DEPTH + }; + } + recoveringErrorInfo.location_stack[esp] = copy_yylloc(lexer.yylloc); + recoveringErrorInfo.state_stack[esp] = newState || NO_ACTION[1]; + + ++esp; + recoveringErrorInfo.info_stack_pointer = esp; + + yyval.$ = recoveringErrorInfo; + yyval._$ = undefined; + + yyrulelen = error_rule_depth; + + + + + + + + + + r = this.performAction.call(yyval, yyloc, NO_ACTION[1], sp - 1, vstack, lstack); + + if (typeof r !== 'undefined') { + retval = r; + break; + } + + // pop off stack + sp -= yyrulelen; + + // and move the top entries + discarded part of the parse stacks onto the error info stack: + for (var idx = sp - EXTRA_STACK_SAMPLE_DEPTH, top = idx + yyrulelen; idx < top; idx++, esp++) { + recoveringErrorInfo.symbol_stack[esp] = stack[idx]; + recoveringErrorInfo.value_stack[esp] = shallow_copy(vstack[idx]); + recoveringErrorInfo.location_stack[esp] = copy_yylloc(lstack[idx]); + recoveringErrorInfo.state_stack[esp] = sstack[idx]; + } + + recoveringErrorInfo.symbol_stack[esp] = TERROR; + recoveringErrorInfo.value_stack[esp] = shallow_copy(yyval.$); + recoveringErrorInfo.location_stack[esp] = copy_yylloc(yyval._$); + + // goto new state = table[STATE][NONTERMINAL] + newState = sstack[sp - 1]; + + if 
(this.defaultActions[newState]) { + recoveringErrorInfo.state_stack[esp] = this.defaultActions[newState]; + } else { + t = (table[newState] && table[newState][symbol]) || NO_ACTION; + recoveringErrorInfo.state_stack[esp] = t[1]; + } + + ++esp; + recoveringErrorInfo.info_stack_pointer = esp; + + // allow N (default: 3) real symbols to be shifted before reporting a new error + recovering = ERROR_RECOVERY_TOKEN_DISCARD_COUNT; + + + + + + + + + + + // Now duplicate the standard parse machine here, at least its initial + // couple of rounds until the TERROR symbol is **pushed onto the parse stack**, + // as we wish to push something special then! + + + // Run the state machine in this copy of the parser state machine + // until we *either* consume the error symbol (and its related information) + // *or* we run into another error while recovering from this one + // *or* we execute a `reduce` action which outputs a final parse + // result (yes, that MAY happen!)... + + ASSERT(recoveringErrorInfo); + ASSERT(symbol === TERROR); + while (symbol) { + // retrieve state number from top of stack + state = newState; // sstack[sp - 1]; + + // use default actions if available + if (this.defaultActions[state]) { + action = 2; + newState = this.defaultActions[state]; + } else { + // read action for current state and first input + t = (table[state] && table[state][symbol]) || NO_ACTION; + newState = t[1]; + action = t[0]; + + + + + + + + + + + // encountered another parse error? If so, break out to main loop + // and take it from there! + if (!action) { + newState = state; + break; + } + } + + + + + + + + + + + switch (action) { + // catch misc. parse failures: + default: + // this shouldn't happen, unless resolve defaults are off + if (action instanceof Array) { + p = this.constructParseErrorInfo('Parse Error: multiple actions possible at state: ' + state + ', token: ' + symbol, null, null, false); + retval = this.parseError(p.errStr, p, this.JisonParserError); + // signal end of error recovery loop AND end of outer parse loop + action = 3; + break; + } + // Another case of better safe than sorry: in case state transitions come out of another error recovery process + // or a buggy LUT (LookUp Table): + p = this.constructParseErrorInfo('Parsing halted. 
No viable error recovery approach available due to internal system failure.', null, null, false); + retval = this.parseError(p.errStr, p, this.JisonParserError); + // signal end of error recovery loop AND end of outer parse loop + action = 3; + break; + + // shift: + case 1: + stack[sp] = symbol; + //vstack[sp] = lexer.yytext; + ASSERT(recoveringErrorInfo); + vstack[sp] = recoveringErrorInfo; + //lstack[sp] = copy_yylloc(lexer.yylloc); + lstack[sp] = this.yyMergeLocationInfo(null, null, recoveringErrorInfo.loc, lexer.yylloc, true); + sstack[sp] = newState; // push state + ++sp; + symbol = 0; + if (!preErrorSymbol) { // normal execution / no error + // Pick up the lexer details for the current symbol as that one is not 'look-ahead' any more: + + + + yyloc = lexer.yylloc; + + if (recovering > 0) { + recovering--; + + + + + + + + + + } + } else { + // error just occurred, resume old lookahead f/ before error, *unless* that drops us straight back into error mode: + symbol = preErrorSymbol; + preErrorSymbol = 0; + + + + + + + + + + // read action for current state and first input + t = (table[newState] && table[newState][symbol]) || NO_ACTION; + if (!t[0] || symbol === TERROR) { + // forget about that symbol and move forward: this wasn't a 'forgot to insert' error type where + // (simple) stuff might have been missing before the token which caused the error we're + // recovering from now... + // + // Also check if the LookAhead symbol isn't the ERROR token we set as part of the error + // recovery, for then this we would we idling (cycling) on the error forever. + // Yes, this does not take into account the possibility that the *lexer* may have + // produced a *new* TERROR token all by itself, but that would be a very peculiar grammar! + + + + + + + + + + symbol = 0; + } + } + + // once we have pushed the special ERROR token value, we're done in this inner loop! + break; + + // reduce: + case 2: + this_production = this.productions_[newState - 1]; // `this.productions_[]` is zero-based indexed while states start from 1 upwards... + yyrulelen = this_production[1]; + + + + + + + + + + + r = this.performAction.call(yyval, yyloc, newState, sp - 1, vstack, lstack); + + if (typeof r !== 'undefined') { + // signal end of error recovery loop AND end of outer parse loop + action = 3; + retval = r; + break; + } + + // pop off stack + sp -= yyrulelen; + + // don't overwrite the `symbol` variable: use a local var to speed things up: + var ntsymbol = this_production[0]; // push nonterminal (reduce) + stack[sp] = ntsymbol; + vstack[sp] = yyval.$; + lstack[sp] = yyval._$; + // goto new state = table[STATE][NONTERMINAL] + newState = table[sstack[sp - 1]][ntsymbol]; + sstack[sp] = newState; + ++sp; + + + + + + + + + + continue; + + // accept: + case 3: + retval = true; + // Return the `$accept` rule's `$$` result, if available. + // + // Also note that JISON always adds this top-most `$accept` rule (with implicit, + // default, action): + // + // $accept: $end + // %{ $$ = $1; @$ = @1; %} + // + // which, combined with the parse kernel's `$accept` state behaviour coded below, + // will produce the `$$` value output of the rule as the parse result, + // IFF that result is *not* `undefined`. (See also the parser kernel code.) 
+ // + // In code: + // + // %{ + // @$ = @1; // if location tracking support is included + // if (typeof $1 !== 'undefined') + // return $1; + // else + // return true; // the default parse result if the rule actions don't produce anything + // %} + sp--; + if (typeof vstack[sp] !== 'undefined') { + retval = vstack[sp]; + } + break; + } + + // break out of loop: we accept or fail with error + break; + } + + // should we also break out of the regular/outer parse loop, + // i.e. did the parser already produce a parse result in here?! + if (action === 3) { + break; + } + continue; + } + + + } + + + + + + + + + + + switch (action) { + // catch misc. parse failures: + default: + // this shouldn't happen, unless resolve defaults are off + if (action instanceof Array) { + p = this.constructParseErrorInfo('Parse Error: multiple actions possible at state: ' + state + ', token: ' + symbol, null, null, false); + retval = this.parseError(p.errStr, p, this.JisonParserError); + break; + } + // Another case of better safe than sorry: in case state transitions come out of another error recovery process + // or a buggy LUT (LookUp Table): + p = this.constructParseErrorInfo('Parsing halted. No viable error recovery approach available due to internal system failure.', null, null, false); + retval = this.parseError(p.errStr, p, this.JisonParserError); + break; + + // shift: + case 1: + stack[sp] = symbol; + vstack[sp] = lexer.yytext; + lstack[sp] = copy_yylloc(lexer.yylloc); + sstack[sp] = newState; // push state + + ++sp; + symbol = 0; + ASSERT(preErrorSymbol === 0); + if (!preErrorSymbol) { // normal execution / no error + // Pick up the lexer details for the current symbol as that one is not 'look-ahead' any more: + + + + yyloc = lexer.yylloc; + + if (recovering > 0) { + recovering--; + + + + + + + + + + } + } else { + // error just occurred, resume old lookahead f/ before error, *unless* that drops us straight back into error mode: + symbol = preErrorSymbol; + preErrorSymbol = 0; + + + + + + + + + + // read action for current state and first input + t = (table[newState] && table[newState][symbol]) || NO_ACTION; + if (!t[0] || symbol === TERROR) { + // forget about that symbol and move forward: this wasn't a 'forgot to insert' error type where + // (simple) stuff might have been missing before the token which caused the error we're + // recovering from now... + // + // Also check if the LookAhead symbol isn't the ERROR token we set as part of the error + // recovery, for then this we would we idling (cycling) on the error forever. + // Yes, this does not take into account the possibility that the *lexer* may have + // produced a *new* TERROR token all by itself, but that would be a very peculiar grammar! + + + + + + + + + + symbol = 0; + } + } + + continue; + + // reduce: + case 2: + this_production = this.productions_[newState - 1]; // `this.productions_[]` is zero-based indexed while states start from 1 upwards... 
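
In practical terms, the `$accept` behaviour documented above means `parse()` hands back the start rule's `$$` value when the rule produced one, and `true` otherwise. A small usage sketch, assuming `parser` and a grammar source string `src` are in scope:

```js
// parse() returns the grammar's top-level `$$` value if defined, else `true`.
var result = parser.parse(src);
if (result === true) {
  console.log('parsed OK, but the start rule produced no value');
} else {
  console.log('parse result:', result);
}
```
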
+ yyrulelen = this_production[1]; + + + + + + + + + + + r = this.performAction.call(yyval, yyloc, newState, sp - 1, vstack, lstack); + + if (typeof r !== 'undefined') { + retval = r; + break; + } + + // pop off stack + sp -= yyrulelen; + + // don't overwrite the `symbol` variable: use a local var to speed things up: + var ntsymbol = this_production[0]; // push nonterminal (reduce) + stack[sp] = ntsymbol; + vstack[sp] = yyval.$; + lstack[sp] = yyval._$; + // goto new state = table[STATE][NONTERMINAL] + newState = table[sstack[sp - 1]][ntsymbol]; + sstack[sp] = newState; + ++sp; + + + + + + + + + + continue; + + // accept: + case 3: + retval = true; + // Return the `$accept` rule's `$$` result, if available. + // + // Also note that JISON always adds this top-most `$accept` rule (with implicit, + // default, action): + // + // $accept: $end + // %{ $$ = $1; @$ = @1; %} + // + // which, combined with the parse kernel's `$accept` state behaviour coded below, + // will produce the `$$` value output of the rule as the parse result, + // IFF that result is *not* `undefined`. (See also the parser kernel code.) + // + // In code: + // + // %{ + // @$ = @1; // if location tracking support is included + // if (typeof $1 !== 'undefined') + // return $1; + // else + // return true; // the default parse result if the rule actions don't produce anything + // %} + sp--; + if (typeof vstack[sp] !== 'undefined') { + retval = vstack[sp]; + } + + break; + } + + // break out of loop: we accept or fail with error + break; + } + } catch (ex) { + // report exceptions through the parseError callback too, but keep the exception intact + // if it is a known parser or lexer error which has been thrown by parseError() already: + if (ex instanceof this.JisonParserError) { + throw ex; + } + else if (lexer && typeof lexer.JisonLexerError === 'function' && ex instanceof lexer.JisonLexerError) { + throw ex; + } + else { + p = this.constructParseErrorInfo('Parsing aborted due to exception.', ex, null, false); + retval = this.parseError(p.errStr, p, this.JisonParserError); + } + } finally { + retval = this.cleanupAfterParse(retval, true, true); + this.__reentrant_call_depth--; + } // /finally + + return retval; +}, +yyError: 1 +}; +parser.originalParseError = parser.parseError; +parser.originalQuoteName = parser.quoteName; + +var ebnf = false; + + + +var rmCommonWS = helpers.rmCommonWS; +var dquote = helpers.dquote; +var checkActionBlock = helpers.checkActionBlock; + + +// transform ebnf to bnf if necessary +function extend(json, grammar) { + if (ebnf) { + json.ebnf = grammar.grammar; // keep the original source EBNF around for possible pretty-printing & AST exports. + json.bnf = transform(grammar.grammar); + } + else { + json.bnf = grammar.grammar; + } + if (grammar.actionInclude) { + json.actionInclude = grammar.actionInclude; + } + return json; +} + +// convert string value to number or boolean value, when possible +// (and when this is more or less obviously the intent) +// otherwise produce the string itself as value. +function parseValue(v) { + if (v === 'false') { + return false; + } + if (v === 'true') { + return true; + } + // http://stackoverflow.com/questions/175739/is-there-a-built-in-way-in-javascript-to-check-if-a-string-is-a-valid-number + // Note that the `v` check ensures that we do not convert `undefined`, `null` and `''` (empty string!) 
+ if (v && !isNaN(v)) { + var rv = +v; + if (isFinite(rv)) { + return rv; + } + } + return v; +} + + +parser.warn = function p_warn() { + console.warn.apply(console, arguments); +}; + +parser.log = function p_log() { + console.log.apply(console, arguments); +}; +/* lexer generated by jison-lex 0.6.1-205 */ + +/* + * Returns a Lexer object of the following structure: + * + * Lexer: { + * yy: {} The so-called "shared state" or rather the *source* of it; + * the real "shared state" `yy` passed around to + * the rule actions, etc. is a direct reference! + * + * This "shared context" object was passed to the lexer by way of + * the `lexer.setInput(str, yy)` API before you may use it. + * + * This "shared context" object is passed to the lexer action code in `performAction()` + * so userland code in the lexer actions may communicate with the outside world + * and/or other lexer rules' actions in more or less complex ways. + * + * } + * + * Lexer.prototype: { + * EOF: 1, + * ERROR: 2, + * + * yy: The overall "shared context" object reference. + * + * JisonLexerError: function(msg, hash), + * + * performAction: function lexer__performAction(yy, yyrulenumber, YY_START), + * + * The function parameters and `this` have the following value/meaning: + * - `this` : reference to the `lexer` instance. + * `yy_` is an alias for `this` lexer instance reference used internally. + * + * - `yy` : a reference to the `yy` "shared state" object which was passed to the lexer + * by way of the `lexer.setInput(str, yy)` API before. + * + * Note: + * The extra arguments you specified in the `%parse-param` statement in your + * **parser** grammar definition file are passed to the lexer via this object + * reference as member variables. + * + * - `yyrulenumber` : index of the matched lexer rule (regex), used internally. + * + * - `YY_START`: the current lexer "start condition" state. + * + * parseError: function(str, hash, ExceptionClass), + * + * constructLexErrorInfo: function(error_message, is_recoverable), + * Helper function. + * Produces a new errorInfo 'hash object' which can be passed into `parseError()`. + * See it's use in this lexer kernel in many places; example usage: + * + * var infoObj = lexer.constructParseErrorInfo('fail!', true); + * var retVal = lexer.parseError(infoObj.errStr, infoObj, lexer.JisonLexerError); + * + * options: { ... lexer %options ... }, + * + * lex: function(), + * Produce one token of lexed input, which was passed in earlier via the `lexer.setInput()` API. + * You MAY use the additional `args...` parameters as per `%parse-param` spec of the **lexer** grammar: + * these extra `args...` are added verbatim to the `yy` object reference as member variables. + * + * WARNING: + * Lexer's additional `args...` parameters (via lexer's `%parse-param`) MAY conflict with + * any attributes already added to `yy` by the **parser** or the jison run-time; + * when such a collision is detected an exception is thrown to prevent the generated run-time + * from silently accepting this confusing and potentially hazardous situation! + * + * cleanupAfterLex: function(do_not_nuke_errorinfos), + * Helper function. + * + * This helper API is invoked when the **parse process** has completed: it is the responsibility + * of the **parser** (or the calling userland code) to invoke this method once cleanup is desired. + * + * This helper may be invoked by user code to ensure the internal lexer gets properly garbage collected. 
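
The lexer API described in this comment block can also be driven standalone, outside `parse()`. A minimal, hedged sketch of the `setInput()` / `lex()` loop, assuming `lexer` refers to the instance constructed further below (the `EOF` and `ERROR` token ids 1 and 2 come from the lexer prototype listed later):

```js
// Feed the lexer some input and pull tokens until EOF.
lexer.setInput('%token FOO\n%%\nstart : FOO ;', {});   // second argument: shared `yy` state
for (;;) {
  var token = lexer.lex();
  if (token === lexer.EOF) break;
  if (token === lexer.ERROR) {
    console.error('lexing failed near:', lexer.match);
    break;
  }
  // token id (or name), matched text and its location info:
  console.log(token, JSON.stringify(lexer.yytext), lexer.yylloc);
}
```
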
+ * + * setInput: function(input, [yy]), + * + * + * input: function(), + * + * + * unput: function(str), + * + * + * more: function(), + * + * + * reject: function(), + * + * + * less: function(n), + * + * + * pastInput: function(n), + * + * + * upcomingInput: function(n), + * + * + * showPosition: function(), + * + * + * test_match: function(regex_match_array, rule_index), + * + * + * next: function(), + * + * + * begin: function(condition), + * + * + * pushState: function(condition), + * + * + * popState: function(), + * + * + * topState: function(), + * + * + * _currentRules: function(), + * + * + * stateStackSize: function(), + * + * + * performAction: function(yy, yy_, yyrulenumber, YY_START), + * + * + * rules: [...], + * + * + * conditions: {associative list: name ==> set}, + * } + * + * + * token location info (`yylloc`): { + * first_line: n, + * last_line: n, + * first_column: n, + * last_column: n, + * range: [start_number, end_number] + * (where the numbers are indexes into the input string, zero-based) + * } + * + * --- + * + * The `parseError` function receives a 'hash' object with these members for lexer errors: + * + * { + * text: (matched text) + * token: (the produced terminal token, if any) + * token_id: (the produced terminal token numeric ID, if any) + * line: (yylineno) + * loc: (yylloc) + * recoverable: (boolean: TRUE when the parser MAY have an error recovery rule + * available for this particular error) + * yy: (object: the current parser internal "shared state" `yy` + * as is also available in the rule actions; this can be used, + * for instance, for advanced error analysis and reporting) + * lexer: (reference to the current lexer instance used by the parser) + * } + * + * while `this` will reference the current lexer instance. + * + * When `parseError` is invoked by the lexer, the default implementation will + * attempt to invoke `yy.parser.parseError()`; when this callback is not provided + * it will try to invoke `yy.parseError()` instead. When that callback is also not + * provided, a `JisonLexerError` exception will be thrown containing the error + * message and `hash`, as constructed by the `constructLexErrorInfo()` API. + * + * Note that the lexer's `JisonLexerError` error class is passed via the + * `ExceptionClass` argument, which is invoked to construct the exception + * instance to be thrown, so technically `parseError` will throw the object + * produced by the `new ExceptionClass(str, hash)` JavaScript expression. + * + * --- + * + * You can specify lexer options by setting / modifying the `.options` object of your Lexer instance. + * These options are available: + * + * (Options are permanent.) + * + * yy: { + * parseError: function(str, hash, ExceptionClass) + * optional: overrides the default `parseError` function. + * } + * + * lexer.options: { + * pre_lex: function() + * optional: is invoked before the lexer is invoked to produce another token. + * `this` refers to the Lexer object. + * post_lex: function(token) { return token; } + * optional: is invoked when the lexer has produced a token `token`; + * this function can override the returned token value by returning another. + * When it does not return any (truthy) value, the lexer will return + * the original `token`. + * `this` refers to the Lexer object. + * + * WARNING: the next set of options are not meant to be changed. They echo the abilities of + * the lexer as per when it was compiled! + * + * ranges: boolean + * optional: `true` ==> token location info will include a .range[] member. 
+ * flex: boolean + * optional: `true` ==> flex-like lexing behaviour where the rules are tested + * exhaustively to find the longest match. + * backtrack_lexer: boolean + * optional: `true` ==> lexer regexes are tested in order and for invoked; + * the lexer terminates the scan when a token is returned by the action code. + * xregexp: boolean + * optional: `true` ==> lexer rule regexes are "extended regex format" requiring the + * `XRegExp` library. When this %option has not been specified at compile time, all lexer + * rule regexes have been written as standard JavaScript RegExp expressions. + * } + */ + + +var lexer = function() { + /** + * See also: + * http://stackoverflow.com/questions/1382107/whats-a-good-way-to-extend-error-in-javascript/#35881508 + * but we keep the prototype.constructor and prototype.name assignment lines too for compatibility + * with userland code which might access the derived class in a 'classic' way. + * + * @public + * @constructor + * @nocollapse + */ + function JisonLexerError(msg, hash) { + Object.defineProperty(this, 'name', { + enumerable: false, + writable: false, + value: 'JisonLexerError' + }); + + if (msg == null) + msg = '???'; + + Object.defineProperty(this, 'message', { + enumerable: false, + writable: true, + value: msg + }); + + this.hash = hash; + var stacktrace; + + if (hash && hash.exception instanceof Error) { + var ex2 = hash.exception; + this.message = ex2.message || msg; + stacktrace = ex2.stack; + } + + if (!stacktrace) { + if (Error.hasOwnProperty('captureStackTrace')) { + // V8 + Error.captureStackTrace(this, this.constructor); + } else { + stacktrace = new Error(msg).stack; + } + } + + if (stacktrace) { + Object.defineProperty(this, 'stack', { + enumerable: false, + writable: false, + value: stacktrace + }); + } + } + + if (typeof Object.setPrototypeOf === 'function') { + Object.setPrototypeOf(JisonLexerError.prototype, Error.prototype); + } else { + JisonLexerError.prototype = Object.create(Error.prototype); + } + + JisonLexerError.prototype.constructor = JisonLexerError; + JisonLexerError.prototype.name = 'JisonLexerError'; + + var lexer = { + +// Code Generator Information Report +// --------------------------------- +// +// Options: +// +// backtracking: .................... false +// location.ranges: ................. true +// location line+column tracking: ... true +// +// +// Forwarded Parser Analysis flags: +// +// uses yyleng: ..................... false +// uses yylineno: ................... false +// uses yytext: ..................... false +// uses yylloc: ..................... false +// uses lexer values: ............... true / true +// location tracking: ............... true +// location assignment: ............. true +// +// +// Lexer Analysis flags: +// +// uses yyleng: ..................... ??? +// uses yylineno: ................... ??? +// uses yytext: ..................... ??? +// uses yylloc: ..................... ??? +// uses ParseError API: ............. ??? +// uses yyerror: .................... ??? +// uses location tracking & editing: ??? +// uses more() API: ................. ??? +// uses unput() API: ................ ??? +// uses reject() API: ............... ??? +// uses less() API: ................. ??? +// uses display APIs pastInput(), upcomingInput(), showPosition(): +// ............................. ??? +// uses describeYYLLOC() API: ....... ??? 
+// +// --------- END OF REPORT ----------- + +EOF: 1, + ERROR: 2, + + // JisonLexerError: JisonLexerError, /// <-- injected by the code generator + + // options: {}, /// <-- injected by the code generator + + // yy: ..., /// <-- injected by setInput() + + __currentRuleSet__: null, /// INTERNAL USE ONLY: internal rule set cache for the current lexer state + + __error_infos: [], /// INTERNAL USE ONLY: the set of lexErrorInfo objects created since the last cleanup + __decompressed: false, /// INTERNAL USE ONLY: mark whether the lexer instance has been 'unfolded' completely and is now ready for use + done: false, /// INTERNAL USE ONLY + _backtrack: false, /// INTERNAL USE ONLY + _input: '', /// INTERNAL USE ONLY + _more: false, /// INTERNAL USE ONLY + _signaled_error_token: false, /// INTERNAL USE ONLY + conditionStack: [], /// INTERNAL USE ONLY; managed via `pushState()`, `popState()`, `topState()` and `stateStackSize()` + match: '', /// READ-ONLY EXTERNAL ACCESS - ADVANCED USE ONLY: tracks input which has been matched so far for the lexer token under construction. `match` is identical to `yytext` except that this one still contains the matched input string after `lexer.performAction()` has been invoked, where userland code MAY have changed/replaced the `yytext` value entirely! + matched: '', /// READ-ONLY EXTERNAL ACCESS - ADVANCED USE ONLY: tracks entire input which has been matched so far + matches: false, /// READ-ONLY EXTERNAL ACCESS - ADVANCED USE ONLY: tracks RE match result for last (successful) match attempt + yytext: '', /// ADVANCED USE ONLY: tracks input which has been matched so far for the lexer token under construction; this value is transferred to the parser as the 'token value' when the parser consumes the lexer token produced through a call to the `lex()` API. + offset: 0, /// READ-ONLY EXTERNAL ACCESS - ADVANCED USE ONLY: tracks the 'cursor position' in the input string, i.e. the number of characters matched so far + yyleng: 0, /// READ-ONLY EXTERNAL ACCESS - ADVANCED USE ONLY: length of matched input for the token under construction (`yytext`) + yylineno: 0, /// READ-ONLY EXTERNAL ACCESS - ADVANCED USE ONLY: 'line number' at which the token under construction is located + yylloc: null, /// READ-ONLY EXTERNAL ACCESS - ADVANCED USE ONLY: tracks location info (lines + columns) for the token under construction + + /** + * INTERNAL USE: construct a suitable error info hash object instance for `parseError`. 
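+ *
+ * A rough sketch of the produced hash (mirroring the fields assigned below):
+ *
+ *     { errStr, recoverable, text, token, line, loc, yy, lexer, destroy() }
+ *
+ * Userland code (or `cleanupAfterLex()`) is expected to call `destroy()` on it
+ * once the error info is no longer needed.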
+ * + * @public + * @this {RegExpLexer} + */ + constructLexErrorInfo: function lexer_constructLexErrorInfo(msg, recoverable, show_input_position) { + msg = '' + msg; + + // heuristic to determine if the error message already contains a (partial) source code dump + // as produced by either `showPosition()` or `prettyPrintRange()`: + if (show_input_position == undefined) { + show_input_position = !(msg.indexOf('\n') > 0 && msg.indexOf('^') > 0); + } + + if (this.yylloc && show_input_position) { + if (typeof this.prettyPrintRange === 'function') { + var pretty_src = this.prettyPrintRange(this.yylloc); + + if (!/\n\s*$/.test(msg)) { + msg += '\n'; + } + + msg += '\n Erroneous area:\n' + this.prettyPrintRange(this.yylloc); + } else if (typeof this.showPosition === 'function') { + var pos_str = this.showPosition(); + + if (pos_str) { + if (msg.length && msg[msg.length - 1] !== '\n' && pos_str[0] !== '\n') { + msg += '\n' + pos_str; + } else { + msg += pos_str; + } + } + } + } + + /** @constructor */ + var pei = { + errStr: msg, + recoverable: !!recoverable, + text: this.match, // This one MAY be empty; userland code should use the `upcomingInput` API to obtain more text which follows the 'lexer cursor position'... + token: null, + line: this.yylineno, + loc: this.yylloc, + yy: this.yy, + lexer: this, + + /** + * and make sure the error info doesn't stay due to potential + * ref cycle via userland code manipulations. + * These would otherwise all be memory leak opportunities! + * + * Note that only array and object references are nuked as those + * constitute the set of elements which can produce a cyclic ref. + * The rest of the members is kept intact as they are harmless. + * + * @public + * @this {LexErrorInfo} + */ + destroy: function destructLexErrorInfo() { + // remove cyclic references added to error info: + // info.yy = null; + // info.lexer = null; + // ... + var rec = !!this.recoverable; + + for (var key in this) { + if (this.hasOwnProperty(key) && typeof key === 'object') { + this[key] = undefined; + } + } + + this.recoverable = rec; + } + }; + + // track this instance so we can `destroy()` it once we deem it superfluous and ready for garbage collection! + this.__error_infos.push(pei); + + return pei; + }, + + /** + * handler which is invoked when a lexer error occurs. + * + * @public + * @this {RegExpLexer} + */ + parseError: function lexer_parseError(str, hash, ExceptionClass) { + if (!ExceptionClass) { + ExceptionClass = this.JisonLexerError; + } + + if (this.yy) { + if (this.yy.parser && typeof this.yy.parser.parseError === 'function') { + return this.yy.parser.parseError.call(this, str, hash, ExceptionClass) || this.ERROR; + } else if (typeof this.yy.parseError === 'function') { + return this.yy.parseError.call(this, str, hash, ExceptionClass) || this.ERROR; + } + } + + throw new ExceptionClass(str, hash); + }, + + /** + * method which implements `yyerror(str, ...args)` functionality for use inside lexer actions. 
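+ *
+ * Illustrative sketch only (the rule and message are made up): inside a lexer
+ * rule's action code one might report a recoverable problem like
+ *
+ *     return yy_.yyerror('unsupported escape sequence: ' + yy_.yytext);
+ *
+ * Any extra arguments are attached to the resulting error hash as
+ * `extra_error_attributes`.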
+ * + * @public + * @this {RegExpLexer} + */ + yyerror: function yyError(str /*, ...args */) { + var lineno_msg = ''; + + if (this.yylloc) { + lineno_msg = ' on line ' + (this.yylineno + 1); + } + + var p = this.constructLexErrorInfo( + 'Lexical error' + lineno_msg + ': ' + str, + this.options.lexerErrorsAreRecoverable + ); + + // Add any extra args to the hash under the name `extra_error_attributes`: + var args = Array.prototype.slice.call(arguments, 1); + + if (args.length) { + p.extra_error_attributes = args; + } + + return this.parseError(p.errStr, p, this.JisonLexerError) || this.ERROR; + }, + + /** + * final cleanup function for when we have completed lexing the input; + * make it an API so that external code can use this one once userland + * code has decided it's time to destroy any lingering lexer error + * hash object instances and the like: this function helps to clean + * up these constructs, which *may* carry cyclic references which would + * otherwise prevent the instances from being properly and timely + * garbage-collected, i.e. this function helps prevent memory leaks! + * + * @public + * @this {RegExpLexer} + */ + cleanupAfterLex: function lexer_cleanupAfterLex(do_not_nuke_errorinfos) { + // prevent lingering circular references from causing memory leaks: + this.setInput('', {}); + + // nuke the error hash info instances created during this run. + // Userland code must COPY any data/references + // in the error hash instance(s) it is more permanently interested in. + if (!do_not_nuke_errorinfos) { + for (var i = this.__error_infos.length - 1; i >= 0; i--) { + var el = this.__error_infos[i]; + + if (el && typeof el.destroy === 'function') { + el.destroy(); + } + } + + this.__error_infos.length = 0; + } + + return this; + }, + + /** + * clear the lexer token context; intended for internal use only + * + * @public + * @this {RegExpLexer} + */ + clear: function lexer_clear() { + this.yytext = ''; + this.yyleng = 0; + this.match = ''; + + // - DO NOT reset `this.matched` + this.matches = false; + + this._more = false; + this._backtrack = false; + var col = (this.yylloc ? this.yylloc.last_column : 0); + + this.yylloc = { + first_line: this.yylineno + 1, + first_column: col, + last_line: this.yylineno + 1, + last_column: col, + range: [this.offset, this.offset] + }; + }, + + /** + * resets the lexer, sets new input + * + * @public + * @this {RegExpLexer} + */ + setInput: function lexer_setInput(input, yy) { + this.yy = yy || this.yy || {}; + + // also check if we've fully initialized the lexer instance, + // including expansion work to be done to go from a loaded + // lexer to a usable lexer: + if (!this.__decompressed) { + // step 1: decompress the regex list: + var rules = this.rules; + + for (var i = 0, len = rules.length; i < len; i++) { + var rule_re = rules[i]; + + // compression: is the RE an xref to another RE slot in the rules[] table? + if (typeof rule_re === 'number') { + rules[i] = rules[rule_re]; + } + } + + // step 2: unfold the conditions[] set to make these ready for use: + var conditions = this.conditions; + + for (var k in conditions) { + var spec = conditions[k]; + var rule_ids = spec.rules; + var len = rule_ids.length; + var rule_regexes = new Array(len + 1); // slot 0 is unused; we use a 1-based index approach here to keep the hottest code in `lexer_next()` fast and simple! 
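+ // (the loop below fills slots 1..len of both arrays in lockstep;
+ //  `lexer_next()` further down relies on exactly this 1-based layout
+ //  in its `for (var i = 1; i <= len; i++)` scan)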
+ var rule_new_ids = new Array(len + 1); + + for (var i = 0; i < len; i++) { + var idx = rule_ids[i]; + var rule_re = rules[idx]; + rule_regexes[i + 1] = rule_re; + rule_new_ids[i + 1] = idx; + } + + spec.rules = rule_new_ids; + spec.__rule_regexes = rule_regexes; + spec.__rule_count = len; + } + + this.__decompressed = true; + } + + this._input = input || ''; + this.clear(); + this._signaled_error_token = false; + this.done = false; + this.yylineno = 0; + this.matched = ''; + this.conditionStack = ['INITIAL']; + this.__currentRuleSet__ = null; + + this.yylloc = { + first_line: 1, + first_column: 0, + last_line: 1, + last_column: 0, + range: [0, 0] + }; + + this.offset = 0; + return this; + }, + + /** + * edit the remaining input via user-specified callback. + * This can be used to forward-adjust the input-to-parse, + * e.g. inserting macro expansions and alike in the + * input which has yet to be lexed. + * The behaviour of this API contrasts the `unput()` et al + * APIs as those act on the *consumed* input, while this + * one allows one to manipulate the future, without impacting + * the current `yyloc` cursor location or any history. + * + * Use this API to help implement C-preprocessor-like + * `#include` statements, etc. + * + * The provided callback must be synchronous and is + * expected to return the edited input (string). + * + * The `cpsArg` argument value is passed to the callback + * as-is. + * + * `callback` interface: + * `function callback(input, cpsArg)` + * + * - `input` will carry the remaining-input-to-lex string + * from the lexer. + * - `cpsArg` is `cpsArg` passed into this API. + * + * The `this` reference for the callback will be set to + * reference this lexer instance so that userland code + * in the callback can easily and quickly access any lexer + * API. + * + * When the callback returns a non-string-type falsey value, + * we assume the callback did not edit the input and we + * will using the input as-is. + * + * When the callback returns a non-string-type value, it + * is converted to a string for lexing via the `"" + retval` + * operation. (See also why: http://2ality.com/2012/03/converting-to-string.html + * -- that way any returned object's `toValue()` and `toString()` + * methods will be invoked in a proper/desirable order.) + * + * @public + * @this {RegExpLexer} + */ + editRemainingInput: function lexer_editRemainingInput(callback, cpsArg) { + var rv = callback.call(this, this._input, cpsArg); + + if (typeof rv !== 'string') { + if (rv) { + this._input = '' + rv; + } + // else: keep `this._input` as is. + } else { + this._input = rv; + } + + return this; + }, + + /** + * consumes and returns one char from the input + * + * @public + * @this {RegExpLexer} + */ + input: function lexer_input() { + if (!this._input) { + //this.done = true; -- don't set `done` as we want the lex()/next() API to be able to produce one custom EOF token match after this anyhow. (lexer can match special <> tokens and perform user action code for a <> match, but only does so *once*) + return null; + } + + var ch = this._input[0]; + this.yytext += ch; + this.yyleng++; + this.offset++; + this.match += ch; + this.matched += ch; + + // Count the linenumber up when we hit the LF (or a stand-alone CR). + // On CRLF, the linenumber is incremented when you fetch the CR or the CRLF combo + // and we advance immediately past the LF as well, returning both together as if + // it was all a single 'character' only. 
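+ // For example (illustrative only): with remaining input '\r\nfoo', a single
+ // input() call returns '\r\n', increments `yylineno` once and advances both
+ // `offset` and `yylloc.range[1]` by two.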
+ var slice_len = 1; + + var lines = false; + + if (ch === '\n') { + lines = true; + } else if (ch === '\r') { + lines = true; + var ch2 = this._input[1]; + + if (ch2 === '\n') { + slice_len++; + ch += ch2; + this.yytext += ch2; + this.yyleng++; + this.offset++; + this.match += ch2; + this.matched += ch2; + this.yylloc.range[1]++; + } + } + + if (lines) { + this.yylineno++; + this.yylloc.last_line++; + this.yylloc.last_column = 0; + } else { + this.yylloc.last_column++; + } + + this.yylloc.range[1]++; + this._input = this._input.slice(slice_len); + return ch; + }, + + /** + * unshifts one char (or an entire string) into the input + * + * @public + * @this {RegExpLexer} + */ + unput: function lexer_unput(ch) { + var len = ch.length; + var lines = ch.split(/(?:\r\n?|\n)/g); + this._input = ch + this._input; + this.yytext = this.yytext.substr(0, this.yytext.length - len); + this.yyleng = this.yytext.length; + this.offset -= len; + this.match = this.match.substr(0, this.match.length - len); + this.matched = this.matched.substr(0, this.matched.length - len); + + if (lines.length > 1) { + this.yylineno -= lines.length - 1; + this.yylloc.last_line = this.yylineno + 1; + + // Get last entirely matched line into the `pre_lines[]` array's + // last index slot; we don't mind when other previously + // matched lines end up in the array too. + var pre = this.match; + + var pre_lines = pre.split(/(?:\r\n?|\n)/g); + + if (pre_lines.length === 1) { + pre = this.matched; + pre_lines = pre.split(/(?:\r\n?|\n)/g); + } + + this.yylloc.last_column = pre_lines[pre_lines.length - 1].length; + } else { + this.yylloc.last_column -= len; + } + + this.yylloc.range[1] = this.yylloc.range[0] + this.yyleng; + this.done = false; + return this; + }, + + /** + * cache matched text and append it on next action + * + * @public + * @this {RegExpLexer} + */ + more: function lexer_more() { + this._more = true; + return this; + }, + + /** + * signal the lexer that this rule fails to match the input, so the + * next matching rule (regex) should be tested instead. + * + * @public + * @this {RegExpLexer} + */ + reject: function lexer_reject() { + if (this.options.backtrack_lexer) { + this._backtrack = true; + } else { + // when the `parseError()` call returns, we MUST ensure that the error is registered. + // We accomplish this by signaling an 'error' token to be produced for the current + // `.lex()` run. + var lineno_msg = ''; + + if (this.yylloc) { + lineno_msg = ' on line ' + (this.yylineno + 1); + } + + var p = this.constructLexErrorInfo( + 'Lexical error' + lineno_msg + ': You can only invoke reject() in the lexer when the lexer is of the backtracking persuasion (options.backtrack_lexer = true).', + false + ); + + this._signaled_error_token = this.parseError(p.errStr, p, this.JisonLexerError) || this.ERROR; + } + + return this; + }, + + /** + * retain first n characters of the match + * + * @public + * @this {RegExpLexer} + */ + less: function lexer_less(n) { + return this.unput(this.match.slice(n)); + }, + + /** + * return (part of the) already matched input, i.e. for error + * messages. + * + * Limit the returned string length to `maxSize` (default: 20). + * + * Limit the returned string to the `maxLines` number of lines of + * input (default: 1). + * + * Negative limit values equal *unlimited*. 
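+ *
+ * A small usage sketch (illustrative only; `lexer` and the message are made up):
+ *
+ *     var msg = 'error near: ' + lexer.pastInput(30, 2) +
+ *               ' <<HERE>> ' + lexer.upcomingInput(30, 2);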
+ * + * @public + * @this {RegExpLexer} + */ + pastInput: function lexer_pastInput(maxSize, maxLines) { + var past = this.matched.substring(0, this.matched.length - this.match.length); + + if (maxSize < 0) + maxSize = past.length; + else if (!maxSize) + maxSize = 20; + + if (maxLines < 0) + maxLines = past.length; // can't ever have more input lines than this! + else if (!maxLines) + maxLines = 1; + + // `substr` anticipation: treat \r\n as a single character and take a little + // more than necessary so that we can still properly check against maxSize + // after we've transformed and limited the newLines in here: + past = past.substr(-maxSize * 2 - 2); + + // now that we have a significantly reduced string to process, transform the newlines + // and chop them, then limit them: + var a = past.replace(/\r\n|\r/g, '\n').split('\n'); + + a = a.slice(-maxLines); + past = a.join('\n'); + + // When, after limiting to maxLines, we still have too much to return, + // do add an ellipsis prefix... + if (past.length > maxSize) { + past = '...' + past.substr(-maxSize); + } + + return past; + }, + + /** + * return (part of the) upcoming input, i.e. for error messages. + * + * Limit the returned string length to `maxSize` (default: 20). + * + * Limit the returned string to the `maxLines` number of lines of input (default: 1). + * + * Negative limit values equal *unlimited*. + * + * > ### NOTE ### + * > + * > *"upcoming input"* is defined as the whole of the both + * > the *currently lexed* input, together with any remaining input + * > following that. *"currently lexed"* input is the input + * > already recognized by the lexer but not yet returned with + * > the lexer token. This happens when you are invoking this API + * > from inside any lexer rule action code block. + * > + * + * @public + * @this {RegExpLexer} + */ + upcomingInput: function lexer_upcomingInput(maxSize, maxLines) { + var next = this.match; + + if (maxSize < 0) + maxSize = next.length + this._input.length; + else if (!maxSize) + maxSize = 20; + + if (maxLines < 0) + maxLines = maxSize; // can't ever have more input lines than this! + else if (!maxLines) + maxLines = 1; + + // `substring` anticipation: treat \r\n as a single character and take a little + // more than necessary so that we can still properly check against maxSize + // after we've transformed and limited the newLines in here: + if (next.length < maxSize * 2 + 2) { + next += this._input.substring(0, maxSize * 2 + 2); // substring is faster on Chrome/V8 + } + + // now that we have a significantly reduced string to process, transform the newlines + // and chop them, then limit them: + var a = next.replace(/\r\n|\r/g, '\n').split('\n'); + + a = a.slice(0, maxLines); + next = a.join('\n'); + + // When, after limiting to maxLines, we still have too much to return, + // do add an ellipsis postfix... + if (next.length > maxSize) { + next = next.substring(0, maxSize) + '...'; + } + + return next; + }, + + /** + * return a string which displays the character position where the + * lexing error occurred, i.e. 
for error messages + * + * @public + * @this {RegExpLexer} + */ + showPosition: function lexer_showPosition(maxPrefix, maxPostfix) { + var pre = this.pastInput(maxPrefix).replace(/\s/g, ' '); + var c = new Array(pre.length + 1).join('-'); + return pre + this.upcomingInput(maxPostfix).replace(/\s/g, ' ') + '\n' + c + '^'; + }, + + /** + * return a string which displays the lines & columns of input which are referenced + * by the given location info range, plus a few lines of context. + * + * This function pretty-prints the indicated section of the input, with line numbers + * and everything! + * + * This function is very useful to provide highly readable error reports, while + * the location range may be specified in various flexible ways: + * + * - `loc` is the location info object which references the area which should be + * displayed and 'marked up': these lines & columns of text are marked up by `^` + * characters below each character in the entire input range. + * + * - `context_loc` is the *optional* location info object which instructs this + * pretty-printer how much *leading* context should be displayed alongside + * the area referenced by `loc`. This can help provide context for the displayed + * error, etc. + * + * When this location info is not provided, a default context of 3 lines is + * used. + * + * - `context_loc2` is another *optional* location info object, which serves + * a similar purpose to `context_loc`: it specifies the amount of *trailing* + * context lines to display in the pretty-print output. + * + * When this location info is not provided, a default context of 1 line only is + * used. + * + * Special Notes: + * + * - when the `loc`-indicated range is very large (about 5 lines or more), then + * only the first and last few lines of this block are printed while a + * `...continued...` message will be printed between them. + * + * This serves the purpose of not printing a huge amount of text when the `loc` + * range happens to be huge: this way a manageable & readable output results + * for arbitrary large ranges. + * + * - this function can display lines of input which whave not yet been lexed. + * `prettyPrintRange()` can access the entire input! + * + * @public + * @this {RegExpLexer} + */ + prettyPrintRange: function lexer_prettyPrintRange(loc, context_loc, context_loc2) { + const CONTEXT = 3; + const CONTEXT_TAIL = 1; + const MINIMUM_VISIBLE_NONEMPTY_LINE_COUNT = 2; + var input = this.matched + this._input; + var lines = input.split('\n'); + + //var show_context = (error_size < 5 || context_loc); + var l0 = Math.max(1, (context_loc ? context_loc.first_line : loc.first_line - CONTEXT)); + + var l1 = Math.max(1, (context_loc2 ? context_loc2.last_line : loc.last_line + CONTEXT_TAIL)); + var lineno_display_width = 1 + Math.log10(l1 | 1) | 0; + var ws_prefix = new Array(lineno_display_width).join(' '); + var nonempty_line_indexes = []; + + var rv = lines.slice(l0 - 1, l1 + 1).map(function injectLineNumber(line, index) { + var lno = index + l0; + var lno_pfx = (ws_prefix + lno).substr(-lineno_display_width); + var rv = lno_pfx + ': ' + line; + var errpfx = new Array(lineno_display_width + 1).join('^'); + var offset = 2 + 1; + var len = 0; + + if (lno === loc.first_line) { + offset += loc.first_column; + + len = Math.max( + 2, + ((lno === loc.last_line ? 
loc.last_column : line.length)) - loc.first_column + 1 + ); + } else if (lno === loc.last_line) { + len = Math.max(2, loc.last_column + 1); + } else if (lno > loc.first_line && lno < loc.last_line) { + len = Math.max(2, line.length + 1); + } + + if (len) { + var lead = new Array(offset).join('.'); + var mark = new Array(len).join('^'); + rv += '\n' + errpfx + lead + mark; + + if (line.trim().length > 0) { + nonempty_line_indexes.push(index); + } + } + + rv = rv.replace(/\t/g, ' '); + return rv; + }); + + // now make sure we don't print an overly large amount of error area: limit it + // to the top and bottom line count: + if (nonempty_line_indexes.length > 2 * MINIMUM_VISIBLE_NONEMPTY_LINE_COUNT) { + var clip_start = nonempty_line_indexes[MINIMUM_VISIBLE_NONEMPTY_LINE_COUNT - 1] + 1; + var clip_end = nonempty_line_indexes[nonempty_line_indexes.length - MINIMUM_VISIBLE_NONEMPTY_LINE_COUNT] - 1; + var intermediate_line = new Array(lineno_display_width + 1).join(' ') + ' (...continued...)'; + intermediate_line += '\n' + new Array(lineno_display_width + 1).join('-') + ' (---------------)'; + rv.splice(clip_start, clip_end - clip_start + 1, intermediate_line); + } + + return rv.join('\n'); + }, + + /** + * helper function, used to produce a human readable description as a string, given + * the input `yylloc` location object. + * + * Set `display_range_too` to TRUE to include the string character index position(s) + * in the description if the `yylloc.range` is available. + * + * @public + * @this {RegExpLexer} + */ + describeYYLLOC: function lexer_describe_yylloc(yylloc, display_range_too) { + var l1 = yylloc.first_line; + var l2 = yylloc.last_line; + var c1 = yylloc.first_column; + var c2 = yylloc.last_column; + var dl = l2 - l1; + var dc = c2 - c1; + var rv; + + if (dl === 0) { + rv = 'line ' + l1 + ', '; + + if (dc <= 1) { + rv += 'column ' + c1; + } else { + rv += 'columns ' + c1 + ' .. ' + c2; + } + } else { + rv = 'lines ' + l1 + '(column ' + c1 + ') .. ' + l2 + '(column ' + c2 + ')'; + } + + if (yylloc.range && display_range_too) { + var r1 = yylloc.range[0]; + var r2 = yylloc.range[1] - 1; + + if (r2 <= r1) { + rv += ' {String Offset: ' + r1 + '}'; + } else { + rv += ' {String Offset range: ' + r1 + ' .. ' + r2 + '}'; + } + } + + return rv; + }, + + /** + * test the lexed token: return FALSE when not a match, otherwise return token. + * + * `match` is supposed to be an array coming out of a regex match, i.e. `match[0]` + * contains the actually matched text string. 
+ * + * Also move the input cursor forward and update the match collectors: + * + * - `yytext` + * - `yyleng` + * - `match` + * - `matches` + * - `yylloc` + * - `offset` + * + * @public + * @this {RegExpLexer} + */ + test_match: function lexer_test_match(match, indexed_rule) { + var token, lines, backup, match_str, match_str_len; + + if (this.options.backtrack_lexer) { + // save context + backup = { + yylineno: this.yylineno, + + yylloc: { + first_line: this.yylloc.first_line, + last_line: this.yylloc.last_line, + first_column: this.yylloc.first_column, + last_column: this.yylloc.last_column, + range: this.yylloc.range.slice(0) + }, + + yytext: this.yytext, + match: this.match, + matches: this.matches, + matched: this.matched, + yyleng: this.yyleng, + offset: this.offset, + _more: this._more, + _input: this._input, + + //_signaled_error_token: this._signaled_error_token, + yy: this.yy, + + conditionStack: this.conditionStack.slice(0), + done: this.done + }; + } + + match_str = match[0]; + match_str_len = match_str.length; + + // if (match_str.indexOf('\n') !== -1 || match_str.indexOf('\r') !== -1) { + lines = match_str.split(/(?:\r\n?|\n)/g); + + if (lines.length > 1) { + this.yylineno += lines.length - 1; + this.yylloc.last_line = this.yylineno + 1; + this.yylloc.last_column = lines[lines.length - 1].length; + } else { + this.yylloc.last_column += match_str_len; + } + + // } + this.yytext += match_str; + + this.match += match_str; + this.matched += match_str; + this.matches = match; + this.yyleng = this.yytext.length; + this.yylloc.range[1] += match_str_len; + + // previous lex rules MAY have invoked the `more()` API rather than producing a token: + // those rules will already have moved this `offset` forward matching their match lengths, + // hence we must only add our own match length now: + this.offset += match_str_len; + + this._more = false; + this._backtrack = false; + this._input = this._input.slice(match_str_len); + + // calling this method: + // + // function lexer__performAction(yy, yyrulenumber, YY_START) {...} + token = this.performAction.call( + this, + this.yy, + indexed_rule, + this.conditionStack[this.conditionStack.length - 1] /* = YY_START */ + ); + + // otherwise, when the action codes are all simple return token statements: + //token = this.simpleCaseActionClusters[indexed_rule]; + + if (this.done && this._input) { + this.done = false; + } + + if (token) { + return token; + } else if (this._backtrack) { + // recover context + for (var k in backup) { + this[k] = backup[k]; + } + + this.__currentRuleSet__ = null; + return false; // rule action called reject() implying the next rule should be tested instead. + } else if (this._signaled_error_token) { + // produce one 'error' token as `.parseError()` in `reject()` + // did not guarantee a failure signal by throwing an exception! + token = this._signaled_error_token; + + this._signaled_error_token = false; + return token; + } + + return false; + }, + + /** + * return next match in input + * + * @public + * @this {RegExpLexer} + */ + next: function lexer_next() { + if (this.done) { + this.clear(); + return this.EOF; + } + + if (!this._input) { + this.done = true; + } + + var token, match, tempMatch, index; + + if (!this._more) { + this.clear(); + } + + var spec = this.__currentRuleSet__; + + if (!spec) { + // Update the ruleset cache as we apparently encountered a state change or just started lexing. 
+ // The cache is set up for fast lookup -- we assume a lexer will switch states much less often than it will + // invoke the `lex()` token-producing API and related APIs, hence caching the set for direct access helps + // speed up those activities a tiny bit. + spec = this.__currentRuleSet__ = this._currentRules(); + + // Check whether a *sane* condition has been pushed before: this makes the lexer robust against + // user-programmer bugs such as https://github.com/zaach/jison-lex/issues/19 + if (!spec || !spec.rules) { + var lineno_msg = ''; + + if (this.options.trackPosition) { + lineno_msg = ' on line ' + (this.yylineno + 1); + } + + var p = this.constructLexErrorInfo( + 'Internal lexer engine error' + lineno_msg + ': The lex grammar programmer pushed a non-existing condition name "' + this.topState() + '"; this is a fatal error and should be reported to the application programmer team!', + false + ); + + // produce one 'error' token until this situation has been resolved, most probably by parse termination! + return this.parseError(p.errStr, p, this.JisonLexerError) || this.ERROR; + } + } + + var rule_ids = spec.rules; + var regexes = spec.__rule_regexes; + var len = spec.__rule_count; + + // Note: the arrays are 1-based, while `len` itself is a valid index, + // hence the non-standard less-or-equal check in the next loop condition! + for (var i = 1; i <= len; i++) { + tempMatch = this._input.match(regexes[i]); + + if (tempMatch && (!match || tempMatch[0].length > match[0].length)) { + match = tempMatch; + index = i; + + if (this.options.backtrack_lexer) { + token = this.test_match(tempMatch, rule_ids[i]); + + if (token !== false) { + return token; + } else if (this._backtrack) { + match = undefined; + continue; // rule action called reject() implying a rule MISmatch. + } else { + // else: this is a lexer rule which consumes input without producing a token (e.g. whitespace) + return false; + } + } else if (!this.options.flex) { + break; + } + } + } + + if (match) { + token = this.test_match(match, rule_ids[index]); + + if (token !== false) { + return token; + } + + // else: this is a lexer rule which consumes input without producing a token (e.g. whitespace) + return false; + } + + if (!this._input) { + this.done = true; + this.clear(); + return this.EOF; + } else { + var lineno_msg = ''; + + if (this.options.trackPosition) { + lineno_msg = ' on line ' + (this.yylineno + 1); + } + + var p = this.constructLexErrorInfo( + 'Lexical error' + lineno_msg + ': Unrecognized text.', + this.options.lexerErrorsAreRecoverable + ); + + var pendingInput = this._input; + var activeCondition = this.topState(); + var conditionStackDepth = this.conditionStack.length; + token = this.parseError(p.errStr, p, this.JisonLexerError) || this.ERROR; + + if (token === this.ERROR) { + // we can try to recover from a lexer error that `parseError()` did not 'recover' for us + // by moving forward at least one character at a time IFF the (user-specified?) `parseError()` + // has not consumed/modified any pending input or changed state in the error handler: + if (!this.matches && // and make sure the input has been modified/consumed ... + pendingInput === this._input && // ...or the lexer state has been modified significantly enough + // to merit a non-consuming error handling action right now. 
+ activeCondition === this.topState() && conditionStackDepth === this.conditionStack.length) { + this.input(); + } + } + + return token; + } + }, + + /** + * return next match that has a token + * + * @public + * @this {RegExpLexer} + */ + lex: function lexer_lex() { + var r; + + // allow the PRE/POST handlers set/modify the return token for maximum flexibility of the generated lexer: + if (typeof this.options.pre_lex === 'function') { + r = this.options.pre_lex.call(this); + } + + while (!r) { + r = this.next(); + } + + if (typeof this.options.post_lex === 'function') { + // (also account for a userdef function which does not return any value: keep the token as is) + r = this.options.post_lex.call(this, r) || r; + } + + return r; + }, + + /** + * backwards compatible alias for `pushState()`; + * the latter is symmetrical with `popState()` and we advise to use + * those APIs in any modern lexer code, rather than `begin()`. + * + * @public + * @this {RegExpLexer} + */ + begin: function lexer_begin(condition) { + return this.pushState(condition); + }, + + /** + * activates a new lexer condition state (pushes the new lexer + * condition state onto the condition stack) + * + * @public + * @this {RegExpLexer} + */ + pushState: function lexer_pushState(condition) { + this.conditionStack.push(condition); + this.__currentRuleSet__ = null; + return this; + }, + + /** + * pop the previously active lexer condition state off the condition + * stack + * + * @public + * @this {RegExpLexer} + */ + popState: function lexer_popState() { + var n = this.conditionStack.length - 1; + + if (n > 0) { + this.__currentRuleSet__ = null; + return this.conditionStack.pop(); + } else { + return this.conditionStack[0]; + } + }, + + /** + * return the currently active lexer condition state; when an index + * argument is provided it produces the N-th previous condition state, + * if available + * + * @public + * @this {RegExpLexer} + */ + topState: function lexer_topState(n) { + n = this.conditionStack.length - 1 - Math.abs(n || 0); + + if (n >= 0) { + return this.conditionStack[n]; + } else { + return 'INITIAL'; + } + }, + + /** + * (internal) determine the lexer rule set which is active for the + * currently active lexer condition state + * + * @public + * @this {RegExpLexer} + */ + _currentRules: function lexer__currentRules() { + if (this.conditionStack.length && this.conditionStack[this.conditionStack.length - 1]) { + return this.conditions[this.conditionStack[this.conditionStack.length - 1]]; + } else { + return this.conditions['INITIAL']; + } + }, + + /** + * return the number of states currently on the stack + * + * @public + * @this {RegExpLexer} + */ + stateStackSize: function lexer_stateStackSize() { + return this.conditionStack.length; + }, + + options: { + xregexp: true, + ranges: true, + trackPosition: true, + parseActionsUseYYMERGELOCATIONINFO: true, + easy_keyword_rules: true + }, + + JisonLexerError: JisonLexerError, + + performAction: function lexer__performAction(yy, yyrulenumber, YY_START) { + var yy_ = this; + switch (yyrulenumber) { + case 2: + /*! Conditions:: action */ + /*! Rule:: \/[^ /]*?['"{}][^ ]*?\/ */ + return 43; // regexp with braces or quotes (and no spaces) + + break; + + case 7: + /*! Conditions:: action */ + /*! Rule:: \{ */ + yy.depth++; + + return 12; + break; + + case 8: + /*! Conditions:: action */ + /*! Rule:: \} */ + if (yy.depth === 0) { + this.popState(); + } else { + yy.depth--; + } + + return 13; + break; + + case 9: + /*! Conditions:: token */ + /*! 
Rule:: {BR} */ + this.popState(); + + break; + + case 10: + /*! Conditions:: token */ + /*! Rule:: %% */ + this.popState(); + + break; + + case 11: + /*! Conditions:: token */ + /*! Rule:: ; */ + this.popState(); + + break; + + case 12: + /*! Conditions:: bnf ebnf */ + /*! Rule:: %% */ + this.pushState('code'); + + return 14; + break; + + case 25: + /*! Conditions:: options */ + /*! Rule:: = */ + this.pushState('option_values'); + + return 3; + break; + + case 26: + /*! Conditions:: option_values */ + /*! Rule:: "{DOUBLEQUOTED_STRING_CONTENT}" */ + yy_.yytext = unescQuote(this.matches[1], /\\"/g); + + this.popState(); + return 29; // value is always a string type + break; + + case 27: + /*! Conditions:: option_values */ + /*! Rule:: '{QUOTED_STRING_CONTENT}' */ + yy_.yytext = unescQuote(this.matches[1], /\\'/g); + + this.popState(); + return 29; // value is always a string type + break; + + case 28: + /*! Conditions:: option_values */ + /*! Rule:: `{ES2017_STRING_CONTENT}` */ + yy_.yytext = unescQuote(this.matches[1], /\\`/g); + + this.popState(); + return 29; // value is always a string type + break; + + case 29: + /*! Conditions:: INITIAL ebnf bnf token path options option_values */ + /*! Rule:: \/\/[^\r\n]* */ + /* skip single-line comment */ + break; + + case 30: + /*! Conditions:: INITIAL ebnf bnf token path options option_values */ + /*! Rule:: \/\*[^]*?\*\/ */ + /* skip multi-line comment */ + break; + + case 31: + /*! Conditions:: option_values */ + /*! Rule:: [^\s\r\n]+ */ + this.popState(); + + return 30; + break; + + case 32: + /*! Conditions:: options */ + /*! Rule:: {BR}{WS}+(?=\S) */ + /* skip leading whitespace on the next line of input, when followed by more options */ + break; + + case 33: + /*! Conditions:: options */ + /*! Rule:: {BR} */ + this.popState(); + + return 28; + break; + + case 34: + /*! Conditions:: options option_values */ + /*! Rule:: {WS}+ */ + /* skip whitespace */ + break; + + case 35: + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: {WS}+ */ + /* skip whitespace */ + break; + + case 36: + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: {BR}+ */ + /* skip newlines */ + break; + + case 37: + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: \[{ID}\] */ + yy_.yytext = this.matches[1]; + + return 39; + break; + + case 42: + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: "{DOUBLEQUOTED_STRING_CONTENT}" */ + yy_.yytext = unescQuote(this.matches[1], /\\"/g); + + return 26; + break; + + case 43: + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: '{QUOTED_STRING_CONTENT}' */ + yy_.yytext = unescQuote(this.matches[1], /\\'/g); + + return 26; + break; + + case 48: + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: %% */ + this.pushState((yy.ebnf ? 'ebnf' : 'bnf')); + + return 14; + break; + + case 49: + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: %ebnf\b */ + yy.ebnf = true; + + return 20; + break; + + case 57: + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: %token\b */ + this.pushState('token'); + + return 18; + break; + + case 59: + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: %options\b */ + this.pushState('options'); + + return 27; + break; + + case 60: + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: %lex{LEX_CONTENT}\/lex\b */ + // remove the %lex../lex wrapper and return the pure lex section: + yy_.yytext = this.matches[1]; + + return 17; + break; + + case 63: + /*! Conditions:: INITIAL ebnf bnf code */ + /*! 
Rule:: %include\b */ + this.pushState('path'); + + return 44; + break; + + case 64: + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: %{NAME}([^\r\n]*) */ + /* ignore unrecognized decl */ + this.warn(rmCommonWS` + EBNF: ignoring unsupported parser option ${dquote(yy_.yytext)} + while lexing in ${dquote(this.topState())} state. + + Erroneous area: + ` + this.prettyPrintRange(yy_.yylloc)); + + yy_.yytext = [ + this.matches[1], // {NAME} + this.matches[2].trim() // optional value/parameters + ]; + + return 21; + break; + + case 65: + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: <{ID}> */ + yy_.yytext = this.matches[1]; + + return 36; + break; + + case 66: + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: \{\{([^]*?)\}\} */ + yy_.yytext = this.matches[1].replace(/\}\\\}/g, '}}'); // unescape any literal '}\}' that exists within the action code block + + return 15; + break; + + case 67: + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: %\{([^]*?)%\} */ + yy_.yytext = this.matches[1].replace(/%\\\}/g, '%}'); // unescape any literal '%\}' that exists within the action code block + + return 15; + break; + + case 68: + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: \{ */ + yy.depth = 0; + + this.pushState('action'); + return 12; + break; + + case 69: + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: ->.* */ + yy_.yytext = yy_.yytext.substr(2, yy_.yyleng - 2).trim(); + + return 42; + break; + + case 70: + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: →.* */ + yy_.yytext = yy_.yytext.substr(1, yy_.yyleng - 1).trim(); + + return 42; + break; + + case 71: + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: =>.* */ + yy_.yytext = yy_.yytext.substr(2, yy_.yyleng - 2).trim(); + + return 42; + break; + + case 72: + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: {HEX_NUMBER} */ + yy_.yytext = parseInt(yy_.yytext, 16); + + return 37; + break; + + case 73: + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: {DECIMAL_NUMBER}(?![xX0-9a-fA-F]) */ + yy_.yytext = parseInt(yy_.yytext, 10); + + return 37; + break; + + case 75: + /*! Conditions:: code */ + /*! Rule:: [^\r\n]+ */ + return 46; // the bit of CODE just before EOF... + + break; + + case 76: + /*! Conditions:: path */ + /*! Rule:: {BR} */ + this.popState(); + + this.unput(yy_.yytext); + break; + + case 77: + /*! Conditions:: path */ + /*! Rule:: "{DOUBLEQUOTED_STRING_CONTENT}" */ + yy_.yytext = unescQuote(this.matches[1]); + + this.popState(); + return 45; + break; + + case 78: + /*! Conditions:: path */ + /*! Rule:: '{QUOTED_STRING_CONTENT}' */ + yy_.yytext = unescQuote(this.matches[1]); + + this.popState(); + return 45; + break; + + case 79: + /*! Conditions:: path */ + /*! Rule:: {WS}+ */ + // skip whitespace in the line + break; + + case 80: + /*! Conditions:: path */ + /*! Rule:: [^\s\r\n]+ */ + this.popState(); + + return 45; + break; + + case 81: + /*! Conditions:: action */ + /*! Rule:: " */ + yy_.yyerror(rmCommonWS` + unterminated string constant in lexer rule action block. + + Erroneous area: + ` + this.prettyPrintRange(yy_.yylloc)); + + return 2; + break; + + case 82: + /*! Conditions:: action */ + /*! Rule:: ' */ + yy_.yyerror(rmCommonWS` + unterminated string constant in lexer rule action block. + + Erroneous area: + ` + this.prettyPrintRange(yy_.yylloc)); + + return 2; + break; + + case 83: + /*! Conditions:: action */ + /*! Rule:: ` */ + yy_.yyerror(rmCommonWS` + unterminated string constant in lexer rule action block. 
+ + Erroneous area: + ` + this.prettyPrintRange(yy_.yylloc)); + + return 2; + break; + + case 84: + /*! Conditions:: option_values */ + /*! Rule:: " */ + yy_.yyerror(rmCommonWS` + unterminated string constant in %options entry. + + Erroneous area: + ` + this.prettyPrintRange(yy_.yylloc)); + + return 2; + break; + + case 85: + /*! Conditions:: option_values */ + /*! Rule:: ' */ + yy_.yyerror(rmCommonWS` + unterminated string constant in %options entry. + + Erroneous area: + ` + this.prettyPrintRange(yy_.yylloc)); + + return 2; + break; + + case 86: + /*! Conditions:: option_values */ + /*! Rule:: ` */ + yy_.yyerror(rmCommonWS` + unterminated string constant in %options entry. + + Erroneous area: + ` + this.prettyPrintRange(yy_.yylloc)); + + return 2; + break; + + case 87: + /*! Conditions:: * */ + /*! Rule:: " */ + var rules = (this.topState() === 'macro' ? 'macro\'s' : this.topState()); + + yy_.yyerror(rmCommonWS` + unterminated string constant encountered while lexing + ${rules}. + + Erroneous area: + ` + this.prettyPrintRange(yy_.yylloc)); + + return 2; + break; + + case 88: + /*! Conditions:: * */ + /*! Rule:: ' */ + var rules = (this.topState() === 'macro' ? 'macro\'s' : this.topState()); + + yy_.yyerror(rmCommonWS` + unterminated string constant encountered while lexing + ${rules}. + + Erroneous area: + ` + this.prettyPrintRange(yy_.yylloc)); + + return 2; + break; + + case 89: + /*! Conditions:: * */ + /*! Rule:: ` */ + var rules = (this.topState() === 'macro' ? 'macro\'s' : this.topState()); + + yy_.yyerror(rmCommonWS` + unterminated string constant encountered while lexing + ${rules}. + + Erroneous area: + ` + this.prettyPrintRange(yy_.yylloc)); + + return 2; + break; + + case 90: + /*! Conditions:: * */ + /*! Rule:: . */ + /* b0rk on bad characters */ + yy_.yyerror(rmCommonWS` + unsupported parser input: ${dquote(yy_.yytext)} + while lexing in ${dquote(this.topState())} state. + + Erroneous area: + ` + this.prettyPrintRange(yy_.yylloc)); + + break; + + default: + return this.simpleCaseActionClusters[yyrulenumber]; + } + }, + + simpleCaseActionClusters: { + /*! Conditions:: action */ + /*! Rule:: \/\*[^]*?\*\/ */ + 0: 43, + + /*! Conditions:: action */ + /*! Rule:: \/\/[^\r\n]* */ + 1: 43, + + /*! Conditions:: action */ + /*! Rule:: "{DOUBLEQUOTED_STRING_CONTENT}" */ + 3: 43, + + /*! Conditions:: action */ + /*! Rule:: '{QUOTED_STRING_CONTENT}' */ + 4: 43, + + /*! Conditions:: action */ + /*! Rule:: [/"'][^{}/"']+ */ + 5: 43, + + /*! Conditions:: action */ + /*! Rule:: [^{}/"']+ */ + 6: 43, + + /*! Conditions:: bnf ebnf */ + /*! Rule:: %empty\b */ + 13: 38, + + /*! Conditions:: bnf ebnf */ + /*! Rule:: %epsilon\b */ + 14: 38, + + /*! Conditions:: bnf ebnf */ + /*! Rule:: \u0190 */ + 15: 38, + + /*! Conditions:: bnf ebnf */ + /*! Rule:: \u025B */ + 16: 38, + + /*! Conditions:: bnf ebnf */ + /*! Rule:: \u03B5 */ + 17: 38, + + /*! Conditions:: bnf ebnf */ + /*! Rule:: \u03F5 */ + 18: 38, + + /*! Conditions:: ebnf */ + /*! Rule:: \( */ + 19: 7, + + /*! Conditions:: ebnf */ + /*! Rule:: \) */ + 20: 8, + + /*! Conditions:: ebnf */ + /*! Rule:: \* */ + 21: 9, + + /*! Conditions:: ebnf */ + /*! Rule:: \? */ + 22: 10, + + /*! Conditions:: ebnf */ + /*! Rule:: \+ */ + 23: 11, + + /*! Conditions:: options */ + /*! Rule:: {NAME} */ + 24: 25, + + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: {ID} */ + 38: 24, + + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: {NAME} */ + 39: 25, + + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: \$end\b */ + 40: 40, + + /*! 
Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: \$eof\b */ + 41: 40, + + /*! Conditions:: token */ + /*! Rule:: [^\s\r\n]+ */ + 44: 'TOKEN_WORD', + + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: : */ + 45: 5, + + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: ; */ + 46: 4, + + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: \| */ + 47: 6, + + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: %debug\b */ + 50: 19, + + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: %parser-type\b */ + 51: 32, + + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: %prec\b */ + 52: 41, + + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: %start\b */ + 53: 16, + + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: %left\b */ + 54: 33, + + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: %right\b */ + 55: 34, + + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: %nonassoc\b */ + 56: 35, + + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: %parse-param\b */ + 58: 31, + + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: %code\b */ + 61: 23, + + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: %import\b */ + 62: 22, + + /*! Conditions:: code */ + /*! Rule:: [^\r\n]*(\r|\n)+ */ + 74: 46, + + /*! Conditions:: * */ + /*! Rule:: $ */ + 91: 1 + }, + + rules: [ + /* 0: */ new XRegExp('^(?:\\/\\*[^]*?\\*\\/)', ''), + /* 1: */ /^(?:\/\/[^\r\n]*)/, + /* 2: */ /^(?:\/[^ \/]*?['"{}][^ ]*?\/)/, + /* 3: */ /^(?:"((?:\\"|\\[^"]|[^\n\r"\\])*)")/, + /* 4: */ /^(?:'((?:\\'|\\[^']|[^\n\r'\\])*)')/, + /* 5: */ /^(?:[\/"'][^{}\/"']+)/, + /* 6: */ /^(?:[^{}\/"']+)/, + /* 7: */ /^(?:\{)/, + /* 8: */ /^(?:\})/, + /* 9: */ /^(?:(\r\n|\n|\r))/, + /* 10: */ /^(?:%%)/, + /* 11: */ /^(?:;)/, + /* 12: */ /^(?:%%)/, + /* 13: */ /^(?:%empty\b)/, + /* 14: */ /^(?:%epsilon\b)/, + /* 15: */ /^(?:\u0190)/, + /* 16: */ /^(?:\u025B)/, + /* 17: */ /^(?:\u03B5)/, + /* 18: */ /^(?:\u03F5)/, + /* 19: */ /^(?:\()/, + /* 20: */ /^(?:\))/, + /* 21: */ /^(?:\*)/, + /* 22: */ /^(?:\?)/, + /* 23: */ /^(?:\+)/, + /* 24: */ new XRegExp( + '^(?:([\\p{Alphabetic}_](?:[\\p{Alphabetic}\\p{Number}\\-_]*(?:[\\p{Alphabetic}\\p{Number}_]))?))', + '' + ), + /* 25: */ /^(?:=)/, + /* 26: */ /^(?:"((?:\\"|\\[^"]|[^\n\r"\\])*)")/, + /* 27: */ /^(?:'((?:\\'|\\[^']|[^\n\r'\\])*)')/, + /* 28: */ /^(?:`((?:\\`|\\[^`]|[^\\`])*)`)/, + /* 29: */ /^(?:\/\/[^\r\n]*)/, + /* 30: */ new XRegExp('^(?:\\/\\*[^]*?\\*\\/)', ''), + /* 31: */ /^(?:\S+)/, + /* 32: */ /^(?:(\r\n|\n|\r)([^\S\n\r])+(?=\S))/, + /* 33: */ /^(?:(\r\n|\n|\r))/, + /* 34: */ /^(?:([^\S\n\r])+)/, + /* 35: */ /^(?:([^\S\n\r])+)/, + /* 36: */ /^(?:(\r\n|\n|\r)+)/, + /* 37: */ new XRegExp('^(?:\\[([\\p{Alphabetic}_](?:[\\p{Alphabetic}\\p{Number}_])*)\\])', ''), + /* 38: */ new XRegExp('^(?:([\\p{Alphabetic}_](?:[\\p{Alphabetic}\\p{Number}_])*))', ''), + /* 39: */ new XRegExp( + '^(?:([\\p{Alphabetic}_](?:[\\p{Alphabetic}\\p{Number}\\-_]*(?:[\\p{Alphabetic}\\p{Number}_]))?))', + '' + ), + /* 40: */ /^(?:\$end\b)/, + /* 41: */ /^(?:\$eof\b)/, + /* 42: */ /^(?:"((?:\\"|\\[^"]|[^\n\r"\\])*)")/, + /* 43: */ /^(?:'((?:\\'|\\[^']|[^\n\r'\\])*)')/, + /* 44: */ /^(?:\S+)/, + /* 45: */ /^(?::)/, + /* 46: */ /^(?:;)/, + /* 47: */ /^(?:\|)/, + /* 48: */ /^(?:%%)/, + /* 49: */ /^(?:%ebnf\b)/, + /* 50: */ /^(?:%debug\b)/, + /* 51: */ /^(?:%parser-type\b)/, + /* 52: */ /^(?:%prec\b)/, + /* 53: */ /^(?:%start\b)/, + /* 54: */ /^(?:%left\b)/, + /* 55: */ /^(?:%right\b)/, + /* 56: */ /^(?:%nonassoc\b)/, + /* 57: */ /^(?:%token\b)/, + /* 58: */ 
/^(?:%parse-param\b)/, + /* 59: */ /^(?:%options\b)/, + /* 60: */ new XRegExp( + '^(?:%lex((?:[^\\S\\n\\r])*(?:(?:\\r\\n|\\n|\\r)[^]*?)?(?:\\r\\n|\\n|\\r)(?:[^\\S\\n\\r])*)\\/lex\\b)', + '' + ), + /* 61: */ /^(?:%code\b)/, + /* 62: */ /^(?:%import\b)/, + /* 63: */ /^(?:%include\b)/, + /* 64: */ new XRegExp( + '^(?:%([\\p{Alphabetic}_](?:[\\p{Alphabetic}\\p{Number}\\-_]*(?:[\\p{Alphabetic}\\p{Number}_]))?)([^\\n\\r]*))', + '' + ), + /* 65: */ new XRegExp('^(?:<([\\p{Alphabetic}_](?:[\\p{Alphabetic}\\p{Number}_])*)>)', ''), + /* 66: */ new XRegExp('^(?:\\{\\{([^]*?)\\}\\})', ''), + /* 67: */ new XRegExp('^(?:%\\{([^]*?)%\\})', ''), + /* 68: */ /^(?:\{)/, + /* 69: */ /^(?:->.*)/, + /* 70: */ /^(?:→.*)/, + /* 71: */ /^(?:=>.*)/, + /* 72: */ /^(?:(0[Xx][\dA-Fa-f]+))/, + /* 73: */ /^(?:([1-9]\d*)(?![\dA-FXa-fx]))/, + /* 74: */ /^(?:[^\r\n]*(\r|\n)+)/, + /* 75: */ /^(?:[^\r\n]+)/, + /* 76: */ /^(?:(\r\n|\n|\r))/, + /* 77: */ /^(?:"((?:\\"|\\[^"]|[^\n\r"\\])*)")/, + /* 78: */ /^(?:'((?:\\'|\\[^']|[^\n\r'\\])*)')/, + /* 79: */ /^(?:([^\S\n\r])+)/, + /* 80: */ /^(?:\S+)/, + /* 81: */ /^(?:")/, + /* 82: */ /^(?:')/, + /* 83: */ /^(?:`)/, + /* 84: */ /^(?:")/, + /* 85: */ /^(?:')/, + /* 86: */ /^(?:`)/, + /* 87: */ /^(?:")/, + /* 88: */ /^(?:')/, + /* 89: */ /^(?:`)/, + /* 90: */ /^(?:.)/, + /* 91: */ /^(?:$)/ + ], + + conditions: { + 'action': { + rules: [0, 1, 2, 3, 4, 5, 6, 7, 8, 81, 82, 83, 87, 88, 89, 90, 91], + inclusive: false + }, + + 'code': { + rules: [63, 74, 75, 87, 88, 89, 90, 91], + inclusive: false + }, + + 'path': { + rules: [29, 30, 76, 77, 78, 79, 80, 87, 88, 89, 90, 91], + inclusive: false + }, + + 'options': { + rules: [24, 25, 29, 30, 32, 33, 34, 87, 88, 89, 90, 91], + inclusive: false + }, + + 'option_values': { + rules: [26, 27, 28, 29, 30, 31, 34, 84, 85, 86, 87, 88, 89, 90, 91], + inclusive: false + }, + + 'token': { + rules: [ + 9, + 10, + 11, + 29, + 30, + 35, + 36, + 37, + 38, + 39, + 40, + 41, + 42, + 43, + 44, + 45, + 46, + 47, + 48, + 49, + 50, + 51, + 52, + 53, + 54, + 55, + 56, + 57, + 58, + 59, + 60, + 61, + 62, + 64, + 65, + 66, + 67, + 68, + 69, + 70, + 71, + 72, + 73, + 87, + 88, + 89, + 90, + 91 + ], + + inclusive: true + }, + + 'bnf': { + rules: [ + 12, + 13, + 14, + 15, + 16, + 17, + 18, + 29, + 30, + 35, + 36, + 37, + 38, + 39, + 40, + 41, + 42, + 43, + 45, + 46, + 47, + 48, + 49, + 50, + 51, + 52, + 53, + 54, + 55, + 56, + 57, + 58, + 59, + 60, + 61, + 62, + 63, + 64, + 65, + 66, + 67, + 68, + 69, + 70, + 71, + 72, + 73, + 87, + 88, + 89, + 90, + 91 + ], + + inclusive: true + }, + + 'ebnf': { + rules: [ + 12, + 13, + 14, + 15, + 16, + 17, + 18, + 19, + 20, + 21, + 22, + 23, + 29, + 30, + 35, + 36, + 37, + 38, + 39, + 40, + 41, + 42, + 43, + 45, + 46, + 47, + 48, + 49, + 50, + 51, + 52, + 53, + 54, + 55, + 56, + 57, + 58, + 59, + 60, + 61, + 62, + 63, + 64, + 65, + 66, + 67, + 68, + 69, + 70, + 71, + 72, + 73, + 87, + 88, + 89, + 90, + 91 + ], + + inclusive: true + }, + + 'INITIAL': { + rules: [ + 29, + 30, + 35, + 36, + 37, + 38, + 39, + 40, + 41, + 42, + 43, + 45, + 46, + 47, + 48, + 49, + 50, + 51, + 52, + 53, + 54, + 55, + 56, + 57, + 58, + 59, + 60, + 61, + 62, + 63, + 64, + 65, + 66, + 67, + 68, + 69, + 70, + 71, + 72, + 73, + 87, + 88, + 89, + 90, + 91 + ], + + inclusive: true + } + } + }; + + var rmCommonWS = helpers.rmCommonWS; + var dquote = helpers.dquote; + + function unescQuote(str) { + str = '' + str; + var a = str.split('\\\\'); + + a = a.map(function(s) { + return s.replace(/\\'/g, '\'').replace(/\\"/g, '"'); + }); + + str = a.join('\\\\'); + 
return str; + } + + lexer.warn = function l_warn() { + if (this.yy && this.yy.parser && typeof this.yy.parser.warn === 'function') { + return this.yy.parser.warn.apply(this, arguments); + } else { + console.warn.apply(console, arguments); + } + }; + + lexer.log = function l_log() { + if (this.yy && this.yy.parser && typeof this.yy.parser.log === 'function') { + return this.yy.parser.log.apply(this, arguments); + } else { + console.log.apply(console, arguments); + } + }; + + return lexer; +}(); +parser.lexer = lexer; + +function Parser() { + this.yy = {}; +} +Parser.prototype = parser; +parser.Parser = Parser; + +function yyparse() { + return parser.parse.apply(parser, arguments); +} + + + +var bnf = { + parser, + Parser, + parse: yyparse, + +}; + +var version = '0.6.1-205'; // require('./package.json').version; + +function parse(grammar) { + return bnf.parser.parse(grammar); +} + +// adds a declaration to the grammar +bnf.parser.yy.addDeclaration = function bnfAddDeclaration(grammar, decl) { + if (decl.start) { + grammar.start = decl.start; + } else if (decl.lex) { + grammar.lex = parseLex(decl.lex.text, decl.lex.position); + } else if (decl.operator) { + if (!grammar.operators) grammar.operators = []; + grammar.operators.push(decl.operator); + } else if (decl.token) { + if (!grammar.extra_tokens) grammar.extra_tokens = []; + grammar.extra_tokens.push(decl.token); + } else if (decl.token_list) { + if (!grammar.extra_tokens) grammar.extra_tokens = []; + decl.token_list.forEach(function (tok) { + grammar.extra_tokens.push(tok); + }); + } else if (decl.parseParams) { + if (!grammar.parseParams) grammar.parseParams = []; + grammar.parseParams = grammar.parseParams.concat(decl.parseParams); + } else if (decl.parserType) { + if (!grammar.options) grammar.options = {}; + grammar.options.type = decl.parserType; + } else if (decl.include) { + if (!grammar.moduleInclude) grammar.moduleInclude = ''; + grammar.moduleInclude += decl.include; + } else if (decl.options) { + if (!grammar.options) grammar.options = {}; + // last occurrence of `%options` wins: + for (var i = 0; i < decl.options.length; i++) { + grammar.options[decl.options[i][0]] = decl.options[i][1]; + } + } else if (decl.unknownDecl) { + if (!grammar.unknownDecls) grammar.unknownDecls = []; + grammar.unknownDecls.push(decl.unknownDecl); + } else if (decl.imports) { + if (!grammar.imports) grammar.imports = []; + grammar.imports.push(decl.imports); + } else if (decl.actionInclude) { + if (!grammar.actionInclude) { + grammar.actionInclude = ''; + } + grammar.actionInclude += decl.actionInclude; + } else if (decl.initCode) { + if (!grammar.moduleInit) { + grammar.moduleInit = []; + } + grammar.moduleInit.push(decl.initCode); // {qualifier: , include: } + } +}; + +// parse an embedded lex section +function parseLex(text, position) { + text = text.replace(/(?:^%lex)|(?:\/lex$)/g, ''); + // We want the lex input to start at the given 'position', if any, + // so that error reports will produce a line number and character index + // which matches the original input file: + position = position || {}; + position.range = position.range || []; + var l = position.first_line | 0; + var c = position.range[0] | 0; + var prelude = ''; + if (l > 1) { + prelude += (new Array(l)).join('\n'); + c -= prelude.length; + } + if (c > 3) { + prelude = '// ' + (new Array(c - 3)).join('.') + prelude; + } + return jisonlex.parse(prelude + text); +} + +const ebnf_parser = { + transform +}; + +var ebnfParser = { + parse, + + transform, + + // assistant exports for 
debugging/testing: + bnf_parser: bnf, + ebnf_parser, + bnf_lexer: jisonlex, + + version, +}; + +return ebnfParser; + +}))); diff --git a/ebnf-parser-prelude.js b/ebnf-parser-prelude.js new file mode 100644 index 0000000..075ac5f --- /dev/null +++ b/ebnf-parser-prelude.js @@ -0,0 +1,3 @@ + +// hack: +var assert; diff --git a/ebnf-parser.js b/ebnf-parser.js index 55a0b8f..765596d 100644 --- a/ebnf-parser.js +++ b/ebnf-parser.js @@ -1,41 +1,100 @@ -var bnf = require("./parser").parser, - ebnf = require("./ebnf-transform"), - jisonlex = require("lex-parser"); -exports.parse = function parse (grammar) { return bnf.parse(grammar); }; -exports.transform = ebnf.transform; +import bnf from "./parser"; +import transform from "./ebnf-transform"; +import jisonlex from "@gerhobbelt/lex-parser"; + +var version = '0.6.1-205'; // require('./package.json').version; + +function parse(grammar) { + return bnf.parser.parse(grammar); +} // adds a declaration to the grammar -bnf.yy.addDeclaration = function (grammar, decl) { +bnf.parser.yy.addDeclaration = function bnfAddDeclaration(grammar, decl) { if (decl.start) { grammar.start = decl.start; - } else if (decl.lex) { - grammar.lex = parseLex(decl.lex); - + grammar.lex = parseLex(decl.lex.text, decl.lex.position); } else if (decl.operator) { if (!grammar.operators) grammar.operators = []; grammar.operators.push(decl.operator); - - } else if (decl.parseParam) { + } else if (decl.token) { + if (!grammar.extra_tokens) grammar.extra_tokens = []; + grammar.extra_tokens.push(decl.token); + } else if (decl.token_list) { + if (!grammar.extra_tokens) grammar.extra_tokens = []; + decl.token_list.forEach(function (tok) { + grammar.extra_tokens.push(tok); + }); + } else if (decl.parseParams) { if (!grammar.parseParams) grammar.parseParams = []; - grammar.parseParams = grammar.parseParams.concat(decl.parseParam); - + grammar.parseParams = grammar.parseParams.concat(decl.parseParams); + } else if (decl.parserType) { + if (!grammar.options) grammar.options = {}; + grammar.options.type = decl.parserType; } else if (decl.include) { if (!grammar.moduleInclude) grammar.moduleInclude = ''; grammar.moduleInclude += decl.include; - } else if (decl.options) { if (!grammar.options) grammar.options = {}; - for (var i=0; i < decl.options.length; i++) { - grammar.options[decl.options[i]] = true; + // last occurrence of `%options` wins: + for (var i = 0; i < decl.options.length; i++) { + grammar.options[decl.options[i][0]] = decl.options[i][1]; + } + } else if (decl.unknownDecl) { + if (!grammar.unknownDecls) grammar.unknownDecls = []; + grammar.unknownDecls.push(decl.unknownDecl); + } else if (decl.imports) { + if (!grammar.imports) grammar.imports = []; + grammar.imports.push(decl.imports); + } else if (decl.actionInclude) { + if (!grammar.actionInclude) { + grammar.actionInclude = ''; + } + grammar.actionInclude += decl.actionInclude; + } else if (decl.initCode) { + if (!grammar.moduleInit) { + grammar.moduleInit = []; } + grammar.moduleInit.push(decl.initCode); // {qualifier: , include: } } - }; // parse an embedded lex section -var parseLex = function (text) { - return jisonlex.parse(text.replace(/(?:^%lex)|(?:\/lex$)/g, '')); +function parseLex(text, position) { + text = text.replace(/(?:^%lex)|(?:\/lex$)/g, ''); + // We want the lex input to start at the given 'position', if any, + // so that error reports will produce a line number and character index + // which matches the original input file: + position = position || {}; + position.range = position.range || []; + var l = 
position.first_line | 0; + var c = position.range[0] | 0; + var prelude = ''; + if (l > 1) { + prelude += (new Array(l)).join('\n'); + c -= prelude.length; + } + if (c > 3) { + prelude = '// ' + (new Array(c - 3)).join('.') + prelude; + } + return jisonlex.parse(prelude + text); +} + +const ebnf_parser = { + transform +}; + +export default { + parse, + + transform, + + // assistant exports for debugging/testing: + bnf_parser: bnf, + ebnf_parser, + bnf_lexer: jisonlex, + + version, }; diff --git a/ebnf-transform.js b/ebnf-transform.js index d1b8af7..2a8171d 100644 --- a/ebnf-transform.js +++ b/ebnf-transform.js @@ -1,135 +1,420 @@ -var EBNF = (function(){ - var parser = require('./transform-parser.js'); - - var transformExpression = function(e, opts, emit) { - var type = e[0], value = e[1], name = false; - - if (type === 'xalias') { - type = e[1]; - value = e[2] - name = e[3]; - if (type) { - e = e.slice(1,2); - } else { - e = value; - type = e[0]; - value = e[1]; - } +import parser from './transform-parser.js'; +import XRegExp from '@gerhobbelt/xregexp'; + +//import assert from 'assert'; + +var devDebug = 0; + +// WARNING: this regex MUST match the regex for `ID` in ebnf-parser::bnf.l jison language lexer spec! (`ID = [{ALPHA}]{ALNUM}*`) +// +// This is the base XRegExp ID regex used in many places; this should match the ID macro definition in the EBNF/BNF parser et al as well! +const ID_REGEX_BASE = '[\\p{Alphabetic}_][\\p{Alphabetic}_\\p{Number}]*'; + +// produce a unique production symbol. +// Use this to produce rule productions from transformed EBNF which are +// guaranteed not to collide with previously generated / already existing +// rules (~ symbols). +function generateUniqueSymbol(id, postfix, opts) { + var sym = id + postfix; + if (opts.grammar[sym]) { + var i = 2; // the first occurrence won't have a number, this is already a collision, so start numbering at *2*. + do { + sym = id + postfix + i; + i++; + } while (opts.grammar[sym]); + } + return sym; +} + +function generatePushAction(handle, offset) { + var terms = handle.terms; + var rv = []; + + for (var i = 0, len = terms.length; i < len; i++) { + rv.push('$' + (i + offset)); + } + rv = rv.join(', '); + // and make sure we contain a term series unambiguously, i.e. anything more complex than + // a single term inside an EBNF check is produced as an array so we can differentiate + // between */+/? EBNF operator results and groups of tokens per individual match. + if (len > 1) { + rv = '[' + rv + ']'; + } + return rv; +} + +function transformExpression(e, opts, emit) { + var type = e[0], + value = e[1], + name = false, + has_transformed = 0; + var list, n; + + if (type === 'xalias') { + type = e[1]; + value = e[2]; + name = e[3]; + if (type) { + e = e.slice(1); + } else { + e = value; + type = e[0]; + value = e[1]; } + if (devDebug > 3) console.log('xalias: ', e, type, value, name); + } - if (type === 'symbol') { - var n; - if (e[1][0] === '\\') n = e[1][1]; - else if (e[1][0] === '\'') n = e[1].substring(1, e[1].length-1); - else n = e[1]; - emit(n + (name ? "["+name+"]" : "")); - } else if (type === "+") { - if (!name) { - name = opts.production + "_repetition_plus" + opts.repid++; - } - emit(name); + if (type === 'symbol') { + n = e[1]; + if (devDebug > 2) console.log('symbol EMIT: ', n + (name ? '[' + name + ']' : '')); + emit(n + (name ? 
'[' + name + ']' : '')); + } else if (type === '+') { + if (!name) { + name = generateUniqueSymbol(opts.production, '_repetition_plus', opts); + } + if (devDebug > 2) console.log('+ EMIT name: ', name); + emit(name); - opts = optsForProduction(name, opts.grammar); - var list = transformExpressionList([value], opts); - opts.grammar[name] = [ - [list, "$$ = [$1];"], - [ - name + " " + list, - "$1.push($2);" - ] - ]; - } else if (type === "*") { - if (!name) { - name = opts.production + "_repetition" + opts.repid++; - } - emit(name); + has_transformed = 1; - opts = optsForProduction(name, opts.grammar); - opts.grammar[name] = [ - ["", "$$ = [];"], - [ - name + " " + transformExpressionList([value], opts), - "$1.push($2);" - ] - ]; - } else if (type ==="?") { + opts = optsForProduction(name, opts.grammar); + list = transformExpressionList([value], opts); + opts.grammar[name] = [ + [ + list.fragment, + '$$ = [' + generatePushAction(list, 1) + '];' + ], + [ + name + ' ' + list.fragment, + '$1.push(' + generatePushAction(list, 2) + ');\n$$ = $1;' + ] + ]; + } else if (type === '*') { + if (!name) { + name = generateUniqueSymbol(opts.production, '_repetition', opts); + } + if (devDebug > 2) console.log('* EMIT name: ', name); + emit(name); + + has_transformed = 1; + + opts = optsForProduction(name, opts.grammar); + list = transformExpressionList([value], opts); + opts.grammar[name] = [ + [ + '', + '$$ = [];' + ], + [ + name + ' ' + list.fragment, + '$1.push(' + generatePushAction(list, 2) + ');\n$$ = $1;' + ] + ]; + } else if (type === '?') { + if (!name) { + name = generateUniqueSymbol(opts.production, '_option', opts); + } + if (devDebug > 2) console.log('? EMIT name: ', name); + emit(name); + + has_transformed = 1; + + opts = optsForProduction(name, opts.grammar); + list = transformExpressionList([value], opts); + // you want to be able to check if 0 or 1 occurrences were recognized: since jison + // by default *copies* the lexer token value, i.e. `$$ = $1` is the (optional) default action, + // we will need to set the action up explicitly in case of the 0-count match: + // `$$ = undefined`. + // + // Note that we MUST return an array as the + // '1 occurrence' match CAN carry multiple terms, e.g. in constructs like + // `(T T T)?`, which would otherwise be unrecognizable from the `T*` construct. 
+ opts.grammar[name] = [ + [ + '', + '$$ = undefined;' + ], + [ + list.fragment, + '$$ = ' + generatePushAction(list, 1) + ';' + ] + ]; + } else if (type === '()') { + if (value.length === 1 && !name) { + list = transformExpressionList(value[0], opts); + if (list.first_transformed_term_index) { + has_transformed = list.first_transformed_term_index; + } + if (devDebug > 2) console.log('group EMIT len=1: ', list); + emit(list); + } else { if (!name) { - name = opts.production + "_option" + opts.optid++; + name = generateUniqueSymbol(opts.production, '_group', opts); } + if (devDebug > 2) console.log('group EMIT name: ', name); emit(name); + has_transformed = 1; + opts = optsForProduction(name, opts.grammar); - opts.grammar[name] = [ - "", transformExpressionList([value], opts) - ]; - } else if (type === "()") { - if (value.length == 1) { - emit(transformExpressionList(value[0], opts)); - } else { - if (!name) { - name = opts.production + "_group" + opts.groupid++; - } - emit(name); + opts.grammar[name] = value.map(function (handle) { + var list = transformExpressionList(handle, opts); + return [ + list.fragment, + '$$ = ' + generatePushAction(list, 1) + ';' + ]; + }); + } + } - opts = optsForProduction(name, opts.grammar); - opts.grammar[name] = value.map(function(handle) { - return transformExpressionList(handle, opts); - }); + return has_transformed; +} + +function transformExpressionList(list, opts) { + var first_transformed_term_index = false; + var terms = list.reduce(function (tot, e) { + var ci = tot.length; + + var has_transformed = transformExpression(e, opts, function (name) { + if (name.terms) { + tot.push.apply(tot, name.terms); + } else { + tot.push(name); } + }); + + if (has_transformed) { + first_transformed_term_index = ci + has_transformed; } - }; + return tot; + }, []); - var transformExpressionList = function(list, opts) { - return list.reduce (function (tot, e) { - transformExpression (e, opts, function (i) { tot.push(i); }); - return tot; - }, []). 
- join(" "); + return { + fragment: terms.join(' '), + terms: terms, + first_transformed_term_index: first_transformed_term_index // 1-based index }; +} - var optsForProduction = function(id, grammar) { - return { - production: id, - repid: 0, - groupid: 0, - optid: 0, - grammar: grammar - }; +function optsForProduction(id, grammar) { + return { + production: id, + grammar: grammar }; +} - var transformProduction = function(id, production, grammar) { - var transform_opts = optsForProduction(id, grammar); - return production.map(function (handle) { - var action = null, opts = null; - if (typeof(handle) !== 'string') - action = handle[1], - opts = handle[2], - handle = handle[0]; - var expressions = parser.parse(handle); - - handle = transformExpressionList(expressions, transform_opts); - - var ret = [handle]; - if (action) ret.push(action); - if (opts) ret.push(opts); - if (ret.length == 1) return ret[0]; - else return ret; - }); - }; +function transformProduction(id, production, grammar) { + var transform_opts = optsForProduction(id, grammar); + return production.map(function (handle) { + var action = null, + opts = null; + var i, len, n; - var transformGrammar = function(grammar) { - Object.keys(grammar).forEach(function(id) { - grammar[id] = transformProduction(id, grammar[id], grammar); - }); - }; + if (typeof handle !== 'string') { + action = handle[1]; + opts = handle[2]; + handle = handle[0]; + } + var expressions = parser.parse(handle); - return { - transform: function (ebnf) { - transformGrammar(ebnf); - return ebnf; + if (devDebug > 1) console.log('\n================\nEBNF transform expressions:\n ', handle, opts, JSON.stringify(expressions, null, 2)); + + var list = transformExpressionList(expressions, transform_opts); + + var ret = [list.fragment]; + if (action) { + // make sure the action doesn't address any inner items. 
+ if (list.first_transformed_term_index) { + var rhs = list.fragment; + // seek out all names and aliases; strip out literal tokens first as those cannot serve as $names: + var alist = list.terms; // rhs.replace(/'[^']+'/g, '~').replace(/"[^"]+"/g, '~').split(' '); + // we also know at which index the first transformation occurred: + var first_index = list.first_transformed_term_index - 1; + if (devDebug > 2) console.log('alist ~ rhs rule terms: ', alist, rhs); + + var alias_re = new XRegExp(`\\[${ID_REGEX_BASE}\\]`); + var term_re = new XRegExp(`^${ID_REGEX_BASE}$`); + // and collect the PERMITTED aliases: the names of the terms and all the remaining aliases + var good_aliases = {}; + var alias_cnt = {}; + var donotalias = {}; + + // WARNING: this replicates the knowledge/code of jison.js::addName() + var addName = function addNameEBNF(s, i) { + var base = s.replace(/[0-9]+$/, ''); + var dna = donotalias[base]; + + if (good_aliases[s]) { + alias_cnt[s]++; + if (!dna) { + good_aliases[s + alias_cnt[s]] = i + 1; + alias_cnt[s + alias_cnt[s]] = 1; + } + } else { + good_aliases[s] = i + 1; + alias_cnt[s] = 1; + if (!dna) { + good_aliases[s + alias_cnt[s]] = i + 1; + alias_cnt[s + alias_cnt[s]] = 1; + } + } + }; + + // WARNING: this replicates the knowledge/code of jison.js::markBasename() + var markBasename = function markBasenameEBNF(s) { + if (/[0-9]$/.test(s)) { + s = s.replace(/[0-9]+$/, ''); + donotalias[s] = true; + } + }; + + // mark both regular and aliased names, e.g., `id[alias1]` and `id1` + // + // WARNING: this replicates the knowledge/code of jison.js::markBasename()+addName() usage + for (i = 0, len = alist.length; i < len; i++) { + var term = alist[i]; + var alias = term.match(alias_re); + if (alias) { + markBasename(alias[0].substr(1, alias[0].length - 2)); + term = term.replace(alias_re, ''); + } + if (term.match(term_re)) { + markBasename(term); + } + } + // then check & register both regular and aliased names, e.g., `id[alias1]` and `id1` + for (i = 0, len = alist.length; i < len; i++) { + var term = alist[i]; + var alias = term.match(alias_re); + if (alias) { + addName(alias[0].substr(1, alias[0].length - 2), i); + term = term.replace(alias_re, ''); + } + if (term.match(term_re)) { + addName(term, i); + } + } + if (devDebug > 2) console.log('good_aliases: ', { + donotalias: donotalias, + good_aliases: good_aliases, + alias_cnt: alias_cnt, + }); + + // now scan the action for all named and numeric semantic values ($nonterminal / $1 / @1, ##1, ...) + // + // Note that `#name` are straight **static** symbol translations, which are okay as they don't + // require access to the parse stack: `#n` references can be resolved completely + // at grammar compile time. + // + var nameref_re = new XRegExp(`(?:[$@]|##)${ID_REGEX_BASE}`, 'g'); + var named_spots = nameref_re.exec(action); + var numbered_spots = action.match(/(?:[$@]|##)[0-9]+\b/g); + var max_term_index = list.terms.length; + if (devDebug > 2) console.log('ACTION named_spots: ', named_spots); + if (devDebug > 2) console.log('ACTION numbered_spots: ', numbered_spots); + + // loop through the XRegExp alias regex matches in `action` + while (named_spots) { + n = named_spots[0].replace(/^(?:[$@]|##)/, ''); + if (!good_aliases[n]) { + throw new Error('The action block references the named alias "' + n + '" ' + + 'which is not available in production "' + handle + '"; ' + + 'it probably got removed by the EBNF rule rewrite process.\n' + + 'Be reminded that you cannot reference sub-elements within EBNF */+/? 
groups, ' + + 'only the outer-most EBNF group alias will remain available at all times ' + + 'due to the EBNF-to-BNF rewrite process.'); + } + + if (alias_cnt[n] !== 1) { + throw new Error('The action block references the ambiguous named alias or term reference "' + n + '" ' + + 'which is mentioned ' + alias_cnt[n] + ' times in production "' + handle + '", implicit and explicit aliases included.\n' + + 'You should either provide unambiguous = uniquely named aliases for these terms or use numeric index references (e.g. `$3`) as a stop-gap in your action code.\n' + + 'Be reminded that you cannot reference sub-elements within EBNF */+/? groups, ' + + 'only the outer-most EBNF group alias will remain available at all times ' + + 'due to the EBNF-to-BNF rewrite process.'); + } + //assert(good_aliases[n] <= max_term_index, 'max term index'); + + named_spots = nameref_re.exec(action); + } + if (numbered_spots) { + for (i = 0, len = numbered_spots.length; i < len; i++) { + n = parseInt(numbered_spots[i].replace(/^(?:[$@]|##)/, '')); + if (n > max_term_index) { + /* @const */ var n_suffixes = [ 'st', 'nd', 'rd', 'th' ]; + throw new Error('The action block references the ' + n + n_suffixes[Math.max(0, Math.min(3, n - 1))] + ' term, ' + + 'which is not available in production "' + handle + '"; ' + + 'Be reminded that you cannot reference sub-elements within EBNF */+/? groups, ' + + 'only the outer-most EBNF group alias will remain available at all times ' + + 'due to the EBNF-to-BNF rewrite process.'); + } + } + } + } + ret.push(action); } - }; -})(); + if (opts) { + ret.push(opts); + } + if (devDebug > 1) console.log('\n\nEBNF tx result:\n ', JSON.stringify(list, null, 2), JSON.stringify(ret, null, 2)); + + if (ret.length === 1) { + return ret[0]; + } else { + return ret; + } + }); +}; + +var ref_list; +var ref_names; + +// create a deep copy of the input, so we will keep the input constant. 
+function deepClone(from, sub) { + if (sub == null) { + ref_list = []; + ref_names = []; + sub = 'root'; + } + if (typeof from === 'function') return from; + if (from == null || typeof from !== 'object') return from; + if (from.constructor !== Object && from.constructor !== Array) { + return from; + } + + for (var i = 0, len = ref_list.length; i < len; i++) { + if (ref_list[i] === from) { + throw new Error('[Circular/Xref:' + ref_names[i] + ']'); // circular or cross reference + } + } + ref_list.push(from); + ref_names.push(sub); + sub += '.'; + + var to = new from.constructor(); + for (var name in from) { + to[name] = deepClone(from[name], sub + name); + } + return to; +} + +function transformGrammar(grammar) { + grammar = deepClone(grammar); + + Object.keys(grammar).forEach(function transformGrammarForKey(id) { + grammar[id] = transformProduction(id, grammar[id], grammar); + }); + + return grammar; +}; + +function transform(ebnf) { + if (devDebug > 0) console.log('EBNF:\n ', JSON.stringify(ebnf, null, 2)); + var rv = transformGrammar(ebnf); + if (devDebug > 0) console.log('\n\nEBNF after transformation:\n ', JSON.stringify(rv, null, 2)); + + return rv; +} -exports.transform = EBNF.transform; +export default transform; diff --git a/ebnf.y b/ebnf.y index e5ccfd3..ce1edc9 100644 --- a/ebnf.y +++ b/ebnf.y @@ -1,26 +1,81 @@ /* EBNF grammar spec */ +%code imports %{ + import XRegExp from '@gerhobbelt/xregexp'; // for helping out the `%options xregexp` in the lexer +%} + + + %lex -id [a-zA-Z][a-zA-Z0-9_-]* + +ASCII_LETTER [a-zA-z] +// \p{Alphabetic} already includes [a-zA-z], hence we don't need to merge +// with {UNICODE_LETTER} (though jison has code to optimize if you *did* +// include the `[a-zA-Z]` anyway): +UNICODE_LETTER [\p{Alphabetic}] +ALPHA [{UNICODE_LETTER}_] +DIGIT [\p{Number}] +WHITESPACE [\s\r\n\p{Separator}] +ALNUM [{ALPHA}{DIGIT}] + +NAME [{ALPHA}](?:[{ALNUM}-]*{ALNUM})? +ID [{ALPHA}]{ALNUM}* +DECIMAL_NUMBER [1-9][0-9]* +HEX_NUMBER "0"[xX][0-9a-fA-F]+ +BR \r\n|\n|\r +// WhiteSpace MUST NOT match CR/LF and the regex `\s` DOES, so we cannot use +// that one directly. Instead we define the {WS} macro here: +WS [^\S\r\n] + +// Quoted string content: support *escaped* quotes inside strings: +QUOTED_STRING_CONTENT (?:\\\'|\\[^\']|[^\\\'])* +DOUBLEQUOTED_STRING_CONTENT (?:\\\"|\\[^\"]|[^\\\"])* + + + + + +%options easy_keyword_rules +%options ranges +%options xregexp + + %% -\s+ /* skip whitespace */ -{id} return 'symbol'; -"["{id}"]" yytext = yytext.substr(1, yyleng-2); return 'ALIAS'; -"'"[^']*"'" return 'symbol'; -"." return 'symbol'; - -bar return 'bar'; -"(" return '('; -")" return ')'; -"*" return '*'; -"?" return '?'; -"|" return '|'; -"+" return '+'; -<> return 'EOF'; + +\s+ /* skip whitespace */ +{ID} return 'SYMBOL'; +"$end" return 'SYMBOL'; +"["{ID}"]" yytext = this.matches[1]; return 'ALIAS'; + +// Stringified tokens are always `'`-surrounded by the bnf.y grammar unless the token +// itself contain an `'`. +// +// Note about edge case: EBNF grammars should not barf a hairball if someone +// ever decided that the combo of quotes, i.e. `'"` would be a legal token in their grammar, +// e.g. `rule: A '\'"' B`. +// +// And, yes, we assume that the `bnf.y` parser is our regular input source, so we may +// be a bit stricter here in what we lex than in the userland-facing `bnf.l` lexer. +\'{QUOTED_STRING_CONTENT}\' + return 'SYMBOL'; +\"{DOUBLEQUOTED_STRING_CONTENT}\" + return 'SYMBOL'; +"." return 'SYMBOL'; + +"(" return '('; +")" return ')'; +"*" return '*'; +"?" 
return '?'; +"|" return '|'; +"+" return '+'; +<> return 'EOF'; + /lex + + %start production %% @@ -34,33 +89,56 @@ handle_list : handle { $$ = [$handle]; } | handle_list '|' handle - { $handle_list.push($handle); } + { + $handle_list.push($handle); + $$ = $handle_list; + } ; handle - : + : %epsilon { $$ = []; } - | handle expression_suffix - { $handle.push($expression_suffix); } + | rule + { $$ = $rule; } + ; + +rule + : suffixed_expression + { $$ = [$suffixed_expression]; } + | rule suffixed_expression + { + $rule.push($suffixed_expression); + $$ = $rule; + } ; -expression_suffix +suffixed_expression : expression suffix ALIAS { $$ = ['xalias', $suffix, $expression, $ALIAS]; } | expression suffix - { if ($suffix) $$ = [$suffix, $expression]; else $$ = $expression; } + { + if ($suffix) { + $$ = [$suffix, $expression]; + } else { + $$ = $expression; + } + } ; expression - : symbol - { $$ = ['symbol', $symbol]; } + : SYMBOL + { $$ = ['symbol', $SYMBOL]; } | '(' handle_list ')' { $$ = ['()', $handle_list]; } ; suffix - : + : %epsilon + { $$ = undefined; } | '*' + { $$ = $1; } | '?' + { $$ = $1; } | '+' + { $$ = $1; } ; diff --git a/package-lock.json b/package-lock.json new file mode 100644 index 0000000..3ccacff --- /dev/null +++ b/package-lock.json @@ -0,0 +1,2485 @@ +{ + "name": "@gerhobbelt/ebnf-parser", + "version": "0.6.1-205", + "lockfileVersion": 1, + "dependencies": { + "@gerhobbelt/ast-types": { + "version": "0.9.13-4", + "resolved": "https://registry.npmjs.org/@gerhobbelt/ast-types/-/ast-types-0.9.13-4.tgz", + "integrity": "sha512-V8UIj1XN6XOP014fPpecxEa7AlAB9kaTOB/wF9UbguuwIMWCHDmdA9i03JDK9zXyVDVaLWCYh42JK8F9f27AtA==" + }, + "@gerhobbelt/ast-util": { + "version": "0.6.1-4", + "resolved": "https://registry.npmjs.org/@gerhobbelt/ast-util/-/ast-util-0.6.1-4.tgz", + "integrity": "sha512-NP7YZh7rR6CNiMLyKTF+qb2Epx0r5x/zKQ3Z14TgXl73YJurC8WkMkFM9nDj8cRXb6R+f+BEu4DqAvvYKMxbqg==" + }, + "@gerhobbelt/lex-parser": { + "version": "0.6.1-205", + "resolved": "https://registry.npmjs.org/@gerhobbelt/lex-parser/-/lex-parser-0.6.1-205.tgz", + "integrity": "sha512-U+i43wcYKj+JX43o6nhQnK94BJBEku7Sd326C1sU576VxoVlRcmpFwQE5i0G4tiCvgLv0SL3Cxbsm46FBT+xjQ==", + "dependencies": { + "jison-helpers-lib": { + "version": "0.6.1-203", + "resolved": "https://registry.npmjs.org/jison-helpers-lib/-/jison-helpers-lib-0.6.1-203.tgz", + "integrity": "sha512-Pc8JW2rGm3ZpFtcYD3+uoZdVRmnyBPwzZc2SaPvriWbSPwsQpLOZjSGOq5WK6fuPZH0FhifHwr0YwHwiXS3hWw==" + } + } + }, + "@gerhobbelt/linewrap": { + "version": "0.2.2-3", + "resolved": "https://registry.npmjs.org/@gerhobbelt/linewrap/-/linewrap-0.2.2-3.tgz", + "integrity": "sha512-u2eUbXgNtqckBI4gxds/uiUNoytT+qIqpePmVDI5isW8A18uB3Qz1P+UxAHgFafGOZWJNrpR0IKnZhl7QhaUng==", + "dev": true + }, + "@gerhobbelt/nomnom": { + "version": "1.8.4-24", + "resolved": "https://registry.npmjs.org/@gerhobbelt/nomnom/-/nomnom-1.8.4-24.tgz", + "integrity": "sha512-spzyz2vHd1BhYNSUMXjqJOwk4AjnOIzZz3cYCOryUCzMvlqz01/+SAPEy/pjT47CrOGdWd0JgemePjru1aLYgQ==", + "dev": true, + "dependencies": { + "ansi-styles": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.0.tgz", + "integrity": "sha512-NnSOmMEYtVR2JVMIGTzynRkkaxtiq1xnFBcdQD/DnNCYPoEPsVJhM98BDyaoNOQIi7p4okdi3E27eN7GQbsUug==", + "dev": true + }, + "chalk": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-2.1.0.tgz", + "integrity": "sha512-LUHGS/dge4ujbXMJrnihYMcL4AoOweGnw9Tp3kQuqy1Kx5c1qKjqvMJZ6nVJPMWJtKCTN72ZogH3oeSO9g9rXQ==", + "dev": true + }, + "supports-color": { + 
"version": "4.5.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-4.5.0.tgz", + "integrity": "sha1-vnoN5ITexcXN34s9WRJQRJEvY1s=", + "dev": true + } + } + }, + "@gerhobbelt/recast": { + "version": "0.12.7-11", + "resolved": "https://registry.npmjs.org/@gerhobbelt/recast/-/recast-0.12.7-11.tgz", + "integrity": "sha512-vjk3AMqq8bgg8Wf5B6n2OdWmpa9iyBYX+/N5+vTf9mz/+etm0YUHcgGdzX98f8tSTCUl+LEdMKNN4vteLbUsxg==", + "dependencies": { + "@gerhobbelt/ast-types": { + "version": "0.9.13-7", + "resolved": "https://registry.npmjs.org/@gerhobbelt/ast-types/-/ast-types-0.9.13-7.tgz", + "integrity": "sha512-OKLyvezcD1X9WHXsKfDm2nLhwt1ybNRvErTqVeM5wlq6vQvNMkWKG6SLwG3Y08gkseZWKfe7enhPiJWoJORf3A==" + } + } + }, + "@gerhobbelt/xregexp": { + "version": "3.2.0-22", + "resolved": "https://registry.npmjs.org/@gerhobbelt/xregexp/-/xregexp-3.2.0-22.tgz", + "integrity": "sha512-TRu38Z67VxFSMrBP3z/ORiJVQqp56ulidZirbobtmJnVGBWLdo4GbHtihgIJFGieIZuk+LxmPkK45SY+SQsR3A==" + }, + "ansi-regex": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-2.1.1.tgz", + "integrity": "sha1-w7M6te42DYbg5ijwRorn7yfWVN8=", + "dev": true + }, + "ansi-styles": { + "version": "2.2.1", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-2.2.1.tgz", + "integrity": "sha1-tDLdM1i2NM914eRmQ2gkBTPB3b4=", + "dev": true + }, + "anymatch": { + "version": "1.3.2", + "resolved": "https://registry.npmjs.org/anymatch/-/anymatch-1.3.2.tgz", + "integrity": "sha512-0XNayC8lTHQ2OI8aljNCN3sSx6hsr/1+rlcDAotXJR7C1oZZHCNsfpbKwMjRA3Uqb5tF1Rae2oloTr4xpq+WjA==", + "dev": true, + "optional": true + }, + "arr-diff": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/arr-diff/-/arr-diff-2.0.0.tgz", + "integrity": "sha1-jzuCf5Vai9ZpaX5KQlasPOrjVs8=", + "dev": true, + "optional": true + }, + "arr-flatten": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/arr-flatten/-/arr-flatten-1.1.0.tgz", + "integrity": "sha512-L3hKV5R/p5o81R7O02IGnwpDmkp6E982XhtbuwSe3O4qOtMMMtodicASA1Cny2U+aCXcNpml+m4dPsvsJ3jatg==", + "dev": true, + "optional": true + }, + "array-union": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/array-union/-/array-union-1.0.2.tgz", + "integrity": "sha1-mjRBDk9OPaI96jdb5b5w8kd47Dk=", + "dev": true + }, + "array-uniq": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/array-uniq/-/array-uniq-1.0.3.tgz", + "integrity": "sha1-r2rId6Jcx/dOBYiUdThY39sk/bY=", + "dev": true + }, + "array-unique": { + "version": "0.2.1", + "resolved": "https://registry.npmjs.org/array-unique/-/array-unique-0.2.1.tgz", + "integrity": "sha1-odl8yvy8JiXMcPrc6zalDFiwGlM=", + "dev": true, + "optional": true + }, + "assertion-error": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/assertion-error/-/assertion-error-1.0.2.tgz", + "integrity": "sha1-E8pRXYYgbaC6xm6DTdOX2HWBCUw=", + "dev": true + }, + "async-each": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/async-each/-/async-each-1.0.1.tgz", + "integrity": "sha1-GdOGodntxufByF04iu28xW0zYC0=", + "dev": true, + "optional": true + }, + "babel-cli": { + "version": "6.26.0", + "resolved": "https://registry.npmjs.org/babel-cli/-/babel-cli-6.26.0.tgz", + "integrity": "sha1-UCq1SHTX24itALiHoGODzgPQAvE=", + "dev": true, + "dependencies": { + "source-map": { + "version": "0.5.7", + "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.5.7.tgz", + "integrity": "sha1-igOdLRAh0i0eoUyA2OpGi6LvP8w=", + "dev": true + } + } + }, + "babel-code-frame": { + "version": 
"6.26.0", + "resolved": "https://registry.npmjs.org/babel-code-frame/-/babel-code-frame-6.26.0.tgz", + "integrity": "sha1-Y/1D99weO7fONZR9uP42mj9Yx0s=", + "dev": true + }, + "babel-core": { + "version": "6.26.0", + "resolved": "https://registry.npmjs.org/babel-core/-/babel-core-6.26.0.tgz", + "integrity": "sha1-rzL3izGm/O8RnIew/Y2XU/A6C7g=", + "dev": true, + "dependencies": { + "source-map": { + "version": "0.5.7", + "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.5.7.tgz", + "integrity": "sha1-igOdLRAh0i0eoUyA2OpGi6LvP8w=", + "dev": true + } + } + }, + "babel-generator": { + "version": "6.26.0", + "resolved": "https://registry.npmjs.org/babel-generator/-/babel-generator-6.26.0.tgz", + "integrity": "sha1-rBriAHC3n248odMmlhMFN3TyDcU=", + "dev": true, + "dependencies": { + "source-map": { + "version": "0.5.7", + "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.5.7.tgz", + "integrity": "sha1-igOdLRAh0i0eoUyA2OpGi6LvP8w=", + "dev": true + } + } + }, + "babel-helper-builder-binary-assignment-operator-visitor": { + "version": "6.24.1", + "resolved": "https://registry.npmjs.org/babel-helper-builder-binary-assignment-operator-visitor/-/babel-helper-builder-binary-assignment-operator-visitor-6.24.1.tgz", + "integrity": "sha1-zORReto1b0IgvK6KAsKzRvmlZmQ=", + "dev": true + }, + "babel-helper-call-delegate": { + "version": "6.24.1", + "resolved": "https://registry.npmjs.org/babel-helper-call-delegate/-/babel-helper-call-delegate-6.24.1.tgz", + "integrity": "sha1-7Oaqzdx25Bw0YfiL/Fdb0Nqi340=", + "dev": true + }, + "babel-helper-define-map": { + "version": "6.26.0", + "resolved": "https://registry.npmjs.org/babel-helper-define-map/-/babel-helper-define-map-6.26.0.tgz", + "integrity": "sha1-pfVtq0GiX5fstJjH66ypgZ+Vvl8=", + "dev": true + }, + "babel-helper-explode-assignable-expression": { + "version": "6.24.1", + "resolved": "https://registry.npmjs.org/babel-helper-explode-assignable-expression/-/babel-helper-explode-assignable-expression-6.24.1.tgz", + "integrity": "sha1-8luCz33BBDPFX3BZLVdGQArCLKo=", + "dev": true + }, + "babel-helper-function-name": { + "version": "6.24.1", + "resolved": "https://registry.npmjs.org/babel-helper-function-name/-/babel-helper-function-name-6.24.1.tgz", + "integrity": "sha1-00dbjAPtmCQqJbSDUasYOZ01gKk=", + "dev": true + }, + "babel-helper-get-function-arity": { + "version": "6.24.1", + "resolved": "https://registry.npmjs.org/babel-helper-get-function-arity/-/babel-helper-get-function-arity-6.24.1.tgz", + "integrity": "sha1-j3eCqpNAfEHTqlCQj4mwMbG2hT0=", + "dev": true + }, + "babel-helper-hoist-variables": { + "version": "6.24.1", + "resolved": "https://registry.npmjs.org/babel-helper-hoist-variables/-/babel-helper-hoist-variables-6.24.1.tgz", + "integrity": "sha1-HssnaJydJVE+rbyZFKc/VAi+enY=", + "dev": true + }, + "babel-helper-optimise-call-expression": { + "version": "6.24.1", + "resolved": "https://registry.npmjs.org/babel-helper-optimise-call-expression/-/babel-helper-optimise-call-expression-6.24.1.tgz", + "integrity": "sha1-96E0J7qfc/j0+pk8VKl4gtEkQlc=", + "dev": true + }, + "babel-helper-regex": { + "version": "6.26.0", + "resolved": "https://registry.npmjs.org/babel-helper-regex/-/babel-helper-regex-6.26.0.tgz", + "integrity": "sha1-MlxZ+QL4LyS3T6zu0DY5VPZJXnI=", + "dev": true + }, + "babel-helper-remap-async-to-generator": { + "version": "6.24.1", + "resolved": "https://registry.npmjs.org/babel-helper-remap-async-to-generator/-/babel-helper-remap-async-to-generator-6.24.1.tgz", + "integrity": "sha1-XsWBgnrXI/7N04HxySg5BnbkVRs=", 
+ "dev": true + }, + "babel-helper-replace-supers": { + "version": "6.24.1", + "resolved": "https://registry.npmjs.org/babel-helper-replace-supers/-/babel-helper-replace-supers-6.24.1.tgz", + "integrity": "sha1-v22/5Dk40XNpohPKiov3S2qQqxo=", + "dev": true + }, + "babel-helpers": { + "version": "6.24.1", + "resolved": "https://registry.npmjs.org/babel-helpers/-/babel-helpers-6.24.1.tgz", + "integrity": "sha1-NHHenK7DiOXIUOWX5Yom3fN2ArI=", + "dev": true + }, + "babel-messages": { + "version": "6.23.0", + "resolved": "https://registry.npmjs.org/babel-messages/-/babel-messages-6.23.0.tgz", + "integrity": "sha1-8830cDhYA1sqKVHG7F7fbGLyYw4=", + "dev": true + }, + "babel-plugin-check-es2015-constants": { + "version": "6.22.0", + "resolved": "https://registry.npmjs.org/babel-plugin-check-es2015-constants/-/babel-plugin-check-es2015-constants-6.22.0.tgz", + "integrity": "sha1-NRV7EBQm/S/9PaP3XH0ekYNbv4o=", + "dev": true + }, + "babel-plugin-syntax-async-functions": { + "version": "6.13.0", + "resolved": "https://registry.npmjs.org/babel-plugin-syntax-async-functions/-/babel-plugin-syntax-async-functions-6.13.0.tgz", + "integrity": "sha1-ytnK0RkbWtY0vzCuCHI5HgZHvpU=", + "dev": true + }, + "babel-plugin-syntax-exponentiation-operator": { + "version": "6.13.0", + "resolved": "https://registry.npmjs.org/babel-plugin-syntax-exponentiation-operator/-/babel-plugin-syntax-exponentiation-operator-6.13.0.tgz", + "integrity": "sha1-nufoM3KQ2pUoggGmpX9BcDF4MN4=", + "dev": true + }, + "babel-plugin-syntax-object-rest-spread": { + "version": "6.13.0", + "resolved": "https://registry.npmjs.org/babel-plugin-syntax-object-rest-spread/-/babel-plugin-syntax-object-rest-spread-6.13.0.tgz", + "integrity": "sha1-/WU28rzhODb/o6VFjEkDpZe7O/U=", + "dev": true + }, + "babel-plugin-syntax-trailing-function-commas": { + "version": "6.22.0", + "resolved": "https://registry.npmjs.org/babel-plugin-syntax-trailing-function-commas/-/babel-plugin-syntax-trailing-function-commas-6.22.0.tgz", + "integrity": "sha1-ugNgk3+NBuQBgKQ/4NVhb/9TLPM=", + "dev": true + }, + "babel-plugin-transform-async-to-generator": { + "version": "6.24.1", + "resolved": "https://registry.npmjs.org/babel-plugin-transform-async-to-generator/-/babel-plugin-transform-async-to-generator-6.24.1.tgz", + "integrity": "sha1-ZTbjeK/2yx1VF6wOQOs+n8jQh2E=", + "dev": true + }, + "babel-plugin-transform-es2015-arrow-functions": { + "version": "6.22.0", + "resolved": "https://registry.npmjs.org/babel-plugin-transform-es2015-arrow-functions/-/babel-plugin-transform-es2015-arrow-functions-6.22.0.tgz", + "integrity": "sha1-RSaSy3EdX3ncf4XkQM5BufJE0iE=", + "dev": true + }, + "babel-plugin-transform-es2015-block-scoped-functions": { + "version": "6.22.0", + "resolved": "https://registry.npmjs.org/babel-plugin-transform-es2015-block-scoped-functions/-/babel-plugin-transform-es2015-block-scoped-functions-6.22.0.tgz", + "integrity": "sha1-u8UbSflk1wy42OC5ToICRs46YUE=", + "dev": true + }, + "babel-plugin-transform-es2015-block-scoping": { + "version": "6.26.0", + "resolved": "https://registry.npmjs.org/babel-plugin-transform-es2015-block-scoping/-/babel-plugin-transform-es2015-block-scoping-6.26.0.tgz", + "integrity": "sha1-1w9SmcEwjQXBL0Y4E7CgnnOxiV8=", + "dev": true + }, + "babel-plugin-transform-es2015-classes": { + "version": "6.24.1", + "resolved": "https://registry.npmjs.org/babel-plugin-transform-es2015-classes/-/babel-plugin-transform-es2015-classes-6.24.1.tgz", + "integrity": "sha1-WkxYpQyclGHlZLSyo7+ryXolhNs=", + "dev": true + }, + 
"babel-plugin-transform-es2015-computed-properties": { + "version": "6.24.1", + "resolved": "https://registry.npmjs.org/babel-plugin-transform-es2015-computed-properties/-/babel-plugin-transform-es2015-computed-properties-6.24.1.tgz", + "integrity": "sha1-b+Ko0WiV1WNPTNmZttNICjCBWbM=", + "dev": true + }, + "babel-plugin-transform-es2015-destructuring": { + "version": "6.23.0", + "resolved": "https://registry.npmjs.org/babel-plugin-transform-es2015-destructuring/-/babel-plugin-transform-es2015-destructuring-6.23.0.tgz", + "integrity": "sha1-mXux8auWf2gtKwh2/jWNYOdlxW0=", + "dev": true + }, + "babel-plugin-transform-es2015-duplicate-keys": { + "version": "6.24.1", + "resolved": "https://registry.npmjs.org/babel-plugin-transform-es2015-duplicate-keys/-/babel-plugin-transform-es2015-duplicate-keys-6.24.1.tgz", + "integrity": "sha1-c+s9MQypaePvnskcU3QabxV2Qj4=", + "dev": true + }, + "babel-plugin-transform-es2015-for-of": { + "version": "6.23.0", + "resolved": "https://registry.npmjs.org/babel-plugin-transform-es2015-for-of/-/babel-plugin-transform-es2015-for-of-6.23.0.tgz", + "integrity": "sha1-9HyVsrYT3x0+zC/bdXNiPHUkhpE=", + "dev": true + }, + "babel-plugin-transform-es2015-function-name": { + "version": "6.24.1", + "resolved": "https://registry.npmjs.org/babel-plugin-transform-es2015-function-name/-/babel-plugin-transform-es2015-function-name-6.24.1.tgz", + "integrity": "sha1-g0yJhTvDaxrw86TF26qU/Y6sqos=", + "dev": true + }, + "babel-plugin-transform-es2015-literals": { + "version": "6.22.0", + "resolved": "https://registry.npmjs.org/babel-plugin-transform-es2015-literals/-/babel-plugin-transform-es2015-literals-6.22.0.tgz", + "integrity": "sha1-T1SgLWzWbPkVKAAZox0xklN3yi4=", + "dev": true + }, + "babel-plugin-transform-es2015-modules-amd": { + "version": "6.24.1", + "resolved": "https://registry.npmjs.org/babel-plugin-transform-es2015-modules-amd/-/babel-plugin-transform-es2015-modules-amd-6.24.1.tgz", + "integrity": "sha1-Oz5UAXI5hC1tGcMBHEvS8AoA0VQ=", + "dev": true + }, + "babel-plugin-transform-es2015-modules-commonjs": { + "version": "6.26.0", + "resolved": "https://registry.npmjs.org/babel-plugin-transform-es2015-modules-commonjs/-/babel-plugin-transform-es2015-modules-commonjs-6.26.0.tgz", + "integrity": "sha1-DYOUApt9xqvhqX7xgeAHWN0uXYo=", + "dev": true + }, + "babel-plugin-transform-es2015-modules-systemjs": { + "version": "6.24.1", + "resolved": "https://registry.npmjs.org/babel-plugin-transform-es2015-modules-systemjs/-/babel-plugin-transform-es2015-modules-systemjs-6.24.1.tgz", + "integrity": "sha1-/4mhQrkRmpBhlfXxBuzzBdlAfSM=", + "dev": true + }, + "babel-plugin-transform-es2015-modules-umd": { + "version": "6.24.1", + "resolved": "https://registry.npmjs.org/babel-plugin-transform-es2015-modules-umd/-/babel-plugin-transform-es2015-modules-umd-6.24.1.tgz", + "integrity": "sha1-rJl+YoXNGO1hdq22B9YCNErThGg=", + "dev": true + }, + "babel-plugin-transform-es2015-object-super": { + "version": "6.24.1", + "resolved": "https://registry.npmjs.org/babel-plugin-transform-es2015-object-super/-/babel-plugin-transform-es2015-object-super-6.24.1.tgz", + "integrity": "sha1-JM72muIcuDp/hgPa0CH1cusnj40=", + "dev": true + }, + "babel-plugin-transform-es2015-parameters": { + "version": "6.24.1", + "resolved": "https://registry.npmjs.org/babel-plugin-transform-es2015-parameters/-/babel-plugin-transform-es2015-parameters-6.24.1.tgz", + "integrity": "sha1-V6w1GrScrxSpfNE7CfZv3wpiXys=", + "dev": true + }, + "babel-plugin-transform-es2015-shorthand-properties": { + "version": "6.24.1", + "resolved": 
"https://registry.npmjs.org/babel-plugin-transform-es2015-shorthand-properties/-/babel-plugin-transform-es2015-shorthand-properties-6.24.1.tgz", + "integrity": "sha1-JPh11nIch2YbvZmkYi5R8U3jiqA=", + "dev": true + }, + "babel-plugin-transform-es2015-spread": { + "version": "6.22.0", + "resolved": "https://registry.npmjs.org/babel-plugin-transform-es2015-spread/-/babel-plugin-transform-es2015-spread-6.22.0.tgz", + "integrity": "sha1-1taKmfia7cRTbIGlQujdnxdG+NE=", + "dev": true + }, + "babel-plugin-transform-es2015-sticky-regex": { + "version": "6.24.1", + "resolved": "https://registry.npmjs.org/babel-plugin-transform-es2015-sticky-regex/-/babel-plugin-transform-es2015-sticky-regex-6.24.1.tgz", + "integrity": "sha1-AMHNsaynERLN8M9hJsLta0V8zbw=", + "dev": true + }, + "babel-plugin-transform-es2015-template-literals": { + "version": "6.22.0", + "resolved": "https://registry.npmjs.org/babel-plugin-transform-es2015-template-literals/-/babel-plugin-transform-es2015-template-literals-6.22.0.tgz", + "integrity": "sha1-qEs0UPfp+PH2g51taH2oS7EjbY0=", + "dev": true + }, + "babel-plugin-transform-es2015-typeof-symbol": { + "version": "6.23.0", + "resolved": "https://registry.npmjs.org/babel-plugin-transform-es2015-typeof-symbol/-/babel-plugin-transform-es2015-typeof-symbol-6.23.0.tgz", + "integrity": "sha1-3sCfHN3/lLUqxz1QXITfWdzOs3I=", + "dev": true + }, + "babel-plugin-transform-es2015-unicode-regex": { + "version": "6.24.1", + "resolved": "https://registry.npmjs.org/babel-plugin-transform-es2015-unicode-regex/-/babel-plugin-transform-es2015-unicode-regex-6.24.1.tgz", + "integrity": "sha1-04sS9C6nMj9yk4fxinxa4frrNek=", + "dev": true + }, + "babel-plugin-transform-exponentiation-operator": { + "version": "6.24.1", + "resolved": "https://registry.npmjs.org/babel-plugin-transform-exponentiation-operator/-/babel-plugin-transform-exponentiation-operator-6.24.1.tgz", + "integrity": "sha1-KrDJx/MJj6SJB3cruBP+QejeOg4=", + "dev": true + }, + "babel-plugin-transform-object-rest-spread": { + "version": "6.26.0", + "resolved": "https://registry.npmjs.org/babel-plugin-transform-object-rest-spread/-/babel-plugin-transform-object-rest-spread-6.26.0.tgz", + "integrity": "sha1-DzZpLVD+9rfi1LOsFHgTepY7ewY=", + "dev": true + }, + "babel-plugin-transform-regenerator": { + "version": "6.26.0", + "resolved": "https://registry.npmjs.org/babel-plugin-transform-regenerator/-/babel-plugin-transform-regenerator-6.26.0.tgz", + "integrity": "sha1-4HA2lvveJ/Cj78rPi03KL3s6jy8=", + "dev": true + }, + "babel-plugin-transform-strict-mode": { + "version": "6.24.1", + "resolved": "https://registry.npmjs.org/babel-plugin-transform-strict-mode/-/babel-plugin-transform-strict-mode-6.24.1.tgz", + "integrity": "sha1-1fr3qleKZbvlkc9e2uBKDGcCB1g=", + "dev": true + }, + "babel-polyfill": { + "version": "6.26.0", + "resolved": "https://registry.npmjs.org/babel-polyfill/-/babel-polyfill-6.26.0.tgz", + "integrity": "sha1-N5k3q8Z9eJWXCtxiHyhM2WbPIVM=", + "dev": true, + "dependencies": { + "regenerator-runtime": { + "version": "0.10.5", + "resolved": "https://registry.npmjs.org/regenerator-runtime/-/regenerator-runtime-0.10.5.tgz", + "integrity": "sha1-M2w+/BIgrc7dosn6tntaeVWjNlg=", + "dev": true + } + } + }, + "babel-preset-env": { + "version": "1.6.1", + "resolved": "https://registry.npmjs.org/babel-preset-env/-/babel-preset-env-1.6.1.tgz", + "integrity": "sha512-W6VIyA6Ch9ePMI7VptNn2wBM6dbG0eSz25HEiL40nQXCsXGTGZSTZu1Iap+cj3Q0S5a7T9+529l/5Bkvd+afNA==", + "dev": true + }, + "babel-preset-modern-browsers": { + "version": "10.0.1", + "resolved": 
"https://registry.npmjs.org/babel-preset-modern-browsers/-/babel-preset-modern-browsers-10.0.1.tgz", + "integrity": "sha512-OwJlaopcYWBjgw4jLkPRXaArpFzpdAdgn7ZDQdY6a284uAjpKGsFP3eRo7rxrXsvmDMcXXQu1CsQzg09IUQelQ==", + "dev": true + }, + "babel-register": { + "version": "6.26.0", + "resolved": "https://registry.npmjs.org/babel-register/-/babel-register-6.26.0.tgz", + "integrity": "sha1-btAhFz4vy0htestFxgCahW9kcHE=", + "dev": true + }, + "babel-runtime": { + "version": "6.26.0", + "resolved": "https://registry.npmjs.org/babel-runtime/-/babel-runtime-6.26.0.tgz", + "integrity": "sha1-llxwWGaOgrVde/4E/yM3vItWR/4=", + "dev": true + }, + "babel-template": { + "version": "6.26.0", + "resolved": "https://registry.npmjs.org/babel-template/-/babel-template-6.26.0.tgz", + "integrity": "sha1-3gPi0WOWsGn0bdn/+FIfsaDjXgI=", + "dev": true + }, + "babel-traverse": { + "version": "6.26.0", + "resolved": "https://registry.npmjs.org/babel-traverse/-/babel-traverse-6.26.0.tgz", + "integrity": "sha1-RqnL1+3MYsjlwGTi0tjQ9ANXZu4=", + "dev": true + }, + "babel-types": { + "version": "6.26.0", + "resolved": "https://registry.npmjs.org/babel-types/-/babel-types-6.26.0.tgz", + "integrity": "sha1-o7Bz+Uq0nrb6Vc1lInozQ4BjJJc=", + "dev": true + }, + "babylon": { + "version": "6.18.0", + "resolved": "https://registry.npmjs.org/babylon/-/babylon-6.18.0.tgz", + "integrity": "sha512-q/UEjfGJ2Cm3oKV71DJz9d25TPnq5rhBVL2Q4fA5wcC3jcrdn7+SssEybFIxwAvvP+YCsCYNKughoF33GxgycQ==", + "dev": true + }, + "balanced-match": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.0.tgz", + "integrity": "sha1-ibTRmasr7kneFk6gK4nORi1xt2c=", + "dev": true + }, + "binary-extensions": { + "version": "1.10.0", + "resolved": "https://registry.npmjs.org/binary-extensions/-/binary-extensions-1.10.0.tgz", + "integrity": "sha1-muuabF6IY4qtFx4Wf1kAq+JINdA=", + "dev": true, + "optional": true + }, + "brace-expansion": { + "version": "1.1.8", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.8.tgz", + "integrity": "sha1-wHshHHyVLsH479Uad+8NHTmQopI=", + "dev": true + }, + "braces": { + "version": "1.8.5", + "resolved": "https://registry.npmjs.org/braces/-/braces-1.8.5.tgz", + "integrity": "sha1-uneWLhLf+WnWt2cR6RS3N4V79qc=", + "dev": true, + "optional": true + }, + "browser-resolve": { + "version": "1.11.2", + "resolved": "https://registry.npmjs.org/browser-resolve/-/browser-resolve-1.11.2.tgz", + "integrity": "sha1-j/CbCixCFxihBRwmCzLkj0QpOM4=", + "dev": true, + "dependencies": { + "resolve": { + "version": "1.1.7", + "resolved": "https://registry.npmjs.org/resolve/-/resolve-1.1.7.tgz", + "integrity": "sha1-IDEU2CrSxe2ejgQRs5ModeiJ6Xs=", + "dev": true + } + } + }, + "browser-stdout": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/browser-stdout/-/browser-stdout-1.3.0.tgz", + "integrity": "sha1-81HTKWnTL6XXpVZxVCY9korjvR8=", + "dev": true + }, + "browserslist": { + "version": "2.5.1", + "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-2.5.1.tgz", + "integrity": "sha512-jAvM2ku7YDJ+leAq3bFH1DE0Ylw+F+EQDq4GkqZfgPEqpWYw9ofQH85uKSB9r3Tv7XDbfqVtE+sdvKJW7IlPJA==", + "dev": true + }, + "builtin-modules": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/builtin-modules/-/builtin-modules-1.1.1.tgz", + "integrity": "sha1-Jw8HbFpywC9bZaR9+Uxf46J4iS8=", + "dev": true + }, + "camelcase": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-4.1.0.tgz", + "integrity": "sha1-1UVjW+HjPFQmScaRc+Xeas+uNN0=", 
+ "dev": true + }, + "caniuse-lite": { + "version": "1.0.30000749", + "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30000749.tgz", + "integrity": "sha1-L/OChlrq2MyjXaz7qwT1jv+kwBw=", + "dev": true + }, + "chai": { + "version": "4.1.2", + "resolved": "https://registry.npmjs.org/chai/-/chai-4.1.2.tgz", + "integrity": "sha1-D2RYS6ZC8PKs4oBiefTwbKI61zw=", + "dev": true + }, + "chalk": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-1.1.3.tgz", + "integrity": "sha1-qBFcVeSnAv5NFQq9OHKCKn4J/Jg=", + "dev": true + }, + "check-error": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/check-error/-/check-error-1.0.2.tgz", + "integrity": "sha1-V00xLt2Iu13YkS6Sht1sCu1KrII=", + "dev": true + }, + "chokidar": { + "version": "1.7.0", + "resolved": "https://registry.npmjs.org/chokidar/-/chokidar-1.7.0.tgz", + "integrity": "sha1-eY5ol3gVHIB2tLNg5e3SjNortGg=", + "dev": true, + "optional": true + }, + "cliui": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/cliui/-/cliui-3.2.0.tgz", + "integrity": "sha1-EgYBU3qRbSmUD5NNo7SNWFo5IT0=", + "dev": true, + "dependencies": { + "string-width": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-1.0.2.tgz", + "integrity": "sha1-EYvfW4zcUaKn5w0hHgfisLmxB9M=", + "dev": true + } + } + }, + "code-point-at": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/code-point-at/-/code-point-at-1.1.0.tgz", + "integrity": "sha1-DQcLTQQ6W+ozovGkDi7bPZpMz3c=", + "dev": true + }, + "color-convert": { + "version": "1.9.0", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-1.9.0.tgz", + "integrity": "sha1-Gsz5fdc5uYO/mU1W/sj5WFNkG3o=", + "dev": true + }, + "color-name": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.3.tgz", + "integrity": "sha1-p9BVi9icQveV3UIyj3QIMcpTvCU=", + "dev": true + }, + "commander": { + "version": "2.11.0", + "resolved": "https://registry.npmjs.org/commander/-/commander-2.11.0.tgz", + "integrity": "sha512-b0553uYA5YAEGgyYIGYROzKQ7X5RAqedkfjiZxwi0kL1g3bOaBNNZfYkzt/CL0umgD5wc9Jec2FbB98CjkMRvQ==", + "dev": true + }, + "concat-map": { + "version": "0.0.1", + "resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz", + "integrity": "sha1-2Klr13/Wjfd5OnMDajug1UBdR3s=", + "dev": true + }, + "convert-source-map": { + "version": "1.5.0", + "resolved": "https://registry.npmjs.org/convert-source-map/-/convert-source-map-1.5.0.tgz", + "integrity": "sha1-ms1whRxtXf3ZPZKC5e35SgP/RrU=", + "dev": true + }, + "core-js": { + "version": "2.5.1", + "resolved": "https://registry.npmjs.org/core-js/-/core-js-2.5.1.tgz", + "integrity": "sha1-rmh03GaTd4m4B1T/VCjfZoGcpQs=" + }, + "core-util-is": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/core-util-is/-/core-util-is-1.0.2.tgz", + "integrity": "sha1-tf1UIgqivFq1eqtxQMlAdUUDwac=", + "dev": true, + "optional": true + }, + "cross-spawn": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-5.1.0.tgz", + "integrity": "sha1-6L0O/uWPz/b4+UUQoKVUu/ojVEk=", + "dev": true + }, + "debug": { + "version": "2.6.9", + "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", + "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", + "dev": true + }, + "decamelize": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/decamelize/-/decamelize-1.2.0.tgz", + "integrity": 
"sha1-9lNNFRSCabIDUue+4m9QH5oZEpA=", + "dev": true + }, + "deep-eql": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/deep-eql/-/deep-eql-3.0.1.tgz", + "integrity": "sha512-+QeIQyN5ZuO+3Uk5DYh6/1eKO0m0YmJFGNmFHGACpf1ClL1nmlV/p4gNgbl2pJGxgXb4faqo6UE+M5ACEMyVcw==", + "dev": true + }, + "detect-indent": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/detect-indent/-/detect-indent-4.0.0.tgz", + "integrity": "sha1-920GQ1LN9Docts5hnE7jqUdd4gg=", + "dev": true + }, + "diff": { + "version": "3.3.1", + "resolved": "https://registry.npmjs.org/diff/-/diff-3.3.1.tgz", + "integrity": "sha512-MKPHZDMB0o6yHyDryUOScqZibp914ksXwAMYMTHj6KO8UeKsRYNJD3oNCKjTqZon+V488P7N/HzXF8t7ZR95ww==", + "dev": true + }, + "electron-to-chromium": { + "version": "1.3.27", + "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.3.27.tgz", + "integrity": "sha1-eOy4o5kGYYe7N07t412ccFZagD0=", + "dev": true + }, + "error-ex": { + "version": "1.3.1", + "resolved": "https://registry.npmjs.org/error-ex/-/error-ex-1.3.1.tgz", + "integrity": "sha1-+FWobOYa3E6GIcPNoh56dhLDqNw=", + "dev": true + }, + "escape-string-regexp": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz", + "integrity": "sha1-G2HAViGQqN/2rjuyzwIAyhMLhtQ=", + "dev": true + }, + "esprima": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/esprima/-/esprima-4.0.0.tgz", + "integrity": "sha512-oftTcaMu/EGrEIu904mWteKIv8vMuOgGYo7EhVJJN00R/EED9DCua/xxHRdYnKtcECzVg7xOWhflvJMnqcFZjw==" + }, + "esutils": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/esutils/-/esutils-2.0.2.tgz", + "integrity": "sha1-Cr9PHKpbyx96nYrMbepPqqBLrJs=", + "dev": true + }, + "execa": { + "version": "0.7.0", + "resolved": "https://registry.npmjs.org/execa/-/execa-0.7.0.tgz", + "integrity": "sha1-lEvs00zEHuMqY6n68nrVpl/Fl3c=", + "dev": true + }, + "exit": { + "version": "0.1.2", + "resolved": "https://registry.npmjs.org/exit/-/exit-0.1.2.tgz", + "integrity": "sha1-BjJjj42HfMghB9MKD/8aF8uhzQw=", + "dev": true + }, + "expand-brackets": { + "version": "0.1.5", + "resolved": "https://registry.npmjs.org/expand-brackets/-/expand-brackets-0.1.5.tgz", + "integrity": "sha1-3wcoTjQqgHzXM6xa9yQR5YHRF3s=", + "dev": true, + "optional": true + }, + "expand-range": { + "version": "1.8.2", + "resolved": "https://registry.npmjs.org/expand-range/-/expand-range-1.8.2.tgz", + "integrity": "sha1-opnv/TNf4nIeuujiV+x5ZE/IUzc=", + "dev": true, + "optional": true + }, + "extglob": { + "version": "0.3.2", + "resolved": "https://registry.npmjs.org/extglob/-/extglob-0.3.2.tgz", + "integrity": "sha1-Lhj/PS9JqydlzskCPwEdqo2DSaE=", + "dev": true, + "optional": true + }, + "filename-regex": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/filename-regex/-/filename-regex-2.0.1.tgz", + "integrity": "sha1-wcS5vuPglyXdsQa3XB4wH+LxiyY=", + "dev": true, + "optional": true + }, + "fill-range": { + "version": "2.2.3", + "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-2.2.3.tgz", + "integrity": "sha1-ULd9/X5Gm8dJJHCWNpn+eoSFpyM=", + "dev": true, + "optional": true + }, + "find-up": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/find-up/-/find-up-2.1.0.tgz", + "integrity": "sha1-RdG35QbHF93UgndaK3eSCjwMV6c=", + "dev": true + }, + "for-in": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/for-in/-/for-in-1.0.2.tgz", + "integrity": "sha1-gQaNKVqBQuwKxybG4iAMMPttXoA=", + "dev": true, + "optional": true + }, + 
"for-own": { + "version": "0.1.5", + "resolved": "https://registry.npmjs.org/for-own/-/for-own-0.1.5.tgz", + "integrity": "sha1-UmXGgaTylNq78XyVCbZ2OqhFEM4=", + "dev": true, + "optional": true + }, + "fs-readdir-recursive": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/fs-readdir-recursive/-/fs-readdir-recursive-1.0.0.tgz", + "integrity": "sha1-jNF0XItPiinIyuw5JHaSG6GV9WA=", + "dev": true + }, + "fs.realpath": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz", + "integrity": "sha1-FQStJSMVjKpA20onh8sBQRmU6k8=", + "dev": true + }, + "fsevents": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-1.1.2.tgz", + "integrity": "sha512-Sn44E5wQW4bTHXvQmvSHwqbuiXtduD6Rrjm2ZtUEGbyrig+nUH3t/QD4M4/ZXViY556TBpRgZkHLDx3JxPwxiw==", + "dev": true, + "optional": true, + "dependencies": { + "abbrev": { + "version": "1.1.0", + "bundled": true, + "dev": true, + "optional": true + }, + "ajv": { + "version": "4.11.8", + "bundled": true, + "dev": true, + "optional": true + }, + "ansi-regex": { + "version": "2.1.1", + "bundled": true, + "dev": true + }, + "aproba": { + "version": "1.1.1", + "bundled": true, + "dev": true, + "optional": true + }, + "are-we-there-yet": { + "version": "1.1.4", + "bundled": true, + "dev": true, + "optional": true + }, + "asn1": { + "version": "0.2.3", + "bundled": true, + "dev": true, + "optional": true + }, + "assert-plus": { + "version": "0.2.0", + "bundled": true, + "dev": true, + "optional": true + }, + "asynckit": { + "version": "0.4.0", + "bundled": true, + "dev": true, + "optional": true + }, + "aws-sign2": { + "version": "0.6.0", + "bundled": true, + "dev": true, + "optional": true + }, + "aws4": { + "version": "1.6.0", + "bundled": true, + "dev": true, + "optional": true + }, + "balanced-match": { + "version": "0.4.2", + "bundled": true, + "dev": true + }, + "bcrypt-pbkdf": { + "version": "1.0.1", + "bundled": true, + "dev": true, + "optional": true + }, + "block-stream": { + "version": "0.0.9", + "bundled": true, + "dev": true + }, + "boom": { + "version": "2.10.1", + "bundled": true, + "dev": true + }, + "brace-expansion": { + "version": "1.1.7", + "bundled": true, + "dev": true + }, + "buffer-shims": { + "version": "1.0.0", + "bundled": true, + "dev": true + }, + "caseless": { + "version": "0.12.0", + "bundled": true, + "dev": true, + "optional": true + }, + "co": { + "version": "4.6.0", + "bundled": true, + "dev": true, + "optional": true + }, + "code-point-at": { + "version": "1.1.0", + "bundled": true, + "dev": true + }, + "combined-stream": { + "version": "1.0.5", + "bundled": true, + "dev": true + }, + "concat-map": { + "version": "0.0.1", + "bundled": true, + "dev": true + }, + "console-control-strings": { + "version": "1.1.0", + "bundled": true, + "dev": true + }, + "core-util-is": { + "version": "1.0.2", + "bundled": true, + "dev": true + }, + "cryptiles": { + "version": "2.0.5", + "bundled": true, + "dev": true, + "optional": true + }, + "dashdash": { + "version": "1.14.1", + "bundled": true, + "dev": true, + "optional": true, + "dependencies": { + "assert-plus": { + "version": "1.0.0", + "bundled": true, + "dev": true, + "optional": true + } + } + }, + "debug": { + "version": "2.6.8", + "bundled": true, + "dev": true, + "optional": true + }, + "deep-extend": { + "version": "0.4.2", + "bundled": true, + "dev": true, + "optional": true + }, + "delayed-stream": { + "version": "1.0.0", + "bundled": true, + "dev": true + }, + "delegates": { + "version": 
"1.0.0", + "bundled": true, + "dev": true, + "optional": true + }, + "ecc-jsbn": { + "version": "0.1.1", + "bundled": true, + "dev": true, + "optional": true + }, + "extend": { + "version": "3.0.1", + "bundled": true, + "dev": true, + "optional": true + }, + "extsprintf": { + "version": "1.0.2", + "bundled": true, + "dev": true + }, + "forever-agent": { + "version": "0.6.1", + "bundled": true, + "dev": true, + "optional": true + }, + "form-data": { + "version": "2.1.4", + "bundled": true, + "dev": true, + "optional": true + }, + "fs.realpath": { + "version": "1.0.0", + "bundled": true, + "dev": true + }, + "fstream": { + "version": "1.0.11", + "bundled": true, + "dev": true + }, + "fstream-ignore": { + "version": "1.0.5", + "bundled": true, + "dev": true, + "optional": true + }, + "gauge": { + "version": "2.7.4", + "bundled": true, + "dev": true, + "optional": true + }, + "getpass": { + "version": "0.1.7", + "bundled": true, + "dev": true, + "optional": true, + "dependencies": { + "assert-plus": { + "version": "1.0.0", + "bundled": true, + "dev": true, + "optional": true + } + } + }, + "glob": { + "version": "7.1.2", + "bundled": true, + "dev": true + }, + "graceful-fs": { + "version": "4.1.11", + "bundled": true, + "dev": true + }, + "har-schema": { + "version": "1.0.5", + "bundled": true, + "dev": true, + "optional": true + }, + "har-validator": { + "version": "4.2.1", + "bundled": true, + "dev": true, + "optional": true + }, + "has-unicode": { + "version": "2.0.1", + "bundled": true, + "dev": true, + "optional": true + }, + "hawk": { + "version": "3.1.3", + "bundled": true, + "dev": true, + "optional": true + }, + "hoek": { + "version": "2.16.3", + "bundled": true, + "dev": true + }, + "http-signature": { + "version": "1.1.1", + "bundled": true, + "dev": true, + "optional": true + }, + "inflight": { + "version": "1.0.6", + "bundled": true, + "dev": true + }, + "inherits": { + "version": "2.0.3", + "bundled": true, + "dev": true + }, + "ini": { + "version": "1.3.4", + "bundled": true, + "dev": true, + "optional": true + }, + "is-fullwidth-code-point": { + "version": "1.0.0", + "bundled": true, + "dev": true + }, + "is-typedarray": { + "version": "1.0.0", + "bundled": true, + "dev": true, + "optional": true + }, + "isarray": { + "version": "1.0.0", + "bundled": true, + "dev": true + }, + "isstream": { + "version": "0.1.2", + "bundled": true, + "dev": true, + "optional": true + }, + "jodid25519": { + "version": "1.0.2", + "bundled": true, + "dev": true, + "optional": true + }, + "jsbn": { + "version": "0.1.1", + "bundled": true, + "dev": true, + "optional": true + }, + "json-schema": { + "version": "0.2.3", + "bundled": true, + "dev": true, + "optional": true + }, + "json-stable-stringify": { + "version": "1.0.1", + "bundled": true, + "dev": true, + "optional": true + }, + "json-stringify-safe": { + "version": "5.0.1", + "bundled": true, + "dev": true, + "optional": true + }, + "jsonify": { + "version": "0.0.0", + "bundled": true, + "dev": true, + "optional": true + }, + "jsprim": { + "version": "1.4.0", + "bundled": true, + "dev": true, + "optional": true, + "dependencies": { + "assert-plus": { + "version": "1.0.0", + "bundled": true, + "dev": true, + "optional": true + } + } + }, + "mime-db": { + "version": "1.27.0", + "bundled": true, + "dev": true + }, + "mime-types": { + "version": "2.1.15", + "bundled": true, + "dev": true + }, + "minimatch": { + "version": "3.0.4", + "bundled": true, + "dev": true + }, + "minimist": { + "version": "0.0.8", + "bundled": true, + "dev": true + }, + 
"mkdirp": { + "version": "0.5.1", + "bundled": true, + "dev": true + }, + "ms": { + "version": "2.0.0", + "bundled": true, + "dev": true, + "optional": true + }, + "node-pre-gyp": { + "version": "0.6.36", + "bundled": true, + "dev": true, + "optional": true + }, + "nopt": { + "version": "4.0.1", + "bundled": true, + "dev": true, + "optional": true + }, + "npmlog": { + "version": "4.1.0", + "bundled": true, + "dev": true, + "optional": true + }, + "number-is-nan": { + "version": "1.0.1", + "bundled": true, + "dev": true + }, + "oauth-sign": { + "version": "0.8.2", + "bundled": true, + "dev": true, + "optional": true + }, + "object-assign": { + "version": "4.1.1", + "bundled": true, + "dev": true, + "optional": true + }, + "once": { + "version": "1.4.0", + "bundled": true, + "dev": true + }, + "os-homedir": { + "version": "1.0.2", + "bundled": true, + "dev": true, + "optional": true + }, + "os-tmpdir": { + "version": "1.0.2", + "bundled": true, + "dev": true, + "optional": true + }, + "osenv": { + "version": "0.1.4", + "bundled": true, + "dev": true, + "optional": true + }, + "path-is-absolute": { + "version": "1.0.1", + "bundled": true, + "dev": true + }, + "performance-now": { + "version": "0.2.0", + "bundled": true, + "dev": true, + "optional": true + }, + "process-nextick-args": { + "version": "1.0.7", + "bundled": true, + "dev": true + }, + "punycode": { + "version": "1.4.1", + "bundled": true, + "dev": true, + "optional": true + }, + "qs": { + "version": "6.4.0", + "bundled": true, + "dev": true, + "optional": true + }, + "rc": { + "version": "1.2.1", + "bundled": true, + "dev": true, + "optional": true, + "dependencies": { + "minimist": { + "version": "1.2.0", + "bundled": true, + "dev": true, + "optional": true + } + } + }, + "readable-stream": { + "version": "2.2.9", + "bundled": true, + "dev": true + }, + "request": { + "version": "2.81.0", + "bundled": true, + "dev": true, + "optional": true + }, + "rimraf": { + "version": "2.6.1", + "bundled": true, + "dev": true + }, + "safe-buffer": { + "version": "5.0.1", + "bundled": true, + "dev": true + }, + "semver": { + "version": "5.3.0", + "bundled": true, + "dev": true, + "optional": true + }, + "set-blocking": { + "version": "2.0.0", + "bundled": true, + "dev": true, + "optional": true + }, + "signal-exit": { + "version": "3.0.2", + "bundled": true, + "dev": true, + "optional": true + }, + "sntp": { + "version": "1.0.9", + "bundled": true, + "dev": true, + "optional": true + }, + "sshpk": { + "version": "1.13.0", + "bundled": true, + "dev": true, + "optional": true, + "dependencies": { + "assert-plus": { + "version": "1.0.0", + "bundled": true, + "dev": true, + "optional": true + } + } + }, + "string_decoder": { + "version": "1.0.1", + "bundled": true, + "dev": true + }, + "string-width": { + "version": "1.0.2", + "bundled": true, + "dev": true + }, + "stringstream": { + "version": "0.0.5", + "bundled": true, + "dev": true, + "optional": true + }, + "strip-ansi": { + "version": "3.0.1", + "bundled": true, + "dev": true + }, + "strip-json-comments": { + "version": "2.0.1", + "bundled": true, + "dev": true, + "optional": true + }, + "tar": { + "version": "2.2.1", + "bundled": true, + "dev": true + }, + "tar-pack": { + "version": "3.4.0", + "bundled": true, + "dev": true, + "optional": true + }, + "tough-cookie": { + "version": "2.3.2", + "bundled": true, + "dev": true, + "optional": true + }, + "tunnel-agent": { + "version": "0.6.0", + "bundled": true, + "dev": true, + "optional": true + }, + "tweetnacl": { + "version": "0.14.5", + 
"bundled": true, + "dev": true, + "optional": true + }, + "uid-number": { + "version": "0.0.6", + "bundled": true, + "dev": true, + "optional": true + }, + "util-deprecate": { + "version": "1.0.2", + "bundled": true, + "dev": true + }, + "uuid": { + "version": "3.0.1", + "bundled": true, + "dev": true, + "optional": true + }, + "verror": { + "version": "1.3.6", + "bundled": true, + "dev": true, + "optional": true + }, + "wide-align": { + "version": "1.1.2", + "bundled": true, + "dev": true, + "optional": true + }, + "wrappy": { + "version": "1.0.2", + "bundled": true, + "dev": true + } + } + }, + "get-caller-file": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/get-caller-file/-/get-caller-file-1.0.2.tgz", + "integrity": "sha1-9wLmMSfn4jHBYKgMFVSstw1QR+U=", + "dev": true + }, + "get-func-name": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/get-func-name/-/get-func-name-2.0.0.tgz", + "integrity": "sha1-6td0q+5y4gQJQzoGY2YCPdaIekE=", + "dev": true + }, + "get-stream": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-3.0.0.tgz", + "integrity": "sha1-jpQ9E1jcN1VQVOy+LtsFqhdO3hQ=", + "dev": true + }, + "glob": { + "version": "7.1.2", + "resolved": "https://registry.npmjs.org/glob/-/glob-7.1.2.tgz", + "integrity": "sha512-MJTUg1kjuLeQCJ+ccE4Vpa6kKVXkPYJ2mOCQyUuKLcLQsdrMCpBPUi8qVE6+YuaJkozeA9NusTAw3hLr8Xe5EQ==", + "dev": true + }, + "glob-base": { + "version": "0.3.0", + "resolved": "https://registry.npmjs.org/glob-base/-/glob-base-0.3.0.tgz", + "integrity": "sha1-27Fk9iIbHAscz4Kuoyi0l98Oo8Q=", + "dev": true, + "optional": true + }, + "glob-parent": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-2.0.0.tgz", + "integrity": "sha1-gTg9ctsFT8zPUzbaqQLxgvbtuyg=", + "dev": true + }, + "globals": { + "version": "9.18.0", + "resolved": "https://registry.npmjs.org/globals/-/globals-9.18.0.tgz", + "integrity": "sha512-S0nG3CLEQiY/ILxqtztTWH/3iRRdyBLw6KMDxnKMchrtbj2OFmehVh0WUCfW3DUrIgx/qFrJPICrq4Z4sTR9UQ==", + "dev": true + }, + "globby": { + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/globby/-/globby-6.1.0.tgz", + "integrity": "sha1-9abXDoOV4hyFj7BInWTfAkJNUGw=", + "dev": true + }, + "graceful-fs": { + "version": "4.1.11", + "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.1.11.tgz", + "integrity": "sha1-Dovf5NHduIVNZOBOp8AOKgJuVlg=", + "dev": true + }, + "growl": { + "version": "1.10.3", + "resolved": "https://registry.npmjs.org/growl/-/growl-1.10.3.tgz", + "integrity": "sha512-hKlsbA5Vu3xsh1Cg3J7jSmX/WaW6A5oBeqzM88oNbCRQFz+zUaXm6yxS4RVytp1scBoJzSYl4YAEOQIt6O8V1Q==", + "dev": true + }, + "has-ansi": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/has-ansi/-/has-ansi-2.0.0.tgz", + "integrity": "sha1-NPUEnOHs3ysGSa8+8k5F7TVBbZE=", + "dev": true + }, + "has-flag": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-2.0.0.tgz", + "integrity": "sha1-6CB68cx7MNRGzHC3NLXovhj4jVE=", + "dev": true + }, + "he": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/he/-/he-1.1.1.tgz", + "integrity": "sha1-k0EP0hsAlzUVH4howvJx80J+I/0=", + "dev": true + }, + "home-or-tmp": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/home-or-tmp/-/home-or-tmp-2.0.0.tgz", + "integrity": "sha1-42w/LSyufXRqhX440Y1fMqeILbg=", + "dev": true + }, + "hosted-git-info": { + "version": "2.5.0", + "resolved": "https://registry.npmjs.org/hosted-git-info/-/hosted-git-info-2.5.0.tgz", + "integrity": 
"sha512-pNgbURSuab90KbTqvRPsseaTxOJCZBD0a7t+haSN33piP9cCM4l0CqdzAif2hUqm716UovKB2ROmiabGAKVXyg==", + "dev": true + }, + "inflight": { + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/inflight/-/inflight-1.0.6.tgz", + "integrity": "sha1-Sb1jMdfQLQwJvJEKEHW6gWW1bfk=", + "dev": true + }, + "inherits": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.3.tgz", + "integrity": "sha1-Yzwsg+PaQqUC9SRmAiSA9CCCYd4=", + "dev": true + }, + "invariant": { + "version": "2.2.2", + "resolved": "https://registry.npmjs.org/invariant/-/invariant-2.2.2.tgz", + "integrity": "sha1-nh9WrArNtr8wMwbzOL47IErmA2A=", + "dev": true + }, + "invert-kv": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/invert-kv/-/invert-kv-1.0.0.tgz", + "integrity": "sha1-EEqOSqym09jNFXqO+L+rLXo//bY=", + "dev": true + }, + "is-arrayish": { + "version": "0.2.1", + "resolved": "https://registry.npmjs.org/is-arrayish/-/is-arrayish-0.2.1.tgz", + "integrity": "sha1-d8mYQFJ6qOyxqLppe4BkWnqSap0=", + "dev": true + }, + "is-binary-path": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/is-binary-path/-/is-binary-path-1.0.1.tgz", + "integrity": "sha1-dfFmQrSA8YenEcgUFh/TpKdlWJg=", + "dev": true, + "optional": true + }, + "is-buffer": { + "version": "1.1.5", + "resolved": "https://registry.npmjs.org/is-buffer/-/is-buffer-1.1.5.tgz", + "integrity": "sha1-Hzsm72E7IUuIy8ojzGwB2Hlh7sw=", + "dev": true + }, + "is-builtin-module": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/is-builtin-module/-/is-builtin-module-1.0.0.tgz", + "integrity": "sha1-VAVy0096wxGfj3bDDLwbHgN6/74=", + "dev": true + }, + "is-dotfile": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/is-dotfile/-/is-dotfile-1.0.3.tgz", + "integrity": "sha1-pqLzL/0t+wT1yiXs0Pa4PPeYoeE=", + "dev": true, + "optional": true + }, + "is-equal-shallow": { + "version": "0.1.3", + "resolved": "https://registry.npmjs.org/is-equal-shallow/-/is-equal-shallow-0.1.3.tgz", + "integrity": "sha1-IjgJj8Ih3gvPpdnqxMRdY4qhxTQ=", + "dev": true, + "optional": true + }, + "is-extendable": { + "version": "0.1.1", + "resolved": "https://registry.npmjs.org/is-extendable/-/is-extendable-0.1.1.tgz", + "integrity": "sha1-YrEQ4omkcUGOPsNqYX1HLjAd/Ik=", + "dev": true, + "optional": true + }, + "is-extglob": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/is-extglob/-/is-extglob-1.0.0.tgz", + "integrity": "sha1-rEaBd8SUNAWgkvyPKXYMb/xiBsA=", + "dev": true + }, + "is-finite": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/is-finite/-/is-finite-1.0.2.tgz", + "integrity": "sha1-zGZ3aVYCvlUO8R6LSqYwU0K20Ko=", + "dev": true + }, + "is-fullwidth-code-point": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-1.0.0.tgz", + "integrity": "sha1-754xOG8DGn8NZDr4L95QxFfvAMs=", + "dev": true + }, + "is-glob": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-2.0.1.tgz", + "integrity": "sha1-0Jb5JqPe1WAPP9/ZEZjLCIjC2GM=", + "dev": true + }, + "is-module": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/is-module/-/is-module-1.0.0.tgz", + "integrity": "sha1-Mlj7afeMFNW4FdZkM2tM/7ZEFZE=", + "dev": true + }, + "is-number": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/is-number/-/is-number-2.1.0.tgz", + "integrity": "sha1-Afy7s5NGOlSPL0ZszhbezknbkI8=", + "dev": true, + "optional": true + }, + "is-posix-bracket": { + "version": "0.1.1", + "resolved": 
"https://registry.npmjs.org/is-posix-bracket/-/is-posix-bracket-0.1.1.tgz", + "integrity": "sha1-MzTceXdDaOkvAW5vvAqI9c1ua8Q=", + "dev": true, + "optional": true + }, + "is-primitive": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/is-primitive/-/is-primitive-2.0.0.tgz", + "integrity": "sha1-IHurkWOEmcB7Kt8kCkGochADRXU=", + "dev": true, + "optional": true + }, + "is-stream": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/is-stream/-/is-stream-1.1.0.tgz", + "integrity": "sha1-EtSj3U5o4Lec6428hBc66A2RykQ=", + "dev": true + }, + "isarray": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/isarray/-/isarray-1.0.0.tgz", + "integrity": "sha1-u5NdSFgsuhaMBoNJV6VKPgcSTxE=", + "dev": true + }, + "isexe": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz", + "integrity": "sha1-6PvzdNxVb/iUehDcsFctYz8s+hA=", + "dev": true + }, + "isobject": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/isobject/-/isobject-2.1.0.tgz", + "integrity": "sha1-8GVWEJaj8dou9GJy+BXIQNh+DIk=", + "dev": true, + "optional": true + }, + "jison-gho": { + "version": "0.6.1-205", + "resolved": "https://registry.npmjs.org/jison-gho/-/jison-gho-0.6.1-205.tgz", + "integrity": "sha512-BC0J/LBvYmuZP0MwETzE5rHyEb3fph84mcTGmUSbAxqlVFPGE7KR18Xggqum0xJUebULj8lx9CsiHwbRDqia6A==", + "dev": true, + "dependencies": { + "@gerhobbelt/ast-types": { + "version": "0.9.14-9", + "resolved": "https://registry.npmjs.org/@gerhobbelt/ast-types/-/ast-types-0.9.14-9.tgz", + "integrity": "sha512-5TmMhHOh6OE5VbGJuKnbQ2LEzN5z15CB1zGpA3hUYb00jN+G6qk/Z0ZhRFubS8GTp0h+JJaqnxUIbxneoNnTIQ==", + "dev": true + }, + "@gerhobbelt/json5": { + "version": "0.5.1-20", + "resolved": "https://registry.npmjs.org/@gerhobbelt/json5/-/json5-0.5.1-20.tgz", + "integrity": "sha512-4YEkF451JFUdt3Y54l+BLvbGz5sCVYbIVvrkt+NshIsmDKHZXefkBRznsf5prdmxbxXiAfMoVgtbVD/5V5rVWw==", + "dev": true + }, + "@gerhobbelt/recast": { + "version": "0.12.7-14", + "resolved": "https://registry.npmjs.org/@gerhobbelt/recast/-/recast-0.12.7-14.tgz", + "integrity": "sha512-U1PM+EXUYDXWxLYZiEdd+y5Gk4XHBiAjxolWeCviq3kbxobZiQJI7DWWjG72Ptow3gpXZYi7tMSeumOkoxnPwQ==", + "dev": true + }, + "private": { + "version": "0.1.8", + "resolved": "https://registry.npmjs.org/private/-/private-0.1.8.tgz", + "integrity": "sha512-VvivMrbvd2nKkiG38qjULzlc+4Vx4wm/whI9pQD35YrARNnhxeiRktSOhSukRLFNlzg6Br/cJPet5J/u19r/mg==", + "dev": true + } + } + }, + "jison-helpers-lib": { + "version": "0.6.1-205", + "resolved": "https://registry.npmjs.org/jison-helpers-lib/-/jison-helpers-lib-0.6.1-205.tgz", + "integrity": "sha512-b4iWlapl1cAU0/pZJmIDeJnEUXKMnt7NkwnNahG7gMZWQKV3ogaQOl3ByGWyThYQKQLgGWO4rTUDUlzwgrv4SQ==", + "dependencies": { + "@gerhobbelt/ast-types": { + "version": "0.9.14-9", + "resolved": "https://registry.npmjs.org/@gerhobbelt/ast-types/-/ast-types-0.9.14-9.tgz", + "integrity": "sha512-5TmMhHOh6OE5VbGJuKnbQ2LEzN5z15CB1zGpA3hUYb00jN+G6qk/Z0ZhRFubS8GTp0h+JJaqnxUIbxneoNnTIQ==" + }, + "@gerhobbelt/recast": { + "version": "0.12.7-14", + "resolved": "https://registry.npmjs.org/@gerhobbelt/recast/-/recast-0.12.7-14.tgz", + "integrity": "sha512-U1PM+EXUYDXWxLYZiEdd+y5Gk4XHBiAjxolWeCviq3kbxobZiQJI7DWWjG72Ptow3gpXZYi7tMSeumOkoxnPwQ==" + }, + "private": { + "version": "0.1.8", + "resolved": "https://registry.npmjs.org/private/-/private-0.1.8.tgz", + "integrity": "sha512-VvivMrbvd2nKkiG38qjULzlc+4Vx4wm/whI9pQD35YrARNnhxeiRktSOhSukRLFNlzg6Br/cJPet5J/u19r/mg==" + } + } + }, + "js-tokens": { + "version": "3.0.2", + "resolved": 
"https://registry.npmjs.org/js-tokens/-/js-tokens-3.0.2.tgz", + "integrity": "sha1-mGbfOVECEw449/mWvOtlRDIJwls=", + "dev": true + }, + "jsesc": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/jsesc/-/jsesc-1.3.0.tgz", + "integrity": "sha1-RsP+yMGJKxKwgz25vHYiF226s0s=", + "dev": true + }, + "json5": { + "version": "0.5.1", + "resolved": "https://registry.npmjs.org/json5/-/json5-0.5.1.tgz", + "integrity": "sha1-Hq3nrMASA0rYTiOWdn6tn6VJWCE=", + "dev": true + }, + "kind-of": { + "version": "3.2.2", + "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-3.2.2.tgz", + "integrity": "sha1-MeohpzS6ubuw8yRm2JOupR5KPGQ=", + "dev": true + }, + "lcid": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/lcid/-/lcid-1.0.0.tgz", + "integrity": "sha1-MIrMr6C8SDo4Z7S28rlQYlHRuDU=", + "dev": true + }, + "load-json-file": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/load-json-file/-/load-json-file-2.0.0.tgz", + "integrity": "sha1-eUfkIUmvgNaWy/eXvKq8/h/inKg=", + "dev": true + }, + "locate-path": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-2.0.0.tgz", + "integrity": "sha1-K1aLJl7slExtnA3pw9u7ygNUzY4=", + "dev": true + }, + "lodash": { + "version": "4.17.4", + "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.4.tgz", + "integrity": "sha1-eCA6TRwyiuHYbcpkYONptX9AVa4=", + "dev": true + }, + "loose-envify": { + "version": "1.3.1", + "resolved": "https://registry.npmjs.org/loose-envify/-/loose-envify-1.3.1.tgz", + "integrity": "sha1-0aitM/qc4OcT1l/dCsi3SNR4yEg=", + "dev": true + }, + "lru-cache": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-4.1.1.tgz", + "integrity": "sha512-q4spe4KTfsAS1SUHLO0wz8Qiyf1+vMIAgpRYioFYDMNqKfHQbg+AVDH3i4fvpl71/P1L0dBl+fQi+P37UYf0ew==", + "dev": true + }, + "mem": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/mem/-/mem-1.1.0.tgz", + "integrity": "sha1-Xt1StIXKHZAP5kiVUFOZoN+kX3Y=", + "dev": true + }, + "micromatch": { + "version": "2.3.11", + "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-2.3.11.tgz", + "integrity": "sha1-hmd8l9FyCzY0MdBNDRUpO9OMFWU=", + "dev": true, + "optional": true + }, + "mimic-fn": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/mimic-fn/-/mimic-fn-1.1.0.tgz", + "integrity": "sha1-5md4PZLonb00KBi1IwudYqZyrRg=", + "dev": true + }, + "minimatch": { + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.0.4.tgz", + "integrity": "sha512-yJHVQEhyqPLUTgt9B83PXu6W3rx4MvvHvSUvToogpwoGDOUQ+yDrR0HRot+yOCdCO7u4hX3pWft6kWBBcqh0UA==", + "dev": true + }, + "minimist": { + "version": "0.0.8", + "resolved": "https://registry.npmjs.org/minimist/-/minimist-0.0.8.tgz", + "integrity": "sha1-hX/Kv8M5fSYluCKCYuhqp6ARsF0=", + "dev": true + }, + "mkdirp": { + "version": "0.5.1", + "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-0.5.1.tgz", + "integrity": "sha1-MAV0OOrGz3+MR2fzhkjWaX11yQM=", + "dev": true + }, + "mocha": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/mocha/-/mocha-4.0.1.tgz", + "integrity": "sha512-evDmhkoA+cBNiQQQdSKZa2b9+W2mpLoj50367lhy+Klnx9OV8XlCIhigUnn1gaTFLQCa0kdNhEGDr0hCXOQFDw==", + "dev": true, + "dependencies": { + "debug": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/debug/-/debug-3.1.0.tgz", + "integrity": "sha512-OX8XqP7/1a9cqkxYw2yXss15f26NKWBpDXQd0/uK/KPqdQhxbPa994hnzjcE2VqQpDslf55723cKPUOGSmMY3g==", + "dev": true + }, + "supports-color": { + "version": "4.4.0", + 
"resolved": "https://registry.npmjs.org/supports-color/-/supports-color-4.4.0.tgz", + "integrity": "sha512-rKC3+DyXWgK0ZLKwmRsrkyHVZAjNkfzeehuFWdGGcqGDTZFH73+RH6S/RDAAxl9GusSjZSUWYLmT9N5pzXFOXQ==", + "dev": true + } + } + }, + "ms": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", + "integrity": "sha1-VgiurfwAvmwpAd9fmGF4jeDVl8g=", + "dev": true + }, + "nan": { + "version": "2.7.0", + "resolved": "https://registry.npmjs.org/nan/-/nan-2.7.0.tgz", + "integrity": "sha1-2Vv3IeyHfgjbJ27T/G63j5CDrUY=", + "dev": true, + "optional": true + }, + "normalize-package-data": { + "version": "2.4.0", + "resolved": "https://registry.npmjs.org/normalize-package-data/-/normalize-package-data-2.4.0.tgz", + "integrity": "sha512-9jjUFbTPfEy3R/ad/2oNbKtW9Hgovl5O1FvFWKkKblNXoN/Oou6+9+KKohPK13Yc3/TyunyWhJp6gvRNR/PPAw==", + "dev": true + }, + "normalize-path": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/normalize-path/-/normalize-path-2.1.1.tgz", + "integrity": "sha1-GrKLVW4Zg2Oowab35vogE3/mrtk=", + "dev": true + }, + "npm-run-path": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/npm-run-path/-/npm-run-path-2.0.2.tgz", + "integrity": "sha1-NakjLfo11wZ7TLLd8jV7GHFTbF8=", + "dev": true + }, + "number-is-nan": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/number-is-nan/-/number-is-nan-1.0.1.tgz", + "integrity": "sha1-CXtgK1NCKlIsGvuHkDGDNpQaAR0=", + "dev": true + }, + "object-assign": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/object-assign/-/object-assign-4.1.1.tgz", + "integrity": "sha1-IQmtx5ZYh8/AXLvUQsrIv7s2CGM=", + "dev": true + }, + "object.omit": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/object.omit/-/object.omit-2.0.1.tgz", + "integrity": "sha1-Gpx0SCnznbuFjHbKNXmuKlTr0fo=", + "dev": true, + "optional": true + }, + "once": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz", + "integrity": "sha1-WDsap3WWHUsROsF9nFC6753Xa9E=", + "dev": true + }, + "os-homedir": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/os-homedir/-/os-homedir-1.0.2.tgz", + "integrity": "sha1-/7xJiDNuDoM94MFox+8VISGqf7M=", + "dev": true + }, + "os-locale": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/os-locale/-/os-locale-2.1.0.tgz", + "integrity": "sha512-3sslG3zJbEYcaC4YVAvDorjGxc7tv6KVATnLPZONiljsUncvihe9BQoVCEs0RZ1kmf4Hk9OBqlZfJZWI4GanKA==", + "dev": true + }, + "os-tmpdir": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/os-tmpdir/-/os-tmpdir-1.0.2.tgz", + "integrity": "sha1-u+Z0BseaqFxc/sdm/lc0VV36EnQ=", + "dev": true + }, + "output-file-sync": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/output-file-sync/-/output-file-sync-1.1.2.tgz", + "integrity": "sha1-0KM+7+YaIF+suQCS6CZZjVJFznY=", + "dev": true + }, + "p-finally": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/p-finally/-/p-finally-1.0.0.tgz", + "integrity": "sha1-P7z7FbiZpEEjs0ttzBi3JDNqLK4=", + "dev": true + }, + "p-limit": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-1.1.0.tgz", + "integrity": "sha1-sH/y2aXYi+yAYDWJWiurZqJ5iLw=", + "dev": true + }, + "p-locate": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-2.0.0.tgz", + "integrity": "sha1-IKAQOyIqcMj9OcwuWAaA893l7EM=", + "dev": true + }, + "parse-glob": { + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/parse-glob/-/parse-glob-3.0.4.tgz", + "integrity": 
"sha1-ssN2z7EfNVE7rdFz7wu246OIORw=", + "dev": true, + "optional": true + }, + "parse-json": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/parse-json/-/parse-json-2.2.0.tgz", + "integrity": "sha1-9ID0BDTvgHQfhGkJn43qGPVaTck=", + "dev": true + }, + "path-exists": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-3.0.0.tgz", + "integrity": "sha1-zg6+ql94yxiSXqfYENe1mwEP1RU=", + "dev": true + }, + "path-is-absolute": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/path-is-absolute/-/path-is-absolute-1.0.1.tgz", + "integrity": "sha1-F0uSaHNVNP+8es5r9TpanhtcX18=", + "dev": true + }, + "path-key": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/path-key/-/path-key-2.0.1.tgz", + "integrity": "sha1-QRyttXTFoUDTpLGRDUDYDMn0C0A=", + "dev": true + }, + "path-parse": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/path-parse/-/path-parse-1.0.5.tgz", + "integrity": "sha1-PBrfhx6pzWyUMbbqK9dKD/BVxME=", + "dev": true + }, + "path-type": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/path-type/-/path-type-2.0.0.tgz", + "integrity": "sha1-8BLMuEFbcJb8LaoQVMPXI4lZTHM=", + "dev": true + }, + "pathval": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/pathval/-/pathval-1.1.0.tgz", + "integrity": "sha1-uULm1L3mUwBe9rcTYd74cn0GReA=", + "dev": true + }, + "pify": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/pify/-/pify-2.3.0.tgz", + "integrity": "sha1-7RQaasBDqEnqWISY59yosVMw6Qw=", + "dev": true + }, + "pinkie": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/pinkie/-/pinkie-2.0.4.tgz", + "integrity": "sha1-clVrgM+g1IqXToDnckjoDtT3+HA=", + "dev": true + }, + "pinkie-promise": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/pinkie-promise/-/pinkie-promise-2.0.1.tgz", + "integrity": "sha1-ITXW36ejWMBprJsXh3YogihFD/o=", + "dev": true + }, + "preserve": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/preserve/-/preserve-0.2.0.tgz", + "integrity": "sha1-gV7R9uvGWSb4ZbMQwHE7yzMVzks=", + "dev": true, + "optional": true + }, + "private": { + "version": "0.1.7", + "resolved": "https://registry.npmjs.org/private/-/private-0.1.7.tgz", + "integrity": "sha1-aM5eih7woju1cMwoU3tTMqumPvE=" + }, + "process-nextick-args": { + "version": "1.0.7", + "resolved": "https://registry.npmjs.org/process-nextick-args/-/process-nextick-args-1.0.7.tgz", + "integrity": "sha1-FQ4gt1ZZCtP5EJPyWk8q2L/zC6M=", + "dev": true, + "optional": true + }, + "pseudomap": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/pseudomap/-/pseudomap-1.0.2.tgz", + "integrity": "sha1-8FKijacOYYkX7wqKw0wa5aaChrM=", + "dev": true + }, + "randomatic": { + "version": "1.1.7", + "resolved": "https://registry.npmjs.org/randomatic/-/randomatic-1.1.7.tgz", + "integrity": "sha512-D5JUjPyJbaJDkuAazpVnSfVkLlpeO3wDlPROTMLGKG1zMFNFRgrciKo1ltz/AzNTkqE0HzDx655QOL51N06how==", + "dev": true, + "optional": true, + "dependencies": { + "is-number": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/is-number/-/is-number-3.0.0.tgz", + "integrity": "sha1-JP1iAaR4LPUFYcgQJ2r8fRLXEZU=", + "dev": true, + "optional": true, + "dependencies": { + "kind-of": { + "version": "3.2.2", + "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-3.2.2.tgz", + "integrity": "sha1-MeohpzS6ubuw8yRm2JOupR5KPGQ=", + "dev": true, + "optional": true + } + } + }, + "kind-of": { + "version": "4.0.0", + "resolved": 
"https://registry.npmjs.org/kind-of/-/kind-of-4.0.0.tgz", + "integrity": "sha1-IIE989cSkosgc3hpGkUGb65y3Vc=", + "dev": true, + "optional": true + } + } + }, + "read-pkg": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/read-pkg/-/read-pkg-2.0.0.tgz", + "integrity": "sha1-jvHAYjxqbbDcZxPEv6xGMysjaPg=", + "dev": true + }, + "read-pkg-up": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/read-pkg-up/-/read-pkg-up-2.0.0.tgz", + "integrity": "sha1-a3KoBImE4MQeeVEP1en6mbO1Sb4=", + "dev": true + }, + "readable-stream": { + "version": "2.3.3", + "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-2.3.3.tgz", + "integrity": "sha512-m+qzzcn7KUxEmd1gMbchF+Y2eIUbieUaxkWtptyHywrX0rE8QEYqPC07Vuy4Wm32/xE16NcdBctb8S0Xe/5IeQ==", + "dev": true, + "optional": true + }, + "readdirp": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/readdirp/-/readdirp-2.1.0.tgz", + "integrity": "sha1-TtCtBg3zBzMAxIRANz9y0cxkLXg=", + "dev": true, + "optional": true + }, + "regenerate": { + "version": "1.3.3", + "resolved": "https://registry.npmjs.org/regenerate/-/regenerate-1.3.3.tgz", + "integrity": "sha512-jVpo1GadrDAK59t/0jRx5VxYWQEDkkEKi6+HjE3joFVLfDOh9Xrdh0dF1eSq+BI/SwvTQ44gSscJ8N5zYL61sg==", + "dev": true + }, + "regenerator-runtime": { + "version": "0.11.0", + "resolved": "https://registry.npmjs.org/regenerator-runtime/-/regenerator-runtime-0.11.0.tgz", + "integrity": "sha512-/aA0kLeRb5N9K0d4fw7ooEbI+xDe+DKD499EQqygGqeS8N3xto15p09uY2xj7ixP81sNPXvRLnAQIqdVStgb1A==", + "dev": true + }, + "regenerator-transform": { + "version": "0.10.1", + "resolved": "https://registry.npmjs.org/regenerator-transform/-/regenerator-transform-0.10.1.tgz", + "integrity": "sha512-PJepbvDbuK1xgIgnau7Y90cwaAmO/LCLMI2mPvaXq2heGMR3aWW5/BQvYrhJ8jgmQjXewXvBjzfqKcVOmhjZ6Q==", + "dev": true + }, + "regex-cache": { + "version": "0.4.4", + "resolved": "https://registry.npmjs.org/regex-cache/-/regex-cache-0.4.4.tgz", + "integrity": "sha512-nVIZwtCjkC9YgvWkpM55B5rBhBYRZhAaJbgcFYXXsHnbZ9UZI9nnVWYZpBlCqv9ho2eZryPnWrZGsOdPwVWXWQ==", + "dev": true, + "optional": true + }, + "regexpu-core": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/regexpu-core/-/regexpu-core-2.0.0.tgz", + "integrity": "sha1-SdA4g3uNz4v6W5pCE5k45uoq4kA=", + "dev": true + }, + "regjsgen": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/regjsgen/-/regjsgen-0.2.0.tgz", + "integrity": "sha1-bAFq3qxVT3WCP+N6wFuS1aTtsfc=", + "dev": true + }, + "regjsparser": { + "version": "0.1.5", + "resolved": "https://registry.npmjs.org/regjsparser/-/regjsparser-0.1.5.tgz", + "integrity": "sha1-fuj4Tcb6eS0/0K4ijSS9lJ6tIFw=", + "dev": true, + "dependencies": { + "jsesc": { + "version": "0.5.0", + "resolved": "https://registry.npmjs.org/jsesc/-/jsesc-0.5.0.tgz", + "integrity": "sha1-597mbjXW/Bb3EP6R1c9p9w8IkR0=", + "dev": true + } + } + }, + "remove-trailing-separator": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/remove-trailing-separator/-/remove-trailing-separator-1.1.0.tgz", + "integrity": "sha1-wkvOKig62tW8P1jg1IJJuSN52O8=", + "dev": true + }, + "repeat-element": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/repeat-element/-/repeat-element-1.1.2.tgz", + "integrity": "sha1-7wiaF40Ug7quTZPrmLT55OEdmQo=", + "dev": true + }, + "repeat-string": { + "version": "1.6.1", + "resolved": "https://registry.npmjs.org/repeat-string/-/repeat-string-1.6.1.tgz", + "integrity": "sha1-jcrkcOHIirwtYA//Sndihtp15jc=", + "dev": true, + "optional": true + }, + "repeating": { + 
"version": "2.0.1", + "resolved": "https://registry.npmjs.org/repeating/-/repeating-2.0.1.tgz", + "integrity": "sha1-UhTFOpJtNVJwdSf7q0FdvAjQbdo=", + "dev": true + }, + "require-directory": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/require-directory/-/require-directory-2.1.1.tgz", + "integrity": "sha1-jGStX9MNqxyXbiNE/+f3kqam30I=", + "dev": true + }, + "require-main-filename": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/require-main-filename/-/require-main-filename-1.0.1.tgz", + "integrity": "sha1-l/cXtp1IeE9fUmpsWqj/3aBVpNE=", + "dev": true + }, + "resolve": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/resolve/-/resolve-1.4.0.tgz", + "integrity": "sha512-aW7sVKPufyHqOmyyLzg/J+8606v5nevBgaliIlV7nUpVMsDnoBGV/cbSLNjZAg9q0Cfd/+easKVKQ8vOu8fn1Q==", + "dev": true + }, + "rollup": { + "version": "0.50.0", + "resolved": "https://registry.npmjs.org/rollup/-/rollup-0.50.0.tgz", + "integrity": "sha512-7RqCBQ9iwsOBPkjYgoIaeUij606mSkDMExP0NT7QDI3bqkHYQHrQ83uoNIXwPcQm/vP2VbsUz3kiyZZ1qPlLTQ==", + "dev": true + }, + "rollup-plugin-node-resolve": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/rollup-plugin-node-resolve/-/rollup-plugin-node-resolve-3.0.0.tgz", + "integrity": "sha1-i4l8TDAw1QASd7BRSyXSygloPuA=", + "dev": true + }, + "safe-buffer": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.1.tgz", + "integrity": "sha512-kKvNJn6Mm93gAczWVJg7wH+wGYWNrDHdWvpUmHyEsgCtIwwo3bqPtV4tR5tuPaUhTOo/kvhVwd8XwwOllGYkbg==", + "dev": true + }, + "semver": { + "version": "5.4.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-5.4.1.tgz", + "integrity": "sha512-WfG/X9+oATh81XtllIo/I8gOiY9EXRdv1cQdyykeXK17YcUW3EXUAi2To4pcH6nZtJPr7ZOpM5OMyWJZm+8Rsg==", + "dev": true + }, + "set-blocking": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/set-blocking/-/set-blocking-2.0.0.tgz", + "integrity": "sha1-BF+XgtARrppoA93TgrJDkrPYkPc=", + "dev": true + }, + "set-immediate-shim": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/set-immediate-shim/-/set-immediate-shim-1.0.1.tgz", + "integrity": "sha1-SysbJ+uAip+NzEgaWOXlb1mfP2E=", + "dev": true, + "optional": true + }, + "shebang-command": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-1.2.0.tgz", + "integrity": "sha1-RKrGW2lbAzmJaMOfNj/uXer98eo=", + "dev": true + }, + "shebang-regex": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-1.0.0.tgz", + "integrity": "sha1-2kL0l0DAtC2yypcoVxyxkMmO/qM=", + "dev": true + }, + "signal-exit": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-3.0.2.tgz", + "integrity": "sha1-tf3AjxKH6hF4Yo5BXiUTK3NkbG0=", + "dev": true + }, + "slash": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/slash/-/slash-1.0.0.tgz", + "integrity": "sha1-xB8vbDn8FtHNF61LXYlhFK5HDVU=", + "dev": true + }, + "source-map": { + "version": "0.6.1", + "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", + "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==" + }, + "source-map-support": { + "version": "0.4.18", + "resolved": "https://registry.npmjs.org/source-map-support/-/source-map-support-0.4.18.tgz", + "integrity": "sha512-try0/JqxPLF9nOjvSta7tVondkP5dwgyLDjVoyMDlmjugT2lRZ1OfsrYTkCd2hkDnJTKRbO/Rl3orm8vlsUzbA==", + "dev": true, + "dependencies": { + "source-map": { 
+ "version": "0.5.7", + "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.5.7.tgz", + "integrity": "sha1-igOdLRAh0i0eoUyA2OpGi6LvP8w=", + "dev": true + } + } + }, + "spdx-correct": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/spdx-correct/-/spdx-correct-1.0.2.tgz", + "integrity": "sha1-SzBz2TP/UfORLwOsVRlJikFQ20A=", + "dev": true + }, + "spdx-expression-parse": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/spdx-expression-parse/-/spdx-expression-parse-1.0.4.tgz", + "integrity": "sha1-m98vIOH0DtRH++JzJmGR/O1RYmw=", + "dev": true + }, + "spdx-license-ids": { + "version": "1.2.2", + "resolved": "https://registry.npmjs.org/spdx-license-ids/-/spdx-license-ids-1.2.2.tgz", + "integrity": "sha1-yd96NCRZSt5r0RkA1ZZpbcBrrFc=", + "dev": true + }, + "string_decoder": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.0.3.tgz", + "integrity": "sha512-4AH6Z5fzNNBcH+6XDMfA/BTt87skxqJlO0lAh3Dker5zThcAxG6mKz+iGu308UKoPPQ8Dcqx/4JhujzltRa+hQ==", + "dev": true, + "optional": true + }, + "string-width": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-2.1.1.tgz", + "integrity": "sha512-nOqH59deCq9SRHlxq1Aw85Jnt4w6KvLKqWVik6oA9ZklXLNIOlqg4F2yrT1MVaTjAqvVwdfeZ7w7aCvJD7ugkw==", + "dev": true, + "dependencies": { + "ansi-regex": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-3.0.0.tgz", + "integrity": "sha1-7QMXwyIGT3lGbAKWa922Bas32Zg=", + "dev": true + }, + "is-fullwidth-code-point": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-2.0.0.tgz", + "integrity": "sha1-o7MKXE8ZkYMWeqq5O+764937ZU8=", + "dev": true + }, + "strip-ansi": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-4.0.0.tgz", + "integrity": "sha1-qEeQIusaw2iocTibY1JixQXuNo8=", + "dev": true + } + } + }, + "strip-ansi": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-3.0.1.tgz", + "integrity": "sha1-ajhfuIU9lS1f8F0Oiq+UJ43GPc8=", + "dev": true + }, + "strip-bom": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/strip-bom/-/strip-bom-3.0.0.tgz", + "integrity": "sha1-IzTBjpx1n3vdVv3vfprj1YjmjtM=", + "dev": true + }, + "strip-eof": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/strip-eof/-/strip-eof-1.0.0.tgz", + "integrity": "sha1-u0P/VZim6wXYm1n80SnJgzE2Br8=", + "dev": true + }, + "supports-color": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-2.0.0.tgz", + "integrity": "sha1-U10EXOa2Nj+kARcIRimZXp3zJMc=", + "dev": true + }, + "to-fast-properties": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/to-fast-properties/-/to-fast-properties-1.0.3.tgz", + "integrity": "sha1-uDVx+k2MJbguIxsG46MFXeTKGkc=", + "dev": true + }, + "trim-right": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/trim-right/-/trim-right-1.0.1.tgz", + "integrity": "sha1-yy4SAwZ+DI3h9hQJS5/kVwTqYAM=", + "dev": true + }, + "type-detect": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/type-detect/-/type-detect-4.0.3.tgz", + "integrity": "sha1-Dj8mcLRAmbC0bChNE2p+9Jx0wuo=", + "dev": true + }, + "user-home": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/user-home/-/user-home-1.1.1.tgz", + "integrity": "sha1-K1viOjK2Onyd640PKNSFcko98ZA=", + "dev": true + }, + "util-deprecate": { + "version": 
"1.0.2", + "resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz", + "integrity": "sha1-RQ1Nyfpw3nMnYvvS1KKJgUGaDM8=", + "dev": true, + "optional": true + }, + "v8flags": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/v8flags/-/v8flags-2.1.1.tgz", + "integrity": "sha1-qrGh+jDUX4jdMhFIh1rALAtV5bQ=", + "dev": true + }, + "validate-npm-package-license": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/validate-npm-package-license/-/validate-npm-package-license-3.0.1.tgz", + "integrity": "sha1-KAS6vnEq0zeUWaz74kdGqywwP7w=", + "dev": true + }, + "which": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/which/-/which-1.3.0.tgz", + "integrity": "sha512-xcJpopdamTuY5duC/KnTTNBraPK54YwpenP4lzxU8H91GudWpFv38u0CKjclE1Wi2EH2EDz5LRcHcKbCIzqGyg==", + "dev": true + }, + "which-module": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/which-module/-/which-module-2.0.0.tgz", + "integrity": "sha1-2e8H3Od7mQK4o6j6SzHD4/fm6Ho=", + "dev": true + }, + "wrap-ansi": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-2.1.0.tgz", + "integrity": "sha1-2Pw9KE3QV5T+hJc8rs3Rz4JP3YU=", + "dev": true, + "dependencies": { + "string-width": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-1.0.2.tgz", + "integrity": "sha1-EYvfW4zcUaKn5w0hHgfisLmxB9M=", + "dev": true + } + } + }, + "wrappy": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz", + "integrity": "sha1-tSQ9jz7BqjXxNkYFvA0QNuMKtp8=", + "dev": true + }, + "y18n": { + "version": "3.2.1", + "resolved": "https://registry.npmjs.org/y18n/-/y18n-3.2.1.tgz", + "integrity": "sha1-bRX7qITAhnnA136I53WegR4H+kE=", + "dev": true + }, + "yallist": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/yallist/-/yallist-2.1.2.tgz", + "integrity": "sha1-HBH5IY8HYImkfdUS+TxmmaaoHVI=", + "dev": true + }, + "yargs": { + "version": "8.0.2", + "resolved": "https://registry.npmjs.org/yargs/-/yargs-8.0.2.tgz", + "integrity": "sha1-YpmpBVsc78lp/355wdkY3Osiw2A=", + "dev": true + }, + "yargs-parser": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-7.0.0.tgz", + "integrity": "sha1-jQrELxbqVd69MyyvTEA4s+P139k=", + "dev": true + } + } +} diff --git a/package.json b/package.json index 76f52d4..e6694fa 100644 --- a/package.json +++ b/package.json @@ -1,14 +1,21 @@ { - "name": "ebnf-parser", - "version": "0.1.10", + "author": { + "name": "Zach Carter", + "email": "zach@carter.name", + "url": "http://zaa.ch" + }, + "name": "@gerhobbelt/ebnf-parser", + "version": "0.6.1-205", "description": "A parser for BNF and EBNF grammars used by jison", - "main": "ebnf-parser.js", + "main": "dist/ebnf-parser-cjs-es5.js", + "module": "ebnf-parser.js", "scripts": { - "test": "make test" + "test": "make test", + "pub": "echo '### WARNING/NOTICE: publish from the jison monorepo! 
###' && false" }, "repository": { "type": "git", - "url": "https://github.com/zaach/ebnf-parser.git" + "url": "https://github.com/GerHobbelt/ebnf-parser.git" }, "keywords": [ "bnf", @@ -17,11 +24,24 @@ "parser", "jison" ], - "author": "Zach Carter", "license": "MIT", + "engines": { + "node": ">=4.0" + }, + "dependencies": { + "@gerhobbelt/lex-parser": "0.6.1-205", + "@gerhobbelt/xregexp": "3.2.0-22", + "jison-helpers-lib": "0.6.1-205" + }, "devDependencies": { - "jison": "git://github.com/zaach/jison.git#ef2647", - "lex-parser": "0.1.0", - "test": "*" + "babel-cli": "6.26.0", + "babel-preset-env": "1.6.1", + "babel-preset-modern-browsers": "10.0.1", + "chai": "4.1.2", + "globby": "6.1.0", + "jison-gho": "0.6.1-205", + "mocha": "4.0.1", + "rollup-plugin-node-resolve": "3.0.0", + "rollup": "0.50.0" } } diff --git a/parser.js b/parser.js new file mode 100644 index 0000000..93c3977 --- /dev/null +++ b/parser.js @@ -0,0 +1,7808 @@ + +// hack: +var assert; + +/* parser generated by jison 0.6.1-205 */ + +/* + * Returns a Parser object of the following structure: + * + * Parser: { + * yy: {} The so-called "shared state" or rather the *source* of it; + * the real "shared state" `yy` passed around to + * the rule actions, etc. is a derivative/copy of this one, + * not a direct reference! + * } + * + * Parser.prototype: { + * yy: {}, + * EOF: 1, + * TERROR: 2, + * + * trace: function(errorMessage, ...), + * + * JisonParserError: function(msg, hash), + * + * quoteName: function(name), + * Helper function which can be overridden by user code later on: put suitable + * quotes around literal IDs in a description string. + * + * originalQuoteName: function(name), + * The basic quoteName handler provided by JISON. + * `cleanupAfterParse()` will clean up and reset `quoteName()` to reference this function + * at the end of the `parse()`. + * + * describeSymbol: function(symbol), + * Return a more-or-less human-readable description of the given symbol, when + * available, or the symbol itself, serving as its own 'description' for lack + * of something better to serve up. + * + * Return NULL when the symbol is unknown to the parser. + * + * symbols_: {associative list: name ==> number}, + * terminals_: {associative list: number ==> name}, + * nonterminals: {associative list: rule-name ==> {associative list: number ==> rule-alt}}, + * terminal_descriptions_: (if there are any) {associative list: number ==> description}, + * productions_: [...], + * + * performAction: function parser__performAction(yytext, yyleng, yylineno, yyloc, yystate, yysp, yyvstack, yylstack, yystack, yysstack), + * + * The function parameters and `this` have the following value/meaning: + * - `this` : reference to the `yyval` internal object, which has members (`$` and `_$`) + * to store/reference the rule value `$$` and location info `@$`. + * + * One important thing to note about `this` a.k.a. `yyval`: every *reduce* action gets + * to see the same object via the `this` reference, i.e. if you wish to carry custom + * data from one reduce action through to the next within a single parse run, then you + * may get nasty and use `yyval` a.k.a. `this` for storing you own semi-permanent data. + * + * `this.yy` is a direct reference to the `yy` shared state object. 
+ * + * `%parse-param`-specified additional `parse()` arguments have been added to this `yy` + * object at `parse()` start and are therefore available to the action code via the + * same named `yy.xxxx` attributes (where `xxxx` represents an identifier name from + * the `%parse-param` list). + * + * - `yytext` : reference to the lexer value which belongs to the last lexer token used + * to match this rule. This is *not* the look-ahead token, but the last token + * that's actually part of this rule. + * + * Formulated another way, `yytext` is the value of the token immediately preceding + * the current look-ahead token. + * Caveats apply for rules which don't require look-ahead, such as epsilon rules. + * + * - `yyleng` : ditto as `yytext`, only now for the lexer.yyleng value. + * + * - `yylineno`: ditto as `yytext`, only now for the lexer.yylineno value. + * + * - `yyloc` : ditto as `yytext`, only now for the lexer.yylloc lexer token location info. + * + * WARNING: since jison 0.4.18-186 this entry may be NULL/UNDEFINED instead + * of an empty object when no suitable location info can be provided. + * + * - `yystate` : the current parser state number, used internally for dispatching and + * executing the action code chunk matching the rule currently being reduced. + * + * - `yysp` : the current state stack position (a.k.a. 'stack pointer') + * + * This one comes in handy when you are going to do advanced things to the parser + * stacks, all of which are accessible from your action code (see the next entries below). + * + * Also note that you can access this and other stack index values using the new double-hash + * syntax, i.e. `##$ === ##0 === yysp`, while `##1` is the stack index for all things + * related to the first rule term, just like you have `$1`, `@1` and `#1`. + * This is made available to write very advanced grammar action rules, e.g. when you want + * to investigate the parse state stack in your action code, which would, for example, + * be relevant when you wish to implement error diagnostics and reporting schemes similar + * to the work described here: + * + * + Pottier, F., 2016. Reachability and error diagnosis in LR(1) automata. + * In Journées Francophones des Langages Applicatifs. + * + * + Jeffery, C.L., 2003. Generating LR syntax error messages from examples. + * ACM Transactions on Programming Languages and Systems (TOPLAS), 25(5), pp.631–640. + * + * - `yyrulelength`: the current rule's term count, i.e. the number of entries occupied on the stack. + * + * This one comes in handy when you are going to do advanced things to the parser + * stacks, all of which are accessible from your action code (see the next entries below). + * + * - `yyvstack`: reference to the parser value stack. Also accessed via the `$1` etc. + * constructs. + * + * - `yylstack`: reference to the parser token location stack. Also accessed via + * the `@1` etc. constructs. + * + * WARNING: since jison 0.4.18-186 this array MAY contain slots which are + * UNDEFINED rather than an empty (location) object, when the lexer/parser + * action code did not provide a suitable location info object when such a + * slot was filled! + * + * - `yystack` : reference to the parser token id stack. Also accessed via the + * `#1` etc. constructs. 
+ * + * Note: this is a bit of a **white lie** as we can statically decode any `#n` reference to + * its numeric token id value, hence that code wouldn't need the `yystack` but *you* might + * want access this array for your own purposes, such as error analysis as mentioned above! + * + * Note that this stack stores the current stack of *tokens*, that is the sequence of + * already parsed=reduced *nonterminals* (tokens representing rules) and *terminals* + * (lexer tokens *shifted* onto the stack until the rule they belong to is found and + * *reduced*. + * + * - `yysstack`: reference to the parser state stack. This one carries the internal parser + * *states* such as the one in `yystate`, which are used to represent + * the parser state machine in the *parse table*. *Very* *internal* stuff, + * what can I say? If you access this one, you're clearly doing wicked things + * + * - `...` : the extra arguments you specified in the `%parse-param` statement in your + * grammar definition file. + * + * table: [...], + * State transition table + * ---------------------- + * + * index levels are: + * - `state` --> hash table + * - `symbol` --> action (number or array) + * + * If the `action` is an array, these are the elements' meaning: + * - index [0]: 1 = shift, 2 = reduce, 3 = accept + * - index [1]: GOTO `state` + * + * If the `action` is a number, it is the GOTO `state` + * + * defaultActions: {...}, + * + * parseError: function(str, hash, ExceptionClass), + * yyError: function(str, ...), + * yyRecovering: function(), + * yyErrOk: function(), + * yyClearIn: function(), + * + * constructParseErrorInfo: function(error_message, exception_object, expected_token_set, is_recoverable), + * Helper function **which will be set up during the first invocation of the `parse()` method**. + * Produces a new errorInfo 'hash object' which can be passed into `parseError()`. + * See it's use in this parser kernel in many places; example usage: + * + * var infoObj = parser.constructParseErrorInfo('fail!', null, + * parser.collect_expected_token_set(state), true); + * var retVal = parser.parseError(infoObj.errStr, infoObj, parser.JisonParserError); + * + * originalParseError: function(str, hash, ExceptionClass), + * The basic `parseError` handler provided by JISON. + * `cleanupAfterParse()` will clean up and reset `parseError()` to reference this function + * at the end of the `parse()`. + * + * options: { ... parser %options ... }, + * + * parse: function(input[, args...]), + * Parse the given `input` and return the parsed value (or `true` when none was provided by + * the root action, in which case the parser is acting as a *matcher*). + * You MAY use the additional `args...` parameters as per `%parse-param` spec of this grammar: + * these extra `args...` are added verbatim to the `yy` object reference as member variables. + * + * WARNING: + * Parser's additional `args...` parameters (via `%parse-param`) MAY conflict with + * any attributes already added to `yy` by the jison run-time; + * when such a collision is detected an exception is thrown to prevent the generated run-time + * from silently accepting this confusing and potentially hazardous situation! + * + * The lexer MAY add its own set of additional parameters (via the `%parse-param` line in + * the lexer section of the grammar spec): these will be inserted in the `yy` shared state + * object and any collision with those will be reported by the lexer via a thrown exception. 
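[Editor's note] The `parse()` API documented above can be exercised directly; a minimal sketch follows. It is not part of the generated file: the require() target follows the `main` entry in package.json (dist/ebnf-parser-cjs-es5.js), and the grammar snippet plus variable names are illustrative assumptions only.

    var ebnfParser = require('@gerhobbelt/ebnf-parser');

    var grammarSource = '%% start : "a" start | "a" ;';      // hypothetical input grammar
    try {
        // returns the grammar as a plain object structure (or `true` when the
        // root action yields no value, i.e. the parser acts as a matcher)
        var grammarJSON = ebnfParser.parse(grammarSource);
        console.log(JSON.stringify(grammarJSON, null, 2));
    } catch (ex) {
        // a JisonParserError: its `hash` member carries the fields documented above
        console.error('parse failed:', ex.message, ex.hash && ex.hash.loc);
    }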
+ * + * cleanupAfterParse: function(resultValue, invoke_post_methods, do_not_nuke_errorinfos), + * Helper function **which will be set up during the first invocation of the `parse()` method**. + * This helper API is invoked at the end of the `parse()` call, unless an exception was thrown + * and `%options no-try-catch` has been defined for this grammar: in that case this helper MAY + * be invoked by calling user code to ensure the `post_parse` callbacks are invoked and + * the internal parser gets properly garbage collected under these particular circumstances. + * + * yyMergeLocationInfo: function(first_index, last_index, first_yylloc, last_yylloc, dont_look_back), + * Helper function **which will be set up during the first invocation of the `parse()` method**. + * This helper API can be invoked to calculate a spanning `yylloc` location info object. + * + * Note: %epsilon rules MAY specify no `first_index` and `first_yylloc`, in which case + * this function will attempt to obtain a suitable location marker by inspecting the location stack + * backwards. + * + * For more info see the documentation comment further below, immediately above this function's + * implementation. + * + * lexer: { + * yy: {...}, A reference to the so-called "shared state" `yy` once + * received via a call to the `.setInput(input, yy)` lexer API. + * EOF: 1, + * ERROR: 2, + * JisonLexerError: function(msg, hash), + * parseError: function(str, hash, ExceptionClass), + * setInput: function(input, [yy]), + * input: function(), + * unput: function(str), + * more: function(), + * reject: function(), + * less: function(n), + * pastInput: function(n), + * upcomingInput: function(n), + * showPosition: function(), + * test_match: function(regex_match_array, rule_index, ...), + * next: function(...), + * lex: function(...), + * begin: function(condition), + * pushState: function(condition), + * popState: function(), + * topState: function(), + * _currentRules: function(), + * stateStackSize: function(), + * cleanupAfterLex: function() + * + * options: { ... lexer %options ... }, + * + * performAction: function(yy, yy_, $avoiding_name_collisions, YY_START, ...), + * rules: [...], + * conditions: {associative list: name ==> set}, + * } + * } + * + * + * token location info (@$, _$, etc.): { + * first_line: n, + * last_line: n, + * first_column: n, + * last_column: n, + * range: [start_number, end_number] + * (where the numbers are indexes into the input string, zero-based) + * } + * + * --- + * + * The `parseError` function receives a 'hash' object with these members for lexer and + * parser errors: + * + * { + * text: (matched text) + * token: (the produced terminal token, if any) + * token_id: (the produced terminal token numeric ID, if any) + * line: (yylineno) + * loc: (yylloc) + * } + * + * parser (grammar) errors will also provide these additional members: + * + * { + * expected: (array describing the set of expected tokens; + * may be UNDEFINED when we cannot easily produce such a set) + * state: (integer (or array when the table includes grammar collisions); + * represents the current internal state of the parser kernel. 
+ * can, for example, be used to pass to the `collect_expected_token_set()` + * API to obtain the expected token set) + * action: (integer; represents the current internal action which will be executed) + * new_state: (integer; represents the next/planned internal state, once the current + * action has executed) + * recoverable: (boolean: TRUE when the parser MAY have an error recovery rule + * available for this particular error) + * state_stack: (array: the current parser LALR/LR internal state stack; this can be used, + * for instance, for advanced error analysis and reporting) + * value_stack: (array: the current parser LALR/LR internal `$$` value stack; this can be used, + * for instance, for advanced error analysis and reporting) + * location_stack: (array: the current parser LALR/LR internal location stack; this can be used, + * for instance, for advanced error analysis and reporting) + * yy: (object: the current parser internal "shared state" `yy` + * as is also available in the rule actions; this can be used, + * for instance, for advanced error analysis and reporting) + * lexer: (reference to the current lexer instance used by the parser) + * parser: (reference to the current parser instance) + * } + * + * while `this` will reference the current parser instance. + * + * When `parseError` is invoked by the lexer, `this` will still reference the related *parser* + * instance, while these additional `hash` fields will also be provided: + * + * { + * lexer: (reference to the current lexer instance which reported the error) + * } + * + * When `parseError` is invoked by the parser due to a **JavaScript exception** being fired + * from either the parser or lexer, `this` will still reference the related *parser* + * instance, while these additional `hash` fields will also be provided: + * + * { + * exception: (reference to the exception thrown) + * } + * + * Please do note that in the latter situation, the `expected` field will be omitted as + * this type of failure is assumed not to be due to *parse errors* but rather due to user + * action code in either parser or lexer failing unexpectedly. + * + * --- + * + * You can specify parser options by setting / modifying the `.yy` object of your Parser instance. + * These options are available: + * + * ### options which are global for all parser instances + * + * Parser.pre_parse: function(yy) + * optional: you can specify a pre_parse() function in the chunk following + * the grammar, i.e. after the last `%%`. + * Parser.post_parse: function(yy, retval, parseInfo) { return retval; } + * optional: you can specify a post_parse() function in the chunk following + * the grammar, i.e. after the last `%%`. When it does not return any value, + * the parser will return the original `retval`. + * + * ### options which can be set up per parser instance + * + * yy: { + * pre_parse: function(yy) + * optional: is invoked before the parse cycle starts (and before the first + * invocation of `lex()`) but immediately after the invocation of + * `parser.pre_parse()`). + * post_parse: function(yy, retval, parseInfo) { return retval; } + * optional: is invoked when the parse terminates due to success ('accept') + * or failure (even when exceptions are thrown). + * `retval` contains the return value to be produced by `Parser.parse()`; + * this function can override the return value by returning another. + * When it does not return any value, the parser will return the original + * `retval`. + * This function is invoked immediately before `parser.post_parse()`. 
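[Editor's note] A short sketch (not part of the generated file) of the per-instance `pre_parse` / `post_parse` hooks described above, set on the parser's `.yy` object as the documentation prescribes; the timing logic is a hypothetical example and `parser` refers to the instance defined further down in this file.

    parser.yy.pre_parse = function (yy) {
        // invoked once per parse() run, before the first lex() call
        yy.__startedAt = Date.now();
    };
    parser.yy.post_parse = function (yy, retval, parseInfo) {
        // invoked when the parse terminates (success or failure);
        // returning nothing keeps the original `retval`, returning a value overrides it
        console.log('parse finished in', Date.now() - yy.__startedAt, 'ms');
    };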
+ * + * parseError: function(str, hash, ExceptionClass) + * optional: overrides the default `parseError` function. + * quoteName: function(name), + * optional: overrides the default `quoteName` function. + * } + * + * parser.lexer.options: { + * pre_lex: function() + * optional: is invoked before the lexer is invoked to produce another token. + * `this` refers to the Lexer object. + * post_lex: function(token) { return token; } + * optional: is invoked when the lexer has produced a token `token`; + * this function can override the returned token value by returning another. + * When it does not return any (truthy) value, the lexer will return + * the original `token`. + * `this` refers to the Lexer object. + * + * ranges: boolean + * optional: `true` ==> token location info will include a .range[] member. + * flex: boolean + * optional: `true` ==> flex-like lexing behaviour where the rules are tested + * exhaustively to find the longest match. + * backtrack_lexer: boolean + * optional: `true` ==> lexer regexes are tested in order and for invoked; + * the lexer terminates the scan when a token is returned by the action code. + * xregexp: boolean + * optional: `true` ==> lexer rule regexes are "extended regex format" requiring the + * `XRegExp` library. When this `%option` has not been specified at compile time, all lexer + * rule regexes have been written as standard JavaScript RegExp expressions. + * } + */ + + +import XRegExp from '@gerhobbelt/xregexp'; // for helping out the `%options xregexp` in the lexer + import helpers from 'jison-helpers-lib'; + import fs from 'fs'; + import transform from './ebnf-transform'; + +// See also: +// http://stackoverflow.com/questions/1382107/whats-a-good-way-to-extend-error-in-javascript/#35881508 +// but we keep the prototype.constructor and prototype.name assignment lines too for compatibility +// with userland code which might access the derived class in a 'classic' way. 
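[Editor's note] The `parseError` override described above can be installed the same way. The sketch below is not part of the generated file; the recovery/logging policy shown is a hypothetical example, while the signature and the `hash.recoverable` flag follow the documentation comment above.

    parser.yy.parseError = function (str, hash, ExceptionClass) {
        if (hash && hash.recoverable) {
            // leave recoverable errors to the grammar's `error` recovery rules
            console.warn('recoverable parse error:', str);
            return;
        }
        // hard failure: rethrow via the provided exception class (JisonParserError)
        throw new ExceptionClass(str, hash);
    };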
+function JisonParserError(msg, hash) { + Object.defineProperty(this, 'name', { + enumerable: false, + writable: false, + value: 'JisonParserError' + }); + + if (msg == null) msg = '???'; + + Object.defineProperty(this, 'message', { + enumerable: false, + writable: true, + value: msg + }); + + this.hash = hash; + + var stacktrace; + if (hash && hash.exception instanceof Error) { + var ex2 = hash.exception; + this.message = ex2.message || msg; + stacktrace = ex2.stack; + } + if (!stacktrace) { + if (Error.hasOwnProperty('captureStackTrace')) { // V8/Chrome engine + Error.captureStackTrace(this, this.constructor); + } else { + stacktrace = (new Error(msg)).stack; + } + } + if (stacktrace) { + Object.defineProperty(this, 'stack', { + enumerable: false, + writable: false, + value: stacktrace + }); + } +} + +if (typeof Object.setPrototypeOf === 'function') { + Object.setPrototypeOf(JisonParserError.prototype, Error.prototype); +} else { + JisonParserError.prototype = Object.create(Error.prototype); +} +JisonParserError.prototype.constructor = JisonParserError; +JisonParserError.prototype.name = 'JisonParserError'; + + + + // helper: reconstruct the productions[] table + function bp(s) { + var rv = []; + var p = s.pop; + var r = s.rule; + for (var i = 0, l = p.length; i < l; i++) { + rv.push([ + p[i], + r[i] + ]); + } + return rv; + } + + + + // helper: reconstruct the defaultActions[] table + function bda(s) { + var rv = {}; + var d = s.idx; + var g = s.goto; + for (var i = 0, l = d.length; i < l; i++) { + var j = d[i]; + rv[j] = g[i]; + } + return rv; + } + + + + // helper: reconstruct the 'goto' table + function bt(s) { + var rv = []; + var d = s.len; + var y = s.symbol; + var t = s.type; + var a = s.state; + var m = s.mode; + var g = s.goto; + for (var i = 0, l = d.length; i < l; i++) { + var n = d[i]; + var q = {}; + for (var j = 0; j < n; j++) { + var z = y.shift(); + switch (t.shift()) { + case 2: + q[z] = [ + m.shift(), + g.shift() + ]; + break; + + case 0: + q[z] = a.shift(); + break; + + default: + // type === 1: accept + q[z] = [ + 3 + ]; + } + } + rv.push(q); + } + return rv; + } + + + + // helper: runlength encoding with increment step: code, length: step (default step = 0) + // `this` references an array + function s(c, l, a) { + a = a || 0; + for (var i = 0; i < l; i++) { + this.push(c); + c += a; + } + } + + // helper: duplicate sequence from *relative* offset and length. + // `this` references an array + function c(i, l) { + i = this.length - i; + for (l += i; i < l; i++) { + this.push(this[i]); + } + } + + // helper: unpack an array using helpers and data, all passed in an array argument 'a'. + function u(a) { + var rv = []; + for (var i = 0, l = a.length; i < l; i++) { + var e = a[i]; + // Is this entry a helper function? + if (typeof e === 'function') { + i++; + e.apply(rv, a[i]); + } else { + rv.push(e); + } + } + return rv; + } + + +var parser = { + // Code Generator Information Report + // --------------------------------- + // + // Options: + // + // default action mode: ............. classic,merge + // no try..catch: ................... false + // no default resolve on conflict: false + // on-demand look-ahead: ............ false + // error recovery token skip maximum: 3 + // yyerror in parse actions is: ..... NOT recoverable, + // yyerror in lexer actions and other non-fatal lexer are: + // .................................. NOT recoverable, + // debug grammar/output: ............ 
false + // has partial LR conflict upgrade: true + // rudimentary token-stack support: false + // parser table compression mode: ... 2 + // export debug tables: ............. false + // export *all* tables: ............. false + // module type: ..................... es + // parser engine type: .............. lalr + // output main() in the module: ..... true + // has user-specified main(): ....... false + // has user-specified require()/import modules for main(): + // .................................. false + // number of expected conflicts: .... 0 + // + // + // Parser Analysis flags: + // + // no significant actions (parser is a language matcher only): + // .................................. false + // uses yyleng: ..................... false + // uses yylineno: ................... false + // uses yytext: ..................... false + // uses yylloc: ..................... false + // uses ParseError API: ............. false + // uses YYERROR: .................... true + // uses YYRECOVERING: ............... false + // uses YYERROK: .................... false + // uses YYCLEARIN: .................. false + // tracks rule values: .............. true + // assigns rule values: ............. true + // uses location tracking: .......... true + // assigns location: ................ true + // uses yystack: .................... false + // uses yysstack: ................... false + // uses yysp: ....................... true + // uses yyrulelength: ............... false + // uses yyMergeLocationInfo API: .... true + // has error recovery: .............. true + // has error reporting: ............. true + // + // --------- END OF REPORT ----------- + +trace: function no_op_trace() {}, +JisonParserError: JisonParserError, +yy: {}, +options: { + type: "lalr", + hasPartialLrUpgradeOnConflict: true, + errorRecoveryTokenDiscardCount: 3 +}, +symbols_: { + "$accept": 0, + "$end": 1, + "%%": 14, + "(": 7, + ")": 8, + "*": 9, + "+": 11, + ":": 5, + ";": 4, + "=": 3, + "?": 10, + "ACTION": 15, + "ACTION_BODY": 43, + "ALIAS": 39, + "ARROW_ACTION": 42, + "CODE": 46, + "DEBUG": 19, + "EBNF": 20, + "EOF": 1, + "EOF_ID": 40, + "EPSILON": 38, + "ID": 24, + "IMPORT": 22, + "INCLUDE": 44, + "INIT_CODE": 23, + "INTEGER": 37, + "LEFT": 33, + "LEX_BLOCK": 17, + "NAME": 25, + "NONASSOC": 35, + "OPTIONS": 27, + "OPTIONS_END": 28, + "OPTION_STRING_VALUE": 29, + "OPTION_VALUE": 30, + "PARSER_TYPE": 32, + "PARSE_PARAM": 31, + "PATH": 45, + "PREC": 41, + "RIGHT": 34, + "START": 16, + "STRING": 26, + "TOKEN": 18, + "TOKEN_TYPE": 36, + "UNKNOWN_DECL": 21, + "action": 85, + "action_body": 86, + "action_comments_body": 87, + "action_ne": 84, + "associativity": 61, + "declaration": 51, + "declaration_list": 50, + "error": 2, + "expression": 79, + "extra_parser_module_code": 88, + "full_token_definitions": 63, + "grammar": 69, + "handle": 76, + "handle_action": 75, + "handle_list": 74, + "handle_sublist": 77, + "id": 83, + "id_list": 68, + "import_name": 53, + "import_path": 54, + "include_macro_code": 89, + "init_code_name": 52, + "module_code_chunk": 90, + "one_full_token": 64, + "operator": 60, + "option": 57, + "option_list": 56, + "optional_action_header_block": 49, + "optional_end_block": 48, + "optional_module_code_chunk": 91, + "optional_production_description": 73, + "optional_token_type": 65, + "options": 55, + "parse_params": 58, + "parser_type": 59, + "prec": 81, + "production": 71, + "production_id": 72, + "production_list": 70, + "spec": 47, + "suffix": 80, + "suffixed_expression": 78, + "symbol": 82, + 
"token_description": 67, + "token_list": 62, + "token_value": 66, + "{": 12, + "|": 6, + "}": 13 +}, +terminals_: { + 1: "EOF", + 2: "error", + 3: "=", + 4: ";", + 5: ":", + 6: "|", + 7: "(", + 8: ")", + 9: "*", + 10: "?", + 11: "+", + 12: "{", + 13: "}", + 14: "%%", + 15: "ACTION", + 16: "START", + 17: "LEX_BLOCK", + 18: "TOKEN", + 19: "DEBUG", + 20: "EBNF", + 21: "UNKNOWN_DECL", + 22: "IMPORT", + 23: "INIT_CODE", + 24: "ID", + 25: "NAME", + 26: "STRING", + 27: "OPTIONS", + 28: "OPTIONS_END", + 29: "OPTION_STRING_VALUE", + 30: "OPTION_VALUE", + 31: "PARSE_PARAM", + 32: "PARSER_TYPE", + 33: "LEFT", + 34: "RIGHT", + 35: "NONASSOC", + 36: "TOKEN_TYPE", + 37: "INTEGER", + 38: "EPSILON", + 39: "ALIAS", + 40: "EOF_ID", + 41: "PREC", + 42: "ARROW_ACTION", + 43: "ACTION_BODY", + 44: "INCLUDE", + 45: "PATH", + 46: "CODE" +}, +TERROR: 2, +EOF: 1, + +// internals: defined here so the object *structure* doesn't get modified by parse() et al, +// thus helping JIT compilers like Chrome V8. +originalQuoteName: null, +originalParseError: null, +cleanupAfterParse: null, +constructParseErrorInfo: null, +yyMergeLocationInfo: null, + +__reentrant_call_depth: 0, // INTERNAL USE ONLY +__error_infos: [], // INTERNAL USE ONLY: the set of parseErrorInfo objects created since the last cleanup +__error_recovery_infos: [], // INTERNAL USE ONLY: the set of parseErrorInfo objects created since the last cleanup + +// APIs which will be set up depending on user action code analysis: +//yyRecovering: 0, +//yyErrOk: 0, +//yyClearIn: 0, + +// Helper APIs +// ----------- + +// Helper function which can be overridden by user code later on: put suitable quotes around +// literal IDs in a description string. +quoteName: function parser_quoteName(id_str) { + return '"' + id_str + '"'; +}, + +// Return the name of the given symbol (terminal or non-terminal) as a string, when available. +// +// Return NULL when the symbol is unknown to the parser. +getSymbolName: function parser_getSymbolName(symbol) { + if (this.terminals_[symbol]) { + return this.terminals_[symbol]; + } + + // Otherwise... this might refer to a RULE token i.e. a non-terminal: see if we can dig that one up. + // + // An example of this may be where a rule's action code contains a call like this: + // + // parser.getSymbolName(#$) + // + // to obtain a human-readable name of the current grammar rule. + var s = this.symbols_; + for (var key in s) { + if (s[key] === symbol) { + return key; + } + } + return null; +}, + +// Return a more-or-less human-readable description of the given symbol, when available, +// or the symbol itself, serving as its own 'description' for lack of something better to serve up. +// +// Return NULL when the symbol is unknown to the parser. +describeSymbol: function parser_describeSymbol(symbol) { + if (symbol !== this.EOF && this.terminal_descriptions_ && this.terminal_descriptions_[symbol]) { + return this.terminal_descriptions_[symbol]; + } else if (symbol === this.EOF) { + return 'end of input'; + } + var id = this.getSymbolName(symbol); + if (id) { + return this.quoteName(id); + } + return null; +}, + +// Produce a (more or less) human-readable list of expected tokens at the point of failure. +// +// The produced list may contain token or token set descriptions instead of the tokens +// themselves to help turning this output into something that easier to read by humans +// unless `do_not_describe` parameter is set, in which case a list of the raw, *numeric*, +// expected terminals and nonterminals is produced. 
+// +// The returned list (array) will not contain any duplicate entries. +collect_expected_token_set: function parser_collect_expected_token_set(state, do_not_describe) { + var TERROR = this.TERROR; + var tokenset = []; + var check = {}; + // Has this (error?) state been outfitted with a custom expectations description text for human consumption? + // If so, use that one instead of the less palatable token set. + if (!do_not_describe && this.state_descriptions_ && this.state_descriptions_[state]) { + return [this.state_descriptions_[state]]; + } + for (var p in this.table[state]) { + p = +p; + if (p !== TERROR) { + var d = do_not_describe ? p : this.describeSymbol(p); + if (d && !check[d]) { + tokenset.push(d); + check[d] = true; // Mark this token description as already mentioned to prevent outputting duplicate entries. + } + } + } + return tokenset; +}, +productions_: bp({ + pop: u([ + s, + [47, 3], + 48, + 48, + s, + [49, 3], + s, + [50, 3], + s, + [51, 20], + s, + [52, 3], + 53, + 53, + 54, + 54, + s, + [55, 3], + 56, + 56, + s, + [57, 6], + 58, + 58, + 59, + 59, + 60, + 60, + s, + [61, 3], + 62, + 62, + 63, + 63, + s, + [64, 3], + 65, + s, + [65, 4, 1], + 68, + 69, + 70, + 70, + s, + [71, 3], + 72, + 72, + 73, + 73, + s, + [74, 4], + s, + [75, 3], + 76, + 76, + 77, + 77, + 78, + 78, + s, + [79, 5], + s, + [80, 4], + s, + [81, 3], + 82, + 82, + 83, + s, + [84, 4], + s, + [85, 3], + s, + [86, 5], + 87, + 87, + 88, + 88, + 89, + 89, + s, + [90, 3], + 91, + 91 +]), + rule: u([ + 5, + 5, + 3, + 0, + 2, + 0, + s, + [2, 3], + c, + [4, 3], + 1, + 1, + c, + [3, 3], + s, + [1, 6], + s, + [3, 5], + s, + [2, 3], + c, + [15, 9], + c, + [11, 4], + c, + [20, 7], + s, + [2, 4], + s, + [1, 3], + 2, + 1, + 2, + 2, + c, + [15, 3], + 0, + c, + [11, 7], + c, + [36, 4], + 3, + 3, + 1, + 0, + 3, + c, + [39, 4], + c, + [80, 4], + c, + [9, 3], + c, + [39, 4], + 3, + 3, + c, + [34, 5], + c, + [40, 5], + c, + [32, 3], + s, + [1, 3], + 0, + 0, + 1, + 5, + 4, + 4, + c, + [53, 3], + c, + [85, 4], + c, + [35, 3], + 0 +]) +}), +performAction: function parser__PerformAction(yyloc, yystate /* action[1] */, yysp, yyvstack, yylstack) { + + /* this == yyval */ + + // the JS engine itself can go and remove these statements when `yy` turns out to be unused in any action code! + var yy = this.yy; + var yyparser = yy.parser; + var yylexer = yy.lexer; + + + + switch (yystate) { +case 0: + /*! Production:: $accept : spec $end */ + + // default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-): + this.$ = yyvstack[yysp - 1]; + this._$ = yylstack[yysp - 1]; + // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-) + break; + +case 1: + /*! Production:: spec : declaration_list "%%" grammar optional_end_block EOF */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 4, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = yyvstack[yysp - 4]; + if (yyvstack[yysp - 1].trim() !== '') { + yy.addDeclaration(this.$, { include: yyvstack[yysp - 1] }); + } + return extend(this.$, yyvstack[yysp - 2]); + break; + +case 2: + /*! 
Production:: spec : declaration_list "%%" grammar error EOF */ + + // default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-): + this.$ = yyvstack[yysp - 4]; + this._$ = yyparser.yyMergeLocationInfo(yysp - 4, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-) + + + yyparser.yyError(rmCommonWS` + Maybe you did not correctly separate trailing code from the grammar rule set with a '%%' marker on an otherwise empty line? + + Erroneous area: + ${yylexer.prettyPrintRange(yylstack[yysp - 1], yylstack[yysp - 2])} + `); + break; + +case 3: + /*! Production:: spec : declaration_list error EOF */ + + // default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-): + this.$ = yyvstack[yysp - 2]; + this._$ = yyparser.yyMergeLocationInfo(yysp - 2, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-) + + + yyparser.yyError(rmCommonWS` + Maybe you did not correctly separate the parse 'header section' (token definitions, options, lexer spec, etc.) from the grammar rule set with a '%%' on an otherwise empty line? + + Erroneous area: + ${yylexer.prettyPrintRange(yylstack[yysp - 1], yylstack[yysp - 2])} + `); + break; + +case 4: + /*! Production:: optional_end_block : %epsilon */ +case 100: + /*! Production:: suffix : %epsilon */ +case 116: + /*! Production:: action : %epsilon */ +case 117: + /*! Production:: action_body : %epsilon */ +case 132: + /*! Production:: optional_module_code_chunk : %epsilon */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(null, null, null, null, true); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = ''; + break; + +case 5: + /*! Production:: optional_end_block : "%%" extra_parser_module_code */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + var rv = checkActionBlock(yyvstack[yysp], yylstack[yysp]); + if (rv) { + yyparser.yyError(rmCommonWS` + The extra parser module code section (a.k.a. 'epilogue') does not compile: ${rv} + + Erroneous area: + ${yylexer.prettyPrintRange(yylstack[yysp])} + `); + } + this.$ = yyvstack[yysp]; + break; + +case 6: + /*! Production:: optional_action_header_block : %epsilon */ +case 10: + /*! Production:: declaration_list : %epsilon */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(null, null, null, null, true); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = {}; + break; + +case 7: + /*! Production:: optional_action_header_block : optional_action_header_block ACTION */ +case 8: + /*! 
Production:: optional_action_header_block : optional_action_header_block include_macro_code */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = yyvstack[yysp - 1]; + var rv = checkActionBlock(yyvstack[yysp], yylstack[yysp]); + if (rv) { + yyparser.yyError(rmCommonWS` + action header code block does not compile: ${rv} + + Erroneous area: + ${yylexer.prettyPrintRange(yylstack[yysp])} + `); + } + yy.addDeclaration(this.$, { actionInclude: yyvstack[yysp] }); + break; + +case 9: + /*! Production:: declaration_list : declaration_list declaration */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = yyvstack[yysp - 1]; yy.addDeclaration(this.$, yyvstack[yysp]); + break; + +case 11: + /*! Production:: declaration_list : declaration_list error */ + + // default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-): + this.$ = yyvstack[yysp - 1]; + this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-) + + + // TODO ... + yyparser.yyError(rmCommonWS` + declaration list error? + + Erroneous area: + ${yylexer.prettyPrintRange(yylstack[yysp], yylstack[yysp - 1])} + `); + break; + +case 12: + /*! Production:: declaration : START id */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = {start: yyvstack[yysp]}; + break; + +case 13: + /*! Production:: declaration : LEX_BLOCK */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yylstack[yysp]; + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = {lex: {text: yyvstack[yysp], position: yylstack[yysp]}}; + break; + +case 14: + /*! Production:: declaration : operator */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yylstack[yysp]; + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = {operator: yyvstack[yysp]}; + break; + +case 15: + /*! Production:: declaration : TOKEN full_token_definitions */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = {token_list: yyvstack[yysp]}; + break; + +case 16: + /*! Production:: declaration : ACTION */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yylstack[yysp]; + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + var rv = checkActionBlock(yyvstack[yysp], yylstack[yysp]); + if (rv) { + yyparser.yyError(rmCommonWS` + action code block does not compile: ${rv} + + Erroneous area: + ${yylexer.prettyPrintRange(yylstack[yysp])} + `); + } + this.$ = {include: yyvstack[yysp]}; + break; + +case 17: + /*! 
Production:: declaration : include_macro_code */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yylstack[yysp]; + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + var rv = checkActionBlock(yyvstack[yysp], yylstack[yysp]); + if (rv) { + yyparser.yyError(rmCommonWS` + action header code block does not compile: ${rv} + + Erroneous area: + ${yylexer.prettyPrintRange(yylstack[yysp])} + `); + } + this.$ = {include: yyvstack[yysp]}; + break; + +case 18: + /*! Production:: declaration : parse_params */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yylstack[yysp]; + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = {parseParams: yyvstack[yysp]}; + break; + +case 19: + /*! Production:: declaration : parser_type */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yylstack[yysp]; + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = {parserType: yyvstack[yysp]}; + break; + +case 20: + /*! Production:: declaration : options */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yylstack[yysp]; + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = {options: yyvstack[yysp]}; + break; + +case 21: + /*! Production:: declaration : DEBUG */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yylstack[yysp]; + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = {options: [['debug', true]]}; + break; + +case 22: + /*! Production:: declaration : EBNF */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yylstack[yysp]; + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + ebnf = true; + this.$ = {options: [['ebnf', true]]}; + break; + +case 23: + /*! Production:: declaration : UNKNOWN_DECL */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yylstack[yysp]; + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = {unknownDecl: yyvstack[yysp]}; + break; + +case 24: + /*! Production:: declaration : IMPORT import_name import_path */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 2, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = {imports: {name: yyvstack[yysp - 1], path: yyvstack[yysp]}}; + break; + +case 25: + /*! Production:: declaration : IMPORT import_name error */ + + // default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-): + this.$ = yyvstack[yysp - 2]; + this._$ = yyparser.yyMergeLocationInfo(yysp - 2, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-) + + + yyparser.yyError(rmCommonWS` + You did not specify a legal file path for the '%import' initialization code statement, which must have the format: + + %import qualifier_name file_path + + Erroneous area: + ${yylexer.prettyPrintRange(yylstack[yysp], yylstack[yysp - 2])} + `); + break; + +case 26: + /*! 
Production:: declaration : IMPORT error import_path */
+
+    // default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-):
+    this.$ = yyvstack[yysp - 2];
+    this._$ = yyparser.yyMergeLocationInfo(yysp - 2, yysp);
+    // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-)
+
+
+    yyparser.yyError(rmCommonWS`
+        Each '%import'-ed initialization code section must be qualified by a name, e.g. 'required' before the import path itself:
+
+            %import qualifier_name file_path
+
+          Erroneous area:
+        ${yylexer.prettyPrintRange(yylstack[yysp - 1], yylstack[yysp - 2])}
+    `);
+    break;
+
+case 27:
+    /*! Production:: declaration : INIT_CODE init_code_name action_ne */
+
+    // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-):
+    this._$ = yyparser.yyMergeLocationInfo(yysp - 2, yysp);
+    // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-)
+
+
+    var rv = checkActionBlock(yyvstack[yysp], yylstack[yysp]);
+    if (rv) {
+        yyparser.yyError(rmCommonWS`
+            %code "${yyvstack[yysp - 1]}" initialization section action code block does not compile: ${rv}
+
+              Erroneous area:
+            ${yylexer.prettyPrintRange(yylstack[yysp], yylstack[yysp - 2])}
+        `);
+    }
+    this.$ = {
+        initCode: {
+            qualifier: yyvstack[yysp - 1],
+            include: yyvstack[yysp]
+        }
+    };
+    break;
+
+case 28:
+    /*! Production:: declaration : INIT_CODE error action_ne */
+
+    // default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-):
+    this.$ = yyvstack[yysp - 2];
+    this._$ = yyparser.yyMergeLocationInfo(yysp - 2, yysp);
+    // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-)
+
+
+    yyparser.yyError(rmCommonWS`
+        Each '%code' initialization code section must be qualified by a name, e.g. 'required' before the action code itself:
+
+            %code qualifier_name {action code}
+
+          Erroneous area:
+        ${yylexer.prettyPrintRange(yylstack[yysp - 1], yylstack[yysp - 2], yylstack[yysp])}
+    `);
+    break;
+
+case 29:
+    /*! Production:: declaration : START error */
+
+    // default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-):
+    this.$ = yyvstack[yysp - 1];
+    this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp);
+    // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-)
+
+
+    // TODO ...
+    yyparser.yyError(rmCommonWS`
+        %start token error?
+
+          Erroneous area:
+        ${yylexer.prettyPrintRange(yylstack[yysp], yylstack[yysp - 1])}
+    `);
+    break;
+
+case 30:
+    /*! Production:: declaration : TOKEN error */
+
+    // default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-):
+    this.$ = yyvstack[yysp - 1];
+    this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp);
+    // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-)
+
+
+    // TODO ...
+    yyparser.yyError(rmCommonWS`
+        %token definition list error?
+
+          Erroneous area:
+        ${yylexer.prettyPrintRange(yylstack[yysp], yylstack[yysp - 1])}
+    `);
+    break;
+
+case 31:
+    /*! Production:: declaration : IMPORT error */
+
+    // default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-):
+    this.$ = yyvstack[yysp - 1];
+    this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp);
+    // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-)
+
+
+    // TODO ...
+    yyparser.yyError(rmCommonWS`
+        %import name or source filename missing maybe?
+
+          Erroneous area:
+        ${yylexer.prettyPrintRange(yylstack[yysp], yylstack[yysp - 1])}
+    `);
+    break;
+
+case 32:
+    /*!
Production:: init_code_name : ID */ +case 33: + /*! Production:: init_code_name : NAME */ +case 34: + /*! Production:: init_code_name : STRING */ +case 35: + /*! Production:: import_name : ID */ +case 36: + /*! Production:: import_name : STRING */ +case 37: + /*! Production:: import_path : ID */ +case 38: + /*! Production:: import_path : STRING */ +case 67: + /*! Production:: optional_token_type : TOKEN_TYPE */ +case 68: + /*! Production:: token_value : INTEGER */ +case 69: + /*! Production:: token_description : STRING */ +case 80: + /*! Production:: optional_production_description : STRING */ +case 95: + /*! Production:: expression : ID */ +case 101: + /*! Production:: suffix : "*" */ +case 102: + /*! Production:: suffix : "?" */ +case 103: + /*! Production:: suffix : "+" */ +case 107: + /*! Production:: symbol : id */ +case 108: + /*! Production:: symbol : STRING */ +case 109: + /*! Production:: id : ID */ +case 112: + /*! Production:: action_ne : ACTION */ +case 113: + /*! Production:: action_ne : include_macro_code */ +case 114: + /*! Production:: action : action_ne */ +case 118: + /*! Production:: action_body : action_comments_body */ +case 122: + /*! Production:: action_comments_body : ACTION_BODY */ +case 124: + /*! Production:: extra_parser_module_code : optional_module_code_chunk */ +case 128: + /*! Production:: module_code_chunk : CODE */ +case 131: + /*! Production:: optional_module_code_chunk : module_code_chunk */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yylstack[yysp]; + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = yyvstack[yysp]; + break; + +case 39: + /*! Production:: options : OPTIONS option_list OPTIONS_END */ +case 110: + /*! Production:: action_ne : "{" action_body "}" */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 2, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = yyvstack[yysp - 1]; + break; + +case 40: + /*! Production:: options : OPTIONS error OPTIONS_END */ + + // default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-): + this.$ = yyvstack[yysp - 2]; + this._$ = yyparser.yyMergeLocationInfo(yysp - 2, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-) + + + // TODO ... + yyparser.yyError(rmCommonWS` + %options ill defined / error? + + Erroneous area: + ${yylexer.prettyPrintRange(yylstack[yysp - 1], yylstack[yysp - 2], yylstack[yysp])} + `); + break; + +case 41: + /*! Production:: options : OPTIONS error */ + + // default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-): + this.$ = yyvstack[yysp - 1]; + this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-) + + + // TODO ... + yyparser.yyError(rmCommonWS` + %options don't seem terminated? + + Erroneous area: + ${yylexer.prettyPrintRange(yylstack[yysp], yylstack[yysp - 1])} + `); + break; + +case 42: + /*! Production:: option_list : option_list option */ +case 59: + /*! Production:: token_list : token_list symbol */ +case 70: + /*! 
Production:: id_list : id_list id */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = yyvstack[yysp - 1]; this.$.push(yyvstack[yysp]); + break; + +case 43: + /*! Production:: option_list : option */ +case 60: + /*! Production:: token_list : symbol */ +case 71: + /*! Production:: id_list : id */ +case 83: + /*! Production:: handle_list : handle_action */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yylstack[yysp]; + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = [yyvstack[yysp]]; + break; + +case 44: + /*! Production:: option : NAME */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yylstack[yysp]; + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = [yyvstack[yysp], true]; + break; + +case 45: + /*! Production:: option : NAME "=" OPTION_STRING_VALUE */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 2, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = [yyvstack[yysp - 2], yyvstack[yysp]]; + break; + +case 46: + /*! Production:: option : NAME "=" OPTION_VALUE */ +case 47: + /*! Production:: option : NAME "=" NAME */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 2, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = [yyvstack[yysp - 2], parseValue(yyvstack[yysp])]; + break; + +case 48: + /*! Production:: option : NAME "=" error */ + + // default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-): + this.$ = yyvstack[yysp - 2]; + this._$ = yyparser.yyMergeLocationInfo(yysp - 2, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-) + + + // TODO ... + yyparser.yyError(rmCommonWS` + named %option value error for ${yyvstack[yysp - 2]}? + + Erroneous area: + ${yylexer.prettyPrintRange(yylstack[yysp], yylstack[yysp - 2])} + `); + break; + +case 49: + /*! Production:: option : NAME error */ + + // default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-): + this.$ = yyvstack[yysp - 1]; + this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-) + + + // TODO ... + yyparser.yyError(rmCommonWS` + named %option value assignment error? + + Erroneous area: + ${yylexer.prettyPrintRange(yylstack[yysp], yylstack[yysp - 1])} + `); + break; + +case 50: + /*! Production:: parse_params : PARSE_PARAM token_list */ +case 52: + /*! Production:: parser_type : PARSER_TYPE symbol */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = yyvstack[yysp]; + break; + +case 51: + /*! 
Production:: parse_params : PARSE_PARAM error */ + + // default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-): + this.$ = yyvstack[yysp - 1]; + this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-) + + + // TODO ... + yyparser.yyError(rmCommonWS` + %parse-params declaration error? + + Erroneous area: + ${yylexer.prettyPrintRange(yylstack[yysp], yylstack[yysp - 1])} + `); + break; + +case 53: + /*! Production:: parser_type : PARSER_TYPE error */ + + // default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-): + this.$ = yyvstack[yysp - 1]; + this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-) + + + // TODO ... + yyparser.yyError(rmCommonWS` + %parser-type declaration error? + + Erroneous area: + ${yylexer.prettyPrintRange(yylstack[yysp], yylstack[yysp - 1])} + `); + break; + +case 54: + /*! Production:: operator : associativity token_list */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = [yyvstack[yysp - 1]]; this.$.push.apply(this.$, yyvstack[yysp]); + break; + +case 55: + /*! Production:: operator : associativity error */ + + // default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-): + this.$ = yyvstack[yysp - 1]; + this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-) + + + // TODO ... + yyparser.yyError(rmCommonWS` + operator token list error in an associativity statement? + + Erroneous area: + ${yylexer.prettyPrintRange(yylstack[yysp], yylstack[yysp - 1])} + `); + break; + +case 56: + /*! Production:: associativity : LEFT */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yylstack[yysp]; + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = 'left'; + break; + +case 57: + /*! Production:: associativity : RIGHT */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yylstack[yysp]; + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = 'right'; + break; + +case 58: + /*! Production:: associativity : NONASSOC */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yylstack[yysp]; + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = 'nonassoc'; + break; + +case 61: + /*! Production:: full_token_definitions : optional_token_type id_list */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + var rv = []; + var lst = yyvstack[yysp]; + for (var i = 0, len = lst.length; i < len; i++) { + var id = lst[i]; + var m = {id: id}; + if (yyvstack[yysp - 1]) { + m.type = yyvstack[yysp - 1]; + } + rv.push(m); + } + this.$ = rv; + break; + +case 62: + /*! 
Production:: full_token_definitions : optional_token_type one_full_token */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + var m = yyvstack[yysp]; + if (yyvstack[yysp - 1]) { + m.type = yyvstack[yysp - 1]; + } + this.$ = [m]; + break; + +case 63: + /*! Production:: one_full_token : id token_value token_description */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 2, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = { + id: yyvstack[yysp - 2], + value: yyvstack[yysp - 1], + description: yyvstack[yysp] + }; + break; + +case 64: + /*! Production:: one_full_token : id token_description */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = { + id: yyvstack[yysp - 1], + description: yyvstack[yysp] + }; + break; + +case 65: + /*! Production:: one_full_token : id token_value */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = { + id: yyvstack[yysp - 1], + value: yyvstack[yysp] + }; + break; + +case 66: + /*! Production:: optional_token_type : %epsilon */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(null, null, null, null, true); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = false; + break; + +case 72: + /*! Production:: grammar : optional_action_header_block production_list */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = yyvstack[yysp - 1]; + this.$.grammar = yyvstack[yysp]; + break; + +case 73: + /*! Production:: production_list : production_list production */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = yyvstack[yysp - 1]; + if (yyvstack[yysp][0] in this.$) { + this.$[yyvstack[yysp][0]] = this.$[yyvstack[yysp][0]].concat(yyvstack[yysp][1]); + } else { + this.$[yyvstack[yysp][0]] = yyvstack[yysp][1]; + } + break; + +case 74: + /*! Production:: production_list : production */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yylstack[yysp]; + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = {}; this.$[yyvstack[yysp][0]] = yyvstack[yysp][1]; + break; + +case 75: + /*! 
Production:: production : production_id handle_list ";" */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 2, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = [yyvstack[yysp - 2], yyvstack[yysp - 1]]; + break; + +case 76: + /*! Production:: production : production_id error ";" */ + + // default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-): + this.$ = yyvstack[yysp - 2]; + this._$ = yyparser.yyMergeLocationInfo(yysp - 2, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-) + + + // TODO ... + yyparser.yyError(rmCommonWS` + rule production declaration error? + + Erroneous area: + ${yylexer.prettyPrintRange(yylstack[yysp - 1], yylstack[yysp - 2])} + `); + break; + +case 77: + /*! Production:: production : production_id error */ + + // default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-): + this.$ = yyvstack[yysp - 1]; + this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-) + + + // TODO ... + yyparser.yyError(rmCommonWS` + rule production declaration error: did you terminate the rule production set with a semicolon? + + Erroneous area: + ${yylexer.prettyPrintRange(yylstack[yysp], yylstack[yysp - 1])} + `); + break; + +case 78: + /*! Production:: production_id : id optional_production_description ":" */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 2, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = yyvstack[yysp - 2]; + + // TODO: carry rule description support into the parser generator... + break; + +case 79: + /*! Production:: production_id : id optional_production_description error */ + + // default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-): + this.$ = yyvstack[yysp - 2]; + this._$ = yyparser.yyMergeLocationInfo(yysp - 2, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-) + + + // TODO ... + yyparser.yyError(rmCommonWS` + rule id should be followed by a colon, but that one seems missing? + + Erroneous area: + ${yylexer.prettyPrintRange(yylstack[yysp], yylstack[yysp - 2])} + `); + break; + +case 81: + /*! Production:: optional_production_description : %epsilon */ + + // default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-): + this.$ = undefined; + this._$ = yyparser.yyMergeLocationInfo(null, null, null, null, true); + // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-) + break; + +case 82: + /*! Production:: handle_list : handle_list "|" handle_action */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 2, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = yyvstack[yysp - 2]; + this.$.push(yyvstack[yysp]); + break; + +case 84: + /*! 
Production:: handle_list : handle_list "|" error */ + + // default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-): + this.$ = yyvstack[yysp - 2]; + this._$ = yyparser.yyMergeLocationInfo(yysp - 2, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-) + + + // TODO ... + yyparser.yyError(rmCommonWS` + rule alternative production declaration error? + + Erroneous area: + ${yylexer.prettyPrintRange(yylstack[yysp], yylstack[yysp - 2])} + `); + break; + +case 85: + /*! Production:: handle_list : handle_list ":" error */ + + // default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-): + this.$ = yyvstack[yysp - 2]; + this._$ = yyparser.yyMergeLocationInfo(yysp - 2, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-) + + + // TODO ... + yyparser.yyError(rmCommonWS` + multiple alternative rule productions should be separated by a '|' pipe character, not a ':' colon! + + Erroneous area: + ${yylexer.prettyPrintRange(yylstack[yysp], yylstack[yysp - 2])} + `); + break; + +case 86: + /*! Production:: handle_action : handle prec action */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 2, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = [(yyvstack[yysp - 2].length ? yyvstack[yysp - 2].join(' ') : '')]; + if (yyvstack[yysp]) { + var rv = checkActionBlock(yyvstack[yysp], yylstack[yysp]); + if (rv) { + yyparser.yyError(rmCommonWS` + production rule action code block does not compile: ${rv} + + Erroneous area: + ${yylexer.prettyPrintRange(yylstack[yysp], yylstack[yysp - 2])} + `); + } + this.$.push(yyvstack[yysp]); + } + if (yyvstack[yysp - 1]) { + if (yyvstack[yysp - 2].length === 0) { + yyparser.yyError(rmCommonWS` + You cannot specify a precedence override for an epsilon (a.k.a. empty) rule! + + Erroneous area: + ${yylexer.prettyPrintRange(yylstack[yysp - 2])} + `); + } + this.$.push(yyvstack[yysp - 1]); + } + if (this.$.length === 1) { + this.$ = this.$[0]; + } + break; + +case 87: + /*! Production:: handle_action : EPSILON action */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = ['']; + if (yyvstack[yysp]) { + var rv = checkActionBlock(yyvstack[yysp], yylstack[yysp]); + if (rv) { + yyparser.yyError(rmCommonWS` + epsilon production rule action code block does not compile: ${rv} + + Erroneous area: + ${yylexer.prettyPrintRange(yylstack[yysp], yylstack[yysp - 1])} + `); + } + this.$.push(yyvstack[yysp]); + } + if (this.$.length === 1) { + this.$ = this.$[0]; + } + break; + +case 88: + /*! Production:: handle_action : EPSILON error */ + + // default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-): + this.$ = yyvstack[yysp - 1]; + this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-) + + + // TODO ... + yyparser.yyError(rmCommonWS` + %epsilon rule action declaration error? + + Erroneous area: + ${yylexer.prettyPrintRange(yylstack[yysp], yylstack[yysp - 1])} + `); + break; + +case 89: + /*! 
Production:: handle : handle suffixed_expression */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = yyvstack[yysp - 1]; + this.$.push(yyvstack[yysp]); + break; + +case 90: + /*! Production:: handle : %epsilon */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(null, null, null, null, true); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = []; + break; + +case 91: + /*! Production:: handle_sublist : handle_sublist "|" handle */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 2, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = yyvstack[yysp - 2]; + this.$.push(yyvstack[yysp].join(' ')); + break; + +case 92: + /*! Production:: handle_sublist : handle */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yylstack[yysp]; + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = [yyvstack[yysp].join(' ')]; + break; + +case 93: + /*! Production:: suffixed_expression : expression suffix ALIAS */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 2, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = yyvstack[yysp - 2] + yyvstack[yysp - 1] + "[" + yyvstack[yysp] + "]"; + break; + +case 94: + /*! Production:: suffixed_expression : expression suffix */ +case 123: + /*! Production:: action_comments_body : action_comments_body ACTION_BODY */ +case 129: + /*! Production:: module_code_chunk : module_code_chunk CODE */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = yyvstack[yysp - 1] + yyvstack[yysp]; + break; + +case 96: + /*! Production:: expression : EOF_ID */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yylstack[yysp]; + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = '$end'; + break; + +case 97: + /*! Production:: expression : STRING */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yylstack[yysp]; + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + // Re-encode the string *anyway* as it will + // be made part of the rule rhs a.k.a. production (type: *string*) again and we want + // to be able to handle all tokens, including *significant space* + // encoded as literal tokens in a grammar such as this: `rule: A ' ' B`. + this.$ = dquote(yyvstack[yysp]); + break; + +case 98: + /*! 
Production:: expression : "(" handle_sublist ")" */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 2, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = '(' + yyvstack[yysp - 1].join(' | ') + ')'; + break; + +case 99: + /*! Production:: expression : "(" handle_sublist error */ + + // default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-): + this.$ = yyvstack[yysp - 2]; + this._$ = yyparser.yyMergeLocationInfo(yysp - 2, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-) + + + yyparser.yyError(rmCommonWS` + Seems you did not correctly bracket a grammar rule sublist in '( ... )' brackets. + + Erroneous area: + ${yylexer.prettyPrintRange(yylstack[yysp], yylstack[yysp - 2])} + `); + break; + +case 104: + /*! Production:: prec : PREC symbol */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = { prec: yyvstack[yysp] }; + break; + +case 105: + /*! Production:: prec : PREC error */ + + // default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-): + this.$ = yyvstack[yysp - 1]; + this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-) + + + // TODO ... + yyparser.yyError(rmCommonWS` + %prec precedence override declaration error? + + Erroneous precedence declaration: + ${yylexer.prettyPrintRange(yylstack[yysp], yylstack[yysp - 1])} + `); + break; + +case 106: + /*! Production:: prec : %epsilon */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(null, null, null, null, true); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = null; + break; + +case 111: + /*! Production:: action_ne : "{" action_body error */ + + // default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-): + this.$ = yyvstack[yysp - 2]; + this._$ = yyparser.yyMergeLocationInfo(yysp - 2, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-) + + + yyparser.yyError(rmCommonWS` + Seems you did not correctly bracket a parser rule action block in curly braces: '{ ... }'. + + Erroneous area: + ${yylexer.prettyPrintRange(yylstack[yysp], yylstack[yysp - 2])} + `); + break; + +case 115: + /*! Production:: action : ARROW_ACTION */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yylstack[yysp]; + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = '$$ = ' + yyvstack[yysp]; + break; + +case 119: + /*! Production:: action_body : action_body "{" action_body "}" action_comments_body */ + + // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-): + this._$ = yyparser.yyMergeLocationInfo(yysp - 4, yysp); + // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-) + + + this.$ = yyvstack[yysp - 4] + yyvstack[yysp - 3] + yyvstack[yysp - 2] + yyvstack[yysp - 1] + yyvstack[yysp]; + break; + +case 120: + /*! 
Production:: action_body : action_body "{" action_body "}" */
+
+    // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-):
+    this._$ = yyparser.yyMergeLocationInfo(yysp - 3, yysp);
+    // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-)
+
+
+    this.$ = yyvstack[yysp - 3] + yyvstack[yysp - 2] + yyvstack[yysp - 1] + yyvstack[yysp];
+    break;
+
+case 121:
+    /*! Production:: action_body : action_body "{" action_body error */
+
+    // default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-):
+    this.$ = yyvstack[yysp - 3];
+    this._$ = yyparser.yyMergeLocationInfo(yysp - 3, yysp);
+    // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-)
+
+
+    yyparser.yyError(rmCommonWS`
+        Seems you did not correctly match curly braces '{ ... }' in a parser rule action block.
+
+          Erroneous area:
+        ${yylexer.prettyPrintRange(yylstack[yysp], yylstack[yysp - 2])}
+    `);
+    break;
+
+case 125:
+    /*! Production:: extra_parser_module_code : optional_module_code_chunk include_macro_code extra_parser_module_code */
+
+    // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-):
+    this._$ = yyparser.yyMergeLocationInfo(yysp - 2, yysp);
+    // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-)
+
+
+    this.$ = yyvstack[yysp - 2] + yyvstack[yysp - 1] + yyvstack[yysp];
+    break;
+
+case 126:
+    /*! Production:: include_macro_code : INCLUDE PATH */
+
+    // default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-):
+    this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp);
+    // END of default action (generated by JISON mode classic/merge :: VT,VA,VU,-,LT,LA,-,-)
+
+
+    var fileContent = fs.readFileSync(yyvstack[yysp], { encoding: 'utf-8' });
+    var rv = checkActionBlock(fileContent);
+    if (rv) {
+        yyparser.yyError(rmCommonWS`
+            included action code file "${yyvstack[yysp]}" does not compile: ${rv}
+
+              Erroneous area:
+            ${yylexer.prettyPrintRange(yylstack[yysp], yylstack[yysp - 1])}
+        `);
+    }
+    // And no, we don't support nested '%include':
+    this.$ = '\n// Included by Jison: ' + yyvstack[yysp] + ':\n\n' + fileContent + '\n\n// End Of Include by Jison: ' + yyvstack[yysp] + '\n\n';
+    break;
+
+case 127:
+    /*! Production:: include_macro_code : INCLUDE error */
+
+    // default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-):
+    this.$ = yyvstack[yysp - 1];
+    this._$ = yyparser.yyMergeLocationInfo(yysp - 1, yysp);
+    // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-)
+
+
+    yyparser.yyError(rmCommonWS`
+        %include MUST be followed by a valid file path.
+
+          Erroneous path:
+    ` + yylexer.prettyPrintRange(yylstack[yysp], yylstack[yysp - 1]));
+    break;
+
+case 130:
+    /*! Production:: module_code_chunk : error */
+
+    // default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-):
+    this.$ = yyvstack[yysp];
+    this._$ = yylstack[yysp];
+    // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,LT,LA,-,-)
+
+
+    // TODO ...
+    yyparser.yyError(rmCommonWS`
+        module code declaration error?
+
+          Erroneous area:
+    ` + yylexer.prettyPrintRange(yylstack[yysp]));
+    break;
+
+case 164:       // === NO_ACTION[1] :: ensures that anyone (but us) using this new state will fail dramatically!
+    // error recovery reduction action (action generated by jison,
+    // using the user-specified `%code error_recovery_reduction` %{...%}
+    // code chunk below.
+ + + break; + +} +}, +table: bt({ + len: u([ + 20, + 1, + 25, + 5, + 19, + 18, + 3, + 18, + 18, + 5, + s, + [18, 8], + 4, + 5, + 6, + 2, + s, + [6, 4, -1], + 3, + 3, + 4, + 8, + 1, + 18, + 18, + 26, + c, + [18, 3], + 1, + 4, + 21, + 3, + 3, + 5, + 5, + s, + [3, 3], + 22, + 18, + 20, + 25, + 25, + 24, + 24, + 22, + s, + [18, 3], + 3, + 19, + 2, + 4, + 1, + 1, + 7, + 7, + c, + [40, 3], + 17, + 4, + 20, + 18, + 23, + s, + [18, 6], + 6, + 21, + 21, + 18, + 20, + 18, + 2, + 18, + 4, + 2, + s, + [1, 3], + s, + [3, 4], + 4, + 3, + 5, + 3, + 15, + 11, + 2, + 2, + 19, + 20, + 18, + c, + [104, 3], + 4, + 4, + s, + [2, 4], + 7, + 3, + 4, + 16, + 1, + 4, + 10, + 14, + c, + [122, 3], + 18, + 18, + 9, + s, + [3, 4], + 14, + 14, + 18, + 21, + 21, + 6, + 4, + c, + [50, 5], + 7, + 7, + s, + [15, 4], + 3, + 9, + 3, + 14, + 18, + 18, + 8, + 5, + 3, + 9, + 4 +]), + symbol: u([ + 2, + s, + [14, 10, 1], + 27, + s, + [31, 5, 1], + 44, + 47, + 50, + 1, + c, + [21, 18], + 51, + 55, + s, + [58, 4, 1], + 89, + 15, + 24, + 44, + 49, + 69, + c, + [31, 19], + c, + [18, 19], + 24, + 83, + c, + [39, 38], + 36, + 63, + 65, + c, + [41, 37], + c, + [18, 108], + 24, + 26, + 53, + 2, + 24, + 25, + 26, + 52, + c, + [9, 3], + 62, + 82, + 83, + 2, + 45, + c, + [8, 7], + 24, + 26, + c, + [5, 3], + 25, + 56, + 57, + c, + [9, 3], + c, + [3, 6], + c, + [266, 3], + 48, + c, + [275, 3], + 70, + 71, + 72, + 83, + 89, + c, + [278, 38], + 4, + 5, + 6, + 12, + s, + [14, 11, 1], + 26, + c, + [24, 6], + 37, + 42, + c, + [152, 37], + 24, + 64, + 68, + 83, + 24, + c, + [119, 3], + 54, + c, + [27, 11], + c, + [67, 8], + 44, + 54, + c, + [147, 6], + 12, + 15, + 44, + 84, + 89, + c, + [5, 8], + c, + [3, 6], + c, + [46, 20], + c, + [201, 3], + c, + [113, 28], + c, + [40, 9], + c, + [177, 23], + c, + [176, 3], + c, + [25, 24], + 1, + c, + [26, 4], + c, + [25, 11], + c, + [73, 7], + 46, + c, + [24, 24], + c, + [158, 51], + c, + [18, 25], + 25, + 28, + 57, + c, + [21, 12], + 28, + c, + [22, 8], + 2, + 3, + 25, + 28, + s, + [1, 3], + 2, + 44, + 46, + 88, + 90, + 91, + c, + [425, 3], + 24, + c, + [433, 3], + c, + [440, 3], + c, + [3, 3], + c, + [13, 4], + c, + [153, 4], + 7, + 12, + 15, + 24, + 26, + 38, + 40, + 41, + 42, + 44, + 74, + 75, + 76, + 2, + 5, + 26, + 73, + c, + [151, 12], + c, + [94, 7], + c, + [307, 38], + 37, + 44, + 66, + 67, + c, + [685, 109], + 12, + 13, + 43, + 86, + 87, + c, + [349, 14], + c, + [445, 11], + c, + [84, 46], + c, + [504, 10], + c, + [348, 19], + c, + [58, 19], + 25, + 29, + 30, + c, + [346, 5], + 1, + 44, + 89, + 1, + c, + [483, 3], + c, + [3, 6], + c, + [339, 3], + c, + [121, 3], + c, + [496, 3], + c, + [8, 5], + c, + [349, 8], + c, + [348, 4], + 78, + 79, + 81, + c, + [568, 5], + 15, + 42, + 44, + 84, + 85, + 89, + 2, + 5, + 2, + 5, + c, + [359, 19], + c, + [19, 11], + c, + [142, 8], + c, + [337, 30], + c, + [180, 26], + c, + [284, 3], + c, + [287, 4], + c, + [4, 4], + 25, + 28, + 25, + 28, + c, + [4, 4], + c, + [517, 8], + c, + [168, 6], + c, + [507, 14], + c, + [506, 3], + c, + [189, 7], + c, + [162, 8], + s, + [4, 5, 1], + c, + [190, 8], + c, + [1024, 6], + s, + [4, 9, 1], + c, + [22, 3], + s, + [39, 4, 1], + 44, + 80, + c, + [19, 18], + c, + [18, 37], + c, + [16, 3], + c, + [88, 3], + 76, + 77, + c, + [292, 6], + c, + [3, 6], + c, + [144, 14], + c, + [14, 15], + c, + [480, 39], + c, + [21, 21], + c, + [549, 6], + c, + [6, 3], + 1, + c, + [111, 12], + c, + [234, 7], + c, + [7, 7], + c, + [238, 10], + c, + [179, 11], + c, + [15, 40], + 6, + 8, + c, + [209, 7], + 78, + 79, + c, + [374, 4], + c, + [313, 14], + c, + 
[271, 43], + c, + [164, 4], + c, + [169, 4], + c, + [78, 12], + 43 +]), + type: u([ + s, + [2, 18], + 0, + 0, + 1, + c, + [21, 20], + s, + [0, 5], + c, + [10, 5], + s, + [2, 39], + c, + [40, 41], + c, + [41, 40], + s, + [2, 108], + c, + [148, 5], + c, + [239, 6], + c, + [159, 6], + c, + [253, 10], + c, + [176, 14], + c, + [36, 7], + c, + [197, 102], + c, + [103, 7], + c, + [108, 21], + c, + [21, 10], + c, + [423, 36], + c, + [373, 149], + c, + [158, 67], + c, + [57, 32], + c, + [322, 8], + c, + [98, 26], + c, + [489, 7], + c, + [721, 173], + c, + [462, 131], + c, + [130, 37], + c, + [375, 11], + c, + [818, 45], + c, + [223, 79], + c, + [124, 24], + c, + [986, 15], + c, + [38, 19], + c, + [57, 20], + c, + [157, 62], + c, + [443, 106], + c, + [106, 103], + c, + [103, 62], + c, + [1248, 16], + c, + [78, 6] +]), + state: u([ + 1, + 2, + 5, + 14, + 12, + 13, + 8, + 20, + 11, + 29, + 28, + 31, + 34, + 36, + 38, + 42, + 47, + 49, + 50, + 54, + 49, + 50, + 56, + 50, + 58, + 60, + 62, + 65, + 68, + 69, + 70, + 67, + 72, + 71, + 73, + 74, + 78, + 79, + 82, + 83, + 82, + 84, + 50, + 84, + 50, + 86, + 92, + 94, + 93, + 97, + 69, + 70, + 98, + 100, + 101, + 103, + 105, + 106, + 107, + 110, + 111, + 117, + 124, + 126, + 123, + 133, + 131, + 82, + 137, + 142, + 94, + 93, + 143, + 101, + 133, + 146, + 82, + 147, + 50, + 149, + 154, + 153, + 155, + 111, + 124, + 126, + 162, + 163, + 124, + 126 +]), + mode: u([ + s, + [2, 18], + s, + [1, 18], + c, + [21, 4], + s, + [2, 36], + c, + [42, 5], + c, + [38, 34], + c, + [77, 38], + s, + [2, 108], + s, + [1, 20], + c, + [30, 15], + c, + [134, 100], + c, + [106, 4], + c, + [335, 26], + c, + [151, 16], + c, + [376, 48], + c, + [347, 120], + c, + [63, 75], + c, + [13, 9], + c, + [23, 4], + c, + [4, 3], + c, + [587, 6], + c, + [427, 12], + c, + [9, 15], + c, + [335, 13], + c, + [389, 39], + c, + [45, 43], + c, + [509, 77], + c, + [762, 121], + c, + [129, 9], + c, + [756, 14], + c, + [334, 14], + c, + [41, 6], + c, + [367, 5], + c, + [784, 37], + c, + [208, 63], + c, + [1142, 20], + c, + [1081, 10], + c, + [487, 14], + c, + [22, 9], + c, + [151, 17], + c, + [221, 10], + c, + [803, 156], + c, + [318, 61], + c, + [216, 50], + c, + [457, 7], + c, + [455, 38], + c, + [123, 34], + c, + [1206, 8], + 1 +]), + goto: u([ + s, + [10, 18], + 4, + 3, + 10, + 6, + 7, + 9, + s, + [15, 5, 1], + 24, + 22, + 23, + 25, + 26, + 27, + 21, + s, + [6, 3], + 30, + s, + [11, 18], + s, + [9, 18], + 32, + 33, + s, + [13, 18], + s, + [14, 18], + 35, + 66, + 37, + s, + [16, 18], + s, + [17, 18], + s, + [18, 18], + s, + [19, 18], + s, + [20, 18], + s, + [21, 18], + s, + [22, 18], + s, + [23, 18], + 39, + 40, + 41, + s, + [43, 4, 1], + 48, + 33, + 51, + 53, + 52, + 55, + 33, + 51, + 57, + 33, + 51, + 59, + 61, + s, + [56, 3], + s, + [57, 3], + s, + [58, 3], + 4, + 63, + 64, + 66, + 33, + 21, + 3, + s, + [12, 18], + s, + [29, 18], + s, + [109, 26], + s, + [15, 18], + s, + [30, 18], + 33, + 67, + 75, + 76, + 77, + s, + [31, 11], + c, + [13, 9], + s, + [35, 3], + s, + [36, 3], + 80, + 81, + 21, + c, + [3, 3], + s, + [32, 3], + s, + [33, 3], + s, + [34, 3], + s, + [54, 11], + 33, + 51, + s, + [54, 7], + s, + [55, 18], + s, + [60, 20], + s, + [107, 25], + s, + [108, 25], + s, + [126, 24], + s, + [127, 24], + s, + [50, 11], + 33, + 51, + s, + [50, 7], + s, + [51, 18], + s, + [52, 18], + s, + [53, 18], + 61, + 85, + s, + [41, 12], + 87, + s, + [41, 6], + 43, + 43, + 89, + 88, + 44, + 44, + 90, + 91, + 132, + 96, + 132, + 95, + s, + [72, 3], + 33, + s, + [7, 3], + s, + [8, 3], + s, + [74, 4], + 99, + s, + 
[90, 8], + 102, + s, + [90, 4], + 81, + 81, + 104, + s, + [61, 11], + 33, + s, + [61, 7], + s, + [62, 18], + s, + [71, 12], + 109, + s, + [71, 6], + 108, + 71, + s, + [24, 18], + s, + [25, 18], + s, + [37, 18], + s, + [38, 18], + s, + [26, 18], + s, + [27, 18], + s, + [117, 3], + s, + [112, 22], + s, + [113, 21], + s, + [28, 18], + s, + [59, 20], + s, + [39, 18], + 42, + 42, + s, + [40, 18], + 116, + 115, + 113, + 114, + 49, + 49, + 1, + 2, + 5, + 124, + 21, + 131, + 131, + 118, + s, + [128, 3], + s, + [130, 3], + s, + [73, 4], + 119, + 121, + 120, + 77, + 77, + 122, + 77, + 77, + s, + [83, 3], + s, + [106, 3], + 130, + 106, + 106, + 127, + 129, + 128, + 125, + 106, + 106, + 132, + s, + [116, 3], + 80, + 81, + 134, + 21, + 136, + 135, + 80, + 80, + s, + [70, 19], + s, + [65, 11], + 109, + s, + [65, 7], + s, + [64, 18], + s, + [68, 19], + s, + [69, 18], + 139, + 140, + 138, + s, + [118, 3], + 141, + s, + [122, 4], + 45, + 45, + 46, + 46, + 47, + 47, + 48, + 48, + c, + [494, 4], + s, + [129, 3], + s, + [75, 4], + 144, + c, + [487, 13], + 145, + s, + [76, 4], + c, + [153, 7], + s, + [89, 14], + 148, + 33, + 51, + s, + [100, 6], + 150, + 151, + 152, + s, + [100, 9], + s, + [95, 18], + s, + [96, 18], + s, + [97, 18], + s, + [90, 7], + s, + [87, 3], + s, + [88, 3], + s, + [114, 3], + s, + [115, 3], + s, + [78, 14], + s, + [79, 14], + s, + [63, 18], + s, + [110, 21], + s, + [111, 21], + c, + [526, 4], + s, + [123, 4], + 125, + s, + [82, 3], + s, + [84, 3], + s, + [85, 3], + s, + [86, 3], + s, + [104, 7], + s, + [105, 7], + s, + [94, 10], + 156, + s, + [94, 4], + s, + [101, 15], + s, + [102, 15], + s, + [103, 15], + 158, + 159, + 157, + 92, + 92, + 130, + 92, + c, + [465, 3], + 161, + 140, + 160, + s, + [93, 14], + s, + [98, 18], + s, + [99, 18], + s, + [90, 7], + s, + [120, 3], + 112, + s, + [121, 3], + 91, + 91, + 130, + 91, + c, + [74, 3], + s, + [119, 3], + 141 +]) +}), +defaultActions: bda({ + idx: u([ + 0, + 3, + 5, + 7, + 8, + s, + [10, 8, 1], + 25, + 26, + 27, + s, + [30, 6, 1], + 37, + 40, + 41, + 44, + 45, + 46, + s, + [48, 6, 1], + 55, + 56, + 57, + 60, + 66, + 67, + 68, + 72, + s, + [74, 6, 1], + s, + [81, 7, 1], + s, + [89, 4, 1], + 95, + 96, + 97, + 100, + 104, + 105, + 107, + 108, + 109, + s, + [112, 5, 1], + 118, + 119, + 122, + 124, + s, + [127, 13, 1], + s, + [141, 8, 1], + 150, + 151, + 152, + s, + [156, 4, 1], + 161 +]), + goto: u([ + 10, + 6, + 9, + 13, + 14, + s, + [16, 8, 1], + 56, + 57, + 58, + 3, + 12, + 29, + 109, + 15, + 30, + 67, + 35, + 36, + 32, + 33, + 34, + 55, + 60, + 107, + 108, + 126, + 127, + 51, + 52, + 53, + 43, + 7, + 8, + 74, + 62, + 24, + 25, + 37, + 38, + 26, + 27, + 112, + 113, + 28, + 59, + 39, + 42, + 40, + 49, + 1, + 2, + 5, + 128, + 130, + 73, + 83, + 80, + 70, + 64, + 68, + 69, + 122, + s, + [45, 4, 1], + 129, + 75, + 76, + 89, + 95, + 96, + 97, + 90, + 87, + 88, + 114, + 115, + 78, + 79, + 63, + 110, + 111, + 123, + 125, + 82, + 84, + 85, + 86, + 104, + 105, + 101, + 102, + 103, + 93, + 98, + 99, + 90, + 121 +]) +}), +parseError: function parseError(str, hash, ExceptionClass) { + if (hash.recoverable && typeof this.trace === 'function') { + this.trace(str); + hash.destroy(); // destroy... well, *almost*! 
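+ // ('almost', because the errorInfo `destroy()` implementations further below deliberately keep the `recoverable` flag intact)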
+ } else { + if (!ExceptionClass) { + ExceptionClass = this.JisonParserError; + } + throw new ExceptionClass(str, hash); + } +}, +parse: function parse(input) { + var self = this; + var stack = new Array(128); // token stack: stores token which leads to state at the same index (column storage) + var sstack = new Array(128); // state stack: stores states (column storage) + + var vstack = new Array(128); // semantic value stack + var lstack = new Array(128); // location stack + var table = this.table; + var sp = 0; // 'stack pointer': index into the stacks + var yyloc; + + var yylineno; + + + var symbol = 0; + var preErrorSymbol = 0; + var lastEofErrorStateDepth = 0; + var recoveringErrorInfo = null; + var recovering = 0; // (only used when the grammar contains error recovery rules) + var TERROR = this.TERROR; + var EOF = this.EOF; + var ERROR_RECOVERY_TOKEN_DISCARD_COUNT = (this.options.errorRecoveryTokenDiscardCount | 0) || 3; + var NO_ACTION = [0, 164 /* === table.length :: ensures that anyone using this new state will fail dramatically! */]; + + var lexer; + if (this.__lexer__) { + lexer = this.__lexer__; + } else { + lexer = this.__lexer__ = Object.create(this.lexer); + } + + var sharedState_yy = { + parseError: undefined, + quoteName: undefined, + lexer: undefined, + parser: undefined, + pre_parse: undefined, + post_parse: undefined, + pre_lex: undefined, + post_lex: undefined // WARNING: must be written this way for the code expanders to work correctly in both ES5 and ES6 modes! + }; + + var ASSERT; + if (typeof assert !== 'function') { + ASSERT = function JisonAssert(cond, msg) { + if (!cond) { + throw new Error('assertion failed: ' + (msg || '***')); + } + }; + } else { + ASSERT = assert; + } + + this.yyGetSharedState = function yyGetSharedState() { + return sharedState_yy; + }; + + + this.yyGetErrorInfoTrack = function yyGetErrorInfoTrack() { + return recoveringErrorInfo; + }; + + + // shallow clone objects, straight copy of simple `src` values + // e.g. `lexer.yytext` MAY be a complex value object, + // rather than a simple string/value. + function shallow_copy(src) { + if (typeof src === 'object') { + var dst = {}; + for (var k in src) { + if (Object.prototype.hasOwnProperty.call(src, k)) { + dst[k] = src[k]; + } + } + return dst; + } + return src; + } + function shallow_copy_noclobber(dst, src) { + for (var k in src) { + if (typeof dst[k] === 'undefined' && Object.prototype.hasOwnProperty.call(src, k)) { + dst[k] = src[k]; + } + } + } + function copy_yylloc(loc) { + var rv = shallow_copy(loc); + if (rv && rv.range) { + rv.range = rv.range.slice(0); + } + return rv; + } + + // copy state + shallow_copy_noclobber(sharedState_yy, this.yy); + + sharedState_yy.lexer = lexer; + sharedState_yy.parser = this; + + + + + + // *Always* setup `yyError`, `YYRECOVERING`, `yyErrOk` and `yyClearIn` functions as it is paramount + // to have *their* closure match ours -- if we only set them up once, + // any subsequent `parse()` runs will fail in very obscure ways when + // these functions are invoked in the user action code block(s) as + // their closure will still refer to the `parse()` instance which set + // them up. Hence we MUST set them up at the start of every `parse()` run! + if (this.yyError) { + this.yyError = function yyError(str /*, ...args */) { + + + + + + + + + + + + var error_rule_depth = (this.options.parserErrorsAreRecoverable ? 
locateNearestErrorRecoveryRule(state) : -1); + var expected = this.collect_expected_token_set(state); + var hash = this.constructParseErrorInfo(str, null, expected, (error_rule_depth >= 0)); + // append to the old one? + if (recoveringErrorInfo) { + var esp = recoveringErrorInfo.info_stack_pointer; + + recoveringErrorInfo.symbol_stack[esp] = symbol; + var v = this.shallowCopyErrorInfo(hash); + v.yyError = true; + v.errorRuleDepth = error_rule_depth; + v.recovering = recovering; + // v.stackSampleLength = error_rule_depth + EXTRA_STACK_SAMPLE_DEPTH; + + recoveringErrorInfo.value_stack[esp] = v; + recoveringErrorInfo.location_stack[esp] = copy_yylloc(lexer.yylloc); + recoveringErrorInfo.state_stack[esp] = newState || NO_ACTION[1]; + + ++esp; + recoveringErrorInfo.info_stack_pointer = esp; + } else { + recoveringErrorInfo = this.shallowCopyErrorInfo(hash); + recoveringErrorInfo.yyError = true; + recoveringErrorInfo.errorRuleDepth = error_rule_depth; + recoveringErrorInfo.recovering = recovering; + } + + + // Add any extra args to the hash under the name `extra_error_attributes`: + var args = Array.prototype.slice.call(arguments, 1); + if (args.length) { + hash.extra_error_attributes = args; + } + + var r = this.parseError(str, hash, this.JisonParserError); + return r; + }; + } + + + + + + + + // Does the shared state override the default `parseError` that already comes with this instance? + if (typeof sharedState_yy.parseError === 'function') { + this.parseError = function parseErrorAlt(str, hash, ExceptionClass) { + if (!ExceptionClass) { + ExceptionClass = this.JisonParserError; + } + return sharedState_yy.parseError.call(this, str, hash, ExceptionClass); + }; + } else { + this.parseError = this.originalParseError; + } + + // Does the shared state override the default `quoteName` that already comes with this instance? + if (typeof sharedState_yy.quoteName === 'function') { + this.quoteName = function quoteNameAlt(id_str) { + return sharedState_yy.quoteName.call(this, id_str); + }; + } else { + this.quoteName = this.originalQuoteName; + } + + // set up the cleanup function; make it an API so that external code can re-use this one in case of + // calamities or when the `%options no-try-catch` option has been specified for the grammar, in which + // case this parse() API method doesn't come with a `finally { ... }` block any more! + // + // NOTE: as this API uses parse() as a closure, it MUST be set again on every parse() invocation, + // or else your `sharedState`, etc. references will be *wrong*! + this.cleanupAfterParse = function parser_cleanupAfterParse(resultValue, invoke_post_methods, do_not_nuke_errorinfos) { + var rv; + + if (invoke_post_methods) { + var hash; + + if (sharedState_yy.post_parse || this.post_parse) { + // create an error hash info instance: we re-use this API in a **non-error situation** + // as this one delivers all parser internals ready for access by userland code. + hash = this.constructParseErrorInfo(null /* no error! */, null /* no exception! 
*/, null, false); + } + + if (sharedState_yy.post_parse) { + rv = sharedState_yy.post_parse.call(this, sharedState_yy, resultValue, hash); + if (typeof rv !== 'undefined') resultValue = rv; + } + if (this.post_parse) { + rv = this.post_parse.call(this, sharedState_yy, resultValue, hash); + if (typeof rv !== 'undefined') resultValue = rv; + } + + // cleanup: + if (hash && hash.destroy) { + hash.destroy(); + } + } + + if (this.__reentrant_call_depth > 1) return resultValue; // do not (yet) kill the sharedState when this is a reentrant run. + + // clean up the lingering lexer structures as well: + if (lexer.cleanupAfterLex) { + lexer.cleanupAfterLex(do_not_nuke_errorinfos); + } + + // prevent lingering circular references from causing memory leaks: + if (sharedState_yy) { + sharedState_yy.lexer = undefined; + sharedState_yy.parser = undefined; + if (lexer.yy === sharedState_yy) { + lexer.yy = undefined; + } + } + sharedState_yy = undefined; + this.parseError = this.originalParseError; + this.quoteName = this.originalQuoteName; + + // nuke the vstack[] array at least as that one will still reference obsoleted user values. + // To be safe, we nuke the other internal stack columns as well... + stack.length = 0; // fastest way to nuke an array without overly bothering the GC + sstack.length = 0; + lstack.length = 0; + vstack.length = 0; + sp = 0; + + // nuke the error hash info instances created during this run. + // Userland code must COPY any data/references + // in the error hash instance(s) it is more permanently interested in. + if (!do_not_nuke_errorinfos) { + for (var i = this.__error_infos.length - 1; i >= 0; i--) { + var el = this.__error_infos[i]; + if (el && typeof el.destroy === 'function') { + el.destroy(); + } + } + this.__error_infos.length = 0; + + + for (var i = this.__error_recovery_infos.length - 1; i >= 0; i--) { + var el = this.__error_recovery_infos[i]; + if (el && typeof el.destroy === 'function') { + el.destroy(); + } + } + this.__error_recovery_infos.length = 0; + + if (recoveringErrorInfo && typeof recoveringErrorInfo.destroy === 'function') { + recoveringErrorInfo.destroy(); + recoveringErrorInfo = undefined; + } + + + } + + return resultValue; + }; + + // merge yylloc info into a new yylloc instance. + // + // `first_index` and `last_index` MAY be UNDEFINED/NULL or these are indexes into the `lstack[]` location stack array. + // + // `first_yylloc` and `last_yylloc` MAY be UNDEFINED/NULL or explicit (custom or regular) `yylloc` instances, in which + // case these override the corresponding first/last indexes. + // + // `dont_look_back` is an optional flag (default: FALSE), which instructs this merge operation NOT to search + // through the parse location stack for a location, which would otherwise be used to construct the new (epsilon!) + // yylloc info. + // + // Note: epsilon rule's yylloc situation is detected by passing both `first_index` and `first_yylloc` as UNDEFINED/NULL. 
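+ // For illustration, the generated rule actions earlier in this file exercise both calling styles:
+ //
+ //     this._$ = yyparser.yyMergeLocationInfo(yysp - 2, yysp);                // merge the location span of a 3-symbol production
+ //     this._$ = yyparser.yyMergeLocationInfo(null, null, null, null, true);  // epsilon rule: NULL indexes + `dont_look_back`
+ //
+ // i.e. callers pass either a pair of `lstack[]` indexes or explicit NULLs plus the `dont_look_back` flag.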
+ this.yyMergeLocationInfo = function parser_yyMergeLocationInfo(first_index, last_index, first_yylloc, last_yylloc, dont_look_back) { + var i1 = first_index | 0, + i2 = last_index | 0; + var l1 = first_yylloc, + l2 = last_yylloc; + var rv; + + // rules: + // - first/last yylloc entries override first/last indexes + + if (!l1) { + if (first_index != null) { + for (var i = i1; i <= i2; i++) { + l1 = lstack[i]; + if (l1) { + break; + } + } + } + } + + if (!l2) { + if (last_index != null) { + for (var i = i2; i >= i1; i--) { + l2 = lstack[i]; + if (l2) { + break; + } + } + } + } + + // - detect if an epsilon rule is being processed and act accordingly: + if (!l1 && first_index == null) { + // epsilon rule span merger. With optional look-ahead in l2. + if (!dont_look_back) { + for (var i = (i1 || sp) - 1; i >= 0; i--) { + l1 = lstack[i]; + if (l1) { + break; + } + } + } + if (!l1) { + if (!l2) { + // when we still don't have any valid yylloc info, we're looking at an epsilon rule + // without look-ahead and no preceding terms and/or `dont_look_back` set: + // in that case we ca do nothing but return NULL/UNDEFINED: + return undefined; + } else { + // shallow-copy L2: after all, we MAY be looking + // at unconventional yylloc info objects... + rv = shallow_copy(l2); + if (rv.range) { + // shallow copy the yylloc ranges info to prevent us from modifying the original arguments' entries: + rv.range = rv.range.slice(0); + } + return rv; + } + } else { + // shallow-copy L1, then adjust first col/row 1 column past the end. + rv = shallow_copy(l1); + rv.first_line = rv.last_line; + rv.first_column = rv.last_column; + if (rv.range) { + // shallow copy the yylloc ranges info to prevent us from modifying the original arguments' entries: + rv.range = rv.range.slice(0); + rv.range[0] = rv.range[1]; + } + + if (l2) { + // shallow-mixin L2, then adjust last col/row accordingly. + shallow_copy_noclobber(rv, l2); + rv.last_line = l2.last_line; + rv.last_column = l2.last_column; + if (rv.range && l2.range) { + rv.range[1] = l2.range[1]; + } + } + return rv; + } + } + + if (!l1) { + l1 = l2; + l2 = null; + } + if (!l1) { + return undefined; + } + + // shallow-copy L1|L2, before we try to adjust the yylloc values: after all, we MAY be looking + // at unconventional yylloc info objects... + rv = shallow_copy(l1); + + // first_line: ..., + // first_column: ..., + // last_line: ..., + // last_column: ..., + if (rv.range) { + // shallow copy the yylloc ranges info to prevent us from modifying the original arguments' entries: + rv.range = rv.range.slice(0); + } + + if (l2) { + shallow_copy_noclobber(rv, l2); + rv.last_line = l2.last_line; + rv.last_column = l2.last_column; + if (rv.range && l2.range) { + rv.range[1] = l2.range[1]; + } + } + + return rv; + }; + + // NOTE: as this API uses parse() as a closure, it MUST be set again on every parse() invocation, + // or else your `lexer`, `sharedState`, etc. references will be *wrong*! 
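+ // Note: the `pei` hash constructed by the API below is the exact object later handed to
+ // `parseError(str, hash, ExceptionClass)`, so userland error handlers can inspect e.g.
+ // `hash.expected`, `hash.loc`, `hash.recoverable` and the parse stack columns
+ // (`symbol_stack`, `state_stack`, `value_stack`, `location_stack`).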
+ this.constructParseErrorInfo = function parser_constructParseErrorInfo(msg, ex, expected, recoverable) { + var pei = { + errStr: msg, + exception: ex, + text: lexer.match, + value: lexer.yytext, + token: this.describeSymbol(symbol) || symbol, + token_id: symbol, + line: lexer.yylineno, + loc: copy_yylloc(lexer.yylloc), + expected: expected, + recoverable: recoverable, + state: state, + action: action, + new_state: newState, + symbol_stack: stack, + state_stack: sstack, + value_stack: vstack, + location_stack: lstack, + stack_pointer: sp, + yy: sharedState_yy, + lexer: lexer, + parser: this, + + // and make sure the error info doesn't stay due to potential + // ref cycle via userland code manipulations. + // These would otherwise all be memory leak opportunities! + // + // Note that only array and object references are nuked as those + // constitute the set of elements which can produce a cyclic ref. + // The rest of the members is kept intact as they are harmless. + destroy: function destructParseErrorInfo() { + // remove cyclic references added to error info: + // info.yy = null; + // info.lexer = null; + // info.value = null; + // info.value_stack = null; + // ... + var rec = !!this.recoverable; + for (var key in this) { + if (this.hasOwnProperty(key) && typeof key === 'object') { + this[key] = undefined; + } + } + this.recoverable = rec; + } + }; + // track this instance so we can `destroy()` it once we deem it superfluous and ready for garbage collection! + this.__error_infos.push(pei); + return pei; + }; + + // clone some parts of the (possibly enhanced!) errorInfo object + // to give them some persistence. + this.shallowCopyErrorInfo = function parser_shallowCopyErrorInfo(p) { + var rv = shallow_copy(p); + + // remove the large parts which can only cause cyclic references + // and are otherwise available from the parser kernel anyway. + delete rv.sharedState_yy; + delete rv.parser; + delete rv.lexer; + + // lexer.yytext MAY be a complex value object, rather than a simple string/value: + rv.value = shallow_copy(rv.value); + + // yylloc info: + rv.loc = copy_yylloc(rv.loc); + + // the 'expected' set won't be modified, so no need to clone it: + //rv.expected = rv.expected.slice(0); + + //symbol stack is a simple array: + rv.symbol_stack = rv.symbol_stack.slice(0); + // ditto for state stack: + rv.state_stack = rv.state_stack.slice(0); + // clone the yylloc's in the location stack?: + rv.location_stack = rv.location_stack.map(copy_yylloc); + // and the value stack may carry both simple and complex values: + // shallow-copy the latter. + rv.value_stack = rv.value_stack.map(shallow_copy); + + // and we don't bother with the sharedState_yy reference: + //delete rv.yy; + + // now we prepare for tracking the COMBINE actions + // in the error recovery code path: + // + // as we want to keep the maximum error info context, we + // *scan* the state stack to find the first *empty* slot. + // This position will surely be AT OR ABOVE the current + // stack pointer, but we want to keep the 'used but discarded' + // part of the parse stacks *intact* as those slots carry + // error context that may be useful when you want to produce + // very detailed error diagnostic reports. + // + // ### Purpose of each stack pointer: + // + // - stack_pointer: points at the top of the parse stack + // **as it existed at the time of the error + // occurrence, i.e. 
at the time the stack + // snapshot was taken and copied into the + // errorInfo object.** + // - base_pointer: the bottom of the **empty part** of the + // stack, i.e. **the start of the rest of + // the stack space /above/ the existing + // parse stack. This section will be filled + // by the error recovery process as it + // travels the parse state machine to + // arrive at the resolving error recovery rule.** + // - info_stack_pointer: + // this stack pointer points to the **top of + // the error ecovery tracking stack space**, i.e. + // this stack pointer takes up the role of + // the `stack_pointer` for the error recovery + // process. Any mutations in the **parse stack** + // are **copy-appended** to this part of the + // stack space, keeping the bottom part of the + // stack (the 'snapshot' part where the parse + // state at the time of error occurrence was kept) + // intact. + // - root_failure_pointer: + // copy of the `stack_pointer`... + // + for (var i = rv.stack_pointer; typeof rv.state_stack[i] !== 'undefined'; i++) { + // empty + } + rv.base_pointer = i; + rv.info_stack_pointer = i; + + rv.root_failure_pointer = rv.stack_pointer; + + // track this instance so we can `destroy()` it once we deem it superfluous and ready for garbage collection! + this.__error_recovery_infos.push(rv); + + return rv; + }; + + function getNonTerminalFromCode(symbol) { + var tokenName = self.getSymbolName(symbol); + if (!tokenName) { + tokenName = symbol; + } + return tokenName; + } + + + function lex() { + var token = lexer.lex(); + // if token isn't its numeric value, convert + if (typeof token !== 'number') { + token = self.symbols_[token] || token; + } + + return token || EOF; + } + + + var state, action, r, t; + var yyval = { + $: true, + _$: undefined, + yy: sharedState_yy + }; + var p; + var yyrulelen; + var this_production; + var newState; + var retval = false; + + + // Return the rule stack depth where the nearest error rule can be found. + // Return -1 when no error recovery rule was found. + function locateNearestErrorRecoveryRule(state) { + var stack_probe = sp - 1; + var depth = 0; + + // try to recover from error + for (;;) { + // check for error recovery rule in this state + + + + + + + + + + var t = table[state][TERROR] || NO_ACTION; + if (t[0]) { + // We need to make sure we're not cycling forever: + // once we hit EOF, even when we `yyerrok()` an error, we must + // prevent the core from running forever, + // e.g. when parent rules are still expecting certain input to + // follow after this, for example when you handle an error inside a set + // of braces which are matched by a parent rule in your grammar. + // + // Hence we require that every error handling/recovery attempt + // *after we've hit EOF* has a diminishing state stack: this means + // we will ultimately have unwound the state stack entirely and thus + // terminate the parse in a controlled fashion even when we have + // very complex error/recovery code interplay in the core + user + // action code blocks: + + + + + + + + + + if (symbol === EOF) { + if (!lastEofErrorStateDepth) { + lastEofErrorStateDepth = sp - 1 - depth; + } else if (lastEofErrorStateDepth <= sp - 1 - depth) { + + + + + + + + + + --stack_probe; // popStack(1): [symbol, action] + state = sstack[stack_probe]; + ++depth; + continue; + } + } + return depth; + } + if (state === 0 /* $accept rule */ || stack_probe < 1) { + + + + + + + + + + return -1; // No suitable error recovery rule available. 
+ } + --stack_probe; // popStack(1): [symbol, action] + state = sstack[stack_probe]; + ++depth; + } + } + + + try { + this.__reentrant_call_depth++; + + lexer.setInput(input, sharedState_yy); + + yyloc = lexer.yylloc; + lstack[sp] = yyloc; + vstack[sp] = null; + sstack[sp] = 0; + stack[sp] = 0; + ++sp; + + + + + + if (this.pre_parse) { + this.pre_parse.call(this, sharedState_yy); + } + if (sharedState_yy.pre_parse) { + sharedState_yy.pre_parse.call(this, sharedState_yy); + } + + newState = sstack[sp - 1]; + for (;;) { + // retrieve state number from top of stack + state = newState; // sstack[sp - 1]; + + // use default actions if available + if (this.defaultActions[state]) { + action = 2; + newState = this.defaultActions[state]; + } else { + // The single `==` condition below covers both these `===` comparisons in a single + // operation: + // + // if (symbol === null || typeof symbol === 'undefined') ... + if (!symbol) { + symbol = lex(); + } + // read action for current state and first input + t = (table[state] && table[state][symbol]) || NO_ACTION; + newState = t[1]; + action = t[0]; + + + + + + + + + + + + // handle parse error + if (!action) { + // first see if there's any chance at hitting an error recovery rule: + var error_rule_depth = locateNearestErrorRecoveryRule(state); + var errStr = null; + var errSymbolDescr = (this.describeSymbol(symbol) || symbol); + var expected = this.collect_expected_token_set(state); + + if (!recovering) { + // Report error + if (typeof lexer.yylineno === 'number') { + errStr = 'Parse error on line ' + (lexer.yylineno + 1) + ': '; + } else { + errStr = 'Parse error: '; + } + + if (typeof lexer.showPosition === 'function') { + errStr += '\n' + lexer.showPosition(79 - 10, 10) + '\n'; + } + if (expected.length) { + errStr += 'Expecting ' + expected.join(', ') + ', got unexpected ' + errSymbolDescr; + } else { + errStr += 'Unexpected ' + errSymbolDescr; + } + + p = this.constructParseErrorInfo(errStr, null, expected, (error_rule_depth >= 0)); + + // cleanup the old one before we start the new error info track: + if (recoveringErrorInfo && typeof recoveringErrorInfo.destroy === 'function') { + recoveringErrorInfo.destroy(); + } + recoveringErrorInfo = this.shallowCopyErrorInfo(p); + + r = this.parseError(p.errStr, p, this.JisonParserError); + + + + + + + + + + // Protect against overly blunt userland `parseError` code which *sets* + // the `recoverable` flag without properly checking first: + // we always terminate the parse when there's no recovery rule available anyhow! + if (!p.recoverable || error_rule_depth < 0) { + retval = r; + break; + } else { + // TODO: allow parseError callback to edit symbol and or state at the start of the error recovery process... 
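+ // A minimal sketch of such a userland `parseError` hook (assuming it is installed on the
+ // usual `parser.yy` settings object before `parse()` is invoked), modelled on the default
+ // `parseError` defined earlier in this parser definition:
+ //
+ //     parser.yy.parseError = function (str, hash, ExceptionClass) {
+ //         if (hash.recoverable && typeof this.trace === 'function') {
+ //             this.trace(str);       // log it and let the error recovery machinery continue
+ //             hash.destroy();
+ //         } else {
+ //             throw new (ExceptionClass || this.JisonParserError)(str, hash);
+ //         }
+ //     };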
+ } + } + + + + + + + + + + + var esp = recoveringErrorInfo.info_stack_pointer; + + // just recovered from another error + if (recovering === ERROR_RECOVERY_TOKEN_DISCARD_COUNT && error_rule_depth >= 0) { + // SHIFT current lookahead and grab another + recoveringErrorInfo.symbol_stack[esp] = symbol; + recoveringErrorInfo.value_stack[esp] = shallow_copy(lexer.yytext); + recoveringErrorInfo.location_stack[esp] = copy_yylloc(lexer.yylloc); + recoveringErrorInfo.state_stack[esp] = newState; // push state + ++esp; + + // Pick up the lexer details for the current symbol as that one is not 'look-ahead' any more: + + + + yyloc = lexer.yylloc; + + preErrorSymbol = 0; + symbol = lex(); + + + + + + + + + + } + + // try to recover from error + if (error_rule_depth < 0) { + ASSERT(recovering > 0); + recoveringErrorInfo.info_stack_pointer = esp; + + // barf a fatal hairball when we're out of look-ahead symbols and none hit a match + // while we are still busy recovering from another error: + var po = this.__error_infos[this.__error_infos.length - 1]; + if (!po) { + p = this.constructParseErrorInfo('Parsing halted while starting to recover from another error.', null, expected, false); + } else { + p = this.constructParseErrorInfo('Parsing halted while starting to recover from another error. Previous error which resulted in this fatal result: ' + po.errStr, null, expected, false); + p.extra_error_attributes = po; + } + retval = this.parseError(p.errStr, p, this.JisonParserError); + break; + } + + preErrorSymbol = (symbol === TERROR ? 0 : symbol); // save the lookahead token + symbol = TERROR; // insert generic error symbol as new lookahead + + const EXTRA_STACK_SAMPLE_DEPTH = 3; + + // REDUCE/COMBINE the pushed terms/tokens to a new ERROR token: + recoveringErrorInfo.symbol_stack[esp] = preErrorSymbol; + if (errStr) { + recoveringErrorInfo.value_stack[esp] = { + yytext: shallow_copy(lexer.yytext), + errorRuleDepth: error_rule_depth, + errorStr: errStr, + errorSymbolDescr: errSymbolDescr, + expectedStr: expected, + stackSampleLength: error_rule_depth + EXTRA_STACK_SAMPLE_DEPTH + }; + + + + + + + + + + } else { + recoveringErrorInfo.value_stack[esp] = { + yytext: shallow_copy(lexer.yytext), + errorRuleDepth: error_rule_depth, + stackSampleLength: error_rule_depth + EXTRA_STACK_SAMPLE_DEPTH + }; + } + recoveringErrorInfo.location_stack[esp] = copy_yylloc(lexer.yylloc); + recoveringErrorInfo.state_stack[esp] = newState || NO_ACTION[1]; + + ++esp; + recoveringErrorInfo.info_stack_pointer = esp; + + yyval.$ = recoveringErrorInfo; + yyval._$ = undefined; + + yyrulelen = error_rule_depth; + + + + + + + + + + r = this.performAction.call(yyval, yyloc, NO_ACTION[1], sp - 1, vstack, lstack); + + if (typeof r !== 'undefined') { + retval = r; + break; + } + + // pop off stack + sp -= yyrulelen; + + // and move the top entries + discarded part of the parse stacks onto the error info stack: + for (var idx = sp - EXTRA_STACK_SAMPLE_DEPTH, top = idx + yyrulelen; idx < top; idx++, esp++) { + recoveringErrorInfo.symbol_stack[esp] = stack[idx]; + recoveringErrorInfo.value_stack[esp] = shallow_copy(vstack[idx]); + recoveringErrorInfo.location_stack[esp] = copy_yylloc(lstack[idx]); + recoveringErrorInfo.state_stack[esp] = sstack[idx]; + } + + recoveringErrorInfo.symbol_stack[esp] = TERROR; + recoveringErrorInfo.value_stack[esp] = shallow_copy(yyval.$); + recoveringErrorInfo.location_stack[esp] = copy_yylloc(yyval._$); + + // goto new state = table[STATE][NONTERMINAL] + newState = sstack[sp - 1]; + + if 
(this.defaultActions[newState]) { + recoveringErrorInfo.state_stack[esp] = this.defaultActions[newState]; + } else { + t = (table[newState] && table[newState][symbol]) || NO_ACTION; + recoveringErrorInfo.state_stack[esp] = t[1]; + } + + ++esp; + recoveringErrorInfo.info_stack_pointer = esp; + + // allow N (default: 3) real symbols to be shifted before reporting a new error + recovering = ERROR_RECOVERY_TOKEN_DISCARD_COUNT; + + + + + + + + + + + // Now duplicate the standard parse machine here, at least its initial + // couple of rounds until the TERROR symbol is **pushed onto the parse stack**, + // as we wish to push something special then! + + + // Run the state machine in this copy of the parser state machine + // until we *either* consume the error symbol (and its related information) + // *or* we run into another error while recovering from this one + // *or* we execute a `reduce` action which outputs a final parse + // result (yes, that MAY happen!)... + + ASSERT(recoveringErrorInfo); + ASSERT(symbol === TERROR); + while (symbol) { + // retrieve state number from top of stack + state = newState; // sstack[sp - 1]; + + // use default actions if available + if (this.defaultActions[state]) { + action = 2; + newState = this.defaultActions[state]; + } else { + // read action for current state and first input + t = (table[state] && table[state][symbol]) || NO_ACTION; + newState = t[1]; + action = t[0]; + + + + + + + + + + + // encountered another parse error? If so, break out to main loop + // and take it from there! + if (!action) { + newState = state; + break; + } + } + + + + + + + + + + + switch (action) { + // catch misc. parse failures: + default: + // this shouldn't happen, unless resolve defaults are off + if (action instanceof Array) { + p = this.constructParseErrorInfo('Parse Error: multiple actions possible at state: ' + state + ', token: ' + symbol, null, null, false); + retval = this.parseError(p.errStr, p, this.JisonParserError); + // signal end of error recovery loop AND end of outer parse loop + action = 3; + break; + } + // Another case of better safe than sorry: in case state transitions come out of another error recovery process + // or a buggy LUT (LookUp Table): + p = this.constructParseErrorInfo('Parsing halted. 
No viable error recovery approach available due to internal system failure.', null, null, false); + retval = this.parseError(p.errStr, p, this.JisonParserError); + // signal end of error recovery loop AND end of outer parse loop + action = 3; + break; + + // shift: + case 1: + stack[sp] = symbol; + //vstack[sp] = lexer.yytext; + ASSERT(recoveringErrorInfo); + vstack[sp] = recoveringErrorInfo; + //lstack[sp] = copy_yylloc(lexer.yylloc); + lstack[sp] = this.yyMergeLocationInfo(null, null, recoveringErrorInfo.loc, lexer.yylloc, true); + sstack[sp] = newState; // push state + ++sp; + symbol = 0; + if (!preErrorSymbol) { // normal execution / no error + // Pick up the lexer details for the current symbol as that one is not 'look-ahead' any more: + + + + yyloc = lexer.yylloc; + + if (recovering > 0) { + recovering--; + + + + + + + + + + } + } else { + // error just occurred, resume old lookahead f/ before error, *unless* that drops us straight back into error mode: + symbol = preErrorSymbol; + preErrorSymbol = 0; + + + + + + + + + + // read action for current state and first input + t = (table[newState] && table[newState][symbol]) || NO_ACTION; + if (!t[0] || symbol === TERROR) { + // forget about that symbol and move forward: this wasn't a 'forgot to insert' error type where + // (simple) stuff might have been missing before the token which caused the error we're + // recovering from now... + // + // Also check if the LookAhead symbol isn't the ERROR token we set as part of the error + // recovery, for then this we would we idling (cycling) on the error forever. + // Yes, this does not take into account the possibility that the *lexer* may have + // produced a *new* TERROR token all by itself, but that would be a very peculiar grammar! + + + + + + + + + + symbol = 0; + } + } + + // once we have pushed the special ERROR token value, we're done in this inner loop! + break; + + // reduce: + case 2: + this_production = this.productions_[newState - 1]; // `this.productions_[]` is zero-based indexed while states start from 1 upwards... + yyrulelen = this_production[1]; + + + + + + + + + + + r = this.performAction.call(yyval, yyloc, newState, sp - 1, vstack, lstack); + + if (typeof r !== 'undefined') { + // signal end of error recovery loop AND end of outer parse loop + action = 3; + retval = r; + break; + } + + // pop off stack + sp -= yyrulelen; + + // don't overwrite the `symbol` variable: use a local var to speed things up: + var ntsymbol = this_production[0]; // push nonterminal (reduce) + stack[sp] = ntsymbol; + vstack[sp] = yyval.$; + lstack[sp] = yyval._$; + // goto new state = table[STATE][NONTERMINAL] + newState = table[sstack[sp - 1]][ntsymbol]; + sstack[sp] = newState; + ++sp; + + + + + + + + + + continue; + + // accept: + case 3: + retval = true; + // Return the `$accept` rule's `$$` result, if available. + // + // Also note that JISON always adds this top-most `$accept` rule (with implicit, + // default, action): + // + // $accept: $end + // %{ $$ = $1; @$ = @1; %} + // + // which, combined with the parse kernel's `$accept` state behaviour coded below, + // will produce the `$$` value output of the rule as the parse result, + // IFF that result is *not* `undefined`. (See also the parser kernel code.) 
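+ // Each `productions_[]` entry is a two-element array: [nonterminal symbol id, RHS length].
+ // E.g. for the `prec : PREC symbol` rule handled earlier, that second element is 2: the number
+ // of value/location slots popped off the stacks further below when the rule is reduced.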
+ // + // In code: + // + // %{ + // @$ = @1; // if location tracking support is included + // if (typeof $1 !== 'undefined') + // return $1; + // else + // return true; // the default parse result if the rule actions don't produce anything + // %} + sp--; + if (typeof vstack[sp] !== 'undefined') { + retval = vstack[sp]; + } + break; + } + + // break out of loop: we accept or fail with error + break; + } + + // should we also break out of the regular/outer parse loop, + // i.e. did the parser already produce a parse result in here?! + if (action === 3) { + break; + } + continue; + } + + + } + + + + + + + + + + + switch (action) { + // catch misc. parse failures: + default: + // this shouldn't happen, unless resolve defaults are off + if (action instanceof Array) { + p = this.constructParseErrorInfo('Parse Error: multiple actions possible at state: ' + state + ', token: ' + symbol, null, null, false); + retval = this.parseError(p.errStr, p, this.JisonParserError); + break; + } + // Another case of better safe than sorry: in case state transitions come out of another error recovery process + // or a buggy LUT (LookUp Table): + p = this.constructParseErrorInfo('Parsing halted. No viable error recovery approach available due to internal system failure.', null, null, false); + retval = this.parseError(p.errStr, p, this.JisonParserError); + break; + + // shift: + case 1: + stack[sp] = symbol; + vstack[sp] = lexer.yytext; + lstack[sp] = copy_yylloc(lexer.yylloc); + sstack[sp] = newState; // push state + + ++sp; + symbol = 0; + ASSERT(preErrorSymbol === 0); + if (!preErrorSymbol) { // normal execution / no error + // Pick up the lexer details for the current symbol as that one is not 'look-ahead' any more: + + + + yyloc = lexer.yylloc; + + if (recovering > 0) { + recovering--; + + + + + + + + + + } + } else { + // error just occurred, resume old lookahead f/ before error, *unless* that drops us straight back into error mode: + symbol = preErrorSymbol; + preErrorSymbol = 0; + + + + + + + + + + // read action for current state and first input + t = (table[newState] && table[newState][symbol]) || NO_ACTION; + if (!t[0] || symbol === TERROR) { + // forget about that symbol and move forward: this wasn't a 'forgot to insert' error type where + // (simple) stuff might have been missing before the token which caused the error we're + // recovering from now... + // + // Also check if the LookAhead symbol isn't the ERROR token we set as part of the error + // recovery, for then this we would we idling (cycling) on the error forever. + // Yes, this does not take into account the possibility that the *lexer* may have + // produced a *new* TERROR token all by itself, but that would be a very peculiar grammar! + + + + + + + + + + symbol = 0; + } + } + + continue; + + // reduce: + case 2: + this_production = this.productions_[newState - 1]; // `this.productions_[]` is zero-based indexed while states start from 1 upwards... 
+ yyrulelen = this_production[1]; + + + + + + + + + + + r = this.performAction.call(yyval, yyloc, newState, sp - 1, vstack, lstack); + + if (typeof r !== 'undefined') { + retval = r; + break; + } + + // pop off stack + sp -= yyrulelen; + + // don't overwrite the `symbol` variable: use a local var to speed things up: + var ntsymbol = this_production[0]; // push nonterminal (reduce) + stack[sp] = ntsymbol; + vstack[sp] = yyval.$; + lstack[sp] = yyval._$; + // goto new state = table[STATE][NONTERMINAL] + newState = table[sstack[sp - 1]][ntsymbol]; + sstack[sp] = newState; + ++sp; + + + + + + + + + + continue; + + // accept: + case 3: + retval = true; + // Return the `$accept` rule's `$$` result, if available. + // + // Also note that JISON always adds this top-most `$accept` rule (with implicit, + // default, action): + // + // $accept: $end + // %{ $$ = $1; @$ = @1; %} + // + // which, combined with the parse kernel's `$accept` state behaviour coded below, + // will produce the `$$` value output of the rule as the parse result, + // IFF that result is *not* `undefined`. (See also the parser kernel code.) + // + // In code: + // + // %{ + // @$ = @1; // if location tracking support is included + // if (typeof $1 !== 'undefined') + // return $1; + // else + // return true; // the default parse result if the rule actions don't produce anything + // %} + sp--; + if (typeof vstack[sp] !== 'undefined') { + retval = vstack[sp]; + } + + break; + } + + // break out of loop: we accept or fail with error + break; + } + } catch (ex) { + // report exceptions through the parseError callback too, but keep the exception intact + // if it is a known parser or lexer error which has been thrown by parseError() already: + if (ex instanceof this.JisonParserError) { + throw ex; + } + else if (lexer && typeof lexer.JisonLexerError === 'function' && ex instanceof lexer.JisonLexerError) { + throw ex; + } + else { + p = this.constructParseErrorInfo('Parsing aborted due to exception.', ex, null, false); + retval = this.parseError(p.errStr, p, this.JisonParserError); + } + } finally { + retval = this.cleanupAfterParse(retval, true, true); + this.__reentrant_call_depth--; + } // /finally + + return retval; +}, +yyError: 1 +}; +parser.originalParseError = parser.parseError; +parser.originalQuoteName = parser.quoteName; + +var ebnf = false; + + + +var rmCommonWS = helpers.rmCommonWS; +var dquote = helpers.dquote; +var checkActionBlock = helpers.checkActionBlock; + + +// transform ebnf to bnf if necessary +function extend(json, grammar) { + if (ebnf) { + json.ebnf = grammar.grammar; // keep the original source EBNF around for possible pretty-printing & AST exports. + json.bnf = transform(grammar.grammar); + } + else { + json.bnf = grammar.grammar; + } + if (grammar.actionInclude) { + json.actionInclude = grammar.actionInclude; + } + return json; +} + +// convert string value to number or boolean value, when possible +// (and when this is more or less obviously the intent) +// otherwise produce the string itself as value. +function parseValue(v) { + if (v === 'false') { + return false; + } + if (v === 'true') { + return true; + } + // http://stackoverflow.com/questions/175739/is-there-a-built-in-way-in-javascript-to-check-if-a-string-is-a-valid-number + // Note that the `v` check ensures that we do not convert `undefined`, `null` and `''` (empty string!) 
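+ // A few concrete cases, as implied by the checks above and the numeric test below:
+ //
+ //     parseValue('true')   // -> true
+ //     parseValue('42')     // -> 42       (finite number)
+ //     parseValue('')       // -> ''       (falsy `v`: returned untouched)
+ //     parseValue('x10')    // -> 'x10'    (not numeric: returned as-is)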
+ if (v && !isNaN(v)) { + var rv = +v; + if (isFinite(rv)) { + return rv; + } + } + return v; +} + + +parser.warn = function p_warn() { + console.warn.apply(console, arguments); +}; + +parser.log = function p_log() { + console.log.apply(console, arguments); +}; +/* lexer generated by jison-lex 0.6.1-205 */ + +/* + * Returns a Lexer object of the following structure: + * + * Lexer: { + * yy: {} The so-called "shared state" or rather the *source* of it; + * the real "shared state" `yy` passed around to + * the rule actions, etc. is a direct reference! + * + * This "shared context" object was passed to the lexer by way of + * the `lexer.setInput(str, yy)` API before you may use it. + * + * This "shared context" object is passed to the lexer action code in `performAction()` + * so userland code in the lexer actions may communicate with the outside world + * and/or other lexer rules' actions in more or less complex ways. + * + * } + * + * Lexer.prototype: { + * EOF: 1, + * ERROR: 2, + * + * yy: The overall "shared context" object reference. + * + * JisonLexerError: function(msg, hash), + * + * performAction: function lexer__performAction(yy, yyrulenumber, YY_START), + * + * The function parameters and `this` have the following value/meaning: + * - `this` : reference to the `lexer` instance. + * `yy_` is an alias for `this` lexer instance reference used internally. + * + * - `yy` : a reference to the `yy` "shared state" object which was passed to the lexer + * by way of the `lexer.setInput(str, yy)` API before. + * + * Note: + * The extra arguments you specified in the `%parse-param` statement in your + * **parser** grammar definition file are passed to the lexer via this object + * reference as member variables. + * + * - `yyrulenumber` : index of the matched lexer rule (regex), used internally. + * + * - `YY_START`: the current lexer "start condition" state. + * + * parseError: function(str, hash, ExceptionClass), + * + * constructLexErrorInfo: function(error_message, is_recoverable), + * Helper function. + * Produces a new errorInfo 'hash object' which can be passed into `parseError()`. + * See it's use in this lexer kernel in many places; example usage: + * + * var infoObj = lexer.constructParseErrorInfo('fail!', true); + * var retVal = lexer.parseError(infoObj.errStr, infoObj, lexer.JisonLexerError); + * + * options: { ... lexer %options ... }, + * + * lex: function(), + * Produce one token of lexed input, which was passed in earlier via the `lexer.setInput()` API. + * You MAY use the additional `args...` parameters as per `%parse-param` spec of the **lexer** grammar: + * these extra `args...` are added verbatim to the `yy` object reference as member variables. + * + * WARNING: + * Lexer's additional `args...` parameters (via lexer's `%parse-param`) MAY conflict with + * any attributes already added to `yy` by the **parser** or the jison run-time; + * when such a collision is detected an exception is thrown to prevent the generated run-time + * from silently accepting this confusing and potentially hazardous situation! + * + * cleanupAfterLex: function(do_not_nuke_errorinfos), + * Helper function. + * + * This helper API is invoked when the **parse process** has completed: it is the responsibility + * of the **parser** (or the calling userland code) to invoke this method once cleanup is desired. + * + * This helper may be invoked by user code to ensure the internal lexer gets properly garbage collected. 
+ * + * setInput: function(input, [yy]), + * + * + * input: function(), + * + * + * unput: function(str), + * + * + * more: function(), + * + * + * reject: function(), + * + * + * less: function(n), + * + * + * pastInput: function(n), + * + * + * upcomingInput: function(n), + * + * + * showPosition: function(), + * + * + * test_match: function(regex_match_array, rule_index), + * + * + * next: function(), + * + * + * begin: function(condition), + * + * + * pushState: function(condition), + * + * + * popState: function(), + * + * + * topState: function(), + * + * + * _currentRules: function(), + * + * + * stateStackSize: function(), + * + * + * performAction: function(yy, yy_, yyrulenumber, YY_START), + * + * + * rules: [...], + * + * + * conditions: {associative list: name ==> set}, + * } + * + * + * token location info (`yylloc`): { + * first_line: n, + * last_line: n, + * first_column: n, + * last_column: n, + * range: [start_number, end_number] + * (where the numbers are indexes into the input string, zero-based) + * } + * + * --- + * + * The `parseError` function receives a 'hash' object with these members for lexer errors: + * + * { + * text: (matched text) + * token: (the produced terminal token, if any) + * token_id: (the produced terminal token numeric ID, if any) + * line: (yylineno) + * loc: (yylloc) + * recoverable: (boolean: TRUE when the parser MAY have an error recovery rule + * available for this particular error) + * yy: (object: the current parser internal "shared state" `yy` + * as is also available in the rule actions; this can be used, + * for instance, for advanced error analysis and reporting) + * lexer: (reference to the current lexer instance used by the parser) + * } + * + * while `this` will reference the current lexer instance. + * + * When `parseError` is invoked by the lexer, the default implementation will + * attempt to invoke `yy.parser.parseError()`; when this callback is not provided + * it will try to invoke `yy.parseError()` instead. When that callback is also not + * provided, a `JisonLexerError` exception will be thrown containing the error + * message and `hash`, as constructed by the `constructLexErrorInfo()` API. + * + * Note that the lexer's `JisonLexerError` error class is passed via the + * `ExceptionClass` argument, which is invoked to construct the exception + * instance to be thrown, so technically `parseError` will throw the object + * produced by the `new ExceptionClass(str, hash)` JavaScript expression. + * + * --- + * + * You can specify lexer options by setting / modifying the `.options` object of your Lexer instance. + * These options are available: + * + * (Options are permanent.) + * + * yy: { + * parseError: function(str, hash, ExceptionClass) + * optional: overrides the default `parseError` function. + * } + * + * lexer.options: { + * pre_lex: function() + * optional: is invoked before the lexer is invoked to produce another token. + * `this` refers to the Lexer object. + * post_lex: function(token) { return token; } + * optional: is invoked when the lexer has produced a token `token`; + * this function can override the returned token value by returning another. + * When it does not return any (truthy) value, the lexer will return + * the original `token`. + * `this` refers to the Lexer object. + * + * WARNING: the next set of options are not meant to be changed. They echo the abilities of + * the lexer as per when it was compiled! + * + * ranges: boolean + * optional: `true` ==> token location info will include a .range[] member. 
+ * flex: boolean + * optional: `true` ==> flex-like lexing behaviour where the rules are tested + * exhaustively to find the longest match. + * backtrack_lexer: boolean + * optional: `true` ==> lexer regexes are tested in order and for invoked; + * the lexer terminates the scan when a token is returned by the action code. + * xregexp: boolean + * optional: `true` ==> lexer rule regexes are "extended regex format" requiring the + * `XRegExp` library. When this %option has not been specified at compile time, all lexer + * rule regexes have been written as standard JavaScript RegExp expressions. + * } + */ + + +var lexer = function() { + /** + * See also: + * http://stackoverflow.com/questions/1382107/whats-a-good-way-to-extend-error-in-javascript/#35881508 + * but we keep the prototype.constructor and prototype.name assignment lines too for compatibility + * with userland code which might access the derived class in a 'classic' way. + * + * @public + * @constructor + * @nocollapse + */ + function JisonLexerError(msg, hash) { + Object.defineProperty(this, 'name', { + enumerable: false, + writable: false, + value: 'JisonLexerError' + }); + + if (msg == null) + msg = '???'; + + Object.defineProperty(this, 'message', { + enumerable: false, + writable: true, + value: msg + }); + + this.hash = hash; + var stacktrace; + + if (hash && hash.exception instanceof Error) { + var ex2 = hash.exception; + this.message = ex2.message || msg; + stacktrace = ex2.stack; + } + + if (!stacktrace) { + if (Error.hasOwnProperty('captureStackTrace')) { + // V8 + Error.captureStackTrace(this, this.constructor); + } else { + stacktrace = new Error(msg).stack; + } + } + + if (stacktrace) { + Object.defineProperty(this, 'stack', { + enumerable: false, + writable: false, + value: stacktrace + }); + } + } + + if (typeof Object.setPrototypeOf === 'function') { + Object.setPrototypeOf(JisonLexerError.prototype, Error.prototype); + } else { + JisonLexerError.prototype = Object.create(Error.prototype); + } + + JisonLexerError.prototype.constructor = JisonLexerError; + JisonLexerError.prototype.name = 'JisonLexerError'; + + var lexer = { + +// Code Generator Information Report +// --------------------------------- +// +// Options: +// +// backtracking: .................... false +// location.ranges: ................. true +// location line+column tracking: ... true +// +// +// Forwarded Parser Analysis flags: +// +// uses yyleng: ..................... false +// uses yylineno: ................... false +// uses yytext: ..................... false +// uses yylloc: ..................... false +// uses lexer values: ............... true / true +// location tracking: ............... true +// location assignment: ............. true +// +// +// Lexer Analysis flags: +// +// uses yyleng: ..................... ??? +// uses yylineno: ................... ??? +// uses yytext: ..................... ??? +// uses yylloc: ..................... ??? +// uses ParseError API: ............. ??? +// uses yyerror: .................... ??? +// uses location tracking & editing: ??? +// uses more() API: ................. ??? +// uses unput() API: ................ ??? +// uses reject() API: ............... ??? +// uses less() API: ................. ??? +// uses display APIs pastInput(), upcomingInput(), showPosition(): +// ............................. ??? +// uses describeYYLLOC() API: ....... ??? 
+// +// --------- END OF REPORT ----------- + +EOF: 1, + ERROR: 2, + + // JisonLexerError: JisonLexerError, /// <-- injected by the code generator + + // options: {}, /// <-- injected by the code generator + + // yy: ..., /// <-- injected by setInput() + + __currentRuleSet__: null, /// INTERNAL USE ONLY: internal rule set cache for the current lexer state + + __error_infos: [], /// INTERNAL USE ONLY: the set of lexErrorInfo objects created since the last cleanup + __decompressed: false, /// INTERNAL USE ONLY: mark whether the lexer instance has been 'unfolded' completely and is now ready for use + done: false, /// INTERNAL USE ONLY + _backtrack: false, /// INTERNAL USE ONLY + _input: '', /// INTERNAL USE ONLY + _more: false, /// INTERNAL USE ONLY + _signaled_error_token: false, /// INTERNAL USE ONLY + conditionStack: [], /// INTERNAL USE ONLY; managed via `pushState()`, `popState()`, `topState()` and `stateStackSize()` + match: '', /// READ-ONLY EXTERNAL ACCESS - ADVANCED USE ONLY: tracks input which has been matched so far for the lexer token under construction. `match` is identical to `yytext` except that this one still contains the matched input string after `lexer.performAction()` has been invoked, where userland code MAY have changed/replaced the `yytext` value entirely! + matched: '', /// READ-ONLY EXTERNAL ACCESS - ADVANCED USE ONLY: tracks entire input which has been matched so far + matches: false, /// READ-ONLY EXTERNAL ACCESS - ADVANCED USE ONLY: tracks RE match result for last (successful) match attempt + yytext: '', /// ADVANCED USE ONLY: tracks input which has been matched so far for the lexer token under construction; this value is transferred to the parser as the 'token value' when the parser consumes the lexer token produced through a call to the `lex()` API. + offset: 0, /// READ-ONLY EXTERNAL ACCESS - ADVANCED USE ONLY: tracks the 'cursor position' in the input string, i.e. the number of characters matched so far + yyleng: 0, /// READ-ONLY EXTERNAL ACCESS - ADVANCED USE ONLY: length of matched input for the token under construction (`yytext`) + yylineno: 0, /// READ-ONLY EXTERNAL ACCESS - ADVANCED USE ONLY: 'line number' at which the token under construction is located + yylloc: null, /// READ-ONLY EXTERNAL ACCESS - ADVANCED USE ONLY: tracks location info (lines + columns) for the token under construction + + /** + * INTERNAL USE: construct a suitable error info hash object instance for `parseError`. 
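+     *
+     * Illustrative usage sketch (it mirrors how the kernel's own `yyerror()` and
+     * `next()` invoke this helper; the error message text is made up for the example):
+     *
+     *     var p = this.constructLexErrorInfo('Lexical error: unexpected input.',
+     *                                        this.options.lexerErrorsAreRecoverable);
+     *     return this.parseError(p.errStr, p, this.JisonLexerError) || this.ERROR;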
+ * + * @public + * @this {RegExpLexer} + */ + constructLexErrorInfo: function lexer_constructLexErrorInfo(msg, recoverable, show_input_position) { + msg = '' + msg; + + // heuristic to determine if the error message already contains a (partial) source code dump + // as produced by either `showPosition()` or `prettyPrintRange()`: + if (show_input_position == undefined) { + show_input_position = !(msg.indexOf('\n') > 0 && msg.indexOf('^') > 0); + } + + if (this.yylloc && show_input_position) { + if (typeof this.prettyPrintRange === 'function') { + var pretty_src = this.prettyPrintRange(this.yylloc); + + if (!/\n\s*$/.test(msg)) { + msg += '\n'; + } + + msg += '\n Erroneous area:\n' + this.prettyPrintRange(this.yylloc); + } else if (typeof this.showPosition === 'function') { + var pos_str = this.showPosition(); + + if (pos_str) { + if (msg.length && msg[msg.length - 1] !== '\n' && pos_str[0] !== '\n') { + msg += '\n' + pos_str; + } else { + msg += pos_str; + } + } + } + } + + /** @constructor */ + var pei = { + errStr: msg, + recoverable: !!recoverable, + text: this.match, // This one MAY be empty; userland code should use the `upcomingInput` API to obtain more text which follows the 'lexer cursor position'... + token: null, + line: this.yylineno, + loc: this.yylloc, + yy: this.yy, + lexer: this, + + /** + * and make sure the error info doesn't stay due to potential + * ref cycle via userland code manipulations. + * These would otherwise all be memory leak opportunities! + * + * Note that only array and object references are nuked as those + * constitute the set of elements which can produce a cyclic ref. + * The rest of the members is kept intact as they are harmless. + * + * @public + * @this {LexErrorInfo} + */ + destroy: function destructLexErrorInfo() { + // remove cyclic references added to error info: + // info.yy = null; + // info.lexer = null; + // ... + var rec = !!this.recoverable; + + for (var key in this) { + if (this.hasOwnProperty(key) && typeof key === 'object') { + this[key] = undefined; + } + } + + this.recoverable = rec; + } + }; + + // track this instance so we can `destroy()` it once we deem it superfluous and ready for garbage collection! + this.__error_infos.push(pei); + + return pei; + }, + + /** + * handler which is invoked when a lexer error occurs. + * + * @public + * @this {RegExpLexer} + */ + parseError: function lexer_parseError(str, hash, ExceptionClass) { + if (!ExceptionClass) { + ExceptionClass = this.JisonLexerError; + } + + if (this.yy) { + if (this.yy.parser && typeof this.yy.parser.parseError === 'function') { + return this.yy.parser.parseError.call(this, str, hash, ExceptionClass) || this.ERROR; + } else if (typeof this.yy.parseError === 'function') { + return this.yy.parseError.call(this, str, hash, ExceptionClass) || this.ERROR; + } + } + + throw new ExceptionClass(str, hash); + }, + + /** + * method which implements `yyerror(str, ...args)` functionality for use inside lexer actions. 
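+     *
+     * A minimal sketch of how lexer rule action code can use it (the message text is
+     * illustrative; see the unterminated-string-constant rules further below for the
+     * real calls, which go through the `yy_` alias for the lexer instance):
+     *
+     *     yy_.yyerror('unterminated string constant in %options entry.');
+     *
+     * Any extra arguments are attached to the error hash as `extra_error_attributes`.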
+ * + * @public + * @this {RegExpLexer} + */ + yyerror: function yyError(str /*, ...args */) { + var lineno_msg = ''; + + if (this.yylloc) { + lineno_msg = ' on line ' + (this.yylineno + 1); + } + + var p = this.constructLexErrorInfo( + 'Lexical error' + lineno_msg + ': ' + str, + this.options.lexerErrorsAreRecoverable + ); + + // Add any extra args to the hash under the name `extra_error_attributes`: + var args = Array.prototype.slice.call(arguments, 1); + + if (args.length) { + p.extra_error_attributes = args; + } + + return this.parseError(p.errStr, p, this.JisonLexerError) || this.ERROR; + }, + + /** + * final cleanup function for when we have completed lexing the input; + * make it an API so that external code can use this one once userland + * code has decided it's time to destroy any lingering lexer error + * hash object instances and the like: this function helps to clean + * up these constructs, which *may* carry cyclic references which would + * otherwise prevent the instances from being properly and timely + * garbage-collected, i.e. this function helps prevent memory leaks! + * + * @public + * @this {RegExpLexer} + */ + cleanupAfterLex: function lexer_cleanupAfterLex(do_not_nuke_errorinfos) { + // prevent lingering circular references from causing memory leaks: + this.setInput('', {}); + + // nuke the error hash info instances created during this run. + // Userland code must COPY any data/references + // in the error hash instance(s) it is more permanently interested in. + if (!do_not_nuke_errorinfos) { + for (var i = this.__error_infos.length - 1; i >= 0; i--) { + var el = this.__error_infos[i]; + + if (el && typeof el.destroy === 'function') { + el.destroy(); + } + } + + this.__error_infos.length = 0; + } + + return this; + }, + + /** + * clear the lexer token context; intended for internal use only + * + * @public + * @this {RegExpLexer} + */ + clear: function lexer_clear() { + this.yytext = ''; + this.yyleng = 0; + this.match = ''; + + // - DO NOT reset `this.matched` + this.matches = false; + + this._more = false; + this._backtrack = false; + var col = (this.yylloc ? this.yylloc.last_column : 0); + + this.yylloc = { + first_line: this.yylineno + 1, + first_column: col, + last_line: this.yylineno + 1, + last_column: col, + range: [this.offset, this.offset] + }; + }, + + /** + * resets the lexer, sets new input + * + * @public + * @this {RegExpLexer} + */ + setInput: function lexer_setInput(input, yy) { + this.yy = yy || this.yy || {}; + + // also check if we've fully initialized the lexer instance, + // including expansion work to be done to go from a loaded + // lexer to a usable lexer: + if (!this.__decompressed) { + // step 1: decompress the regex list: + var rules = this.rules; + + for (var i = 0, len = rules.length; i < len; i++) { + var rule_re = rules[i]; + + // compression: is the RE an xref to another RE slot in the rules[] table? + if (typeof rule_re === 'number') { + rules[i] = rules[rule_re]; + } + } + + // step 2: unfold the conditions[] set to make these ready for use: + var conditions = this.conditions; + + for (var k in conditions) { + var spec = conditions[k]; + var rule_ids = spec.rules; + var len = rule_ids.length; + var rule_regexes = new Array(len + 1); // slot 0 is unused; we use a 1-based index approach here to keep the hottest code in `lexer_next()` fast and simple! 
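+          // the parallel `rule_new_ids` array filled below keeps the original rule indexes
+          // aligned with the same 1-based slots, so `next()` can map a matched slot back to
+          // its rule id when it invokes `test_match()`: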
+ var rule_new_ids = new Array(len + 1); + + for (var i = 0; i < len; i++) { + var idx = rule_ids[i]; + var rule_re = rules[idx]; + rule_regexes[i + 1] = rule_re; + rule_new_ids[i + 1] = idx; + } + + spec.rules = rule_new_ids; + spec.__rule_regexes = rule_regexes; + spec.__rule_count = len; + } + + this.__decompressed = true; + } + + this._input = input || ''; + this.clear(); + this._signaled_error_token = false; + this.done = false; + this.yylineno = 0; + this.matched = ''; + this.conditionStack = ['INITIAL']; + this.__currentRuleSet__ = null; + + this.yylloc = { + first_line: 1, + first_column: 0, + last_line: 1, + last_column: 0, + range: [0, 0] + }; + + this.offset = 0; + return this; + }, + + /** + * edit the remaining input via user-specified callback. + * This can be used to forward-adjust the input-to-parse, + * e.g. inserting macro expansions and alike in the + * input which has yet to be lexed. + * The behaviour of this API contrasts the `unput()` et al + * APIs as those act on the *consumed* input, while this + * one allows one to manipulate the future, without impacting + * the current `yyloc` cursor location or any history. + * + * Use this API to help implement C-preprocessor-like + * `#include` statements, etc. + * + * The provided callback must be synchronous and is + * expected to return the edited input (string). + * + * The `cpsArg` argument value is passed to the callback + * as-is. + * + * `callback` interface: + * `function callback(input, cpsArg)` + * + * - `input` will carry the remaining-input-to-lex string + * from the lexer. + * - `cpsArg` is `cpsArg` passed into this API. + * + * The `this` reference for the callback will be set to + * reference this lexer instance so that userland code + * in the callback can easily and quickly access any lexer + * API. + * + * When the callback returns a non-string-type falsey value, + * we assume the callback did not edit the input and we + * will using the input as-is. + * + * When the callback returns a non-string-type value, it + * is converted to a string for lexing via the `"" + retval` + * operation. (See also why: http://2ality.com/2012/03/converting-to-string.html + * -- that way any returned object's `toValue()` and `toString()` + * methods will be invoked in a proper/desirable order.) + * + * @public + * @this {RegExpLexer} + */ + editRemainingInput: function lexer_editRemainingInput(callback, cpsArg) { + var rv = callback.call(this, this._input, cpsArg); + + if (typeof rv !== 'string') { + if (rv) { + this._input = '' + rv; + } + // else: keep `this._input` as is. + } else { + this._input = rv; + } + + return this; + }, + + /** + * consumes and returns one char from the input + * + * @public + * @this {RegExpLexer} + */ + input: function lexer_input() { + if (!this._input) { + //this.done = true; -- don't set `done` as we want the lex()/next() API to be able to produce one custom EOF token match after this anyhow. (lexer can match special <> tokens and perform user action code for a <> match, but only does so *once*) + return null; + } + + var ch = this._input[0]; + this.yytext += ch; + this.yyleng++; + this.offset++; + this.match += ch; + this.matched += ch; + + // Count the linenumber up when we hit the LF (or a stand-alone CR). + // On CRLF, the linenumber is incremented when you fetch the CR or the CRLF combo + // and we advance immediately past the LF as well, returning both together as if + // it was all a single 'character' only. 
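+      // (hence `slice_len` below becomes 2 when a CR+LF pair is consumed as a single unit)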
+ var slice_len = 1; + + var lines = false; + + if (ch === '\n') { + lines = true; + } else if (ch === '\r') { + lines = true; + var ch2 = this._input[1]; + + if (ch2 === '\n') { + slice_len++; + ch += ch2; + this.yytext += ch2; + this.yyleng++; + this.offset++; + this.match += ch2; + this.matched += ch2; + this.yylloc.range[1]++; + } + } + + if (lines) { + this.yylineno++; + this.yylloc.last_line++; + this.yylloc.last_column = 0; + } else { + this.yylloc.last_column++; + } + + this.yylloc.range[1]++; + this._input = this._input.slice(slice_len); + return ch; + }, + + /** + * unshifts one char (or an entire string) into the input + * + * @public + * @this {RegExpLexer} + */ + unput: function lexer_unput(ch) { + var len = ch.length; + var lines = ch.split(/(?:\r\n?|\n)/g); + this._input = ch + this._input; + this.yytext = this.yytext.substr(0, this.yytext.length - len); + this.yyleng = this.yytext.length; + this.offset -= len; + this.match = this.match.substr(0, this.match.length - len); + this.matched = this.matched.substr(0, this.matched.length - len); + + if (lines.length > 1) { + this.yylineno -= lines.length - 1; + this.yylloc.last_line = this.yylineno + 1; + + // Get last entirely matched line into the `pre_lines[]` array's + // last index slot; we don't mind when other previously + // matched lines end up in the array too. + var pre = this.match; + + var pre_lines = pre.split(/(?:\r\n?|\n)/g); + + if (pre_lines.length === 1) { + pre = this.matched; + pre_lines = pre.split(/(?:\r\n?|\n)/g); + } + + this.yylloc.last_column = pre_lines[pre_lines.length - 1].length; + } else { + this.yylloc.last_column -= len; + } + + this.yylloc.range[1] = this.yylloc.range[0] + this.yyleng; + this.done = false; + return this; + }, + + /** + * cache matched text and append it on next action + * + * @public + * @this {RegExpLexer} + */ + more: function lexer_more() { + this._more = true; + return this; + }, + + /** + * signal the lexer that this rule fails to match the input, so the + * next matching rule (regex) should be tested instead. + * + * @public + * @this {RegExpLexer} + */ + reject: function lexer_reject() { + if (this.options.backtrack_lexer) { + this._backtrack = true; + } else { + // when the `parseError()` call returns, we MUST ensure that the error is registered. + // We accomplish this by signaling an 'error' token to be produced for the current + // `.lex()` run. + var lineno_msg = ''; + + if (this.yylloc) { + lineno_msg = ' on line ' + (this.yylineno + 1); + } + + var p = this.constructLexErrorInfo( + 'Lexical error' + lineno_msg + ': You can only invoke reject() in the lexer when the lexer is of the backtracking persuasion (options.backtrack_lexer = true).', + false + ); + + this._signaled_error_token = this.parseError(p.errStr, p, this.JisonLexerError) || this.ERROR; + } + + return this; + }, + + /** + * retain first n characters of the match + * + * @public + * @this {RegExpLexer} + */ + less: function lexer_less(n) { + return this.unput(this.match.slice(n)); + }, + + /** + * return (part of the) already matched input, i.e. for error + * messages. + * + * Limit the returned string length to `maxSize` (default: 20). + * + * Limit the returned string to the `maxLines` number of lines of + * input (default: 1). + * + * Negative limit values equal *unlimited*. 
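+     *
+     * For example (the limit values are illustrative), an error reporter could request
+     * roughly two lines / 60 characters of the already-consumed input as context:
+     *
+     *     var context = this.pastInput(60, 2);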
+ * + * @public + * @this {RegExpLexer} + */ + pastInput: function lexer_pastInput(maxSize, maxLines) { + var past = this.matched.substring(0, this.matched.length - this.match.length); + + if (maxSize < 0) + maxSize = past.length; + else if (!maxSize) + maxSize = 20; + + if (maxLines < 0) + maxLines = past.length; // can't ever have more input lines than this! + else if (!maxLines) + maxLines = 1; + + // `substr` anticipation: treat \r\n as a single character and take a little + // more than necessary so that we can still properly check against maxSize + // after we've transformed and limited the newLines in here: + past = past.substr(-maxSize * 2 - 2); + + // now that we have a significantly reduced string to process, transform the newlines + // and chop them, then limit them: + var a = past.replace(/\r\n|\r/g, '\n').split('\n'); + + a = a.slice(-maxLines); + past = a.join('\n'); + + // When, after limiting to maxLines, we still have too much to return, + // do add an ellipsis prefix... + if (past.length > maxSize) { + past = '...' + past.substr(-maxSize); + } + + return past; + }, + + /** + * return (part of the) upcoming input, i.e. for error messages. + * + * Limit the returned string length to `maxSize` (default: 20). + * + * Limit the returned string to the `maxLines` number of lines of input (default: 1). + * + * Negative limit values equal *unlimited*. + * + * > ### NOTE ### + * > + * > *"upcoming input"* is defined as the whole of the both + * > the *currently lexed* input, together with any remaining input + * > following that. *"currently lexed"* input is the input + * > already recognized by the lexer but not yet returned with + * > the lexer token. This happens when you are invoking this API + * > from inside any lexer rule action code block. + * > + * + * @public + * @this {RegExpLexer} + */ + upcomingInput: function lexer_upcomingInput(maxSize, maxLines) { + var next = this.match; + + if (maxSize < 0) + maxSize = next.length + this._input.length; + else if (!maxSize) + maxSize = 20; + + if (maxLines < 0) + maxLines = maxSize; // can't ever have more input lines than this! + else if (!maxLines) + maxLines = 1; + + // `substring` anticipation: treat \r\n as a single character and take a little + // more than necessary so that we can still properly check against maxSize + // after we've transformed and limited the newLines in here: + if (next.length < maxSize * 2 + 2) { + next += this._input.substring(0, maxSize * 2 + 2); // substring is faster on Chrome/V8 + } + + // now that we have a significantly reduced string to process, transform the newlines + // and chop them, then limit them: + var a = next.replace(/\r\n|\r/g, '\n').split('\n'); + + a = a.slice(0, maxLines); + next = a.join('\n'); + + // When, after limiting to maxLines, we still have too much to return, + // do add an ellipsis postfix... + if (next.length > maxSize) { + next = next.substring(0, maxSize) + '...'; + } + + return next; + }, + + /** + * return a string which displays the character position where the + * lexing error occurred, i.e. 
for error messages + * + * @public + * @this {RegExpLexer} + */ + showPosition: function lexer_showPosition(maxPrefix, maxPostfix) { + var pre = this.pastInput(maxPrefix).replace(/\s/g, ' '); + var c = new Array(pre.length + 1).join('-'); + return pre + this.upcomingInput(maxPostfix).replace(/\s/g, ' ') + '\n' + c + '^'; + }, + + /** + * return a string which displays the lines & columns of input which are referenced + * by the given location info range, plus a few lines of context. + * + * This function pretty-prints the indicated section of the input, with line numbers + * and everything! + * + * This function is very useful to provide highly readable error reports, while + * the location range may be specified in various flexible ways: + * + * - `loc` is the location info object which references the area which should be + * displayed and 'marked up': these lines & columns of text are marked up by `^` + * characters below each character in the entire input range. + * + * - `context_loc` is the *optional* location info object which instructs this + * pretty-printer how much *leading* context should be displayed alongside + * the area referenced by `loc`. This can help provide context for the displayed + * error, etc. + * + * When this location info is not provided, a default context of 3 lines is + * used. + * + * - `context_loc2` is another *optional* location info object, which serves + * a similar purpose to `context_loc`: it specifies the amount of *trailing* + * context lines to display in the pretty-print output. + * + * When this location info is not provided, a default context of 1 line only is + * used. + * + * Special Notes: + * + * - when the `loc`-indicated range is very large (about 5 lines or more), then + * only the first and last few lines of this block are printed while a + * `...continued...` message will be printed between them. + * + * This serves the purpose of not printing a huge amount of text when the `loc` + * range happens to be huge: this way a manageable & readable output results + * for arbitrary large ranges. + * + * - this function can display lines of input which whave not yet been lexed. + * `prettyPrintRange()` can access the entire input! + * + * @public + * @this {RegExpLexer} + */ + prettyPrintRange: function lexer_prettyPrintRange(loc, context_loc, context_loc2) { + var error_size = loc.last_line - loc.first_line; + const CONTEXT = 3; + const CONTEXT_TAIL = 1; + const MINIMUM_VISIBLE_NONEMPTY_LINE_COUNT = 2; + var input = this.matched + this._input; + var lines = input.split('\n'); + + //var show_context = (error_size < 5 || context_loc); + var l0 = Math.max(1, (context_loc ? context_loc.first_line : loc.first_line - CONTEXT)); + + var l1 = Math.max(1, (context_loc2 ? context_loc2.last_line : loc.last_line + CONTEXT_TAIL)); + var lineno_display_width = 1 + Math.log10(l1 | 1) | 0; + var ws_prefix = new Array(lineno_display_width).join(' '); + var nonempty_line_indexes = []; + + var rv = lines.slice(l0 - 1, l1 + 1).map(function injectLineNumber(line, index) { + var lno = index + l0; + var lno_pfx = (ws_prefix + lno).substr(-lineno_display_width); + var rv = lno_pfx + ': ' + line; + var errpfx = new Array(lineno_display_width + 1).join('^'); + var offset = 2 + 1; + var len = 0; + + if (lno === loc.first_line) { + offset += loc.first_column; + + len = Math.max( + 2, + ((lno === loc.last_line ? 
loc.last_column : line.length)) - loc.first_column + 1 + ); + } else if (lno === loc.last_line) { + len = Math.max(2, loc.last_column + 1); + } else if (lno > loc.first_line && lno < loc.last_line) { + len = Math.max(2, line.length + 1); + } + + if (len) { + var lead = new Array(offset).join('.'); + var mark = new Array(len).join('^'); + rv += '\n' + errpfx + lead + mark; + + if (line.trim().length > 0) { + nonempty_line_indexes.push(index); + } + } + + rv = rv.replace(/\t/g, ' '); + return rv; + }); + + // now make sure we don't print an overly large amount of error area: limit it + // to the top and bottom line count: + if (nonempty_line_indexes.length > 2 * MINIMUM_VISIBLE_NONEMPTY_LINE_COUNT) { + var clip_start = nonempty_line_indexes[MINIMUM_VISIBLE_NONEMPTY_LINE_COUNT - 1] + 1; + var clip_end = nonempty_line_indexes[nonempty_line_indexes.length - MINIMUM_VISIBLE_NONEMPTY_LINE_COUNT] - 1; + var intermediate_line = new Array(lineno_display_width + 1).join(' ') + ' (...continued...)'; + intermediate_line += '\n' + new Array(lineno_display_width + 1).join('-') + ' (---------------)'; + rv.splice(clip_start, clip_end - clip_start + 1, intermediate_line); + } + + return rv.join('\n'); + }, + + /** + * helper function, used to produce a human readable description as a string, given + * the input `yylloc` location object. + * + * Set `display_range_too` to TRUE to include the string character index position(s) + * in the description if the `yylloc.range` is available. + * + * @public + * @this {RegExpLexer} + */ + describeYYLLOC: function lexer_describe_yylloc(yylloc, display_range_too) { + var l1 = yylloc.first_line; + var l2 = yylloc.last_line; + var c1 = yylloc.first_column; + var c2 = yylloc.last_column; + var dl = l2 - l1; + var dc = c2 - c1; + var rv; + + if (dl === 0) { + rv = 'line ' + l1 + ', '; + + if (dc <= 1) { + rv += 'column ' + c1; + } else { + rv += 'columns ' + c1 + ' .. ' + c2; + } + } else { + rv = 'lines ' + l1 + '(column ' + c1 + ') .. ' + l2 + '(column ' + c2 + ')'; + } + + if (yylloc.range && display_range_too) { + var r1 = yylloc.range[0]; + var r2 = yylloc.range[1] - 1; + + if (r2 <= r1) { + rv += ' {String Offset: ' + r1 + '}'; + } else { + rv += ' {String Offset range: ' + r1 + ' .. ' + r2 + '}'; + } + } + + return rv; + }, + + /** + * test the lexed token: return FALSE when not a match, otherwise return token. + * + * `match` is supposed to be an array coming out of a regex match, i.e. `match[0]` + * contains the actually matched text string. 
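+     *
+     * (In practice `next()` below supplies these arguments: `match` is the result of
+     * `this._input.match(regexes[i])` and `indexed_rule` is the corresponding entry
+     * from the active condition's rule id list.)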
+ * + * Also move the input cursor forward and update the match collectors: + * + * - `yytext` + * - `yyleng` + * - `match` + * - `matches` + * - `yylloc` + * - `offset` + * + * @public + * @this {RegExpLexer} + */ + test_match: function lexer_test_match(match, indexed_rule) { + var token, lines, backup, match_str, match_str_len; + + if (this.options.backtrack_lexer) { + // save context + backup = { + yylineno: this.yylineno, + + yylloc: { + first_line: this.yylloc.first_line, + last_line: this.yylloc.last_line, + first_column: this.yylloc.first_column, + last_column: this.yylloc.last_column, + range: this.yylloc.range.slice(0) + }, + + yytext: this.yytext, + match: this.match, + matches: this.matches, + matched: this.matched, + yyleng: this.yyleng, + offset: this.offset, + _more: this._more, + _input: this._input, + + //_signaled_error_token: this._signaled_error_token, + yy: this.yy, + + conditionStack: this.conditionStack.slice(0), + done: this.done + }; + } + + match_str = match[0]; + match_str_len = match_str.length; + + // if (match_str.indexOf('\n') !== -1 || match_str.indexOf('\r') !== -1) { + lines = match_str.split(/(?:\r\n?|\n)/g); + + if (lines.length > 1) { + this.yylineno += lines.length - 1; + this.yylloc.last_line = this.yylineno + 1; + this.yylloc.last_column = lines[lines.length - 1].length; + } else { + this.yylloc.last_column += match_str_len; + } + + // } + this.yytext += match_str; + + this.match += match_str; + this.matched += match_str; + this.matches = match; + this.yyleng = this.yytext.length; + this.yylloc.range[1] += match_str_len; + + // previous lex rules MAY have invoked the `more()` API rather than producing a token: + // those rules will already have moved this `offset` forward matching their match lengths, + // hence we must only add our own match length now: + this.offset += match_str_len; + + this._more = false; + this._backtrack = false; + this._input = this._input.slice(match_str_len); + + // calling this method: + // + // function lexer__performAction(yy, yyrulenumber, YY_START) {...} + token = this.performAction.call( + this, + this.yy, + indexed_rule, + this.conditionStack[this.conditionStack.length - 1] /* = YY_START */ + ); + + // otherwise, when the action codes are all simple return token statements: + //token = this.simpleCaseActionClusters[indexed_rule]; + + if (this.done && this._input) { + this.done = false; + } + + if (token) { + return token; + } else if (this._backtrack) { + // recover context + for (var k in backup) { + this[k] = backup[k]; + } + + this.__currentRuleSet__ = null; + return false; // rule action called reject() implying the next rule should be tested instead. + } else if (this._signaled_error_token) { + // produce one 'error' token as `.parseError()` in `reject()` + // did not guarantee a failure signal by throwing an exception! + token = this._signaled_error_token; + + this._signaled_error_token = false; + return token; + } + + return false; + }, + + /** + * return next match in input + * + * @public + * @this {RegExpLexer} + */ + next: function lexer_next() { + if (this.done) { + this.clear(); + return this.EOF; + } + + if (!this._input) { + this.done = true; + } + + var token, match, tempMatch, index; + + if (!this._more) { + this.clear(); + } + + var spec = this.__currentRuleSet__; + + if (!spec) { + // Update the ruleset cache as we apparently encountered a state change or just started lexing. 
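+        // (The cache is invalidated -- reset to `null` -- by `pushState()`, `popState()`
+        // and `setInput()`, i.e. whenever the active condition state may have changed.)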
+ // The cache is set up for fast lookup -- we assume a lexer will switch states much less often than it will + // invoke the `lex()` token-producing API and related APIs, hence caching the set for direct access helps + // speed up those activities a tiny bit. + spec = this.__currentRuleSet__ = this._currentRules(); + + // Check whether a *sane* condition has been pushed before: this makes the lexer robust against + // user-programmer bugs such as https://github.com/zaach/jison-lex/issues/19 + if (!spec || !spec.rules) { + var lineno_msg = ''; + + if (this.options.trackPosition) { + lineno_msg = ' on line ' + (this.yylineno + 1); + } + + var p = this.constructLexErrorInfo( + 'Internal lexer engine error' + lineno_msg + ': The lex grammar programmer pushed a non-existing condition name "' + this.topState() + '"; this is a fatal error and should be reported to the application programmer team!', + false + ); + + // produce one 'error' token until this situation has been resolved, most probably by parse termination! + return this.parseError(p.errStr, p, this.JisonLexerError) || this.ERROR; + } + } + + var rule_ids = spec.rules; + var regexes = spec.__rule_regexes; + var len = spec.__rule_count; + + // Note: the arrays are 1-based, while `len` itself is a valid index, + // hence the non-standard less-or-equal check in the next loop condition! + for (var i = 1; i <= len; i++) { + tempMatch = this._input.match(regexes[i]); + + if (tempMatch && (!match || tempMatch[0].length > match[0].length)) { + match = tempMatch; + index = i; + + if (this.options.backtrack_lexer) { + token = this.test_match(tempMatch, rule_ids[i]); + + if (token !== false) { + return token; + } else if (this._backtrack) { + match = undefined; + continue; // rule action called reject() implying a rule MISmatch. + } else { + // else: this is a lexer rule which consumes input without producing a token (e.g. whitespace) + return false; + } + } else if (!this.options.flex) { + break; + } + } + } + + if (match) { + token = this.test_match(match, rule_ids[index]); + + if (token !== false) { + return token; + } + + // else: this is a lexer rule which consumes input without producing a token (e.g. whitespace) + return false; + } + + if (!this._input) { + this.done = true; + this.clear(); + return this.EOF; + } else { + var lineno_msg = ''; + + if (this.options.trackPosition) { + lineno_msg = ' on line ' + (this.yylineno + 1); + } + + var p = this.constructLexErrorInfo( + 'Lexical error' + lineno_msg + ': Unrecognized text.', + this.options.lexerErrorsAreRecoverable + ); + + var pendingInput = this._input; + var activeCondition = this.topState(); + var conditionStackDepth = this.conditionStack.length; + token = this.parseError(p.errStr, p, this.JisonLexerError) || this.ERROR; + + if (token === this.ERROR) { + // we can try to recover from a lexer error that `parseError()` did not 'recover' for us + // by moving forward at least one character at a time IFF the (user-specified?) `parseError()` + // has not consumed/modified any pending input or changed state in the error handler: + if (!this.matches && // and make sure the input has been modified/consumed ... + pendingInput === this._input && // ...or the lexer state has been modified significantly enough + // to merit a non-consuming error handling action right now. 
+ activeCondition === this.topState() && conditionStackDepth === this.conditionStack.length) { + this.input(); + } + } + + return token; + } + }, + + /** + * return next match that has a token + * + * @public + * @this {RegExpLexer} + */ + lex: function lexer_lex() { + var r; + + // allow the PRE/POST handlers set/modify the return token for maximum flexibility of the generated lexer: + if (typeof this.options.pre_lex === 'function') { + r = this.options.pre_lex.call(this); + } + + while (!r) { + r = this.next(); + } + + if (typeof this.options.post_lex === 'function') { + // (also account for a userdef function which does not return any value: keep the token as is) + r = this.options.post_lex.call(this, r) || r; + } + + return r; + }, + + /** + * backwards compatible alias for `pushState()`; + * the latter is symmetrical with `popState()` and we advise to use + * those APIs in any modern lexer code, rather than `begin()`. + * + * @public + * @this {RegExpLexer} + */ + begin: function lexer_begin(condition) { + return this.pushState(condition); + }, + + /** + * activates a new lexer condition state (pushes the new lexer + * condition state onto the condition stack) + * + * @public + * @this {RegExpLexer} + */ + pushState: function lexer_pushState(condition) { + this.conditionStack.push(condition); + this.__currentRuleSet__ = null; + return this; + }, + + /** + * pop the previously active lexer condition state off the condition + * stack + * + * @public + * @this {RegExpLexer} + */ + popState: function lexer_popState() { + var n = this.conditionStack.length - 1; + + if (n > 0) { + this.__currentRuleSet__ = null; + return this.conditionStack.pop(); + } else { + return this.conditionStack[0]; + } + }, + + /** + * return the currently active lexer condition state; when an index + * argument is provided it produces the N-th previous condition state, + * if available + * + * @public + * @this {RegExpLexer} + */ + topState: function lexer_topState(n) { + n = this.conditionStack.length - 1 - Math.abs(n || 0); + + if (n >= 0) { + return this.conditionStack[n]; + } else { + return 'INITIAL'; + } + }, + + /** + * (internal) determine the lexer rule set which is active for the + * currently active lexer condition state + * + * @public + * @this {RegExpLexer} + */ + _currentRules: function lexer__currentRules() { + if (this.conditionStack.length && this.conditionStack[this.conditionStack.length - 1]) { + return this.conditions[this.conditionStack[this.conditionStack.length - 1]]; + } else { + return this.conditions['INITIAL']; + } + }, + + /** + * return the number of states currently on the stack + * + * @public + * @this {RegExpLexer} + */ + stateStackSize: function lexer_stateStackSize() { + return this.conditionStack.length; + }, + + options: { + xregexp: true, + ranges: true, + trackPosition: true, + parseActionsUseYYMERGELOCATIONINFO: true, + easy_keyword_rules: true + }, + + JisonLexerError: JisonLexerError, + + performAction: function lexer__performAction(yy, yyrulenumber, YY_START) { + var yy_ = this; + var YYSTATE = YY_START; + + switch (yyrulenumber) { + case 2: + /*! Conditions:: action */ + /*! Rule:: \/[^ /]*?['"{}][^ ]*?\/ */ + return 43; // regexp with braces or quotes (and no spaces) + + break; + + case 7: + /*! Conditions:: action */ + /*! Rule:: \{ */ + yy.depth++; + + return 12; + break; + + case 8: + /*! Conditions:: action */ + /*! Rule:: \} */ + if (yy.depth === 0) { + this.popState(); + } else { + yy.depth--; + } + + return 13; + break; + + case 9: + /*! 
Conditions:: token */ + /*! Rule:: {BR} */ + this.popState(); + + break; + + case 10: + /*! Conditions:: token */ + /*! Rule:: %% */ + this.popState(); + + break; + + case 11: + /*! Conditions:: token */ + /*! Rule:: ; */ + this.popState(); + + break; + + case 12: + /*! Conditions:: bnf ebnf */ + /*! Rule:: %% */ + this.pushState('code'); + + return 14; + break; + + case 25: + /*! Conditions:: options */ + /*! Rule:: = */ + this.pushState('option_values'); + + return 3; + break; + + case 26: + /*! Conditions:: option_values */ + /*! Rule:: "{DOUBLEQUOTED_STRING_CONTENT}" */ + yy_.yytext = unescQuote(this.matches[1], /\\"/g); + + this.popState(); + return 29; // value is always a string type + break; + + case 27: + /*! Conditions:: option_values */ + /*! Rule:: '{QUOTED_STRING_CONTENT}' */ + yy_.yytext = unescQuote(this.matches[1], /\\'/g); + + this.popState(); + return 29; // value is always a string type + break; + + case 28: + /*! Conditions:: option_values */ + /*! Rule:: `{ES2017_STRING_CONTENT}` */ + yy_.yytext = unescQuote(this.matches[1], /\\`/g); + + this.popState(); + return 29; // value is always a string type + break; + + case 29: + /*! Conditions:: INITIAL ebnf bnf token path options option_values */ + /*! Rule:: \/\/[^\r\n]* */ + /* skip single-line comment */ + break; + + case 30: + /*! Conditions:: INITIAL ebnf bnf token path options option_values */ + /*! Rule:: \/\*[^]*?\*\/ */ + /* skip multi-line comment */ + break; + + case 31: + /*! Conditions:: option_values */ + /*! Rule:: [^\s\r\n]+ */ + this.popState(); + + return 30; + break; + + case 32: + /*! Conditions:: options */ + /*! Rule:: {BR}{WS}+(?=\S) */ + /* skip leading whitespace on the next line of input, when followed by more options */ + break; + + case 33: + /*! Conditions:: options */ + /*! Rule:: {BR} */ + this.popState(); + + return 28; + break; + + case 34: + /*! Conditions:: options option_values */ + /*! Rule:: {WS}+ */ + /* skip whitespace */ + break; + + case 35: + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: {WS}+ */ + /* skip whitespace */ + break; + + case 36: + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: {BR}+ */ + /* skip newlines */ + break; + + case 37: + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: \[{ID}\] */ + yy_.yytext = this.matches[1]; + + return 39; + break; + + case 42: + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: "{DOUBLEQUOTED_STRING_CONTENT}" */ + yy_.yytext = unescQuote(this.matches[1], /\\"/g); + + return 26; + break; + + case 43: + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: '{QUOTED_STRING_CONTENT}' */ + yy_.yytext = unescQuote(this.matches[1], /\\'/g); + + return 26; + break; + + case 48: + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: %% */ + this.pushState((yy.ebnf ? 'ebnf' : 'bnf')); + + return 14; + break; + + case 49: + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: %ebnf\b */ + yy.ebnf = true; + + return 20; + break; + + case 57: + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: %token\b */ + this.pushState('token'); + + return 18; + break; + + case 59: + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: %options\b */ + this.pushState('options'); + + return 27; + break; + + case 60: + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: %lex{LEX_CONTENT}\/lex\b */ + // remove the %lex../lex wrapper and return the pure lex section: + yy_.yytext = this.matches[1]; + + return 17; + break; + + case 63: + /*! Conditions:: INITIAL ebnf bnf code */ + /*! 
Rule:: %include\b */ + this.pushState('path'); + + return 44; + break; + + case 64: + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: %{NAME}([^\r\n]*) */ + /* ignore unrecognized decl */ + this.warn(rmCommonWS` + EBNF: ignoring unsupported parser option ${dquote(yy_.yytext)} + while lexing in ${dquote(this.topState())} state. + + Erroneous area: + ` + this.prettyPrintRange(yy_.yylloc)); + + yy_.yytext = [ + this.matches[1], // {NAME} + this.matches[2].trim() // optional value/parameters + ]; + + return 21; + break; + + case 65: + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: <{ID}> */ + yy_.yytext = this.matches[1]; + + return 36; + break; + + case 66: + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: \{\{([^]*?)\}\} */ + yy_.yytext = this.matches[1].replace(/\}\\\}/g, '}}'); // unescape any literal '}\}' that exists within the action code block + + return 15; + break; + + case 67: + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: %\{([^]*?)%\} */ + yy_.yytext = this.matches[1].replace(/%\\\}/g, '%}'); // unescape any literal '%\}' that exists within the action code block + + return 15; + break; + + case 68: + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: \{ */ + yy.depth = 0; + + this.pushState('action'); + return 12; + break; + + case 69: + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: ->.* */ + yy_.yytext = yy_.yytext.substr(2, yy_.yyleng - 2).trim(); + + return 42; + break; + + case 70: + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: →.* */ + yy_.yytext = yy_.yytext.substr(1, yy_.yyleng - 1).trim(); + + return 42; + break; + + case 71: + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: =>.* */ + yy_.yytext = yy_.yytext.substr(2, yy_.yyleng - 2).trim(); + + return 42; + break; + + case 72: + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: {HEX_NUMBER} */ + yy_.yytext = parseInt(yy_.yytext, 16); + + return 37; + break; + + case 73: + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: {DECIMAL_NUMBER}(?![xX0-9a-fA-F]) */ + yy_.yytext = parseInt(yy_.yytext, 10); + + return 37; + break; + + case 75: + /*! Conditions:: code */ + /*! Rule:: [^\r\n]+ */ + return 46; // the bit of CODE just before EOF... + + break; + + case 76: + /*! Conditions:: path */ + /*! Rule:: {BR} */ + this.popState(); + + this.unput(yy_.yytext); + break; + + case 77: + /*! Conditions:: path */ + /*! Rule:: "{DOUBLEQUOTED_STRING_CONTENT}" */ + yy_.yytext = unescQuote(this.matches[1]); + + this.popState(); + return 45; + break; + + case 78: + /*! Conditions:: path */ + /*! Rule:: '{QUOTED_STRING_CONTENT}' */ + yy_.yytext = unescQuote(this.matches[1]); + + this.popState(); + return 45; + break; + + case 79: + /*! Conditions:: path */ + /*! Rule:: {WS}+ */ + // skip whitespace in the line + break; + + case 80: + /*! Conditions:: path */ + /*! Rule:: [^\s\r\n]+ */ + this.popState(); + + return 45; + break; + + case 81: + /*! Conditions:: action */ + /*! Rule:: " */ + yy_.yyerror(rmCommonWS` + unterminated string constant in lexer rule action block. + + Erroneous area: + ` + this.prettyPrintRange(yy_.yylloc)); + + return 2; + break; + + case 82: + /*! Conditions:: action */ + /*! Rule:: ' */ + yy_.yyerror(rmCommonWS` + unterminated string constant in lexer rule action block. + + Erroneous area: + ` + this.prettyPrintRange(yy_.yylloc)); + + return 2; + break; + + case 83: + /*! Conditions:: action */ + /*! Rule:: ` */ + yy_.yyerror(rmCommonWS` + unterminated string constant in lexer rule action block. 
+ + Erroneous area: + ` + this.prettyPrintRange(yy_.yylloc)); + + return 2; + break; + + case 84: + /*! Conditions:: option_values */ + /*! Rule:: " */ + yy_.yyerror(rmCommonWS` + unterminated string constant in %options entry. + + Erroneous area: + ` + this.prettyPrintRange(yy_.yylloc)); + + return 2; + break; + + case 85: + /*! Conditions:: option_values */ + /*! Rule:: ' */ + yy_.yyerror(rmCommonWS` + unterminated string constant in %options entry. + + Erroneous area: + ` + this.prettyPrintRange(yy_.yylloc)); + + return 2; + break; + + case 86: + /*! Conditions:: option_values */ + /*! Rule:: ` */ + yy_.yyerror(rmCommonWS` + unterminated string constant in %options entry. + + Erroneous area: + ` + this.prettyPrintRange(yy_.yylloc)); + + return 2; + break; + + case 87: + /*! Conditions:: * */ + /*! Rule:: " */ + var rules = (this.topState() === 'macro' ? 'macro\'s' : this.topState()); + + yy_.yyerror(rmCommonWS` + unterminated string constant encountered while lexing + ${rules}. + + Erroneous area: + ` + this.prettyPrintRange(yy_.yylloc)); + + return 2; + break; + + case 88: + /*! Conditions:: * */ + /*! Rule:: ' */ + var rules = (this.topState() === 'macro' ? 'macro\'s' : this.topState()); + + yy_.yyerror(rmCommonWS` + unterminated string constant encountered while lexing + ${rules}. + + Erroneous area: + ` + this.prettyPrintRange(yy_.yylloc)); + + return 2; + break; + + case 89: + /*! Conditions:: * */ + /*! Rule:: ` */ + var rules = (this.topState() === 'macro' ? 'macro\'s' : this.topState()); + + yy_.yyerror(rmCommonWS` + unterminated string constant encountered while lexing + ${rules}. + + Erroneous area: + ` + this.prettyPrintRange(yy_.yylloc)); + + return 2; + break; + + case 90: + /*! Conditions:: * */ + /*! Rule:: . */ + /* b0rk on bad characters */ + yy_.yyerror(rmCommonWS` + unsupported parser input: ${dquote(yy_.yytext)} + while lexing in ${dquote(this.topState())} state. + + Erroneous area: + ` + this.prettyPrintRange(yy_.yylloc)); + + break; + + default: + return this.simpleCaseActionClusters[yyrulenumber]; + } + }, + + simpleCaseActionClusters: { + /*! Conditions:: action */ + /*! Rule:: \/\*[^]*?\*\/ */ + 0: 43, + + /*! Conditions:: action */ + /*! Rule:: \/\/[^\r\n]* */ + 1: 43, + + /*! Conditions:: action */ + /*! Rule:: "{DOUBLEQUOTED_STRING_CONTENT}" */ + 3: 43, + + /*! Conditions:: action */ + /*! Rule:: '{QUOTED_STRING_CONTENT}' */ + 4: 43, + + /*! Conditions:: action */ + /*! Rule:: [/"'][^{}/"']+ */ + 5: 43, + + /*! Conditions:: action */ + /*! Rule:: [^{}/"']+ */ + 6: 43, + + /*! Conditions:: bnf ebnf */ + /*! Rule:: %empty\b */ + 13: 38, + + /*! Conditions:: bnf ebnf */ + /*! Rule:: %epsilon\b */ + 14: 38, + + /*! Conditions:: bnf ebnf */ + /*! Rule:: \u0190 */ + 15: 38, + + /*! Conditions:: bnf ebnf */ + /*! Rule:: \u025B */ + 16: 38, + + /*! Conditions:: bnf ebnf */ + /*! Rule:: \u03B5 */ + 17: 38, + + /*! Conditions:: bnf ebnf */ + /*! Rule:: \u03F5 */ + 18: 38, + + /*! Conditions:: ebnf */ + /*! Rule:: \( */ + 19: 7, + + /*! Conditions:: ebnf */ + /*! Rule:: \) */ + 20: 8, + + /*! Conditions:: ebnf */ + /*! Rule:: \* */ + 21: 9, + + /*! Conditions:: ebnf */ + /*! Rule:: \? */ + 22: 10, + + /*! Conditions:: ebnf */ + /*! Rule:: \+ */ + 23: 11, + + /*! Conditions:: options */ + /*! Rule:: {NAME} */ + 24: 25, + + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: {ID} */ + 38: 24, + + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: {NAME} */ + 39: 25, + + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: \$end\b */ + 40: 40, + + /*! 
Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: \$eof\b */ + 41: 40, + + /*! Conditions:: token */ + /*! Rule:: [^\s\r\n]+ */ + 44: 'TOKEN_WORD', + + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: : */ + 45: 5, + + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: ; */ + 46: 4, + + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: \| */ + 47: 6, + + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: %debug\b */ + 50: 19, + + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: %parser-type\b */ + 51: 32, + + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: %prec\b */ + 52: 41, + + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: %start\b */ + 53: 16, + + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: %left\b */ + 54: 33, + + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: %right\b */ + 55: 34, + + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: %nonassoc\b */ + 56: 35, + + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: %parse-param\b */ + 58: 31, + + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: %code\b */ + 61: 23, + + /*! Conditions:: token bnf ebnf INITIAL */ + /*! Rule:: %import\b */ + 62: 22, + + /*! Conditions:: code */ + /*! Rule:: [^\r\n]*(\r|\n)+ */ + 74: 46, + + /*! Conditions:: * */ + /*! Rule:: $ */ + 91: 1 + }, + + rules: [ + /* 0: */ new XRegExp('^(?:\\/\\*[^]*?\\*\\/)', ''), + /* 1: */ /^(?:\/\/[^\r\n]*)/, + /* 2: */ /^(?:\/[^ \/]*?['"{}][^ ]*?\/)/, + /* 3: */ /^(?:"((?:\\"|\\[^"]|[^\n\r"\\])*)")/, + /* 4: */ /^(?:'((?:\\'|\\[^']|[^\n\r'\\])*)')/, + /* 5: */ /^(?:[\/"'][^{}\/"']+)/, + /* 6: */ /^(?:[^{}\/"']+)/, + /* 7: */ /^(?:\{)/, + /* 8: */ /^(?:\})/, + /* 9: */ /^(?:(\r\n|\n|\r))/, + /* 10: */ /^(?:%%)/, + /* 11: */ /^(?:;)/, + /* 12: */ /^(?:%%)/, + /* 13: */ /^(?:%empty\b)/, + /* 14: */ /^(?:%epsilon\b)/, + /* 15: */ /^(?:\u0190)/, + /* 16: */ /^(?:\u025B)/, + /* 17: */ /^(?:\u03B5)/, + /* 18: */ /^(?:\u03F5)/, + /* 19: */ /^(?:\()/, + /* 20: */ /^(?:\))/, + /* 21: */ /^(?:\*)/, + /* 22: */ /^(?:\?)/, + /* 23: */ /^(?:\+)/, + /* 24: */ new XRegExp( + '^(?:([\\p{Alphabetic}_](?:[\\p{Alphabetic}\\p{Number}\\-_]*(?:[\\p{Alphabetic}\\p{Number}_]))?))', + '' + ), + /* 25: */ /^(?:=)/, + /* 26: */ /^(?:"((?:\\"|\\[^"]|[^\n\r"\\])*)")/, + /* 27: */ /^(?:'((?:\\'|\\[^']|[^\n\r'\\])*)')/, + /* 28: */ /^(?:`((?:\\`|\\[^`]|[^\\`])*)`)/, + /* 29: */ /^(?:\/\/[^\r\n]*)/, + /* 30: */ new XRegExp('^(?:\\/\\*[^]*?\\*\\/)', ''), + /* 31: */ /^(?:\S+)/, + /* 32: */ /^(?:(\r\n|\n|\r)([^\S\n\r])+(?=\S))/, + /* 33: */ /^(?:(\r\n|\n|\r))/, + /* 34: */ /^(?:([^\S\n\r])+)/, + /* 35: */ /^(?:([^\S\n\r])+)/, + /* 36: */ /^(?:(\r\n|\n|\r)+)/, + /* 37: */ new XRegExp('^(?:\\[([\\p{Alphabetic}_](?:[\\p{Alphabetic}\\p{Number}_])*)\\])', ''), + /* 38: */ new XRegExp('^(?:([\\p{Alphabetic}_](?:[\\p{Alphabetic}\\p{Number}_])*))', ''), + /* 39: */ new XRegExp( + '^(?:([\\p{Alphabetic}_](?:[\\p{Alphabetic}\\p{Number}\\-_]*(?:[\\p{Alphabetic}\\p{Number}_]))?))', + '' + ), + /* 40: */ /^(?:\$end\b)/, + /* 41: */ /^(?:\$eof\b)/, + /* 42: */ /^(?:"((?:\\"|\\[^"]|[^\n\r"\\])*)")/, + /* 43: */ /^(?:'((?:\\'|\\[^']|[^\n\r'\\])*)')/, + /* 44: */ /^(?:\S+)/, + /* 45: */ /^(?::)/, + /* 46: */ /^(?:;)/, + /* 47: */ /^(?:\|)/, + /* 48: */ /^(?:%%)/, + /* 49: */ /^(?:%ebnf\b)/, + /* 50: */ /^(?:%debug\b)/, + /* 51: */ /^(?:%parser-type\b)/, + /* 52: */ /^(?:%prec\b)/, + /* 53: */ /^(?:%start\b)/, + /* 54: */ /^(?:%left\b)/, + /* 55: */ /^(?:%right\b)/, + /* 56: */ /^(?:%nonassoc\b)/, + /* 57: */ /^(?:%token\b)/, + /* 58: */ 
/^(?:%parse-param\b)/, + /* 59: */ /^(?:%options\b)/, + /* 60: */ new XRegExp( + '^(?:%lex((?:[^\\S\\n\\r])*(?:(?:\\r\\n|\\n|\\r)[^]*?)?(?:\\r\\n|\\n|\\r)(?:[^\\S\\n\\r])*)\\/lex\\b)', + '' + ), + /* 61: */ /^(?:%code\b)/, + /* 62: */ /^(?:%import\b)/, + /* 63: */ /^(?:%include\b)/, + /* 64: */ new XRegExp( + '^(?:%([\\p{Alphabetic}_](?:[\\p{Alphabetic}\\p{Number}\\-_]*(?:[\\p{Alphabetic}\\p{Number}_]))?)([^\\n\\r]*))', + '' + ), + /* 65: */ new XRegExp('^(?:<([\\p{Alphabetic}_](?:[\\p{Alphabetic}\\p{Number}_])*)>)', ''), + /* 66: */ new XRegExp('^(?:\\{\\{([^]*?)\\}\\})', ''), + /* 67: */ new XRegExp('^(?:%\\{([^]*?)%\\})', ''), + /* 68: */ /^(?:\{)/, + /* 69: */ /^(?:->.*)/, + /* 70: */ /^(?:→.*)/, + /* 71: */ /^(?:=>.*)/, + /* 72: */ /^(?:(0[Xx][\dA-Fa-f]+))/, + /* 73: */ /^(?:([1-9]\d*)(?![\dA-FXa-fx]))/, + /* 74: */ /^(?:[^\r\n]*(\r|\n)+)/, + /* 75: */ /^(?:[^\r\n]+)/, + /* 76: */ /^(?:(\r\n|\n|\r))/, + /* 77: */ /^(?:"((?:\\"|\\[^"]|[^\n\r"\\])*)")/, + /* 78: */ /^(?:'((?:\\'|\\[^']|[^\n\r'\\])*)')/, + /* 79: */ /^(?:([^\S\n\r])+)/, + /* 80: */ /^(?:\S+)/, + /* 81: */ /^(?:")/, + /* 82: */ /^(?:')/, + /* 83: */ /^(?:`)/, + /* 84: */ /^(?:")/, + /* 85: */ /^(?:')/, + /* 86: */ /^(?:`)/, + /* 87: */ /^(?:")/, + /* 88: */ /^(?:')/, + /* 89: */ /^(?:`)/, + /* 90: */ /^(?:.)/, + /* 91: */ /^(?:$)/ + ], + + conditions: { + 'action': { + rules: [0, 1, 2, 3, 4, 5, 6, 7, 8, 81, 82, 83, 87, 88, 89, 90, 91], + inclusive: false + }, + + 'code': { + rules: [63, 74, 75, 87, 88, 89, 90, 91], + inclusive: false + }, + + 'path': { + rules: [29, 30, 76, 77, 78, 79, 80, 87, 88, 89, 90, 91], + inclusive: false + }, + + 'options': { + rules: [24, 25, 29, 30, 32, 33, 34, 87, 88, 89, 90, 91], + inclusive: false + }, + + 'option_values': { + rules: [26, 27, 28, 29, 30, 31, 34, 84, 85, 86, 87, 88, 89, 90, 91], + inclusive: false + }, + + 'token': { + rules: [ + 9, + 10, + 11, + 29, + 30, + 35, + 36, + 37, + 38, + 39, + 40, + 41, + 42, + 43, + 44, + 45, + 46, + 47, + 48, + 49, + 50, + 51, + 52, + 53, + 54, + 55, + 56, + 57, + 58, + 59, + 60, + 61, + 62, + 64, + 65, + 66, + 67, + 68, + 69, + 70, + 71, + 72, + 73, + 87, + 88, + 89, + 90, + 91 + ], + + inclusive: true + }, + + 'bnf': { + rules: [ + 12, + 13, + 14, + 15, + 16, + 17, + 18, + 29, + 30, + 35, + 36, + 37, + 38, + 39, + 40, + 41, + 42, + 43, + 45, + 46, + 47, + 48, + 49, + 50, + 51, + 52, + 53, + 54, + 55, + 56, + 57, + 58, + 59, + 60, + 61, + 62, + 63, + 64, + 65, + 66, + 67, + 68, + 69, + 70, + 71, + 72, + 73, + 87, + 88, + 89, + 90, + 91 + ], + + inclusive: true + }, + + 'ebnf': { + rules: [ + 12, + 13, + 14, + 15, + 16, + 17, + 18, + 19, + 20, + 21, + 22, + 23, + 29, + 30, + 35, + 36, + 37, + 38, + 39, + 40, + 41, + 42, + 43, + 45, + 46, + 47, + 48, + 49, + 50, + 51, + 52, + 53, + 54, + 55, + 56, + 57, + 58, + 59, + 60, + 61, + 62, + 63, + 64, + 65, + 66, + 67, + 68, + 69, + 70, + 71, + 72, + 73, + 87, + 88, + 89, + 90, + 91 + ], + + inclusive: true + }, + + 'INITIAL': { + rules: [ + 29, + 30, + 35, + 36, + 37, + 38, + 39, + 40, + 41, + 42, + 43, + 45, + 46, + 47, + 48, + 49, + 50, + 51, + 52, + 53, + 54, + 55, + 56, + 57, + 58, + 59, + 60, + 61, + 62, + 63, + 64, + 65, + 66, + 67, + 68, + 69, + 70, + 71, + 72, + 73, + 87, + 88, + 89, + 90, + 91 + ], + + inclusive: true + } + } + }; + + var rmCommonWS = helpers.rmCommonWS; + var dquote = helpers.dquote; + + function indent(s, i) { + var a = s.split('\n'); + var pf = new Array(i + 1).join(' '); + return pf + a.join('\n' + pf); + } + + // unescape a string value which is wrapped in 
quotes/doublequotes + function unescQuote(str) { + str = '' + str; + var a = str.split('\\\\'); + + a = a.map(function(s) { + return s.replace(/\\'/g, '\'').replace(/\\"/g, '"'); + }); + + str = a.join('\\\\'); + return str; + } + + lexer.warn = function l_warn() { + if (this.yy && this.yy.parser && typeof this.yy.parser.warn === 'function') { + return this.yy.parser.warn.apply(this, arguments); + } else { + console.warn.apply(console, arguments); + } + }; + + lexer.log = function l_log() { + if (this.yy && this.yy.parser && typeof this.yy.parser.log === 'function') { + return this.yy.parser.log.apply(this, arguments); + } else { + console.log.apply(console, arguments); + } + }; + + return lexer; +}(); +parser.lexer = lexer; + +function Parser() { + this.yy = {}; +} +Parser.prototype = parser; +parser.Parser = Parser; + +function yyparse() { + return parser.parse.apply(parser, arguments); +} + + + +export default { + parser, + Parser, + parse: yyparse, + +}; + diff --git a/rollup.config.js b/rollup.config.js new file mode 100644 index 0000000..b6bb8df --- /dev/null +++ b/rollup.config.js @@ -0,0 +1,62 @@ +// rollup.config.js +import resolve from 'rollup-plugin-node-resolve'; + +export default { + input: 'ebnf-parser.js', + output: [ + { + file: 'dist/ebnf-parser-cjs.js', + format: 'cjs' + }, + { + file: 'dist/ebnf-parser-es6.js', + format: 'es' + }, + { + file: 'dist/ebnf-parser-umd.js', + name: 'ebnf-parser', + format: 'umd' + } + ], + plugins: [ + resolve({ + // use "module" field for ES6 module if possible + module: true, // Default: true + + // use "main" field or index.js, even if it's not an ES6 module + // (needs to be converted from CommonJS to ES6 + // � see https://github.com/rollup/rollup-plugin-commonjs + main: true, // Default: true + + // not all files you want to resolve are .js files + extensions: [ '.js' ], // Default: ['.js'] + + // whether to prefer built-in modules (e.g. 
`fs`, `path`) or + // local ones with the same names + preferBuiltins: true, // Default: true + + // If true, inspect resolved files to check that they are + // ES2015 modules + modulesOnly: true, // Default: false + }) + ], + external: [ + '@gerhobbelt/ast-util', + '@gerhobbelt/json5', + '@gerhobbelt/nomnom', + '@gerhobbelt/prettier-miscellaneous', + '@gerhobbelt/recast', + '@gerhobbelt/xregexp', + 'jison-helpers-lib', + '@gerhobbelt/lex-parser', + '@gerhobbelt/jison-lex', + '@gerhobbelt/ebnf-parser', + '@gerhobbelt/jison2json', + '@gerhobbelt/json2jison', + 'jison-gho', + 'assert', + 'fs', + 'path', + 'process', + ] +}; diff --git a/tests/all-tests.js b/tests/all-tests.js deleted file mode 100755 index 232b542..0000000 --- a/tests/all-tests.js +++ /dev/null @@ -1,9 +0,0 @@ -#!/usr/bin/env narwhal - -//exports.testBNF = require("./bnf"); -exports.testBNFParse = require("./bnf_parse"); -exports.testEBNF = require("./ebnf"); -exports.testEBNFParse = require("./ebnf_parse"); - -if (require.main === module) - require("test").run(exports); diff --git a/tests/bnf.js b/tests/bnf.js index df1a1b9..f1c88ed 100644 --- a/tests/bnf.js +++ b/tests/bnf.js @@ -1,13 +1,15 @@ -var Jison = require("../setup").Jison, - Lexer = require("../setup").Lexer, - assert = require("assert"); +var assert = require("chai").assert; +var bnf = require("../dist/ebnf-parser-cjs-es5"); -exports["test BNF parser"] = function () { +var Jison = require('../../../../jison/'); // jison-gho + +describe("BNF parser", function () { + it("test BNF production", function () { var grammar = { "lex": { "rules": [ ["\\s+", "/* skip whitespace */"], - ["[a-zA-Z][a-zA-Z0-9_-]*", "return 'ID';"], + ["[a-zA-Z][a-zA-Z0-9_]*", "return 'ID';"], ["\"[^\"]+\"", "yytext = yytext.substr(1, yyleng-2); return 'STRING';"], ["'[^']+'", "yytext = yytext.substr(1, yyleng-2); return 'STRING';"], [":", "return ':';"], @@ -74,7 +76,7 @@ exports["test BNF parser"] = function () { var parser = new Jison.Parser(grammar); parser.yy.addDeclaration = function (grammar, decl) { if (decl.start) { - grammar.start = decl.start + grammar.start = decl.start; } if (decl.operator) { if (!grammar.operators) { @@ -82,10 +84,10 @@ exports["test BNF parser"] = function () { } grammar.operators.push(decl.operator); } - }; var result = parser.parse('%start foo %left "+" "-" %right "*" "/" %nonassoc "=" STUFF %left UMINUS %% foo : bar baz blitz { stuff } %prec GEMINI | bar %prec UMINUS | ;\nbar: { things };\nbaz: | foo ;'); assert.ok(result, "parse bnf production"); -}; + }); +}); diff --git a/tests/bnf_parse.js b/tests/bnf_parse.js index 05d21e0..29312c6 100644 --- a/tests/bnf_parse.js +++ b/tests/bnf_parse.js @@ -1,221 +1,381 @@ -var assert = require("assert"), - bnf = require("../ebnf-parser"); +var assert = require("chai").assert; +var bnf = require("../dist/ebnf-parser-cjs-es5"); -exports["test basic grammar"] = function () { + +function parser_reset() { + if (bnf.bnf_parser.parser.yy) { + var y = bnf.bnf_parser.parser.yy; + if (y.parser) { + delete y.parser; + } + if (y.lexer) { + delete y.lexer; + } + } + + //bnf.bnf_parser.parser.yy = {}; + + var debug = 0; + + if (!debug) { + // silence warn+log messages from the test internals: + bnf.bnf_parser.parser.warn = function bnf_warn() { + // console.warn("TEST WARNING: ", arguments); + }; + + bnf.bnf_parser.parser.log = function bnf_log() { + // console.warn("TEST LOG: ", arguments); + }; + } +} + + +describe("BNF parser", function () { + beforeEach(function beforeEachTest() { + parser_reset(); + }); + + it("test basic 
grammar", function () { var grammar = "%% test: foo bar | baz ; hello: world ;"; var expected = {bnf: {test: ["foo bar", "baz"], hello: ["world"]}}; assert.deepEqual(bnf.parse(grammar), expected, "grammar should be parsed correctly"); -}; + }); -exports["test multiple same rule"] = function () { + it("test multiple same rule", function () { var grammar = "%% test: foo bar | baz ; test: world ;"; var expected = {bnf: {test: ["foo bar", "baz", "world"]}}; assert.deepEqual(bnf.parse(grammar), expected, "grammar should be parsed correctly"); -}; + }); -exports["test classy grammar"] = function () { + it("test classy grammar", function () { var grammar = "%%\n\npgm \n: cdl MAIN LBRACE vdl el RBRACE ENDOFFILE \n; cdl \n: c cdl \n| \n;"; var expected = {bnf: {pgm: ["cdl MAIN LBRACE vdl el RBRACE ENDOFFILE"], cdl: ["c cdl", ""]}}; assert.deepEqual(bnf.parse(grammar), expected, "grammar should be parsed correctly"); -}; + }); -exports["test advanced grammar"] = function () { + it("test advanced grammar", function () { var grammar = "%% test: foo bar {action} | baz ; hello: world %prec UMINUS ;extra: foo %prec '-' {action} ;"; var expected = {bnf: {test: [["foo bar", "action" ], "baz"], hello: [[ "world", {prec:"UMINUS"} ]], extra: [[ "foo", "action", {prec: "-"} ]]}}; assert.deepEqual(bnf.parse(grammar), expected, "grammar should be parsed correctly"); -}; + }); -exports["test nullable rule"] = function () { + it("test nullable rule", function () { var grammar = "%% test: foo bar | ; hello: world ;"; var expected = {bnf: {test: ["foo bar", ""], hello: ["world"]}}; assert.deepEqual(bnf.parse(grammar), expected, "grammar should be parsed correctly"); -}; + }); -exports["test nullable rule with action"] = function () { + it("test nullable rule with action", function () { var grammar = "%% test: foo bar | {action}; hello: world ;"; var expected = {bnf: {test: ["foo bar", [ "", "action" ]], hello: ["world"]}}; assert.deepEqual(bnf.parse(grammar), expected, "grammar should be parsed correctly"); -}; + }); -exports["test nullable rule with %{ %} delimited action"] = function () { - var grammar = "%% test: foo bar | %{action{}%}; hello: world ;"; - var expected = {bnf: {test: ["foo bar", [ "", "action{}" ]], hello: ["world"]}}; + it("test nullable rule with %{ %} delimited action", function () { + var grammar = "%% test: foo bar | %{action={}%}; hello: world ;"; + var expected = {bnf: {test: ["foo bar", [ "", "action={}" ]], hello: ["world"]}}; assert.deepEqual(bnf.parse(grammar), expected, "grammar should be parsed correctly"); -}; + }); -exports["test nullable rule with {{ }} delimited action"] = function () { - var grammar = "%% test: foo bar | {{action{};}}; hello: world ;"; - var expected = {bnf: {test: ["foo bar", [ "", "action{};" ]], hello: ["world"]}}; + it("test nullable rule with {{ }} delimited action", function () { + var grammar = "%% test: foo bar | {{action={};}}; hello: world ;"; + var expected = {bnf: {test: ["foo bar", [ "", "action={};" ]], hello: ["world"]}}; assert.deepEqual(bnf.parse(grammar), expected, "grammar should be parsed correctly"); -}; + }); -exports["test rule with {{ }} delimited action"] = function () { + it("test rule with {{ }} delimited action", function () { var grammar = "%% test: foo bar {{ node({}, node({})); }}; hello: world ;"; var expected = {bnf: {test: [["foo bar"," node({}, node({})); " ]], hello: ["world"]}}; assert.deepEqual(bnf.parse(grammar), expected, "grammar should be parsed correctly"); -}; + }); -exports["test comment"] = function () { + it("test 
comment", function () { var grammar = "/* comment */ %% hello: world ;"; var expected = {bnf: {hello: ["world"]}}; assert.deepEqual(bnf.parse(grammar), expected, "grammar should be parsed correctly"); -}; + }); + + it("test multi-line comment", function () { + var grammar = "/* comment\n comment\n comment */ %% hello: world ;"; + var expected = {bnf: {hello: ["world"]}}; + + assert.deepEqual(bnf.parse(grammar), expected, "grammar should be parsed correctly"); + }); -exports["test single line comment"] = function () { + it("test single line comment", function () { var grammar = "//comment \n %% hello: world ;"; var expected = {bnf: {hello: ["world"]}}; assert.deepEqual(bnf.parse(grammar), expected, "grammar should be parse comment"); -}; + }); -exports["test comment with nested *"] = function () { + it("test comment with nested *", function () { var grammar = "/* comment * not done */ %% hello: /* oh hai */ world ;"; var expected = {bnf: {hello: ["world"]}}; assert.deepEqual(bnf.parse(grammar), expected, "grammar should be parsed correctly"); -}; + }); -exports["test token"] = function () { + it("test comment with nested //", function () { + var grammar = "/* comment // nested ** not done */ %% hello: /* oh hai */ world ;"; + var expected = {bnf: {hello: ["world"]}}; + + assert.deepEqual(bnf.parse(grammar), expected, "grammar should be parsed correctly"); + + var grammar2 = "/* comment \n// nested ** not done */ %% hello: /* oh hai */ world ;"; + + assert.deepEqual(bnf.parse(grammar2), expected, "grammar should be parsed correctly"); + }); + + it("test token", function () { var grammar = "%token blah\n%% test: foo bar | baz ; hello: world ;"; - var expected = {bnf: {test: ["foo bar", "baz"], hello: ["world"]}}; + var expected = {bnf: {test: ["foo bar", "baz"], hello: ["world"]}, + extra_tokens: [{id: "blah"}]}; assert.deepEqual(bnf.parse(grammar), expected, "grammar should be parsed correctly"); -}; + }); -exports["test token with type"] = function () { + it("test token with type", function () { var grammar = "%type blah\n%% test: foo bar | baz ; hello: world ;"; - var expected = {bnf: {test: ["foo bar", "baz"], hello: ["world"]}}; + var expected = {bnf: {test: ["foo bar", "baz"], hello: ["world"]}, unknownDecls: [['type', ' blah']]}; assert.deepEqual(bnf.parse(grammar), expected, "grammar should be parsed correctly"); -}; + }); -exports["test embedded lexical block"] = function () { + it("test embedded lexical block", function () { var grammar = "%lex \n%%\n'foo' return 'foo';\n'bar' {return 'bar';}\n'baz' {return 'baz';}\n'world' {return 'world';}\n/lex\ %% test: foo bar | baz ; hello: world ;"; var expected = { lex: { + macros: {}, + rules: [ + ["foo", "return 'foo';"], + ["bar", "return 'bar';"], + ["baz", "return 'baz';"], + ["world", "return 'world';"] + ], + startConditions: {}, + unknownDecls: [] + }, + bnf: {test: ["foo bar", "baz"], hello: ["world"]} + }; + + assert.deepEqual(bnf.parse(grammar), expected, "grammar should be parsed correctly"); + }); + + it("test lexer %options easy_keyword_rules", function () { + var grammar = "%lex \n%options easy_keyword_rules\n%%\n'foo' return 'foo';\n'bar' {return 'bar';}\n'baz' {return 'baz';}\n'world' {return 'world';}\n/lex\ + %% test: foo bar | baz ; hello: world ;"; + var expected = { + lex: { + macros: {}, rules: [ ["foo\\b", "return 'foo';"], ["bar\\b", "return 'bar';"], ["baz\\b", "return 'baz';"], ["world\\b", "return 'world';"] - ] + ], + options: { + easy_keyword_rules: true + }, + startConditions: {}, + unknownDecls: [] }, bnf: 
{test: ["foo bar", "baz"], hello: ["world"]} }; assert.deepEqual(bnf.parse(grammar), expected, "grammar should be parsed correctly"); -}; + }); -exports["test balanced braces"] = function () { + it("test balanced braces", function () { var grammar = "%% test: foo bar { node({}, node({foo:'bar'})); }; hello: world ;"; var expected = {bnf: {test: [["foo bar"," node({}, node({foo:'bar'})); " ]], hello: ["world"]}}; assert.deepEqual(bnf.parse(grammar), expected, "grammar should be parsed correctly"); -}; + }); -exports["test brace within a multi-line comment"] = function () { + it("test brace within a multi-line comment", function () { var grammar = "%% test: foo bar { node({}, 3 / 4); /* { */ }; hello: world ;"; var expected = {bnf: {test: [["foo bar"," node({}, 3 / 4); /* { */ " ]], hello: ["world"]}}; assert.deepEqual(bnf.parse(grammar), expected, "grammar should be parsed correctly"); -}; + }); -exports["test brace within a single-line comment"] = function () { + it("test brace within a single-line comment", function () { var grammar = "%% test: foo bar { node({}); // {\n }; hello: world ;"; var expected = {bnf: {test: [["foo bar"," node({}); // {\n " ]], hello: ["world"]}}; assert.deepEqual(bnf.parse(grammar), expected, "grammar should be parsed correctly"); -}; + }); -exports["test brace within a string"] = function () { + it("test brace within a string", function () { var grammar = "%% test: foo bar { node({}, 3 / 4, '{'); /* { */ }; hello: world ;"; var expected = {bnf: {test: [["foo bar"," node({}, 3 / 4, '{'); /* { */ " ]], hello: ["world"]}}; assert.deepEqual(bnf.parse(grammar), expected, "grammar should be parsed correctly"); -}; + }); -exports["test brace within a string with double quotes"] = function () { + it("test brace within a string with double quotes", function () { var grammar = "%% test: foo bar { node({}, 3 / 4, \"{\"); /* { */ }; hello: world ;"; var expected = {bnf: {test: [["foo bar"," node({}, 3 / 4, \"{\"); /* { */ " ]], hello: ["world"]}}; assert.deepEqual(bnf.parse(grammar), expected, "grammar should be parsed correctly"); -}; + }); -exports["test uneven braces and quotes within regex"] = function () { + it("test uneven braces and quotes within regex", function () { var grammar = "%% test: foo bar { node({}, 3 / 4, \"{\"); /{'\"/g; 1 / 2; }; hello: world { blah / bah };"; var expected = {bnf: {test: [["foo bar"," node({}, 3 / 4, \"{\"); /{'\"/g; 1 / 2; " ]], hello: [["world", " blah / bah "]]}}; assert.deepEqual(bnf.parse(grammar), expected, "grammar should be parsed correctly"); -}; + }); -exports["test code declaration"] = function () { + it("test code declaration", function () { var grammar = "%{var foo = 'bar';%}\n%%hello: world;"; var expected = {bnf: {hello: ["world"]}, moduleInclude: "var foo = 'bar';"}; assert.deepEqual(bnf.parse(grammar), expected, "grammar should be parsed correctly"); -}; + }); -exports["test remainder code"] = function () { + it("test remainder code", function () { var grammar = "%%hello: world;%%var foo = 'bar';"; var expected = {bnf: {hello: ["world"]}, moduleInclude: "var foo = 'bar';"}; assert.deepEqual(bnf.parse(grammar), expected, "grammar should be parsed correctly"); -}; + }); -exports["test remainder and declarations code"] = function () { + it("test remainder and declarations code", function () { var grammar = "%{test;%}\n%%hello: world;%%var foo = 'bar';"; var expected = {bnf: {hello: ["world"]}, moduleInclude: "test;var foo = 'bar';"}; assert.deepEqual(bnf.parse(grammar), expected, "grammar should be parsed correctly"); 
-}; + }); -exports["test expression action"] = function () { + it("test expression action", function () { var grammar = "%% test: foo bar -> $foo\n;"; - var expected = {bnf: {test: [["foo bar","$$ = $foo;"]]}}; + var expected = {bnf: {test: [["foo bar","$$ = $foo"]]}}; assert.deepEqual(bnf.parse(grammar), expected, "grammar should be parsed correctly"); -}; + }); -exports["test quote in rule"] = function () { + it("test quote in rule", function () { var grammar = "%lex\n%%\n\\' return \"'\"\n/lex\n%% test: foo bar \"'\";"; var expected = {lex: { + macros: {}, rules: [ ["'", "return \"'\""] - ] + ], + startConditions: {}, + unknownDecls: [] }, - bnf: {test: ["foo bar '"]}}; + bnf: {test: ["foo bar \"'\""]}}; assert.deepEqual(bnf.parse(grammar), expected, "grammar should be parsed correctly"); -}; + }); -exports["test windows line endings"] = function () { + it("test windows line endings", function () { var grammar = "%{baz\r\n%}%% test: foo bar | {\r\naction;\r\nhi};\r\nhello: world ;%%foo;\r\nbar;"; var expected = {bnf: {test: ["foo bar", [ "", "\r\naction;\r\nhi" ]], hello: ["world"]}, moduleInclude: 'baz\r\nfoo;\r\nbar;'}; assert.deepEqual(bnf.parse(grammar), expected, "grammar should be parsed correctly"); -}; + }); -exports["test parse params"] = function () { + it("test parse params", function () { var grammar = "%parse-param first second\n%%hello: world;%%"; var expected = {bnf: {hello: ["world"]}, parseParams: ["first", "second"]}; assert.deepEqual(bnf.parse(grammar), expected, "grammar should be parsed correctly"); -}; + }); -exports["test options"] = function () { + it("test boolean options", function () { var grammar = "%options one two\n%%hello: world;%%"; var expected = {bnf: {hello: ["world"]}, options: {one: true, two: true}}; assert.deepEqual(bnf.parse(grammar), expected, "grammar should be parsed correctly"); -}; + }); + + it("test if %options names with a hyphen are correctly recognized", function () { + var grammar = '%options bug-a-boo\n%%hello: world;%%'; + var expected = { + bnf: { + hello: ["world"] + }, + options: { + "bug-a-boo": true + } + }; + + assert.deepEqual(bnf.parse(grammar), expected, "grammar should be parsed correctly"); + }); + + it("test options with values", function () { + var grammar = '%options ping=666 bla=blub bool1 s1="s1value" s2=\'s2value\' s3=false s4="false"\n%%hello: world;%%'; + var expected = { + bnf: { + hello: ["world"] + }, + options: { + ping: 666, + bla: "blub", + bool1: true, + s1: "s1value", + s2: "s2value", + s3: false, + s4: "false" + } + }; + + assert.deepEqual(bnf.parse(grammar), expected, "grammar should be parsed correctly"); + }); + + it("test options spread across multiple lines", function () { + var grammar = '%options ping=666\n bla=blub\n bool1\n s1="s1value"\n s2=\'s2value\'\n s3=false\n s4="false"\n%%hello: world;%%'; + var expected = { + bnf: { + hello: ["world"] + }, + options: { + ping: 666, + bla: "blub", + bool1: true, + s1: "s1value", + s2: "s2value", + s3: false, + s4: "false" + } + }; + + assert.deepEqual(bnf.parse(grammar), expected, "grammar should be parsed correctly"); + }); + + it("test options with string values which have embedded quotes", function () { + var grammar = '%options s1="s1\\"val\'ue" s2=\'s2\\\\x\\\'val\"ue\'\n%%\nhello: world;\n%%'; + var expected = { + bnf: { + hello: ["world"] + }, + options: { + s1: "s1\"val'ue", + s2: "s2\\\\x'val\"ue" + } + }; + + assert.deepEqual(bnf.parse(grammar), expected, "grammar should be parsed correctly"); + }); + + it("test unknown decls", function () { + var 
grammar = "%foo bar\n%foo baz\n%qux { fizzle }\n%%hello: world;%%"; + var expected = {bnf: {hello: ["world"]}, unknownDecls: [['foo', 'bar'], ['foo', 'baz'], ['qux', '{ fizzle }']]}; + + assert.deepEqual(bnf.parse(grammar), expected, "grammar should be parsed correctly"); + }); +}); diff --git a/tests/ebnf.js b/tests/ebnf.js index a9f2ebd..d813194 100644 --- a/tests/ebnf.js +++ b/tests/ebnf.js @@ -1,6 +1,8 @@ -var assert = require("assert"), - ebnf = require("../ebnf-transform"); -var Parser = require('jison').Parser; +var assert = require("chai").assert; +var bnf = require("../dist/ebnf-parser-cjs-es5"); +var ebnf = bnf.ebnf_parser; +var Jison = require('../../../../jison/'); // jison-gho +var Parser = Jison.Parser; function testParse(top, strings) { return function() { @@ -10,10 +12,14 @@ function testParse(top, strings) { ["\\s+", ''], ["[A-Za-z]+", "return 'word';"], [",", "return ',';"], + ["\"'", "return \"\\\"'\";"], + ["'", "return \"'\";"], + ['"', "return '\"';"], ["$", "return 'EOF';"] ] }, "start": "top", + "ebnf": {"top": [top]}, "bnf": ebnf.transform({"top": [top]}) }; strings = (typeof(strings) === 'string' ? [strings] : strings); @@ -39,7 +45,9 @@ function testBadParse(top, strings) { }; strings = (typeof(strings) === 'string' ? [strings] : strings); strings.forEach(function(string) { - assert.throws(function () {new Parser(grammar).parse(string);}) + assert.throws(function () { + new Parser(grammar).parse(string); + }); }); }; } @@ -56,6 +64,7 @@ function testAlias(top, obj, str) { ] }, "start": "top", + "ebnf": {"top": [top]}, "bnf": ebnf.transform({"top": [top]}) }; assert.deepEqual(grammar['bnf'], obj); @@ -79,27 +88,62 @@ var tests = { "test repeat (+) on multiple words": testParse("word+ EOF", "multiple words"), "test option (?) on empty string": testParse("word? EOF", ""), "test option (?) on single word": testParse("word? EOF", "oneword"), +// "test single quote (') tokens": testParse("'\\'' EOF", "'"), + "test single quote (') tokens (alt.)": testParse("\"'\" EOF", "'"), +// "test double quote (\") tokens": testParse("\"\\\"\" EOF", "\""), + "test double quote (\") tokens (alt.)": testParse("'\"' EOF", "\""), +// "test quoted tokens (edge case #1)": testParse("'\"\\'' EOF", "\"'"), // a weird 'token' consisting of a single AND a double-quote: either way, one of them will end up being escaped! + "test quoted tokens (edge case #2)": testParse('"\\"\'" EOF', "\"'"), // a weird 'token' consisting of a single AND a double-quote: either way, one of them will end up being escaped! "test group () on simple phrase": testParse("(word word) EOF", "two words"), "test group () with multiple options on first option": testParse("((word word) | word) EOF", "hi there"), "test group () with multiple options on second option": testParse("((word word) | word) EOF", "hi"), "test complex expression ( *, ?, () )": testParse("(word (',' word)*)? 
EOF ", ["", "hi", "hi, there"]), "test named repeat (*)": testAlias("word*[bob] EOF", { top: [ 'bob EOF' ], - bob: [ [ '', '$$ = [];' ], [ 'bob word', '$1.push($2);' ] ] }, "word"), + bob: [ [ '', '$$ = [];' ], [ 'bob word', '$1.push($2);\n$$ = $1;' ] ] }, "word"), "test named repeat (+)": testAlias("word+[bob] EOF", { top: [ 'bob EOF' ], - bob: [ [ 'word', '$$ = [$1];' ], [ 'bob word', '$1.push($2);' ] ] }, "wordy word"), + bob: [ [ 'word', '$$ = [$1];' ], [ 'bob word', '$1.push($2);\n$$ = $1;' ] ] }, "wordy word"), "test named group ()": testAlias("word[alice] (',' word)*[bob] EOF", - {"top":["word[alice] bob EOF"],"bob":[["","$$ = [];"],["bob , word","$1.push($2);"]]}, + {"top":["word[alice] bob EOF"],"bob":[["","$$ = [];"],["bob ',' word","$1.push([$2, $3]);\n$$ = $1;"]]}, "one, two"), - "test named option (?)": testAlias("word[alex] word?[bob] EOF", { top: [ 'word[alex] bob EOF' ], bob: [ '', 'word' ] }, "oneor two"), + "test nested named groups ()": testAlias("word[alice] (',' (word word)*[uncle] )*[bob] EOF", + {"top":["word[alice] bob EOF"],"bob":[["","$$ = [];"],["bob ',' uncle","$1.push([$2, $3]);\n$$ = $1;"]],"uncle":[["","$$ = [];"],["uncle word word","$1.push([$2, $3]);\n$$ = $1;"]]}, + "one, two three four five"), + "test named group () without wildcard operator": testAlias("word[alice] (',' word)[bob] EOF", + {"top":["word[alice] bob EOF"],"bob":[["',' word","$$ = [$1, $2];"]]}, + "one, two"), + "test unnamed group () without wildcard operator": testAlias("word[alice] (',' word) EOF", + {"top":["word[alice] ',' word EOF"]}, + "one, two"), + "test nested unnamed groups () without wildcard operator #1": testAlias("word[alice] ( (',' word) ) EOF", + {"top":["word[alice] ',' word EOF"]}, + "one, two"), + "test nested unnamed groups () without wildcard operator #2": testAlias("word[alice] ( ',' ( word word) ) EOF", + {"top":["word[alice] ',' word word EOF"]}, + "one, two three"), + "test nested named groups () mix #1": testAlias("word[alice] (',' (word word)[uncle] )*[bob] EOF", + {"top":["word[alice] bob EOF"],"bob":[["","$$ = [];"],["bob ',' uncle","$1.push([$2, $3]);\n$$ = $1;"]],"uncle":[["word word","$$ = [$1, $2];"]]}, + "one, two three, four five"), + "test nested named groups () mix #2": testAlias("word[alice] (',' (word word) )*[bob] EOF", + {"top":["word[alice] bob EOF"],"bob":[["","$$ = [];"],["bob ',' word word","$1.push([$2, $3, $4]);\n$$ = $1;"]]}, + "one, two three, four five"), + "test nested named groups () mix #3": testAlias("word[alice] (',' (word word) (word word) )*[bob] EOF", + {"top":["word[alice] bob EOF"],"bob":[["","$$ = [];"],["bob ',' word word word word","$1.push([$2, $3, $4, $5, $6]);\n$$ = $1;"]]}, + "one, two three four five, six seven eight nine"), + "test nested named groups () mix #4": testAlias("word[alice] (',' (word)[uncle] )*[bob] EOF", + {"top":["word[alice] bob EOF"],"bob":[["","$$ = [];"],["bob ',' uncle","$1.push([$2, $3]);\n$$ = $1;"]],"uncle":[["word","$$ = $1;"]]}, + "one, two, three, four"), + "test named option (?)": testAlias("word[alex] word?[bob] EOF", { top: [ 'word[alex] bob EOF' ], bob: [['', '$$ = undefined;'], ['word', '$$ = $1;']] }, "oneor two"), "test named complex expression (())": testAlias("word[alpha] (word[alex] (word[bob] word[carol] ',')+[david] word ',')*[enoch] EOF", - {"top":["word[alpha] enoch EOF"],"david":[["word[bob] word[carol] ,","$$ = [$1];"],["david word[bob] word[carol] ,","$1.push($2);"]], - "enoch":[["","$$ = [];"],["enoch word[alex] david word ,","$1.push($2);"]]}, + {"top":["word[alpha] enoch 
EOF"],"david":[["word[bob] word[carol] ','","$$ = [[$1, $2, $3]];"],["david word[bob] word[carol] ','","$1.push([$2, $3, $4]);\n$$ = $1;"]], + "enoch":[["","$$ = [];"],["enoch word[alex] david word ','","$1.push([$2, $3, $4, $5]);\n$$ = $1;"]]}, "one two three four, five," ) }; -for (var test in tests) { - exports[test] = tests[test]; -} +describe("EBNF", function () { + for (var test in tests) { + it(test, tests[test]); + } +}); diff --git a/tests/ebnf_parse.js b/tests/ebnf_parse.js index 36fcf48..fb347e7 100644 --- a/tests/ebnf_parse.js +++ b/tests/ebnf_parse.js @@ -1,13 +1,17 @@ -var assert = require("assert"), - bnf = require("../ebnf-parser"), - ebnf = require("../ebnf-transform"); +var assert = require("chai").assert; +var bnf = require("../dist/ebnf-parser-cjs-es5"); +var ebnf = bnf.ebnf_parser; function testParse(top, strings) { return function() { var expected = { + "options": { + "ebnf": true + }, + "ebnf": {"top": [top]}, "bnf": ebnf.transform({"top": [top]}) }; - var grammar = "%ebnf\n%%\ntop : "+top+";"; + var grammar = "%ebnf\n%%\ntop : " + top + ";"; assert.deepEqual(bnf.parse(grammar), expected); }; } @@ -30,9 +34,11 @@ var tests = { "test group () on simple phrase": testParse("(word word) EOF", "two words"), "test group () with multiple options on first option": testParse("((word word) | word) EOF", "hi there"), "test group () with multiple options on second option": testParse("((word word) | word) EOF", "hi"), - "test complex expression ( *, ?, () )": testParse("(word (',' word)*)? EOF", ["", "hi", "hi, there"]) + "test complex expression ( *, ?, () )": testParse("(word (\",\" word)*)? EOF", ["", "hi", "hi, there"]) }; -for (var test in tests) { - exports[test] = tests[test]; -} +describe("EBNF parser", function () { + for (var test in tests) { + it(test, tests[test]); + } +}); diff --git a/tests/index.html b/tests/index.html new file mode 100644 index 0000000..a73b4e8 --- /dev/null +++ b/tests/index.html @@ -0,0 +1,28 @@ + + + + EBNF Parser Tests + + + + + +
+ + + + + + + + + + + + + + diff --git a/transform-parser.js b/transform-parser.js index 4ef7195..53dcf67 100644 --- a/transform-parser.js +++ b/transform-parser.js @@ -1,629 +1,3278 @@ -/* parser generated by jison 0.4.11 */ + +// hack: +var assert; + +/* parser generated by jison 0.6.1-205 */ + /* - Returns a Parser object of the following structure: + * Returns a Parser object of the following structure: + * + * Parser: { + * yy: {} The so-called "shared state" or rather the *source* of it; + * the real "shared state" `yy` passed around to + * the rule actions, etc. is a derivative/copy of this one, + * not a direct reference! + * } + * + * Parser.prototype: { + * yy: {}, + * EOF: 1, + * TERROR: 2, + * + * trace: function(errorMessage, ...), + * + * JisonParserError: function(msg, hash), + * + * quoteName: function(name), + * Helper function which can be overridden by user code later on: put suitable + * quotes around literal IDs in a description string. + * + * originalQuoteName: function(name), + * The basic quoteName handler provided by JISON. + * `cleanupAfterParse()` will clean up and reset `quoteName()` to reference this function + * at the end of the `parse()`. + * + * describeSymbol: function(symbol), + * Return a more-or-less human-readable description of the given symbol, when + * available, or the symbol itself, serving as its own 'description' for lack + * of something better to serve up. + * + * Return NULL when the symbol is unknown to the parser. + * + * symbols_: {associative list: name ==> number}, + * terminals_: {associative list: number ==> name}, + * nonterminals: {associative list: rule-name ==> {associative list: number ==> rule-alt}}, + * terminal_descriptions_: (if there are any) {associative list: number ==> description}, + * productions_: [...], + * + * performAction: function parser__performAction(yytext, yyleng, yylineno, yyloc, yystate, yysp, yyvstack, yylstack, yystack, yysstack), + * + * The function parameters and `this` have the following value/meaning: + * - `this` : reference to the `yyval` internal object, which has members (`$` and `_$`) + * to store/reference the rule value `$$` and location info `@$`. + * + * One important thing to note about `this` a.k.a. `yyval`: every *reduce* action gets + * to see the same object via the `this` reference, i.e. if you wish to carry custom + * data from one reduce action through to the next within a single parse run, then you + * may get nasty and use `yyval` a.k.a. `this` for storing you own semi-permanent data. + * + * `this.yy` is a direct reference to the `yy` shared state object. + * + * `%parse-param`-specified additional `parse()` arguments have been added to this `yy` + * object at `parse()` start and are therefore available to the action code via the + * same named `yy.xxxx` attributes (where `xxxx` represents a identifier name from + * the %parse-param` list. + * + * - `yytext` : reference to the lexer value which belongs to the last lexer token used + * to match this rule. This is *not* the look-ahead token, but the last token + * that's actually part of this rule. + * + * Formulated another way, `yytext` is the value of the token immediately preceeding + * the current look-ahead token. + * Caveats apply for rules which don't require look-ahead, such as epsilon rules. + * + * - `yyleng` : ditto as `yytext`, only now for the lexer.yyleng value. + * + * - `yylineno`: ditto as `yytext`, only now for the lexer.yylineno value. 
+ * + * - `yyloc` : ditto as `yytext`, only now for the lexer.yylloc lexer token location info. + * + * WARNING: since jison 0.4.18-186 this entry may be NULL/UNDEFINED instead + * of an empty object when no suitable location info can be provided. + * + * - `yystate` : the current parser state number, used internally for dispatching and + * executing the action code chunk matching the rule currently being reduced. + * + * - `yysp` : the current state stack position (a.k.a. 'stack pointer') + * + * This one comes in handy when you are going to do advanced things to the parser + * stacks, all of which are accessible from your action code (see the next entries below). + * + * Also note that you can access this and other stack index values using the new double-hash + * syntax, i.e. `##$ === ##0 === yysp`, while `##1` is the stack index for all things + * related to the first rule term, just like you have `$1`, `@1` and `#1`. + * This is made available to write very advanced grammar action rules, e.g. when you want + * to investigate the parse state stack in your action code, which would, for example, + * be relevant when you wish to implement error diagnostics and reporting schemes similar + * to the work described here: + * + * + Pottier, F., 2016. Reachability and error diagnosis in LR(1) automata. + * In Journées Francophones des Languages Applicatifs. + * + * + Jeffery, C.L., 2003. Generating LR syntax error messages from examples. + * ACM Transactions on Programming Languages and Systems (TOPLAS), 25(5), pp.631–640. + * + * - `yyrulelength`: the current rule's term count, i.e. the number of entries occupied on the stack. + * + * This one comes in handy when you are going to do advanced things to the parser + * stacks, all of which are accessible from your action code (see the next entries below). + * + * - `yyvstack`: reference to the parser value stack. Also accessed via the `$1` etc. + * constructs. + * + * - `yylstack`: reference to the parser token location stack. Also accessed via + * the `@1` etc. constructs. + * + * WARNING: since jison 0.4.18-186 this array MAY contain slots which are + * UNDEFINED rather than an empty (location) object, when the lexer/parser + * action code did not provide a suitable location info object when such a + * slot was filled! + * + * - `yystack` : reference to the parser token id stack. Also accessed via the + * `#1` etc. constructs. + * + * Note: this is a bit of a **white lie** as we can statically decode any `#n` reference to + * its numeric token id value, hence that code wouldn't need the `yystack` but *you* might + * want access this array for your own purposes, such as error analysis as mentioned above! + * + * Note that this stack stores the current stack of *tokens*, that is the sequence of + * already parsed=reduced *nonterminals* (tokens representing rules) and *terminals* + * (lexer tokens *shifted* onto the stack until the rule they belong to is found and + * *reduced*. + * + * - `yysstack`: reference to the parser state stack. This one carries the internal parser + * *states* such as the one in `yystate`, which are used to represent + * the parser state machine in the *parse table*. *Very* *internal* stuff, + * what can I say? If you access this one, you're clearly doing wicked things + * + * - `...` : the extra arguments you specified in the `%parse-param` statement in your + * grammar definition file. 
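+ *
+ *      Illustrative sketch (reconstructed from the generated `performAction`
+ *      code further below; not an excerpt from the grammar source): a rule such as
+ *
+ *          handle_list : handle_list '|' handle
+ *                          { $1.push($3); $$ = $1; }
+ *
+ *      is compiled into a `performAction` case where every `$n` reference has been
+ *      rewritten into a value-stack access relative to `yysp`:
+ *
+ *          case 3:
+ *              // Production:: handle_list : handle_list "|" handle
+ *              yyvstack[yysp - 2].push(yyvstack[yysp]);    // $1.push($3);
+ *              this.$ = yyvstack[yysp - 2];                // $$ = $1;
+ *              break;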
+ * + * table: [...], + * State transition table + * ---------------------- + * + * index levels are: + * - `state` --> hash table + * - `symbol` --> action (number or array) + * + * If the `action` is an array, these are the elements' meaning: + * - index [0]: 1 = shift, 2 = reduce, 3 = accept + * - index [1]: GOTO `state` + * + * If the `action` is a number, it is the GOTO `state` + * + * defaultActions: {...}, + * + * parseError: function(str, hash, ExceptionClass), + * yyError: function(str, ...), + * yyRecovering: function(), + * yyErrOk: function(), + * yyClearIn: function(), + * + * constructParseErrorInfo: function(error_message, exception_object, expected_token_set, is_recoverable), + * Helper function **which will be set up during the first invocation of the `parse()` method**. + * Produces a new errorInfo 'hash object' which can be passed into `parseError()`. + * See it's use in this parser kernel in many places; example usage: + * + * var infoObj = parser.constructParseErrorInfo('fail!', null, + * parser.collect_expected_token_set(state), true); + * var retVal = parser.parseError(infoObj.errStr, infoObj, parser.JisonParserError); + * + * originalParseError: function(str, hash, ExceptionClass), + * The basic `parseError` handler provided by JISON. + * `cleanupAfterParse()` will clean up and reset `parseError()` to reference this function + * at the end of the `parse()`. + * + * options: { ... parser %options ... }, + * + * parse: function(input[, args...]), + * Parse the given `input` and return the parsed value (or `true` when none was provided by + * the root action, in which case the parser is acting as a *matcher*). + * You MAY use the additional `args...` parameters as per `%parse-param` spec of this grammar: + * these extra `args...` are added verbatim to the `yy` object reference as member variables. + * + * WARNING: + * Parser's additional `args...` parameters (via `%parse-param`) MAY conflict with + * any attributes already added to `yy` by the jison run-time; + * when such a collision is detected an exception is thrown to prevent the generated run-time + * from silently accepting this confusing and potentially hazardous situation! + * + * The lexer MAY add its own set of additional parameters (via the `%parse-param` line in + * the lexer section of the grammar spec): these will be inserted in the `yy` shared state + * object and any collision with those will be reported by the lexer via a thrown exception. + * + * cleanupAfterParse: function(resultValue, invoke_post_methods, do_not_nuke_errorinfos), + * Helper function **which will be set up during the first invocation of the `parse()` method**. + * This helper API is invoked at the end of the `parse()` call, unless an exception was thrown + * and `%options no-try-catch` has been defined for this grammar: in that case this helper MAY + * be invoked by calling user code to ensure the `post_parse` callbacks are invoked and + * the internal parser gets properly garbage collected under these particular circumstances. + * + * yyMergeLocationInfo: function(first_index, last_index, first_yylloc, last_yylloc, dont_look_back), + * Helper function **which will be set up during the first invocation of the `parse()` method**. + * This helper API can be invoked to calculate a spanning `yylloc` location info object. + * + * Note: %epsilon rules MAY specify no `first_index` and `first_yylloc`, in which case + * this function will attempt to obtain a suitable location marker by inspecting the location stack + * backwards. 
+ * + * For more info see the documentation comment further below, immediately above this function's + * implementation. + * + * lexer: { + * yy: {...}, A reference to the so-called "shared state" `yy` once + * received via a call to the `.setInput(input, yy)` lexer API. + * EOF: 1, + * ERROR: 2, + * JisonLexerError: function(msg, hash), + * parseError: function(str, hash, ExceptionClass), + * setInput: function(input, [yy]), + * input: function(), + * unput: function(str), + * more: function(), + * reject: function(), + * less: function(n), + * pastInput: function(n), + * upcomingInput: function(n), + * showPosition: function(), + * test_match: function(regex_match_array, rule_index, ...), + * next: function(...), + * lex: function(...), + * begin: function(condition), + * pushState: function(condition), + * popState: function(), + * topState: function(), + * _currentRules: function(), + * stateStackSize: function(), + * cleanupAfterLex: function() + * + * options: { ... lexer %options ... }, + * + * performAction: function(yy, yy_, $avoiding_name_collisions, YY_START, ...), + * rules: [...], + * conditions: {associative list: name ==> set}, + * } + * } + * + * + * token location info (@$, _$, etc.): { + * first_line: n, + * last_line: n, + * first_column: n, + * last_column: n, + * range: [start_number, end_number] + * (where the numbers are indexes into the input string, zero-based) + * } + * + * --- + * + * The `parseError` function receives a 'hash' object with these members for lexer and + * parser errors: + * + * { + * text: (matched text) + * token: (the produced terminal token, if any) + * token_id: (the produced terminal token numeric ID, if any) + * line: (yylineno) + * loc: (yylloc) + * } + * + * parser (grammar) errors will also provide these additional members: + * + * { + * expected: (array describing the set of expected tokens; + * may be UNDEFINED when we cannot easily produce such a set) + * state: (integer (or array when the table includes grammar collisions); + * represents the current internal state of the parser kernel. + * can, for example, be used to pass to the `collect_expected_token_set()` + * API to obtain the expected token set) + * action: (integer; represents the current internal action which will be executed) + * new_state: (integer; represents the next/planned internal state, once the current + * action has executed) + * recoverable: (boolean: TRUE when the parser MAY have an error recovery rule + * available for this particular error) + * state_stack: (array: the current parser LALR/LR internal state stack; this can be used, + * for instance, for advanced error analysis and reporting) + * value_stack: (array: the current parser LALR/LR internal `$$` value stack; this can be used, + * for instance, for advanced error analysis and reporting) + * location_stack: (array: the current parser LALR/LR internal location stack; this can be used, + * for instance, for advanced error analysis and reporting) + * yy: (object: the current parser internal "shared state" `yy` + * as is also available in the rule actions; this can be used, + * for instance, for advanced error analysis and reporting) + * lexer: (reference to the current lexer instance used by the parser) + * parser: (reference to the current parser instance) + * } + * + * while `this` will reference the current parser instance. 
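+ *
+ *      A minimal sketch of overriding the error handler through the shared state
+ *      (the handler name and body are illustrative only, not part of this parser):
+ *
+ *          parser.yy.parseError = function my_parseError(str, hash, ExceptionClass) {
+ *              console.error('parse failed at line ' + hash.line + ': ' + str);
+ *              if (!hash.recoverable) {
+ *                  throw new ExceptionClass(str, hash);
+ *              }
+ *          };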
+ * + * When `parseError` is invoked by the lexer, `this` will still reference the related *parser* + * instance, while these additional `hash` fields will also be provided: + * + * { + * lexer: (reference to the current lexer instance which reported the error) + * } + * + * When `parseError` is invoked by the parser due to a **JavaScript exception** being fired + * from either the parser or lexer, `this` will still reference the related *parser* + * instance, while these additional `hash` fields will also be provided: + * + * { + * exception: (reference to the exception thrown) + * } + * + * Please do note that in the latter situation, the `expected` field will be omitted as + * this type of failure is assumed not to be due to *parse errors* but rather due to user + * action code in either parser or lexer failing unexpectedly. + * + * --- + * + * You can specify parser options by setting / modifying the `.yy` object of your Parser instance. + * These options are available: + * + * ### options which are global for all parser instances + * + * Parser.pre_parse: function(yy) + * optional: you can specify a pre_parse() function in the chunk following + * the grammar, i.e. after the last `%%`. + * Parser.post_parse: function(yy, retval, parseInfo) { return retval; } + * optional: you can specify a post_parse() function in the chunk following + * the grammar, i.e. after the last `%%`. When it does not return any value, + * the parser will return the original `retval`. + * + * ### options which can be set up per parser instance + * + * yy: { + * pre_parse: function(yy) + * optional: is invoked before the parse cycle starts (and before the first + * invocation of `lex()`) but immediately after the invocation of + * `parser.pre_parse()`). + * post_parse: function(yy, retval, parseInfo) { return retval; } + * optional: is invoked when the parse terminates due to success ('accept') + * or failure (even when exceptions are thrown). + * `retval` contains the return value to be produced by `Parser.parse()`; + * this function can override the return value by returning another. + * When it does not return any value, the parser will return the original + * `retval`. + * This function is invoked immediately before `parser.post_parse()`. + * + * parseError: function(str, hash, ExceptionClass) + * optional: overrides the default `parseError` function. + * quoteName: function(name), + * optional: overrides the default `quoteName` function. + * } + * + * parser.lexer.options: { + * pre_lex: function() + * optional: is invoked before the lexer is invoked to produce another token. + * `this` refers to the Lexer object. + * post_lex: function(token) { return token; } + * optional: is invoked when the lexer has produced a token `token`; + * this function can override the returned token value by returning another. + * When it does not return any (truthy) value, the lexer will return + * the original `token`. + * `this` refers to the Lexer object. + * + * ranges: boolean + * optional: `true` ==> token location info will include a .range[] member. + * flex: boolean + * optional: `true` ==> flex-like lexing behaviour where the rules are tested + * exhaustively to find the longest match. + * backtrack_lexer: boolean + * optional: `true` ==> lexer regexes are tested in order and for invoked; + * the lexer terminates the scan when a token is returned by the action code. + * xregexp: boolean + * optional: `true` ==> lexer rule regexes are "extended regex format" requiring the + * `XRegExp` library. 
When this `%option` has not been specified at compile time, all lexer + * rule regexes have been written as standard JavaScript RegExp expressions. + * } + */ + + +import XRegExp from '@gerhobbelt/xregexp'; // for helping out the `%options xregexp` in the lexer; + +// See also: +// http://stackoverflow.com/questions/1382107/whats-a-good-way-to-extend-error-in-javascript/#35881508 +// but we keep the prototype.constructor and prototype.name assignment lines too for compatibility +// with userland code which might access the derived class in a 'classic' way. +function JisonParserError(msg, hash) { + Object.defineProperty(this, 'name', { + enumerable: false, + writable: false, + value: 'JisonParserError' + }); + + if (msg == null) msg = '???'; + + Object.defineProperty(this, 'message', { + enumerable: false, + writable: true, + value: msg + }); + + this.hash = hash; + + var stacktrace; + if (hash && hash.exception instanceof Error) { + var ex2 = hash.exception; + this.message = ex2.message || msg; + stacktrace = ex2.stack; + } + if (!stacktrace) { + if (Error.hasOwnProperty('captureStackTrace')) { // V8/Chrome engine + Error.captureStackTrace(this, this.constructor); + } else { + stacktrace = (new Error(msg)).stack; + } + } + if (stacktrace) { + Object.defineProperty(this, 'stack', { + enumerable: false, + writable: false, + value: stacktrace + }); + } +} + +if (typeof Object.setPrototypeOf === 'function') { + Object.setPrototypeOf(JisonParserError.prototype, Error.prototype); +} else { + JisonParserError.prototype = Object.create(Error.prototype); +} +JisonParserError.prototype.constructor = JisonParserError; +JisonParserError.prototype.name = 'JisonParserError'; + + + + // helper: reconstruct the productions[] table + function bp(s) { + var rv = []; + var p = s.pop; + var r = s.rule; + for (var i = 0, l = p.length; i < l; i++) { + rv.push([ + p[i], + r[i] + ]); + } + return rv; + } + + + + + + // helper: reconstruct the 'goto' table + function bt(s) { + var rv = []; + var d = s.len; + var y = s.symbol; + var t = s.type; + var a = s.state; + var m = s.mode; + var g = s.goto; + for (var i = 0, l = d.length; i < l; i++) { + var n = d[i]; + var q = {}; + for (var j = 0; j < n; j++) { + var z = y.shift(); + switch (t.shift()) { + case 2: + q[z] = [ + m.shift(), + g.shift() + ]; + break; + + case 0: + q[z] = a.shift(); + break; + + default: + // type === 1: accept + q[z] = [ + 3 + ]; + } + } + rv.push(q); + } + return rv; + } + + + + // helper: runlength encoding with increment step: code, length: step (default step = 0) + // `this` references an array + function s(c, l, a) { + a = a || 0; + for (var i = 0; i < l; i++) { + this.push(c); + c += a; + } + } + + // helper: duplicate sequence from *relative* offset and length. + // `this` references an array + function c(i, l) { + i = this.length - i; + for (l += i; i < l; i++) { + this.push(this[i]); + } + } + + // helper: unpack an array using helpers and data, all passed in an array argument 'a'. + function u(a) { + var rv = []; + for (var i = 0, l = a.length; i < l; i++) { + var e = a[i]; + // Is this entry a helper function? + if (typeof e === 'function') { + i++; + e.apply(rv, a[i]); + } else { + rv.push(e); + } + } + return rv; + } + + +var parser = { + // Code Generator Information Report + // --------------------------------- + // + // Options: + // + // default action mode: ............. classic,merge + // no try..catch: ................... false + // no default resolve on conflict: false + // on-demand look-ahead: ............ 
false + // error recovery token skip maximum: 3 + // yyerror in parse actions is: ..... NOT recoverable, + // yyerror in lexer actions and other non-fatal lexer are: + // .................................. NOT recoverable, + // debug grammar/output: ............ false + // has partial LR conflict upgrade: true + // rudimentary token-stack support: false + // parser table compression mode: ... 2 + // export debug tables: ............. false + // export *all* tables: ............. false + // module type: ..................... es + // parser engine type: .............. lalr + // output main() in the module: ..... true + // has user-specified main(): ....... false + // has user-specified require()/import modules for main(): + // .................................. false + // number of expected conflicts: .... 0 + // + // + // Parser Analysis flags: + // + // no significant actions (parser is a language matcher only): + // .................................. false + // uses yyleng: ..................... false + // uses yylineno: ................... false + // uses yytext: ..................... false + // uses yylloc: ..................... false + // uses ParseError API: ............. false + // uses YYERROR: .................... false + // uses YYRECOVERING: ............... false + // uses YYERROK: .................... false + // uses YYCLEARIN: .................. false + // tracks rule values: .............. true + // assigns rule values: ............. true + // uses location tracking: .......... false + // assigns location: ................ false + // uses yystack: .................... false + // uses yysstack: ................... false + // uses yysp: ....................... true + // uses yyrulelength: ............... false + // uses yyMergeLocationInfo API: .... false + // has error recovery: .............. false + // has error reporting: ............. false + // + // --------- END OF REPORT ----------- + +trace: function no_op_trace() {}, +JisonParserError: JisonParserError, +yy: {}, +options: { + type: "lalr", + hasPartialLrUpgradeOnConflict: true, + errorRecoveryTokenDiscardCount: 3 +}, +symbols_: { + "$accept": 0, + "$end": 1, + "(": 4, + ")": 5, + "*": 6, + "+": 8, + "?": 7, + "ALIAS": 9, + "EOF": 1, + "SYMBOL": 10, + "error": 2, + "expression": 16, + "handle": 13, + "handle_list": 12, + "production": 11, + "rule": 14, + "suffix": 17, + "suffixed_expression": 15, + "|": 3 +}, +terminals_: { + 1: "EOF", + 2: "error", + 3: "|", + 4: "(", + 5: ")", + 6: "*", + 7: "?", + 8: "+", + 9: "ALIAS", + 10: "SYMBOL" +}, +TERROR: 2, +EOF: 1, + +// internals: defined here so the object *structure* doesn't get modified by parse() et al, +// thus helping JIT compilers like Chrome V8. +originalQuoteName: null, +originalParseError: null, +cleanupAfterParse: null, +constructParseErrorInfo: null, +yyMergeLocationInfo: null, + +__reentrant_call_depth: 0, // INTERNAL USE ONLY +__error_infos: [], // INTERNAL USE ONLY: the set of parseErrorInfo objects created since the last cleanup +__error_recovery_infos: [], // INTERNAL USE ONLY: the set of parseErrorInfo objects created since the last cleanup + +// APIs which will be set up depending on user action code analysis: +//yyRecovering: 0, +//yyErrOk: 0, +//yyClearIn: 0, + +// Helper APIs +// ----------- + +// Helper function which can be overridden by user code later on: put suitable quotes around +// literal IDs in a description string. 
+quoteName: function parser_quoteName(id_str) { + return '"' + id_str + '"'; +}, + +// Return the name of the given symbol (terminal or non-terminal) as a string, when available. +// +// Return NULL when the symbol is unknown to the parser. +getSymbolName: function parser_getSymbolName(symbol) { + if (this.terminals_[symbol]) { + return this.terminals_[symbol]; + } + + // Otherwise... this might refer to a RULE token i.e. a non-terminal: see if we can dig that one up. + // + // An example of this may be where a rule's action code contains a call like this: + // + // parser.getSymbolName(#$) + // + // to obtain a human-readable name of the current grammar rule. + var s = this.symbols_; + for (var key in s) { + if (s[key] === symbol) { + return key; + } + } + return null; +}, + +// Return a more-or-less human-readable description of the given symbol, when available, +// or the symbol itself, serving as its own 'description' for lack of something better to serve up. +// +// Return NULL when the symbol is unknown to the parser. +describeSymbol: function parser_describeSymbol(symbol) { + if (symbol !== this.EOF && this.terminal_descriptions_ && this.terminal_descriptions_[symbol]) { + return this.terminal_descriptions_[symbol]; + } else if (symbol === this.EOF) { + return 'end of input'; + } + var id = this.getSymbolName(symbol); + if (id) { + return this.quoteName(id); + } + return null; +}, + +// Produce a (more or less) human-readable list of expected tokens at the point of failure. +// +// The produced list may contain token or token set descriptions instead of the tokens +// themselves to help turning this output into something that easier to read by humans +// unless `do_not_describe` parameter is set, in which case a list of the raw, *numeric*, +// expected terminals and nonterminals is produced. +// +// The returned list (array) will not contain any duplicate entries. +collect_expected_token_set: function parser_collect_expected_token_set(state, do_not_describe) { + var TERROR = this.TERROR; + var tokenset = []; + var check = {}; + // Has this (error?) state been outfitted with a custom expectations description text for human consumption? + // If so, use that one instead of the less palatable token set. + if (!do_not_describe && this.state_descriptions_ && this.state_descriptions_[state]) { + return [this.state_descriptions_[state]]; + } + for (var p in this.table[state]) { + p = +p; + if (p !== TERROR) { + var d = do_not_describe ? p : this.describeSymbol(p); + if (d && !check[d]) { + tokenset.push(d); + check[d] = true; // Mark this token description as already mentioned to prevent outputting duplicate entries. + } + } + } + return tokenset; +}, +productions_: bp({ + pop: u([ + 11, + 12, + 12, + 13, + 13, + 14, + 14, + 15, + 15, + 16, + 16, + s, + [17, 4] +]), + rule: u([ + 2, + 1, + 3, + 0, + 1, + 1, + 2, + 3, + c, + [8, 6], + 1 +]) +}), +performAction: function parser__PerformAction(yystate /* action[1] */, yysp, yyvstack) { + + /* this == yyval */ + + // the JS engine itself can go and remove these statements when `yy` turns out to be unused in any action code! + var yy = this.yy; + var yyparser = yy.parser; + var yylexer = yy.lexer; + + + + switch (yystate) { +case 0: + /*! Production:: $accept : production $end */ + + // default action (generated by JISON mode classic/merge :: VT,VA,-,-,-,-,-,-): + this.$ = yyvstack[yysp - 1]; + // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,-,-,-,-) + break; + +case 1: + /*! 
Production:: production : handle EOF */ + + // default action (generated by JISON mode classic/merge :: VT,VA,-,-,-,-,-,-): + this.$ = yyvstack[yysp - 1]; + // END of default action (generated by JISON mode classic/merge :: VT,VA,-,-,-,-,-,-) + + + return yyvstack[yysp - 1]; + break; + +case 2: + /*! Production:: handle_list : handle */ +case 6: + /*! Production:: rule : suffixed_expression */ + + this.$ = [yyvstack[yysp]]; + break; + +case 3: + /*! Production:: handle_list : handle_list "|" handle */ + + yyvstack[yysp - 2].push(yyvstack[yysp]); + this.$ = yyvstack[yysp - 2]; + break; + +case 4: + /*! Production:: handle : %epsilon */ + + this.$ = []; + break; + +case 5: + /*! Production:: handle : rule */ +case 13: + /*! Production:: suffix : "*" */ +case 14: + /*! Production:: suffix : "?" */ +case 15: + /*! Production:: suffix : "+" */ + + this.$ = yyvstack[yysp]; + break; + +case 7: + /*! Production:: rule : rule suffixed_expression */ + + yyvstack[yysp - 1].push(yyvstack[yysp]); + this.$ = yyvstack[yysp - 1]; + break; + +case 8: + /*! Production:: suffixed_expression : expression suffix ALIAS */ + + this.$ = ['xalias', yyvstack[yysp - 1], yyvstack[yysp - 2], yyvstack[yysp]]; + break; + +case 9: + /*! Production:: suffixed_expression : expression suffix */ + + if (yyvstack[yysp]) { + this.$ = [yyvstack[yysp], yyvstack[yysp - 1]]; + } else { + this.$ = yyvstack[yysp - 1]; + } + break; + +case 10: + /*! Production:: expression : SYMBOL */ + + this.$ = ['symbol', yyvstack[yysp]]; + break; + +case 11: + /*! Production:: expression : "(" handle_list ")" */ + + this.$ = ['()', yyvstack[yysp - 1]]; + break; + +case 12: + /*! Production:: suffix : %epsilon */ + + this.$ = undefined; + break; + +} +}, +table: bt({ + len: u([ + 8, + 1, + 1, + 7, + 0, + 10, + 0, + 9, + 0, + 0, + 6, + s, + [0, 3], + 2, + s, + [0, 3], + 8, + 0 +]), + symbol: u([ + 1, + 4, + 10, + 11, + s, + [13, 4, 1], + s, + [1, 3], + 3, + 4, + 5, + 10, + c, + [9, 3], + s, + [3, 8, 1], + 17, + c, + [16, 4], + s, + [12, 5, 1], + c, + [19, 4], + 9, + 10, + 3, + 5, + c, + [17, 4], + c, + [16, 4] +]), + type: u([ + s, + [2, 3], + s, + [0, 5], + 1, + s, + [2, 6], + 0, + 0, + s, + [2, 9], + c, + [10, 5], + s, + [0, 5], + s, + [2, 12], + s, + [0, 4] +]), + state: u([ + s, + [1, 5, 1], + 9, + 5, + 10, + 14, + 15, + c, + [8, 3], + 19, + c, + [4, 3] +]), + mode: u([ + 2, + s, + [1, 3], + 2, + 2, + 1, + 2, + c, + [5, 3], + c, + [7, 3], + c, + [12, 4], + c, + [13, 9], + c, + [15, 3], + c, + [5, 4] +]), + goto: u([ + 4, + 7, + 6, + 8, + 5, + 5, + 7, + 5, + 6, + s, + [12, 4], + 11, + 12, + 13, + 12, + 12, + 4, + 7, + 4, + 6, + s, + [9, 4], + 16, + 9, + 18, + 17, + c, + [12, 4] +]) +}), +defaultActions: { + 4: 6, + 6: 10, + 8: 1, + 9: 7, + 11: 13, + 12: 14, + 13: 15, + 15: 2, + 16: 8, + 17: 11, + 19: 3 +}, +parseError: function parseError(str, hash, ExceptionClass) { + if (hash.recoverable && typeof this.trace === 'function') { + this.trace(str); + hash.destroy(); // destroy... well, *almost*! 
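+        // Recoverable errors end here: they are merely routed through `trace()`
+        // (a no-op unless overridden) and their error info hash is torn down again;
+        // only non-recoverable errors fall through to the `throw` below.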
+ } else { + if (!ExceptionClass) { + ExceptionClass = this.JisonParserError; + } + throw new ExceptionClass(str, hash); + } +}, +parse: function parse(input) { + var self = this; + var stack = new Array(128); // token stack: stores token which leads to state at the same index (column storage) + var sstack = new Array(128); // state stack: stores states (column storage) + + var vstack = new Array(128); // semantic value stack + + var table = this.table; + var sp = 0; // 'stack pointer': index into the stacks + + + var yylineno; + + + var symbol = 0; + + + + var TERROR = this.TERROR; + var EOF = this.EOF; + var ERROR_RECOVERY_TOKEN_DISCARD_COUNT = (this.options.errorRecoveryTokenDiscardCount | 0) || 3; + var NO_ACTION = [0, 20 /* === table.length :: ensures that anyone using this new state will fail dramatically! */]; + + var lexer; + if (this.__lexer__) { + lexer = this.__lexer__; + } else { + lexer = this.__lexer__ = Object.create(this.lexer); + } + + var sharedState_yy = { + parseError: undefined, + quoteName: undefined, + lexer: undefined, + parser: undefined, + pre_parse: undefined, + post_parse: undefined, + pre_lex: undefined, + post_lex: undefined // WARNING: must be written this way for the code expanders to work correctly in both ES5 and ES6 modes! + }; + + var ASSERT; + if (typeof assert !== 'function') { + ASSERT = function JisonAssert(cond, msg) { + if (!cond) { + throw new Error('assertion failed: ' + (msg || '***')); + } + }; + } else { + ASSERT = assert; + } + + this.yyGetSharedState = function yyGetSharedState() { + return sharedState_yy; + }; + + + + + + + + + function shallow_copy_noclobber(dst, src) { + for (var k in src) { + if (typeof dst[k] === 'undefined' && Object.prototype.hasOwnProperty.call(src, k)) { + dst[k] = src[k]; + } + } + } + + // copy state + shallow_copy_noclobber(sharedState_yy, this.yy); + + sharedState_yy.lexer = lexer; + sharedState_yy.parser = this; + + + + + + + // Does the shared state override the default `parseError` that already comes with this instance? + if (typeof sharedState_yy.parseError === 'function') { + this.parseError = function parseErrorAlt(str, hash, ExceptionClass) { + if (!ExceptionClass) { + ExceptionClass = this.JisonParserError; + } + return sharedState_yy.parseError.call(this, str, hash, ExceptionClass); + }; + } else { + this.parseError = this.originalParseError; + } + + // Does the shared state override the default `quoteName` that already comes with this instance? + if (typeof sharedState_yy.quoteName === 'function') { + this.quoteName = function quoteNameAlt(id_str) { + return sharedState_yy.quoteName.call(this, id_str); + }; + } else { + this.quoteName = this.originalQuoteName; + } + + // set up the cleanup function; make it an API so that external code can re-use this one in case of + // calamities or when the `%options no-try-catch` option has been specified for the grammar, in which + // case this parse() API method doesn't come with a `finally { ... }` block any more! + // + // NOTE: as this API uses parse() as a closure, it MUST be set again on every parse() invocation, + // or else your `sharedState`, etc. references will be *wrong*! 
+ this.cleanupAfterParse = function parser_cleanupAfterParse(resultValue, invoke_post_methods, do_not_nuke_errorinfos) { + var rv; + + if (invoke_post_methods) { + var hash; + + if (sharedState_yy.post_parse || this.post_parse) { + // create an error hash info instance: we re-use this API in a **non-error situation** + // as this one delivers all parser internals ready for access by userland code. + hash = this.constructParseErrorInfo(null /* no error! */, null /* no exception! */, null, false); + } + + if (sharedState_yy.post_parse) { + rv = sharedState_yy.post_parse.call(this, sharedState_yy, resultValue, hash); + if (typeof rv !== 'undefined') resultValue = rv; + } + if (this.post_parse) { + rv = this.post_parse.call(this, sharedState_yy, resultValue, hash); + if (typeof rv !== 'undefined') resultValue = rv; + } + + // cleanup: + if (hash && hash.destroy) { + hash.destroy(); + } + } + + if (this.__reentrant_call_depth > 1) return resultValue; // do not (yet) kill the sharedState when this is a reentrant run. + + // clean up the lingering lexer structures as well: + if (lexer.cleanupAfterLex) { + lexer.cleanupAfterLex(do_not_nuke_errorinfos); + } + + // prevent lingering circular references from causing memory leaks: + if (sharedState_yy) { + sharedState_yy.lexer = undefined; + sharedState_yy.parser = undefined; + if (lexer.yy === sharedState_yy) { + lexer.yy = undefined; + } + } + sharedState_yy = undefined; + this.parseError = this.originalParseError; + this.quoteName = this.originalQuoteName; + + // nuke the vstack[] array at least as that one will still reference obsoleted user values. + // To be safe, we nuke the other internal stack columns as well... + stack.length = 0; // fastest way to nuke an array without overly bothering the GC + sstack.length = 0; + + vstack.length = 0; + sp = 0; + + // nuke the error hash info instances created during this run. + // Userland code must COPY any data/references + // in the error hash instance(s) it is more permanently interested in. + if (!do_not_nuke_errorinfos) { + for (var i = this.__error_infos.length - 1; i >= 0; i--) { + var el = this.__error_infos[i]; + if (el && typeof el.destroy === 'function') { + el.destroy(); + } + } + this.__error_infos.length = 0; + + + } + + return resultValue; + }; + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + // NOTE: as this API uses parse() as a closure, it MUST be set again on every parse() invocation, + // or else your `lexer`, `sharedState`, etc. references will be *wrong*! + this.constructParseErrorInfo = function parser_constructParseErrorInfo(msg, ex, expected, recoverable) { + var pei = { + errStr: msg, + exception: ex, + text: lexer.match, + value: lexer.yytext, + token: this.describeSymbol(symbol) || symbol, + token_id: symbol, + line: lexer.yylineno, + + expected: expected, + recoverable: recoverable, + state: state, + action: action, + new_state: newState, + symbol_stack: stack, + state_stack: sstack, + value_stack: vstack, + + stack_pointer: sp, + yy: sharedState_yy, + lexer: lexer, + parser: this, + + // and make sure the error info doesn't stay due to potential + // ref cycle via userland code manipulations. + // These would otherwise all be memory leak opportunities! 
+ // + // Note that only array and object references are nuked as those + // constitute the set of elements which can produce a cyclic ref. + // The rest of the members is kept intact as they are harmless. + destroy: function destructParseErrorInfo() { + // remove cyclic references added to error info: + // info.yy = null; + // info.lexer = null; + // info.value = null; + // info.value_stack = null; + // ... + var rec = !!this.recoverable; + for (var key in this) { + if (this.hasOwnProperty(key) && typeof key === 'object') { + this[key] = undefined; + } + } + this.recoverable = rec; + } + }; + // track this instance so we can `destroy()` it once we deem it superfluous and ready for garbage collection! + this.__error_infos.push(pei); + return pei; + }; + + + + + + + + + + + + + + function getNonTerminalFromCode(symbol) { + var tokenName = self.getSymbolName(symbol); + if (!tokenName) { + tokenName = symbol; + } + return tokenName; + } + + + function lex() { + var token = lexer.lex(); + // if token isn't its numeric value, convert + if (typeof token !== 'number') { + token = self.symbols_[token] || token; + } + + return token || EOF; + } + + + var state, action, r, t; + var yyval = { + $: true, + _$: undefined, + yy: sharedState_yy + }; + var p; + var yyrulelen; + var this_production; + var newState; + var retval = false; + + + try { + this.__reentrant_call_depth++; + + lexer.setInput(input, sharedState_yy); + + + + vstack[sp] = null; + sstack[sp] = 0; + stack[sp] = 0; + ++sp; + + + + + + if (this.pre_parse) { + this.pre_parse.call(this, sharedState_yy); + } + if (sharedState_yy.pre_parse) { + sharedState_yy.pre_parse.call(this, sharedState_yy); + } + + newState = sstack[sp - 1]; + for (;;) { + // retrieve state number from top of stack + state = newState; // sstack[sp - 1]; + + // use default actions if available + if (this.defaultActions[state]) { + action = 2; + newState = this.defaultActions[state]; + } else { + // The single `==` condition below covers both these `===` comparisons in a single + // operation: + // + // if (symbol === null || typeof symbol === 'undefined') ... + if (!symbol) { + symbol = lex(); + } + // read action for current state and first input + t = (table[state] && table[state][symbol]) || NO_ACTION; + newState = t[1]; + action = t[0]; + + + + + + + + + + + + // handle parse error + if (!action) { + var errStr; + var errSymbolDescr = (this.describeSymbol(symbol) || symbol); + var expected = this.collect_expected_token_set(state); + + // Report error + if (typeof lexer.yylineno === 'number') { + errStr = 'Parse error on line ' + (lexer.yylineno + 1) + ': '; + } else { + errStr = 'Parse error: '; + } + if (typeof lexer.showPosition === 'function') { + errStr += '\n' + lexer.showPosition(79 - 10, 10) + '\n'; + } + if (expected.length) { + errStr += 'Expecting ' + expected.join(', ') + ', got unexpected ' + errSymbolDescr; + } else { + errStr += 'Unexpected ' + errSymbolDescr; + } + // we cannot recover from the error! + p = this.constructParseErrorInfo(errStr, null, expected, false); + retval = this.parseError(p.errStr, p, this.JisonParserError); + break; + } + + + } + + + + + + + + + + + switch (action) { + // catch misc. 
parse failures: + default: + // this shouldn't happen, unless resolve defaults are off + if (action instanceof Array) { + p = this.constructParseErrorInfo('Parse Error: multiple actions possible at state: ' + state + ', token: ' + symbol, null, null, false); + retval = this.parseError(p.errStr, p, this.JisonParserError); + break; + } + // Another case of better safe than sorry: in case state transitions come out of another error recovery process + // or a buggy LUT (LookUp Table): + p = this.constructParseErrorInfo('Parsing halted. No viable error recovery approach available due to internal system failure.', null, null, false); + retval = this.parseError(p.errStr, p, this.JisonParserError); + break; + + // shift: + case 1: + stack[sp] = symbol; + vstack[sp] = lexer.yytext; + + sstack[sp] = newState; // push state + + ++sp; + symbol = 0; + + + // Pick up the lexer details for the current symbol as that one is not 'look-ahead' any more: + + + + + + + + + + + + + + + + continue; + + // reduce: + case 2: + this_production = this.productions_[newState - 1]; // `this.productions_[]` is zero-based indexed while states start from 1 upwards... + yyrulelen = this_production[1]; + + + + + + + + + + + r = this.performAction.call(yyval, newState, sp - 1, vstack); + + if (typeof r !== 'undefined') { + retval = r; + break; + } + + // pop off stack + sp -= yyrulelen; + + // don't overwrite the `symbol` variable: use a local var to speed things up: + var ntsymbol = this_production[0]; // push nonterminal (reduce) + stack[sp] = ntsymbol; + vstack[sp] = yyval.$; + + // goto new state = table[STATE][NONTERMINAL] + newState = table[sstack[sp - 1]][ntsymbol]; + sstack[sp] = newState; + ++sp; + + + + + + + + + + continue; + + // accept: + case 3: + retval = true; + // Return the `$accept` rule's `$$` result, if available. + // + // Also note that JISON always adds this top-most `$accept` rule (with implicit, + // default, action): + // + // $accept: $end + // %{ $$ = $1; @$ = @1; %} + // + // which, combined with the parse kernel's `$accept` state behaviour coded below, + // will produce the `$$` value output of the rule as the parse result, + // IFF that result is *not* `undefined`. (See also the parser kernel code.) 
+ // + // In code: + // + // %{ + // @$ = @1; // if location tracking support is included + // if (typeof $1 !== 'undefined') + // return $1; + // else + // return true; // the default parse result if the rule actions don't produce anything + // %} + sp--; + if (typeof vstack[sp] !== 'undefined') { + retval = vstack[sp]; + } + + break; + } + + // break out of loop: we accept or fail with error + break; + } + } catch (ex) { + // report exceptions through the parseError callback too, but keep the exception intact + // if it is a known parser or lexer error which has been thrown by parseError() already: + if (ex instanceof this.JisonParserError) { + throw ex; + } + else if (lexer && typeof lexer.JisonLexerError === 'function' && ex instanceof lexer.JisonLexerError) { + throw ex; + } + else { + p = this.constructParseErrorInfo('Parsing aborted due to exception.', ex, null, false); + retval = this.parseError(p.errStr, p, this.JisonParserError); + } + } finally { + retval = this.cleanupAfterParse(retval, true, true); + this.__reentrant_call_depth--; + } // /finally + + return retval; +} +}; +parser.originalParseError = parser.parseError; +parser.originalQuoteName = parser.quoteName; + + +/* lexer generated by jison-lex 0.6.1-205 */ + +/* + * Returns a Lexer object of the following structure: + * + * Lexer: { + * yy: {} The so-called "shared state" or rather the *source* of it; + * the real "shared state" `yy` passed around to + * the rule actions, etc. is a direct reference! + * + * This "shared context" object was passed to the lexer by way of + * the `lexer.setInput(str, yy)` API before you may use it. + * + * This "shared context" object is passed to the lexer action code in `performAction()` + * so userland code in the lexer actions may communicate with the outside world + * and/or other lexer rules' actions in more or less complex ways. + * + * } + * + * Lexer.prototype: { + * EOF: 1, + * ERROR: 2, + * + * yy: The overall "shared context" object reference. + * + * JisonLexerError: function(msg, hash), + * + * performAction: function lexer__performAction(yy, yyrulenumber, YY_START), + * + * The function parameters and `this` have the following value/meaning: + * - `this` : reference to the `lexer` instance. + * `yy_` is an alias for `this` lexer instance reference used internally. + * + * - `yy` : a reference to the `yy` "shared state" object which was passed to the lexer + * by way of the `lexer.setInput(str, yy)` API before. + * + * Note: + * The extra arguments you specified in the `%parse-param` statement in your + * **parser** grammar definition file are passed to the lexer via this object + * reference as member variables. + * + * - `yyrulenumber` : index of the matched lexer rule (regex), used internally. + * + * - `YY_START`: the current lexer "start condition" state. + * + * parseError: function(str, hash, ExceptionClass), + * + * constructLexErrorInfo: function(error_message, is_recoverable), + * Helper function. + * Produces a new errorInfo 'hash object' which can be passed into `parseError()`. + * See it's use in this lexer kernel in many places; example usage: + * + * var infoObj = lexer.constructParseErrorInfo('fail!', true); + * var retVal = lexer.parseError(infoObj.errStr, infoObj, lexer.JisonLexerError); + * + * options: { ... lexer %options ... }, + * + * lex: function(), + * Produce one token of lexed input, which was passed in earlier via the `lexer.setInput()` API. 
+ * You MAY use the additional `args...` parameters as per `%parse-param` spec of the **lexer** grammar: + * these extra `args...` are added verbatim to the `yy` object reference as member variables. + * + * WARNING: + * Lexer's additional `args...` parameters (via lexer's `%parse-param`) MAY conflict with + * any attributes already added to `yy` by the **parser** or the jison run-time; + * when such a collision is detected an exception is thrown to prevent the generated run-time + * from silently accepting this confusing and potentially hazardous situation! + * + * cleanupAfterLex: function(do_not_nuke_errorinfos), + * Helper function. + * + * This helper API is invoked when the **parse process** has completed: it is the responsibility + * of the **parser** (or the calling userland code) to invoke this method once cleanup is desired. + * + * This helper may be invoked by user code to ensure the internal lexer gets properly garbage collected. + * + * setInput: function(input, [yy]), + * + * + * input: function(), + * + * + * unput: function(str), + * + * + * more: function(), + * + * + * reject: function(), + * + * + * less: function(n), + * + * + * pastInput: function(n), + * + * + * upcomingInput: function(n), + * + * + * showPosition: function(), + * + * + * test_match: function(regex_match_array, rule_index), + * + * + * next: function(), + * + * + * begin: function(condition), + * + * + * pushState: function(condition), + * + * + * popState: function(), + * + * + * topState: function(), + * + * + * _currentRules: function(), + * + * + * stateStackSize: function(), + * + * + * performAction: function(yy, yy_, yyrulenumber, YY_START), + * + * + * rules: [...], + * + * + * conditions: {associative list: name ==> set}, + * } + * + * + * token location info (`yylloc`): { + * first_line: n, + * last_line: n, + * first_column: n, + * last_column: n, + * range: [start_number, end_number] + * (where the numbers are indexes into the input string, zero-based) + * } + * + * --- + * + * The `parseError` function receives a 'hash' object with these members for lexer errors: + * + * { + * text: (matched text) + * token: (the produced terminal token, if any) + * token_id: (the produced terminal token numeric ID, if any) + * line: (yylineno) + * loc: (yylloc) + * recoverable: (boolean: TRUE when the parser MAY have an error recovery rule + * available for this particular error) + * yy: (object: the current parser internal "shared state" `yy` + * as is also available in the rule actions; this can be used, + * for instance, for advanced error analysis and reporting) + * lexer: (reference to the current lexer instance used by the parser) + * } + * + * while `this` will reference the current lexer instance. + * + * When `parseError` is invoked by the lexer, the default implementation will + * attempt to invoke `yy.parser.parseError()`; when this callback is not provided + * it will try to invoke `yy.parseError()` instead. When that callback is also not + * provided, a `JisonLexerError` exception will be thrown containing the error + * message and `hash`, as constructed by the `constructLexErrorInfo()` API. + * + * Note that the lexer's `JisonLexerError` error class is passed via the + * `ExceptionClass` argument, which is invoked to construct the exception + * instance to be thrown, so technically `parseError` will throw the object + * produced by the `new ExceptionClass(str, hash)` JavaScript expression. 
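+ *
+ * Purely as an illustrative sketch (this is not part of the generated kernel; the `myParser`
+ * name and the console reporting are assumptions for the example), userland code could hook
+ * that mechanism by assigning a `parseError` callback to the shared `yy` state before parsing:
+ *
+ *     myParser.yy.parseError = function (str, hash, ExceptionClass) {
+ *         // `hash.line` carries `yylineno`; `hash.recoverable` tells whether the
+ *         // parser may still be able to recover from this particular error.
+ *         console.error('problem at line ' + (hash.line + 1) + ': ' + str);
+ *         if (!hash.recoverable) {
+ *             throw new ExceptionClass(str, hash);    // mimic the default behaviour
+ *         }
+ *     };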
+ *
+ * ---
+ *
+ * You can specify lexer options by setting / modifying the `.options` object of your Lexer instance.
+ * These options are available:
+ *
+ * (Options are permanent.)
+ *
+ * yy: {
+ * parseError: function(str, hash, ExceptionClass)
+ * optional: overrides the default `parseError` function.
+ * }
+ *
+ * lexer.options: {
+ * pre_lex: function()
+ * optional: is invoked before the lexer is invoked to produce another token.
+ * `this` refers to the Lexer object.
+ * post_lex: function(token) { return token; }
+ * optional: is invoked when the lexer has produced a token `token`;
+ * this function can override the returned token value by returning another.
+ * When it does not return any (truthy) value, the lexer will return
+ * the original `token`.
+ * `this` refers to the Lexer object.
+ *
+ * WARNING: the next set of options are not meant to be changed. They echo the abilities of
+ * the lexer as per when it was compiled!
+ *
+ * ranges: boolean
+ * optional: `true` ==> token location info will include a .range[] member.
+ * flex: boolean
+ * optional: `true` ==> flex-like lexing behaviour where the rules are tested
+ * exhaustively to find the longest match.
+ * backtrack_lexer: boolean
+ * optional: `true` ==> lexer regexes are tested in order and for each matching regex the action code is invoked;
+ * the lexer terminates the scan when a token is returned by the action code.
+ * xregexp: boolean
+ * optional: `true` ==> lexer rule regexes are "extended regex format" requiring the
+ * `XRegExp` library. When this %option has not been specified at compile time, all lexer
+ * rule regexes have been written as standard JavaScript RegExp expressions.
+ * }
+ */
+
+
+var lexer = function() {
+ /**
+ * See also:
+ * http://stackoverflow.com/questions/1382107/whats-a-good-way-to-extend-error-in-javascript/#35881508
+ * but we keep the prototype.constructor and prototype.name assignment lines too for compatibility
+ * with userland code which might access the derived class in a 'classic' way.
+ * + * @public + * @constructor + * @nocollapse + */ + function JisonLexerError(msg, hash) { + Object.defineProperty(this, 'name', { + enumerable: false, + writable: false, + value: 'JisonLexerError' + }); + + if (msg == null) + msg = '???'; + + Object.defineProperty(this, 'message', { + enumerable: false, + writable: true, + value: msg + }); + + this.hash = hash; + var stacktrace; + + if (hash && hash.exception instanceof Error) { + var ex2 = hash.exception; + this.message = ex2.message || msg; + stacktrace = ex2.stack; + } + + if (!stacktrace) { + if (Error.hasOwnProperty('captureStackTrace')) { + // V8 + Error.captureStackTrace(this, this.constructor); + } else { + stacktrace = new Error(msg).stack; + } + } - Parser: { - yy: {} + if (stacktrace) { + Object.defineProperty(this, 'stack', { + enumerable: false, + writable: false, + value: stacktrace + }); + } } - Parser.prototype: { - yy: {}, - trace: function(), - symbols_: {associative list: name ==> number}, - terminals_: {associative list: number ==> name}, - productions_: [...], - performAction: function anonymous(yytext, yyleng, yylineno, yy, yystate, $$, _$), - table: [...], - defaultActions: {...}, - parseError: function(str, hash), - parse: function(input), - - lexer: { - EOF: 1, - parseError: function(str, hash), - setInput: function(input), - input: function(), - unput: function(str), - more: function(), - less: function(n), - pastInput: function(), - upcomingInput: function(), - showPosition: function(), - test_match: function(regex_match_array, rule_index), - next: function(), - lex: function(), - begin: function(condition), - popState: function(), - _currentRules: function(), - topState: function(), - pushState: function(condition), - - options: { - ranges: boolean (optional: true ==> token location info will include a .range[] member) - flex: boolean (optional: true ==> flex-like lexing behaviour where the rules are tested exhaustively to find the longest match) - backtrack_lexer: boolean (optional: true ==> lexer regexes are tested in order and for each matching regex the action code is invoked; the lexer terminates the scan when a token is returned by the action code) - }, - - performAction: function(yy, yy_, $avoiding_name_collisions, YY_START), - rules: [...], - conditions: {associative list: name ==> set}, - } + if (typeof Object.setPrototypeOf === 'function') { + Object.setPrototypeOf(JisonLexerError.prototype, Error.prototype); + } else { + JisonLexerError.prototype = Object.create(Error.prototype); } + JisonLexerError.prototype.constructor = JisonLexerError; + JisonLexerError.prototype.name = 'JisonLexerError'; - token location info (@$, _$, etc.): { - first_line: n, - last_line: n, - first_column: n, - last_column: n, - range: [start_number, end_number] (where the numbers are indexes into the input string, regular zero-based) - } + var lexer = { + +// Code Generator Information Report +// --------------------------------- +// +// Options: +// +// backtracking: .................... false +// location.ranges: ................. true +// location line+column tracking: ... true +// +// +// Forwarded Parser Analysis flags: +// +// uses yyleng: ..................... false +// uses yylineno: ................... false +// uses yytext: ..................... false +// uses yylloc: ..................... false +// uses lexer values: ............... true / true +// location tracking: ............... false +// location assignment: ............. false +// +// +// Lexer Analysis flags: +// +// uses yyleng: ..................... ??? 
+// uses yylineno: ................... ??? +// uses yytext: ..................... ??? +// uses yylloc: ..................... ??? +// uses ParseError API: ............. ??? +// uses yyerror: .................... ??? +// uses location tracking & editing: ??? +// uses more() API: ................. ??? +// uses unput() API: ................ ??? +// uses reject() API: ............... ??? +// uses less() API: ................. ??? +// uses display APIs pastInput(), upcomingInput(), showPosition(): +// ............................. ??? +// uses describeYYLLOC() API: ....... ??? +// +// --------- END OF REPORT ----------- +EOF: 1, + ERROR: 2, - the parseError function receives a 'hash' object with these members for lexer and parser errors: { - text: (matched text) - token: (the produced terminal token, if any) - line: (yylineno) - } - while parser (grammar) errors will also provide these members, i.e. parser errors deliver a superset of attributes: { - loc: (yylloc) - expected: (string describing the set of expected tokens) - recoverable: (boolean: TRUE when the parser has a error recovery rule available for this particular error) - } -*/ -var ebnf = (function(){ -var parser = {trace: function trace() { }, -yy: {}, -symbols_: {"error":2,"production":3,"handle":4,"EOF":5,"handle_list":6,"|":7,"expression_suffix":8,"expression":9,"suffix":10,"ALIAS":11,"symbol":12,"(":13,")":14,"*":15,"?":16,"+":17,"$accept":0,"$end":1}, -terminals_: {2:"error",5:"EOF",7:"|",11:"ALIAS",12:"symbol",13:"(",14:")",15:"*",16:"?",17:"+"}, -productions_: [0,[3,2],[6,1],[6,3],[4,0],[4,2],[8,3],[8,2],[9,1],[9,3],[10,0],[10,1],[10,1],[10,1]], -performAction: function anonymous(yytext, yyleng, yylineno, yy, yystate /* action[1] */, $$ /* vstack */, _$ /* lstack */) { -/* this == yyval */ - -var $0 = $$.length - 1; -switch (yystate) { -case 1: return $$[$0-1]; -break; -case 2: this.$ = [$$[$0]]; -break; -case 3: $$[$0-2].push($$[$0]); -break; -case 4: this.$ = []; -break; -case 5: $$[$0-1].push($$[$0]); -break; -case 6: this.$ = ['xalias', $$[$0-1], $$[$0-2], $$[$0]]; -break; -case 7: if ($$[$0]) this.$ = [$$[$0], $$[$0-1]]; else this.$ = $$[$0-1]; -break; -case 8: this.$ = ['symbol', $$[$0]]; -break; -case 9: this.$ = ['()', $$[$0-1]]; -break; -} -}, -table: [{3:1,4:2,5:[2,4],12:[2,4],13:[2,4]},{1:[3]},{5:[1,3],8:4,9:5,12:[1,6],13:[1,7]},{1:[2,1]},{5:[2,5],7:[2,5],12:[2,5],13:[2,5],14:[2,5]},{5:[2,10],7:[2,10],10:8,11:[2,10],12:[2,10],13:[2,10],14:[2,10],15:[1,9],16:[1,10],17:[1,11]},{5:[2,8],7:[2,8],11:[2,8],12:[2,8],13:[2,8],14:[2,8],15:[2,8],16:[2,8],17:[2,8]},{4:13,6:12,7:[2,4],12:[2,4],13:[2,4],14:[2,4]},{5:[2,7],7:[2,7],11:[1,14],12:[2,7],13:[2,7],14:[2,7]},{5:[2,11],7:[2,11],11:[2,11],12:[2,11],13:[2,11],14:[2,11]},{5:[2,12],7:[2,12],11:[2,12],12:[2,12],13:[2,12],14:[2,12]},{5:[2,13],7:[2,13],11:[2,13],12:[2,13],13:[2,13],14:[2,13]},{7:[1,16],14:[1,15]},{7:[2,2],8:4,9:5,12:[1,6],13:[1,7],14:[2,2]},{5:[2,6],7:[2,6],12:[2,6],13:[2,6],14:[2,6]},{5:[2,9],7:[2,9],11:[2,9],12:[2,9],13:[2,9],14:[2,9],15:[2,9],16:[2,9],17:[2,9]},{4:17,7:[2,4],12:[2,4],13:[2,4],14:[2,4]},{7:[2,3],8:4,9:5,12:[1,6],13:[1,7],14:[2,3]}], -defaultActions: {3:[2,1]}, -parseError: function parseError(str, hash) { - if (hash.recoverable) { - this.trace(str); - } else { - throw new Error(str); - } -}, -parse: function parse(input) { - var self = this, stack = [0], vstack = [null], lstack = [], table = this.table, yytext = '', yylineno = 0, yyleng = 0, recovering = 0, TERROR = 2, EOF = 1; - var args = lstack.slice.call(arguments, 1); - 
this.lexer.setInput(input); - this.lexer.yy = this.yy; - this.yy.lexer = this.lexer; - this.yy.parser = this; - if (typeof this.lexer.yylloc == 'undefined') { - this.lexer.yylloc = {}; - } - var yyloc = this.lexer.yylloc; - lstack.push(yyloc); - var ranges = this.lexer.options && this.lexer.options.ranges; - if (typeof this.yy.parseError === 'function') { - this.parseError = this.yy.parseError; - } else { - this.parseError = Object.getPrototypeOf(this).parseError; - } - function popStack(n) { - stack.length = stack.length - 2 * n; - vstack.length = vstack.length - n; - lstack.length = lstack.length - n; - } - function lex() { - var token; - token = self.lexer.lex() || EOF; - if (typeof token !== 'number') { - token = self.symbols_[token] || token; - } - return token; - } - var symbol, preErrorSymbol, state, action, a, r, yyval = {}, p, len, newState, expected; - while (true) { - state = stack[stack.length - 1]; - if (this.defaultActions[state]) { - action = this.defaultActions[state]; - } else { - if (symbol === null || typeof symbol == 'undefined') { - symbol = lex(); - } - action = table[state] && table[state][symbol]; - } - if (typeof action === 'undefined' || !action.length || !action[0]) { - var errStr = ''; - expected = []; - for (p in table[state]) { - if (this.terminals_[p] && p > TERROR) { - expected.push('\'' + this.terminals_[p] + '\''); - } - } - if (this.lexer.showPosition) { - errStr = 'Parse error on line ' + (yylineno + 1) + ':\n' + this.lexer.showPosition() + '\nExpecting ' + expected.join(', ') + ', got \'' + (this.terminals_[symbol] || symbol) + '\''; - } else { - errStr = 'Parse error on line ' + (yylineno + 1) + ': Unexpected ' + (symbol == EOF ? 'end of input' : '\'' + (this.terminals_[symbol] || symbol) + '\''); - } - this.parseError(errStr, { - text: this.lexer.match, - token: this.terminals_[symbol] || symbol, - line: this.lexer.yylineno, - loc: yyloc, - expected: expected - }); - } - if (action[0] instanceof Array && action.length > 1) { - throw new Error('Parse Error: multiple actions possible at state: ' + state + ', token: ' + symbol); - } - switch (action[0]) { - case 1: - stack.push(symbol); - vstack.push(this.lexer.yytext); - lstack.push(this.lexer.yylloc); - stack.push(action[1]); - symbol = null; - if (!preErrorSymbol) { - yyleng = this.lexer.yyleng; - yytext = this.lexer.yytext; - yylineno = this.lexer.yylineno; - yyloc = this.lexer.yylloc; - if (recovering > 0) { - recovering--; - } + // JisonLexerError: JisonLexerError, /// <-- injected by the code generator + + // options: {}, /// <-- injected by the code generator + + // yy: ..., /// <-- injected by setInput() + + __currentRuleSet__: null, /// INTERNAL USE ONLY: internal rule set cache for the current lexer state + + __error_infos: [], /// INTERNAL USE ONLY: the set of lexErrorInfo objects created since the last cleanup + __decompressed: false, /// INTERNAL USE ONLY: mark whether the lexer instance has been 'unfolded' completely and is now ready for use + done: false, /// INTERNAL USE ONLY + _backtrack: false, /// INTERNAL USE ONLY + _input: '', /// INTERNAL USE ONLY + _more: false, /// INTERNAL USE ONLY + _signaled_error_token: false, /// INTERNAL USE ONLY + conditionStack: [], /// INTERNAL USE ONLY; managed via `pushState()`, `popState()`, `topState()` and `stateStackSize()` + match: '', /// READ-ONLY EXTERNAL ACCESS - ADVANCED USE ONLY: tracks input which has been matched so far for the lexer token under construction. 
`match` is identical to `yytext` except that this one still contains the matched input string after `lexer.performAction()` has been invoked, where userland code MAY have changed/replaced the `yytext` value entirely! + matched: '', /// READ-ONLY EXTERNAL ACCESS - ADVANCED USE ONLY: tracks entire input which has been matched so far + matches: false, /// READ-ONLY EXTERNAL ACCESS - ADVANCED USE ONLY: tracks RE match result for last (successful) match attempt + yytext: '', /// ADVANCED USE ONLY: tracks input which has been matched so far for the lexer token under construction; this value is transferred to the parser as the 'token value' when the parser consumes the lexer token produced through a call to the `lex()` API. + offset: 0, /// READ-ONLY EXTERNAL ACCESS - ADVANCED USE ONLY: tracks the 'cursor position' in the input string, i.e. the number of characters matched so far + yyleng: 0, /// READ-ONLY EXTERNAL ACCESS - ADVANCED USE ONLY: length of matched input for the token under construction (`yytext`) + yylineno: 0, /// READ-ONLY EXTERNAL ACCESS - ADVANCED USE ONLY: 'line number' at which the token under construction is located + yylloc: null, /// READ-ONLY EXTERNAL ACCESS - ADVANCED USE ONLY: tracks location info (lines + columns) for the token under construction + + /** + * INTERNAL USE: construct a suitable error info hash object instance for `parseError`. + * + * @public + * @this {RegExpLexer} + */ + constructLexErrorInfo: function lexer_constructLexErrorInfo(msg, recoverable, show_input_position) { + msg = '' + msg; + + // heuristic to determine if the error message already contains a (partial) source code dump + // as produced by either `showPosition()` or `prettyPrintRange()`: + if (show_input_position == undefined) { + show_input_position = !(msg.indexOf('\n') > 0 && msg.indexOf('^') > 0); + } + + if (this.yylloc && show_input_position) { + if (typeof this.prettyPrintRange === 'function') { + var pretty_src = this.prettyPrintRange(this.yylloc); + + if (!/\n\s*$/.test(msg)) { + msg += '\n'; + } + + msg += '\n Erroneous area:\n' + this.prettyPrintRange(this.yylloc); + } else if (typeof this.showPosition === 'function') { + var pos_str = this.showPosition(); + + if (pos_str) { + if (msg.length && msg[msg.length - 1] !== '\n' && pos_str[0] !== '\n') { + msg += '\n' + pos_str; } else { - symbol = preErrorSymbol; - preErrorSymbol = null; - } - break; - case 2: - len = this.productions_[action[1]][1]; - yyval.$ = vstack[vstack.length - len]; - yyval._$ = { - first_line: lstack[lstack.length - (len || 1)].first_line, - last_line: lstack[lstack.length - 1].last_line, - first_column: lstack[lstack.length - (len || 1)].first_column, - last_column: lstack[lstack.length - 1].last_column - }; - if (ranges) { - yyval._$.range = [ - lstack[lstack.length - (len || 1)].range[0], - lstack[lstack.length - 1].range[1] - ]; - } - r = this.performAction.apply(yyval, [ - yytext, - yyleng, - yylineno, - this.yy, - action[1], - vstack, - lstack - ].concat(args)); - if (typeof r !== 'undefined') { - return r; + msg += pos_str; } - if (len) { - stack = stack.slice(0, -1 * len * 2); - vstack = vstack.slice(0, -1 * len); - lstack = lstack.slice(0, -1 * len); + } + } + } + + /** @constructor */ + var pei = { + errStr: msg, + recoverable: !!recoverable, + text: this.match, // This one MAY be empty; userland code should use the `upcomingInput` API to obtain more text which follows the 'lexer cursor position'... 
+ token: null, + line: this.yylineno, + loc: this.yylloc, + yy: this.yy, + lexer: this, + + /** + * and make sure the error info doesn't stay due to potential + * ref cycle via userland code manipulations. + * These would otherwise all be memory leak opportunities! + * + * Note that only array and object references are nuked as those + * constitute the set of elements which can produce a cyclic ref. + * The rest of the members is kept intact as they are harmless. + * + * @public + * @this {LexErrorInfo} + */ + destroy: function destructLexErrorInfo() { + // remove cyclic references added to error info: + // info.yy = null; + // info.lexer = null; + // ... + var rec = !!this.recoverable; + + for (var key in this) { + if (this.hasOwnProperty(key) && typeof key === 'object') { + this[key] = undefined; } - stack.push(this.productions_[action[1]][0]); - vstack.push(yyval.$); - lstack.push(yyval._$); - newState = table[stack[stack.length - 2]][stack[stack.length - 1]]; - stack.push(newState); - break; - case 3: - return true; + } + + this.recoverable = rec; } - } - return true; -}}; -/* generated by jison-lex 0.2.1 */ -var lexer = (function(){ -var lexer = { + }; -EOF:1, + // track this instance so we can `destroy()` it once we deem it superfluous and ready for garbage collection! + this.__error_infos.push(pei); -parseError:function parseError(str, hash) { - if (this.yy.parser) { - this.yy.parser.parseError(str, hash); - } else { - throw new Error(str); + return pei; + }, + + /** + * handler which is invoked when a lexer error occurs. + * + * @public + * @this {RegExpLexer} + */ + parseError: function lexer_parseError(str, hash, ExceptionClass) { + if (!ExceptionClass) { + ExceptionClass = this.JisonLexerError; + } + + if (this.yy) { + if (this.yy.parser && typeof this.yy.parser.parseError === 'function') { + return this.yy.parser.parseError.call(this, str, hash, ExceptionClass) || this.ERROR; + } else if (typeof this.yy.parseError === 'function') { + return this.yy.parseError.call(this, str, hash, ExceptionClass) || this.ERROR; } + } + + throw new ExceptionClass(str, hash); }, -// resets the lexer, sets new input -setInput:function (input) { - this._input = input; - this._more = this._backtrack = this.done = false; - this.yylineno = this.yyleng = 0; - this.yytext = this.matched = this.match = ''; - this.conditionStack = ['INITIAL']; - this.yylloc = { - first_line: 1, - first_column: 0, - last_line: 1, - last_column: 0 - }; - if (this.options.ranges) { - this.yylloc.range = [0,0]; + /** + * method which implements `yyerror(str, ...args)` functionality for use inside lexer actions. 
+ * + * @public + * @this {RegExpLexer} + */ + yyerror: function yyError(str /*, ...args */) { + var lineno_msg = ''; + + if (this.yylloc) { + lineno_msg = ' on line ' + (this.yylineno + 1); + } + + var p = this.constructLexErrorInfo( + 'Lexical error' + lineno_msg + ': ' + str, + this.options.lexerErrorsAreRecoverable + ); + + // Add any extra args to the hash under the name `extra_error_attributes`: + var args = Array.prototype.slice.call(arguments, 1); + + if (args.length) { + p.extra_error_attributes = args; + } + + return this.parseError(p.errStr, p, this.JisonLexerError) || this.ERROR; + }, + + /** + * final cleanup function for when we have completed lexing the input; + * make it an API so that external code can use this one once userland + * code has decided it's time to destroy any lingering lexer error + * hash object instances and the like: this function helps to clean + * up these constructs, which *may* carry cyclic references which would + * otherwise prevent the instances from being properly and timely + * garbage-collected, i.e. this function helps prevent memory leaks! + * + * @public + * @this {RegExpLexer} + */ + cleanupAfterLex: function lexer_cleanupAfterLex(do_not_nuke_errorinfos) { + // prevent lingering circular references from causing memory leaks: + this.setInput('', {}); + + // nuke the error hash info instances created during this run. + // Userland code must COPY any data/references + // in the error hash instance(s) it is more permanently interested in. + if (!do_not_nuke_errorinfos) { + for (var i = this.__error_infos.length - 1; i >= 0; i--) { + var el = this.__error_infos[i]; + + if (el && typeof el.destroy === 'function') { + el.destroy(); + } } - this.offset = 0; - return this; + + this.__error_infos.length = 0; + } + + return this; }, -// consumes and returns one char from the input -input:function () { - var ch = this._input[0]; - this.yytext += ch; - this.yyleng++; - this.offset++; - this.match += ch; - this.matched += ch; - var lines = ch.match(/(?:\r\n?|\n).*/g); - if (lines) { - this.yylineno++; - this.yylloc.last_line++; - } else { - this.yylloc.last_column++; + /** + * clear the lexer token context; intended for internal use only + * + * @public + * @this {RegExpLexer} + */ + clear: function lexer_clear() { + this.yytext = ''; + this.yyleng = 0; + this.match = ''; + + // - DO NOT reset `this.matched` + this.matches = false; + + this._more = false; + this._backtrack = false; + var col = (this.yylloc ? this.yylloc.last_column : 0); + + this.yylloc = { + first_line: this.yylineno + 1, + first_column: col, + last_line: this.yylineno + 1, + last_column: col, + range: [this.offset, this.offset] + }; + }, + + /** + * resets the lexer, sets new input + * + * @public + * @this {RegExpLexer} + */ + setInput: function lexer_setInput(input, yy) { + this.yy = yy || this.yy || {}; + + // also check if we've fully initialized the lexer instance, + // including expansion work to be done to go from a loaded + // lexer to a usable lexer: + if (!this.__decompressed) { + // step 1: decompress the regex list: + var rules = this.rules; + + for (var i = 0, len = rules.length; i < len; i++) { + var rule_re = rules[i]; + + // compression: is the RE an xref to another RE slot in the rules[] table? 
+ if (typeof rule_re === 'number') { + rules[i] = rules[rule_re]; + } } - if (this.options.ranges) { - this.yylloc.range[1]++; + + // step 2: unfold the conditions[] set to make these ready for use: + var conditions = this.conditions; + + for (var k in conditions) { + var spec = conditions[k]; + var rule_ids = spec.rules; + var len = rule_ids.length; + var rule_regexes = new Array(len + 1); // slot 0 is unused; we use a 1-based index approach here to keep the hottest code in `lexer_next()` fast and simple! + var rule_new_ids = new Array(len + 1); + + for (var i = 0; i < len; i++) { + var idx = rule_ids[i]; + var rule_re = rules[idx]; + rule_regexes[i + 1] = rule_re; + rule_new_ids[i + 1] = idx; + } + + spec.rules = rule_new_ids; + spec.__rule_regexes = rule_regexes; + spec.__rule_count = len; } - this._input = this._input.slice(1); - return ch; + this.__decompressed = true; + } + + this._input = input || ''; + this.clear(); + this._signaled_error_token = false; + this.done = false; + this.yylineno = 0; + this.matched = ''; + this.conditionStack = ['INITIAL']; + this.__currentRuleSet__ = null; + + this.yylloc = { + first_line: 1, + first_column: 0, + last_line: 1, + last_column: 0, + range: [0, 0] + }; + + this.offset = 0; + return this; + }, + + /** + * edit the remaining input via user-specified callback. + * This can be used to forward-adjust the input-to-parse, + * e.g. inserting macro expansions and alike in the + * input which has yet to be lexed. + * The behaviour of this API contrasts the `unput()` et al + * APIs as those act on the *consumed* input, while this + * one allows one to manipulate the future, without impacting + * the current `yyloc` cursor location or any history. + * + * Use this API to help implement C-preprocessor-like + * `#include` statements, etc. + * + * The provided callback must be synchronous and is + * expected to return the edited input (string). + * + * The `cpsArg` argument value is passed to the callback + * as-is. + * + * `callback` interface: + * `function callback(input, cpsArg)` + * + * - `input` will carry the remaining-input-to-lex string + * from the lexer. + * - `cpsArg` is `cpsArg` passed into this API. + * + * The `this` reference for the callback will be set to + * reference this lexer instance so that userland code + * in the callback can easily and quickly access any lexer + * API. + * + * When the callback returns a non-string-type falsey value, + * we assume the callback did not edit the input and we + * will using the input as-is. + * + * When the callback returns a non-string-type value, it + * is converted to a string for lexing via the `"" + retval` + * operation. (See also why: http://2ality.com/2012/03/converting-to-string.html + * -- that way any returned object's `toValue()` and `toString()` + * methods will be invoked in a proper/desirable order.) + * + * @public + * @this {RegExpLexer} + */ + editRemainingInput: function lexer_editRemainingInput(callback, cpsArg) { + var rv = callback.call(this, this._input, cpsArg); + + if (typeof rv !== 'string') { + if (rv) { + this._input = '' + rv; + } + // else: keep `this._input` as is. 
+ } else { + this._input = rv; + } + + return this; }, -// unshifts one char (or a string) into the input -unput:function (ch) { - var len = ch.length; - var lines = ch.split(/(?:\r\n?|\n)/g); + /** + * consumes and returns one char from the input + * + * @public + * @this {RegExpLexer} + */ + input: function lexer_input() { + if (!this._input) { + //this.done = true; -- don't set `done` as we want the lex()/next() API to be able to produce one custom EOF token match after this anyhow. (lexer can match special <> tokens and perform user action code for a <> match, but only does so *once*) + return null; + } - this._input = ch + this._input; - this.yytext = this.yytext.substr(0, this.yytext.length - len - 1); - //this.yyleng -= len; - this.offset -= len; - var oldLines = this.match.split(/(?:\r\n?|\n)/g); - this.match = this.match.substr(0, this.match.length - 1); - this.matched = this.matched.substr(0, this.matched.length - 1); + var ch = this._input[0]; + this.yytext += ch; + this.yyleng++; + this.offset++; + this.match += ch; + this.matched += ch; - if (lines.length - 1) { - this.yylineno -= lines.length - 1; + // Count the linenumber up when we hit the LF (or a stand-alone CR). + // On CRLF, the linenumber is incremented when you fetch the CR or the CRLF combo + // and we advance immediately past the LF as well, returning both together as if + // it was all a single 'character' only. + var slice_len = 1; + + var lines = false; + + if (ch === '\n') { + lines = true; + } else if (ch === '\r') { + lines = true; + var ch2 = this._input[1]; + + if (ch2 === '\n') { + slice_len++; + ch += ch2; + this.yytext += ch2; + this.yyleng++; + this.offset++; + this.match += ch2; + this.matched += ch2; + this.yylloc.range[1]++; } - var r = this.yylloc.range; + } - this.yylloc = { - first_line: this.yylloc.first_line, - last_line: this.yylineno + 1, - first_column: this.yylloc.first_column, - last_column: lines ? - (lines.length === oldLines.length ? this.yylloc.first_column : 0) - + oldLines[oldLines.length - lines.length].length - lines[0].length : - this.yylloc.first_column - len - }; + if (lines) { + this.yylineno++; + this.yylloc.last_line++; + this.yylloc.last_column = 0; + } else { + this.yylloc.last_column++; + } + + this.yylloc.range[1]++; + this._input = this._input.slice(slice_len); + return ch; + }, - if (this.options.ranges) { - this.yylloc.range = [r[0], r[0] + this.yyleng - len]; + /** + * unshifts one char (or an entire string) into the input + * + * @public + * @this {RegExpLexer} + */ + unput: function lexer_unput(ch) { + var len = ch.length; + var lines = ch.split(/(?:\r\n?|\n)/g); + this._input = ch + this._input; + this.yytext = this.yytext.substr(0, this.yytext.length - len); + this.yyleng = this.yytext.length; + this.offset -= len; + this.match = this.match.substr(0, this.match.length - len); + this.matched = this.matched.substr(0, this.matched.length - len); + + if (lines.length > 1) { + this.yylineno -= lines.length - 1; + this.yylloc.last_line = this.yylineno + 1; + + // Get last entirely matched line into the `pre_lines[]` array's + // last index slot; we don't mind when other previously + // matched lines end up in the array too. 
+ var pre = this.match; + + var pre_lines = pre.split(/(?:\r\n?|\n)/g); + + if (pre_lines.length === 1) { + pre = this.matched; + pre_lines = pre.split(/(?:\r\n?|\n)/g); } - this.yyleng = this.yytext.length; - return this; + + this.yylloc.last_column = pre_lines[pre_lines.length - 1].length; + } else { + this.yylloc.last_column -= len; + } + + this.yylloc.range[1] = this.yylloc.range[0] + this.yyleng; + this.done = false; + return this; }, -// When called from action, caches matched text and appends it on next action -more:function () { - this._more = true; - return this; + /** + * cache matched text and append it on next action + * + * @public + * @this {RegExpLexer} + */ + more: function lexer_more() { + this._more = true; + return this; }, -// When called from action, signals the lexer that this rule fails to match the input, so the next matching rule (regex) should be tested instead. -reject:function () { - if (this.options.backtrack_lexer) { - this._backtrack = true; - } else { - return this.parseError('Lexical error on line ' + (this.yylineno + 1) + '. You can only invoke reject() in the lexer when the lexer is of the backtracking persuasion (options.backtrack_lexer = true).\n' + this.showPosition(), { - text: "", - token: null, - line: this.yylineno - }); + /** + * signal the lexer that this rule fails to match the input, so the + * next matching rule (regex) should be tested instead. + * + * @public + * @this {RegExpLexer} + */ + reject: function lexer_reject() { + if (this.options.backtrack_lexer) { + this._backtrack = true; + } else { + // when the `parseError()` call returns, we MUST ensure that the error is registered. + // We accomplish this by signaling an 'error' token to be produced for the current + // `.lex()` run. + var lineno_msg = ''; + if (this.yylloc) { + lineno_msg = ' on line ' + (this.yylineno + 1); } - return this; + + var p = this.constructLexErrorInfo( + 'Lexical error' + lineno_msg + ': You can only invoke reject() in the lexer when the lexer is of the backtracking persuasion (options.backtrack_lexer = true).', + false + ); + + this._signaled_error_token = this.parseError(p.errStr, p, this.JisonLexerError) || this.ERROR; + } + + return this; }, -// retain first n characters of the match -less:function (n) { - this.unput(this.match.slice(n)); + /** + * retain first n characters of the match + * + * @public + * @this {RegExpLexer} + */ + less: function lexer_less(n) { + return this.unput(this.match.slice(n)); }, -// displays already matched input, i.e. for error messages -pastInput:function () { - var past = this.matched.substr(0, this.matched.length - this.match.length); - return (past.length > 20 ? '...':'') + past.substr(-20).replace(/\n/g, ""); + /** + * return (part of the) already matched input, i.e. for error + * messages. + * + * Limit the returned string length to `maxSize` (default: 20). + * + * Limit the returned string to the `maxLines` number of lines of + * input (default: 1). + * + * Negative limit values equal *unlimited*. + * + * @public + * @this {RegExpLexer} + */ + pastInput: function lexer_pastInput(maxSize, maxLines) { + var past = this.matched.substring(0, this.matched.length - this.match.length); + + if (maxSize < 0) + maxSize = past.length; + else if (!maxSize) + maxSize = 20; + + if (maxLines < 0) + maxLines = past.length; // can't ever have more input lines than this! 
+ else if (!maxLines) + maxLines = 1; + + // `substr` anticipation: treat \r\n as a single character and take a little + // more than necessary so that we can still properly check against maxSize + // after we've transformed and limited the newLines in here: + past = past.substr(-maxSize * 2 - 2); + + // now that we have a significantly reduced string to process, transform the newlines + // and chop them, then limit them: + var a = past.replace(/\r\n|\r/g, '\n').split('\n'); + + a = a.slice(-maxLines); + past = a.join('\n'); + + // When, after limiting to maxLines, we still have too much to return, + // do add an ellipsis prefix... + if (past.length > maxSize) { + past = '...' + past.substr(-maxSize); + } + + return past; }, -// displays upcoming input, i.e. for error messages -upcomingInput:function () { - var next = this.match; - if (next.length < 20) { - next += this._input.substr(0, 20-next.length); - } - return (next.substr(0,20) + (next.length > 20 ? '...' : '')).replace(/\n/g, ""); + /** + * return (part of the) upcoming input, i.e. for error messages. + * + * Limit the returned string length to `maxSize` (default: 20). + * + * Limit the returned string to the `maxLines` number of lines of input (default: 1). + * + * Negative limit values equal *unlimited*. + * + * > ### NOTE ### + * > + * > *"upcoming input"* is defined as the whole of the both + * > the *currently lexed* input, together with any remaining input + * > following that. *"currently lexed"* input is the input + * > already recognized by the lexer but not yet returned with + * > the lexer token. This happens when you are invoking this API + * > from inside any lexer rule action code block. + * > + * + * @public + * @this {RegExpLexer} + */ + upcomingInput: function lexer_upcomingInput(maxSize, maxLines) { + var next = this.match; + + if (maxSize < 0) + maxSize = next.length + this._input.length; + else if (!maxSize) + maxSize = 20; + + if (maxLines < 0) + maxLines = maxSize; // can't ever have more input lines than this! + else if (!maxLines) + maxLines = 1; + + // `substring` anticipation: treat \r\n as a single character and take a little + // more than necessary so that we can still properly check against maxSize + // after we've transformed and limited the newLines in here: + if (next.length < maxSize * 2 + 2) { + next += this._input.substring(0, maxSize * 2 + 2); // substring is faster on Chrome/V8 + } + + // now that we have a significantly reduced string to process, transform the newlines + // and chop them, then limit them: + var a = next.replace(/\r\n|\r/g, '\n').split('\n'); + + a = a.slice(0, maxLines); + next = a.join('\n'); + + // When, after limiting to maxLines, we still have too much to return, + // do add an ellipsis postfix... + if (next.length > maxSize) { + next = next.substring(0, maxSize) + '...'; + } + + return next; }, -// displays the character position where the lexing error occurred, i.e. for error messages -showPosition:function () { - var pre = this.pastInput(); - var c = new Array(pre.length + 1).join("-"); - return pre + this.upcomingInput() + "\n" + c + "^"; + /** + * return a string which displays the character position where the + * lexing error occurred, i.e. 
for error messages + * + * @public + * @this {RegExpLexer} + */ + showPosition: function lexer_showPosition(maxPrefix, maxPostfix) { + var pre = this.pastInput(maxPrefix).replace(/\s/g, ' '); + var c = new Array(pre.length + 1).join('-'); + return pre + this.upcomingInput(maxPostfix).replace(/\s/g, ' ') + '\n' + c + '^'; }, -// test the lexed token: return FALSE when not a match, otherwise return token -test_match:function (match, indexed_rule) { - var token, - lines, - backup; - - if (this.options.backtrack_lexer) { - // save context - backup = { - yylineno: this.yylineno, - yylloc: { - first_line: this.yylloc.first_line, - last_line: this.last_line, - first_column: this.yylloc.first_column, - last_column: this.yylloc.last_column - }, - yytext: this.yytext, - match: this.match, - matches: this.matches, - matched: this.matched, - yyleng: this.yyleng, - offset: this.offset, - _more: this._more, - _input: this._input, - yy: this.yy, - conditionStack: this.conditionStack.slice(0), - done: this.done - }; - if (this.options.ranges) { - backup.yylloc.range = this.yylloc.range.slice(0); - } + /** + * return a string which displays the lines & columns of input which are referenced + * by the given location info range, plus a few lines of context. + * + * This function pretty-prints the indicated section of the input, with line numbers + * and everything! + * + * This function is very useful to provide highly readable error reports, while + * the location range may be specified in various flexible ways: + * + * - `loc` is the location info object which references the area which should be + * displayed and 'marked up': these lines & columns of text are marked up by `^` + * characters below each character in the entire input range. + * + * - `context_loc` is the *optional* location info object which instructs this + * pretty-printer how much *leading* context should be displayed alongside + * the area referenced by `loc`. This can help provide context for the displayed + * error, etc. + * + * When this location info is not provided, a default context of 3 lines is + * used. + * + * - `context_loc2` is another *optional* location info object, which serves + * a similar purpose to `context_loc`: it specifies the amount of *trailing* + * context lines to display in the pretty-print output. + * + * When this location info is not provided, a default context of 1 line only is + * used. + * + * Special Notes: + * + * - when the `loc`-indicated range is very large (about 5 lines or more), then + * only the first and last few lines of this block are printed while a + * `...continued...` message will be printed between them. + * + * This serves the purpose of not printing a huge amount of text when the `loc` + * range happens to be huge: this way a manageable & readable output results + * for arbitrary large ranges. + * + * - this function can display lines of input which whave not yet been lexed. + * `prettyPrintRange()` can access the entire input! + * + * @public + * @this {RegExpLexer} + */ + prettyPrintRange: function lexer_prettyPrintRange(loc, context_loc, context_loc2) { + var error_size = loc.last_line - loc.first_line; + const CONTEXT = 3; + const CONTEXT_TAIL = 1; + const MINIMUM_VISIBLE_NONEMPTY_LINE_COUNT = 2; + var input = this.matched + this._input; + var lines = input.split('\n'); + + //var show_context = (error_size < 5 || context_loc); + var l0 = Math.max(1, (context_loc ? context_loc.first_line : loc.first_line - CONTEXT)); + + var l1 = Math.max(1, (context_loc2 ? 
context_loc2.last_line : loc.last_line + CONTEXT_TAIL)); + var lineno_display_width = 1 + Math.log10(l1 | 1) | 0; + var ws_prefix = new Array(lineno_display_width).join(' '); + var nonempty_line_indexes = []; + + var rv = lines.slice(l0 - 1, l1 + 1).map(function injectLineNumber(line, index) { + var lno = index + l0; + var lno_pfx = (ws_prefix + lno).substr(-lineno_display_width); + var rv = lno_pfx + ': ' + line; + var errpfx = new Array(lineno_display_width + 1).join('^'); + var offset = 2 + 1; + var len = 0; + + if (lno === loc.first_line) { + offset += loc.first_column; + + len = Math.max( + 2, + ((lno === loc.last_line ? loc.last_column : line.length)) - loc.first_column + 1 + ); + } else if (lno === loc.last_line) { + len = Math.max(2, loc.last_column + 1); + } else if (lno > loc.first_line && lno < loc.last_line) { + len = Math.max(2, line.length + 1); + } + + if (len) { + var lead = new Array(offset).join('.'); + var mark = new Array(len).join('^'); + rv += '\n' + errpfx + lead + mark; + + if (line.trim().length > 0) { + nonempty_line_indexes.push(index); + } + } + + rv = rv.replace(/\t/g, ' '); + return rv; + }); + + // now make sure we don't print an overly large amount of error area: limit it + // to the top and bottom line count: + if (nonempty_line_indexes.length > 2 * MINIMUM_VISIBLE_NONEMPTY_LINE_COUNT) { + var clip_start = nonempty_line_indexes[MINIMUM_VISIBLE_NONEMPTY_LINE_COUNT - 1] + 1; + var clip_end = nonempty_line_indexes[nonempty_line_indexes.length - MINIMUM_VISIBLE_NONEMPTY_LINE_COUNT] - 1; + var intermediate_line = new Array(lineno_display_width + 1).join(' ') + ' (...continued...)'; + intermediate_line += '\n' + new Array(lineno_display_width + 1).join('-') + ' (---------------)'; + rv.splice(clip_start, clip_end - clip_start + 1, intermediate_line); + } + + return rv.join('\n'); + }, + + /** + * helper function, used to produce a human readable description as a string, given + * the input `yylloc` location object. + * + * Set `display_range_too` to TRUE to include the string character index position(s) + * in the description if the `yylloc.range` is available. + * + * @public + * @this {RegExpLexer} + */ + describeYYLLOC: function lexer_describe_yylloc(yylloc, display_range_too) { + var l1 = yylloc.first_line; + var l2 = yylloc.last_line; + var c1 = yylloc.first_column; + var c2 = yylloc.last_column; + var dl = l2 - l1; + var dc = c2 - c1; + var rv; + + if (dl === 0) { + rv = 'line ' + l1 + ', '; + + if (dc <= 1) { + rv += 'column ' + c1; + } else { + rv += 'columns ' + c1 + ' .. ' + c2; } + } else { + rv = 'lines ' + l1 + '(column ' + c1 + ') .. ' + l2 + '(column ' + c2 + ')'; + } + + if (yylloc.range && display_range_too) { + var r1 = yylloc.range[0]; + var r2 = yylloc.range[1] - 1; - lines = match[0].match(/(?:\r\n?|\n).*/g); - if (lines) { - this.yylineno += lines.length; + if (r2 <= r1) { + rv += ' {String Offset: ' + r1 + '}'; + } else { + rv += ' {String Offset range: ' + r1 + ' .. ' + r2 + '}'; } - this.yylloc = { - first_line: this.yylloc.last_line, - last_line: this.yylineno + 1, - first_column: this.yylloc.last_column, - last_column: lines ? - lines[lines.length - 1].length - lines[lines.length - 1].match(/\r?\n?/)[0].length : - this.yylloc.last_column + match[0].length + } + + return rv; + }, + + /** + * test the lexed token: return FALSE when not a match, otherwise return token. + * + * `match` is supposed to be an array coming out of a regex match, i.e. `match[0]` + * contains the actually matched text string. 
+ * + * Also move the input cursor forward and update the match collectors: + * + * - `yytext` + * - `yyleng` + * - `match` + * - `matches` + * - `yylloc` + * - `offset` + * + * @public + * @this {RegExpLexer} + */ + test_match: function lexer_test_match(match, indexed_rule) { + var token, lines, backup, match_str, match_str_len; + + if (this.options.backtrack_lexer) { + // save context + backup = { + yylineno: this.yylineno, + + yylloc: { + first_line: this.yylloc.first_line, + last_line: this.yylloc.last_line, + first_column: this.yylloc.first_column, + last_column: this.yylloc.last_column, + range: this.yylloc.range.slice(0) + }, + + yytext: this.yytext, + match: this.match, + matches: this.matches, + matched: this.matched, + yyleng: this.yyleng, + offset: this.offset, + _more: this._more, + _input: this._input, + + //_signaled_error_token: this._signaled_error_token, + yy: this.yy, + + conditionStack: this.conditionStack.slice(0), + done: this.done }; - this.yytext += match[0]; - this.match += match[0]; - this.matches = match; - this.yyleng = this.yytext.length; - if (this.options.ranges) { - this.yylloc.range = [this.offset, this.offset += this.yyleng]; - } - this._more = false; - this._backtrack = false; - this._input = this._input.slice(match[0].length); - this.matched += match[0]; - token = this.performAction.call(this, this.yy, this, indexed_rule, this.conditionStack[this.conditionStack.length - 1]); - if (this.done && this._input) { - this.done = false; - } - if (token) { - return token; - } else if (this._backtrack) { - // recover context - for (var k in backup) { - this[k] = backup[k]; - } - return false; // rule action called reject() implying the next rule should be tested instead. + } + + match_str = match[0]; + match_str_len = match_str.length; + + // if (match_str.indexOf('\n') !== -1 || match_str.indexOf('\r') !== -1) { + lines = match_str.split(/(?:\r\n?|\n)/g); + + if (lines.length > 1) { + this.yylineno += lines.length - 1; + this.yylloc.last_line = this.yylineno + 1; + this.yylloc.last_column = lines[lines.length - 1].length; + } else { + this.yylloc.last_column += match_str_len; + } + + // } + this.yytext += match_str; + + this.match += match_str; + this.matched += match_str; + this.matches = match; + this.yyleng = this.yytext.length; + this.yylloc.range[1] += match_str_len; + + // previous lex rules MAY have invoked the `more()` API rather than producing a token: + // those rules will already have moved this `offset` forward matching their match lengths, + // hence we must only add our own match length now: + this.offset += match_str_len; + + this._more = false; + this._backtrack = false; + this._input = this._input.slice(match_str_len); + + // calling this method: + // + // function lexer__performAction(yy, yyrulenumber, YY_START) {...} + token = this.performAction.call( + this, + this.yy, + indexed_rule, + this.conditionStack[this.conditionStack.length - 1] /* = YY_START */ + ); + + // otherwise, when the action codes are all simple return token statements: + //token = this.simpleCaseActionClusters[indexed_rule]; + + if (this.done && this._input) { + this.done = false; + } + + if (token) { + return token; + } else if (this._backtrack) { + // recover context + for (var k in backup) { + this[k] = backup[k]; } - return false; + + this.__currentRuleSet__ = null; + return false; // rule action called reject() implying the next rule should be tested instead. 
+        } else if (this._signaled_error_token) {
+            // produce one 'error' token as `.parseError()` in `reject()`
+            // did not guarantee a failure signal by throwing an exception!
+            token = this._signaled_error_token;
+
+            this._signaled_error_token = false;
+            return token;
+        }
+
+        return false;
     },
-// return next match in input
-next:function () {
-        if (this.done) {
-            return this.EOF;
-        }
-        if (!this._input) {
-            this.done = true;
-        }
-
-        var token,
-            match,
-            tempMatch,
-            index;
-        if (!this._more) {
-            this.yytext = '';
-            this.match = '';
-        }
-        var rules = this._currentRules();
-        for (var i = 0; i < rules.length; i++) {
-            tempMatch = this._input.match(this.rules[rules[i]]);
-            if (tempMatch && (!match || tempMatch[0].length > match[0].length)) {
-                match = tempMatch;
-                index = i;
-                if (this.options.backtrack_lexer) {
-                    token = this.test_match(tempMatch, rules[i]);
-                    if (token !== false) {
-                        return token;
-                    } else if (this._backtrack) {
-                        match = false;
-                        continue; // rule action called reject() implying a rule MISmatch.
-                    } else {
-                        // else: this is a lexer rule which consumes input without producing a token (e.g. whitespace)
-                        return false;
-                    }
-                } else if (!this.options.flex) {
-                    break;
-                }
-            }
+    /**
+     * return next match in input
+     *
+     * @public
+     * @this {RegExpLexer}
+     */
+    next: function lexer_next() {
+        if (this.done) {
+            this.clear();
+            return this.EOF;
+        }
+
+        if (!this._input) {
+            this.done = true;
+        }
+
+        var token, match, tempMatch, index;
+
+        if (!this._more) {
+            this.clear();
+        }
+
+        var spec = this.__currentRuleSet__;
+
+        if (!spec) {
+            // Update the ruleset cache as we apparently encountered a state change or just started lexing.
+            // The cache is set up for fast lookup -- we assume a lexer will switch states much less often than it will
+            // invoke the `lex()` token-producing API and related APIs, hence caching the set for direct access helps
+            // speed up those activities a tiny bit.
+            spec = this.__currentRuleSet__ = this._currentRules();
+
+            // Check whether a *sane* condition has been pushed before: this makes the lexer robust against
+            // user-programmer bugs such as https://github.com/zaach/jison-lex/issues/19
+            if (!spec || !spec.rules) {
+                var lineno_msg = '';
+
+                if (this.options.trackPosition) {
+                    lineno_msg = ' on line ' + (this.yylineno + 1);
+                }
+
+                var p = this.constructLexErrorInfo(
+                    'Internal lexer engine error' + lineno_msg + ': The lex grammar programmer pushed a non-existing condition name "' + this.topState() + '"; this is a fatal error and should be reported to the application programmer team!',
+                    false
+                );
+
+                // produce one 'error' token until this situation has been resolved, most probably by parse termination!
+                return this.parseError(p.errStr, p, this.JisonLexerError) || this.ERROR;
             }
-        if (match) {
-            token = this.test_match(match, rules[index]);
+        }
+
+        var rule_ids = spec.rules;
+        var regexes = spec.__rule_regexes;
+        var len = spec.__rule_count;
+
+        // Note: the arrays are 1-based, while `len` itself is a valid index,
+        // hence the non-standard less-or-equal check in the next loop condition!
+        for (var i = 1; i <= len; i++) {
+            tempMatch = this._input.match(regexes[i]);
+
+            if (tempMatch && (!match || tempMatch[0].length > match[0].length)) {
+                match = tempMatch;
+                index = i;
+
+                if (this.options.backtrack_lexer) {
+                    token = this.test_match(tempMatch, rule_ids[i]);
+
                     if (token !== false) {
-                        return token;
+                        return token;
+                    } else if (this._backtrack) {
+                        match = undefined;
+                        continue; // rule action called reject() implying a rule MISmatch.
+                    } else {
+                        // else: this is a lexer rule which consumes input without producing a token (e.g. whitespace)
+                        return false;
                     }
+                } else if (!this.options.flex) {
+                    break;
+                }
             }
-        if (this._input === "") {
-            return this.EOF;
-        } else {
-            return this.parseError('Lexical error on line ' + (this.yylineno + 1) + '. Unrecognized text.\n' + this.showPosition(), {
-                text: "",
-                token: null,
-                line: this.yylineno
-            });
+        }
+
+        if (match) {
+            token = this.test_match(match, rule_ids[index]);
+
+            if (token !== false) {
+                return token;
             }
-    },
-// return next match that has a token
-lex:function lex() {
-        var r = this.next();
-        if (r) {
-            return r;
-        } else {
-            return this.lex();
+            // else: this is a lexer rule which consumes input without producing a token (e.g. whitespace)
+            return false;
+        }
+
+        if (!this._input) {
+            this.done = true;
+            this.clear();
+            return this.EOF;
+        } else {
+            var lineno_msg = '';
+
+            if (this.options.trackPosition) {
+                lineno_msg = ' on line ' + (this.yylineno + 1);
+            }
+
+            var p = this.constructLexErrorInfo(
+                'Lexical error' + lineno_msg + ': Unrecognized text.',
+                this.options.lexerErrorsAreRecoverable
+            );
+
+            var pendingInput = this._input;
+            var activeCondition = this.topState();
+            var conditionStackDepth = this.conditionStack.length;
+            token = this.parseError(p.errStr, p, this.JisonLexerError) || this.ERROR;
+
+            if (token === this.ERROR) {
+                // we can try to recover from a lexer error that `parseError()` did not 'recover' for us
+                // by moving forward at least one character at a time IFF the (user-specified?) `parseError()`
+                // has not consumed/modified any pending input or changed state in the error handler:
+                if (!this.matches && // and make sure the input has been modified/consumed ...
+                    pendingInput === this._input && // ...or the lexer state has been modified significantly enough
+                    // to merit a non-consuming error handling action right now.
+                    activeCondition === this.topState() && conditionStackDepth === this.conditionStack.length) {
+                    this.input();
+                }
             }
+
+            return token;
+        }
+    },
+
+    /**
+     * return next match that has a token
+     *
+     * @public
+     * @this {RegExpLexer}
+     */
+    lex: function lexer_lex() {
+        var r;
+
+        // allow the PRE/POST handlers set/modify the return token for maximum flexibility of the generated lexer:
+        if (typeof this.options.pre_lex === 'function') {
+            r = this.options.pre_lex.call(this);
+        }
+
+        while (!r) {
+            r = this.next();
+        }
+
+        if (typeof this.options.post_lex === 'function') {
+            // (also account for a userdef function which does not return any value: keep the token as is)
+            r = this.options.post_lex.call(this, r) || r;
+        }
+
+        return r;
     },
-// activates a new lexer condition state (pushes the new lexer condition state onto the condition stack)
-begin:function begin(condition) {
-        this.conditionStack.push(condition);
+    /**
+     * backwards compatible alias for `pushState()`;
+     * the latter is symmetrical with `popState()` and we advise to use
+     * those APIs in any modern lexer code, rather than `begin()`.
+     *
+     * @public
+     * @this {RegExpLexer}
+     */
+    begin: function lexer_begin(condition) {
+        return this.pushState(condition);
     },
-// pop the previously active lexer condition state off the condition stack
-popState:function popState() {
-        var n = this.conditionStack.length - 1;
-        if (n > 0) {
-            return this.conditionStack.pop();
-        } else {
-            return this.conditionStack[0];
-        }
+    /**
+     * activates a new lexer condition state (pushes the new lexer
+     * condition state onto the condition stack)
+     *
+     * @public
+     * @this {RegExpLexer}
+     */
+    pushState: function lexer_pushState(condition) {
+        this.conditionStack.push(condition);
+        this.__currentRuleSet__ = null;
+        return this;
     },
-// produce the lexer rule set which is active for the currently active lexer condition state
-_currentRules:function _currentRules() {
-        if (this.conditionStack.length && this.conditionStack[this.conditionStack.length - 1]) {
-            return this.conditions[this.conditionStack[this.conditionStack.length - 1]].rules;
-        } else {
-            return this.conditions["INITIAL"].rules;
-        }
+    /**
+     * pop the previously active lexer condition state off the condition
+     * stack
+     *
+     * @public
+     * @this {RegExpLexer}
+     */
+    popState: function lexer_popState() {
+        var n = this.conditionStack.length - 1;
+
+        if (n > 0) {
+            this.__currentRuleSet__ = null;
+            return this.conditionStack.pop();
+        } else {
+            return this.conditionStack[0];
+        }
     },
-// return the currently active lexer condition state; when an index argument is provided it produces the N-th previous condition state, if available
-topState:function topState(n) {
-        n = this.conditionStack.length - 1 - Math.abs(n || 0);
-        if (n >= 0) {
-            return this.conditionStack[n];
-        } else {
-            return "INITIAL";
-        }
+    /**
+     * return the currently active lexer condition state; when an index
+     * argument is provided it produces the N-th previous condition state,
+     * if available
+     *
+     * @public
+     * @this {RegExpLexer}
+     */
+    topState: function lexer_topState(n) {
+        n = this.conditionStack.length - 1 - Math.abs(n || 0);
+
+        if (n >= 0) {
+            return this.conditionStack[n];
+        } else {
+            return 'INITIAL';
+        }
     },
-// alias for begin(condition)
-pushState:function pushState(condition) {
-        this.begin(condition);
+    /**
+     * (internal) determine the lexer rule set which is active for the
+     * currently active lexer condition state
+     *
+     * @public
+     * @this {RegExpLexer}
+     */
+    _currentRules: function lexer__currentRules() {
+        if (this.conditionStack.length && this.conditionStack[this.conditionStack.length - 1]) {
+            return this.conditions[this.conditionStack[this.conditionStack.length - 1]];
+        } else {
+            return this.conditions['INITIAL'];
+        }
     },
-// return the number of states currently on the stack
-stateStackSize:function stateStackSize() {
-        return this.conditionStack.length;
+    /**
+     * return the number of states currently on the stack
+     *
+     * @public
+     * @this {RegExpLexer}
+     */
+    stateStackSize: function lexer_stateStackSize() {
+        return this.conditionStack.length;
     },
-options: {},
-performAction: function anonymous(yy,yy_,$avoiding_name_collisions,YY_START) {
-
-var YYSTATE=YY_START;
-switch($avoiding_name_collisions) {
-case 0:/* skip whitespace */
-break;
-case 1:return 12;
-break;
-case 2:yy_.yytext = yy_.yytext.substr(1, yy_.yyleng-2); return 11;
-break;
-case 3:return 12;
-break;
-case 4:return 12;
-break;
-case 5:return 'bar';
-break;
-case 6:return 13;
-break;
-case 7:return 14;
-break;
-case 8:return 15;
-break;
-case 9:return 16;
-break;
-case 10:return 7;
-break;
-case 11:return 17;
-break;
-case 12:return 5;
-break;
-}
-},
-rules: [/^(?:\s+)/,/^(?:([a-zA-Z][a-zA-Z0-9_-]*))/,/^(?:\[([a-zA-Z][a-zA-Z0-9_-]*)\])/,/^(?:'[^']*')/,/^(?:\.)/,/^(?:bar\b)/,/^(?:\()/,/^(?:\))/,/^(?:\*)/,/^(?:\?)/,/^(?:\|)/,/^(?:\+)/,/^(?:$)/],
-conditions: {"INITIAL":{"rules":[0,1,2,3,4,5,6,7,8,9,10,11,12],"inclusive":true}}
-};
-return lexer;
-})();
+
+    options: {
+        xregexp: true,
+        ranges: true,
+        trackPosition: true,
+        easy_keyword_rules: true
+    },
+
+    JisonLexerError: JisonLexerError,
+
+    performAction: function lexer__performAction(yy, yyrulenumber, YY_START) {
+        var yy_ = this;
+        var YYSTATE = YY_START;
+
+        switch (yyrulenumber) {
+        case 0:
+            /*! Conditions:: INITIAL */
+            /*! Rule:: \s+ */
+            /* skip whitespace */
+            break;
+
+        case 3:
+            /*! Conditions:: INITIAL */
+            /*! Rule:: \[{ID}\] */
+            yy_.yytext = this.matches[1];
+
+            return 9;
+            break;
+
+        default:
+            return this.simpleCaseActionClusters[yyrulenumber];
+        }
+    },
+
+    simpleCaseActionClusters: {
+        /*! Conditions:: INITIAL */
+        /*! Rule:: {ID} */
+        1: 10,
+
+        /*! Conditions:: INITIAL */
+        /*! Rule:: \$end\b */
+        2: 10,
+
+        /*! Conditions:: INITIAL */
+        /*! Rule:: '{QUOTED_STRING_CONTENT}' */
+        4: 10,
+
+        /*! Conditions:: INITIAL */
+        /*! Rule:: "{DOUBLEQUOTED_STRING_CONTENT}" */
+        5: 10,
+
+        /*! Conditions:: INITIAL */
+        /*! Rule:: \. */
+        6: 10,
+
+        /*! Conditions:: INITIAL */
+        /*! Rule:: \( */
+        7: 4,
+
+        /*! Conditions:: INITIAL */
+        /*! Rule:: \) */
+        8: 5,
+
+        /*! Conditions:: INITIAL */
+        /*! Rule:: \* */
+        9: 6,
+
+        /*! Conditions:: INITIAL */
+        /*! Rule:: \? */
+        10: 7,
+
+        /*! Conditions:: INITIAL */
+        /*! Rule:: \| */
+        11: 3,
+
+        /*! Conditions:: INITIAL */
+        /*! Rule:: \+ */
+        12: 8,
+
+        /*! Conditions:: INITIAL */
+        /*! Rule:: $ */
+        13: 1
+    },
+
+    rules: [
+        /*  0: */ /^(?:\s+)/,
+        /*  1: */ new XRegExp('^(?:([\\p{Alphabetic}_](?:[\\p{Alphabetic}\\p{Number}_])*))', ''),
+        /*  2: */ /^(?:\$end\b)/,
+        /*  3: */ new XRegExp('^(?:\\[([\\p{Alphabetic}_](?:[\\p{Alphabetic}\\p{Number}_])*)\\])', ''),
+        /*  4: */ /^(?:'((?:\\'|\\[^']|[^'\\])*)')/,
+        /*  5: */ /^(?:"((?:\\"|\\[^"]|[^"\\])*)")/,
+        /*  6: */ /^(?:\.)/,
+        /*  7: */ /^(?:\()/,
+        /*  8: */ /^(?:\))/,
+        /*  9: */ /^(?:\*)/,
+        /* 10: */ /^(?:\?)/,
+        /* 11: */ /^(?:\|)/,
+        /* 12: */ /^(?:\+)/,
+        /* 13: */ /^(?:$)/
+    ],
+
+    conditions: {
+        'INITIAL': {
+            rules: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13],
+            inclusive: true
+        }
+    }
+    };
+
+    return lexer;
+}();
 parser.lexer = lexer;
-function Parser () {
-    this.yy = {};
+
+function Parser() {
+    this.yy = {};
 }
-Parser.prototype = parser;parser.Parser = Parser;
-return new Parser;
-})();
-
-
-if (typeof require !== 'undefined' && typeof exports !== 'undefined') {
-exports.parser = ebnf;
-exports.Parser = ebnf.Parser;
-exports.parse = function () { return ebnf.parse.apply(ebnf, arguments); };
-exports.main = function commonjsMain(args) {
-    if (!args[1]) {
-        console.log('Usage: '+args[0]+' FILE');
-        process.exit(1);
-    }
-    var source = require('fs').readFileSync(require('path').normalize(args[1]), "utf8");
-    return exports.parser.parse(source);
-};
-if (typeof module !== 'undefined' && require.main === module) {
-    exports.main(process.argv.slice(1));
+Parser.prototype = parser;
+parser.Parser = Parser;
+
+function yyparse() {
+    return parser.parse.apply(parser, arguments);
 }
-}
\ No newline at end of file
+
+
+
+export default {
+    parser,
+    Parser,
+    parse: yyparse,
+
+};
+
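The tail of the diff replaces the old CommonJS export block with a plain ES-module default export exposing `parser`, `Parser`, and `parse`. A minimal consumption sketch follows; the import path and the EBNF fragment are illustrative assumptions only (they are not taken from the diff), and the exact shape of the returned value depends on the grammar actions defined in ebnf.y.

// Minimal usage sketch. Assumptions: the generated module is importable from
// './transform-parser.js' (per the Makefile rename of ebnf.js) and accepts an
// EBNF rule-body string; neither detail is confirmed by the diff itself.
import ebnfParser from './transform-parser.js';

// Parse one EBNF expression using the token set defined above:
// identifiers, [alias] tags, quoted literals, grouping '(...)',
// alternation '|', and the *, ?, + repetition operators.
const ast = ebnfParser.parse("statement (';' statement)* block?");

console.log(JSON.stringify(ast, null, 2));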