diff --git a/.gitignore b/.gitignore index c6e172a5b..9c2088800 100644 --- a/.gitignore +++ b/.gitignore @@ -3,7 +3,7 @@ target/ # ... but not code generation targets !tool/src/org/antlr/v4/codegen/target/ -# Node.js (npm and typings) cached dependencies +# Node.js (npm and typings) cached dependencies node_modules/ typings/ @@ -98,3 +98,6 @@ xcuserdata javac-services.0.log javac-services.0.log.lck test/ + +# Don't ignore python tests +!runtime/Python3/test/ diff --git a/.travis.yml b/.travis.yml index 0220f4ca2..58d4ccb4b 100644 --- a/.travis.yml +++ b/.travis.yml @@ -74,42 +74,42 @@ matrix: - clang-3.7 - os: osx compiler: clang - osx_image: xcode10.1 + osx_image: xcode10.2 env: - TARGET=cpp - GROUP=LEXER stage: extended-test - os: osx compiler: clang - osx_image: xcode10.1 + osx_image: xcode10.2 env: - TARGET=cpp - GROUP=PARSER stage: extended-test - os: osx compiler: clang - osx_image: xcode10.1 + osx_image: xcode10.2 env: - TARGET=cpp - GROUP=RECURSION stage: extended-test - os: osx compiler: clang - osx_image: xcode10.1 + osx_image: xcode10.2 env: - TARGET=swift - GROUP=LEXER stage: main-test - os: osx compiler: clang - osx_image: xcode10.1 + osx_image: xcode10.2 env: - TARGET=swift - GROUP=PARSER stage: main-test - os: osx compiler: clang - osx_image: xcode10.1 + osx_image: xcode10.2 env: - TARGET=swift - GROUP=RECURSION @@ -122,19 +122,19 @@ matrix: - GROUP=ALL stage: extended-test - os: osx - osx_image: xcode10.1 + osx_image: xcode10.2 env: - TARGET=dotnet - GROUP=LEXER stage: extended-test - os: osx - osx_image: xcode10.1 + osx_image: xcode10.2 env: - TARGET=dotnet - GROUP=PARSER stage: extended-test - os: osx - osx_image: xcode10.1 + osx_image: xcode10.2 env: - TARGET=dotnet - GROUP=RECURSION @@ -152,6 +152,10 @@ matrix: jdk: openjdk8 env: TARGET=csharp stage: main-test + - os: linux + jdk: openjdk8 + env: TARGET=dart + stage: main-test - os: linux language: php php: @@ -192,13 +196,16 @@ matrix: sources: - deadsnakes # source required so it finds the 
package definition below packages: - - python3.6 + - python3.7 stage: main-test - os: linux dist: trusty jdk: openjdk8 env: TARGET=javascript stage: main-test + before_install: + - nvm install 14 # otherwise it runs by default on node 8 + - f="./.travis/before-install-linux-javascript.sh"; ! [ -x "$f" ] || "$f" - os: linux dist: trusty jdk: openjdk8 @@ -211,7 +218,7 @@ before_install: script: - | cd runtime-testsuite; - travis_wait 40 ../.travis/run-tests-$TARGET.sh; - rc=$?; - cat target/surefire-reports/*.dumpstream || true; + travis_wait 40 ../.travis/run-tests-$TARGET.sh + rc=$? + cat target/surefire-reports/*.dumpstream || true exit $rc diff --git a/.travis/before-install-linux-cpp.sh b/.travis/before-install-linux-cpp.sh deleted file mode 100755 index c496c2cbd..000000000 --- a/.travis/before-install-linux-cpp.sh +++ /dev/null @@ -1,6 +0,0 @@ -#!/bin/bash - -set -euo pipefail - -sudo apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys 3FA7E0328081BFF6A14DA29AA6A19B38D3D831EF -sudo apt-get update -qq diff --git a/.travis/before-install-linux-dart.sh b/.travis/before-install-linux-dart.sh new file mode 100755 index 000000000..d87086ed4 --- /dev/null +++ b/.travis/before-install-linux-dart.sh @@ -0,0 +1,7 @@ +#!/bin/bash + +set -euo pipefail +wget https://storage.googleapis.com/dart-archive/channels/stable/release/2.8.4/linux_packages/dart_2.8.4-1_amd64.deb +sudo dpkg -i ./dart_2.8.4-1_amd64.deb +sudo rm ./dart_2.8.4-1_amd64.deb +sudo apt-get install -f diff --git a/.travis/before-install-linux-go.sh b/.travis/before-install-linux-go.sh index 71adcfadd..16c828180 100755 --- a/.travis/before-install-linux-go.sh +++ b/.travis/before-install-linux-go.sh @@ -2,7 +2,5 @@ set -euo pipefail -sudo apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys 3FA7E0328081BFF6A14DA29AA6A19B38D3D831EF -sudo apt-get update -qq eval "$(sudo gimme 1.7.3)" ( go version ; go env ) || true diff --git a/.travis/before-install-linux-java.sh 
b/.travis/before-install-linux-java.sh deleted file mode 100755 index c496c2cbd..000000000 --- a/.travis/before-install-linux-java.sh +++ /dev/null @@ -1,6 +0,0 @@ -#!/bin/bash - -set -euo pipefail - -sudo apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys 3FA7E0328081BFF6A14DA29AA6A19B38D3D831EF -sudo apt-get update -qq diff --git a/.travis/before-install-linux-javascript.sh b/.travis/before-install-linux-javascript.sh index c70757d53..5ad81f41c 100755 --- a/.travis/before-install-linux-javascript.sh +++ b/.travis/before-install-linux-javascript.sh @@ -2,8 +2,6 @@ set -euo pipefail -sudo apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys 3FA7E0328081BFF6A14DA29AA6A19B38D3D831EF -sudo apt-get update -qq -curl -sL https://deb.nodesource.com/setup_0.12 | sudo -E bash - -sudo apt-get install -qq nodejs -node --version +# use v14 and check +echo node version: $(node --version) + diff --git a/.travis/before-install-linux-python2.sh b/.travis/before-install-linux-python2.sh deleted file mode 100755 index d79e21610..000000000 --- a/.travis/before-install-linux-python2.sh +++ /dev/null @@ -1,7 +0,0 @@ -#!/bin/bash - -set -euo pipefail - -sudo apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys 3FA7E0328081BFF6A14DA29AA6A19B38D3D831EF -sudo apt-get update -qq -python --version diff --git a/.travis/before-install-linux-python3.sh b/.travis/before-install-linux-python3.sh deleted file mode 100755 index a3b091260..000000000 --- a/.travis/before-install-linux-python3.sh +++ /dev/null @@ -1,5 +0,0 @@ -#!/bin/bash - -set -euo pipefail - -python3 --version diff --git a/.travis/run-tests-dart.sh b/.travis/run-tests-dart.sh new file mode 100755 index 000000000..8053a90d7 --- /dev/null +++ b/.travis/run-tests-dart.sh @@ -0,0 +1,4 @@ +#!/bin/bash + +set -euo pipefail +mvn -q -Dparallel=classes -DthreadCount=4 -Dtest=dart.* test diff --git a/.travis/run-tests-javascript.sh b/.travis/run-tests-javascript.sh index 013321870..ae8a7e8bc 100755 
--- a/.travis/run-tests-javascript.sh +++ b/.travis/run-tests-javascript.sh @@ -2,4 +2,8 @@ set -euo pipefail -mvn -q -Dparallel=methods -DthreadCount=4 -Dtest=node.* test +cd ../runtime/JavaScript +npm install +npm link +cd ../../runtime-testsuite +mvn -q -Dparallel=methods -DthreadCount=1 -Dtest=javascript.* test diff --git a/.travis/run-tests-python2.sh b/.travis/run-tests-python2.sh index d2a22dc77..c5cd0ca99 100755 --- a/.travis/run-tests-python2.sh +++ b/.travis/run-tests-python2.sh @@ -2,8 +2,10 @@ set -euo pipefail +python --version + mvn -q -Dparallel=methods -DthreadCount=4 -Dtest=python2.* test cd ../runtime/Python2/tests -python run.py \ No newline at end of file +python run.py diff --git a/.travis/run-tests-python3.sh b/.travis/run-tests-python3.sh index 9b528f35b..8b74928c5 100755 --- a/.travis/run-tests-python3.sh +++ b/.travis/run-tests-python3.sh @@ -2,8 +2,10 @@ set -euo pipefail +python3 --version + mvn -q -Dparallel=methods -DthreadCount=4 -Dtest=python3.* test cd ../runtime/Python3/test -python3.6 run.py \ No newline at end of file +python3 run.py diff --git a/.travis/run-tests-swift.sh b/.travis/run-tests-swift.sh index 0e297c503..677f356b2 100755 --- a/.travis/run-tests-swift.sh +++ b/.travis/run-tests-swift.sh @@ -6,7 +6,7 @@ set -euo pipefail # here since environment variables doesn't pass # across scripts if [ $TRAVIS_OS_NAME == "linux" ]; then - export SWIFT_VERSION=swift-4.2.1 + export SWIFT_VERSION=swift-5.0.1 export SWIFT_HOME=$(pwd)/swift/$SWIFT_VERSION-RELEASE-ubuntu16.04/usr/bin/ export PATH=$SWIFT_HOME:$PATH diff --git a/README.md b/README.md index ec5c21c62..44cbf62a8 100644 --- a/README.md +++ b/README.md @@ -23,6 +23,7 @@ ANTLR project lead and supreme dictator for life * [Ewan Mellor](https://github.com/ewanmellor), [Hanzhou Shi](https://github.com/hanjoes) (Swift target merging) * [Ben Hamilton](https://github.com/bhamiltoncx) (Full Unicode support in serialized ATN and all languages' runtimes for code points > U+FFFF) * 
[Marcos Passos](https://github.com/marcospassos) (PHP target) +* [Lingyu Li](https://github.com/lingyv-li) (Dart target) ## Useful information diff --git a/antlr4-maven-plugin/pom.xml b/antlr4-maven-plugin/pom.xml index 6e57f5e79..fa58b32b3 100644 --- a/antlr4-maven-plugin/pom.xml +++ b/antlr4-maven-plugin/pom.xml @@ -8,7 +8,7 @@ org.antlr antlr4-master - 4.7.3-SNAPSHOT + 4.8-2-SNAPSHOT antlr4-maven-plugin maven-plugin @@ -120,7 +120,7 @@ org.apache.maven.plugins maven-plugin-plugin - 3.3 + 3.6.0 true diff --git a/appveyor.yml b/appveyor.yml index d58657c98..aaa7b37c9 100644 --- a/appveyor.yml +++ b/appveyor.yml @@ -6,7 +6,9 @@ image: Visual Studio 2017 build: off install: - git submodule update --init --recursive - - cinst -y php composer + - cinst -y php --params "/InstallDir:C:\tools\php" + - cinst -y composer + - cinst -y dart-sdk --version=2.8.4 build_script: - mvn -DskipTests install --batch-mode - msbuild /target:restore /target:rebuild /property:Configuration=Release /logger:"C:\Program Files\AppVeyor\BuildAgent\Appveyor.MSBuildLogger.dll" /verbosity:detailed runtime/CSharp/runtime/CSharp/Antlr4.dotnet.sln @@ -14,7 +16,7 @@ build_script: after_build: - msbuild /target:pack /property:Configuration=Release /verbosity:detailed runtime/CSharp/runtime/CSharp/Antlr4.dotnet.sln test_script: - - mvn install -Dantlr-php-php="C:\tools\php73\php.exe" -Dantlr-python2-python="C:\Python27\python.exe" -Dantlr-python3-python="C:\Python35\python.exe" -Dantlr-javascript-nodejs="C:\Program Files (x86)\nodejs\node.exe" --batch-mode + - mvn install -Dantlr-php-php="C:\tools\php\php.exe" -Dantlr-dart-dart="C:\tools\dart-sdk\bin\dart.exe" -Dantlr-dart-pub="C:\tools\dart-sdk\bin\pub.bat" -Dantlr-dart-dart2native="C:\tools\dart-sdk\bin\dart2native.bat" -Dantlr-python2-python="C:\Python27\python.exe" -Dantlr-python3-python="C:\Python35\python.exe" --batch-mode artifacts: - path: 'runtime\**\*.nupkg' name: NuGet \ No newline at end of file diff --git a/contributors.txt 
b/contributors.txt index 03ab8e18c..6a1804d70 100644 --- a/contributors.txt +++ b/contributors.txt @@ -1,5 +1,9 @@ ANTLR Project Contributors Certification of Origin and Rights +NOTE: This tool is mature and Terence is mostly occupied elsewhere. We +can't accept any changes that could have widespread effects on thousands +of existing projects. Sorry! + All contributors to ANTLR v4 must formally agree to abide by this certificate of origin by signing on the bottom with their github userid, full name, email address (you can obscure your e-mail, but it @@ -204,6 +208,7 @@ YYYY/MM/DD, github id, Full name, email 2018/07/03, jgoppert, James Goppert, james.goppert@gmail.com 2018/07/27, Maksim Novikov, mnovikov.work@gmail.com 2018/08/03, ENDOH takanao, djmchl@gmail.com +2018/10/08, xsIceman, Andreas Skaar, andreas.skaar@gmail.com 2018/10/18, edirgarcia, Edir García Lazo, edirgl@hotmail.com 2018/07/31, Lucas Henrqiue, lucashenrique580@gmail.com 2018/08/03, ENDOH takanao, djmchl@gmail.com @@ -214,7 +219,10 @@ YYYY/MM/DD, github id, Full name, email 2018/11/29, hannemann-tamas, Ralf Hannemann-Tamas, ralf.ht@gmail.com 2018/12/20, WalterCouto, Walter Couto, WalterCouto@users.noreply.github.com 2018/12/23, youkaichao, Kaichao You, youkaichao@gmail.com +2019/01/02, wkhemir, Wail Khemir, khemirwail@gmail.com +2019/01/16, kuegi, Markus Zancolo, markus.zancolo@roomle.com 2019/02/06, ralucado, Cristina Raluca Vijulie, ralucris.v[at]gmail[dot]com +2019/02/23, gedimitr, Gerasimos Dimitriadis, gedimitr@gmail.com 2019/03/13, base698, Justin Thomas, justin.thomas1@gmail.com 2019/03/18, carlodri, Carlo Dri, carlo.dri@gmail.com 2019/05/02, askingalot, Andy Collins, askingalot@gmail.com @@ -229,5 +237,43 @@ YYYY/MM/DD, github id, Full name, email 2019/09/10, yar3333, Yaroslav Sivakov, yar3333@gmail.com 2019/09/10, marcospassos, Marcos Passos, marcospassos.com@gmail.com 2019/09/10, amorimjuliana, Juliana Amorim, juu.amorim@gmail.com +2019/09/15, sullis, Sean Sullivan, github@seansullivan.com 
2019/09/17, kaz, Kazuki Sawada, kazuki@6715.jp 2019/09/28, lmy269, Mingyang Liu, lmy040758@gmail.com +2019/10/29, tehbone, Tabari Alexander, tehbone@gmail.com +2019/10/31, a-square, Alexei Averchenko, lex.aver@gmail.com +2019/11/05, listba, Ben List, ben.list89@gmail.com +2019/11/11, foxeverl, Liu Xinfeng, liuxf1986[at]gmail[dot]com +2019/11/17, felixn, Felix Nieuwenhuizhen, felix@tdlrali.com +2019/11/18, mlilback, Mark Lilback, mark@lilback.com +2020/01/19, lingyv-li, Lingyu Li, lingyv.li@gmail.com +2020/02/02, carocad, Camilo Roca, carocad@unal.edu.co +2020/02/10, julibert, Julián Bermúdez Ortega, julibert.dev@gmail.com +2020/02/17, quantumsheep, Nathanael Demacon, nathanael.dmc@outlook.fr +2020/02/21, StochasticTinkr, Daniel Pitts, github@coloraura.com +2020/03/17, XsongyangX, Song Yang, songyang1218@gmail.com +2020/04/07, deniskyashif, Denis Kyashif, denis.kyashif@gmail.com +2020/04/08, lwehmeier, Leon Wehmeier, wehmeier@st.ovgu.de +2020/04/10, agrabski, Adam Grabski, adam.gr@outlook.com +2020/04/23, martinvw, Martin van Wingerden, martin@martinvw.nl +2020/04/30, TristonianJones, Tristan Swadell, tswadell@google.com +2020/05/06, iammosespaulr, Moses Paul R, iammosespaulr@gmail.com +2020/05/10, gomerser, Erik Gomersbach, gomerser@gomersba.ch +2020/05/22, keywan-ghadami-oxid, Keywan Ghadami, keywan.ghadami@oxid-esales.com +2020/05/25, graknol, Sindre van der Linden, graknol@gmail.com +2020/05/31, d-markey, David Markey, dmarkey@free.fr +2020/06/02, cohomology, Kilian Kilger, kkilger AT gmail.com +2020/06/04, IohannRabeson, Iohann Rabeson, iotaka6@gmail.com +2020/06/04, sigmasoldi3r, Pablo Blanco, pablobc.1995@gmail.com +2020/07/01, sha-N, Shan M Mathews, admin@bluestarqatar.com +2020/08/22, stevenjohnstone, Steven Johnstone, steven.james.johnstone@gmail.com +2020/09/06, ArthurSonzogni, Sonzogni Arthur, arthursonzogni@gmail.com +2020/09/10, Khailian, Arunav Sanyal, arunav.sanyal91@gmail.com +2020/09/12, Clcanny, Charles Ruan, a837940593@gmail.com +2020/09/15, 
rmcgregor1990, Robert McGregor, rmcgregor1990@gmail.com +2020/09/16, trenki2, Markus Trenkwalder, trenki2[at]gmx[dot]net +2020/10/08, Marti2203, Martin Mirchev, mirchevmartin2203@gmail.com +2020/10/16, adarshbhat, Adarsh Bhat, adarshbhat@users.noreply.github.com +2020/10/20, adamwojs, Adam Wójs, adam[at]wojs.pl +2020/10/24, cliid, Jiwu Jang, jiwujang@naver.com +2020/11/05, MichelHartmann, Michel Hartmann, MichelHartmann@users.noreply.github.com \ No newline at end of file diff --git a/doc/cpp-target.md b/doc/cpp-target.md index eec7cf88b..b2fc38284 100644 --- a/doc/cpp-target.md +++ b/doc/cpp-target.md @@ -1,6 +1,6 @@ # C++ -The C++ target supports all platforms that can either run MS Visual Studio 2013 (or newer), XCode 7 (or newer) or CMake (C++11 required). All build tools can either create static or dynamic libraries, both as 64bit or 32bit arch. Additionally, XCode can create an iOS library. Also see [Antlr4 for C++ with CMake: A practical example](http://blorente.me//Antlr-,-C++-and-CMake-Wait-what.html). +The C++ target supports all platforms that can either run MS Visual Studio 2013 (or newer), XCode 7 (or newer) or CMake (C++11 required). All build tools can either create static or dynamic libraries, both as 64bit or 32bit arch. Additionally, XCode can create an iOS library. Also see [Antlr4 for C++ with CMake: A practical example](http://blorente.me/beyond-the-loop/Antlr-cpp-cmake/). ## How to create a C++ lexer or parser? 
This is pretty much the same as creating a Java lexer or parser, except you need to specify the language target, for example: @@ -65,7 +65,7 @@ int main(int argc, const char* argv[]) { tree::ParseTree *tree = parser.key(); TreeShapeListener listener; - tree::ParseTreeWalker::DEFAULT->walk(&listener, tree); + tree::ParseTreeWalker::DEFAULT.walk(&listener, tree); return 0; } diff --git a/doc/creating-a-language-target.md b/doc/creating-a-language-target.md index ff7db290e..dd06208ea 100644 --- a/doc/creating-a-language-target.md +++ b/doc/creating-a-language-target.md @@ -10,6 +10,8 @@ Creating a new target involves the following key elements: 1. Create *X*.stg in directory tool/resources/org/antlr/v4/tool/templates/codegen/*X*/*X*.stg. This is a [StringTemplate](http://www.stringtemplate.org/) group file (`.stg`) that tells ANTLR how to express all of the parsing elements needed to generate code. You will see templates called `ParserFile`, `Parser`, `Lexer`, `CodeBlockForAlt`, `AltBlock`, etc... Each of these must be described how to build the indicated chunk of code. Your best bet is to find the closest existing target, copy that template file, and tweak to suit. 1. Create a runtime library to support the parsers generated by ANTLR. Under directory runtime/*X*, you are in complete control of the directory structure as dictated by common usage of that target language. For example, Java has: `runtime/Java/lib` and `runtime/Java/src` directories. Under `src`, you will find a directory structure for package `org.antlr.v4.runtime` and below. 1. Create a template file for runtime tests. All you have to do is provide a few templates that indicate how to print values and declare variables. Our runtime test mechanism in dir `runtime-testsuite` will automatically generate code using these templates for each target and check the test results. It needs to know how to define various class fields, compare members and so on. 
You must create a *X*.test.stg file underneath [runtime-testsuite/resources/org/antlr/v4/test/runtime](https://github.com/antlr/antlr4/tree/master/runtime-testsuite/resources/org/antlr/v4/test/runtime). Again, your best bet is to copy the templates from the closest language to your target and tweak it to suit. +1. Create test files under [/runtime-testsuite/test/org/antlr/v4/test/runtime](https://github.com/antlr/antlr4/tree/master/runtime-testsuite/test/org/antlr/v4/test/runtime). They will load defined test cases in each test descriptor. Also add the `/runtime-testsuite/test/org/antlr/v4/test/runtime/X/BaseXTest.java` which defines how test cases will execute and output. +1. Create/edit shell scripts in [/.travis](https://github.com/antlr/antlr4/blob/master/.travis) and [/appveyor.yml](https://github.com/antlr/antlr4/blob/master/appveyor.yml) to run tests in CI pipelines. ## Getting started diff --git a/doc/csharp-target.md b/doc/csharp-target.md index a869a82f6..e2551aa7a 100644 --- a/doc/csharp-target.md +++ b/doc/csharp-target.md @@ -86,7 +86,7 @@ In order to execute this listener, you would simply add the following lines to t ... IParseTree tree = parser.StartRule() - only repeated here for reference KeyPrinter printer = new KeyPrinter(); -ParseTreeWalker.DEFAULT.walk(printer, tree); +ParseTreeWalker.Default.Walk(printer, tree); ``` Further information can be found from The Definitive ANTLR Reference book. diff --git a/doc/dart-target.md b/doc/dart-target.md new file mode 100644 index 000000000..eb4da0f72 --- /dev/null +++ b/doc/dart-target.md @@ -0,0 +1,117 @@ +# ANTLR4 Runtime for Dart + +Notice: Dart target may generate code incompatible with Dart 2.9 sound null safety. Please set the minimum SDK constraint to 2.8.4 or lower if such violation is found. Contributions are welcomed. + +### First steps + +#### 1. Install ANTLR4 + +[The getting started guide](https://github.com/antlr/antlr4/blob/master/doc/getting-started.md) +should get you started. + +#### 2. 
Install the Dart ANTLR runtime + +Each target language for ANTLR has a runtime package for running parser +generated by ANTLR4. The runtime provides a common set of tools for using your parser. + +Install the runtime with the same version as the main ANTLR tool: + +Add this to your package's pubspec.yaml file: +```yaml +... +dependencies: + antlr4: +... +``` + +#### 3. Generate your parser + +You use the ANTLR4 "tool" to generate a parser. These will reference the ANTLR +runtime, installed above. + +Suppose you're using a UNIX system and have set up an alias for the ANTLR4 tool +as described in [the getting started guide](https://github.com/antlr/antlr4/blob/master/doc/getting-started.md). +To generate your Dart parser, run the following command: + +```shell script +antlr4 -Dlanguage=Dart MyGrammar.g4 +``` + +For a full list of antlr4 tool options, please visit the +[tool documentation page](https://github.com/antlr/antlr4/blob/master/doc/tool-options.md). + +### Complete example + +Suppose you're using the JSON grammar from https://github.com/antlr/grammars-v4/tree/master/json. + +Then, invoke `antlr4 -Dlanguage=Dart JSON.g4`. The result of this is a +collection of `.dart` including: + +* JsonLexer.dart +* JsonParser.dart +* JsonBaseListener.dart +* JsonListener.dart (if you have not activated the -no-listener option) +* JsonVisitor.dart (if you have activated the -visitor option) + +We'll write a small main func to call the generated parser/lexer +(assuming they are separate). 
This one writes out the encountered +`ParseTreeContext`'s: + +```dart +import 'package:antlr4/antlr4.dart'; +import 'package:my_project/JSONParser.dart'; +import 'package:my_project/JSONLexer.dart'; + +class TreeShapeListener implements ParseTreeListener { + @override + void enterEveryRule(ParserRuleContext ctx) { + print(ctx.text); + } + + @override + void exitEveryRule(ParserRuleContext node) { + } + + @override + void visitErrorNode(ErrorNode node) { + } + + @override + void visitTerminal(TerminalNode node) { + } +} + +void main(List args) async { + JSONLexer.checkVersion(); + JSONParser.checkVersion(); + final input = await InputStream.fromPath(args[0]); + final lexer = JSONLexer(input); + final tokens = CommonTokenStream(lexer); + final parser = JSONParser(tokens); + parser.addErrorListener(DiagnosticErrorListener()); + parser.buildParseTree = true; + final tree = parser.json(); + ParseTreeWalker.DEFAULT.walk(TreeShapeListener(), tree); +} +``` + +Create a `example.json` file: +```json +{"a":1} +``` + +Parse the input file: + +```shell script +dart bin/main.dart example.json +``` + +The expected output is: + +``` +{"a":1} +{"a":1} +{"a":1} +"a":1 +1 +``` \ No newline at end of file diff --git a/doc/faq/index.md b/doc/faq/index.md index 734fc6c13..9dd72165f 100644 --- a/doc/faq/index.md +++ b/doc/faq/index.md @@ -38,8 +38,8 @@ This is the main landing page for the ANTLR 4 FAQ. 
The links below will take you ## Translation -* [ASTs vs parse trees](parse-trees.md) -* [Decoupling input walking from output generation](parse-trees.md) +* [ASTs vs parse trees](translation.md) +* [Decoupling input walking from output generation](translation.md) ## Actions and semantic predicates diff --git a/doc/faq/parse-trees.md b/doc/faq/parse-trees.md index 5a243cedb..48ce56315 100644 --- a/doc/faq/parse-trees.md +++ b/doc/faq/parse-trees.md @@ -50,7 +50,7 @@ For writing a compiler, either generate [LLVM-type static-single-assignment](htt ### XPath -XPath works great when you need to find specific nodes, possibly in certain contexts. The context is limited to the parents on the way to the root of the tree. For example, if you want to find all ID nodes, use path `//ID`. If you want all variable declarations, you might use path `//vardecl`. If you only want fields declarations, then you can use some context information via path `/classdef/vardecl`, which would only find vardecls that our children of class definitions. You can merge the results of multiple XPath `findAll()`s simulating a set union for XPath. The only caveat is that the order from the original tree is not preserved when you union multiple `findAll()` sets. +XPath works great when you need to find specific nodes, possibly in certain contexts. The context is limited to the parents on the way to the root of the tree. For example, if you want to find all ID nodes, use path `//ID`. If you want all variable declarations, you might use path `//vardecl`. If you only want fields declarations, then you can use some context information via path `/classdef/vardecl`, which would only find vardecls that are children of class definitions. You can merge the results of multiple XPath `findAll()`s simulating a set union for XPath. The only caveat is that the order from the original tree is not preserved when you union multiple `findAll()` sets. 
### Tree pattern matching @@ -70,4 +70,4 @@ scopeStack.peek().define(new VariableSymbol("foo")) That way each listener function does not have to compute its appropriate scope. -Examples: [DefScopesAndSymbols.java](https://github.com/mantra/compiler/blob/master/src/java/mantra/semantics/DefScopesAndSymbols.java) and [SetScopeListener.java](https://github.com/mantra/compiler/blob/master/src/java/mantra/semantics/SetScopeListener.java) and [VerifyListener.java](https://github.com/mantra/compiler/blob/master/src/java/mantra/semantics/VerifyListener.java) \ No newline at end of file +Examples: [DefScopesAndSymbols.java](https://github.com/mantra/compiler/blob/master/src/java/mantra/semantics/DefScopesAndSymbols.java) and [SetScopeListener.java](https://github.com/mantra/compiler/blob/master/src/java/mantra/semantics/SetScopeListener.java) and [VerifyListener.java](https://github.com/mantra/compiler/blob/master/src/java/mantra/semantics/VerifyListener.java) diff --git a/doc/getting-started.md b/doc/getting-started.md index 4614c67f6..2bc998e64 100644 --- a/doc/getting-started.md +++ b/doc/getting-started.md @@ -6,46 +6,46 @@ Hi and welcome to the version 4 release of ANTLR! It's named after the fearless ANTLR is really two things: a tool that translates your grammar to a parser/lexer in Java (or other target language) and the runtime needed by the generated parsers/lexers. Even if you are using the ANTLR Intellij plug-in or ANTLRWorks to run the ANTLR tool, the generated code will still need the runtime library. -The first thing you should do is probably download and install a development tool plug-in. Even if you only use such tools for editing, they are great. Then, follow the instructions below to get the runtime environment available to your system to run generated parsers/lexers. In what follows, I talk about antlr-4.7.1-complete.jar, which has the tool and the runtime and any other support libraries (e.g., ANTLR v4 is written in v3). 
+The first thing you should do is probably download and install a development tool plug-in. Even if you only use such tools for editing, they are great. Then, follow the instructions below to get the runtime environment available to your system to run generated parsers/lexers. In what follows, I talk about antlr-4.9-complete.jar, which has the tool and the runtime and any other support libraries (e.g., ANTLR v4 is written in v3). -If you are going to integrate ANTLR into your existing build system using mvn, ant, or want to get ANTLR into your IDE such as eclipse or intellij, see Integrating ANTLR into Development Systems. +If you are going to integrate ANTLR into your existing build system using mvn, ant, or want to get ANTLR into your IDE such as eclipse or intellij, see [Integrating ANTLR into Development Systems](https://github.com/antlr/antlr4/blob/master/doc/IDEs.md). ### UNIX -0. Install Java (version 1.6 or higher) +0. Install Java (version 1.7 or higher) 1. Download ``` $ cd /usr/local/lib -$ curl -O https://www.antlr.org/download/antlr-4.7.1-complete.jar +$ curl -O https://www.antlr.org/download/antlr-4.9-complete.jar ``` Or just download in browser from website: [https://www.antlr.org/download.html](https://www.antlr.org/download.html) and put it somewhere rational like `/usr/local/lib`. -2. Add `antlr-4.7.1-complete.jar` to your `CLASSPATH`: +2. Add `antlr-4.9-complete.jar` to your `CLASSPATH`: ``` -$ export CLASSPATH=".:/usr/local/lib/antlr-4.7.1-complete.jar:$CLASSPATH" +$ export CLASSPATH=".:/usr/local/lib/antlr-4.9-complete.jar:$CLASSPATH" ``` It's also a good idea to put this in your `.bash_profile` or whatever your startup script is. 3. Create aliases for the ANTLR Tool, and `TestRig`. 
``` -$ alias antlr4='java -Xmx500M -cp "/usr/local/lib/antlr-4.7.1-complete.jar:$CLASSPATH" org.antlr.v4.Tool' -$ alias grun='java -Xmx500M -cp "/usr/local/lib/antlr-4.7.1-complete.jar:$CLASSPATH" org.antlr.v4.gui.TestRig' +$ alias antlr4='java -Xmx500M -cp "/usr/local/lib/antlr-4.9-complete.jar:$CLASSPATH" org.antlr.v4.Tool' +$ alias grun='java -Xmx500M -cp "/usr/local/lib/antlr-4.9-complete.jar:$CLASSPATH" org.antlr.v4.gui.TestRig' ``` ### WINDOWS (*Thanks to Graham Wideman*) -0. Install Java (version 1.6 or higher) -1. Download antlr-4.7.1-complete.jar (or whatever version) from [https://www.antlr.org/download/](https://www.antlr.org/download/) +0. Install Java (version 1.7 or higher) +1. Download antlr-4.9-complete.jar (or whatever version) from [https://www.antlr.org/download/](https://www.antlr.org/download/) Save to your directory for 3rd party Java libraries, say `C:\Javalib` -2. Add `antlr-4.7.1-complete.jar` to CLASSPATH, either: +2. Add `antlr-4.9-complete.jar` to CLASSPATH, either: * Permanently: Using System Properties dialog > Environment variables > Create or append to `CLASSPATH` variable * Temporarily, at command line: ``` -SET CLASSPATH=.;C:\Javalib\antlr-4.7.1-complete.jar;%CLASSPATH% +SET CLASSPATH=.;C:\Javalib\antlr-4.9-complete.jar;%CLASSPATH% ``` 3. Create short convenient commands for the ANTLR Tool, and TestRig, using batch files or doskey commands: * Batch files (in directory in system PATH) antlr4.bat and grun.bat @@ -71,7 +71,7 @@ Either launch org.antlr.v4.Tool directly: ``` $ java org.antlr.v4.Tool -ANTLR Parser Generator Version 4.7.1 +ANTLR Parser Generator Version 4.9 -o ___ specify output directory where all output is generated -lib ___ specify location of .tokens files ... 
@@ -80,8 +80,8 @@ ANTLR Parser Generator Version 4.7.1 or use -jar option on java: ``` -$ java -jar /usr/local/lib/antlr-4.7.1-complete.jar -ANTLR Parser Generator Version 4.7.1 +$ java -jar /usr/local/lib/antlr-4.9-complete.jar +ANTLR Parser Generator Version 4.9 -o ___ specify output directory where all output is generated -lib ___ specify location of .tokens files ... @@ -138,3 +138,4 @@ The book has lots and lots of examples that should be useful too. You can downlo Also, there is a large collection of grammars for v4 at github: [https://github.com/antlr/grammars-v4](https://github.com/antlr/grammars-v4) +/ \ No newline at end of file diff --git a/doc/javascript-target.md b/doc/javascript-target.md index 845fcba59..da01ca2a2 100644 --- a/doc/javascript-target.md +++ b/doc/javascript-target.md @@ -11,11 +11,18 @@ In practice, this target has been extensively tested against: * Chrome 39.0.2171 * Explorer 11.0.3 -The tests were conducted using Selenium. No issue was found, so you should find that the runtime works pretty much against any recent JavaScript engine. +The above tests were conducted using Selenium. No issue was found, so you should find that the runtime works pretty much against any recent JavaScript engine. ## Is NodeJS supported? -The runtime has also been extensively tested against Node.js 0.12.7. No issue was found. +The runtime has also been extensively tested against Node.js 14 LTS. No issue was found. +NodeJS together with a packaging tool is now the preferred development path, developers are encouraged to follow it. + +## What about modules? + +Starting with version 8.1, Antlr4 JavaScript runtime follows esm semantics (see https://tc39.es/ecma262/#sec-modules for details) +Generated lexers, parsers, listeners and visitors also follow this new standard. +If you have used previous versions of the runtime, you will need to migrate and make your parser a module. ## How to create a JavaScript lexer or parser? 
@@ -55,32 +62,20 @@ You can get [information on webpack here](https://webpack.github.io). The steps to create your parsing code are the following: - generate your lexer, parser, listener and visitor using the antlr tool - - write your parse tree handling code by providig your custom listener or visitor, and associated code, using 'require' to load antlr. + - write your parse tree handling code by providing your custom listener or visitor, and associated code, using 'require' to load antlr. - create an index.js file with the entry point to your parsing code (or several if required). - test your parsing logic thoroughly using node.js You are now ready to bundle your parsing code as follows: - following webpack specs, create a webpack.config file - - in the webpack.config file, exclude node.js only modules using: node: { module: "empty", net: "empty", fs: "empty" } + - For Webpack version 5, + - in the `webpack.config` file, exclude node.js only modules using: `resolve: { fallback: { fs: false } }` + - For older versions of Webpack, + - in the `webpack.config` file, exclude node.js only modules using: `node: { module: "empty", net: "empty", fs: "empty" }` - from the cmd line, navigate to the directory containing webpack.config and type: webpack This will produce a single js file containing all your parsing code. Easy to include in your web pages! -If you can't use webpack, you can use the lib/require.js script which implements the Node.js 'require' function in brwsers. - -This script is provided by Torben Haase, and is NOT part of ANTLR JavaScript runtime. - -Assuming you have, at the root of your web site, both the 'antlr4' directory and a 'lib' directory with 'require.js' inside it, all you need to put in your HTML header is the following: - -```xml - -``` - -This will load the runtime asynchronously. - ## How do I run the generated lexer and/or parser? Let's suppose that your grammar is named, as above, "MyGrammar". 
Let's suppose this parser comprises a rule named "StartRule". The tool will have generated for you the following files: @@ -95,18 +90,18 @@ Let's suppose that your grammar is named, as above, "MyGrammar". Let's suppose t Now a fully functioning script might look like the following: ```javascript - var antlr4 = require('antlr4'); - var MyGrammarLexer = require('./MyGrammarLexer').MyGrammarLexer; - var MyGrammarParser = require('./MyGrammarParser').MyGrammarParser; - var MyGrammarListener = require('./MyGrammarListener').MyGrammarListener; + import antlr4 from 'antlr4'; + import MyGrammarLexer from './MyGrammarLexer.js'; + import MyGrammarParser from './MyGrammarParser.js'; + import MyGrammarListener from './MyGrammarListener.js'; - var input = "your text to parse here" - var chars = new antlr4.InputStream(input); - var lexer = new MyGrammarLexer(chars); - var tokens = new antlr4.CommonTokenStream(lexer); - var parser = new MyGrammarParser(tokens); + const input = "your text to parse here" + const chars = new antlr4.InputStream(input); + const lexer = new MyGrammarLexer(chars); + const tokens = new antlr4.CommonTokenStream(lexer); + const parser = new MyGrammarParser(tokens); parser.buildParseTrees = true; - var tree = parser.MyStartRule(); + const tree = parser.MyStartRule(); ``` This program will work. But it won't be useful unless you do one of the following: @@ -120,19 +115,19 @@ This program will work. But it won't be useful unless you do one of the followin ## How do I create and run a visitor? 
```javascript // test.js -var antlr4 = require('antlr4'); -var MyGrammarLexer = require('./QueryLexer').QueryLexer; -var MyGrammarParser = require('./QueryParser').QueryParser; -var MyGrammarListener = require('./QueryListener').QueryListener; +import antlr4 from 'antlr4'; +import MyGrammarLexer from './QueryLexer.js'; +import MyGrammarParser from './QueryParser.js'; +import MyGrammarListener from './QueryListener.js'; -var input = "field = 123 AND items in (1,2,3)" -var chars = new antlr4.InputStream(input); -var lexer = new MyGrammarLexer(chars); -var tokens = new antlr4.CommonTokenStream(lexer); -var parser = new MyGrammarParser(tokens); +const input = "field = 123 AND items in (1,2,3)" +const chars = new antlr4.InputStream(input); +const lexer = new MyGrammarLexer(chars); +const tokens = new antlr4.CommonTokenStream(lexer); +const parser = new MyGrammarParser(tokens); parser.buildParseTrees = true; -var tree = parser.query(); +const tree = parser.query(); class Visitor { visitChildren(ctx) { @@ -160,40 +155,37 @@ tree.accept(new Visitor()); Let's suppose your MyGrammar grammar comprises 2 rules: "key" and "value". 
The antlr4 tool will have generated the following listener: ```javascript - MyGrammarListener = function(ParseTreeListener) { - // some code here - } - // some code here - MyGrammarListener.prototype.enterKey = function(ctx) {}; - MyGrammarListener.prototype.exitKey = function(ctx) {}; - MyGrammarListener.prototype.enterValue = function(ctx) {}; - MyGrammarListener.prototype.exitValue = function(ctx) {}; +class MyGrammarListener extends ParseTreeListener { + + constructor() { + super(); + } + + enterKey(ctx) {} + exitKey(ctx) {} + enterValue(ctx) {} + exitValue(ctx) {} +} ``` In order to provide custom behavior, you might want to create the following class: ```javascript -var KeyPrinter = function() { - MyGrammarListener.call(this); // inherit default listener - return this; -}; +class KeyPrinter extends MyGrammarListener { -// continue inheriting default listener -KeyPrinter.prototype = Object.create(MyGrammarListener.prototype); -KeyPrinter.prototype.constructor = KeyPrinter; - -// override default listener behavior -KeyPrinter.prototype.exitKey = function(ctx) { - console.log("Oh, a key!"); -}; + // override default listener behavior + exitKey(ctx) { + console.log("Oh, a key!"); + } +} ``` In order to execute this listener, you would simply add the following lines to the above code: ```javascript - ... - tree = parser.StartRule() // only repeated here for reference -var printer = new KeyPrinter(); +... +tree = parser.StartRule() // only repeated here for reference +const printer = new KeyPrinter(); antlr4.tree.ParseTreeWalker.DEFAULT.walk(printer, tree); ``` diff --git a/doc/lexer-rules.md b/doc/lexer-rules.md index 5070f4790..540479805 100644 --- a/doc/lexer-rules.md +++ b/doc/lexer-rules.md @@ -58,7 +58,7 @@ Match that character or sequence of characters. E.g., ’while’ or ’=’. [char set] -

Match one of the characters specified in the character set. Interpret x-y as the set of characters between range x and y, inclusively. The following escaped characters are interpreted as single special characters: \n, \r, \b, \t, \f, \uXXXX, and \u{XXXXXX}. To get ], \, or - you must escape them with \.

+

Match one of the characters specified in the character set. Interpret x-y as the set of characters between range x and y, inclusively. The following escaped characters are interpreted as single special characters: \n, \r, \b, \t, \f, \uXXXX, and \u{XXXXXX}. To get ] or \ you must escape them with \. To get - you must escape it with \ too, except for the case when - is the first or last character in the set.

You can also include all characters matching Unicode properties (general category, boolean, or enumerated including scripts and blocks) with \p{PropertyName} or \p{EnumProperty=Value}. (You can invert the test with \P{PropertyName} or \P{EnumProperty=Value}).

@@ -90,6 +90,8 @@ UNICODE_ID : [\p{Alpha}\p{General_Category=Other_Letter}] [\p{Alnum}\p{General_C EMOJI : [\u{1F4A9}\u{1F926}] ; // note Unicode code points > U+FFFF DASHBRACK : [\-\]]+ ; // match - or ] one or more times + +DASH : [---] ; // match a single -, i.e., "any character" between - and - (note first and last - not escaped) @@ -123,7 +125,7 @@ ESC : '\\' . ; // match any escaped \x character {«action»} -Lexer actions can appear anywhere as of 4.2, not just at the end of the outermost alternative. The lexer executes the actions at the appropriate input position, according to the placement of the action within the rule. To execute a single action for a role that has multiple alternatives, you can enclose the alts in parentheses and put the action afterwards: +Lexer actions can appear anywhere as of 4.2, not just at the end of the outermost alternative. The lexer executes the actions at the appropriate input position, according to the placement of the action within the rule. To execute a single action for a rule that has multiple alternatives, you can enclose the alts in parentheses and put the action afterwards:
 END : ('endif'|'end') {System.out.println("found an end");} ;
@@ -244,7 +246,8 @@ The mode commands alter the mode stack and hence the mode of the lexer. The 'mor
 ```
 // Default "mode": Everything OUTSIDE of a tag
 COMMENT : '' ;
-CDATA   : '' ;OPEN : '<' -> pushMode(INSIDE) ;
+CDATA   : '' ;
+OPEN : '<' -> pushMode(INSIDE) ;
  ...
 XMLDeclOpen : ' pushMode(INSIDE) ;
 SPECIAL_OPEN: ' more, pushMode(PROC_INSTR) ;
diff --git a/doc/lexicon.md b/doc/lexicon.md
index 078dc3e7a..92081575a 100644
--- a/doc/lexicon.md
+++ b/doc/lexicon.md
@@ -26,8 +26,8 @@ The Javadoc comments are hidden from the parser and are ignored at the moment.
 Token names always start with a capital letter and so do lexer rules as defined by Java’s `Character.isUpperCase` method. Parser rule names always start with a lowercase letter (those that fail `Character.isUpperCase`). The initial character can be followed by uppercase and lowercase letters, digits, and underscores. Here are some sample names:
 
 ```
-ID, LPAREN, RIGHT_CURLY // token names/rules
-expr, simpleDeclarator, d2, header_file // rule names
+ID, LPAREN, RIGHT_CURLY // token names/lexer rules
+expr, simpleDeclarator, d2, header_file // parser rule names
 ```
 
 Like Java, ANTLR accepts Unicode characters in ANTLR names:
@@ -96,7 +96,7 @@ The recognizers that ANTLR generates assume a character vocabulary containing al
 
 ## Actions
 
-Actions are code blocks written in the target language. You can use actions in a number of places within a grammar, but the syntax is always the same: arbitrary text surrounded by curly braces. You don’t need to escape a closing curly character if it’s in a string or comment: `"}"` or `/*}*/`. If the curlies are balanced, you also don’t need to escape }: `{...}`. Otherwise, escape extra curlies with a backslash: `\{` or `\}`. The action text should conform to the target language as specified with thelanguage option.
+Actions are code blocks written in the target language. You can use actions in a number of places within a grammar, but the syntax is always the same: arbitrary text surrounded by curly braces. You don’t need to escape a closing curly character if it’s in a string or comment: `"}"` or `/*}*/`. If the curlies are balanced, you also don’t need to escape }: `{...}`. Otherwise, escape extra curlies with a backslash: `\{` or `\}`. The action text should conform to the target language as specified with the language option.
 
 Embedded code can appear in: `@header` and `@members` named actions, parser and lexer rules, exception catching specifications, attribute sections for parser rules (return values, arguments, and locals), and some rule element options (currently predicates).
 
diff --git a/doc/php-target.md b/doc/php-target.md
index 75eae465c..d0f010528 100644
--- a/doc/php-target.md
+++ b/doc/php-target.md
@@ -15,7 +15,7 @@ generated by ANTLR4. The runtime provides a common set of tools for using your p
 Install the runtime with Composer:
 
 ```bash
-composer install antlr/antlr4
+composer require antlr/antlr4-php-runtime
 ```
 
 #### 3. Generate your parser
@@ -108,4 +108,4 @@ The expected output is:
 {"a":1}
 "a":1
 1
-```
\ No newline at end of file
+```
diff --git a/doc/releasing-antlr.md b/doc/releasing-antlr.md
index a44e48e7d..a09fd33eb 100644
--- a/doc/releasing-antlr.md
+++ b/doc/releasing-antlr.md
@@ -9,17 +9,41 @@ Create a pre-release or full release at github; [Example 4.5-rc-1](https://githu
Whack any existing tag as mvn will create one and it fails if already there.
 
 ```
-$ git tag -d 4.7
-$ git push origin :refs/tags/4.7
-$ git push upstream :refs/tags/4.7
+$ git tag -d 4.8
+$ git push origin :refs/tags/4.8
+$ git push upstream :refs/tags/4.8
 ```
 
 ### Create release candidate tag
 
 ```bash
-$ git tag -a 4.7-rc1 -m 'heading towards 4.7'
-$ git push origin 4.7-rc1
-$ git push upstream 4.7-rc1
+$ git tag -a 4.8-rc1 -m 'heading towards 4.8'
+$ git push origin 4.8-rc1
+$ git push upstream 4.8-rc1
+```
+
+## Update submodules
+
+Make sure you tell git to pull in the submodule (for every clone you do of antlr4):
+
+```bash
+git submodule init
+```
+
+Also bump version to 4.8 in `runtime/PHP/src/RuntimeMetaData.php`.
+
+Update the runtime submodules by running the following command:
+
+```bash
+git submodule update --recursive
+git submodule update --remote --merge # might only need this last one but do both
+```
+
+Make sure these changes go back to antlr4 repo:
+
+```bash
+git add runtime/PHP
+git commit -m "Update PHP Runtime to latest version"
 ```
 
 ## Bump version
@@ -33,7 +57,7 @@ Edit the repository looking for 4.5 or whatever and update it. Bump version in t
  * runtime/Python3/src/antlr4/Recognizer.py
  * runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Properties/AssemblyInfo.cs
  * runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Antlr4.Runtime.dotnet.csproj
- * runtime/JavaScript/src/antlr4/package.json
+ * runtime/JavaScript/package.json
  * runtime/JavaScript/src/antlr4/Recognizer.js
  * runtime/Cpp/VERSION
  * runtime/Cpp/runtime/src/RuntimeMetaData.cpp
@@ -41,6 +65,8 @@ Edit the repository looking for 4.5 or whatever and update it. Bump version in t
  * runtime/Cpp/demo/generate.cmd
  * runtime/Go/antlr/recognizer.go
  * runtime/Swift/Antlr4/org/antlr/v4/runtime/RuntimeMetaData.swift
+ * runtime/Dart/lib/src/runtime_meta_data.dart
+ * runtime/Dart/pubspec.yaml
  * tool/src/org/antlr/v4/codegen/target/GoTarget.java
  * tool/src/org/antlr/v4/codegen/target/CppTarget.java
  * tool/src/org/antlr/v4/codegen/target/CSharpTarget.java
@@ -59,6 +85,10 @@ find tool runtime -type f -exec grep -l '4\.6' {} \;
 
 Commit to repository.
 
+## Building
+
+ugh. apparently you have to `mvn install` and then `mvn compile` or some such or subdir pom.xml's won't see the latest runtime build.
+
 ## Maven Repository Settings
 
 First, make sure you have maven set up to communicate with staging servers etc...  Create file `~/.m2/settings.xml` with appropriate username/password for staging server and gpg.keyname/passphrase for signing. Make sure it has strict visibility privileges to just you. On unix, it looks like:
@@ -106,7 +136,7 @@ Here is the file template
 
 ## Maven deploy snapshot
 
-The goal is to get a snapshot, such as `4.7-SNAPSHOT`, to the staging server: [antlr4 tool](https://oss.sonatype.org/content/repositories/snapshots/org/antlr/antlr4) and [antlr4 java runtime](https://oss.sonatype.org/content/repositories/snapshots/org/antlr/antlr4-runtime).
+The goal is to get a snapshot, such as `4.8-SNAPSHOT`, to the staging server: [antlr4 tool](https://oss.sonatype.org/content/repositories/snapshots/org/antlr/antlr4) and [antlr4 java runtime](https://oss.sonatype.org/content/repositories/snapshots/org/antlr/antlr4-runtime).
 
 Do this:
 
@@ -114,15 +144,15 @@ Do this:
 $ mvn deploy -DskipTests
 ...
 [INFO] --- maven-deploy-plugin:2.7:deploy (default-deploy) @ antlr4-tool-testsuite ---
-Downloading: https://oss.sonatype.org/content/repositories/snapshots/org/antlr/antlr4-tool-testsuite/4.7-SNAPSHOT/maven-metadata.xml
-Uploading: https://oss.sonatype.org/content/repositories/snapshots/org/antlr/antlr4-tool-testsuite/4.7-SNAPSHOT/antlr4-tool-testsuite-4.7-20161211.173752-1.jar
-Uploaded: https://oss.sonatype.org/content/repositories/snapshots/org/antlr/antlr4-tool-testsuite/4.7-SNAPSHOT/antlr4-tool-testsuite-4.7-20161211.173752-1.jar (3 KB at 3.4 KB/sec)
-Uploading: https://oss.sonatype.org/content/repositories/snapshots/org/antlr/antlr4-tool-testsuite/4.7-SNAPSHOT/antlr4-tool-testsuite-4.7-20161211.173752-1.pom
-Uploaded: https://oss.sonatype.org/content/repositories/snapshots/org/antlr/antlr4-tool-testsuite/4.7-SNAPSHOT/antlr4-tool-testsuite-4.7-20161211.173752-1.pom (3 KB at 6.5 KB/sec)
+Downloading: https://oss.sonatype.org/content/repositories/snapshots/org/antlr/antlr4-tool-testsuite/4.8-SNAPSHOT/maven-metadata.xml
+Uploading: https://oss.sonatype.org/content/repositories/snapshots/org/antlr/antlr4-tool-testsuite/4.8-SNAPSHOT/antlr4-tool-testsuite-4.8-20161211.173752-1.jar
+Uploaded: https://oss.sonatype.org/content/repositories/snapshots/org/antlr/antlr4-tool-testsuite/4.8-SNAPSHOT/antlr4-tool-testsuite-4.8-20161211.173752-1.jar (3 KB at 3.4 KB/sec)
+Uploading: https://oss.sonatype.org/content/repositories/snapshots/org/antlr/antlr4-tool-testsuite/4.8-SNAPSHOT/antlr4-tool-testsuite-4.8-20161211.173752-1.pom
+Uploaded: https://oss.sonatype.org/content/repositories/snapshots/org/antlr/antlr4-tool-testsuite/4.8-SNAPSHOT/antlr4-tool-testsuite-4.8-20161211.173752-1.pom (3 KB at 6.5 KB/sec)
 Downloading: https://oss.sonatype.org/content/repositories/snapshots/org/antlr/antlr4-tool-testsuite/maven-metadata.xml
 Downloaded: https://oss.sonatype.org/content/repositories/snapshots/org/antlr/antlr4-tool-testsuite/maven-metadata.xml (371 B at 1.4 KB/sec)
-Uploading: https://oss.sonatype.org/content/repositories/snapshots/org/antlr/antlr4-tool-testsuite/4.7-SNAPSHOT/maven-metadata.xml
-Uploaded: https://oss.sonatype.org/content/repositories/snapshots/org/antlr/antlr4-tool-testsuite/4.7-SNAPSHOT/maven-metadata.xml (774 B at 1.8 KB/sec)
+Uploading: https://oss.sonatype.org/content/repositories/snapshots/org/antlr/antlr4-tool-testsuite/4.8-SNAPSHOT/maven-metadata.xml
+Uploaded: https://oss.sonatype.org/content/repositories/snapshots/org/antlr/antlr4-tool-testsuite/4.8-SNAPSHOT/maven-metadata.xml (774 B at 1.8 KB/sec)
 Uploading: https://oss.sonatype.org/content/repositories/snapshots/org/antlr/antlr4-tool-testsuite/maven-metadata.xml
 Uploaded: https://oss.sonatype.org/content/repositories/snapshots/org/antlr/antlr4-tool-testsuite/maven-metadata.xml (388 B at 0.9 KB/sec)
 [INFO] ------------------------------------------------------------------------
@@ -192,18 +222,18 @@ It will start out by asking you the version number:
 
 ```
 ...
-What is the release version for "ANTLR 4"? (org.antlr:antlr4-master) 4.7: : 4.7
-What is the release version for "ANTLR 4 Runtime"? (org.antlr:antlr4-runtime) 4.7: : 
-What is the release version for "ANTLR 4 Tool"? (org.antlr:antlr4) 4.7: : 
-What is the release version for "ANTLR 4 Maven plugin"? (org.antlr:antlr4-maven-plugin) 4.7: : 
-What is the release version for "ANTLR 4 Runtime Test Generator"? (org.antlr:antlr4-runtime-testsuite) 4.7: : 
-What is the release version for "ANTLR 4 Tool Tests"? (org.antlr:antlr4-tool-testsuite) 4.7: : 
-What is SCM release tag or label for "ANTLR 4"? (org.antlr:antlr4-master) antlr4-master-4.7: : 4.7
-What is the new development version for "ANTLR 4"? (org.antlr:antlr4-master) 4.7.1-SNAPSHOT:
+What is the release version for "ANTLR 4"? (org.antlr:antlr4-master) 4.8: : 4.8
+What is the release version for "ANTLR 4 Runtime"? (org.antlr:antlr4-runtime) 4.8: : 
+What is the release version for "ANTLR 4 Tool"? (org.antlr:antlr4) 4.8: : 
+What is the release version for "ANTLR 4 Maven plugin"? (org.antlr:antlr4-maven-plugin) 4.8: : 
+What is the release version for "ANTLR 4 Runtime Test Generator"? (org.antlr:antlr4-runtime-testsuite) 4.8: : 
+What is the release version for "ANTLR 4 Tool Tests"? (org.antlr:antlr4-tool-testsuite) 4.8: : 
+What is SCM release tag or label for "ANTLR 4"? (org.antlr:antlr4-master) antlr4-master-4.8: : 4.8
+What is the new development version for "ANTLR 4"? (org.antlr:antlr4-master) 4.8.1-SNAPSHOT:
 ...
 ```
 
-Maven will go through your pom.xml files to update versions from 4.7-SNAPSHOT to 4.7 for release and then to 4.7.1-SNAPSHOT after release, which is done with:
+Maven will go through your pom.xml files to update versions from 4.8-SNAPSHOT to 4.8 for release and then to 4.8.1-SNAPSHOT after release, which is done with:
 
 ```bash
 mvn release:perform -Darguments="-DskipTests"
@@ -217,16 +247,18 @@ Now, go here:
 
 and on the left click "Staging Repositories". You click the staging repo and close it, then you refresh, click it and release it. It's done when you see it here:
 
-    [http://repo1.maven.org/maven2/org/antlr/antlr4-runtime/](http://repo1.maven.org/maven2/org/antlr/antlr4-runtime/)
+    [https://oss.sonatype.org/service/local/repositories/releases/content/org/antlr/antlr4-runtime/4.8-1/antlr4-runtime-4.8-1.jar](https://oss.sonatype.org/service/local/repositories/releases/content/org/antlr/antlr4-runtime/4.8-1/antlr4-runtime-4.8-1.jar)
+
+All releases should be here: https://repo1.maven.org/maven2/org/antlr/antlr4-runtime/
 
 Copy the jars to antlr.org site and update download/index.html
 
 ```bash
-cp ~/.m2/repository/org/antlr/antlr4-runtime/4.7/antlr4-runtime-4.7.jar ~/antlr/sites/website-antlr4/download/antlr-runtime-4.7.jar
-cp ~/.m2/repository/org/antlr/antlr4/4.7/antlr4-4.7-complete.jar ~/antlr/sites/website-antlr4/download/antlr-4.7-complete.jar
+cp ~/.m2/repository/org/antlr/antlr4-runtime/4.8/antlr4-runtime-4.8.jar ~/antlr/sites/website-antlr4/download/antlr-runtime-4.8.jar
+cp ~/.m2/repository/org/antlr/antlr4/4.8/antlr4-4.8-complete.jar ~/antlr/sites/website-antlr4/download/antlr-4.8-complete.jar
 cd ~/antlr/sites/website-antlr4/download
-git add antlr-4.7-complete.jar
-git add antlr-runtime-4.7.jar 
+git add antlr-4.8-complete.jar
+git add antlr-runtime-4.8.jar 
 ```
 
 Update on site:
@@ -238,7 +270,7 @@ Update on site:
 *   scripts/topnav.js
 
 ```
-git commit -a -m 'add 4.7 jars'
+git commit -a -m 'add 4.8 jars'
 git push origin gh-pages
 ```
 
@@ -247,16 +279,14 @@ git push origin gh-pages
 ### JavaScript
 
 ```bash
-cd runtime/JavaScript/src
-zip -r /tmp/antlr-javascript-runtime-4.7.zip antlr4
-cp /tmp/antlr-javascript-runtime-4.7.zip ~/antlr/sites/website-antlr4/download
+cd runtime/JavaScript
 # git add, commit, push
 ```
 
 **Push to npm**
 
 ```bash
-cd runtime/JavaScript/src
+cd runtime/JavaScript
 npm login
 npm publish antlr4
 ```
@@ -264,11 +294,8 @@ npm publish antlr4
 Move target to website
 
 ```bash
-pushd ~/antlr/sites/website-antlr4/download
-git add antlr-javascript-runtime-4.7.zip
-git commit -a -m 'update JS runtime'
-git push origin gh-pages
-popd
+npm run build
+cp /dist/antlr4.js ~/antlr/sites/website-antlr4/download
 ```
 
 ### CSharp
@@ -309,7 +336,7 @@ Copyright (C) Microsoft Corporation. All rights reserved.
   Restore completed in 427.62 ms for C:\Code\antlr4-fork\runtime\CSharp\runtime\CSharp\Antlr4.Runtime\Antlr4.Runtime.dotnet.csproj.
   Antlr4.Runtime.dotnet -> C:\Code\antlr4-fork\runtime\CSharp\runtime\CSharp\Antlr4.Runtime\lib\Release\netstandard1.3\Antlr4.Runtime.Standard.dll
   Antlr4.Runtime.dotnet -> C:\Code\antlr4-fork\runtime\CSharp\runtime\CSharp\Antlr4.Runtime\lib\Release\net35\Antlr4.Runtime.Standard.dll
-  Successfully created package 'C:\Code\antlr4-fork\runtime\CSharp\runtime\CSharp\Antlr4.Runtime\lib\Release\Antlr4.Runtime.Standard.4.7.2.nupkg'.
+  Successfully created package 'C:\Code\antlr4-fork\runtime\CSharp\runtime\CSharp\Antlr4.Runtime\lib\Release\Antlr4.Runtime.Standard.4.8.2.nupkg'.
 ```
 
 **Publishing to NuGet**
@@ -371,7 +398,7 @@ There are links to the artifacts in [download.html](http://www.antlr.org/downloa
 
 The C++ target is the most complex one, because it addresses multiple platforms, which require individual handling. We have 4 scenarios to cover:
 
-* **Windows**: static and dynamic libraries for the VC++ runtime 2013 or 2015 (corresponding to Visual Studio 2013 or 2015) + header files. All that in 32 and 64bit, debug + release.
+* **Windows**: static and dynamic libraries for the VC++ runtime 2017 or 2019 (corresponding to Visual Studio 2017 or 2019) + header files. All that in 32 and 64bit, debug + release.
 * **MacOS**: static and dynamic release libraries + header files.
 * **iOS**: no prebuilt binaries, but just a zip of the source, including the XCode project to build everything from source.
 * **Linux**: no prebuilt binaries, but just a zip of the source code, including the cmake file to build everything from source there.
@@ -385,7 +412,7 @@ On a Mac (with XCode 7+ installed):
 ```bash
 cd runtime/Cpp
 ./deploy-macos.sh
-cp antlr4-cpp-runtime-macos.zip ~/antlr/sites/website-antlr4/download/antlr4-cpp-runtime-4.7-macos.zip
+cp antlr4-cpp-runtime-macos.zip ~/antlr/sites/website-antlr4/download/antlr4-cpp-runtime-4.8-macos.zip
 ```
 
 On any Mac or Linux machine:
@@ -393,15 +420,15 @@ On any Mac or Linux machine:
 ```bash
 cd runtime/Cpp
 ./deploy-source.sh
-cp antlr4-cpp-runtime-source.zip ~/antlr/sites/website-antlr4/download/antlr4-cpp-runtime-4.7-source.zip
+cp antlr4-cpp-runtime-source.zip ~/antlr/sites/website-antlr4/download/antlr4-cpp-runtime-4.8-source.zip
 ```
 
-On a Windows machine the build scripts checks if VS 2013 and/or VS 2015 are installed and builds binaries for each, if found. This script requires 7z to be installed (http://7-zip.org then do `set PATH=%PATH%;C:\Program Files\7-Zip\` from DOS not powershell).
+On a Windows machine the build scripts checks if VS 2017 and/or VS 2019 are installed and builds binaries for each, if found. This script requires 7z to be installed (http://7-zip.org then do `set PATH=%PATH%;C:\Program Files\7-Zip\` from DOS not powershell).
 
 ```bash
 cd runtime/Cpp
-deploy-windows.cmd
-cp runtime\bin\vs-2015\x64\Release DLL\antlr4-cpp-runtime-vs2015.zip ~/antlr/sites/website-antlr4/download/antlr4-cpp-runtime-4.7-vs2015.zip
+deploy-windows.cmd Community
+cp antlr4-cpp-runtime-vs2019.zip ~/antlr/sites/website-antlr4/download/antlr4-cpp-runtime-4.8-vs2019.zip
 ```
 
 Move target to website (**_rename to a specific ANTLR version first if needed_**):
@@ -409,14 +436,27 @@ Move target to website (**_rename to a specific ANTLR version first if needed_**
 ```bash
 pushd ~/antlr/sites/website-antlr4/download
 # vi index.html
-git add antlr4cpp-runtime-4.7-macos.zip
-git add antlr4cpp-runtime-4.7-windows.zip
-git add antlr4cpp-runtime-4.7-source.zip
+git add antlr4cpp-runtime-4.8-macos.zip
+git add antlr4cpp-runtime-4.8-windows.zip
+git add antlr4cpp-runtime-4.8-source.zip
 git commit -a -m 'update C++ runtime'
 git push origin gh-pages
 popd
 ```
 
+### Dart
+
+Push to pub.dev
+
+```bash
+cd runtime/Dart
+pub publish
+```
+
+It will warn that no change log found for the new version.
+If there are changes relevant to dart in this release, edit [CHANGELOG.md](https://github.com/antlr/antlr4/blob/master/runtime/Dart/CHANGELOG.md) to describe the changes.
+Otherwise enter `N` to ignore the warning.
+
 ## Update javadoc for runtime and tool
 
 First, gen javadoc:
@@ -433,9 +473,9 @@ cd ~/antlr/sites/website-antlr4/api
 git checkout gh-pages
 git pull origin gh-pages
 cd Java
-jar xvf ~/.m2/repository/org/antlr/antlr4-runtime/4.7/antlr4-runtime-4.7-javadoc.jar
+jar xvf ~/.m2/repository/org/antlr/antlr4-runtime/4.8/antlr4-runtime-4.8-javadoc.jar
 cd ../JavaTool
-jar xvf ~/.m2/repository/org/antlr/antlr4/4.7/antlr4-4.7-javadoc.jar
+jar xvf ~/.m2/repository/org/antlr/antlr4/4.8/antlr4-4.8-javadoc.jar
 git commit -a -m 'freshen api doc'
 git push origin gh-pages
 ```
diff --git a/doc/targets.md b/doc/targets.md
index c2341ec48..ad6e7dba9 100644
--- a/doc/targets.md
+++ b/doc/targets.md
@@ -10,12 +10,13 @@ This page lists the available and upcoming ANTLR runtimes. Please note that you
 * [C++](cpp-target.md)
 * [Swift](swift-target.md)
 * [PHP](php-target.md)
+* [Dart](dart-target.md)
 
 ## Target feature parity
 
 New features generally appear in the Java target and then migrate to the other targets, but these other targets don't always get updated in the same overall tool release. This section tries to identify features added to Java that have not been added to the other targets.
 
-|Feature|Java|C♯|Python2|Python3|JavaScript|Go|C++|Swift|PHP
-|---|---|---|---|---|---|---|---|---|---|
-|Ambiguous tree construction|4.5.1|-|-|-|-|-|-|-|-|
+|Feature|Java|C♯|Python2|Python3|JavaScript|Go|C++|Swift|PHP|Dart
+|---|---|---|---|---|---|---|---|---|---|---|
+|Ambiguous tree construction|4.5.1|-|-|-|-|-|-|-|-|-|
 
diff --git a/pom.xml b/pom.xml
index e636655cf..5c7a93549 100644
--- a/pom.xml
+++ b/pom.xml
@@ -13,7 +13,7 @@
 	
 	org.antlr
 	antlr4-master
-	4.7.3-SNAPSHOT
+	4.8-2-SNAPSHOT
 	pom
 
 	ANTLR 4
@@ -151,7 +151,7 @@
 				
 					org.apache.maven.plugins
 					maven-compiler-plugin
-					3.6.0
+					3.8.1
 					
 						${maven.compiler.source}
 						${maven.compiler.target}
diff --git a/runtime-testsuite/annotations/pom.xml b/runtime-testsuite/annotations/pom.xml
index 3d39f719f..97bf78649 100644
--- a/runtime-testsuite/annotations/pom.xml
+++ b/runtime-testsuite/annotations/pom.xml
@@ -9,7 +9,7 @@
   
     org.antlr
     antlr4-master
-    4.7.3-SNAPSHOT
+    4.8-2-SNAPSHOT
     ../../pom.xml
   
   antlr4-runtime-test-annotations
diff --git a/runtime-testsuite/pom.xml b/runtime-testsuite/pom.xml
index b9e6bd936..b6368917d 100644
--- a/runtime-testsuite/pom.xml
+++ b/runtime-testsuite/pom.xml
@@ -10,7 +10,7 @@
 	
 		org.antlr
 		antlr4-master
-		4.7.3-SNAPSHOT
+		4.8-2-SNAPSHOT
 	
 	antlr4-runtime-testsuite
 	ANTLR 4 Runtime Tests (2nd generation)
@@ -26,7 +26,7 @@
 		
 			org.antlr
 			ST4
-			4.1
+			4.3
 			test
 		
 		
@@ -59,19 +59,6 @@
 			4.12
 			test
 		
-		
-			org.seleniumhq.selenium
-			selenium-java
-			2.46.0
-			test
-		
-		
-		  org.eclipse.jetty
-		  jetty-server
-          
-		  9.4.19.v20190610
-		  test
-		
 		
 			org.glassfish
 			javax.json
@@ -112,10 +99,11 @@
                         **/csharp/Test*.java
                         **/java/Test*.java
                         **/go/Test*.java
-                        **/javascript/node/Test*.java
+                        **/javascript/Test*.java
                         **/python2/Test*.java
                         **/python3/Test*.java
                         **/php/Test*.java
+                        **/dart/Test*.java
                         ${antlr.tests.swift}
                     
 				
diff --git a/runtime-testsuite/processors/pom.xml b/runtime-testsuite/processors/pom.xml
index 24fd9b786..c417c626d 100644
--- a/runtime-testsuite/processors/pom.xml
+++ b/runtime-testsuite/processors/pom.xml
@@ -9,7 +9,7 @@
 	
 		org.antlr
 		antlr4-master
-		4.7.3-SNAPSHOT
+		4.8-2-SNAPSHOT
 		../../pom.xml
 	
 	antlr4-runtime-test-annotation-processors
diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/templates/CSharp.test.stg b/runtime-testsuite/resources/org/antlr/v4/test/runtime/templates/CSharp.test.stg
index fb95440ce..cd9270286 100644
--- a/runtime-testsuite/resources/org/antlr/v4/test/runtime/templates/CSharp.test.stg
+++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/templates/CSharp.test.stg
@@ -18,8 +18,6 @@ AppendStr(a,b) ::= <%%>
 
 Concat(a,b) ::= ""
 
-DeclareLocal(s,v) ::= "Object  = ;"
-
 AssertIsList(v) ::= "System.Collections.IList __ttt__ = ;" // just use static type system
 
 AssignLocal(s,v) ::= " = ;"
@@ -40,8 +38,6 @@ SetMember(n,v) ::= <%this. = ;%>
 
 AddMember(n,v) ::= <%this. += ;%>
 
-PlusMember(v,n) ::= <% + this.%>
-
 MemberEquals(n,v) ::= <%this. == %>
 
 ModMemberEquals(n,m,v) ::= <%this. %  == %>
diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/templates/Chrome.test.stg b/runtime-testsuite/resources/org/antlr/v4/test/runtime/templates/Chrome.test.stg
index 070409c21..7b4729eb5 100644
--- a/runtime-testsuite/resources/org/antlr/v4/test/runtime/templates/Chrome.test.stg
+++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/templates/Chrome.test.stg
@@ -18,8 +18,6 @@ AppendStr(a,b) ::= <%%>
 
 Concat(a,b) ::= ""
 
-DeclareLocal(s,v) ::= "var  = ;"
-
 AssertIsList(v) ::= <>
 
 AssignLocal(s,v) ::= " = ;"
@@ -40,8 +38,6 @@ SetMember(n,v) ::= <%this. = ;%>
 
 AddMember(n,v) ::= <%this. += ;%>
 
-PlusMember(v,n) ::= <% + this.%>
-
 MemberEquals(n,v) ::= <%this. === %>
 
 ModMemberEquals(n,m,v) ::= <%this. %  === %>
@@ -78,7 +74,7 @@ LANotEquals(i, v) ::= <%this._input.LA()!=%>
 
 TokenStartColumnEquals(i) ::= <%this._tokenStartColumn===%>
 
-ImportListener(X) ::= <Listener = require('./Listener').Listener;>>
+ImportListener(X) ::= ""
 
 GetExpectedTokenNames() ::= "this.getExpectedTokens().toString(this.literalNames)"
 
diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/templates/Cpp.test.stg b/runtime-testsuite/resources/org/antlr/v4/test/runtime/templates/Cpp.test.stg
index bd3f0dd79..a5dab18bd 100644
--- a/runtime-testsuite/resources/org/antlr/v4/test/runtime/templates/Cpp.test.stg
+++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/templates/Cpp.test.stg
@@ -11,8 +11,6 @@ Append(a,b) ::= " + ->toString()"
 Concat(a,b) ::= ""
 AppendStr(a,b) ::= " + "
 
-DeclareLocal(s,v) ::= " = "
-
 AssertIsList(v) ::= "assert(.size() >= 0);" // Use a method that exists only on a list (vector actually).
 AssignLocal(s,v) ::= " = ;"
 
@@ -25,7 +23,6 @@ VarRef(n) ::= ""
 GetMember(n) ::= ""
 SetMember(n,v) ::= " = ;"
 AddMember(n,v) ::= " += ;"
-PlusMember(v,n) ::= " + "
 MemberEquals(n,v) ::= " == "
 ModMemberEquals(n,m,v) ::= " %  == "
 ModMemberNotEquals(n,m,v) ::= " %  != "
diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/templates/Dart.test.stg b/runtime-testsuite/resources/org/antlr/v4/test/runtime/templates/Dart.test.stg
new file mode 100644
index 000000000..9f7d65cb0
--- /dev/null
+++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/templates/Dart.test.stg
@@ -0,0 +1,318 @@
+writeln(s) ::= <);>>
+write(s) ::= <);>>
+writeList(s) ::= <);>>
+
+False() ::= "false"
+
+True() ::= "true"
+
+Not(v) ::= "!"
+
+Assert(s) ::= <);>>
+
+Cast(t,v) ::= "( as )"
+
+Append(a,b) ::= ".toString() + .toString()"
+
+AppendStr(a,b) ::= <%%>
+
+Concat(a,b) ::= ""
+
+AssertIsList(v) ::= "assert ( is List);" // just use static type system
+
+AssignLocal(s,v) ::= " = ;"
+
+InitIntMember(n,v) ::= <%int  = ;%>
+
+InitBooleanMember(n,v) ::= <%bool  = ;%>
+
+InitIntVar(n,v) ::= <%%>
+
+IntArg(n) ::= "int "
+
+VarRef(n) ::= ""
+
+GetMember(n) ::= <%this.%>
+
+SetMember(n,v) ::= <%this. = ;%>
+
+AddMember(n,v) ::= <%this. += ;%>
+
+MemberEquals(n,v) ::= <%this. == %>
+
+ModMemberEquals(n,m,v) ::= <%this. %  == %>
+
+ModMemberNotEquals(n,m,v) ::= <%this. %  != %>
+
+DumpDFA() ::= "this.dumpDFA();"
+
+Pass() ::= ""
+
+StringList() ::= "List\"
+
+BuildParseTrees() ::= "buildParseTree = true;"
+
+BailErrorStrategy() ::= <%errorHandler = new BailErrorStrategy();%>
+
+ToStringTree(s) ::= <%.toStringTree(parser: this)%>
+
+Column() ::= "this.charPositionInLine"
+
+Text() ::= "this.text"
+
+ValEquals(a,b) ::= <%==%>
+
+TextEquals(a) ::= <%this.text == ""%>
+
+PlusText(a) ::= <%"" + this.text%>
+
+InputText() ::= "tokenStream.text"
+
+LTEquals(i, v) ::= <%tokenStream.LT().text == %>
+
+LANotEquals(i, v) ::= <%tokenStream.LA()!=%>
+
+TokenStartColumnEquals(i) ::= <%this.tokenStartCharPositionInLine==%>
+
+ImportListener(X) ::= ""
+
+GetExpectedTokenNames() ::= "this.expectedTokens.toString(vocabulary: this.vocabulary)"
+
+RuleInvocationStack() ::= "ruleInvocationStack"
+
+LL_EXACT_AMBIG_DETECTION() ::= <>
+
+ParserToken(parser, token) ::= <%.TOKEN_%>
+
+Production(p) ::= <%

%> + +Result(r) ::= <%%> + +ParserPropertyMember() ::= << +@members { +bool Property() { + return true; +} +} +>> + +ParserPropertyCall(p, call) ::= "

+ *

+ * The default behavior (used in all other cases) is removing the temporary + * directories for all tests which completed successfully, and preserving + * the directories for tests which failed.

+ */ + public static final boolean PRESERVE_TEST_DIR = Boolean.parseBoolean(System.getProperty("antlr.preserve-test-dir", "false")); + + /** + * The base test directory is the directory where generated files get placed + * during unit test execution. + *

+ *

+ * The default value for this property is the {@code java.io.tmpdir} system + * property, and can be overridden by setting the + * {@code antlr.java-test-dir} property to a custom location. Note that the + * {@code antlr.java-test-dir} property directly affects the + * {@link #CREATE_PER_TEST_DIRECTORIES} value as well.

+ */ + public static final String BASE_TEST_DIR; + + /** + * When {@code true}, a temporary directory will be created for each test + * executed during the test run. + *

+ *

+ * This value is {@code true} when the {@code antlr.java-test-dir} system + * property is set, and otherwise {@code false}.

+ */ + public static final boolean CREATE_PER_TEST_DIRECTORIES; + + static { + String baseTestDir = System.getProperty("antlr.dart-test-dir"); + boolean perTestDirectories = false; + if (baseTestDir == null || baseTestDir.isEmpty()) { + baseTestDir = System.getProperty("java.io.tmpdir"); + perTestDirectories = true; + } + + if (!new File(baseTestDir).isDirectory()) { + throw new UnsupportedOperationException("The specified base test directory does not exist: " + baseTestDir); + } + + BASE_TEST_DIR = baseTestDir; + CREATE_PER_TEST_DIRECTORIES = perTestDirectories; + } + + /** + * Build up the full classpath we need, including the surefire path (if present) + */ + public static final String CLASSPATH = System.getProperty("java.class.path"); + + public String tmpdir = null; + + /** + * If error during parser execution, store stderr here; can't return + * stdout and stderr. This doesn't trap errors from running antlr. + */ + protected String stderrDuringParse; + + /** + * Errors found while running antlr + */ + protected StringBuilder antlrToolErrors; + + private static String cacheDartPackages; + + private String getPropertyPrefix() { + return "antlr-dart"; + } + + @Override + public void testSetUp() throws Exception { + if (CREATE_PER_TEST_DIRECTORIES) { + // new output dir for each test + String threadName = Thread.currentThread().getName(); + String testDirectory = getClass().getSimpleName() + "-" + threadName + "-" + System.nanoTime(); + tmpdir = new File(BASE_TEST_DIR, testDirectory).getAbsolutePath(); + } else { + tmpdir = new File(BASE_TEST_DIR).getAbsolutePath(); + if (!PRESERVE_TEST_DIR && new File(tmpdir).exists()) { + eraseFiles(); + } + } + antlrToolErrors = new StringBuilder(); + } + + @Override + public void testTearDown() throws Exception { + } + + @Override + public String getTmpDir() { + return tmpdir; + } + + @Override + public String getStdout() { + return null; + } + + @Override + public String getParseErrors() { + return stderrDuringParse; + } + + 
@Override + public String getANTLRToolErrors() { + if (antlrToolErrors.length() == 0) { + return null; + } + return antlrToolErrors.toString(); + } + + protected Tool newTool(String[] args) { + Tool tool = new Tool(args); + return tool; + } + + protected ATN createATN(Grammar g, boolean useSerializer) { + if (g.atn == null) { + semanticProcess(g); + assertEquals(0, g.tool.getNumErrors()); + + ParserATNFactory f; + if (g.isLexer()) { + f = new LexerATNFactory((LexerGrammar) g); + } else { + f = new ParserATNFactory(g); + } + + g.atn = f.createATN(); + assertEquals(0, g.tool.getNumErrors()); + } + + ATN atn = g.atn; + if (useSerializer) { + char[] serialized = ATNSerializer.getSerializedAsChars(atn); + return new ATNDeserializer().deserialize(serialized); + } + + return atn; + } + + protected void semanticProcess(Grammar g) { + if (g.ast != null && !g.ast.hasErrors) { +// System.out.println(g.ast.toStringTree()); + Tool antlr = new Tool(); + SemanticPipeline sem = new SemanticPipeline(g); + sem.process(); + if (g.getImportedGrammars() != null) { // process imported grammars (if any) + for (Grammar imp : g.getImportedGrammars()) { + antlr.processNonCombinedGrammar(imp, false); + } + } + } + } + + public DFA createDFA(Grammar g, DecisionState s) { +// PredictionDFAFactory conv = new PredictionDFAFactory(g, s); +// DFA dfa = conv.createDFA(); +// conv.issueAmbiguityWarnings(); +// System.out.print("DFA="+dfa); +// return dfa; + return null; + } + +// public void minimizeDFA(DFA dfa) { +// DFAMinimizer dmin = new DFAMinimizer(dfa); +// dfa.minimized = dmin.minimize(); +// } + + IntegerList getTypesFromString(Grammar g, String expecting) { + IntegerList expectingTokenTypes = new IntegerList(); + if (expecting != null && !expecting.trim().isEmpty()) { + for (String tname : expecting.replace(" ", "").split(",")) { + int ttype = g.getTokenType(tname); + expectingTokenTypes.add(ttype); + } + } + return expectingTokenTypes; + } + + public IntegerList getTokenTypesViaATN(String 
input, LexerATNSimulator lexerATN) { + ANTLRInputStream in = new ANTLRInputStream(input); + IntegerList tokenTypes = new IntegerList(); + int ttype; + do { + ttype = lexerATN.match(in, Lexer.DEFAULT_MODE); + tokenTypes.add(ttype); + } while (ttype != Token.EOF); + return tokenTypes; + } + + public List getTokenTypes(LexerGrammar lg, + ATN atn, + CharStream input) { + LexerATNSimulator interp = new LexerATNSimulator(atn, new DFA[]{new DFA(atn.modeToStartState.get(Lexer.DEFAULT_MODE))}, null); + List tokenTypes = new ArrayList(); + int ttype; + boolean hitEOF = false; + do { + if (hitEOF) { + tokenTypes.add("EOF"); + break; + } + int t = input.LA(1); + ttype = interp.match(input, Lexer.DEFAULT_MODE); + if (ttype == Token.EOF) { + tokenTypes.add("EOF"); + } else { + tokenTypes.add(lg.typeToTokenList.get(ttype)); + } + + if (t == IntStream.EOF) { + hitEOF = true; + } + } while (ttype != Token.EOF); + return tokenTypes; + } + + List checkRuleDFA(String gtext, String ruleName, String expecting) + throws Exception { + ErrorQueue equeue = new ErrorQueue(); + Grammar g = new Grammar(gtext, equeue); + ATN atn = createATN(g, false); + ATNState s = atn.ruleToStartState[g.getRule(ruleName).index]; + if (s == null) { + System.err.println("no such rule: " + ruleName); + return null; + } + ATNState t = s.transition(0).target; + if (!(t instanceof DecisionState)) { + System.out.println(ruleName + " has no decision"); + return null; + } + DecisionState blk = (DecisionState) t; + checkRuleDFA(g, blk, expecting); + return equeue.all; + } + + List checkRuleDFA(String gtext, int decision, String expecting) + throws Exception { + ErrorQueue equeue = new ErrorQueue(); + Grammar g = new Grammar(gtext, equeue); + ATN atn = createATN(g, false); + DecisionState blk = atn.decisionToState.get(decision); + checkRuleDFA(g, blk, expecting); + return equeue.all; + } + + void checkRuleDFA(Grammar g, DecisionState blk, String expecting) + throws Exception { + DFA dfa = createDFA(g, blk); + String 
result = null; + if (dfa != null) result = dfa.toString(); + assertEquals(expecting, result); + } + + List checkLexerDFA(String gtext, String expecting) + throws Exception { + return checkLexerDFA(gtext, LexerGrammar.DEFAULT_MODE_NAME, expecting); + } + + List checkLexerDFA(String gtext, String modeName, String expecting) + throws Exception { + ErrorQueue equeue = new ErrorQueue(); + LexerGrammar g = new LexerGrammar(gtext, equeue); + g.atn = createATN(g, false); +// LexerATNToDFAConverter conv = new LexerATNToDFAConverter(g); +// DFA dfa = conv.createDFA(modeName); +// g.setLookaheadDFA(0, dfa); // only one decision to worry about +// +// String result = null; +// if ( dfa!=null ) result = dfa.toString(); +// assertEquals(expecting, result); +// +// return equeue.all; + return null; + } + + protected String load(String fileName, String encoding) + throws IOException { + if (fileName == null) { + return null; + } + + String fullFileName = getClass().getPackage().getName().replace('.', '/') + '/' + fileName; + int size = 65000; + InputStreamReader isr; + InputStream fis = getClass().getClassLoader().getResourceAsStream(fullFileName); + if (encoding != null) { + isr = new InputStreamReader(fis, encoding); + } else { + isr = new InputStreamReader(fis); + } + try { + char[] data = new char[size]; + int n = isr.read(data); + return new String(data, 0, n); + } finally { + isr.close(); + } + } + + protected String execLexer(String grammarFileName, + String grammarStr, + String lexerName, + String input) { + return execLexer(grammarFileName, grammarStr, lexerName, input, false); + } + + @Override + public String execLexer(String grammarFileName, + String grammarStr, + String lexerName, + String input, + boolean showDFA) { + boolean success = rawGenerateAndBuildRecognizer(grammarFileName, + grammarStr, + null, + lexerName); + assertTrue(success); + writeFile(tmpdir, "input", input); + writeLexerTestFile(lexerName, showDFA); + String output = execClass("Test", 
AOT_COMPILE_TESTS.contains(input)); + return output; + } + + public ParseTree execParser(String startRuleName, String input, + String parserName, String lexerName) + throws Exception { + Pair pl = getParserAndLexer(input, parserName, lexerName); + Parser parser = pl.a; + return execStartRule(startRuleName, parser); + } + + public ParseTree execStartRule(String startRuleName, Parser parser) + throws IllegalAccessException, InvocationTargetException, + NoSuchMethodException { + Method startRule = null; + Object[] args = null; + try { + startRule = parser.getClass().getMethod(startRuleName); + } catch (NoSuchMethodException nsme) { + // try with int _p arg for recursive func + startRule = parser.getClass().getMethod(startRuleName, int.class); + args = new Integer[]{0}; + } + ParseTree result = (ParseTree) startRule.invoke(parser, args); +// System.out.println("parse tree = "+result.toStringTree(parser)); + return result; + } + + public Pair getParserAndLexer(String input, + String parserName, String lexerName) + throws Exception { + final Class lexerClass = loadLexerClassFromTempDir(lexerName); + final Class parserClass = loadParserClassFromTempDir(parserName); + + ANTLRInputStream in = new ANTLRInputStream(new StringReader(input)); + + Class c = lexerClass.asSubclass(Lexer.class); + Constructor ctor = c.getConstructor(CharStream.class); + Lexer lexer = ctor.newInstance(in); + + Class pc = parserClass.asSubclass(Parser.class); + Constructor pctor = pc.getConstructor(TokenStream.class); + CommonTokenStream tokens = new CommonTokenStream(lexer); + Parser parser = pctor.newInstance(tokens); + return new Pair(parser, lexer); + } + + public Class loadClassFromTempDir(String name) throws Exception { + ClassLoader loader = + new URLClassLoader(new URL[]{new File(tmpdir).toURI().toURL()}, + ClassLoader.getSystemClassLoader()); + return loader.loadClass(name); + } + + public Class loadLexerClassFromTempDir(String name) throws Exception { + return 
loadClassFromTempDir(name).asSubclass(Lexer.class); + } + + public Class loadParserClassFromTempDir(String name) throws Exception { + return loadClassFromTempDir(name).asSubclass(Parser.class); + } + + @Override + public String execParser(String grammarFileName, + String grammarStr, + String parserName, + String lexerName, + String listenerName, + String visitorName, + String startRuleName, + String input, + boolean showDiagnosticErrors) { + return execParser(grammarFileName, grammarStr, parserName, lexerName, + listenerName, visitorName, startRuleName, input, showDiagnosticErrors, false); + } + + public String execParser(String grammarFileName, + String grammarStr, + String parserName, + String lexerName, + String listenerName, + String visitorName, + String startRuleName, + String input, + boolean showDiagnosticErrors, + boolean profile) { + boolean success = rawGenerateAndBuildRecognizer(grammarFileName, + grammarStr, + parserName, + lexerName, + "-visitor"); + assertTrue(success); + writeFile(tmpdir, "input", input); + return rawExecRecognizer(parserName, + lexerName, + startRuleName, + showDiagnosticErrors, + profile, + AOT_COMPILE_TESTS.contains(input)); + } + + /** + * Return true if all is well + */ + protected boolean rawGenerateAndBuildRecognizer(String grammarFileName, + String grammarStr, + String parserName, + String lexerName, + String... extraOptions) { + return rawGenerateAndBuildRecognizer(grammarFileName, grammarStr, parserName, lexerName, false, extraOptions); + } + + /** + * Return true if all is well + */ + protected boolean rawGenerateAndBuildRecognizer(String grammarFileName, + String grammarStr, + String parserName, + String lexerName, + boolean defaultListener, + String... 
extraOptions) { + ErrorQueue equeue = + BaseRuntimeTest.antlrOnString(getTmpDir(), "Dart", grammarFileName, grammarStr, defaultListener, extraOptions); + if (!equeue.errors.isEmpty()) { + return false; + } + + List files = new ArrayList(); + if (lexerName != null) { + files.add(lexerName + ".dart"); + } + if (parserName != null) { + files.add(parserName + ".dart"); + Set optionsSet = new HashSet(Arrays.asList(extraOptions)); + String grammarName = grammarFileName.substring(0, grammarFileName.lastIndexOf('.')); + if (!optionsSet.contains("-no-listener")) { + files.add(grammarName + "Listener.dart"); + files.add(grammarName + "BaseListener.dart"); + } + if (optionsSet.contains("-visitor")) { + files.add(grammarName + "Visitor.dart"); + files.add(grammarName + "BaseVisitor.dart"); + } + } + + String runtime = locateRuntime(); + writeFile(tmpdir, "pubspec.yaml", + "name: \"test\"\n" + + "dependencies:\n" + + " antlr4:\n" + + " path: " + runtime + "\n"); + if (cacheDartPackages == null) { + try { + Process process = Runtime.getRuntime().exec(new String[]{locatePub(), "get"}, null, new File(tmpdir)); + StreamVacuum stderrVacuum = new StreamVacuum(process.getErrorStream()); + stderrVacuum.start(); + process.waitFor(); + stderrVacuum.join(); + String stderrDuringPubGet = stderrVacuum.toString(); + if (!stderrDuringPubGet.isEmpty()) { + System.out.println("Pub Get error: " + stderrVacuum.toString()); + } + } catch (IOException | InterruptedException e) { + e.printStackTrace(); + return false; + } + cacheDartPackages = readFile(tmpdir, ".packages"); + } else { + writeFile(tmpdir, ".packages", cacheDartPackages); + } + return true; // allIsWell: no compile + } + + protected String rawExecRecognizer(String parserName, + String lexerName, + String parserStartRuleName, + boolean debug, + boolean profile, + boolean aotCompile) { + this.stderrDuringParse = null; + if (parserName == null) { + writeLexerTestFile(lexerName, false); + } else { + writeTestFile(parserName, + lexerName, 
+ parserStartRuleName, + debug, + profile); + } + + return execClass("Test", aotCompile); + } + + public String execClass(String className, boolean compile) { + try { + if (compile) { + String[] args = new String[]{ + locateDart2Native(), + className + ".dart", "-o", className + }; + String cmdLine = Utils.join(args, " "); + System.err.println("Compile: " + cmdLine); + Process process = + Runtime.getRuntime().exec(args, null, new File(tmpdir)); + StreamVacuum stderrVacuum = new StreamVacuum(process.getErrorStream()); + stderrVacuum.start(); + int result = process.waitFor(); + if (result != 0) { + stderrVacuum.join(); + System.err.print("Error compiling dart file: " + stderrVacuum.toString()); + } + } + + String[] args; + if (compile) { + args = new String[]{ + new File(tmpdir, className).getAbsolutePath(), new File(tmpdir, "input").getAbsolutePath() + }; + } else { + args = new String[]{ + locateDart(), + className + ".dart", new File(tmpdir, "input").getAbsolutePath() + }; + } + //String cmdLine = Utils.join(args, " "); + //System.err.println("execParser: " + cmdLine); + Process process = + Runtime.getRuntime().exec(args, null, new File(tmpdir)); + StreamVacuum stdoutVacuum = new StreamVacuum(process.getInputStream()); + StreamVacuum stderrVacuum = new StreamVacuum(process.getErrorStream()); + stdoutVacuum.start(); + stderrVacuum.start(); + process.waitFor(); + stdoutVacuum.join(); + stderrVacuum.join(); + String output = stdoutVacuum.toString(); + if (output.length() == 0) { + output = null; + } + if (stderrVacuum.toString().length() > 0) { + this.stderrDuringParse = stderrVacuum.toString(); + } + return output; + } catch (Exception e) { + System.err.println("can't exec recognizer"); + e.printStackTrace(System.err); + } + return null; + } + + private String locateTool(String tool) { + final String dartPath = System.getProperty("DART_PATH"); + + final String[] tools = isWindows() + ? 
new String[]{tool + ".exe", tool + ".bat", tool} + : new String[]{tool}; + + if (dartPath != null) { + for (String t : tools) { + if (new File(dartPath + t).exists()) { + return dartPath + t; + } + } + } + + final String[] roots = isWindows() + ? new String[]{"C:\\tools\\dart-sdk\\bin\\"} + : new String[]{"/usr/local/bin/", "/opt/local/bin/", "/usr/bin/", "/usr/lib/dart/bin/"}; + + for (String root : roots) { + for (String t : tools) { + if (new File(root + t).exists()) { + return root + t; + } + } + } + + throw new RuntimeException("Could not locate " + tool); + } + + protected String locatePub() { + String propName = getPropertyPrefix() + "-pub"; + String prop = System.getProperty(propName); + + if (prop == null || prop.length() == 0) { + prop = locateTool("pub"); + } + + File file = new File(prop); + + if (!file.exists()) { + throw new RuntimeException("Missing system property:" + propName); + } + + return file.getAbsolutePath(); + } + + protected String locateDart() { + String propName = getPropertyPrefix() + "-dart"; + String prop = System.getProperty(propName); + + if (prop == null || prop.length() == 0) { + prop = locateTool("dart"); + } + + File file = new File(prop); + + if (!file.exists()) { + throw new RuntimeException("Missing system property:" + propName); + } + + return file.getAbsolutePath(); + } + + protected String locateDart2Native() { + String propName = getPropertyPrefix() + "-dart2native"; + String prop = System.getProperty(propName); + + if (prop == null || prop.length() == 0) { + prop = locateTool("dart2native"); + } + + File file = new File(prop); + + if (!file.exists()) { + throw new RuntimeException("Missing system property:" + propName); + } + + return file.getAbsolutePath(); + } + + private String locateRuntime() { + final ClassLoader loader = Thread.currentThread().getContextClassLoader(); + final URL runtimeSrc = loader.getResource("Dart"); + if (runtimeSrc == null) { + throw new RuntimeException("Cannot find Dart runtime"); + } + if 
(isWindows()) { + return runtimeSrc.getPath().replaceFirst("/", ""); + } + return runtimeSrc.getPath(); + } + + private boolean isWindows() { + return System.getProperty("os.name").toLowerCase().contains("windows"); + } + +// void ambig(List msgs, int[] expectedAmbigAlts, String expectedAmbigInput) +// throws Exception +// { +// ambig(msgs, 0, expectedAmbigAlts, expectedAmbigInput); +// } + +// void ambig(List msgs, int i, int[] expectedAmbigAlts, String expectedAmbigInput) +// throws Exception +// { +// List amsgs = getMessagesOfType(msgs, AmbiguityMessage.class); +// AmbiguityMessage a = (AmbiguityMessage)amsgs.get(i); +// if ( a==null ) assertNull(expectedAmbigAlts); +// else { +// assertEquals(a.conflictingAlts.toString(), Arrays.toString(expectedAmbigAlts)); +// } +// assertEquals(expectedAmbigInput, a.input); +// } + +// void unreachable(List msgs, int[] expectedUnreachableAlts) +// throws Exception +// { +// unreachable(msgs, 0, expectedUnreachableAlts); +// } + +// void unreachable(List msgs, int i, int[] expectedUnreachableAlts) +// throws Exception +// { +// List amsgs = getMessagesOfType(msgs, UnreachableAltsMessage.class); +// UnreachableAltsMessage u = (UnreachableAltsMessage)amsgs.get(i); +// if ( u==null ) assertNull(expectedUnreachableAlts); +// else { +// assertEquals(u.conflictingAlts.toString(), Arrays.toString(expectedUnreachableAlts)); +// } +// } + + List getMessagesOfType(List msgs, Class c) { + List filtered = new ArrayList(); + for (ANTLRMessage m : msgs) { + if (m.getClass() == c) filtered.add(m); + } + return filtered; + } + + public void checkRuleATN(Grammar g, String ruleName, String expecting) { +// DOTGenerator dot = new DOTGenerator(g); +// System.out.println(dot.getDOT(g.atn.ruleToStartState[g.getRule(ruleName).index])); + + Rule r = g.getRule(ruleName); + ATNState startState = g.getATN().ruleToStartState[r.index]; + ATNPrinter serializer = new ATNPrinter(g, startState); + String result = serializer.asString(); + + 
//System.out.print(result); + assertEquals(expecting, result); + } + + public void testActions(String templates, String actionName, String action, String expected) throws org.antlr.runtime.RecognitionException { + int lp = templates.indexOf('('); + String name = templates.substring(0, lp); + STGroup group = new STGroupString(templates); + ST st = group.getInstanceOf(name); + st.add(actionName, action); + String grammar = st.render(); + ErrorQueue equeue = new ErrorQueue(); + Grammar g = new Grammar(grammar, equeue); + if (g.ast != null && !g.ast.hasErrors) { + SemanticPipeline sem = new SemanticPipeline(g); + sem.process(); + + ATNFactory factory = new ParserATNFactory(g); + if (g.isLexer()) factory = new LexerATNFactory((LexerGrammar) g); + g.atn = factory.createATN(); + + AnalysisPipeline anal = new AnalysisPipeline(g); + anal.process(); + + CodeGenerator gen = new CodeGenerator(g); + ST outputFileST = gen.generateParser(false); + String output = outputFileST.render(); + //System.out.println(output); + String b = "#" + actionName + "#"; + int start = output.indexOf(b); + String e = "#end-" + actionName + "#"; + int end = output.indexOf(e); + String snippet = output.substring(start + b.length(), end); + assertEquals(expected, snippet); + } + if (equeue.size() > 0) { +// System.err.println(equeue.toString()); + } + } + + protected void checkGrammarSemanticsError(ErrorQueue equeue, + GrammarSemanticsMessage expectedMessage) + throws Exception { + ANTLRMessage foundMsg = null; + for (int i = 0; i < equeue.errors.size(); i++) { + ANTLRMessage m = equeue.errors.get(i); + if (m.getErrorType() == expectedMessage.getErrorType()) { + foundMsg = m; + } + } + assertNotNull("no error; " + expectedMessage.getErrorType() + " expected", foundMsg); + assertTrue("error is not a GrammarSemanticsMessage", + foundMsg instanceof GrammarSemanticsMessage); + assertEquals(Arrays.toString(expectedMessage.getArgs()), Arrays.toString(foundMsg.getArgs())); + if (equeue.size() != 1) { + 
System.err.println(equeue); + } + } + + protected void checkGrammarSemanticsWarning(ErrorQueue equeue, + GrammarSemanticsMessage expectedMessage) + throws Exception { + ANTLRMessage foundMsg = null; + for (int i = 0; i < equeue.warnings.size(); i++) { + ANTLRMessage m = equeue.warnings.get(i); + if (m.getErrorType() == expectedMessage.getErrorType()) { + foundMsg = m; + } + } + assertNotNull("no error; " + expectedMessage.getErrorType() + " expected", foundMsg); + assertTrue("error is not a GrammarSemanticsMessage", + foundMsg instanceof GrammarSemanticsMessage); + assertEquals(Arrays.toString(expectedMessage.getArgs()), Arrays.toString(foundMsg.getArgs())); + if (equeue.size() != 1) { + System.err.println(equeue); + } + } + + protected void checkError(ErrorQueue equeue, + ANTLRMessage expectedMessage) + throws Exception { + //System.out.println("errors="+equeue); + ANTLRMessage foundMsg = null; + for (int i = 0; i < equeue.errors.size(); i++) { + ANTLRMessage m = equeue.errors.get(i); + if (m.getErrorType() == expectedMessage.getErrorType()) { + foundMsg = m; + } + } + assertTrue("no error; " + expectedMessage.getErrorType() + " expected", !equeue.errors.isEmpty()); + assertTrue("too many errors; " + equeue.errors, equeue.errors.size() <= 1); + assertNotNull("couldn't find expected error: " + expectedMessage.getErrorType(), foundMsg); + /* + * assertTrue("error is not a GrammarSemanticsMessage", foundMsg + * instanceof GrammarSemanticsMessage); + */ + assertArrayEquals(expectedMessage.getArgs(), foundMsg.getArgs()); + } + + public static class FilteringTokenStream extends CommonTokenStream { + public FilteringTokenStream(TokenSource src) { + super(src); + } + + Set hide = new HashSet(); + + @Override + protected boolean sync(int i) { + if (!super.sync(i)) { + return false; + } + + Token t = get(i); + if (hide.contains(t.getType())) { + ((WritableToken) t).setChannel(Token.HIDDEN_CHANNEL); + } + + return true; + } + + public void setTokenTypeChannel(int ttype, int 
channel) { + hide.add(ttype); + } + } + + protected void writeTestFile(String parserName, + String lexerName, + String parserStartRuleName, + boolean debug, + boolean profile) { + ST outputFileST = new ST( + "import 'package:antlr4/antlr4.dart';\n" + + "\n" + + "import '.dart';\n" + + "import '.dart';\n" + + "\n" + + "void main(List\\ args) async {\n" + + " CharStream input = await InputStream.fromPath(args[0]);\n" + + " final lex = (input);\n" + + " final tokens = CommonTokenStream(lex);\n" + + " \n" + + " parser.buildParseTree = true;\n" + + " \n" + + " ParserRuleContext tree = parser.();\n" + + " print('[${profiler.getDecisionInfo().join(', ')}]');\n" + + " ParseTreeWalker.DEFAULT.walk(TreeShapeListener(), tree);\n" + + "}\n" + + "\n" + + "class TreeShapeListener implements ParseTreeListener {\n" + + " @override void visitTerminal(TerminalNode node) {}\n" + + "\n" + + " @override void visitErrorNode(ErrorNode node) {}\n" + + "\n" + + " @override void exitEveryRule(ParserRuleContext ctx) {}\n" + + "\n" + + " @override\n" + + " void enterEveryRule(ParserRuleContext ctx) {\n" + + " for (var i = 0; i \\< ctx.childCount; i++) {\n" + + " final parent = ctx.getChild(i).parent;\n" + + " if (!(parent is RuleNode) || (parent as RuleNode).ruleContext != ctx) {\n" + + " throw StateError('Invalid parse tree shape detected.');\n" + + " }\n" + + " }\n" + + " }\n" + + "}\n" + ); + ST createParserST = new ST("final parser = (tokens);\n"); + if (debug) { + createParserST = + new ST( + "final parser = (tokens);\n" + + " parser.addErrorListener(new DiagnosticErrorListener());\n"); + } + if (profile) { + outputFileST.add("profile", + "ProfilingATNSimulator profiler = ProfilingATNSimulator(parser);\n" + + "parser.setInterpreter(profiler);"); + } else { + outputFileST.add("profile", new ArrayList()); + } + outputFileST.add("createParser", createParserST); + outputFileST.add("parserName", parserName); + outputFileST.add("lexerName", lexerName); + outputFileST.add("parserStartRuleName", 
parserStartRuleName); + writeFile(tmpdir, "Test.dart", outputFileST.render()); + } + + protected void writeLexerTestFile(String lexerName, boolean showDFA) { + ST outputFileST = new ST( + "import 'dart:io';\n" + + "\n" + + "import 'package:antlr4/antlr4.dart';\n" + + "\n" + + "import '.dart';\n" + + "\n" + + "void main(List\\ args) async {\n" + + " CharStream input = await InputStream.fromPath(args[0]);\n" + + " lex = (input);\n" + + " CommonTokenStream tokens = CommonTokenStream(lex);\n" + + " tokens.fill();\n" + + " for (Object t in tokens.getTokens())\n" + + " print(t);\n" + + "\n" + + (showDFA ? "stdout.write(lex.interpreter.getDFA(Lexer.DEFAULT_MODE).toLexerString());\n" : "") + + "}\n" + ); + + outputFileST.add("lexerName", lexerName); + writeFile(tmpdir, "Test.dart", outputFileST.render()); + } + + protected void eraseFiles() { + if (tmpdir == null) { + return; + } + + File tmpdirF = new File(tmpdir); + String[] files = tmpdirF.list(); + for (int i = 0; files != null && i < files.length; i++) { + new File(tmpdir + "/" + files[i]).delete(); + } + } + + @Override + public void eraseTempDir() { + File tmpdirF = new File(tmpdir); + if (tmpdirF.exists()) { + eraseFiles(); + tmpdirF.delete(); + } + } + + public String getFirstLineOfException() { + if (this.stderrDuringParse == null) { + return null; + } + String[] lines = this.stderrDuringParse.split("\n"); + String prefix = "Exception in thread \"main\" "; + return lines[0].substring(prefix.length(), lines[0].length()); + } + + /** + * When looking at a result set that consists of a Map/HashTable + * we cannot rely on the output order, as the hashing algorithm or other aspects + * of the implementation may be different on differnt JDKs or platforms. Hence + * we take the Map, convert the keys to a List, sort them and Stringify the Map, which is a + * bit of a hack, but guarantees that we get the same order on all systems. We assume that + * the keys are strings. 
+ * + * @param m The Map that contains keys we wish to return in sorted order + * @return A string that represents all the keys in sorted order. + */ + public String sortMapToString(Map m) { + // Pass in crap, and get nothing back + // + if (m == null) { + return null; + } + + System.out.println("Map toString looks like: " + m.toString()); + + // Sort the keys in the Map + // + TreeMap nset = new TreeMap(m); + + System.out.println("Tree map looks like: " + nset.toString()); + return nset.toString(); + } + + public List realElements(List elements) { + return elements.subList(Token.MIN_USER_TOKEN_TYPE, elements.size()); + } + + public void assertNotNullOrEmpty(String message, String text) { + assertNotNull(message, text); + assertFalse(message, text.isEmpty()); + } + + public void assertNotNullOrEmpty(String text) { + assertNotNull(text); + assertFalse(text.isEmpty()); + } + + public static class IntTokenStream implements TokenStream { + public IntegerList types; + int p = 0; + + public IntTokenStream(IntegerList types) { + this.types = types; + } + + @Override + public void consume() { + p++; + } + + @Override + public int LA(int i) { + return LT(i).getType(); + } + + @Override + public int mark() { + return index(); + } + + @Override + public int index() { + return p; + } + + @Override + public void release(int marker) { + seek(marker); + } + + @Override + public void seek(int index) { + p = index; + } + + @Override + public int size() { + return types.size(); + } + + @Override + public String getSourceName() { + return UNKNOWN_SOURCE_NAME; + } + + @Override + public Token LT(int i) { + CommonToken t; + int rawIndex = p + i - 1; + if (rawIndex >= types.size()) t = new CommonToken(Token.EOF); + else t = new CommonToken(types.get(rawIndex)); + t.setTokenIndex(rawIndex); + return t; + } + + @Override + public Token get(int i) { + return new CommonToken(types.get(i)); + } + + @Override + public TokenSource getTokenSource() { + return null; + } + + @Override + public 
String getText() { + throw new UnsupportedOperationException("can't give strings"); + } + + @Override + public String getText(Interval interval) { + throw new UnsupportedOperationException("can't give strings"); + } + + @Override + public String getText(RuleContext ctx) { + throw new UnsupportedOperationException("can't give strings"); + } + + @Override + public String getText(Token start, Token stop) { + throw new UnsupportedOperationException("can't give strings"); + } + } + + /** + * Sort a list + */ + public > List sort(List data) { + List dup = new ArrayList(); + dup.addAll(data); + Collections.sort(dup); + return dup; + } + + /** + * Return map sorted by key + */ + public , V> LinkedHashMap sort(Map data) { + LinkedHashMap dup = new LinkedHashMap(); + List keys = new ArrayList(); + keys.addAll(data.keySet()); + Collections.sort(keys); + for (K k : keys) { + dup.put(k, data.get(k)); + } + return dup; + } +} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/explorer/TestCompositeLexers.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/dart/TestCompositeLexers.java similarity index 82% rename from runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/explorer/TestCompositeLexers.java rename to runtime-testsuite/test/org/antlr/v4/test/runtime/dart/TestCompositeLexers.java index fa22c5d91..60aa4a35a 100644 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/explorer/TestCompositeLexers.java +++ b/runtime-testsuite/test/org/antlr/v4/test/runtime/dart/TestCompositeLexers.java @@ -4,22 +4,23 @@ * can be found in the LICENSE.txt file in the project root. 
*/ -package org.antlr.v4.test.runtime.javascript.explorer; +package org.antlr.v4.test.runtime.dart; import org.antlr.v4.test.runtime.BaseRuntimeTest; import org.antlr.v4.test.runtime.RuntimeTestDescriptor; import org.antlr.v4.test.runtime.descriptors.CompositeLexersDescriptors; +import org.antlr.v4.test.runtime.dart.BaseDartTest; import org.junit.runner.RunWith; import org.junit.runners.Parameterized; @RunWith(Parameterized.class) public class TestCompositeLexers extends BaseRuntimeTest { public TestCompositeLexers(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseExplorerTest()); + super(descriptor,new BaseDartTest()); } @Parameterized.Parameters(name="{0}") public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(CompositeLexersDescriptors.class, "Explorer"); + return BaseRuntimeTest.getRuntimeTestDescriptors(CompositeLexersDescriptors.class, "Dart"); } } diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/firefox/TestCompositeParsers.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/dart/TestCompositeParsers.java similarity index 82% rename from runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/firefox/TestCompositeParsers.java rename to runtime-testsuite/test/org/antlr/v4/test/runtime/dart/TestCompositeParsers.java index e7fd00723..638413f9a 100644 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/firefox/TestCompositeParsers.java +++ b/runtime-testsuite/test/org/antlr/v4/test/runtime/dart/TestCompositeParsers.java @@ -4,22 +4,23 @@ * can be found in the LICENSE.txt file in the project root. 
*/ -package org.antlr.v4.test.runtime.javascript.firefox; +package org.antlr.v4.test.runtime.dart; import org.antlr.v4.test.runtime.BaseRuntimeTest; import org.antlr.v4.test.runtime.RuntimeTestDescriptor; import org.antlr.v4.test.runtime.descriptors.CompositeParsersDescriptors; +import org.antlr.v4.test.runtime.dart.BaseDartTest; import org.junit.runner.RunWith; import org.junit.runners.Parameterized; @RunWith(Parameterized.class) public class TestCompositeParsers extends BaseRuntimeTest { public TestCompositeParsers(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseFirefoxTest()); + super(descriptor,new BaseDartTest()); } @Parameterized.Parameters(name="{0}") public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(CompositeParsersDescriptors.class, "Firefox"); + return BaseRuntimeTest.getRuntimeTestDescriptors(CompositeParsersDescriptors.class, "Dart"); } } diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/explorer/TestFullContextParsing.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/dart/TestFullContextParsing.java similarity index 82% rename from runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/explorer/TestFullContextParsing.java rename to runtime-testsuite/test/org/antlr/v4/test/runtime/dart/TestFullContextParsing.java index 1e397c5f6..a0d7f9c1a 100644 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/explorer/TestFullContextParsing.java +++ b/runtime-testsuite/test/org/antlr/v4/test/runtime/dart/TestFullContextParsing.java @@ -4,22 +4,23 @@ * can be found in the LICENSE.txt file in the project root. 
*/ -package org.antlr.v4.test.runtime.javascript.explorer; +package org.antlr.v4.test.runtime.dart; import org.antlr.v4.test.runtime.BaseRuntimeTest; import org.antlr.v4.test.runtime.RuntimeTestDescriptor; import org.antlr.v4.test.runtime.descriptors.FullContextParsingDescriptors; +import org.antlr.v4.test.runtime.dart.BaseDartTest; import org.junit.runner.RunWith; import org.junit.runners.Parameterized; @RunWith(Parameterized.class) public class TestFullContextParsing extends BaseRuntimeTest { public TestFullContextParsing(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseExplorerTest()); + super(descriptor,new BaseDartTest()); } @Parameterized.Parameters(name="{0}") public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(FullContextParsingDescriptors.class, "Explorer"); + return BaseRuntimeTest.getRuntimeTestDescriptors(FullContextParsingDescriptors.class, "Dart"); } } diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/chrome/TestLeftRecursion.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/dart/TestLeftRecursion.java similarity index 82% rename from runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/chrome/TestLeftRecursion.java rename to runtime-testsuite/test/org/antlr/v4/test/runtime/dart/TestLeftRecursion.java index dfd1ccbad..e92f1b306 100644 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/chrome/TestLeftRecursion.java +++ b/runtime-testsuite/test/org/antlr/v4/test/runtime/dart/TestLeftRecursion.java @@ -4,22 +4,23 @@ * can be found in the LICENSE.txt file in the project root. 
*/ -package org.antlr.v4.test.runtime.javascript.chrome; +package org.antlr.v4.test.runtime.dart; import org.antlr.v4.test.runtime.BaseRuntimeTest; import org.antlr.v4.test.runtime.RuntimeTestDescriptor; import org.antlr.v4.test.runtime.descriptors.LeftRecursionDescriptors; +import org.antlr.v4.test.runtime.dart.BaseDartTest; import org.junit.runner.RunWith; import org.junit.runners.Parameterized; @RunWith(Parameterized.class) public class TestLeftRecursion extends BaseRuntimeTest { public TestLeftRecursion(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseChromeTest()); + super(descriptor,new BaseDartTest()); } @Parameterized.Parameters(name="{0}") public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(LeftRecursionDescriptors.class, "Chrome"); + return BaseRuntimeTest.getRuntimeTestDescriptors(LeftRecursionDescriptors.class, "Dart"); } } diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/chrome/TestLexerErrors.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/dart/TestLexerErrors.java similarity index 83% rename from runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/chrome/TestLexerErrors.java rename to runtime-testsuite/test/org/antlr/v4/test/runtime/dart/TestLexerErrors.java index 3eff25ca2..b95cd59bf 100644 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/chrome/TestLexerErrors.java +++ b/runtime-testsuite/test/org/antlr/v4/test/runtime/dart/TestLexerErrors.java @@ -4,22 +4,23 @@ * can be found in the LICENSE.txt file in the project root. 
*/ -package org.antlr.v4.test.runtime.javascript.chrome; +package org.antlr.v4.test.runtime.dart; import org.antlr.v4.test.runtime.BaseRuntimeTest; import org.antlr.v4.test.runtime.RuntimeTestDescriptor; import org.antlr.v4.test.runtime.descriptors.LexerErrorsDescriptors; +import org.antlr.v4.test.runtime.dart.BaseDartTest; import org.junit.runner.RunWith; import org.junit.runners.Parameterized; @RunWith(Parameterized.class) public class TestLexerErrors extends BaseRuntimeTest { public TestLexerErrors(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseChromeTest()); + super(descriptor,new BaseDartTest()); } @Parameterized.Parameters(name="{0}") public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(LexerErrorsDescriptors.class, "Chrome"); + return BaseRuntimeTest.getRuntimeTestDescriptors(LexerErrorsDescriptors.class, "Dart"); } } diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/chrome/TestLexerExec.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/dart/TestLexerExec.java similarity index 83% rename from runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/chrome/TestLexerExec.java rename to runtime-testsuite/test/org/antlr/v4/test/runtime/dart/TestLexerExec.java index e07e020b8..1ed1d84ca 100644 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/chrome/TestLexerExec.java +++ b/runtime-testsuite/test/org/antlr/v4/test/runtime/dart/TestLexerExec.java @@ -4,22 +4,23 @@ * can be found in the LICENSE.txt file in the project root. 
*/ -package org.antlr.v4.test.runtime.javascript.chrome; +package org.antlr.v4.test.runtime.dart; import org.antlr.v4.test.runtime.BaseRuntimeTest; import org.antlr.v4.test.runtime.RuntimeTestDescriptor; import org.antlr.v4.test.runtime.descriptors.LexerExecDescriptors; +import org.antlr.v4.test.runtime.dart.BaseDartTest; import org.junit.runner.RunWith; import org.junit.runners.Parameterized; @RunWith(Parameterized.class) public class TestLexerExec extends BaseRuntimeTest { public TestLexerExec(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseChromeTest()); + super(descriptor,new BaseDartTest()); } @Parameterized.Parameters(name="{0}") public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(LexerExecDescriptors.class, "Chrome"); + return BaseRuntimeTest.getRuntimeTestDescriptors(LexerExecDescriptors.class, "Dart"); } } diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/firefox/TestListeners.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/dart/TestListeners.java similarity index 83% rename from runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/firefox/TestListeners.java rename to runtime-testsuite/test/org/antlr/v4/test/runtime/dart/TestListeners.java index 950dd75ca..e15dee9ae 100644 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/firefox/TestListeners.java +++ b/runtime-testsuite/test/org/antlr/v4/test/runtime/dart/TestListeners.java @@ -4,22 +4,23 @@ * can be found in the LICENSE.txt file in the project root. 
*/ -package org.antlr.v4.test.runtime.javascript.firefox; +package org.antlr.v4.test.runtime.dart; import org.antlr.v4.test.runtime.BaseRuntimeTest; import org.antlr.v4.test.runtime.RuntimeTestDescriptor; import org.antlr.v4.test.runtime.descriptors.ListenersDescriptors; +import org.antlr.v4.test.runtime.dart.BaseDartTest; import org.junit.runner.RunWith; import org.junit.runners.Parameterized; @RunWith(Parameterized.class) public class TestListeners extends BaseRuntimeTest { public TestListeners(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseFirefoxTest()); + super(descriptor,new BaseDartTest()); } @Parameterized.Parameters(name="{0}") public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(ListenersDescriptors.class, "Firefox"); + return BaseRuntimeTest.getRuntimeTestDescriptors(ListenersDescriptors.class, "Dart"); } } diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/chrome/TestParseTrees.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/dart/TestParseTrees.java similarity index 83% rename from runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/chrome/TestParseTrees.java rename to runtime-testsuite/test/org/antlr/v4/test/runtime/dart/TestParseTrees.java index 487ddbbb4..0115e384d 100644 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/chrome/TestParseTrees.java +++ b/runtime-testsuite/test/org/antlr/v4/test/runtime/dart/TestParseTrees.java @@ -4,22 +4,23 @@ * can be found in the LICENSE.txt file in the project root. 
*/ -package org.antlr.v4.test.runtime.javascript.chrome; +package org.antlr.v4.test.runtime.dart; import org.antlr.v4.test.runtime.BaseRuntimeTest; import org.antlr.v4.test.runtime.RuntimeTestDescriptor; import org.antlr.v4.test.runtime.descriptors.ParseTreesDescriptors; +import org.antlr.v4.test.runtime.dart.BaseDartTest; import org.junit.runner.RunWith; import org.junit.runners.Parameterized; @RunWith(Parameterized.class) public class TestParseTrees extends BaseRuntimeTest { public TestParseTrees(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseChromeTest()); + super(descriptor,new BaseDartTest()); } @Parameterized.Parameters(name="{0}") public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(ParseTreesDescriptors.class, "Chrome"); + return BaseRuntimeTest.getRuntimeTestDescriptors(ParseTreesDescriptors.class, "Dart"); } } diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/chrome/TestParserErrors.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/dart/TestParserErrors.java similarity index 82% rename from runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/chrome/TestParserErrors.java rename to runtime-testsuite/test/org/antlr/v4/test/runtime/dart/TestParserErrors.java index 1eab06b52..87b850fb9 100644 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/chrome/TestParserErrors.java +++ b/runtime-testsuite/test/org/antlr/v4/test/runtime/dart/TestParserErrors.java @@ -4,22 +4,23 @@ * can be found in the LICENSE.txt file in the project root. 
*/ -package org.antlr.v4.test.runtime.javascript.chrome; +package org.antlr.v4.test.runtime.dart; import org.antlr.v4.test.runtime.BaseRuntimeTest; import org.antlr.v4.test.runtime.RuntimeTestDescriptor; import org.antlr.v4.test.runtime.descriptors.ParserErrorsDescriptors; +import org.antlr.v4.test.runtime.dart.BaseDartTest; import org.junit.runner.RunWith; import org.junit.runners.Parameterized; @RunWith(Parameterized.class) public class TestParserErrors extends BaseRuntimeTest { public TestParserErrors(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseChromeTest()); + super(descriptor,new BaseDartTest()); } @Parameterized.Parameters(name="{0}") public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(ParserErrorsDescriptors.class, "Chrome"); + return BaseRuntimeTest.getRuntimeTestDescriptors(ParserErrorsDescriptors.class, "Dart"); } } diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/firefox/TestParserExec.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/dart/TestParserExec.java similarity index 83% rename from runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/firefox/TestParserExec.java rename to runtime-testsuite/test/org/antlr/v4/test/runtime/dart/TestParserExec.java index 019b15e36..c22aa8ceb 100644 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/firefox/TestParserExec.java +++ b/runtime-testsuite/test/org/antlr/v4/test/runtime/dart/TestParserExec.java @@ -4,22 +4,23 @@ * can be found in the LICENSE.txt file in the project root. 
*/ -package org.antlr.v4.test.runtime.javascript.firefox; +package org.antlr.v4.test.runtime.dart; import org.antlr.v4.test.runtime.BaseRuntimeTest; import org.antlr.v4.test.runtime.RuntimeTestDescriptor; import org.antlr.v4.test.runtime.descriptors.ParserExecDescriptors; +import org.antlr.v4.test.runtime.dart.BaseDartTest; import org.junit.runner.RunWith; import org.junit.runners.Parameterized; @RunWith(Parameterized.class) public class TestParserExec extends BaseRuntimeTest { public TestParserExec(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseFirefoxTest()); + super(descriptor,new BaseDartTest()); } @Parameterized.Parameters(name="{0}") public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(ParserExecDescriptors.class, "Firefox"); + return BaseRuntimeTest.getRuntimeTestDescriptors(ParserExecDescriptors.class, "Dart"); } } diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/firefox/TestPerformance.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/dart/TestPerformance.java similarity index 83% rename from runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/firefox/TestPerformance.java rename to runtime-testsuite/test/org/antlr/v4/test/runtime/dart/TestPerformance.java index af1eacd01..78e6942ac 100644 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/firefox/TestPerformance.java +++ b/runtime-testsuite/test/org/antlr/v4/test/runtime/dart/TestPerformance.java @@ -4,22 +4,23 @@ * can be found in the LICENSE.txt file in the project root. 
*/ -package org.antlr.v4.test.runtime.javascript.firefox; +package org.antlr.v4.test.runtime.dart; import org.antlr.v4.test.runtime.BaseRuntimeTest; import org.antlr.v4.test.runtime.RuntimeTestDescriptor; import org.antlr.v4.test.runtime.descriptors.PerformanceDescriptors; +import org.antlr.v4.test.runtime.dart.BaseDartTest; import org.junit.runner.RunWith; import org.junit.runners.Parameterized; @RunWith(Parameterized.class) public class TestPerformance extends BaseRuntimeTest { public TestPerformance(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseFirefoxTest()); + super(descriptor,new BaseDartTest()); } @Parameterized.Parameters(name="{0}") public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(PerformanceDescriptors.class, "Firefox"); + return BaseRuntimeTest.getRuntimeTestDescriptors(PerformanceDescriptors.class, "Dart"); } } diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/chrome/TestSemPredEvalLexer.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/dart/TestSemPredEvalLexer.java similarity index 82% rename from runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/chrome/TestSemPredEvalLexer.java rename to runtime-testsuite/test/org/antlr/v4/test/runtime/dart/TestSemPredEvalLexer.java index d0374c881..8825042cf 100644 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/chrome/TestSemPredEvalLexer.java +++ b/runtime-testsuite/test/org/antlr/v4/test/runtime/dart/TestSemPredEvalLexer.java @@ -4,22 +4,23 @@ * can be found in the LICENSE.txt file in the project root. 
*/ -package org.antlr.v4.test.runtime.javascript.chrome; +package org.antlr.v4.test.runtime.dart; import org.antlr.v4.test.runtime.BaseRuntimeTest; import org.antlr.v4.test.runtime.RuntimeTestDescriptor; import org.antlr.v4.test.runtime.descriptors.SemPredEvalLexerDescriptors; +import org.antlr.v4.test.runtime.dart.BaseDartTest; import org.junit.runner.RunWith; import org.junit.runners.Parameterized; @RunWith(Parameterized.class) public class TestSemPredEvalLexer extends BaseRuntimeTest { public TestSemPredEvalLexer(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseChromeTest()); + super(descriptor,new BaseDartTest()); } @Parameterized.Parameters(name="{0}") public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(SemPredEvalLexerDescriptors.class, "Chrome"); + return BaseRuntimeTest.getRuntimeTestDescriptors(SemPredEvalLexerDescriptors.class, "Dart"); } } diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/chrome/TestSemPredEvalParser.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/dart/TestSemPredEvalParser.java similarity index 82% rename from runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/chrome/TestSemPredEvalParser.java rename to runtime-testsuite/test/org/antlr/v4/test/runtime/dart/TestSemPredEvalParser.java index cb8e530ff..87d6a9dea 100644 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/chrome/TestSemPredEvalParser.java +++ b/runtime-testsuite/test/org/antlr/v4/test/runtime/dart/TestSemPredEvalParser.java @@ -4,22 +4,23 @@ * can be found in the LICENSE.txt file in the project root. 
*/ -package org.antlr.v4.test.runtime.javascript.chrome; +package org.antlr.v4.test.runtime.dart; import org.antlr.v4.test.runtime.BaseRuntimeTest; import org.antlr.v4.test.runtime.RuntimeTestDescriptor; import org.antlr.v4.test.runtime.descriptors.SemPredEvalParserDescriptors; +import org.antlr.v4.test.runtime.dart.BaseDartTest; import org.junit.runner.RunWith; import org.junit.runners.Parameterized; @RunWith(Parameterized.class) public class TestSemPredEvalParser extends BaseRuntimeTest { public TestSemPredEvalParser(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseChromeTest()); + super(descriptor,new BaseDartTest()); } @Parameterized.Parameters(name="{0}") public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(SemPredEvalParserDescriptors.class, "Chrome"); + return BaseRuntimeTest.getRuntimeTestDescriptors(SemPredEvalParserDescriptors.class, "Dart"); } } diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/chrome/TestSets.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/dart/TestSets.java similarity index 83% rename from runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/chrome/TestSets.java rename to runtime-testsuite/test/org/antlr/v4/test/runtime/dart/TestSets.java index 331ca0858..4fe603fd4 100644 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/chrome/TestSets.java +++ b/runtime-testsuite/test/org/antlr/v4/test/runtime/dart/TestSets.java @@ -4,22 +4,23 @@ * can be found in the LICENSE.txt file in the project root. 
*/ -package org.antlr.v4.test.runtime.javascript.chrome; +package org.antlr.v4.test.runtime.dart; import org.antlr.v4.test.runtime.BaseRuntimeTest; import org.antlr.v4.test.runtime.RuntimeTestDescriptor; import org.antlr.v4.test.runtime.descriptors.SetsDescriptors; +import org.antlr.v4.test.runtime.dart.BaseDartTest; import org.junit.runner.RunWith; import org.junit.runners.Parameterized; @RunWith(Parameterized.class) public class TestSets extends BaseRuntimeTest { public TestSets(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseChromeTest()); + super(descriptor,new BaseDartTest()); } @Parameterized.Parameters(name="{0}") public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(SetsDescriptors.class, "Chrome"); + return BaseRuntimeTest.getRuntimeTestDescriptors(SetsDescriptors.class, "Dart"); } } diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/descriptors/LexerExecDescriptors.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/descriptors/LexerExecDescriptors.java index 7c74e74cb..f8a6a394c 100644 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/descriptors/LexerExecDescriptors.java +++ b/runtime-testsuite/test/org/antlr/v4/test/runtime/descriptors/LexerExecDescriptors.java @@ -1051,10 +1051,39 @@ public class LexerExecDescriptors { grammar = new String(Files.readAllBytes(Paths.get(stuff.toURI()))); } catch (Exception e) { - System.err.println("Cannot find grammar org/antlr/v4/test/runtime/LarseLexer.g4"); + System.err.println("Cannot find grammar org/antlr/v4/test/runtime/LargeLexer.g4"); } return new Pair<>(grammarName, grammar); } } + + /** + * This is a regression test for antlr/antlr4#2709 "PHP target generates + * invalid output when $ is used as part of the literal in lexer rule" + * https://github.com/antlr/antlr4/issues/2709 + */ + public static class EscapeTargetStringLiteral extends BaseLexerTestDescriptor { + /** + [@0,0:-1='',<-1>,1:0] + */ + 
@CommentHasStringValue + public String output; + + public String errors = null; + public String startRule = ""; + public String grammarName = "L"; + + /** + lexer grammar L; + ACTION_WITH_DOLLAR: '$ACTION'; + */ + @CommentHasStringValue + public String grammar; + + @Override + public boolean ignore(String targetName) { + return !targetName.equals("PHP"); + } + } } diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/descriptors/ParseTreesDescriptors.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/descriptors/ParseTreesDescriptors.java index bda6fecf3..5bd26693c 100644 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/descriptors/ParseTreesDescriptors.java +++ b/runtime-testsuite/test/org/antlr/v4/test/runtime/descriptors/ParseTreesDescriptors.java @@ -112,7 +112,7 @@ public class ParseTreesDescriptors { @Override public boolean ignore(String targetName) { - return !targetName.matches("Java|Python2|Python3|Node|Swift|CSharp"); + return !targetName.matches("Java|Python2|Python3|Node|Swift|CSharp|Dart"); } } diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/descriptors/ParserErrorsDescriptors.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/descriptors/ParserErrorsDescriptors.java index 0a3e40b19..b6f1c4686 100644 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/descriptors/ParserErrorsDescriptors.java +++ b/runtime-testsuite/test/org/antlr/v4/test/runtime/descriptors/ParserErrorsDescriptors.java @@ -639,7 +639,7 @@ public class ParserErrorsDescriptors { @Override public boolean ignore(String targetName) { - return !"Java".equals(targetName) && !"Swift".equals(targetName); + return !"Java".equals(targetName) && !"Swift".equals(targetName) && !"Dart".equals(targetName); } } } diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/descriptors/ParserExecDescriptors.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/descriptors/ParserExecDescriptors.java index ca6e393dd..d2b0931f0 100644 --- 
a/runtime-testsuite/test/org/antlr/v4/test/runtime/descriptors/ParserExecDescriptors.java +++ b/runtime-testsuite/test/org/antlr/v4/test/runtime/descriptors/ParserExecDescriptors.java @@ -889,4 +889,32 @@ public class ParserExecDescriptors { @CommentHasStringValue public String grammar; } + + /** + * This is a regression test for antlr/antlr4#2728 + * It should generate correct code for grammars with more than 65 tokens. + * https://github.com/antlr/antlr4/pull/2728#issuecomment-622940562 + */ + public static class TokenOffset extends BaseParserTestDescriptor { + public String input = "12 34 56 66"; + public String output = "12345666\n"; + + public String errors = null; + public String startRule = "a"; + public String grammarName = "L"; + + /** + grammar L; + a : ('1'|'2'|'3'|'4'|'5'|'6'|'7'|'8'|'9'|'10'|'11'|'12'|'13'|'14'|'15'|'16' + |'17'|'18'|'19'|'20'|'21'|'22'|'23'|'24'|'25'|'26'|'27'|'28'|'29'|'30'|'31'|'32' + |'33'|'34'|'35'|'36'|'37'|'38'|'39'|'40'|'41'|'42'|'43'|'44'|'45'|'46'|'47'|'48' + |'49'|'50'|'51'|'52'|'53'|'54'|'55'|'56'|'57'|'58'|'59'|'60'|'61'|'62'|'63'|'64' + |'65'|'66')+ { + + }; + WS : (' '|'\n') -> skip; + */ + @CommentHasStringValue + public String grammar; + } } diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/descriptors/PerformanceDescriptors.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/descriptors/PerformanceDescriptors.java index 2c2702fe5..69dbd9a07 100644 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/descriptors/PerformanceDescriptors.java +++ b/runtime-testsuite/test/org/antlr/v4/test/runtime/descriptors/PerformanceDescriptors.java @@ -113,7 +113,7 @@ public class PerformanceDescriptors { @Override public boolean ignore(String targetName) { - return !Arrays.asList("Java", "CSharp", "Python2", "Python3", "Node", "Cpp", "Swift").contains(targetName); + return !Arrays.asList("Java", "CSharp", "Python2", "Python3", "Node", "Cpp", "Swift", "Dart").contains(targetName); } } @@ -199,7 +199,7 @@ public class 
PerformanceDescriptors { @Override public boolean ignore(String targetName) { // passes, but still too slow in Python and JavaScript - return !Arrays.asList("Java", "CSharp", "Cpp", "Swift").contains(targetName); + return !Arrays.asList("Java", "CSharp", "Cpp", "Swift", "Dart").contains(targetName); } } diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/BaseNodeTest.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/BaseNodeTest.java new file mode 100644 index 000000000..3f5c3e378 --- /dev/null +++ b/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/BaseNodeTest.java @@ -0,0 +1,533 @@ +/* + * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ +package org.antlr.v4.test.runtime.javascript; + +import org.antlr.v4.Tool; +import org.antlr.v4.automata.ATNFactory; +import org.antlr.v4.automata.ATNPrinter; +import org.antlr.v4.automata.LexerATNFactory; +import org.antlr.v4.automata.ParserATNFactory; +import org.antlr.v4.codegen.CodeGenerator; +import org.antlr.v4.runtime.ANTLRInputStream; +import org.antlr.v4.runtime.CharStream; +import org.antlr.v4.runtime.CommonToken; +import org.antlr.v4.runtime.CommonTokenStream; +import org.antlr.v4.runtime.IntStream; +import org.antlr.v4.runtime.Lexer; +import org.antlr.v4.runtime.RuleContext; +import org.antlr.v4.runtime.Token; +import org.antlr.v4.runtime.TokenSource; +import org.antlr.v4.runtime.TokenStream; +import org.antlr.v4.runtime.WritableToken; +import org.antlr.v4.runtime.atn.ATN; +import org.antlr.v4.runtime.atn.ATNDeserializer; +import org.antlr.v4.runtime.atn.ATNSerializer; +import org.antlr.v4.runtime.atn.ATNState; +import org.antlr.v4.runtime.atn.LexerATNSimulator; +import org.antlr.v4.runtime.dfa.DFA; +import org.antlr.v4.runtime.misc.IntegerList; +import org.antlr.v4.runtime.misc.Interval; +import 
org.antlr.v4.semantics.SemanticPipeline; +import org.antlr.v4.test.runtime.ErrorQueue; +import org.antlr.v4.test.runtime.RuntimeTestSupport; +import org.antlr.v4.test.runtime.StreamVacuum; +import org.antlr.v4.test.runtime.TestContext; +import org.antlr.v4.tool.ANTLRMessage; +import org.antlr.v4.tool.DOTGenerator; +import org.antlr.v4.tool.Grammar; +import org.antlr.v4.tool.GrammarSemanticsMessage; +import org.antlr.v4.tool.LexerGrammar; +import org.antlr.v4.tool.Rule; +import org.stringtemplate.v4.ST; +import org.stringtemplate.v4.STGroup; +import org.stringtemplate.v4.STGroupString; + +import java.io.File; +import java.io.IOException; +import java.net.URL; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashSet; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.TreeMap; +import java.util.concurrent.TimeUnit; + +import static org.antlr.v4.test.runtime.BaseRuntimeTest.antlrOnString; +import static org.antlr.v4.test.runtime.BaseRuntimeTest.writeFile; +import static org.junit.Assert.assertArrayEquals; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertTrue; + +public class BaseNodeTest implements RuntimeTestSupport { + // -J-Dorg.antlr.v4.test.BaseTest.level=FINE + // private static final Logger LOGGER = + // Logger.getLogger(BaseTest.class.getName()); + + public static final String newline = System.getProperty("line.separator"); + public static final String pathSep = System.getProperty("path.separator"); + + public String tmpdir = null; + + /** + * If error during parser execution, store stderr here; can't return stdout + * and stderr. This doesn't trap errors from running antlr. 
+ */ + protected String stderrDuringParse; + + /** Errors found while running antlr */ + protected StringBuilder antlrToolErrors; + + @Override + public void testSetUp() throws Exception { + // new output dir for each test + String prop = System.getProperty("antlr-javascript-test-dir"); + if (prop != null && prop.length() > 0) { + tmpdir = prop; + } + else { + tmpdir = new File(System.getProperty("java.io.tmpdir"), getClass() + .getSimpleName()+"-"+Thread.currentThread().getName()+"-"+System.currentTimeMillis()) + .getAbsolutePath(); + } + File dir = new File(tmpdir); + if (dir.exists()) + this.eraseFiles(dir); + antlrToolErrors = new StringBuilder(); + } + + @Override + public void testTearDown() throws Exception { + } + + @Override + public String getTmpDir() { + return tmpdir; + } + + @Override + public String getStdout() { + return null; + } + + @Override + public String getParseErrors() { + return stderrDuringParse; + } + + @Override + public String getANTLRToolErrors() { + if ( antlrToolErrors.length()==0 ) { + return null; + } + return antlrToolErrors.toString(); + } + + protected ATN createATN(Grammar g, boolean useSerializer) { + if (g.atn == null) { + semanticProcess(g); + assertEquals(0, g.tool.getNumErrors()); + + ParserATNFactory f; + if (g.isLexer()) { + f = new LexerATNFactory((LexerGrammar) g); + } + else { + f = new ParserATNFactory(g); + } + + g.atn = f.createATN(); + assertEquals(0, g.tool.getNumErrors()); + } + + ATN atn = g.atn; + if (useSerializer) { + char[] serialized = ATNSerializer.getSerializedAsChars(atn); + return new ATNDeserializer().deserialize(serialized); + } + + return atn; + } + + protected void semanticProcess(Grammar g) { + if (g.ast != null && !g.ast.hasErrors) { + System.out.println(g.ast.toStringTree()); + Tool antlr = new Tool(); + SemanticPipeline sem = new SemanticPipeline(g); + sem.process(); + if (g.getImportedGrammars() != null) { // process imported grammars + // (if any) + for (Grammar imp : g.getImportedGrammars()) 
{ + antlr.processNonCombinedGrammar(imp, false); + } + } + } + } + + protected String execLexer(String grammarFileName, String grammarStr, + String lexerName, String input) { + return execLexer(grammarFileName, grammarStr, lexerName, input, false); + } + + @Override + public String execLexer(String grammarFileName, String grammarStr, + String lexerName, String input, boolean showDFA) { + boolean success = rawGenerateAndBuildRecognizer(grammarFileName, + grammarStr, null, lexerName, "-no-listener"); + assertTrue(success); + writeFile(tmpdir, "input", input); + writeLexerTestFile(lexerName, showDFA); + writeFile(tmpdir, "package.json", "{\"type\": \"module\"}"); + String output = execModule("Test.js"); + if ( output!=null && output.length()==0 ) { + output = null; + } + return output; + } + + @Override + public String execParser(String grammarFileName, String grammarStr, + String parserName, String lexerName, String listenerName, + String visitorName, String startRuleName, String input, + boolean showDiagnosticErrors) + { + boolean success = rawGenerateAndBuildRecognizer(grammarFileName, + grammarStr, parserName, lexerName, "-visitor"); + assertTrue(success); + writeFile(tmpdir, "input", input); + rawBuildRecognizerTestFile(parserName, lexerName, listenerName, + visitorName, startRuleName, showDiagnosticErrors); + writeFile(tmpdir, "package.json", "{\"type\": \"module\"}"); + return execRecognizer(); + } + + /** Return true if all is well */ + protected boolean rawGenerateAndBuildRecognizer(String grammarFileName, + String grammarStr, String parserName, String lexerName, + String... extraOptions) { + return rawGenerateAndBuildRecognizer(grammarFileName, grammarStr, + parserName, lexerName, false, extraOptions); + } + + /** Return true if all is well */ + protected boolean rawGenerateAndBuildRecognizer(String grammarFileName, + String grammarStr, String parserName, String lexerName, + boolean defaultListener, String... 
extraOptions) { + ErrorQueue equeue = antlrOnString(getTmpDir(), "JavaScript", grammarFileName, grammarStr, + defaultListener, extraOptions); + if (!equeue.errors.isEmpty()) { + return false; + } + + List files = new ArrayList(); + if (lexerName != null) { + files.add(lexerName + ".js"); + } + if (parserName != null) { + files.add(parserName + ".js"); + Set optionsSet = new HashSet( + Arrays.asList(extraOptions)); + if (!optionsSet.contains("-no-listener")) { + files.add(grammarFileName.substring(0, + grammarFileName.lastIndexOf('.')) + + "Listener.js"); + } + if (optionsSet.contains("-visitor")) { + files.add(grammarFileName.substring(0, + grammarFileName.lastIndexOf('.')) + + "Visitor.js"); + } + } + return true; // allIsWell: no compile + } + + protected void rawBuildRecognizerTestFile(String parserName, + String lexerName, String listenerName, String visitorName, + String parserStartRuleName, boolean debug) { + this.stderrDuringParse = null; + if (parserName == null) { + writeLexerTestFile(lexerName, false); + } + else { + writeParserTestFile(parserName, lexerName, listenerName, + visitorName, parserStartRuleName, debug); + } + } + + public String execRecognizer() { + return execModule("Test.js"); + } + + public String execModule(String fileName) { + try { + String npmPath = locateNpm(); + if(!TestContext.isTravisCI()) { + installRuntime(npmPath); + registerRuntime(npmPath); + } + String modulePath = new File(new File(tmpdir), fileName) + .getAbsolutePath(); + linkRuntime(npmPath); + String nodejsPath = locateNodeJS(); + String inputPath = new File(new File(tmpdir), "input") + .getAbsolutePath(); + ProcessBuilder builder = new ProcessBuilder(nodejsPath, modulePath, + inputPath); + builder.environment().put("NODE_PATH", tmpdir); + builder.directory(new File(tmpdir)); + Process process = builder.start(); + StreamVacuum stdoutVacuum = new StreamVacuum( + process.getInputStream()); + StreamVacuum stderrVacuum = new StreamVacuum( + process.getErrorStream()); + 
stdoutVacuum.start(); + stderrVacuum.start(); + // TODO switch to jdk 8 + process.waitFor(); + // if(!process.waitFor(1L, TimeUnit.MINUTES)) + // process.destroyForcibly(); + stdoutVacuum.join(); + stderrVacuum.join(); + String output = stdoutVacuum.toString(); + if ( output.length()==0 ) { + output = null; + } + if (stderrVacuum.toString().length() > 0) { + this.stderrDuringParse = stderrVacuum.toString(); + } + return output; + } catch (Exception e) { + System.err.println("can't exec recognizer"); + e.printStackTrace(System.err); + System.err.println(); + return null; + } + } + + private void installRuntime(String npmPath) throws IOException, InterruptedException { + String runtimePath = locateRuntime(); + ProcessBuilder builder = new ProcessBuilder(npmPath, "install"); + builder.directory(new File(runtimePath)); + builder.redirectError(new File(tmpdir, "error.txt")); + builder.redirectOutput(new File(tmpdir, "output.txt")); + Process process = builder.start(); + // TODO switch to jdk 8 + process.waitFor(); + // if(!process.waitFor(30L, TimeUnit.SECONDS)) + // process.destroyForcibly(); + int error = process.exitValue(); + if(error!=0) + throw new IOException("'npm install' failed"); + } + + private void registerRuntime(String npmPath) throws IOException, InterruptedException { + String runtimePath = locateRuntime(); + ProcessBuilder builder = new ProcessBuilder(npmPath, "link"); + builder.directory(new File(runtimePath)); + builder.redirectError(new File(tmpdir, "error.txt")); + builder.redirectOutput(new File(tmpdir, "output.txt")); + Process process = builder.start(); + // TODO switch to jdk 8 + process.waitFor(); + // if(!process.waitFor(30L, TimeUnit.SECONDS)) + // process.destroyForcibly(); + int error = process.exitValue(); + if(error!=0) + throw new IOException("'npm link' failed"); + } + + private void linkRuntime(String npmPath) throws IOException, InterruptedException { + ProcessBuilder builder = new ProcessBuilder(npmPath, "link", "antlr4"); + 
builder.directory(new File(tmpdir)); + builder.redirectError(new File(tmpdir, "error.txt")); + builder.redirectOutput(new File(tmpdir, "output.txt")); + Process process = builder.start(); + // TODO switch to jdk 8 + process.waitFor(); + // if(!process.waitFor(30L, TimeUnit.SECONDS)) + // process.destroyForcibly(); + int error = process.exitValue(); + if(error!=0) + throw new IOException("'npm link antlr4' failed"); + } + + private boolean canExecute(String tool) { + try { + ProcessBuilder builder = new ProcessBuilder(tool, "--version"); + builder.redirectErrorStream(true); + Process process = builder.start(); + StreamVacuum vacuum = new StreamVacuum(process.getInputStream()); + vacuum.start(); + // TODO switch to jdk 8 + process.waitFor(); + // if(!process.waitFor(30L, TimeUnit.SECONDS)) + // process.destroyForcibly(); + vacuum.join(); + return process.exitValue() == 0; + } catch (Exception e) { + return false; + } + } + + private String locateNpm() { + // typically /usr/local/bin/npm + String prop = System.getProperty("antlr-javascript-npm"); + if ( prop!=null && prop.length()!=0 ) { + return prop; + } + return "npm"; // everywhere + } + + private String locateNodeJS() { + // typically /usr/local/bin/node + String prop = System.getProperty("antlr-javascript-nodejs"); + if ( prop!=null && prop.length()!=0 ) { + return prop; + } + if (canExecute("nodejs")) { + return "nodejs"; // nodejs on Debian without node-legacy package + } + return "node"; // everywhere else + } + + private String locateRuntime() { + final ClassLoader loader = Thread.currentThread().getContextClassLoader(); + final URL runtimeSrc = loader.getResource("JavaScript"); + if ( runtimeSrc==null ) { + throw new RuntimeException("Cannot find JavaScript runtime"); + } + if(isWindows()){ + return runtimeSrc.getPath().replaceFirst("/", ""); + } + return runtimeSrc.getPath(); + } + + private boolean isWindows() { + return System.getProperty("os.name").toLowerCase().contains("windows"); + } + + + protected 
void writeParserTestFile(String parserName, String lexerName, + String listenerName, String visitorName, + String parserStartRuleName, boolean debug) { + ST outputFileST = new ST( + "import antlr4 from 'antlr4';\n" + + "import from './.js';\n" + + "import from './.js';\n" + + "import from './.js';\n" + + "import from './.js';\n" + + "\n" + + "class TreeShapeListener extends antlr4.tree.ParseTreeListener {\n" + + " enterEveryRule(ctx) {\n" + + " for (let i = 0; i \\< ctx.getChildCount; i++) {\n" + + " const child = ctx.getChild(i)\n" + + " const parent = child.parentCtx\n" + + " if (parent.getRuleContext() !== ctx || !(parent instanceof antlr4.tree.RuleNode)) {\n" + + " throw `Invalid parse tree shape detected.`\n" + + " }\n" + + " }\n" + + " }\n" + + "}\n" + + "\n" + + "function main(argv) {\n" + + " var input = new antlr4.FileStream(argv[2], true);\n" + + " var lexer = new (input);\n" + + " var stream = new antlr4.CommonTokenStream(lexer);\n" + + "" + + " parser.buildParseTrees = true;\n" + + " const printer = function() {\n" + + " this.println = function(s) { console.log(s); }\n" + + " this.print = function(s) { process.stdout.write(s); }\n" + + " return this;\n" + + " };\n" + + " parser.printer = new printer();\n" + + " var tree = parser.();\n" + + " antlr4.tree.ParseTreeWalker.DEFAULT.walk(new TreeShapeListener(), tree);\n" + + "}\n" + "\n" + "main(process.argv);\n" + "\n"); + ST createParserST = new ST( + " var parser = new (stream);\n"); + if (debug) { + createParserST = new ST( + " var parser = new (stream);\n" + + " parser.addErrorListener(new antlr4.error.DiagnosticErrorListener());\n"); + } + outputFileST.add("createParser", createParserST); + outputFileST.add("parserName", parserName); + outputFileST.add("lexerName", lexerName); + outputFileST.add("listenerName", listenerName); + outputFileST.add("visitorName", visitorName); + outputFileST.add("parserStartRuleName", parserStartRuleName); + writeFile(tmpdir, "Test.js", outputFileST.render()); + } + + 
protected void writeLexerTestFile(String lexerName, boolean showDFA) { + ST outputFileST = new ST( + "import antlr4 from 'antlr4';\n" + + "import from './.js';\n" + + "\n" + + "function main(argv) {\n" + + " var input = new antlr4.FileStream(argv[2], true);\n" + + " var lexer = new (input);\n" + + " var stream = new antlr4.CommonTokenStream(lexer);\n" + + " stream.fill();\n" + + " for(var i=0; i\\ 0) + doErase = Boolean.getBoolean(prop); + if (doErase) { + File tmpdirF = new File(tmpdir); + if (tmpdirF.exists()) { + eraseFiles(tmpdirF); + tmpdirF.delete(); + } + } + } + + + /** Sort a list */ + public > List sort(List data) { + List dup = new ArrayList(data); + Collections.sort(dup); + return dup; + } + + /** Return map sorted by key */ + public , V> LinkedHashMap sort( + Map data) { + LinkedHashMap dup = new LinkedHashMap(); + List keys = new ArrayList(data.keySet()); + Collections.sort(keys); + for (K k : keys) { + dup.put(k, data.get(k)); + } + return dup; + } +} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/node/TestCompositeLexers.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/TestCompositeLexers.java similarity index 94% rename from runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/node/TestCompositeLexers.java rename to runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/TestCompositeLexers.java index 92b094574..f7b5fe339 100644 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/node/TestCompositeLexers.java +++ b/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/TestCompositeLexers.java @@ -4,7 +4,7 @@ * can be found in the LICENSE.txt file in the project root. 
*/ -package org.antlr.v4.test.runtime.javascript.node; +package org.antlr.v4.test.runtime.javascript; import org.antlr.v4.test.runtime.BaseRuntimeTest; import org.antlr.v4.test.runtime.RuntimeTestDescriptor; diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/node/TestCompositeParsers.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/TestCompositeParsers.java similarity index 94% rename from runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/node/TestCompositeParsers.java rename to runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/TestCompositeParsers.java index 9662ab087..9d9a35c0d 100644 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/node/TestCompositeParsers.java +++ b/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/TestCompositeParsers.java @@ -4,7 +4,7 @@ * can be found in the LICENSE.txt file in the project root. */ -package org.antlr.v4.test.runtime.javascript.node; +package org.antlr.v4.test.runtime.javascript; import org.antlr.v4.test.runtime.BaseRuntimeTest; import org.antlr.v4.test.runtime.RuntimeTestDescriptor; diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/node/TestFullContextParsing.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/TestFullContextParsing.java similarity index 94% rename from runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/node/TestFullContextParsing.java rename to runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/TestFullContextParsing.java index 8978ecddf..1aed8bcb7 100644 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/node/TestFullContextParsing.java +++ b/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/TestFullContextParsing.java @@ -4,7 +4,7 @@ * can be found in the LICENSE.txt file in the project root. 
*/ -package org.antlr.v4.test.runtime.javascript.node; +package org.antlr.v4.test.runtime.javascript; import org.antlr.v4.test.runtime.BaseRuntimeTest; import org.antlr.v4.test.runtime.RuntimeTestDescriptor; diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/node/TestLeftRecursion.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/TestLeftRecursion.java similarity index 94% rename from runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/node/TestLeftRecursion.java rename to runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/TestLeftRecursion.java index 27182e063..ab892d92f 100644 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/node/TestLeftRecursion.java +++ b/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/TestLeftRecursion.java @@ -4,7 +4,7 @@ * can be found in the LICENSE.txt file in the project root. */ -package org.antlr.v4.test.runtime.javascript.node; +package org.antlr.v4.test.runtime.javascript; import org.antlr.v4.test.runtime.BaseRuntimeTest; import org.antlr.v4.test.runtime.RuntimeTestDescriptor; diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/node/TestLexerErrors.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/TestLexerErrors.java similarity index 94% rename from runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/node/TestLexerErrors.java rename to runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/TestLexerErrors.java index b4ee553ad..d4bb1b812 100644 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/node/TestLexerErrors.java +++ b/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/TestLexerErrors.java @@ -4,7 +4,7 @@ * can be found in the LICENSE.txt file in the project root. 
*/ -package org.antlr.v4.test.runtime.javascript.node; +package org.antlr.v4.test.runtime.javascript; import org.antlr.v4.test.runtime.BaseRuntimeTest; import org.antlr.v4.test.runtime.RuntimeTestDescriptor; diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/node/TestLexerExec.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/TestLexerExec.java similarity index 94% rename from runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/node/TestLexerExec.java rename to runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/TestLexerExec.java index 0a9bd715e..8f6401dc2 100644 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/node/TestLexerExec.java +++ b/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/TestLexerExec.java @@ -4,7 +4,7 @@ * can be found in the LICENSE.txt file in the project root. */ -package org.antlr.v4.test.runtime.javascript.node; +package org.antlr.v4.test.runtime.javascript; import org.antlr.v4.test.runtime.BaseRuntimeTest; import org.antlr.v4.test.runtime.RuntimeTestDescriptor; diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/node/TestListeners.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/TestListeners.java similarity index 94% rename from runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/node/TestListeners.java rename to runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/TestListeners.java index d4316f56d..7e429a063 100644 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/node/TestListeners.java +++ b/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/TestListeners.java @@ -4,7 +4,7 @@ * can be found in the LICENSE.txt file in the project root. 
*/ -package org.antlr.v4.test.runtime.javascript.node; +package org.antlr.v4.test.runtime.javascript; import org.antlr.v4.test.runtime.BaseRuntimeTest; import org.antlr.v4.test.runtime.RuntimeTestDescriptor; diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/node/TestParseTrees.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/TestParseTrees.java similarity index 94% rename from runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/node/TestParseTrees.java rename to runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/TestParseTrees.java index 6884ac3b1..b69f90f41 100644 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/node/TestParseTrees.java +++ b/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/TestParseTrees.java @@ -4,7 +4,7 @@ * can be found in the LICENSE.txt file in the project root. */ -package org.antlr.v4.test.runtime.javascript.node; +package org.antlr.v4.test.runtime.javascript; import org.antlr.v4.test.runtime.BaseRuntimeTest; import org.antlr.v4.test.runtime.RuntimeTestDescriptor; diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/node/TestParserErrors.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/TestParserErrors.java similarity index 94% rename from runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/node/TestParserErrors.java rename to runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/TestParserErrors.java index e3aa09789..8412327ae 100644 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/node/TestParserErrors.java +++ b/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/TestParserErrors.java @@ -4,7 +4,7 @@ * can be found in the LICENSE.txt file in the project root. 
*/ -package org.antlr.v4.test.runtime.javascript.node; +package org.antlr.v4.test.runtime.javascript; import org.antlr.v4.test.runtime.BaseRuntimeTest; import org.antlr.v4.test.runtime.RuntimeTestDescriptor; diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/node/TestParserExec.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/TestParserExec.java similarity index 94% rename from runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/node/TestParserExec.java rename to runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/TestParserExec.java index 3845a1f14..04838a139 100644 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/node/TestParserExec.java +++ b/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/TestParserExec.java @@ -4,7 +4,7 @@ * can be found in the LICENSE.txt file in the project root. */ -package org.antlr.v4.test.runtime.javascript.node; +package org.antlr.v4.test.runtime.javascript; import org.antlr.v4.test.runtime.BaseRuntimeTest; import org.antlr.v4.test.runtime.RuntimeTestDescriptor; diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/node/TestPerformance.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/TestPerformance.java similarity index 94% rename from runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/node/TestPerformance.java rename to runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/TestPerformance.java index d053e85f5..038d28a72 100644 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/node/TestPerformance.java +++ b/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/TestPerformance.java @@ -4,7 +4,7 @@ * can be found in the LICENSE.txt file in the project root. 
*/ -package org.antlr.v4.test.runtime.javascript.node; +package org.antlr.v4.test.runtime.javascript; import org.antlr.v4.test.runtime.BaseRuntimeTest; import org.antlr.v4.test.runtime.RuntimeTestDescriptor; diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/node/TestSemPredEvalLexer.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/TestSemPredEvalLexer.java similarity index 94% rename from runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/node/TestSemPredEvalLexer.java rename to runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/TestSemPredEvalLexer.java index 3f591fd61..ffc4b5434 100644 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/node/TestSemPredEvalLexer.java +++ b/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/TestSemPredEvalLexer.java @@ -4,7 +4,7 @@ * can be found in the LICENSE.txt file in the project root. */ -package org.antlr.v4.test.runtime.javascript.node; +package org.antlr.v4.test.runtime.javascript; import org.antlr.v4.test.runtime.BaseRuntimeTest; import org.antlr.v4.test.runtime.RuntimeTestDescriptor; diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/node/TestSemPredEvalParser.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/TestSemPredEvalParser.java similarity index 94% rename from runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/node/TestSemPredEvalParser.java rename to runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/TestSemPredEvalParser.java index 2dd349614..01b5b95da 100644 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/node/TestSemPredEvalParser.java +++ b/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/TestSemPredEvalParser.java @@ -4,7 +4,7 @@ * can be found in the LICENSE.txt file in the project root. 
*/ -package org.antlr.v4.test.runtime.javascript.node; +package org.antlr.v4.test.runtime.javascript; import org.antlr.v4.test.runtime.BaseRuntimeTest; import org.antlr.v4.test.runtime.RuntimeTestDescriptor; diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/node/TestSets.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/TestSets.java similarity index 94% rename from runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/node/TestSets.java rename to runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/TestSets.java index 2d11a0a60..8036d6956 100644 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/node/TestSets.java +++ b/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/TestSets.java @@ -4,7 +4,7 @@ * can be found in the LICENSE.txt file in the project root. */ -package org.antlr.v4.test.runtime.javascript.node; +package org.antlr.v4.test.runtime.javascript; import org.antlr.v4.test.runtime.BaseRuntimeTest; import org.antlr.v4.test.runtime.RuntimeTestDescriptor; diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/browser/BaseBrowserTest.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/browser/BaseBrowserTest.java deleted file mode 100644 index 3796e5154..000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/browser/BaseBrowserTest.java +++ /dev/null @@ -1,967 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ -package org.antlr.v4.test.runtime.javascript.browser; - -import org.antlr.v4.Tool; -import org.antlr.v4.automata.ATNFactory; -import org.antlr.v4.automata.ATNPrinter; -import org.antlr.v4.automata.LexerATNFactory; -import org.antlr.v4.automata.ParserATNFactory; -import org.antlr.v4.codegen.CodeGenerator; -import org.antlr.v4.runtime.ANTLRInputStream; -import org.antlr.v4.runtime.CharStream; -import org.antlr.v4.runtime.CommonToken; -import org.antlr.v4.runtime.CommonTokenStream; -import org.antlr.v4.runtime.IntStream; -import org.antlr.v4.runtime.Lexer; -import org.antlr.v4.runtime.RuleContext; -import org.antlr.v4.runtime.Token; -import org.antlr.v4.runtime.TokenSource; -import org.antlr.v4.runtime.TokenStream; -import org.antlr.v4.runtime.WritableToken; -import org.antlr.v4.runtime.atn.ATN; -import org.antlr.v4.runtime.atn.ATNDeserializer; -import org.antlr.v4.runtime.atn.ATNSerializer; -import org.antlr.v4.runtime.atn.ATNState; -import org.antlr.v4.runtime.atn.LexerATNSimulator; -import org.antlr.v4.runtime.dfa.DFA; -import org.antlr.v4.runtime.misc.IntegerList; -import org.antlr.v4.runtime.misc.Interval; -import org.antlr.v4.semantics.SemanticPipeline; -import org.antlr.v4.test.runtime.ErrorQueue; -import org.antlr.v4.test.runtime.RuntimeTestSupport; -import org.antlr.v4.tool.ANTLRMessage; -import org.antlr.v4.tool.DOTGenerator; -import org.antlr.v4.tool.Grammar; -import org.antlr.v4.tool.GrammarSemanticsMessage; -import org.antlr.v4.tool.LexerGrammar; -import org.antlr.v4.tool.Rule; -import org.eclipse.jetty.server.Handler; -import org.eclipse.jetty.server.Server; -import org.eclipse.jetty.server.handler.DefaultHandler; -import org.eclipse.jetty.server.handler.HandlerList; -import org.eclipse.jetty.server.handler.ResourceHandler; -import org.junit.rules.TestRule; -import org.junit.rules.TestWatcher; -import org.junit.runner.Description; -import org.openqa.selenium.By.ById; -import org.openqa.selenium.WebDriver; -import org.stringtemplate.v4.ST; -import 
org.stringtemplate.v4.STGroup; -import org.stringtemplate.v4.STGroupString; - -import java.io.File; -import java.net.BindException; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collections; -import java.util.HashSet; -import java.util.LinkedHashMap; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.TreeMap; - -import static org.antlr.v4.test.runtime.BaseRuntimeTest.antlrOnString; -import static org.antlr.v4.test.runtime.BaseRuntimeTest.writeFile; -import static org.junit.Assert.assertArrayEquals; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertTrue; - -public abstract class BaseBrowserTest implements RuntimeTestSupport { - // -J-Dorg.antlr.v4.test.BaseTest.level=FINE - // private static final Logger LOGGER = Logger.getLogger(BaseTest.class.getName()); - - public static final String newline = System.getProperty("line.separator"); - public static final String pathSep = System.getProperty("path.separator"); - - public String httpdir = null; - public String tmpdir = null; - - /** If error during parser execution, store stderr here; can't return - * stdout and stderr. This doesn't trap errors from running antlr. - */ - protected String stderrDuringParse; - - /** Errors found while running antlr */ - protected StringBuilder antlrToolErrors; - - @org.junit.Rule - public final TestRule testWatcher = new TestWatcher() { - - @Override - protected void succeeded(Description description) { - // remove tmpdir if no error. 
- eraseTempDir(); - } - - }; - - - @Override - public void testSetUp() throws Exception { - // new output dir for each test - String prop = System.getProperty("antlr-javascript-test-dir"); - if(prop!=null && prop.length()>0) { - httpdir = prop; - } - else { - httpdir = new File(System.getProperty("java.io.tmpdir"), getClass().getSimpleName()+"-"+Thread.currentThread().getName()+"-"+System.currentTimeMillis()).getAbsolutePath(); - } - File dir = new File(httpdir); - if(dir.exists()) - this.eraseFiles(dir); - tmpdir = new File(httpdir, "parser").getAbsolutePath(); - } - - @Override - public String getTmpDir() { - return tmpdir; - } - - @Override - public String getStdout() { - return null; - } - - @Override - public String getParseErrors() { - return stderrDuringParse; - } - - @Override - public String getANTLRToolErrors() { - return null; - } - - protected org.antlr.v4.Tool newTool(String[] args) { - Tool tool = new Tool(args); - return tool; - } - - protected Tool newTool() { - org.antlr.v4.Tool tool = new Tool(new String[] {"-o", tmpdir}); - return tool; - } - - protected ATN createATN(Grammar g, boolean useSerializer) { - if ( g.atn==null ) { - semanticProcess(g); - assertEquals(0, g.tool.getNumErrors()); - - ParserATNFactory f; - if ( g.isLexer() ) { - f = new LexerATNFactory((LexerGrammar)g); - } - else { - f = new ParserATNFactory(g); - } - - g.atn = f.createATN(); - assertEquals(0, g.tool.getNumErrors()); - } - - ATN atn = g.atn; - if (useSerializer) { - char[] serialized = ATNSerializer.getSerializedAsChars(atn); - return new ATNDeserializer().deserialize(serialized); - } - - return atn; - } - - protected void semanticProcess(Grammar g) { - if ( g.ast!=null && !g.ast.hasErrors ) { - System.out.println(g.ast.toStringTree()); - Tool antlr = new Tool(); - SemanticPipeline sem = new SemanticPipeline(g); - sem.process(); - if ( g.getImportedGrammars()!=null ) { // process imported grammars (if any) - for (Grammar imp : g.getImportedGrammars()) { - 
antlr.processNonCombinedGrammar(imp, false); - } - } - } - } - - - IntegerList getTypesFromString(Grammar g, String expecting) { - IntegerList expectingTokenTypes = new IntegerList(); - if ( expecting!=null && !expecting.trim().isEmpty() ) { - for (String tname : expecting.replace(" ", "").split(",")) { - int ttype = g.getTokenType(tname); - expectingTokenTypes.add(ttype); - } - } - return expectingTokenTypes; - } - - public IntegerList getTokenTypesViaATN(String input, LexerATNSimulator lexerATN) { - ANTLRInputStream in = new ANTLRInputStream(input); - IntegerList tokenTypes = new IntegerList(); - int ttype; - do { - ttype = lexerATN.match(in, Lexer.DEFAULT_MODE); - tokenTypes.add(ttype); - } while ( ttype!= Token.EOF ); - return tokenTypes; - } - - public List getTokenTypes(LexerGrammar lg, - ATN atn, - CharStream input) - { - LexerATNSimulator interp = new LexerATNSimulator(atn,new DFA[] { new DFA(atn.modeToStartState.get(Lexer.DEFAULT_MODE)) },null); - List tokenTypes = new ArrayList(); - int ttype; - boolean hitEOF = false; - do { - if ( hitEOF ) { - tokenTypes.add("EOF"); - break; - } - int t = input.LA(1); - ttype = interp.match(input, Lexer.DEFAULT_MODE); - if ( ttype == Token.EOF ) { - tokenTypes.add("EOF"); - } - else { - tokenTypes.add(lg.typeToTokenList.get(ttype)); - } - - if ( t==IntStream.EOF ) { - hitEOF = true; - } - } while ( ttype!=Token.EOF ); - return tokenTypes; - } - - @Override - public String execLexer(String grammarFileName, - String grammarStr, - String lexerName, - String input, boolean showDFA) - { - boolean success = rawGenerateAndBuildRecognizer(grammarFileName, - grammarStr, - null, - lexerName,"-no-listener"); - assertTrue(success); - writeLexerTestFile(lexerName, showDFA); - String output = null; - try { - output = execHtmlPage("Test.html", input); - } - catch (Exception e) { - e.printStackTrace(System.err); - } - return output; - } - - @Override - public String execParser(String grammarFileName, - String grammarStr, - String 
parserName, - String lexerName, - String listenerName, - String visitorName, - String startRuleName, - String input, - boolean showDiagnosticErrors) - { - boolean success = rawGenerateAndBuildRecognizer(grammarFileName, - grammarStr, - parserName, - lexerName, - "-visitor"); - assertTrue(success); - rawBuildRecognizerTestFile(parserName, - lexerName, - listenerName, - visitorName, - startRuleName, - showDiagnosticErrors); - String result = null; - try { - result = execRecognizer(input); - } - catch (Exception e) { - e.printStackTrace(System.err); - } - return result; - } - - @Override - public void testTearDown() throws Exception { - - } - - /** Return true if all is well */ - protected boolean rawGenerateAndBuildRecognizer(String grammarFileName, - String grammarStr, - String parserName, - String lexerName, - String... extraOptions) - { - return rawGenerateAndBuildRecognizer(grammarFileName, grammarStr, parserName, lexerName, false, extraOptions); - } - - /** Return true if all is well */ - protected boolean rawGenerateAndBuildRecognizer(String grammarFileName, - String grammarStr, - String parserName, - String lexerName, - boolean defaultListener, - String... 
extraOptions) - { - ErrorQueue equeue = - antlrOnString(getTmpDir(), "JavaScript", grammarFileName, grammarStr, defaultListener, extraOptions); - if (!equeue.errors.isEmpty()) { - return false; - } - - List files = new ArrayList(); - if ( lexerName!=null ) { - files.add(lexerName+".js"); - } - if ( parserName!=null ) { - files.add(parserName+".js"); - Set optionsSet = new HashSet(Arrays.asList(extraOptions)); - if (!optionsSet.contains("-no-listener")) { - files.add(grammarFileName.substring(0, grammarFileName.lastIndexOf('.'))+"Listener.js"); - } - if (optionsSet.contains("-visitor")) { - files.add(grammarFileName.substring(0, grammarFileName.lastIndexOf('.'))+"Visitor.js"); - } - } - return true; // allIsWell: no compile - } - - protected void rawBuildRecognizerTestFile(String parserName, - String lexerName, - String listenerName, - String visitorName, - String parserStartRuleName, boolean debug) - { - this.stderrDuringParse = null; - if ( parserName==null ) { - writeLexerTestFile(lexerName, false); - } - else { - writeParserTestFile(parserName, - lexerName, - listenerName, - visitorName, - parserStartRuleName, - debug); - } - } - - public String execRecognizer(String input) throws Exception { - return execHtmlPage("Test.html", input); - } - - static int httpPort = 8080; - - class ServerThread extends Thread { - - Server server; - String runtimePath; - String fileName; - Exception ex; - - public ServerThread(String fileName) { - this.runtimePath = locateRuntime(); - this.fileName = fileName; - } - - @Override - public void run() { - try { - Server server = new Server(httpPort); - ResourceHandler rh1 = new ResourceHandler(); - rh1.setDirectoriesListed(false); - rh1.setResourceBase(httpdir); - rh1.setWelcomeFiles(new String[] { fileName }); - ResourceHandler rh2 = new ResourceHandler(); - rh2.setDirectoriesListed(false); - rh2.setResourceBase(runtimePath); - HandlerList handlers = new HandlerList(); - handlers.setHandlers(new Handler[] { rh1, rh2, new 
DefaultHandler() }); - server.setHandler(handlers); - server.start(); - this.server = server; - this.server.join(); - } catch(BindException e) { - httpPort++; - run(); - } catch (Exception e) { - ex = e; - } - } - } - - protected static WebDriver driver; - - public String execHtmlPage(String fileName, String input) throws Exception { - // 'file' protocol is not supported by Selenium drivers - // so we run an embedded Jetty server - ServerThread thread = new ServerThread(fileName); - thread.start(); - try { - while(thread.server==null && thread.ex==null) - Thread.sleep(10); - if(thread.ex!=null) - throw thread.ex; - while(thread.server.isStarting()) - Thread.sleep(10); - Thread.sleep(400); // despite all the above precautions, driver.get often fails if you don't give time to Jetty - driver.get("http://localhost:" + httpPort + "/" + fileName); - driver.findElement(new ById("input")).clear(); - driver.findElement(new ById("output")).clear(); - driver.findElement(new ById("errors")).clear(); - driver.navigate().refresh(); - driver.findElement(new ById("input")).sendKeys(input); - driver.findElement(new ById("load")).click(); - driver.findElement(new ById("submit")).click(); - String errors = driver.findElement(new ById("errors")).getAttribute("value"); - if(errors!=null && errors.length()>0) { - this.stderrDuringParse = errors; - System.err.print(errors); - } - String value = driver.findElement(new ById("output")).getAttribute("value"); - // mimic stdout which adds a NL - if(value.length()>0 && !value.endsWith("\n")) - value = value + "\n"; - return value; - } - catch (Exception e) { - System.err.println("can't exec recognizer"); - e.printStackTrace(System.err); - } finally { - if(thread.server!=null) { - thread.server.stop(); - while(!thread.server.isStopped()) - Thread.sleep(10); - Thread.sleep(100); // ensure the port is freed - } - } - return null; - } - - private String locateRuntime() { - String propName = "antlr-javascript-runtime"; - String prop = 
System.getProperty(propName); - if(prop==null || prop.length()==0) - prop = "../runtime/JavaScript/src"; - File file = new File(prop); - System.out.println(file.getAbsolutePath()); - if(!file.exists()) - throw new RuntimeException("Missing system property:" + propName); - return file.getAbsolutePath(); - } - - List getMessagesOfType(List msgs, Class c) { - List filtered = new ArrayList(); - for (ANTLRMessage m : msgs) { - if ( m.getClass() == c ) filtered.add(m); - } - return filtered; - } - - void checkRuleATN(Grammar g, String ruleName, String expecting) { - ParserATNFactory f = new ParserATNFactory(g); - ATN atn = f.createATN(); - - DOTGenerator dot = new DOTGenerator(g); - System.out.println(dot.getDOT(atn.ruleToStartState[g.getRule(ruleName).index])); - - Rule r = g.getRule(ruleName); - ATNState startState = atn.ruleToStartState[r.index]; - ATNPrinter serializer = new ATNPrinter(g, startState); - String result = serializer.asString(); - - //System.out.print(result); - assertEquals(expecting, result); - } - - public void testActions(String templates, String actionName, String action, String expected) throws org.antlr.runtime.RecognitionException { - int lp = templates.indexOf('('); - String name = templates.substring(0, lp); - STGroup group = new STGroupString(templates); - ST st = group.getInstanceOf(name); - st.add(actionName, action); - String grammar = st.render(); - ErrorQueue equeue = new ErrorQueue(); - Grammar g = new Grammar(grammar, equeue); - if ( g.ast!=null && !g.ast.hasErrors ) { - SemanticPipeline sem = new SemanticPipeline(g); - sem.process(); - - ATNFactory factory = new ParserATNFactory(g); - if ( g.isLexer() ) factory = new LexerATNFactory((LexerGrammar)g); - g.atn = factory.createATN(); - - CodeGenerator gen = new CodeGenerator(g); - ST outputFileST = gen.generateParser(); - String output = outputFileST.render(); - //System.out.println(output); - String b = "#" + actionName + "#"; - int start = output.indexOf(b); - String e = "#end-" + 
actionName + "#"; - int end = output.indexOf(e); - String snippet = output.substring(start+b.length(),end); - assertEquals(expected, snippet); - } - if ( equeue.size()>0 ) { - System.err.println(equeue.toString()); - } - } - - protected void checkGrammarSemanticsError(ErrorQueue equeue, - GrammarSemanticsMessage expectedMessage) - throws Exception - { - ANTLRMessage foundMsg = null; - for (int i = 0; i < equeue.errors.size(); i++) { - ANTLRMessage m = equeue.errors.get(i); - if (m.getErrorType()==expectedMessage.getErrorType() ) { - foundMsg = m; - } - } - assertNotNull("no error; "+expectedMessage.getErrorType()+" expected", foundMsg); - assertTrue("error is not a GrammarSemanticsMessage", - foundMsg instanceof GrammarSemanticsMessage); - assertEquals(Arrays.toString(expectedMessage.getArgs()), Arrays.toString(foundMsg.getArgs())); - if ( equeue.size()!=1 ) { - System.err.println(equeue); - } - } - - protected void checkGrammarSemanticsWarning(ErrorQueue equeue, - GrammarSemanticsMessage expectedMessage) - throws Exception - { - ANTLRMessage foundMsg = null; - for (int i = 0; i < equeue.warnings.size(); i++) { - ANTLRMessage m = equeue.warnings.get(i); - if (m.getErrorType()==expectedMessage.getErrorType() ) { - foundMsg = m; - } - } - assertNotNull("no error; "+expectedMessage.getErrorType()+" expected", foundMsg); - assertTrue("error is not a GrammarSemanticsMessage", - foundMsg instanceof GrammarSemanticsMessage); - assertEquals(Arrays.toString(expectedMessage.getArgs()), Arrays.toString(foundMsg.getArgs())); - if ( equeue.size()!=1 ) { - System.err.println(equeue); - } - } - - protected void checkError(ErrorQueue equeue, - ANTLRMessage expectedMessage) - throws Exception - { - //System.out.println("errors="+equeue); - ANTLRMessage foundMsg = null; - for (int i = 0; i < equeue.errors.size(); i++) { - ANTLRMessage m = equeue.errors.get(i); - if (m.getErrorType()==expectedMessage.getErrorType() ) { - foundMsg = m; - } - } - assertTrue("no error; 
"+expectedMessage.getErrorType()+" expected", !equeue.errors.isEmpty()); - assertTrue("too many errors; "+equeue.errors, equeue.errors.size()<=1); - assertNotNull("couldn't find expected error: "+expectedMessage.getErrorType(), foundMsg); - /* - assertTrue("error is not a GrammarSemanticsMessage", - foundMsg instanceof GrammarSemanticsMessage); - */ - assertArrayEquals(expectedMessage.getArgs(), foundMsg.getArgs()); - } - - public static class FilteringTokenStream extends CommonTokenStream { - public FilteringTokenStream(TokenSource src) { super(src); } - Set hide = new HashSet(); - @Override - protected boolean sync(int i) { - if (!super.sync(i)) { - return false; - } - - Token t = get(i); - if ( hide.contains(t.getType()) ) { - ((WritableToken)t).setChannel(Token.HIDDEN_CHANNEL); - } - - return true; - } - public void setTokenTypeChannel(int ttype, int channel) { - hide.add(ttype); - } - } - - protected void mkdir(String dir) { - File f = new File(dir); - f.mkdirs(); - } - - protected void writeParserTestFile(String parserName, - String lexerName, - String listenerName, - String visitorName, - String parserStartRuleName, boolean debug) { - String html = "\r\n" + - "\r\n" + - " \r\n" + - " \r\n" + - " \r\n" + - " \r\n" + - " \r\n" + - "
\r\n" + - "
\r\n" + - "
\r\n" + - "
\r\n" + - "
\r\n" + - " \r\n" + - "\r\n"; - writeFile(httpdir, "Test.html", html); - }; - - - protected void writeLexerTestFile(String lexerName, boolean showDFA) { - String html = "\r\n" + - "\r\n" + - " \r\n" + - " \r\n" + - " \r\n" + - " \r\n" + - " \r\n" + - "
\r\n" + - "
\r\n" + - "
\r\n" + - "
\r\n" + - "
\r\n" + - " \r\n" + - "\r\n"; - writeFile(httpdir, "Test.html", html); - } - - public void writeRecognizer(String parserName, String lexerName, - String listenerName, String visitorName, - String parserStartRuleName, boolean debug) { - if ( parserName==null ) - writeLexerTestFile(lexerName, debug); - else - writeParserTestFile(parserName, - lexerName, - listenerName, - visitorName, - parserStartRuleName, - debug); - } - - - protected void eraseFiles(final String filesEndingWith) { - File tmpdirF = new File(httpdir); - String[] files = tmpdirF.list(); - for(int i = 0; files!=null && i < files.length; i++) { - if ( files[i].endsWith(filesEndingWith) ) { - new File(httpdir+"/"+files[i]).delete(); - } - } - } - - protected void eraseFiles(File dir) { - String[] files = dir.list(); - for(int i = 0; files!=null && i < files.length; i++) { - new File(dir,files[i]).delete(); - } - } - - @Override - public void eraseTempDir() { - boolean doErase = true; - String propName = "antlr-javascript-erase-test-dir"; - String prop = System.getProperty(propName); - if(prop!=null && prop.length()>0) - doErase = Boolean.getBoolean(prop); - if(doErase) { - File tmpdirF = new File(httpdir); - if ( tmpdirF.exists() ) { - eraseFiles(tmpdirF); - tmpdirF.delete(); - } - } - } - - public String getFirstLineOfException() { - if ( this.stderrDuringParse ==null ) { - return null; - } - String[] lines = this.stderrDuringParse.split("\n"); - String prefix="Exception in thread \"main\" "; - return lines[0].substring(prefix.length(),lines[0].length()); - } - - /** - * When looking at a result set that consists of a Map/HashTable - * we cannot rely on the output order, as the hashing algorithm or other aspects - * of the implementation may be different on differnt JDKs or platforms. Hence - * we take the Map, convert the keys to a List, sort them and Stringify the Map, which is a - * bit of a hack, but guarantees that we get the same order on all systems. We assume that - * the keys are strings. 
- * - * @param m The Map that contains keys we wish to return in sorted order - * @return A string that represents all the keys in sorted order. - */ - public String sortMapToString(Map m) { - // Pass in crap, and get nothing back - // - if (m == null) { - return null; - } - - System.out.println("Map toString looks like: " + m.toString()); - - // Sort the keys in the Map - // - TreeMap nset = new TreeMap(m); - - System.out.println("Tree map looks like: " + nset.toString()); - return nset.toString(); - } - - public List realElements(List elements) { - return elements.subList(Token.MIN_USER_TOKEN_TYPE, elements.size()); - } - - public void assertNotNullOrEmpty(String message, String text) { - assertNotNull(message, text); - assertFalse(message, text.isEmpty()); - } - - public void assertNotNullOrEmpty(String text) { - assertNotNull(text); - assertFalse(text.isEmpty()); - } - - public static class IntTokenStream implements TokenStream { - IntegerList types; - int p=0; - public IntTokenStream(IntegerList types) { this.types = types; } - - @Override - public void consume() { p++; } - - @Override - public int LA(int i) { return LT(i).getType(); } - - @Override - public int mark() { - return index(); - } - - @Override - public int index() { return p; } - - @Override - public void release(int marker) { - seek(marker); - } - - @Override - public void seek(int index) { - p = index; - } - - @Override - public int size() { - return types.size(); - } - - @Override - public String getSourceName() { - return null; - } - - @Override - public Token LT(int i) { - CommonToken t; - int rawIndex = p + i - 1; - if ( rawIndex>=types.size() ) t = new CommonToken(Token.EOF); - else t = new CommonToken(types.get(rawIndex)); - t.setTokenIndex(rawIndex); - return t; - } - - @Override - public Token get(int i) { - return new org.antlr.v4.runtime.CommonToken(types.get(i)); - } - - @Override - public TokenSource getTokenSource() { - return null; - } - - - @Override - public String getText() { - 
throw new UnsupportedOperationException("can't give strings"); - } - - - @Override - public String getText(Interval interval) { - throw new UnsupportedOperationException("can't give strings"); - } - - - @Override - public String getText(RuleContext ctx) { - throw new UnsupportedOperationException("can't give strings"); - } - - - @Override - public String getText(Token start, Token stop) { - throw new UnsupportedOperationException("can't give strings"); - } - } - - /** Sort a list */ - public > List sort(List data) { - List dup = new ArrayList(); - dup.addAll(data); - Collections.sort(dup); - return dup; - } - - /** Return map sorted by key */ - public ,V> LinkedHashMap sort(Map data) { - LinkedHashMap dup = new LinkedHashMap(); - List keys = new ArrayList(); - keys.addAll(data.keySet()); - Collections.sort(keys); - for (K k : keys) { - dup.put(k, data.get(k)); - } - return dup; - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/chrome/BaseChromeTest.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/chrome/BaseChromeTest.java deleted file mode 100644 index 93733ca8b..000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/chrome/BaseChromeTest.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ -package org.antlr.v4.test.runtime.javascript.chrome; - -import org.antlr.v4.test.runtime.javascript.browser.BaseBrowserTest; -import org.junit.AfterClass; -import org.junit.BeforeClass; - -public class BaseChromeTest extends BaseBrowserTest { - @BeforeClass - public static void initWebDriver() { - driver = SharedWebDriver.init(); - } - - @AfterClass - public static void closeWebDriver() { - SharedWebDriver.close(); - } - -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/chrome/SharedWebDriver.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/chrome/SharedWebDriver.java deleted file mode 100644 index 0078561e5..000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/chrome/SharedWebDriver.java +++ /dev/null @@ -1,56 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.javascript.chrome; - -import org.openqa.selenium.WebDriver; -import org.openqa.selenium.chrome.ChromeDriver; - -import java.io.File; -import java.net.URL; -import java.util.Timer; -import java.util.TimerTask; - -import static org.junit.Assert.assertTrue; - -public class SharedWebDriver { - - static WebDriver driver; - static Timer timer; - - public static WebDriver init() { - if(driver==null) { - String path = SharedWebDriver.class.getPackage().getName().replace(".", "/") + "/chromedriver.bin"; - URL url = Thread.currentThread().getContextClassLoader().getResource(path); - File file = new File(url.toExternalForm().substring(5)); // skip 'file:' - assertTrue(file.exists()); - System.setProperty("webdriver.chrome.driver", file.getAbsolutePath()); - driver = new ChromeDriver(); - } else if(timer!=null) { - timer.cancel(); - timer = null; - } - - return driver; - } - - public static void close() { - if(driver!=null) { - if(timer!=null) { - timer.cancel(); - timer = null; - } - timer = new Timer(); - timer.schedule(new TimerTask() { - @Override public void run() { - driver.quit(); - driver = null; - } - }, 2000); // close with delay to allow next Test to start - } - } - -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/chrome/TestCompositeLexers.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/chrome/TestCompositeLexers.java deleted file mode 100644 index 2249599dd..000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/chrome/TestCompositeLexers.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.javascript.chrome; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.CompositeLexersDescriptors; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestCompositeLexers extends BaseRuntimeTest { - public TestCompositeLexers(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseChromeTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(CompositeLexersDescriptors.class, "Chrome"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/chrome/TestCompositeParsers.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/chrome/TestCompositeParsers.java deleted file mode 100644 index 1786d2a56..000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/chrome/TestCompositeParsers.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.javascript.chrome; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.CompositeParsersDescriptors; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestCompositeParsers extends BaseRuntimeTest { - public TestCompositeParsers(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseChromeTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(CompositeParsersDescriptors.class, "Chrome"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/chrome/TestFullContextParsing.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/chrome/TestFullContextParsing.java deleted file mode 100644 index 12d8ec76d..000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/chrome/TestFullContextParsing.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.javascript.chrome; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.FullContextParsingDescriptors; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestFullContextParsing extends BaseRuntimeTest { - public TestFullContextParsing(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseChromeTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(FullContextParsingDescriptors.class, "Chrome"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/chrome/TestListeners.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/chrome/TestListeners.java deleted file mode 100644 index 89cdfb40e..000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/chrome/TestListeners.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.javascript.chrome; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.ListenersDescriptors; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestListeners extends BaseRuntimeTest { - public TestListeners(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseChromeTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(ListenersDescriptors.class, "Chrome"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/chrome/TestParserExec.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/chrome/TestParserExec.java deleted file mode 100644 index c8e2ff6a3..000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/chrome/TestParserExec.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.javascript.chrome; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.ParserExecDescriptors; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestParserExec extends BaseRuntimeTest { - public TestParserExec(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseChromeTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(ParserExecDescriptors.class, "Chrome"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/chrome/TestPerformance.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/chrome/TestPerformance.java deleted file mode 100644 index 44aa4e9ed..000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/chrome/TestPerformance.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.javascript.chrome; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.PerformanceDescriptors; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestPerformance extends BaseRuntimeTest { - public TestPerformance(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseChromeTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(PerformanceDescriptors.class, "Chrome"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/explorer/BaseExplorerTest.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/explorer/BaseExplorerTest.java deleted file mode 100644 index 3345166ac..000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/explorer/BaseExplorerTest.java +++ /dev/null @@ -1,28 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ -package org.antlr.v4.test.runtime.javascript.explorer; - -import org.antlr.v4.test.runtime.javascript.browser.BaseBrowserTest; -import org.junit.After; -import org.junit.Before; -import org.openqa.selenium.ie.InternetExplorerDriver; - -public class BaseExplorerTest extends BaseBrowserTest { - - @Before - public void initWebDriver() { - System.setProperty("webdriver.ie.driver", "C:\\Program Files (x86)\\Selenium\\IEDriverServer.exe"); - driver = new InternetExplorerDriver(); - } - - @After - public void closeWebDriver() { - if(driver!=null) { - driver.quit(); - } - } - -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/explorer/TestCompositeParsers.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/explorer/TestCompositeParsers.java deleted file mode 100644 index 202c17971..000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/explorer/TestCompositeParsers.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.javascript.explorer; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.CompositeParsersDescriptors; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestCompositeParsers extends BaseRuntimeTest { - public TestCompositeParsers(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseExplorerTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(CompositeParsersDescriptors.class, "Explorer"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/explorer/TestLeftRecursion.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/explorer/TestLeftRecursion.java deleted file mode 100644 index 0f6cd0274..000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/explorer/TestLeftRecursion.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.javascript.explorer; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.LeftRecursionDescriptors; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestLeftRecursion extends BaseRuntimeTest { - public TestLeftRecursion(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseExplorerTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(LeftRecursionDescriptors.class, "Explorer"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/explorer/TestLexerErrors.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/explorer/TestLexerErrors.java deleted file mode 100644 index 4e7522785..000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/explorer/TestLexerErrors.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.javascript.explorer; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.LexerErrorsDescriptors; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestLexerErrors extends BaseRuntimeTest { - public TestLexerErrors(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseExplorerTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(LexerErrorsDescriptors.class, "Explorer"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/explorer/TestLexerExec.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/explorer/TestLexerExec.java deleted file mode 100644 index 1ca62e783..000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/explorer/TestLexerExec.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.javascript.explorer; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.LexerExecDescriptors; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestLexerExec extends BaseRuntimeTest { - public TestLexerExec(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseExplorerTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(LexerExecDescriptors.class, "Explorer"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/explorer/TestListeners.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/explorer/TestListeners.java deleted file mode 100644 index c12edd0ec..000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/explorer/TestListeners.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.javascript.explorer; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.ListenersDescriptors; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestListeners extends BaseRuntimeTest { - public TestListeners(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseExplorerTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(ListenersDescriptors.class, "Explorer"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/explorer/TestParseTrees.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/explorer/TestParseTrees.java deleted file mode 100644 index 25e6305b2..000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/explorer/TestParseTrees.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.javascript.explorer; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.ParseTreesDescriptors; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestParseTrees extends BaseRuntimeTest { - public TestParseTrees(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseExplorerTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(ParseTreesDescriptors.class, "Explorer"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/explorer/TestParserErrors.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/explorer/TestParserErrors.java deleted file mode 100644 index 68b003cd4..000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/explorer/TestParserErrors.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.javascript.explorer; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.ParserErrorsDescriptors; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestParserErrors extends BaseRuntimeTest { - public TestParserErrors(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseExplorerTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(ParserErrorsDescriptors.class, "Explorer"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/explorer/TestParserExec.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/explorer/TestParserExec.java deleted file mode 100644 index 9a524c54f..000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/explorer/TestParserExec.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.javascript.explorer; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.ParserExecDescriptors; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestParserExec extends BaseRuntimeTest { - public TestParserExec(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseExplorerTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(ParserExecDescriptors.class, "Explorer"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/explorer/TestPerformance.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/explorer/TestPerformance.java deleted file mode 100644 index 23526657f..000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/explorer/TestPerformance.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.javascript.explorer; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.PerformanceDescriptors; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestPerformance extends BaseRuntimeTest { - public TestPerformance(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseExplorerTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(PerformanceDescriptors.class, "Explorer"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/explorer/TestSemPredEvalLexer.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/explorer/TestSemPredEvalLexer.java deleted file mode 100644 index e412c9661..000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/explorer/TestSemPredEvalLexer.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.javascript.explorer; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.SemPredEvalLexerDescriptors; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestSemPredEvalLexer extends BaseRuntimeTest { - public TestSemPredEvalLexer(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseExplorerTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(SemPredEvalLexerDescriptors.class, "Explorer"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/explorer/TestSemPredEvalParser.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/explorer/TestSemPredEvalParser.java deleted file mode 100644 index 22ad655a8..000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/explorer/TestSemPredEvalParser.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.javascript.explorer; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.SemPredEvalParserDescriptors; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestSemPredEvalParser extends BaseRuntimeTest { - public TestSemPredEvalParser(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseExplorerTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(SemPredEvalParserDescriptors.class, "Explorer"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/explorer/TestSets.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/explorer/TestSets.java deleted file mode 100644 index df359cd34..000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/explorer/TestSets.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.javascript.explorer; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.SetsDescriptors; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestSets extends BaseRuntimeTest { - public TestSets(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseExplorerTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(SetsDescriptors.class, "Explorer"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/firefox/BaseFirefoxTest.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/firefox/BaseFirefoxTest.java deleted file mode 100644 index 2f43a7562..000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/firefox/BaseFirefoxTest.java +++ /dev/null @@ -1,24 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ -package org.antlr.v4.test.runtime.javascript.firefox; - -import org.antlr.v4.test.runtime.javascript.browser.BaseBrowserTest; -import org.junit.AfterClass; -import org.junit.BeforeClass; - -public class BaseFirefoxTest extends BaseBrowserTest { - - @BeforeClass - public static void initWebDriver() { - driver = SharedWebDriver.init(); - } - - @AfterClass - public static void closeWebDriver() { - SharedWebDriver.close(); - } - -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/firefox/SharedWebDriver.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/firefox/SharedWebDriver.java deleted file mode 100644 index b9e1daf6e..000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/firefox/SharedWebDriver.java +++ /dev/null @@ -1,47 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.javascript.firefox; - -import org.openqa.selenium.WebDriver; -import org.openqa.selenium.firefox.FirefoxDriver; - -import java.util.Timer; -import java.util.TimerTask; - -public class SharedWebDriver { - - static WebDriver driver; - static Timer timer; - - public static WebDriver init() { - if(driver==null) { - driver = new FirefoxDriver(); - } else if(timer!=null) { - timer.cancel(); - timer = null; - } - - return driver; - } - - public static void close() { - if(driver!=null) { - if(timer!=null) { - timer.cancel(); - timer = null; - } - timer = new Timer(); - timer.schedule(new TimerTask() { - @Override public void run() { - driver.quit(); - driver = null; - } - }, 2000); // close with delay to allow next Test to start - } - } - -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/firefox/TestCompositeLexers.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/firefox/TestCompositeLexers.java deleted file mode 100644 index a7b4a13cd..000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/firefox/TestCompositeLexers.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.javascript.firefox; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.CompositeLexersDescriptors; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestCompositeLexers extends BaseRuntimeTest { - public TestCompositeLexers(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseFirefoxTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(CompositeLexersDescriptors.class, "Firefox"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/firefox/TestFullContextParsing.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/firefox/TestFullContextParsing.java deleted file mode 100644 index 40aef5505..000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/firefox/TestFullContextParsing.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.javascript.firefox; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.FullContextParsingDescriptors; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestFullContextParsing extends BaseRuntimeTest { - public TestFullContextParsing(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseFirefoxTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(FullContextParsingDescriptors.class, "Firefox"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/firefox/TestLeftRecursion.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/firefox/TestLeftRecursion.java deleted file mode 100644 index 29679cd3b..000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/firefox/TestLeftRecursion.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.javascript.firefox; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.LeftRecursionDescriptors; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestLeftRecursion extends BaseRuntimeTest { - public TestLeftRecursion(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseFirefoxTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(LeftRecursionDescriptors.class, "Firefox"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/firefox/TestLexerErrors.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/firefox/TestLexerErrors.java deleted file mode 100644 index 9fd9fac5d..000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/firefox/TestLexerErrors.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.javascript.firefox; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.LexerErrorsDescriptors; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestLexerErrors extends BaseRuntimeTest { - public TestLexerErrors(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseFirefoxTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(LexerErrorsDescriptors.class, "Firefox"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/firefox/TestLexerExec.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/firefox/TestLexerExec.java deleted file mode 100644 index b7b04e837..000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/firefox/TestLexerExec.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.javascript.firefox; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.LexerExecDescriptors; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestLexerExec extends BaseRuntimeTest { - public TestLexerExec(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseFirefoxTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(LexerExecDescriptors.class, "Firefox"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/firefox/TestParseTrees.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/firefox/TestParseTrees.java deleted file mode 100644 index 5fa612d6b..000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/firefox/TestParseTrees.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.javascript.firefox; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.ParseTreesDescriptors; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestParseTrees extends BaseRuntimeTest { - public TestParseTrees(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseFirefoxTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(ParseTreesDescriptors.class, "Firefox"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/firefox/TestParserErrors.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/firefox/TestParserErrors.java deleted file mode 100644 index af8dbeb36..000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/firefox/TestParserErrors.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.javascript.firefox; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.ParserErrorsDescriptors; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestParserErrors extends BaseRuntimeTest { - public TestParserErrors(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseFirefoxTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(ParserErrorsDescriptors.class, "Firefox"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/firefox/TestSemPredEvalLexer.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/firefox/TestSemPredEvalLexer.java deleted file mode 100644 index cf5aaf9a5..000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/firefox/TestSemPredEvalLexer.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.javascript.firefox; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.SemPredEvalLexerDescriptors; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestSemPredEvalLexer extends BaseRuntimeTest { - public TestSemPredEvalLexer(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseFirefoxTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(SemPredEvalLexerDescriptors.class, "Firefox"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/firefox/TestSemPredEvalParser.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/firefox/TestSemPredEvalParser.java deleted file mode 100644 index 3e16eae7d..000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/firefox/TestSemPredEvalParser.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.javascript.firefox; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.SemPredEvalParserDescriptors; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestSemPredEvalParser extends BaseRuntimeTest { - public TestSemPredEvalParser(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseFirefoxTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(SemPredEvalParserDescriptors.class, "Firefox"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/firefox/TestSets.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/firefox/TestSets.java deleted file mode 100644 index 5a967c307..000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/firefox/TestSets.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.javascript.firefox; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.SetsDescriptors; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestSets extends BaseRuntimeTest { - public TestSets(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseFirefoxTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(SetsDescriptors.class, "Firefox"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/node/BaseNodeTest.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/node/BaseNodeTest.java deleted file mode 100644 index 7246f4ec2..000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/node/BaseNodeTest.java +++ /dev/null @@ -1,906 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ -package org.antlr.v4.test.runtime.javascript.node; - -import org.antlr.v4.Tool; -import org.antlr.v4.automata.ATNFactory; -import org.antlr.v4.automata.ATNPrinter; -import org.antlr.v4.automata.LexerATNFactory; -import org.antlr.v4.automata.ParserATNFactory; -import org.antlr.v4.codegen.CodeGenerator; -import org.antlr.v4.runtime.ANTLRInputStream; -import org.antlr.v4.runtime.CharStream; -import org.antlr.v4.runtime.CommonToken; -import org.antlr.v4.runtime.CommonTokenStream; -import org.antlr.v4.runtime.IntStream; -import org.antlr.v4.runtime.Lexer; -import org.antlr.v4.runtime.RuleContext; -import org.antlr.v4.runtime.Token; -import org.antlr.v4.runtime.TokenSource; -import org.antlr.v4.runtime.TokenStream; -import org.antlr.v4.runtime.WritableToken; -import org.antlr.v4.runtime.atn.ATN; -import org.antlr.v4.runtime.atn.ATNDeserializer; -import org.antlr.v4.runtime.atn.ATNSerializer; -import org.antlr.v4.runtime.atn.ATNState; -import org.antlr.v4.runtime.atn.LexerATNSimulator; -import org.antlr.v4.runtime.dfa.DFA; -import org.antlr.v4.runtime.misc.IntegerList; -import org.antlr.v4.runtime.misc.Interval; -import org.antlr.v4.semantics.SemanticPipeline; -import org.antlr.v4.test.runtime.ErrorQueue; -import org.antlr.v4.test.runtime.RuntimeTestSupport; -import org.antlr.v4.test.runtime.StreamVacuum; -import org.antlr.v4.tool.ANTLRMessage; -import org.antlr.v4.tool.DOTGenerator; -import org.antlr.v4.tool.Grammar; -import org.antlr.v4.tool.GrammarSemanticsMessage; -import org.antlr.v4.tool.LexerGrammar; -import org.antlr.v4.tool.Rule; -import org.stringtemplate.v4.ST; -import org.stringtemplate.v4.STGroup; -import org.stringtemplate.v4.STGroupString; - -import java.io.File; -import java.net.URL; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collections; -import java.util.HashSet; -import java.util.LinkedHashMap; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.TreeMap; - -import static 
org.antlr.v4.test.runtime.BaseRuntimeTest.antlrOnString; -import static org.antlr.v4.test.runtime.BaseRuntimeTest.writeFile; -import static org.junit.Assert.assertArrayEquals; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertTrue; - -public class BaseNodeTest implements RuntimeTestSupport { - // -J-Dorg.antlr.v4.test.BaseTest.level=FINE - // private static final Logger LOGGER = - // Logger.getLogger(BaseTest.class.getName()); - - public static final String newline = System.getProperty("line.separator"); - public static final String pathSep = System.getProperty("path.separator"); - - public String tmpdir = null; - - /** - * If error during parser execution, store stderr here; can't return stdout - * and stderr. This doesn't trap errors from running antlr. - */ - protected String stderrDuringParse; - - /** Errors found while running antlr */ - protected StringBuilder antlrToolErrors; - - @Override - public void testSetUp() throws Exception { - // new output dir for each test - String prop = System.getProperty("antlr-javascript-test-dir"); - if (prop != null && prop.length() > 0) { - tmpdir = prop; - } - else { - tmpdir = new File(System.getProperty("java.io.tmpdir"), getClass() - .getSimpleName()+"-"+Thread.currentThread().getName()+"-"+System.currentTimeMillis()) - .getAbsolutePath(); - } - File dir = new File(tmpdir); - if (dir.exists()) - this.eraseFiles(dir); - antlrToolErrors = new StringBuilder(); - } - - @Override - public void testTearDown() throws Exception { - } - - @Override - public String getTmpDir() { - return tmpdir; - } - - @Override - public String getStdout() { - return null; - } - - @Override - public String getParseErrors() { - return stderrDuringParse; - } - - @Override - public String getANTLRToolErrors() { - if ( antlrToolErrors.length()==0 ) { - return null; - } - return antlrToolErrors.toString(); - } - - protected 
org.antlr.v4.Tool newTool(String[] args) { - Tool tool = new Tool(args); - return tool; - } - - protected Tool newTool() { - org.antlr.v4.Tool tool = new Tool(new String[] { "-o", tmpdir }); - return tool; - } - - protected ATN createATN(Grammar g, boolean useSerializer) { - if (g.atn == null) { - semanticProcess(g); - assertEquals(0, g.tool.getNumErrors()); - - ParserATNFactory f; - if (g.isLexer()) { - f = new LexerATNFactory((LexerGrammar) g); - } - else { - f = new ParserATNFactory(g); - } - - g.atn = f.createATN(); - assertEquals(0, g.tool.getNumErrors()); - } - - ATN atn = g.atn; - if (useSerializer) { - char[] serialized = ATNSerializer.getSerializedAsChars(atn); - return new ATNDeserializer().deserialize(serialized); - } - - return atn; - } - - protected void semanticProcess(Grammar g) { - if (g.ast != null && !g.ast.hasErrors) { - System.out.println(g.ast.toStringTree()); - Tool antlr = new Tool(); - SemanticPipeline sem = new SemanticPipeline(g); - sem.process(); - if (g.getImportedGrammars() != null) { // process imported grammars - // (if any) - for (Grammar imp : g.getImportedGrammars()) { - antlr.processNonCombinedGrammar(imp, false); - } - } - } - } - - IntegerList getTypesFromString(Grammar g, String expecting) { - IntegerList expectingTokenTypes = new IntegerList(); - if (expecting != null && !expecting.trim().isEmpty()) { - for (String tname : expecting.replace(" ", "").split(",")) { - int ttype = g.getTokenType(tname); - expectingTokenTypes.add(ttype); - } - } - return expectingTokenTypes; - } - - public IntegerList getTokenTypesViaATN(String input, - LexerATNSimulator lexerATN) { - ANTLRInputStream in = new ANTLRInputStream(input); - IntegerList tokenTypes = new IntegerList(); - int ttype; - do { - ttype = lexerATN.match(in, Lexer.DEFAULT_MODE); - tokenTypes.add(ttype); - } while (ttype != Token.EOF); - return tokenTypes; - } - - public List getTokenTypes(LexerGrammar lg, ATN atn, CharStream input) { - LexerATNSimulator interp = new 
LexerATNSimulator(atn, - new DFA[] { new DFA( - atn.modeToStartState.get(Lexer.DEFAULT_MODE)) }, null); - List tokenTypes = new ArrayList(); - int ttype; - boolean hitEOF = false; - do { - if (hitEOF) { - tokenTypes.add("EOF"); - break; - } - int t = input.LA(1); - ttype = interp.match(input, Lexer.DEFAULT_MODE); - if (ttype == Token.EOF) { - tokenTypes.add("EOF"); - } - else { - tokenTypes.add(lg.typeToTokenList.get(ttype)); - } - - if (t == IntStream.EOF) { - hitEOF = true; - } - } while (ttype != Token.EOF); - return tokenTypes; - } - - protected String execLexer(String grammarFileName, String grammarStr, - String lexerName, String input) { - return execLexer(grammarFileName, grammarStr, lexerName, input, false); - } - - @Override - public String execLexer(String grammarFileName, String grammarStr, - String lexerName, String input, boolean showDFA) { - boolean success = rawGenerateAndBuildRecognizer(grammarFileName, - grammarStr, null, lexerName, "-no-listener"); - assertTrue(success); - writeFile(tmpdir, "input", input); - writeLexerTestFile(lexerName, showDFA); - String output = execModule("Test.js"); - if ( output.length()==0 ) { - output = null; - } - return output; - } - - @Override - public String execParser(String grammarFileName, String grammarStr, - String parserName, String lexerName, String listenerName, - String visitorName, String startRuleName, String input, - boolean showDiagnosticErrors) - { - boolean success = rawGenerateAndBuildRecognizer(grammarFileName, - grammarStr, parserName, lexerName, "-visitor"); - assertTrue(success); - writeFile(tmpdir, "input", input); - rawBuildRecognizerTestFile(parserName, lexerName, listenerName, - visitorName, startRuleName, showDiagnosticErrors); - return execRecognizer(); - } - - /** Return true if all is well */ - protected boolean rawGenerateAndBuildRecognizer(String grammarFileName, - String grammarStr, String parserName, String lexerName, - String... 
extraOptions) { - return rawGenerateAndBuildRecognizer(grammarFileName, grammarStr, - parserName, lexerName, false, extraOptions); - } - - /** Return true if all is well */ - protected boolean rawGenerateAndBuildRecognizer(String grammarFileName, - String grammarStr, String parserName, String lexerName, - boolean defaultListener, String... extraOptions) { - ErrorQueue equeue = antlrOnString(getTmpDir(), "JavaScript", grammarFileName, grammarStr, - defaultListener, extraOptions); - if (!equeue.errors.isEmpty()) { - return false; - } - - List files = new ArrayList(); - if (lexerName != null) { - files.add(lexerName + ".js"); - } - if (parserName != null) { - files.add(parserName + ".js"); - Set optionsSet = new HashSet( - Arrays.asList(extraOptions)); - if (!optionsSet.contains("-no-listener")) { - files.add(grammarFileName.substring(0, - grammarFileName.lastIndexOf('.')) - + "Listener.js"); - } - if (optionsSet.contains("-visitor")) { - files.add(grammarFileName.substring(0, - grammarFileName.lastIndexOf('.')) - + "Visitor.js"); - } - } - return true; // allIsWell: no compile - } - - protected void rawBuildRecognizerTestFile(String parserName, - String lexerName, String listenerName, String visitorName, - String parserStartRuleName, boolean debug) { - this.stderrDuringParse = null; - if (parserName == null) { - writeLexerTestFile(lexerName, false); - } - else { - writeParserTestFile(parserName, lexerName, listenerName, - visitorName, parserStartRuleName, debug); - } - } - - public String execRecognizer() { - return execModule("Test.js"); - } - - public String execModule(String fileName) { - String nodejsPath = locateNodeJS(); - String runtimePath = locateRuntime(); - String modulePath = new File(new File(tmpdir), fileName) - .getAbsolutePath(); - String inputPath = new File(new File(tmpdir), "input") - .getAbsolutePath(); - try { - ProcessBuilder builder = new ProcessBuilder(nodejsPath, modulePath, - inputPath); - builder.environment().put("NODE_PATH", - runtimePath 
+ File.pathSeparator + tmpdir); - builder.directory(new File(tmpdir)); - Process process = builder.start(); - StreamVacuum stdoutVacuum = new StreamVacuum( - process.getInputStream()); - StreamVacuum stderrVacuum = new StreamVacuum( - process.getErrorStream()); - stdoutVacuum.start(); - stderrVacuum.start(); - process.waitFor(); - stdoutVacuum.join(); - stderrVacuum.join(); - String output = stdoutVacuum.toString(); - if ( output.length()==0 ) { - output = null; - } - if (stderrVacuum.toString().length() > 0) { - this.stderrDuringParse = stderrVacuum.toString(); - } - return output; - } - catch (Exception e) { - System.err.println("can't exec recognizer"); - e.printStackTrace(System.err); - } - return null; - } - - private String locateTool(String tool) { - String[] roots = { "/usr/bin/", "/usr/local/bin/" }; - for (String root : roots) { - if (new File(root + tool).exists()) { - return root + tool; - } - } - return null; - } - - private boolean canExecute(String tool) { - try { - ProcessBuilder builder = new ProcessBuilder(tool, "--version"); - builder.redirectErrorStream(true); - Process process = builder.start(); - StreamVacuum vacuum = new StreamVacuum(process.getInputStream()); - vacuum.start(); - process.waitFor(); - vacuum.join(); - return process.exitValue() == 0; - } - catch (Exception e) { - ; - } - return false; - } - - private String locateNodeJS() { - // typically /usr/local/bin/node - String propName = "antlr-javascript-nodejs"; - String prop = System.getProperty(propName); - - if ( prop!=null && prop.length()!=0 ) { - return prop; - } - if (canExecute("nodejs")) { - return "nodejs"; // nodejs on Debian without node-legacy package - } - return "node"; // everywhere else - } - - private String locateRuntime() { - final ClassLoader loader = Thread.currentThread().getContextClassLoader(); - final URL runtimeSrc = loader.getResource("JavaScript/src"); - if ( runtimeSrc==null ) { - throw new RuntimeException("Cannot find JavaScript runtime"); - } - 
if(isWindows()){ - return runtimeSrc.getPath().replaceFirst("/", ""); - } - return runtimeSrc.getPath(); - } - - private boolean isWindows() { - return System.getProperty("os.name").toLowerCase().contains("windows"); - } - - // void ambig(List msgs, int[] expectedAmbigAlts, String - // expectedAmbigInput) - // throws Exception - // { - // ambig(msgs, 0, expectedAmbigAlts, expectedAmbigInput); - // } - - // void ambig(List msgs, int i, int[] expectedAmbigAlts, String - // expectedAmbigInput) - // throws Exception - // { - // List amsgs = getMessagesOfType(msgs, AmbiguityMessage.class); - // AmbiguityMessage a = (AmbiguityMessage)amsgs.get(i); - // if ( a==null ) assertNull(expectedAmbigAlts); - // else { - // assertEquals(a.conflictingAlts.toString(), - // Arrays.toString(expectedAmbigAlts)); - // } - // assertEquals(expectedAmbigInput, a.input); - // } - - // void unreachable(List msgs, int[] expectedUnreachableAlts) - // throws Exception - // { - // unreachable(msgs, 0, expectedUnreachableAlts); - // } - - // void unreachable(List msgs, int i, int[] - // expectedUnreachableAlts) - // throws Exception - // { - // List amsgs = getMessagesOfType(msgs, - // UnreachableAltsMessage.class); - // UnreachableAltsMessage u = (UnreachableAltsMessage)amsgs.get(i); - // if ( u==null ) assertNull(expectedUnreachableAlts); - // else { - // assertEquals(u.conflictingAlts.toString(), - // Arrays.toString(expectedUnreachableAlts)); - // } - // } - - List getMessagesOfType(List msgs, - Class c) { - List filtered = new ArrayList(); - for (ANTLRMessage m : msgs) { - if (m.getClass() == c) - filtered.add(m); - } - return filtered; - } - - void checkRuleATN(Grammar g, String ruleName, String expecting) { - ParserATNFactory f = new ParserATNFactory(g); - ATN atn = f.createATN(); - - DOTGenerator dot = new DOTGenerator(g); - System.out - .println(dot.getDOT(atn.ruleToStartState[g.getRule(ruleName).index])); - - Rule r = g.getRule(ruleName); - ATNState startState = 
atn.ruleToStartState[r.index]; - ATNPrinter serializer = new ATNPrinter(g, startState); - String result = serializer.asString(); - - // System.out.print(result); - assertEquals(expecting, result); - } - - public void testActions(String templates, String actionName, String action, - String expected) throws org.antlr.runtime.RecognitionException { - int lp = templates.indexOf('('); - String name = templates.substring(0, lp); - STGroup group = new STGroupString(templates); - ST st = group.getInstanceOf(name); - st.add(actionName, action); - String grammar = st.render(); - ErrorQueue equeue = new ErrorQueue(); - Grammar g = new Grammar(grammar, equeue); - if (g.ast != null && !g.ast.hasErrors) { - SemanticPipeline sem = new SemanticPipeline(g); - sem.process(); - - ATNFactory factory = new ParserATNFactory(g); - if (g.isLexer()) - factory = new LexerATNFactory((LexerGrammar) g); - g.atn = factory.createATN(); - - CodeGenerator gen = new CodeGenerator(g); - ST outputFileST = gen.generateParser(); - String output = outputFileST.render(); - // System.out.println(output); - String b = "#" + actionName + "#"; - int start = output.indexOf(b); - String e = "#end-" + actionName + "#"; - int end = output.indexOf(e); - String snippet = output.substring(start + b.length(), end); - assertEquals(expected, snippet); - } - if (equeue.size() > 0) { - System.err.println(equeue.toString()); - } - } - - protected void checkGrammarSemanticsError(ErrorQueue equeue, - GrammarSemanticsMessage expectedMessage) throws Exception { - ANTLRMessage foundMsg = null; - for (int i = 0; i < equeue.errors.size(); i++) { - ANTLRMessage m = equeue.errors.get(i); - if (m.getErrorType() == expectedMessage.getErrorType()) { - foundMsg = m; - } - } - assertNotNull("no error; " + expectedMessage.getErrorType() - + " expected", foundMsg); - assertTrue("error is not a GrammarSemanticsMessage", - foundMsg instanceof GrammarSemanticsMessage); - assertEquals(Arrays.toString(expectedMessage.getArgs()), - 
Arrays.toString(foundMsg.getArgs())); - if (equeue.size() != 1) { - System.err.println(equeue); - } - } - - protected void checkGrammarSemanticsWarning(ErrorQueue equeue, - GrammarSemanticsMessage expectedMessage) throws Exception { - ANTLRMessage foundMsg = null; - for (int i = 0; i < equeue.warnings.size(); i++) { - ANTLRMessage m = equeue.warnings.get(i); - if (m.getErrorType() == expectedMessage.getErrorType()) { - foundMsg = m; - } - } - assertNotNull("no error; " + expectedMessage.getErrorType() - + " expected", foundMsg); - assertTrue("error is not a GrammarSemanticsMessage", - foundMsg instanceof GrammarSemanticsMessage); - assertEquals(Arrays.toString(expectedMessage.getArgs()), - Arrays.toString(foundMsg.getArgs())); - if (equeue.size() != 1) { - System.err.println(equeue); - } - } - - protected void checkError(ErrorQueue equeue, ANTLRMessage expectedMessage) - throws Exception { - // System.out.println("errors="+equeue); - ANTLRMessage foundMsg = null; - for (int i = 0; i < equeue.errors.size(); i++) { - ANTLRMessage m = equeue.errors.get(i); - if (m.getErrorType() == expectedMessage.getErrorType()) { - foundMsg = m; - } - } - assertTrue("no error; " + expectedMessage.getErrorType() + " expected", - !equeue.errors.isEmpty()); - assertTrue("too many errors; " + equeue.errors, - equeue.errors.size() <= 1); - assertNotNull( - "couldn't find expected error: " - + expectedMessage.getErrorType(), foundMsg); - /* - * assertTrue("error is not a GrammarSemanticsMessage", foundMsg - * instanceof GrammarSemanticsMessage); - */ - assertArrayEquals(expectedMessage.getArgs(), foundMsg.getArgs()); - } - - public static class FilteringTokenStream extends CommonTokenStream { - public FilteringTokenStream(TokenSource src) { - super(src); - } - - Set hide = new HashSet(); - - @Override - protected boolean sync(int i) { - if (!super.sync(i)) { - return false; - } - - Token t = get(i); - if (hide.contains(t.getType())) { - ((WritableToken) 
t).setChannel(Token.HIDDEN_CHANNEL); - } - - return true; - } - - public void setTokenTypeChannel(int ttype, int channel) { - hide.add(ttype); - } - } - - protected void mkdir(String dir) { - File f = new File(dir); - f.mkdirs(); - } - - protected void writeParserTestFile(String parserName, String lexerName, - String listenerName, String visitorName, - String parserStartRuleName, boolean debug) { - ST outputFileST = new ST( - "var antlr4 = require('antlr4');\n" - + "var = require('./');\n" - + "var = require('./');\n" - + "var = require('./').;\n" - + "var = require('./').;\n" - + "\n" - + "function TreeShapeListener() {\n" - + " antlr4.tree.ParseTreeListener.call(this);\n" - + " return this;\n" - + "}\n" - + "\n" - + "TreeShapeListener.prototype = Object.create(antlr4.tree.ParseTreeListener.prototype);\n" - + "TreeShapeListener.prototype.constructor = TreeShapeListener;\n" - + "\n" - + "TreeShapeListener.prototype.enterEveryRule = function(ctx) {\n" - + " for(var i=0;i\\.(input);\n" - + " var stream = new antlr4.CommonTokenStream(lexer);\n" - + "" - + " parser.buildParseTrees = true;\n" - + " printer = function() {\n" - + " this.println = function(s) { console.log(s); }\n" - + " this.print = function(s) { process.stdout.write(s); }\n" - + " return this;\n" - + " };\n" - + " parser.printer = new printer();\n" - + " var tree = parser.();\n" - + " antlr4.tree.ParseTreeWalker.DEFAULT.walk(new TreeShapeListener(), tree);\n" - + "}\n" + "\n" + "main(process.argv);\n" + "\n"); - ST createParserST = new ST( - " var parser = new .(stream);\n"); - if (debug) { - createParserST = new ST( - " var parser = new .(stream);\n" - + " parser.addErrorListener(new antlr4.error.DiagnosticErrorListener());\n"); - } - outputFileST.add("createParser", createParserST); - outputFileST.add("parserName", parserName); - outputFileST.add("lexerName", lexerName); - outputFileST.add("listenerName", listenerName); - outputFileST.add("visitorName", visitorName); - 
outputFileST.add("parserStartRuleName", parserStartRuleName); - writeFile(tmpdir, "Test.js", outputFileST.render()); - } - - protected void writeLexerTestFile(String lexerName, boolean showDFA) { - ST outputFileST = new ST( - "var antlr4 = require('antlr4');\n" - + "var = require('./');\n" - + "\n" - + "function main(argv) {\n" - + " var input = new antlr4.FileStream(argv[2], true);\n" - + " var lexer = new .(input);\n" - + " var stream = new antlr4.CommonTokenStream(lexer);\n" - + " stream.fill();\n" - + " for(var i=0; i\\ 0) - doErase = Boolean.getBoolean(prop); - if (doErase) { - File tmpdirF = new File(tmpdir); - if (tmpdirF.exists()) { - eraseFiles(tmpdirF); - tmpdirF.delete(); - } - } - } - - public String getFirstLineOfException() { - if (this.stderrDuringParse == null) { - return null; - } - String[] lines = this.stderrDuringParse.split("\n"); - String prefix = "Exception in thread \"main\" "; - return lines[0].substring(prefix.length(), lines[0].length()); - } - - /** - * When looking at a result set that consists of a Map/HashTable we cannot - * rely on the output order, as the hashing algorithm or other aspects of - * the implementation may be different on differnt JDKs or platforms. Hence - * we take the Map, convert the keys to a List, sort them and Stringify the - * Map, which is a bit of a hack, but guarantees that we get the same order - * on all systems. We assume that the keys are strings. - * - * @param m - * The Map that contains keys we wish to return in sorted order - * @return A string that represents all the keys in sorted order. 
- */ - public String sortMapToString(Map m) { - // Pass in crap, and get nothing back - // - if (m == null) { - return null; - } - - System.out.println("Map toString looks like: " + m.toString()); - - // Sort the keys in the Map - // - TreeMap nset = new TreeMap(m); - - System.out.println("Tree map looks like: " + nset.toString()); - return nset.toString(); - } - - public List realElements(List elements) { - return elements.subList(Token.MIN_USER_TOKEN_TYPE, elements.size()); - } - - public void assertNotNullOrEmpty(String message, String text) { - assertNotNull(message, text); - assertFalse(message, text.isEmpty()); - } - - public void assertNotNullOrEmpty(String text) { - assertNotNull(text); - assertFalse(text.isEmpty()); - } - - public static class IntTokenStream implements TokenStream { - IntegerList types; - int p = 0; - - public IntTokenStream(IntegerList types) { - this.types = types; - } - - @Override - public void consume() { - p++; - } - - @Override - public int LA(int i) { - return LT(i).getType(); - } - - @Override - public int mark() { - return index(); - } - - @Override - public int index() { - return p; - } - - @Override - public void release(int marker) { - seek(marker); - } - - @Override - public void seek(int index) { - p = index; - } - - @Override - public int size() { - return types.size(); - } - - @Override - public String getSourceName() { - return null; - } - - @Override - public Token LT(int i) { - CommonToken t; - int rawIndex = p + i - 1; - if (rawIndex >= types.size()) - t = new CommonToken(Token.EOF); - else - t = new CommonToken(types.get(rawIndex)); - t.setTokenIndex(rawIndex); - return t; - } - - @Override - public Token get(int i) { - return new org.antlr.v4.runtime.CommonToken(types.get(i)); - } - - @Override - public TokenSource getTokenSource() { - return null; - } - - @Override - public String getText() { - throw new UnsupportedOperationException("can't give strings"); - } - - @Override - public String getText(Interval interval) 
{ - throw new UnsupportedOperationException("can't give strings"); - } - - @Override - public String getText(RuleContext ctx) { - throw new UnsupportedOperationException("can't give strings"); - } - - @Override - public String getText(Token start, Token stop) { - throw new UnsupportedOperationException("can't give strings"); - } - } - - /** Sort a list */ - public > List sort(List data) { - List dup = new ArrayList(); - dup.addAll(data); - Collections.sort(dup); - return dup; - } - - /** Return map sorted by key */ - public , V> LinkedHashMap sort( - Map data) { - LinkedHashMap dup = new LinkedHashMap(); - List keys = new ArrayList(); - keys.addAll(data.keySet()); - Collections.sort(keys); - for (K k : keys) { - dup.put(k, data.get(k)); - } - return dup; - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/safari/BaseSafariTest.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/safari/BaseSafariTest.java deleted file mode 100644 index dcdc31bf7..000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/safari/BaseSafariTest.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ -package org.antlr.v4.test.runtime.javascript.safari; - -import org.antlr.v4.test.runtime.javascript.browser.BaseBrowserTest; -import org.junit.AfterClass; -import org.junit.BeforeClass; - -/* see https://code.google.com/p/selenium/wiki/SafariDriver for instructions */ -public class BaseSafariTest extends BaseBrowserTest { - - @BeforeClass - public static void initWebDriver() { - driver = SharedWebDriver.init(); - } - - @AfterClass - public static void closeWebDriver() { - SharedWebDriver.close(); - } - -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/safari/SharedWebDriver.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/safari/SharedWebDriver.java deleted file mode 100644 index d0efb37dc..000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/safari/SharedWebDriver.java +++ /dev/null @@ -1,48 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.javascript.safari; - -import org.openqa.selenium.WebDriver; -import org.openqa.selenium.safari.SafariDriver; - -import java.util.Timer; -import java.util.TimerTask; - -public class SharedWebDriver { - - static WebDriver driver; - static Timer timer; - - public static WebDriver init() { - if(driver==null) { - System.setProperty("webdriver.safari.noinstall", "true"); - driver = new SafariDriver(); - } else if(timer!=null) { - timer.cancel(); - timer = null; - } - - return driver; - } - - public static void close() { - if(driver!=null) { - if(timer!=null) { - timer.cancel(); - timer = null; - } - timer = new Timer(); - timer.schedule(new TimerTask() { - @Override public void run() { - driver.quit(); - driver = null; - } - }, 2000); // close with delay to allow next Test to start - } - } - -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/safari/TestCompositeLexers.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/safari/TestCompositeLexers.java deleted file mode 100644 index b426850d2..000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/safari/TestCompositeLexers.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.javascript.safari; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.CompositeLexersDescriptors; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestCompositeLexers extends BaseRuntimeTest { - public TestCompositeLexers(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseSafariTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(CompositeLexersDescriptors.class, "Safari"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/safari/TestCompositeParsers.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/safari/TestCompositeParsers.java deleted file mode 100644 index 7bfe1310f..000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/safari/TestCompositeParsers.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.javascript.safari; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.CompositeParsersDescriptors; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestCompositeParsers extends BaseRuntimeTest { - public TestCompositeParsers(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseSafariTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(CompositeParsersDescriptors.class, "Safari"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/safari/TestFullContextParsing.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/safari/TestFullContextParsing.java deleted file mode 100644 index ac0842702..000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/safari/TestFullContextParsing.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.javascript.safari; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.FullContextParsingDescriptors; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestFullContextParsing extends BaseRuntimeTest { - public TestFullContextParsing(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseSafariTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(FullContextParsingDescriptors.class, "Safari"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/safari/TestLeftRecursion.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/safari/TestLeftRecursion.java deleted file mode 100644 index 526254436..000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/safari/TestLeftRecursion.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.javascript.safari; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.LeftRecursionDescriptors; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestLeftRecursion extends BaseRuntimeTest { - public TestLeftRecursion(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseSafariTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(LeftRecursionDescriptors.class, "Safari"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/safari/TestLexerErrors.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/safari/TestLexerErrors.java deleted file mode 100644 index 3e3f352fa..000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/safari/TestLexerErrors.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.javascript.safari; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.LexerErrorsDescriptors; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestLexerErrors extends BaseRuntimeTest { - public TestLexerErrors(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseSafariTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(LexerErrorsDescriptors.class, "Safari"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/safari/TestLexerExec.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/safari/TestLexerExec.java deleted file mode 100644 index da29e2de7..000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/safari/TestLexerExec.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.javascript.safari; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.LexerExecDescriptors; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestLexerExec extends BaseRuntimeTest { - public TestLexerExec(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseSafariTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(LexerExecDescriptors.class, "Safari"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/safari/TestListeners.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/safari/TestListeners.java deleted file mode 100644 index 6d8c0e11b..000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/safari/TestListeners.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.javascript.safari; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.ListenersDescriptors; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestListeners extends BaseRuntimeTest { - public TestListeners(RuntimeTestDescriptor descriptor) { - super(descriptor, new BaseSafariTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(ListenersDescriptors.class, "Safari"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/safari/TestParseTrees.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/safari/TestParseTrees.java deleted file mode 100644 index 3e75cf015..000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/safari/TestParseTrees.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.javascript.safari; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.ParseTreesDescriptors; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestParseTrees extends BaseRuntimeTest { - public TestParseTrees(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseSafariTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(ParseTreesDescriptors.class, "Safari"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/safari/TestParserErrors.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/safari/TestParserErrors.java deleted file mode 100644 index 52346a939..000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/safari/TestParserErrors.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.javascript.safari; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.ParserErrorsDescriptors; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestParserErrors extends BaseRuntimeTest { - public TestParserErrors(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseSafariTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(ParserErrorsDescriptors.class, "Safari"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/safari/TestParserExec.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/safari/TestParserExec.java deleted file mode 100644 index 75a06c1cb..000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/safari/TestParserExec.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.javascript.safari; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.ParserExecDescriptors; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestParserExec extends BaseRuntimeTest { - public TestParserExec(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseSafariTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(ParserExecDescriptors.class, "Safari"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/safari/TestPerformance.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/safari/TestPerformance.java deleted file mode 100644 index 1a09a64c7..000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/safari/TestPerformance.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.javascript.safari; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.PerformanceDescriptors; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestPerformance extends BaseRuntimeTest { - public TestPerformance(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseSafariTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(PerformanceDescriptors.class, "Safari"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/safari/TestSemPredEvalLexer.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/safari/TestSemPredEvalLexer.java deleted file mode 100644 index 24d1e7bfe..000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/safari/TestSemPredEvalLexer.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.javascript.safari; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.SemPredEvalLexerDescriptors; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestSemPredEvalLexer extends BaseRuntimeTest { - public TestSemPredEvalLexer(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseSafariTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(SemPredEvalLexerDescriptors.class, "Safari"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/safari/TestSemPredEvalParser.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/safari/TestSemPredEvalParser.java deleted file mode 100644 index 3387a6d49..000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/safari/TestSemPredEvalParser.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.javascript.safari; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.SemPredEvalParserDescriptors; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestSemPredEvalParser extends BaseRuntimeTest { - public TestSemPredEvalParser(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseSafariTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(SemPredEvalParserDescriptors.class, "Safari"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/safari/TestSets.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/safari/TestSets.java deleted file mode 100644 index b3cd0237f..000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/safari/TestSets.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.javascript.safari; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.antlr.v4.test.runtime.descriptors.SetsDescriptors; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestSets extends BaseRuntimeTest { - public TestSets(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseSafariTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors(SetsDescriptors.class, "Safari"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/python3/BasePython3Test.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/python3/BasePython3Test.java index f9e037fe8..44b0c1294 100644 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/python3/BasePython3Test.java +++ b/runtime-testsuite/test/org/antlr/v4/test/runtime/python3/BasePython3Test.java @@ -19,8 +19,8 @@ public class BasePython3Test extends BasePythonTest { @Override protected String getPythonExecutable() { - return "python3.6"; - } // force 3.6 + return "python3.7"; + } // force 3.7 @Override protected void writeLexerTestFile(String lexerName, boolean showDFA) { diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/swift/BaseSwiftTest.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/swift/BaseSwiftTest.java index 34bb56ff7..2e71deb9b 100644 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/swift/BaseSwiftTest.java +++ b/runtime-testsuite/test/org/antlr/v4/test/runtime/swift/BaseSwiftTest.java @@ -57,12 +57,23 @@ public class BaseSwiftTest implements RuntimeTestSupport { throw new RuntimeException("Swift runtime file not found at:" + swiftRuntime.getPath()); } ANTLR_RUNTIME_PATH = swiftRuntime.getPath(); - fastFailRunProcess(ANTLR_RUNTIME_PATH, SWIFT_CMD, "build"); + try { + 
fastFailRunProcess(ANTLR_RUNTIME_PATH, SWIFT_CMD, "build"); + } + catch (IOException | InterruptedException e) { + e.printStackTrace(); + throw new RuntimeException(e); + } // shutdown logic Runtime.getRuntime().addShutdownHook(new Thread() { public void run() { - fastFailRunProcess(ANTLR_RUNTIME_PATH, SWIFT_CMD, "package", "clean"); + try { + fastFailRunProcess(ANTLR_RUNTIME_PATH, SWIFT_CMD, "package", "clean"); + } + catch (IOException | InterruptedException e) { + e.printStackTrace(); + } } }); } @@ -145,8 +156,14 @@ public class BaseSwiftTest implements RuntimeTestSupport { String projectName = "testcase-" + System.currentTimeMillis(); String projectDir = getTmpDir() + "/" + projectName; - buildProject(projectDir, projectName); - return execTest(projectDir, projectName); + try { + buildProject(projectDir, projectName); + return execTest(projectDir, projectName); + } + catch (IOException | InterruptedException e) { + e.printStackTrace(); + return null; + } } @Override @@ -183,7 +200,7 @@ public class BaseSwiftTest implements RuntimeTestSupport { Collections.addAll(this.sourceFiles, files); } - private void buildProject(String projectDir, String projectName) { + private void buildProject(String projectDir, String projectName) throws IOException, InterruptedException { mkdir(projectDir); fastFailRunProcess(projectDir, SWIFT_CMD, "package", "init", "--type", "executable"); for (String sourceFile: sourceFiles) { @@ -191,20 +208,16 @@ public class BaseSwiftTest implements RuntimeTestSupport { fastFailRunProcess(getTmpDir(), "mv", "-f", absPath, projectDir + "/Sources/" + projectName); } fastFailRunProcess(getTmpDir(), "mv", "-f", "input", projectDir); - - try { - String dylibPath = ANTLR_RUNTIME_PATH + "/.build/debug/"; - Pair buildResult = runProcess(projectDir, SWIFT_CMD, "build", - "-Xswiftc", "-I"+dylibPath, - "-Xlinker", "-L"+dylibPath, - "-Xlinker", "-lAntlr4", - "-Xlinker", "-rpath", - "-Xlinker", dylibPath); - if (buildResult.b.length() > 0) { - throw new 
RuntimeException("unit test build failed: " + buildResult.a + "\n" + buildResult.b); - } - } catch (IOException | InterruptedException e) { - e.printStackTrace(); + String dylibPath = ANTLR_RUNTIME_PATH + "/.build/debug/"; +// System.err.println(dylibPath); + Pair buildResult = runProcess(projectDir, SWIFT_CMD, "build", + "-Xswiftc", "-I"+dylibPath, + "-Xlinker", "-L"+dylibPath, + "-Xlinker", "-lAntlr4", + "-Xlinker", "-rpath", + "-Xlinker", dylibPath); + if (buildResult.b.length() > 0) { + throw new IOException("unit test build failed: " + buildResult.a + "\n" + buildResult.b); } } @@ -214,20 +227,22 @@ public class BaseSwiftTest implements RuntimeTestSupport { StreamVacuum stderrVacuum = new StreamVacuum(process.getErrorStream()); stdoutVacuum.start(); stderrVacuum.start(); - process.waitFor(); + int status = process.waitFor(); stdoutVacuum.join(); stderrVacuum.join(); + if (status != 0) { + throw new IOException("Process exited with status " + status + ":\n" + stdoutVacuum.toString() + "\n" + stderrVacuum.toString()); + } return new Pair<>(stdoutVacuum.toString(), stderrVacuum.toString()); } - private static void fastFailRunProcess(String workingDir, String... command) { + private static void fastFailRunProcess(String workingDir, String... 
command) throws IOException, InterruptedException { ProcessBuilder builder = new ProcessBuilder(command); builder.directory(new File(workingDir)); - try { - Process p = builder.start(); - p.waitFor(); - } catch (Exception e) { - e.printStackTrace(); + Process p = builder.start(); + int status = p.waitFor(); + if (status != 0) { + throw new IOException("Process exited with status " + status); } } @@ -251,8 +266,14 @@ public class BaseSwiftTest implements RuntimeTestSupport { addSourceFiles("main.swift"); String projectName = "testcase-" + System.currentTimeMillis(); String projectDir = getTmpDir() + "/" + projectName; - buildProject(projectDir, projectName); - return execTest(projectDir, projectName); + try { + buildProject(projectDir, projectName); + return execTest(projectDir, projectName); + } + catch (IOException | InterruptedException e) { + e.printStackTrace(); + return null; + } } private void writeParserTestFile(String parserName, diff --git a/runtime/CSharp/README.md b/runtime/CSharp/README.md index d272a6a06..94ea0440a 100644 --- a/runtime/CSharp/README.md +++ b/runtime/CSharp/README.md @@ -43,7 +43,7 @@ See the docs and the book to learn about writing lexer and parser grammars. ### Step 4: Generate the C# code This can be done either from the cmd line, or by adding a custom pre-build command in your project. -At minimal, the cmd line should look as follows: ``java -jar antlr4-4.7.2.jar -Dlanguage=CSharp grammar.g4`` +At minimal, the cmd line should look as follows: ``java -jar antlr4-4.8.jar -Dlanguage=CSharp grammar.g4`` This will generate the files, which you can then integrate in your project. This is just a quick start. The tool has many useful options to control generation, please refer to its documentation. 
diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Antlr4.Runtime.dotnet.csproj b/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Antlr4.Runtime.dotnet.csproj index 23cbfa49f..50814fc50 100644 --- a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Antlr4.Runtime.dotnet.csproj +++ b/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Antlr4.Runtime.dotnet.csproj @@ -1,7 +1,7 @@  The ANTLR Organization - 4.7.2 + 4.8 en-US netstandard1.3;net35 $(NoWarn);CS1591;CS1574;CS1580 diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/AntlrInputStream.cs b/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/AntlrInputStream.cs index a35dc3452..023dd671e 100644 --- a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/AntlrInputStream.cs +++ b/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/AntlrInputStream.cs @@ -48,8 +48,7 @@ namespace Antlr4.Runtime System.Diagnostics.Debug.Assert(LA(1) == IntStreamConstants.EOF); throw new InvalidOperationException("cannot consume EOF"); } - //System.out.println("prev p="+p+", c="+(char)data[p]); - if (p < n) + else { p++; } diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Atn/SingletonPredictionContext.cs b/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Atn/SingletonPredictionContext.cs index 2f8f1bb5d..e162dcaca 100644 --- a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Atn/SingletonPredictionContext.cs +++ b/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Atn/SingletonPredictionContext.cs @@ -79,7 +79,7 @@ namespace Antlr4.Runtime.Atn return false; } Antlr4.Runtime.Atn.SingletonPredictionContext other = (Antlr4.Runtime.Atn.SingletonPredictionContext)o; - return returnState == other.returnState && parent.Equals(other.parent); + return returnState == other.returnState && (parent != null && parent.Equals(other.parent)); } public override string ToString() diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Parser.cs b/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Parser.cs index b13919923..130e00418 100644 --- 
a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Parser.cs +++ b/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Parser.cs @@ -22,20 +22,20 @@ namespace Antlr4.Runtime #if !PORTABLE public class TraceListener : IParseTreeListener { - private readonly TextWriter Output; - public TraceListener(TextWriter output) { - Output = output; + public TraceListener(TextWriter output,Parser enclosing) { + _output = output; + _enclosing = enclosing; } public virtual void EnterEveryRule(ParserRuleContext ctx) { - Output.WriteLine("enter " + this._enclosing.RuleNames[ctx.RuleIndex] + ", LT(1)=" + this._enclosing._input.LT(1).Text); + _output.WriteLine("enter " + this._enclosing.RuleNames[ctx.RuleIndex] + ", LT(1)=" + this._enclosing._input.LT(1).Text); } public virtual void ExitEveryRule(ParserRuleContext ctx) { - Output.WriteLine("exit " + this._enclosing.RuleNames[ctx.RuleIndex] + ", LT(1)=" + this._enclosing._input.LT(1).Text); + _output.WriteLine("exit " + this._enclosing.RuleNames[ctx.RuleIndex] + ", LT(1)=" + this._enclosing._input.LT(1).Text); } public virtual void VisitErrorNode(IErrorNode node) @@ -46,15 +46,17 @@ namespace Antlr4.Runtime { ParserRuleContext parent = (ParserRuleContext)((IRuleNode)node.Parent).RuleContext; IToken token = node.Symbol; - Output.WriteLine("consume " + token + " rule " + this._enclosing.RuleNames[parent.RuleIndex]); + _output.WriteLine("consume " + token + " rule " + this._enclosing.RuleNames[parent.RuleIndex]); } internal TraceListener(Parser _enclosing) { this._enclosing = _enclosing; + _output = Console.Out; } private readonly Parser _enclosing; + private readonly TextWriter _output; } #endif diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Properties/AssemblyInfo.cs b/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Properties/AssemblyInfo.cs index 325bf9076..1a8459929 100644 --- a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Properties/AssemblyInfo.cs +++ b/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Properties/AssemblyInfo.cs @@ -42,8 
+42,8 @@ using System.Runtime.InteropServices; // You can specify all the values or you can default the Build and Revision Numbers // by using the '*' as shown below: // [assembly: AssemblyVersion("1.0.*")] -[assembly: AssemblyVersion("4.7.2")] +[assembly: AssemblyVersion("4.8")] #if !COMPACT -[assembly: AssemblyFileVersion("4.7.2")] -[assembly: AssemblyInformationalVersion("4.7.2")] +[assembly: AssemblyFileVersion("4.8")] +[assembly: AssemblyInformationalVersion("4.8")] #endif diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Tree/ParseTreeWalker.cs b/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Tree/ParseTreeWalker.cs index 515c9c0f9..5f91e20f6 100644 --- a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Tree/ParseTreeWalker.cs +++ b/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Tree/ParseTreeWalker.cs @@ -9,6 +9,16 @@ namespace Antlr4.Runtime.Tree { public static readonly ParseTreeWalker Default = new ParseTreeWalker(); + /// + /// Performs a walk on the given parse tree starting at the root and going down recursively + /// with depth-first search. On each node, + /// is called before + /// recursively walking down into child nodes, then + /// + /// is called after the recursive call to wind up. + /// + /// The listener used by the walker to process grammar rules + /// The parse tree to be walked on public virtual void Walk(IParseTreeListener listener, IParseTree t) { if (t is IErrorNode) @@ -35,13 +45,12 @@ namespace Antlr4.Runtime.Tree } /// - /// The discovery of a rule node, involves sending two events: the generic - /// - /// and a - /// - /// -specific event. First we trigger the generic and then - /// the rule specific. We to them in reverse order upon finishing the node. 
+ /// Enters a grammar rule by first triggering the generic event + /// + /// then by triggering the event specific to the given parse tree node /// + /// The listener responding to the trigger events + /// The grammar rule containing the rule context protected internal virtual void EnterRule(IParseTreeListener listener, IRuleNode r) { ParserRuleContext ctx = (ParserRuleContext)r.RuleContext; @@ -49,6 +58,13 @@ namespace Antlr4.Runtime.Tree ctx.EnterRule(listener); } + /// + /// Exits a grammar rule by first triggering the event specific to the given parse tree node + /// then by triggering the generic event + /// + /// + /// The listener responding to the trigger events + /// The grammar rule containing the rule context protected internal virtual void ExitRule(IParseTreeListener listener, IRuleNode r) { ParserRuleContext ctx = (ParserRuleContext)r.RuleContext; diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Tree/Xpath/XPathLexer.cs b/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Tree/Xpath/XPathLexer.cs index 0fd80c339..abe5a1239 100644 --- a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Tree/Xpath/XPathLexer.cs +++ b/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Tree/Xpath/XPathLexer.cs @@ -24,7 +24,7 @@ using Antlr4.Runtime.Atn; using Antlr4.Runtime.Misc; using DFA = Antlr4.Runtime.Dfa.DFA; -[System.CodeDom.Compiler.GeneratedCode("ANTLR", "4.7")] +[System.CodeDom.Compiler.GeneratedCode("ANTLR", "4.8")] [System.CLSCompliant(false)] public partial class XPathLexer : Lexer { diff --git a/runtime/Cpp/CMakeLists.txt b/runtime/Cpp/CMakeLists.txt index 28b19a195..32b34bfee 100644 --- a/runtime/Cpp/CMakeLists.txt +++ b/runtime/Cpp/CMakeLists.txt @@ -19,7 +19,7 @@ if(NOT WITH_DEMO) FORCE) endif(NOT WITH_DEMO) -option(WITH_LIBCXX "Building with clang++ and libc++(in Linux). To enable with: -DWITH_LIBCXX=On" On) +option(WITH_LIBCXX "Building with clang++ and libc++(in Linux). 
To enable with: -DWITH_LIBCXX=On" Off) option(WITH_STATIC_CRT "(Visual C++) Enable to statically link CRT, which avoids requiring users to install the redistribution package. To disable with: -DWITH_STATIC_CRT=Off" On) @@ -71,6 +71,9 @@ else() set(MY_CXX_WARNING_FLAGS " -Wall -pedantic -W") endif() +# Define USE_UTF8_INSTEAD_OF_CODECVT macro. +# set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DUSE_UTF8_INSTEAD_OF_CODECVT") + # Initialize CXXFLAGS. if("${CMAKE_VERSION}" VERSION_GREATER 3.1.0) set(CMAKE_CXX_STANDARD 11) @@ -122,7 +125,9 @@ elseif ("${CMAKE_CXX_COMPILER_ID}" MATCHES "Clang" AND ( CMAKE_SYSTEM_NAME MATCH endif() elseif(MSVC_VERSION GREATER 1800 OR MSVC_VERSION EQUAL 1800) # Visual Studio 2012+ supports c++11 features -else () +elseif(CMAKE_SYSTEM_NAME MATCHES "Emscripten") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11 -stdlib=libc++") +else() message(FATAL_ERROR "Your C++ compiler does not support C++11.") endif() @@ -139,11 +144,12 @@ if (ANTLR4_INSTALL) include(CMakePackageConfigHelpers) if(NOT ANTLR4_CMAKE_DIR) - set(ANTLR4_CMAKE_DIR ${CMAKE_INSTALL_LIBDIR}/cmake/antlr4 CACHE STRING + set(ANTLR4_CMAKE_DIR ${CMAKE_INSTALL_LIBDIR}/cmake CACHE STRING "Installation directory for cmake files." 
FORCE ) endif(NOT ANTLR4_CMAKE_DIR) - set(version_config ${PROJECT_BINARY_DIR}/antlr4-config-version.cmake) + set(version_runtime_config ${PROJECT_BINARY_DIR}/antlr4-runtime-config-version.cmake) + set(version_generator_config ${PROJECT_BINARY_DIR}/antlr4-generator-config-version.cmake) set(project_runtime_config ${PROJECT_BINARY_DIR}/antlr4-runtime-config.cmake) set(project_generator_config ${PROJECT_BINARY_DIR}/antlr4-generator-config.cmake) set(targets_export_name antlr4-targets) @@ -157,31 +163,39 @@ if (ANTLR4_INSTALL) configure_package_config_file( cmake/antlr4-runtime.cmake.in ${project_runtime_config} - INSTALL_DESTINATION ${ANTLR4_CMAKE_DIR} + INSTALL_DESTINATION ${ANTLR4_CMAKE_DIR}/antlr4-runtime PATH_VARS ANTLR4_INCLUDE_DIR ANTLR4_LIB_DIR ) -configure_package_config_file( + configure_package_config_file( cmake/antlr4-generator.cmake.in ${project_generator_config} - INSTALL_DESTINATION ${ANTLR4_CMAKE_DIR} + INSTALL_DESTINATION ${ANTLR4_CMAKE_DIR}/antlr4-generator PATH_VARS ANTLR4_INCLUDE_DIR ANTLR4_LIB_DIR ) write_basic_package_version_file( - ${version_config} + ${version_runtime_config} + VERSION ${ANTLR_VERSION} + COMPATIBILITY SameMajorVersion ) + + write_basic_package_version_file( + ${version_generator_config} VERSION ${ANTLR_VERSION} COMPATIBILITY SameMajorVersion ) install(EXPORT ${targets_export_name} - DESTINATION ${ANTLR4_CMAKE_DIR} ) + DESTINATION ${ANTLR4_CMAKE_DIR}/antlr4-runtime ) install(FILES ${project_runtime_config} - ${project_generator_config} - ${version_config} - DESTINATION ${ANTLR4_CMAKE_DIR} ) + ${version_runtime_config} + DESTINATION ${ANTLR4_CMAKE_DIR}/antlr4-runtime ) + + install(FILES ${project_generator_config} + ${version_generator_config} + DESTINATION ${ANTLR4_CMAKE_DIR}/antlr4-generator ) endif(ANTLR4_INSTALL) diff --git a/runtime/Cpp/README.md b/runtime/Cpp/README.md index 79bd27884..81d1f55ed 100644 --- a/runtime/Cpp/README.md +++ b/runtime/Cpp/README.md @@ -25,30 +25,30 @@ The C++ target has been the work of the 
following people: ## Project Status -* Building on OS X, Windows, Android and Linux +* Building on macOS, Windows, Android and Linux * No errors and warnings * Library linking -* Some unit tests in the OSX project, for important base classes with almost 100% code coverage. +* Some unit tests in the macOS project, for important base classes with almost 100% code coverage. * All memory allocations checked * Simple command line demo application working on all supported platforms. * All runtime tests pass. ### Build + Usage Notes -The minimum C++ version to compile the ANTLR C++ runtime with is C++11. The supplied projects can built the runtime either as static or dynamic library, as both 32bit and 64bit arch. The OSX project contains a target for iOS and can also be built using cmake (instead of XCode). +The minimum C++ version to compile the ANTLR C++ runtime with is C++11. The supplied projects can built the runtime either as static or dynamic library, as both 32bit and 64bit arch. The macOS project contains a target for iOS and can also be built using cmake (instead of XCode). Include the antlr4-runtime.h umbrella header in your target application to get everything needed to use the library. If you are compiling with cmake, the minimum version required is cmake 2.8. -#### Compiling on Windows with Visual Studio prior to 2017 -Simply open the VS solution (VS 2013+) and build it. +#### Compiling on Windows with Visual Studio using he Visual Studio projects +Simply open the VS project from the runtime folder (VS 2013+) and build it. -#### Compiling on Windows with Visual Studio VS2017 +#### Compiling on Windows using cmake with Visual Studio VS2017 and later Use the "Open Folder" Feature from the File->Open->Folder menu to open the runtime/Cpp directory. It will automatically use the CMake description to open up a Visual Studio Solution. 
-#### Compiling on OSX +#### Compiling on macOS Either open the included XCode project and build that or use the cmake compilation as described for linux. #### Compiling on Android diff --git a/runtime/Cpp/VERSION b/runtime/Cpp/VERSION index af9764a59..ef216a53f 100644 --- a/runtime/Cpp/VERSION +++ b/runtime/Cpp/VERSION @@ -1 +1 @@ -4.7.2 +4.8 diff --git a/runtime/Cpp/cmake/Antlr4Package.md b/runtime/Cpp/cmake/Antlr4Package.md index ac9480db2..17a630379 100644 --- a/runtime/Cpp/cmake/Antlr4Package.md +++ b/runtime/Cpp/cmake/Antlr4Package.md @@ -14,7 +14,7 @@ given input file during build. The following table lists the parameters that can be used with the function: Argument# | Required | Default | Use -----------|-----------|---------|-- +----------|-----------|---------|--- 0 | Yes | n/a | Unique target name. It is used to generate CMake Variables to reference the various outputs of the generation 1 | Yes | n/a | Input file containing the lexer/parser definition 2 | Yes | n/a | Type of Rules contained in the input: LEXER, PARSER or BOTH @@ -24,10 +24,10 @@ Argument# | Required | Default | Use 7 | No | none | Additional files on which the input depends 8 | No | none | Library path to use during generation -The `ANTLR4_JAR_LOCATION` CMake variable must be set to the location where the `antlr-4*-complete.jar` generator is located. -You can download the file from [here](http://www.antlr.org/download.html). +The `ANTLR4_JAR_LOCATION` CMake variable must be set to the location where the `antlr-4*-complete.jar` generator is located. You can download the file from [here](http://www.antlr.org/download.html). -Additional option to the ANTLR4 generator can be passed in the `ANTLR4_GENERATED_OPTIONS` variable +Additional options to the ANTLR4 generator can be passed in the `ANTLR4_GENERATED_OPTIONS` variable. Add the installation prefix of `antlr4-runtime` to `CMAKE_PREFIX_PATH` or set + `antlr4-runtime_DIR` to a directory containing the files. 
The following CMake variables are available following a call to `antlr4_generate` @@ -96,7 +96,7 @@ target_link_libraries( Parsertest PRIVATE find_package(antlr4-generator REQUIRED) # Set path to generator - set(ANTLR4_JAR_LOCATION ${PROJECT_SOURCE_DIR}/thirdparty/antlr/antlr-4.7.2-complete.jar) + set(ANTLR4_JAR_LOCATION ${PROJECT_SOURCE_DIR}/thirdparty/antlr/antlr-4.8-complete.jar) # generate lexer antlr4_generate( diff --git a/runtime/Cpp/cmake/ExternalAntlr4Cpp.cmake b/runtime/Cpp/cmake/ExternalAntlr4Cpp.cmake index 675cbf579..db8ed6f40 100644 --- a/runtime/Cpp/cmake/ExternalAntlr4Cpp.cmake +++ b/runtime/Cpp/cmake/ExternalAntlr4Cpp.cmake @@ -38,7 +38,7 @@ else() set(ANTLR4_SHARED_LIBRARIES ${ANTLR4_OUTPUT_DIR}/libantlr4-runtime.dll.a) set(ANTLR4_RUNTIME_LIBRARIES - ${ANTLR4_OUTPUT_DIR}/cygantlr4-runtime-4.7.2.dll) + ${ANTLR4_OUTPUT_DIR}/cygantlr4-runtime-4.8.dll) elseif(APPLE) set(ANTLR4_RUNTIME_LIBRARIES ${ANTLR4_OUTPUT_DIR}/libantlr4-runtime.dylib) @@ -109,6 +109,12 @@ else() endif() # Seperate build step as rarely people want both +set(ANTLR4_BUILD_DIR ${ANTLR4_ROOT}) +if(${CMAKE_VERSION} VERSION_GREATER_EQUAL "3.14.0") + # CMake 3.14 builds in above's SOURCE_SUBDIR when BUILD_IN_SOURCE is true + set(ANTLR4_BUILD_DIR ${ANTLR4_ROOT}/runtime/Cpp) +endif() + ExternalProject_Add_Step( antlr4_runtime build_static @@ -118,7 +124,7 @@ ExternalProject_Add_Step( DEPENDS antlr4_runtime BYPRODUCTS ${ANTLR4_STATIC_LIBRARIES} EXCLUDE_FROM_MAIN 1 - WORKING_DIRECTORY ${ANTLR4_ROOT}) + WORKING_DIRECTORY ${ANTLR4_BUILD_DIR}) ExternalProject_Add_StepTargets(antlr4_runtime build_static) add_library(antlr4_static STATIC IMPORTED) @@ -135,7 +141,7 @@ ExternalProject_Add_Step( DEPENDS antlr4_runtime BYPRODUCTS ${ANTLR4_SHARED_LIBRARIES} ${ANTLR4_RUNTIME_LIBRARIES} EXCLUDE_FROM_MAIN 1 - WORKING_DIRECTORY ${ANTLR4_ROOT}) + WORKING_DIRECTORY ${ANTLR4_BUILD_DIR}) ExternalProject_Add_StepTargets(antlr4_runtime build_shared) add_library(antlr4_shared SHARED IMPORTED) diff --git 
a/runtime/Cpp/cmake/FindANTLR.cmake b/runtime/Cpp/cmake/FindANTLR.cmake index 511064173..5ff866fc2 100644 --- a/runtime/Cpp/cmake/FindANTLR.cmake +++ b/runtime/Cpp/cmake/FindANTLR.cmake @@ -2,7 +2,7 @@ find_package(Java QUIET COMPONENTS Runtime) if(NOT ANTLR_EXECUTABLE) find_program(ANTLR_EXECUTABLE - NAMES antlr.jar antlr4.jar antlr-4.jar antlr-4.7.2-complete.jar) + NAMES antlr.jar antlr4.jar antlr-4.jar antlr-4.8-complete.jar) endif() if(ANTLR_EXECUTABLE AND Java_JAVA_EXECUTABLE) diff --git a/runtime/Cpp/cmake/README.md b/runtime/Cpp/cmake/README.md index e27c77e50..77e9da6c6 100644 --- a/runtime/Cpp/cmake/README.md +++ b/runtime/Cpp/cmake/README.md @@ -31,7 +31,7 @@ include_directories(${ANTLR4_INCLUDE_DIRS}) # set variable pointing to the antlr tool that supports C++ # this is not required if the jar file can be found under PATH environment -set(ANTLR_EXECUTABLE /home/user/antlr-4.7.2-complete.jar) +set(ANTLR_EXECUTABLE /home/user/antlr-4.8-complete.jar) # add macros to generate ANTLR Cpp code from grammar find_package(ANTLR REQUIRED) @@ -40,7 +40,8 @@ antlr_target(SampleGrammarLexer TLexer.g4 LEXER PACKAGE antlrcpptest) antlr_target(SampleGrammarParser TParser.g4 PARSER PACKAGE antlrcpptest - DEPENDS_ANTLR SampleGrammarLexer) + DEPENDS_ANTLR SampleGrammarLexer + COMPILE_FLAGS -lib ${ANTLR_SampleGrammarLexer_OUTPUT_DIR}) # include generated files in project environment include_directories(${ANTLR_SampleGrammarLexer_OUTPUT_DIR}) diff --git a/runtime/Cpp/demo/generate.cmd b/runtime/Cpp/demo/generate.cmd index 826c1f614..a7130c2b1 100644 --- a/runtime/Cpp/demo/generate.cmd +++ b/runtime/Cpp/demo/generate.cmd @@ -6,7 +6,7 @@ :: Download the ANLTR jar and place it in the same folder as this script (or adjust the LOCATION var accordingly). 
-set LOCATION=antlr-4.7.2-complete.jar +set LOCATION=antlr-4.8-complete.jar java -jar %LOCATION% -Dlanguage=Cpp -listener -visitor -o generated/ -package antlrcpptest TLexer.g4 TParser.g4 ::java -jar %LOCATION% -Dlanguage=Cpp -listener -visitor -o generated/ -package antlrcpptest -XdbgST TLexer.g4 TParser.g4 ::java -jar %LOCATION% -Dlanguage=Java -listener -visitor -o generated/ -package antlrcpptest TLexer.g4 TParser.g4 diff --git a/runtime/Cpp/deploy-macos.sh b/runtime/Cpp/deploy-macos.sh index 55528470f..cf977652d 100755 --- a/runtime/Cpp/deploy-macos.sh +++ b/runtime/Cpp/deploy-macos.sh @@ -4,9 +4,27 @@ rm -f -R antlr4-runtime build lib 2> /dev/null rm antlr4-cpp-runtime-macos.zip 2> /dev/null +# Get utf8 dependency. +mkdir -p runtime/thirdparty 2> /dev/null +pushd runtime/thirdparty +if [ ! -d utfcpp ] +then + git clone https://github.com/nemtrif/utfcpp.git utfcpp + pushd utfcpp + git checkout tags/v3.1.1 + popd +fi +popd + # Binaries -xcodebuild -project runtime/antlrcpp.xcodeproj -target antlr4 -configuration Release -xcodebuild -project runtime/antlrcpp.xcodeproj -target antlr4_static -configuration Release +xcodebuild -project runtime/antlrcpp.xcodeproj \ + -target antlr4 \ + # GCC_PREPROCESSOR_DEFINITIONS='$GCC_PREPROCESSOR_DEFINITIONS USE_UTF8_INSTEAD_OF_CODECVT' \ + -configuration Release +xcodebuild -project runtime/antlrcpp.xcodeproj \ + -target antlr4_static \ + # GCC_PREPROCESSOR_DEFINITIONS='$GCC_PREPROCESSOR_DEFINITIONS USE_UTF8_INSTEAD_OF_CODECVT' \ + -configuration Release rm -f -R lib mkdir lib mv runtime/build/Release/libantlr4-runtime.a lib/ @@ -17,6 +35,9 @@ rm -f -R antlr4-runtime pushd runtime/src find . -name '*.h' | cpio -pdm ../../antlr4-runtime popd +pushd runtime/thirdparty/utfcpp/source +find . 
-name '*.h' | cpio -pdm ../../../../antlr4-runtime +popd # Zip up and clean up zip -r antlr4-cpp-runtime-macos.zip antlr4-runtime lib diff --git a/runtime/Cpp/deploy-windows.cmd b/runtime/Cpp/deploy-windows.cmd index 5660f26a2..8fc22ab5b 100644 --- a/runtime/Cpp/deploy-windows.cmd +++ b/runtime/Cpp/deploy-windows.cmd @@ -1,58 +1,81 @@ @echo off +setlocal + +if [%1] == [] goto Usage rem Clean left overs from previous builds if there are any if exist bin rmdir /S /Q runtime\bin if exist obj rmdir /S /Q runtime\obj if exist lib rmdir /S /Q lib if exist antlr4-runtime rmdir /S /Q antlr4-runtime -if exist antlr4-cpp-runtime-vs2013.zip erase antlr4-cpp-runtime-vs2013.zip -if exist antlr4-cpp-runtime-vs2015.zip erase antlr4-cpp-runtime-vs2015.zip +if exist antlr4-cpp-runtime-vs2017.zip erase antlr4-cpp-runtime-vs2017.zip +if exist antlr4-cpp-runtime-vs2019.zip erase antlr4-cpp-runtime-vs2019.zip rem Headers -xcopy runtime\src\*.h antlr4-runtime\ /s +echo Copying header files ... +xcopy runtime\src\*.h antlr4-runtime\ /s /q rem Binaries -rem VS 2013 disabled by default. Change the X to a C to enable it. -if exist "X:\Program Files (x86)\Microsoft Visual Studio 12.0\Common7\Tools\VsDevCmd.bat" ( - call "C:\Program Files (x86)\Microsoft Visual Studio 12.0\Common7\Tools\VsDevCmd.bat" +rem VS 2017 disabled by default. Change the X to a C to enable it. +if exist "X:\Program Files (x86)\Microsoft Visual Studio\2017\%1\Common7\Tools\VsDevCmd.bat" ( + echo. 
+ + call "C:\Program Files (x86)\Microsoft Visual Studio\2017\%1\Common7\Tools\VsDevCmd.bat" pushd runtime - msbuild antlr4cpp-vs2013.vcxproj /p:configuration="Release DLL" /p:platform=Win32 - msbuild antlr4cpp-vs2013.vcxproj /p:configuration="Release DLL" /p:platform=x64 + msbuild antlr4cpp-vs2017.vcxproj /p:configuration="Release DLL" /p:platform=Win32 + msbuild antlr4cpp-vs2017.vcxproj /p:configuration="Release DLL" /p:platform=x64 popd - 7z a antlr4-cpp-runtime-vs2013.zip antlr4-runtime + 7z a antlr4-cpp-runtime-vs2017.zip antlr4-runtime xcopy runtime\bin\*.dll lib\ /s xcopy runtime\bin\*.lib lib\ /s - 7z a antlr4-cpp-runtime-vs2013.zip lib + 7z a antlr4-cpp-runtime-vs2017.zip lib rmdir /S /Q lib rmdir /S /Q runtime\bin rmdir /S /Q runtime\obj - rem if exist antlr4-cpp-runtime-vs2013.zip copy antlr4-cpp-runtime-vs2013.zip ~/antlr/sites/website-antlr4/download + rem if exist antlr4-cpp-runtime-vs2017.zip copy antlr4-cpp-runtime-vs2017.zip ~/antlr/sites/website-antlr4/download ) -if exist "C:\Program Files (x86)\Microsoft Visual Studio 14.0\Common7\Tools\VsDevCmd.bat" ( - call "C:\Program Files (x86)\Microsoft Visual Studio 14.0\Common7\Tools\VsDevCmd.bat" +set VCTargetsPath=C:\Program Files (x86)\Microsoft Visual Studio\2019\%1\MSBuild\Microsoft\VC\v160\ +if exist "C:\Program Files (x86)\Microsoft Visual Studio\2019\%1\Common7\Tools\VsDevCmd.bat" ( + echo. 
+ + call "C:\Program Files (x86)\Microsoft Visual Studio\2019\%1\Common7\Tools\VsDevCmd.bat" pushd runtime - msbuild antlr4cpp-vs2015.vcxproj /p:configuration="Release DLL" /p:platform=Win32 - msbuild antlr4cpp-vs2015.vcxproj /p:configuration="Release DLL" /p:platform=x64 + msbuild antlr4cpp-vs2019.vcxproj /p:configuration="Release DLL" /p:platform=Win32 + msbuild antlr4cpp-vs2019.vcxproj /p:configuration="Release DLL" /p:platform=x64 popd - 7z a antlr4-cpp-runtime-vs2015.zip antlr4-runtime + 7z a antlr4-cpp-runtime-vs2019.zip antlr4-runtime xcopy runtime\bin\*.dll lib\ /s xcopy runtime\bin\*.lib lib\ /s - 7z a antlr4-cpp-runtime-vs2015.zip lib + 7z a antlr4-cpp-runtime-vs2019.zip lib rmdir /S /Q lib rmdir /S /Q runtime\bin rmdir /S /Q runtime\obj - rem if exist antlr4-cpp-runtime-vs2015.zip copy antlr4-cpp-runtime-vs2015.zip ~/antlr/sites/website-antlr4/download + rem if exist antlr4-cpp-runtime-vs2019.zip copy antlr4-cpp-runtime-vs2019.zip ~/antlr/sites/website-antlr4/download ) rmdir /S /Q antlr4-runtime +echo. +echo === Build done === + +goto end + +:Usage + +echo This script builds Visual Studio 2017 and/or 2019 libraries of the ANTLR4 runtime. +echo You have to specify the type of your VS installation (Community, Professional etc.) to construct +echo the correct build tools path. +echo. +echo Example: +echo %0 Professional +echo. 
:end diff --git a/runtime/Cpp/runtime/CMakeLists.txt b/runtime/Cpp/runtime/CMakeLists.txt index 2c5e7376f..a8503bb61 100644 --- a/runtime/Cpp/runtime/CMakeLists.txt +++ b/runtime/Cpp/runtime/CMakeLists.txt @@ -1,4 +1,19 @@ +include(${CMAKE_ROOT}/Modules/ExternalProject.cmake) + +set(THIRDPARTY_DIR ${CMAKE_BINARY_DIR}/runtime/thirdparty) +set(UTFCPP_DIR ${THIRDPARTY_DIR}/utfcpp) +ExternalProject_Add( + utfcpp + GIT_REPOSITORY "git://github.com/nemtrif/utfcpp" + GIT_TAG "v3.1.1" + SOURCE_DIR ${UTFCPP_DIR} + UPDATE_DISCONNECTED 1 + CMAKE_ARGS -DCMAKE_INSTALL_PREFIX=${UTFCPP_DIR}/install -Dgtest_force_shared_crt=ON + TEST_AFTER_INSTALL 1 + STEP_TARGETS build) + + include_directories( ${PROJECT_SOURCE_DIR}/runtime/src ${PROJECT_SOURCE_DIR}/runtime/src/atn @@ -8,6 +23,8 @@ include_directories( ${PROJECT_SOURCE_DIR}/runtime/src/tree ${PROJECT_SOURCE_DIR}/runtime/src/tree/pattern ${PROJECT_SOURCE_DIR}/runtime/src/tree/xpath + ${UTFCPP_DIR}/install/include/utf8cpp + ${UTFCPP_DIR}/install/include/utf8cpp/utf8 ) @@ -33,8 +50,8 @@ add_custom_target(make_lib_output_dir ALL COMMAND ${CMAKE_COMMAND} -E make_directory ${LIB_OUTPUT_DIR} ) -add_dependencies(antlr4_shared make_lib_output_dir) -add_dependencies(antlr4_static make_lib_output_dir) +add_dependencies(antlr4_shared make_lib_output_dir utfcpp) +add_dependencies(antlr4_static make_lib_output_dir utfcpp) if(CMAKE_SYSTEM_NAME MATCHES "Linux") target_link_libraries(antlr4_shared ${UUID_LIBRARIES}) @@ -102,15 +119,23 @@ set_target_properties(antlr4_static COMPILE_FLAGS "${disabled_compile_warnings} ${extra_static_compile_flags}") install(TARGETS antlr4_shared - DESTINATION lib + DESTINATION lib EXPORT antlr4-targets) install(TARGETS antlr4_static DESTINATION lib EXPORT antlr4-targets) -install(DIRECTORY "${PROJECT_SOURCE_DIR}/runtime/src/" +install(DIRECTORY "${PROJECT_SOURCE_DIR}/runtime/src/" DESTINATION "include/antlr4-runtime" - COMPONENT dev + COMPONENT dev + FILES_MATCHING PATTERN "*.h" + ) + +install(FILES 
"${UTFCPP_DIR}/source/utf8.h" + DESTINATION "include/antlr4-runtime") +install(DIRECTORY "${UTFCPP_DIR}/source/utf8" + DESTINATION "include/antlr4-runtime" + COMPONENT dev FILES_MATCHING PATTERN "*.h" ) diff --git a/runtime/Cpp/runtime/antlr4cpp-vs2017.vcxproj b/runtime/Cpp/runtime/antlr4cpp-vs2017.vcxproj index 630ee34e9..2c3611c86 100644 --- a/runtime/Cpp/runtime/antlr4cpp-vs2017.vcxproj +++ b/runtime/Cpp/runtime/antlr4cpp-vs2017.vcxproj @@ -123,49 +123,49 @@ true - $(SolutionDir)bin\vs-2015\$(PlatformTarget)\$(Configuration)\ + $(SolutionDir)bin\vs-2017\$(PlatformTarget)\$(Configuration)\ $(SolutionDir)obj\$(PlatformTarget)\$(Configuration)\$(ProjectName)\ antlr4-runtime true - $(SolutionDir)bin\vs-2015\$(PlatformTarget)\$(Configuration)\ + $(SolutionDir)bin\vs-2017\$(PlatformTarget)\$(Configuration)\ $(SolutionDir)obj\$(PlatformTarget)\$(Configuration)\$(ProjectName)\ antlr4-runtime true - $(SolutionDir)bin\vs-2015\$(PlatformTarget)\$(Configuration)\ + $(SolutionDir)bin\vs-2017\$(PlatformTarget)\$(Configuration)\ $(SolutionDir)obj\$(PlatformTarget)\$(Configuration)\$(ProjectName)\ antlr4-runtime true - $(SolutionDir)bin\vs-2015\$(PlatformTarget)\$(Configuration)\ + $(SolutionDir)bin\vs-2017\$(PlatformTarget)\$(Configuration)\ $(SolutionDir)obj\$(PlatformTarget)\$(Configuration)\$(ProjectName)\ antlr4-runtime false - $(SolutionDir)bin\vs-2015\$(PlatformTarget)\$(Configuration)\ + $(SolutionDir)bin\vs-2017\$(PlatformTarget)\$(Configuration)\ $(SolutionDir)obj\$(PlatformTarget)\$(Configuration)\$(ProjectName)\ antlr4-runtime false - $(SolutionDir)bin\vs-2015\$(PlatformTarget)\$(Configuration)\ + $(SolutionDir)bin\vs-2017\$(PlatformTarget)\$(Configuration)\ $(SolutionDir)obj\$(PlatformTarget)\$(Configuration)\$(ProjectName)\ antlr4-runtime false - $(SolutionDir)bin\vs-2015\$(PlatformTarget)\$(Configuration)\ + $(SolutionDir)bin\vs-2017\$(PlatformTarget)\$(Configuration)\ $(SolutionDir)obj\$(PlatformTarget)\$(Configuration)\$(ProjectName)\ antlr4-runtime false - 
$(SolutionDir)bin\vs-2015\$(PlatformTarget)\$(Configuration)\ + $(SolutionDir)bin\vs-2017\$(PlatformTarget)\$(Configuration)\ $(SolutionDir)obj\$(PlatformTarget)\$(Configuration)\$(ProjectName)\ antlr4-runtime diff --git a/runtime/Cpp/runtime/antlr4cpp-vs2019.vcxproj b/runtime/Cpp/runtime/antlr4cpp-vs2019.vcxproj new file mode 100644 index 000000000..54f0aeb14 --- /dev/null +++ b/runtime/Cpp/runtime/antlr4cpp-vs2019.vcxproj @@ -0,0 +1,660 @@ + + + + + Debug Static + Win32 + + + Debug Static + x64 + + + Debug DLL + Win32 + + + Debug DLL + x64 + + + Release Static + Win32 + + + Release Static + x64 + + + Release DLL + Win32 + + + Release DLL + x64 + + + + {83BE66CD-9C4F-4F84-B72A-DD1855C8FC8A} + Win32Proj + antlr4cpp + 10.0 + + + + DynamicLibrary + true + Unicode + v142 + + + StaticLibrary + true + Unicode + v142 + + + DynamicLibrary + true + Unicode + v142 + + + StaticLibrary + true + Unicode + v142 + + + DynamicLibrary + false + true + Unicode + v142 + + + StaticLibrary + false + true + Unicode + v142 + + + DynamicLibrary + false + true + Unicode + v142 + + + StaticLibrary + false + true + Unicode + v142 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + true + $(SolutionDir)bin\vs-2019\$(PlatformTarget)\$(Configuration)\ + $(SolutionDir)obj\$(PlatformTarget)\$(Configuration)\$(ProjectName)\ + antlr4-runtime + + + true + $(SolutionDir)bin\vs-2019\$(PlatformTarget)\$(Configuration)\ + $(SolutionDir)obj\$(PlatformTarget)\$(Configuration)\$(ProjectName)\ + antlr4-runtime + + + true + $(SolutionDir)bin\vs-2019\$(PlatformTarget)\$(Configuration)\ + $(SolutionDir)obj\$(PlatformTarget)\$(Configuration)\$(ProjectName)\ + antlr4-runtime + + + true + $(SolutionDir)bin\vs-2019\$(PlatformTarget)\$(Configuration)\ + $(SolutionDir)obj\$(PlatformTarget)\$(Configuration)\$(ProjectName)\ + antlr4-runtime + + + false + $(SolutionDir)bin\vs-2019\$(PlatformTarget)\$(Configuration)\ + $(SolutionDir)obj\$(PlatformTarget)\$(Configuration)\$(ProjectName)\ + antlr4-runtime + + + 
false + $(SolutionDir)bin\vs-2019\$(PlatformTarget)\$(Configuration)\ + $(SolutionDir)obj\$(PlatformTarget)\$(Configuration)\$(ProjectName)\ + antlr4-runtime + + + false + $(SolutionDir)bin\vs-2019\$(PlatformTarget)\$(Configuration)\ + $(SolutionDir)obj\$(PlatformTarget)\$(Configuration)\$(ProjectName)\ + antlr4-runtime + + + false + $(SolutionDir)bin\vs-2019\$(PlatformTarget)\$(Configuration)\ + $(SolutionDir)obj\$(PlatformTarget)\$(Configuration)\$(ProjectName)\ + antlr4-runtime + + + + Level4 + Disabled + ANTLR4CPP_EXPORTS;%(PreprocessorDefinitions) + src;%(AdditionalIncludeDirectories) + + + + + 4251 + true + false + /Zc:__cplusplus %(AdditionalOptions) + + + Windows + true + + + + + Level4 + Disabled + ANTLR4CPP_EXPORTS;%(PreprocessorDefinitions) + src;%(AdditionalIncludeDirectories) + + + + + 4251 + true + false + /Zc:__cplusplus %(AdditionalOptions) + + + Windows + true + + + + + Level4 + Disabled + ANTLR4CPP_EXPORTS;%(PreprocessorDefinitions) + src;%(AdditionalIncludeDirectories) + + + + + 4251 + true + false + /Zc:__cplusplus %(AdditionalOptions) + + + Windows + true + + + + + Level4 + Disabled + ANTLR4CPP_STATIC;%(PreprocessorDefinitions) + src;%(AdditionalIncludeDirectories) + + + + + 4251 + true + false + /Zc:__cplusplus %(AdditionalOptions) + + + Windows + true + + + + + Level4 + MaxSpeed + true + true + ANTLR4CPP_DLL;ANTLR4CPP_EXPORTS;%(PreprocessorDefinitions) + src;%(AdditionalIncludeDirectories) + + + + + 4251 + true + /Zc:__cplusplus %(AdditionalOptions) + + + Windows + true + true + true + + + + + Level4 + MaxSpeed + true + true + ANTLR4CPP_EXPORTS;%(PreprocessorDefinitions) + src;%(AdditionalIncludeDirectories) + + + + + 4251 + true + /Zc:__cplusplus %(AdditionalOptions) + + + Windows + true + true + true + + + + + Level4 + MaxSpeed + true + true + ANTLR4CPP_DLL;ANTLR4CPP_EXPORTS;%(PreprocessorDefinitions) + src;%(AdditionalIncludeDirectories) + + + + + 4251 + true + /Zc:__cplusplus %(AdditionalOptions) + + + Windows + true + true + true + + + + 
+ Level4 + MaxSpeed + true + true + ANTLR4CPP_STATIC;%(PreprocessorDefinitions) + src;%(AdditionalIncludeDirectories) + + + + + 4251 + true + /Zc:__cplusplus %(AdditionalOptions) + + + Windows + true + true + true + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/runtime/Cpp/runtime/antlr4cpp-vs2019.vcxproj.filters b/runtime/Cpp/runtime/antlr4cpp-vs2019.vcxproj.filters new file mode 100644 index 000000000..cc1986923 --- /dev/null +++ b/runtime/Cpp/runtime/antlr4cpp-vs2019.vcxproj.filters @@ -0,0 +1,990 @@ + + + + + {4FC737F1-C7A5-4376-A066-2A32D752A2FF} + cpp;c;cc;cxx;def;odl;idl;hpj;bat;asm;asmx + + + {93995380-89BD-4b04-88EB-625FBE52EBFB} + h;hpp;hxx;hm;inl;inc;xsd + + + {67DA6AB6-F800-4c08-8B7A-83BB121AAD01} + rc;ico;cur;bmp;dlg;rc2;rct;bin;rgs;gif;jpg;jpeg;jpe;resx;tiff;tif;png;wav;mfcribbon-ms + + + {587a2726-4856-4d21-937a-fbaebaa90232} + + + {2662156f-1508-4dad-b991-a8298a6db9bf} + + + {5b1e59b1-7fa5-46a5-8d92-965bd709cca0} + + + {9de9fe74-5d67-441d-a972-3cebe6dfbfcc} + + + {89fd3896-0ab1-476d-8d64-a57f10a5e73b} + + + {23939d7b-8e11-421e-80eb-b2cfdfdd64e9} + + + {05f2bacb-b5b2-4ca3-abe1-ca9a7239ecaa} + + + {d3b2ae2d-836b-4c73-8180-aca4ebb7d658} + + + {6674a0f0-c65d-4a00-a9e5-1f243b89d0a2} + + + {1893fffe-7a2b-4708-8ce5-003aa9b749f7} + + + {053a0632-27bc-4043-b5e8-760951b3b5b9} + + + {048c180d-44cf-49ca-a7aa-d0053fea07f5} + + + 
{3181cae5-cc15-4050-8c45-22af44a823de} + + + {290632d2-c56e-4005-a417-eb83b9531e1a} + + + + + Header Files + + + Header Files + + + Header Files + + + Header Files + + + Header Files + + + Header Files + + + Header Files + + + Header Files + + + Header Files + + + Header Files + + + Header Files + + + Header Files + + + Header Files + + + Header Files + + + Header Files + + + Header Files + + + Header Files + + + Header Files + + + Header Files + + + Header Files + + + Header Files + + + Header Files + + + Header Files + + + Header Files + + + Header Files + + + Header Files + + + Header Files + + + Header Files + + + Header Files + + + Header Files + + + Header Files + + + Header Files + + + Header Files + + + Header Files + + + Header Files + + + Header Files + + + Header Files + + + Header Files + + + Header Files + + + Header Files\atn + + + Header Files\atn + + + Header Files\atn + + + Header Files\atn + + + Header Files\atn + + + Header Files\atn + + + Header Files\atn + + + Header Files\atn + + + Header Files\atn + + + Header Files\atn + + + Header Files\atn + + + Header Files\atn + + + Header Files\atn + + + Header Files\atn + + + Header Files\atn + + + Header Files\atn + + + Header Files\atn + + + Header Files\atn + + + Header Files\atn + + + Header Files\atn + + + Header Files\atn + + + Header Files\atn + + + Header Files\atn + + + Header Files\atn + + + Header Files\atn + + + Header Files\atn + + + Header Files\atn + + + Header Files\atn + + + Header Files\atn + + + Header Files\atn + + + Header Files\atn + + + Header Files\atn + + + Header Files\atn + + + Header Files\atn + + + Header Files\atn + + + Header Files\atn + + + Header Files\atn + + + Header Files\atn + + + Header Files\atn + + + Header Files\atn + + + Header Files\atn + + + Header Files\atn + + + Header Files\atn + + + Header Files\atn + + + Header Files\atn + + + Header Files\atn + + + Header Files\atn + + + Header Files\dfa + + + Header Files\dfa + + + Header Files\dfa + + + Header 
Files\dfa + + + Header Files\misc + + + Header Files\misc + + + Header Files\misc + + + Header Files\misc + + + Header Files\support + + + Header Files\support + + + Header Files\support + + + Header Files\support + + + Header Files\support + + + Header Files\tree + + + Header Files\tree + + + Header Files\tree + + + Header Files\tree + + + Header Files\tree + + + Header Files\tree + + + Header Files\tree + + + Header Files\tree + + + Header Files\tree + + + Header Files\tree + + + Header Files\tree + + + Header Files\tree + + + Header Files\tree + + + Header Files\tree\pattern + + + Header Files\tree\pattern + + + Header Files\tree\pattern + + + Header Files\tree\pattern + + + Header Files\tree\pattern + + + Header Files\tree\pattern + + + Header Files\tree\pattern + + + Header Files\tree\pattern + + + Header Files\tree\xpath + + + Header Files + + + Header Files\atn + + + Header Files\atn + + + Header Files\atn + + + Header Files\atn + + + Header Files\atn + + + Header Files\atn + + + Header Files\atn + + + Header Files\atn + + + Header Files\atn + + + Header Files\atn + + + Header Files\atn + + + Header Files\atn + + + Header Files\atn + + + Header Files\atn + + + Header Files\atn + + + Header Files\atn + + + Header Files\atn + + + Header Files\atn + + + Header Files\atn + + + Header Files\atn + + + Header Files\atn + + + Header Files\misc + + + Header Files + + + Header Files + + + Header Files\support + + + Header Files\tree\xpath + + + Header Files\tree\xpath + + + Header Files\tree\xpath + + + Header Files\tree\xpath + + + Header Files\tree\xpath + + + Header Files\tree\xpath + + + Header Files\tree\xpath + + + Header Files\tree\xpath + + + Header Files\tree\xpath + + + Header Files + + + Header Files + + + Source Files\support + + + Header Files\tree + + + Header Files + + + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source 
Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files\atn + + + Source Files\atn + + + Source Files\atn + + + Source Files\atn + + + Source Files\atn + + + Source Files\atn + + + Source Files\atn + + + Source Files\atn + + + Source Files\atn + + + Source Files\atn + + + Source Files\atn + + + Source Files\atn + + + Source Files\atn + + + Source Files\atn + + + Source Files\atn + + + Source Files\atn + + + Source Files\atn + + + Source Files\atn + + + Source Files\atn + + + Source Files\atn + + + Source Files\atn + + + Source Files\atn + + + Source Files\atn + + + Source Files\atn + + + Source Files\atn + + + Source Files\atn + + + Source Files\atn + + + Source Files\atn + + + Source Files\atn + + + Source Files\atn + + + Source Files\atn + + + Source Files\atn + + + Source Files\atn + + + Source Files\atn + + + Source Files\atn + + + Source Files\atn + + + Source Files\atn + + + Source Files\atn + + + Source Files\atn + + + Source Files\atn + + + Source Files\atn + + + Source Files\atn + + + Source Files\atn + + + Source Files\atn + + + Source Files\dfa + + + Source Files\dfa + + + Source Files\dfa + + + Source Files\dfa + + + Source Files\misc + + + Source Files\misc + + + Source Files\misc + + + Source Files\support + + + Source Files\support + + + Source Files\support + + + Source Files\tree + + + Source Files\tree + + + Source Files\tree + + + Source Files\tree + + + Source Files\tree\pattern + + + Source Files\tree\pattern + + + Source Files\tree\pattern + + + Source Files\tree\pattern + + + Source Files\tree\pattern + + + Source Files\tree\pattern + + + 
Source Files\tree\pattern + + + Source Files\atn + + + Source Files\atn + + + Source Files\atn + + + Source Files\atn + + + Source Files\atn + + + Source Files\atn + + + Source Files\atn + + + Source Files\atn + + + Source Files\atn + + + Source Files\atn + + + Source Files\atn + + + Source Files\atn + + + Source Files\atn + + + Source Files\atn + + + Source Files\atn + + + Source Files\atn + + + Source Files\atn + + + Source Files\atn + + + Source Files\atn + + + Source Files + + + Source Files + + + Source Files\support + + + Source Files\tree\xpath + + + Source Files\tree\xpath + + + Source Files\tree\xpath + + + Source Files\tree\xpath + + + Source Files\tree\xpath + + + Source Files\tree\xpath + + + Source Files\tree\xpath + + + Source Files\tree\xpath + + + Source Files\tree\xpath + + + Source Files\tree\xpath + + + Source Files + + + Source Files\tree + + + Source Files\tree + + + Source Files + + + Source Files + + + Source Files + + + Source Files\atn + + + Source Files\atn + + + Source Files\misc + + + Source Files + + + Source Files + + + Source Files + + + Source Files\support + + + Source Files\tree + + + Source Files\tree + + + Source Files\tree + + + Source Files\tree + + + Source Files\tree\pattern + + + \ No newline at end of file diff --git a/runtime/Cpp/runtime/antlrcpp.xcodeproj/project.pbxproj b/runtime/Cpp/runtime/antlrcpp.xcodeproj/project.pbxproj index a256e089a..277186ce0 100644 --- a/runtime/Cpp/runtime/antlrcpp.xcodeproj/project.pbxproj +++ b/runtime/Cpp/runtime/antlrcpp.xcodeproj/project.pbxproj @@ -2889,7 +2889,7 @@ GCC_WARN_UNUSED_LABEL = YES; GCC_WARN_UNUSED_PARAMETER = YES; GCC_WARN_UNUSED_VARIABLE = YES; - HEADER_SEARCH_PATHS = src/; + HEADER_SEARCH_PATHS = "src/ thirdparty/utfcpp/source/ thirdparty/utfcpp/source/utf8/"; MACOSX_DEPLOYMENT_TARGET = 10.9; ONLY_ACTIVE_ARCH = YES; SDKROOT = macosx; @@ -2945,7 +2945,7 @@ GCC_WARN_UNUSED_LABEL = YES; GCC_WARN_UNUSED_PARAMETER = YES; GCC_WARN_UNUSED_VARIABLE = YES; - HEADER_SEARCH_PATHS = 
src/; + HEADER_SEARCH_PATHS = "src/ thirdparty/utfcpp/source/ thirdparty/utfcpp/source/utf8/"; MACOSX_DEPLOYMENT_TARGET = 10.9; SDKROOT = macosx; }; diff --git a/runtime/Cpp/runtime/antlrcpp.xcodeproj/xcshareddata/xcschemes/antlr4.xcscheme b/runtime/Cpp/runtime/antlrcpp.xcodeproj/xcshareddata/xcschemes/antlr4.xcscheme index f2e132436..dc8e3432a 100644 --- a/runtime/Cpp/runtime/antlrcpp.xcodeproj/xcshareddata/xcschemes/antlr4.xcscheme +++ b/runtime/Cpp/runtime/antlrcpp.xcodeproj/xcshareddata/xcschemes/antlr4.xcscheme @@ -29,8 +29,6 @@ shouldUseLaunchSchemeArgsEnv = "YES"> - - - - . ///

- /// TO_DO: what to do about lexers + /// TODO: what to do about lexers /// class ANTLR4CPP_PUBLIC ANTLRErrorStrategy { public: diff --git a/runtime/Cpp/runtime/src/ANTLRInputStream.cpp b/runtime/Cpp/runtime/src/ANTLRInputStream.cpp index a5e21618e..2e06c5efc 100755 --- a/runtime/Cpp/runtime/src/ANTLRInputStream.cpp +++ b/runtime/Cpp/runtime/src/ANTLRInputStream.cpp @@ -17,7 +17,11 @@ using namespace antlrcpp; using misc::Interval; +#if __cplusplus >= 201703L +ANTLRInputStream::ANTLRInputStream(std::string_view input) { +#else ANTLRInputStream::ANTLRInputStream(const std::string &input) { +#endif InitializeInstanceFields(); load(input); } @@ -31,6 +35,16 @@ ANTLRInputStream::ANTLRInputStream(std::istream &stream) { load(stream); } +#if __cplusplus >= 201703L +void ANTLRInputStream::load(std::string_view input) { + // Remove the UTF-8 BOM if present. + constexpr std::string_view bom = "\xef\xbb\xbf"; + if (input.compare(0, 3, bom) == 0) + input.remove_prefix(3); + _data = antlrcpp::utf8_to_utf32(input.data(), input.data() + input.size()); + p = 0; +} +#else void ANTLRInputStream::load(const std::string &input) { // Remove the UTF-8 BOM if present. const char bom[4] = "\xef\xbb\xbf"; @@ -40,6 +54,7 @@ void ANTLRInputStream::load(const std::string &input) { _data = antlrcpp::utf8_to_utf32(input.data(), input.data() + input.size()); p = 0; } +#endif void ANTLRInputStream::load(std::istream &stream) { if (!stream.good() || stream.eof()) // No fail, bad or EOF. diff --git a/runtime/Cpp/runtime/src/ANTLRInputStream.h b/runtime/Cpp/runtime/src/ANTLRInputStream.h index e9850504d..7b575df2e 100755 --- a/runtime/Cpp/runtime/src/ANTLRInputStream.h +++ b/runtime/Cpp/runtime/src/ANTLRInputStream.h @@ -25,11 +25,19 @@ namespace antlr4 { /// What is name or source of this char stream? 
std::string name; +#if __cplusplus >= 201703L + ANTLRInputStream(std::string_view input = ""); +#else ANTLRInputStream(const std::string &input = ""); +#endif ANTLRInputStream(const char data_[], size_t numberOfActualCharsInArray); ANTLRInputStream(std::istream &stream); +#if __cplusplus >= 201703L + virtual void load(std::string_view input); +#else virtual void load(const std::string &input); +#endif virtual void load(std::istream &stream); /// Reset the stream so that it's in the same state it was diff --git a/runtime/Cpp/runtime/src/BufferedTokenStream.cpp b/runtime/Cpp/runtime/src/BufferedTokenStream.cpp index 5a31ddb45..241dfe5c4 100755 --- a/runtime/Cpp/runtime/src/BufferedTokenStream.cpp +++ b/runtime/Cpp/runtime/src/BufferedTokenStream.cpp @@ -358,17 +358,18 @@ std::string BufferedTokenStream::getSourceName() const } std::string BufferedTokenStream::getText() { + fill(); return getText(misc::Interval(0U, size() - 1)); } std::string BufferedTokenStream::getText(const misc::Interval &interval) { lazyInit(); - fill(); size_t start = interval.a; size_t stop = interval.b; if (start == INVALID_INDEX || stop == INVALID_INDEX) { return ""; } + sync(stop); if (stop >= _tokens.size()) { stop = _tokens.size() - 1; } diff --git a/runtime/Cpp/runtime/src/CommonTokenFactory.cpp b/runtime/Cpp/runtime/src/CommonTokenFactory.cpp index 0b9a7c04f..b04d68fbc 100755 --- a/runtime/Cpp/runtime/src/CommonTokenFactory.cpp +++ b/runtime/Cpp/runtime/src/CommonTokenFactory.cpp @@ -11,7 +11,7 @@ using namespace antlr4; -const Ref> CommonTokenFactory::DEFAULT = std::make_shared(); +const std::unique_ptr> CommonTokenFactory::DEFAULT(new CommonTokenFactory); CommonTokenFactory::CommonTokenFactory(bool copyText_) : copyText(copyText_) { } diff --git a/runtime/Cpp/runtime/src/CommonTokenFactory.h b/runtime/Cpp/runtime/src/CommonTokenFactory.h index 096f93b58..129e9fc25 100755 --- a/runtime/Cpp/runtime/src/CommonTokenFactory.h +++ b/runtime/Cpp/runtime/src/CommonTokenFactory.h @@ -22,7 +22,7 
@@ namespace antlr4 { * This token factory does not explicitly copy token text when constructing * tokens.

*/ - static const Ref> DEFAULT; + static const std::unique_ptr> DEFAULT; protected: /** diff --git a/runtime/Cpp/runtime/src/IntStream.h b/runtime/Cpp/runtime/src/IntStream.h index 9932a9722..928f2c5aa 100755 --- a/runtime/Cpp/runtime/src/IntStream.h +++ b/runtime/Cpp/runtime/src/IntStream.h @@ -27,7 +27,13 @@ namespace antlr4 { /// class ANTLR4CPP_PUBLIC IntStream { public: - static const size_t EOF = static_cast(-1); // std::numeric_limits::max(); doesn't work in VS 2013 +#if __cplusplus >= 201703L + static constexpr size_t EOF = std::numeric_limits::max(); +#else + enum : size_t { + EOF = static_cast(-1), // std::numeric_limits::max(); doesn't work in VS 2013 + }; +#endif /// The value returned by when the end of the stream is /// reached. diff --git a/runtime/Cpp/runtime/src/Lexer.cpp b/runtime/Cpp/runtime/src/Lexer.cpp index 3abd4b862..6cb8cd004 100755 --- a/runtime/Cpp/runtime/src/Lexer.cpp +++ b/runtime/Cpp/runtime/src/Lexer.cpp @@ -1,4 +1,4 @@ -/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. * Use of this file is governed by the BSD 3-clause license that * can be found in the LICENSE.txt file in the project root. */ @@ -136,7 +136,7 @@ size_t Lexer::popMode() { } -Ref> Lexer::getTokenFactory() { +TokenFactory* Lexer::getTokenFactory() { return _factory; } @@ -273,7 +273,7 @@ std::string Lexer::getErrorDisplay(const std::string &s) { } void Lexer::recover(RecognitionException * /*re*/) { - // TO_DO: Do we lose character or line position information? + // TODO: Do we lose character or line position information? 
_input->consume(); } @@ -284,7 +284,7 @@ size_t Lexer::getNumberOfSyntaxErrors() { void Lexer::InitializeInstanceFields() { _syntaxErrors = 0; token = nullptr; - _factory = CommonTokenFactory::DEFAULT; + _factory = CommonTokenFactory::DEFAULT.get(); tokenStartCharIndex = INVALID_INDEX; tokenStartLine = 0; tokenStartCharPositionInLine = 0; diff --git a/runtime/Cpp/runtime/src/Lexer.h b/runtime/Cpp/runtime/src/Lexer.h index f722f7fb1..756195585 100755 --- a/runtime/Cpp/runtime/src/Lexer.h +++ b/runtime/Cpp/runtime/src/Lexer.h @@ -18,20 +18,33 @@ namespace antlr4 { /// of speed. class ANTLR4CPP_PUBLIC Lexer : public Recognizer, public TokenSource { public: - static const size_t DEFAULT_MODE = 0; - static const size_t MORE = static_cast(-2); - static const size_t SKIP = static_cast(-3); +#if __cplusplus >= 201703L + static constexpr size_t DEFAULT_MODE = 0; + static constexpr size_t MORE = std::numeric_limits::max() - 1; + static constexpr size_t SKIP = std::numeric_limits::max() - 2; - static const size_t DEFAULT_TOKEN_CHANNEL = Token::DEFAULT_CHANNEL; - static const size_t HIDDEN = Token::HIDDEN_CHANNEL; - static const size_t MIN_CHAR_VALUE = 0; - static const size_t MAX_CHAR_VALUE = 0x10FFFF; + static constexpr size_t DEFAULT_TOKEN_CHANNEL = Token::DEFAULT_CHANNEL; + static constexpr size_t HIDDEN = Token::HIDDEN_CHANNEL; + static constexpr size_t MIN_CHAR_VALUE = 0; + static constexpr size_t MAX_CHAR_VALUE = 0x10FFFF; +#else + enum : size_t { + DEFAULT_MODE = 0, + MORE = static_cast(-2), // std::numeric_limits::max() - 1; doesn't work in VS 2013 + SKIP = static_cast(-3), // std::numeric_limits::max() - 2; doesn't work in VS 2013 + + DEFAULT_TOKEN_CHANNEL = Token::DEFAULT_CHANNEL, + HIDDEN = Token::HIDDEN_CHANNEL, + MIN_CHAR_VALUE = 0, + MAX_CHAR_VALUE = 0x10FFFF, + }; +#endif CharStream *_input; // Pure reference, usually from statically allocated instance. protected: /// How to create token objects. 
- Ref> _factory; + TokenFactory *_factory; public: /// The goal of all lexer rules/methods is to create a token object. @@ -100,7 +113,7 @@ namespace antlr4 { this->_factory = factory; } - virtual Ref> getTokenFactory() override; + virtual TokenFactory* getTokenFactory() override; /// Set the char stream and reset the lexer virtual void setInputStream(IntStream *input) override; diff --git a/runtime/Cpp/runtime/src/ListTokenSource.cpp b/runtime/Cpp/runtime/src/ListTokenSource.cpp index 4a734596f..ec93cb9b6 100755 --- a/runtime/Cpp/runtime/src/ListTokenSource.cpp +++ b/runtime/Cpp/runtime/src/ListTokenSource.cpp @@ -82,11 +82,11 @@ std::string ListTokenSource::getSourceName() { return "List"; } -Ref> ListTokenSource::getTokenFactory() { +TokenFactory* ListTokenSource::getTokenFactory() { return _factory; } void ListTokenSource::InitializeInstanceFields() { i = 0; - _factory = CommonTokenFactory::DEFAULT; + _factory = CommonTokenFactory::DEFAULT.get(); } diff --git a/runtime/Cpp/runtime/src/ListTokenSource.h b/runtime/Cpp/runtime/src/ListTokenSource.h index 2f5f63345..70cba9363 100755 --- a/runtime/Cpp/runtime/src/ListTokenSource.h +++ b/runtime/Cpp/runtime/src/ListTokenSource.h @@ -40,7 +40,7 @@ namespace antlr4 { private: /// This is the backing field for and /// . 
- Ref> _factory = CommonTokenFactory::DEFAULT; + TokenFactory *_factory = CommonTokenFactory::DEFAULT.get(); public: /// Constructs a new instance from the specified @@ -79,7 +79,7 @@ namespace antlr4 { this->_factory = factory; } - virtual Ref> getTokenFactory() override; + virtual TokenFactory* getTokenFactory() override; private: void InitializeInstanceFields(); diff --git a/runtime/Cpp/runtime/src/NoViableAltException.cpp b/runtime/Cpp/runtime/src/NoViableAltException.cpp index ced7f827f..273c208c7 100755 --- a/runtime/Cpp/runtime/src/NoViableAltException.cpp +++ b/runtime/Cpp/runtime/src/NoViableAltException.cpp @@ -9,6 +9,20 @@ using namespace antlr4; +namespace { + +// Create a normal shared pointer if the configurations are to be deleted. If not, then +// the shared pointer is created with a deleter that does nothing. +Ref buildConfigsRef(atn::ATNConfigSet *configs, bool deleteConfigs) { + if (deleteConfigs) { + return Ref(configs); + } else { + return Ref(configs, [](atn::ATNConfigSet *){}); + } +} + +} + NoViableAltException::NoViableAltException(Parser *recognizer) : NoViableAltException(recognizer, recognizer->getTokenStream(), recognizer->getCurrentToken(), recognizer->getCurrentToken(), nullptr, recognizer->getContext(), false) { @@ -17,12 +31,10 @@ NoViableAltException::NoViableAltException(Parser *recognizer) NoViableAltException::NoViableAltException(Parser *recognizer, TokenStream *input,Token *startToken, Token *offendingToken, atn::ATNConfigSet *deadEndConfigs, ParserRuleContext *ctx, bool deleteConfigs) : RecognitionException("No viable alternative", recognizer, input, ctx, offendingToken), - _deadEndConfigs(deadEndConfigs), _startToken(startToken), _deleteConfigs(deleteConfigs) { + _deadEndConfigs(buildConfigsRef(deadEndConfigs, deleteConfigs)), _startToken(startToken) { } NoViableAltException::~NoViableAltException() { - if (_deleteConfigs) - delete _deadEndConfigs; } Token* NoViableAltException::getStartToken() const { @@ -30,5 +42,5 @@ 
Token* NoViableAltException::getStartToken() const { } atn::ATNConfigSet* NoViableAltException::getDeadEndConfigs() const { - return _deadEndConfigs; + return _deadEndConfigs.get(); } diff --git a/runtime/Cpp/runtime/src/NoViableAltException.h b/runtime/Cpp/runtime/src/NoViableAltException.h index 94d43c54c..b15039d0c 100755 --- a/runtime/Cpp/runtime/src/NoViableAltException.h +++ b/runtime/Cpp/runtime/src/NoViableAltException.h @@ -27,10 +27,9 @@ namespace antlr4 { private: /// Which configurations did we try at input.index() that couldn't match input.LT(1)? - atn::ATNConfigSet* _deadEndConfigs; - - // Flag that indicates if we own the dead end config set and have to delete it on destruction. - bool _deleteConfigs; + /// Shared pointer that conditionally deletes the configurations (based on flag + /// passed during construction) + Ref _deadEndConfigs; /// The token object at the start index; the input stream might /// not be buffering tokens so get a reference to it. (At the diff --git a/runtime/Cpp/runtime/src/Parser.cpp b/runtime/Cpp/runtime/src/Parser.cpp index f65da1433..8b2a1eac2 100755 --- a/runtime/Cpp/runtime/src/Parser.cpp +++ b/runtime/Cpp/runtime/src/Parser.cpp @@ -1,4 +1,4 @@ -/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. * Use of this file is governed by the BSD 3-clause license that * can be found in the LICENSE.txt file in the project root. 
*/ @@ -190,7 +190,7 @@ void Parser::removeParseListeners() { } void Parser::triggerEnterRuleEvent() { - for (auto listener : _parseListeners) { + for (auto *listener : _parseListeners) { listener->enterEveryRule(_ctx); _ctx->enterRule(listener); } @@ -208,7 +208,7 @@ size_t Parser::getNumberOfSyntaxErrors() { return _syntaxErrors; } -Ref> Parser::getTokenFactory() { +TokenFactory* Parser::getTokenFactory() { return _input->getTokenSource()->getTokenFactory(); } @@ -307,14 +307,14 @@ Token* Parser::consume() { tree::ErrorNode *node = createErrorNode(o); _ctx->addChild(node); if (_parseListeners.size() > 0) { - for (auto listener : _parseListeners) { + for (auto *listener : _parseListeners) { listener->visitErrorNode(node); } } } else { tree::TerminalNode *node = _ctx->addChild(createTerminalNode(o)); if (_parseListeners.size() > 0) { - for (auto listener : _parseListeners) { + for (auto *listener : _parseListeners) { listener->visitTerminal(node); } } @@ -464,7 +464,7 @@ bool Parser::precpred(RuleContext * /*localctx*/, int precedence) { } bool Parser::inContext(const std::string &/*context*/) { - // TO_DO: useful in parser? + // TODO: useful in parser? return false; } diff --git a/runtime/Cpp/runtime/src/Parser.h b/runtime/Cpp/runtime/src/Parser.h index 515f83548..2cdd66689 100755 --- a/runtime/Cpp/runtime/src/Parser.h +++ b/runtime/Cpp/runtime/src/Parser.h @@ -193,7 +193,7 @@ namespace antlr4 { /// virtual size_t getNumberOfSyntaxErrors(); - virtual Ref> getTokenFactory() override; + virtual TokenFactory* getTokenFactory() override; /// /// Tell our token source and error strategy about a new way to create tokens. 
diff --git a/runtime/Cpp/runtime/src/ParserRuleContext.cpp b/runtime/Cpp/runtime/src/ParserRuleContext.cpp index cfbb5f885..b67b9a93d 100755 --- a/runtime/Cpp/runtime/src/ParserRuleContext.cpp +++ b/runtime/Cpp/runtime/src/ParserRuleContext.cpp @@ -38,8 +38,8 @@ void ParserRuleContext::copyFrom(ParserRuleContext *ctx) { // copy any error nodes to alt label node if (!ctx->children.empty()) { - for (auto child : ctx->children) { - auto errorNode = dynamic_cast(child); + for (auto *child : ctx->children) { + auto *errorNode = dynamic_cast(child); if (errorNode != nullptr) { errorNode->setParent(this); children.push_back(errorNode); @@ -82,7 +82,7 @@ tree::TerminalNode* ParserRuleContext::getToken(size_t ttype, size_t i) { } size_t j = 0; // what token with ttype have we found? - for (auto o : children) { + for (auto *o : children) { if (is(o)) { tree::TerminalNode *tnode = dynamic_cast(o); Token *symbol = tnode->getSymbol(); diff --git a/runtime/Cpp/runtime/src/ParserRuleContext.h b/runtime/Cpp/runtime/src/ParserRuleContext.h index e117c3b6b..c9ffe1779 100755 --- a/runtime/Cpp/runtime/src/ParserRuleContext.h +++ b/runtime/Cpp/runtime/src/ParserRuleContext.h @@ -114,7 +114,7 @@ namespace antlr4 { template std::vector getRuleContexts() { std::vector contexts; - for (auto child : children) { + for (auto *child : children) { if (antlrcpp::is(child)) { contexts.push_back(dynamic_cast(child)); } diff --git a/runtime/Cpp/runtime/src/ProxyErrorListener.cpp b/runtime/Cpp/runtime/src/ProxyErrorListener.cpp index 4a961d7f8..0b031327b 100755 --- a/runtime/Cpp/runtime/src/ProxyErrorListener.cpp +++ b/runtime/Cpp/runtime/src/ProxyErrorListener.cpp @@ -26,28 +26,28 @@ void ProxyErrorListener::removeErrorListeners() { void ProxyErrorListener::syntaxError(Recognizer *recognizer, Token *offendingSymbol, size_t line, size_t charPositionInLine, const std::string &msg, std::exception_ptr e) { - for (auto listener : _delegates) { + for (auto *listener : _delegates) { 
listener->syntaxError(recognizer, offendingSymbol, line, charPositionInLine, msg, e); } } void ProxyErrorListener::reportAmbiguity(Parser *recognizer, const dfa::DFA &dfa, size_t startIndex, size_t stopIndex, bool exact, const antlrcpp::BitSet &ambigAlts, atn::ATNConfigSet *configs) { - for (auto listener : _delegates) { + for (auto *listener : _delegates) { listener->reportAmbiguity(recognizer, dfa, startIndex, stopIndex, exact, ambigAlts, configs); } } void ProxyErrorListener::reportAttemptingFullContext(Parser *recognizer, const dfa::DFA &dfa, size_t startIndex, size_t stopIndex, const antlrcpp::BitSet &conflictingAlts, atn::ATNConfigSet *configs) { - for (auto listener : _delegates) { + for (auto *listener : _delegates) { listener->reportAttemptingFullContext(recognizer, dfa, startIndex, stopIndex, conflictingAlts, configs); } } void ProxyErrorListener::reportContextSensitivity(Parser *recognizer, const dfa::DFA &dfa, size_t startIndex, size_t stopIndex, size_t prediction, atn::ATNConfigSet *configs) { - for (auto listener : _delegates) { + for (auto *listener : _delegates) { listener->reportContextSensitivity(recognizer, dfa, startIndex, stopIndex, prediction, configs); } } diff --git a/runtime/Cpp/runtime/src/Recognizer.h b/runtime/Cpp/runtime/src/Recognizer.h index 8c0bcb0ba..e7eac02c3 100755 --- a/runtime/Cpp/runtime/src/Recognizer.h +++ b/runtime/Cpp/runtime/src/Recognizer.h @@ -11,7 +11,13 @@ namespace antlr4 { class ANTLR4CPP_PUBLIC Recognizer { public: - static const size_t EOF = static_cast(-1); // std::numeric_limits::max(); doesn't work in VS 2013. +#if __cplusplus >= 201703L + static constexpr size_t EOF = std::numeric_limits::max(); +#else + enum : size_t { + EOF = static_cast(-1), // std::numeric_limits::max(); doesn't work in VS 2013. 
+ }; +#endif Recognizer(); Recognizer(Recognizer const&) = delete; @@ -138,7 +144,7 @@ namespace antlr4 { virtual void setInputStream(IntStream *input) = 0; - virtual Ref> getTokenFactory() = 0; + virtual TokenFactory* getTokenFactory() = 0; template void setTokenFactory(TokenFactory *input); diff --git a/runtime/Cpp/runtime/src/RuleContext.cpp b/runtime/Cpp/runtime/src/RuleContext.cpp index 73cfe24e1..467e5ec42 100755 --- a/runtime/Cpp/runtime/src/RuleContext.cpp +++ b/runtime/Cpp/runtime/src/RuleContext.cpp @@ -75,16 +75,16 @@ antlrcpp::Any RuleContext::accept(tree::ParseTreeVisitor *visitor) { return visitor->visitChildren(this); } -std::string RuleContext::toStringTree(Parser *recog) { - return tree::Trees::toStringTree(this, recog); +std::string RuleContext::toStringTree(Parser *recog, bool pretty) { + return tree::Trees::toStringTree(this, recog, pretty); } -std::string RuleContext::toStringTree(std::vector &ruleNames) { - return tree::Trees::toStringTree(this, ruleNames); +std::string RuleContext::toStringTree(std::vector &ruleNames, bool pretty) { + return tree::Trees::toStringTree(this, ruleNames, pretty); } -std::string RuleContext::toStringTree() { - return toStringTree(nullptr); +std::string RuleContext::toStringTree(bool pretty) { + return toStringTree(nullptr, pretty); } diff --git a/runtime/Cpp/runtime/src/RuleContext.h b/runtime/Cpp/runtime/src/RuleContext.h index bf0ff6631..9ee0d2def 100755 --- a/runtime/Cpp/runtime/src/RuleContext.h +++ b/runtime/Cpp/runtime/src/RuleContext.h @@ -110,15 +110,15 @@ namespace antlr4 { /// (root child1 .. childN). Print just a node if this is a leaf. /// We have to know the recognizer so we can get rule names. /// - virtual std::string toStringTree(Parser *recog) override; + virtual std::string toStringTree(Parser *recog, bool pretty = false) override; /// /// Print out a whole tree, not just a node, in LISP format /// (root child1 .. childN). Print just a node if this is a leaf. 
/// - virtual std::string toStringTree(std::vector &ruleNames); + virtual std::string toStringTree(std::vector &ruleNames, bool pretty = false); - virtual std::string toStringTree() override; + virtual std::string toStringTree(bool pretty = false) override; virtual std::string toString() override; std::string toString(Recognizer *recog); std::string toString(const std::vector &ruleNames); diff --git a/runtime/Cpp/runtime/src/RuntimeMetaData.cpp b/runtime/Cpp/runtime/src/RuntimeMetaData.cpp index 011f12a51..3ba799806 100755 --- a/runtime/Cpp/runtime/src/RuntimeMetaData.cpp +++ b/runtime/Cpp/runtime/src/RuntimeMetaData.cpp @@ -7,7 +7,7 @@ using namespace antlr4; -const std::string RuntimeMetaData::VERSION = "4.7.2"; +const std::string RuntimeMetaData::VERSION = "4.8"; std::string RuntimeMetaData::getRuntimeVersion() { return VERSION; diff --git a/runtime/Cpp/runtime/src/Token.h b/runtime/Cpp/runtime/src/Token.h index a7c1594ff..1878b28a1 100755 --- a/runtime/Cpp/runtime/src/Token.h +++ b/runtime/Cpp/runtime/src/Token.h @@ -14,24 +14,50 @@ namespace antlr4 { /// we obtained this token. class ANTLR4CPP_PUBLIC Token { public: - static const size_t INVALID_TYPE = 0; +#if __cplusplus >= 201703L + static constexpr size_t INVALID_TYPE = 0; +#else + enum : size_t { + INVALID_TYPE = 0, + }; +#endif /// During lookahead operations, this "token" signifies we hit rule end ATN state /// and did not follow it despite needing to. 
- static const size_t EPSILON = static_cast(-2); - static const size_t MIN_USER_TOKEN_TYPE = 1; - static const size_t EOF = IntStream::EOF; +#if __cplusplus >= 201703L + static constexpr size_t EPSILON = std::numeric_limits::max() - 1; + static constexpr size_t MIN_USER_TOKEN_TYPE = 1; + static constexpr size_t EOF = IntStream::EOF; +#else + enum : size_t { + EPSILON = static_cast(-2), // std::numeric_limits::max() - 1; doesn't work in VS 2013 + MIN_USER_TOKEN_TYPE = 1, + EOF = IntStream::EOF, + }; +#endif virtual ~Token(); /// All tokens go to the parser (unless skip() is called in that rule) /// on a particular "channel". The parser tunes to a particular channel /// so that whitespace etc... can go to the parser on a "hidden" channel. - static const size_t DEFAULT_CHANNEL = 0; +#if __cplusplus >= 201703L + static constexpr size_t DEFAULT_CHANNEL = 0; +#else + enum : size_t { + DEFAULT_CHANNEL = 0, + }; +#endif /// Anything on different channel than DEFAULT_CHANNEL is not parsed /// by parser. - static const size_t HIDDEN_CHANNEL = 1; +#if __cplusplus >= 201703L + static constexpr size_t HIDDEN_CHANNEL = 1; +#else + enum : size_t { + HIDDEN_CHANNEL = 1, + }; +#endif /** * This is the minimum constant value which can be assigned to a @@ -44,7 +70,13 @@ namespace antlr4 { * * @see Token#getChannel() */ - static const size_t MIN_USER_CHANNEL_VALUE = 2; +#if __cplusplus >= 201703L + static constexpr size_t MIN_USER_CHANNEL_VALUE = 2; +#else + enum : size_t { + MIN_USER_CHANNEL_VALUE = 2, + }; +#endif /// Get the text of the token. virtual std::string getText() const = 0; diff --git a/runtime/Cpp/runtime/src/TokenSource.h b/runtime/Cpp/runtime/src/TokenSource.h index 72981cea0..a8ed34f85 100755 --- a/runtime/Cpp/runtime/src/TokenSource.h +++ b/runtime/Cpp/runtime/src/TokenSource.h @@ -79,7 +79,7 @@ namespace antlr4 { /// creating objects from the input. /// /// The currently used by this token source. 
- virtual Ref> getTokenFactory() = 0; + virtual TokenFactory* getTokenFactory() = 0; }; } // namespace antlr4 diff --git a/runtime/Cpp/runtime/src/TokenStreamRewriter.cpp b/runtime/Cpp/runtime/src/TokenStreamRewriter.cpp index df20ea9b9..3ccbed757 100755 --- a/runtime/Cpp/runtime/src/TokenStreamRewriter.cpp +++ b/runtime/Cpp/runtime/src/TokenStreamRewriter.cpp @@ -94,7 +94,7 @@ TokenStreamRewriter::TokenStreamRewriter(TokenStream *tokens_) : tokens(tokens_) TokenStreamRewriter::~TokenStreamRewriter() { for (auto program : _programs) { - for (auto operation : program.second) { + for (auto *operation : program.second) { delete operation; } } @@ -323,7 +323,7 @@ std::unordered_map TokenStreamRe // Wipe prior inserts within range std::vector inserts = getKindOfOps(rewrites, i); - for (auto iop : inserts) { + for (auto *iop : inserts) { if (iop->index == rop->index) { // E.g., insert before 2, delete 2..2; update replace // text to include insert before, kill insert @@ -339,7 +339,7 @@ std::unordered_map TokenStreamRe } // Drop any prior replaces contained within std::vector prevReplaces = getKindOfOps(rewrites, i); - for (auto prevRop : prevReplaces) { + for (auto *prevRop : prevReplaces) { if (prevRop->index >= rop->index && prevRop->lastIndex <= rop->lastIndex) { // delete replace as it's a no-op. 
delete rewrites[prevRop->instructionIndex]; @@ -373,7 +373,7 @@ std::unordered_map TokenStreamRe // combine current insert with prior if any at same index std::vector prevInserts = getKindOfOps(rewrites, i); - for (auto prevIop : prevInserts) { + for (auto *prevIop : prevInserts) { if (prevIop->index == iop->index) { // combine objects // convert to strings...we're in process of toString'ing // whole token buffer so no lazy eval issue with any templates @@ -385,7 +385,7 @@ std::unordered_map TokenStreamRe } // look for replaces where iop.index is in range; error std::vector prevReplaces = getKindOfOps(rewrites, i); - for (auto rop : prevReplaces) { + for (auto *rop : prevReplaces) { if (iop->index == rop->index) { rop->text = catOpText(&iop->text, &rop->text); delete rewrites[i]; diff --git a/runtime/Cpp/runtime/src/TokenStreamRewriter.h b/runtime/Cpp/runtime/src/TokenStreamRewriter.h index 102a9e946..561607a36 100755 --- a/runtime/Cpp/runtime/src/TokenStreamRewriter.h +++ b/runtime/Cpp/runtime/src/TokenStreamRewriter.h @@ -86,8 +86,15 @@ namespace antlr4 { class ANTLR4CPP_PUBLIC TokenStreamRewriter { public: static const std::string DEFAULT_PROGRAM_NAME; - static const size_t PROGRAM_INIT_SIZE = 100; - static const size_t MIN_TOKEN_INDEX = 0; +#if __cplusplus >= 201703L + static constexpr size_t PROGRAM_INIT_SIZE = 100; + static constexpr size_t MIN_TOKEN_INDEX = 0; +#else + enum : size_t { + PROGRAM_INIT_SIZE = 100, + MIN_TOKEN_INDEX = 0, + }; +#endif TokenStreamRewriter(TokenStream *tokens); virtual ~TokenStreamRewriter(); diff --git a/runtime/Cpp/runtime/src/Vocabulary.cpp b/runtime/Cpp/runtime/src/Vocabulary.cpp index dcfa45e4b..9bbf0b23a 100755 --- a/runtime/Cpp/runtime/src/Vocabulary.cpp +++ b/runtime/Cpp/runtime/src/Vocabulary.cpp @@ -22,8 +22,7 @@ Vocabulary::Vocabulary(const std::vector &literalNames, // See note here on -1 part: https://github.com/antlr/antlr4/pull/1146 } -Vocabulary::~Vocabulary() { -} +Vocabulary::~Vocabulary() = default; Vocabulary 
Vocabulary::fromTokenNames(const std::vector &tokenNames) { if (tokenNames.empty()) { @@ -34,25 +33,18 @@ Vocabulary Vocabulary::fromTokenNames(const std::vector &tokenNames std::vector symbolicNames = tokenNames; std::locale locale; for (size_t i = 0; i < tokenNames.size(); i++) { - std::string tokenName = tokenNames[i]; - if (tokenName == "") { + const std::string& tokenName = tokenNames[i]; + if (tokenName.empty()) { continue; + } else if (tokenName.front() == '\'') { + symbolicNames[i].clear(); + } else if (std::isupper(tokenName.front(), locale)) { + literalNames[i].clear(); + } else { + // wasn't a literal or symbolic name + literalNames[i].clear(); + symbolicNames[i].clear(); } - - if (!tokenName.empty()) { - char firstChar = tokenName[0]; - if (firstChar == '\'') { - symbolicNames[i] = ""; - continue; - } else if (std::isupper(firstChar, locale)) { - literalNames[i] = ""; - continue; - } - } - - // wasn't a literal or symbolic name - literalNames[i] = ""; - symbolicNames[i] = ""; } return Vocabulary(literalNames, symbolicNames, tokenNames); diff --git a/runtime/Cpp/runtime/src/Vocabulary.h b/runtime/Cpp/runtime/src/Vocabulary.h index df78b4364..f06ce6978 100755 --- a/runtime/Cpp/runtime/src/Vocabulary.h +++ b/runtime/Cpp/runtime/src/Vocabulary.h @@ -14,10 +14,6 @@ namespace dfa { /// interface. class ANTLR4CPP_PUBLIC Vocabulary { public: - Vocabulary(Vocabulary const&) = default; - virtual ~Vocabulary(); - Vocabulary& operator=(Vocabulary const&) = default; - /// Gets an empty instance. /// /// @@ -26,7 +22,9 @@ namespace dfa { /// except . 
static const Vocabulary EMPTY_VOCABULARY; - Vocabulary() {} + Vocabulary() = default; + Vocabulary(Vocabulary const&) = default; + virtual ~Vocabulary(); /// /// Constructs a new instance of from the specified diff --git a/runtime/Cpp/runtime/src/antlr4-common.h b/runtime/Cpp/runtime/src/antlr4-common.h index 25d890b3f..47312978a 100644 --- a/runtime/Cpp/runtime/src/antlr4-common.h +++ b/runtime/Cpp/runtime/src/antlr4-common.h @@ -8,7 +8,6 @@ #include #include #include -#include #include #include #include @@ -37,6 +36,10 @@ #include #include +#ifndef USE_UTF8_INSTEAD_OF_CODECVT + #include +#endif + // Defines for the Guid class and other platform dependent stuff. #ifdef _WIN32 #ifdef _MSC_VER diff --git a/runtime/Cpp/runtime/src/atn/ATN.cpp b/runtime/Cpp/runtime/src/atn/ATN.cpp index 21924a27d..2334e6bca 100755 --- a/runtime/Cpp/runtime/src/atn/ATN.cpp +++ b/runtime/Cpp/runtime/src/atn/ATN.cpp @@ -183,7 +183,7 @@ std::string ATN::toString() const { ss << "states (" << states.size() << ") {" << std::endl; size_t index = 0; - for (auto state : states) { + for (auto *state : states) { if (state == nullptr) { ss << " " << index++ << ": nul" << std::endl; } else { @@ -193,7 +193,7 @@ std::string ATN::toString() const { } index = 0; - for (auto state : decisionToState) { + for (auto *state : decisionToState) { if (state == nullptr) { ss << " " << index++ << ": nul" << std::endl; } else { diff --git a/runtime/Cpp/runtime/src/atn/ATN.h b/runtime/Cpp/runtime/src/atn/ATN.h index 9c40cee30..125469b92 100755 --- a/runtime/Cpp/runtime/src/atn/ATN.h +++ b/runtime/Cpp/runtime/src/atn/ATN.h @@ -12,7 +12,13 @@ namespace atn { class ANTLR4CPP_PUBLIC ATN { public: - static const size_t INVALID_ALT_NUMBER = 0; +#if __cplusplus >= 201703L + static constexpr size_t INVALID_ALT_NUMBER = 0; +#else + enum : size_t { + INVALID_ALT_NUMBER = 0, + }; +#endif /// Used for runtime deserialization of ATNs from strings. 
ATN(); diff --git a/runtime/Cpp/runtime/src/atn/ATNConfig.cpp b/runtime/Cpp/runtime/src/atn/ATNConfig.cpp index a775ccbfa..c490f041d 100755 --- a/runtime/Cpp/runtime/src/atn/ATNConfig.cpp +++ b/runtime/Cpp/runtime/src/atn/ATNConfig.cpp @@ -11,8 +11,6 @@ using namespace antlr4::atn; -const size_t ATNConfig::SUPPRESS_PRECEDENCE_FILTER = 0x40000000; - ATNConfig::ATNConfig(ATNState *state_, size_t alt_, Ref const& context_) : ATNConfig(state_, alt_, context_, SemanticContext::NONE) { } diff --git a/runtime/Cpp/runtime/src/atn/ATNConfig.h b/runtime/Cpp/runtime/src/atn/ATNConfig.h index 700a6e120..767655b33 100755 --- a/runtime/Cpp/runtime/src/atn/ATNConfig.h +++ b/runtime/Cpp/runtime/src/atn/ATNConfig.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. * Use of this file is governed by the BSD 3-clause license that * can be found in the LICENSE.txt file in the project root. */ @@ -57,7 +57,7 @@ namespace atn { *

* closure() tracks the depth of how far we dip into the outer context: * depth > 0. Note that it may not be totally accurate depth since I - * don't ever decrement. TO_DO: make it a boolean then

+ * don't ever decrement. TODO: make it a boolean then

* *

* For memory efficiency, the {@link #isPrecedenceFilterSuppressed} method @@ -87,7 +87,6 @@ namespace atn { ATNConfig(ATNConfig const&) = default; virtual ~ATNConfig(); - ATNConfig& operator=(ATNConfig const&) = default; virtual size_t hashCode() const; @@ -115,7 +114,13 @@ namespace atn { * {@link #isPrecedenceFilterSuppressed} property as a bit within the * existing {@link #reachesIntoOuterContext} field. */ - static const size_t SUPPRESS_PRECEDENCE_FILTER; +#if __cplusplus >= 201703L + static constexpr size_t SUPPRESS_PRECEDENCE_FILTER = 0x40000000; +#else + enum : size_t { + SUPPRESS_PRECEDENCE_FILTER = 0x40000000, + }; +#endif }; } // namespace atn @@ -140,7 +145,7 @@ namespace std { size_t operator() (const std::vector> &vector) const { std::size_t seed = 0; - for (auto &config : vector) { + for (const auto &config : vector) { seed ^= config->hashCode() + 0x9e3779b9 + (seed << 6) + (seed >> 2); } return seed; diff --git a/runtime/Cpp/runtime/src/atn/ATNConfigSet.h b/runtime/Cpp/runtime/src/atn/ATNConfigSet.h index 843b055eb..850a07c12 100755 --- a/runtime/Cpp/runtime/src/atn/ATNConfigSet.h +++ b/runtime/Cpp/runtime/src/atn/ATNConfigSet.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. * Use of this file is governed by the BSD 3-clause license that * can be found in the LICENSE.txt file in the project root. */ @@ -18,8 +18,8 @@ namespace atn { /// Track the elements as they are added to the set; supports get(i) std::vector> configs; - // TO_DO: these fields make me pretty uncomfortable but nice to pack up info together, saves recomputation - // TO_DO: can we track conflicts as they are added to save scanning configs later? + // TODO: these fields make me pretty uncomfortable but nice to pack up info together, saves recomputation + // TODO: can we track conflicts as they are added to save scanning configs later? 
size_t uniqueAlt; /** Currently this is only used when we detect SLL conflict; this does diff --git a/runtime/Cpp/runtime/src/atn/ATNDeserializer.cpp b/runtime/Cpp/runtime/src/atn/ATNDeserializer.cpp index c6cceda13..d1d622aeb 100755 --- a/runtime/Cpp/runtime/src/atn/ATNDeserializer.cpp +++ b/runtime/Cpp/runtime/src/atn/ATNDeserializer.cpp @@ -57,8 +57,6 @@ using namespace antlr4; using namespace antlr4::atn; using namespace antlrcpp; -const size_t ATNDeserializer::SERIALIZED_VERSION = 3; - namespace { uint32_t deserializeInt32(const std::vector& data, size_t offset) { diff --git a/runtime/Cpp/runtime/src/atn/ATNDeserializer.h b/runtime/Cpp/runtime/src/atn/ATNDeserializer.h index 621e03db7..12fd11d8c 100755 --- a/runtime/Cpp/runtime/src/atn/ATNDeserializer.h +++ b/runtime/Cpp/runtime/src/atn/ATNDeserializer.h @@ -13,7 +13,13 @@ namespace atn { class ANTLR4CPP_PUBLIC ATNDeserializer { public: - static const size_t SERIALIZED_VERSION; +#if __cplusplus >= 201703L + static constexpr size_t SERIALIZED_VERSION = 3; +#else + enum : size_t { + SERIALIZED_VERSION = 3, + }; +#endif /// This is the current serialized UUID. // ml: defined as function to avoid the “static initialization order fiasco”. diff --git a/runtime/Cpp/runtime/src/atn/ATNSerializer.cpp b/runtime/Cpp/runtime/src/atn/ATNSerializer.cpp index 206c74281..293bee5e9 100755 --- a/runtime/Cpp/runtime/src/atn/ATNSerializer.cpp +++ b/runtime/Cpp/runtime/src/atn/ATNSerializer.cpp @@ -166,7 +166,7 @@ std::vector ATNSerializer::serialize() { } data.push_back(containsEof ? 
1 : 0); - for (auto &interval : set.getIntervals()) { + for (const auto &interval : set.getIntervals()) { if (interval.a == -1) { if (interval.b == -1) { continue; diff --git a/runtime/Cpp/runtime/src/atn/ATNState.cpp b/runtime/Cpp/runtime/src/atn/ATNState.cpp index 9bc074ce0..5dcb85d84 100755 --- a/runtime/Cpp/runtime/src/atn/ATNState.cpp +++ b/runtime/Cpp/runtime/src/atn/ATNState.cpp @@ -17,7 +17,7 @@ ATNState::ATNState() { } ATNState::~ATNState() { - for (auto transition : transitions) { + for (auto *transition : transitions) { delete transition; } } diff --git a/runtime/Cpp/runtime/src/atn/ATNState.h b/runtime/Cpp/runtime/src/atn/ATNState.h index 6c73d9435..8ed613735 100755 --- a/runtime/Cpp/runtime/src/atn/ATNState.h +++ b/runtime/Cpp/runtime/src/atn/ATNState.h @@ -81,8 +81,15 @@ namespace atn { ATNState& operator=(ATNState const&) = delete; - static const size_t INITIAL_NUM_TRANSITIONS = 4; - static const size_t INVALID_STATE_NUMBER = static_cast(-1); // std::numeric_limits::max(); +#if __cplusplus >= 201703L + static constexpr size_t INITIAL_NUM_TRANSITIONS = 4; + static constexpr size_t INVALID_STATE_NUMBER = std::numeric_limits::max(); +#else + enum : size_t { + INITIAL_NUM_TRANSITIONS = 4, + INVALID_STATE_NUMBER = static_cast(-1), // std::numeric_limits::max(); doesn't work in VS 2013 + }; +#endif enum { ATN_INVALID_TYPE = 0, diff --git a/runtime/Cpp/runtime/src/atn/AtomTransition.h b/runtime/Cpp/runtime/src/atn/AtomTransition.h index b3fa18864..cc22e5ad9 100755 --- a/runtime/Cpp/runtime/src/atn/AtomTransition.h +++ b/runtime/Cpp/runtime/src/atn/AtomTransition.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. * Use of this file is governed by the BSD 3-clause license that * can be found in the LICENSE.txt file in the project root. */ @@ -10,7 +10,7 @@ namespace antlr4 { namespace atn { - /// TO_DO: make all transitions sets? no, should remove set edges. 
+ /// TODO: make all transitions sets? no, should remove set edges. class ANTLR4CPP_PUBLIC AtomTransition final : public Transition { public: /// The token type or character value; or, signifies special label. diff --git a/runtime/Cpp/runtime/src/atn/LL1Analyzer.h b/runtime/Cpp/runtime/src/atn/LL1Analyzer.h index b945411b5..e297bc9a9 100755 --- a/runtime/Cpp/runtime/src/atn/LL1Analyzer.h +++ b/runtime/Cpp/runtime/src/atn/LL1Analyzer.h @@ -17,7 +17,13 @@ namespace atn { public: /// Special value added to the lookahead sets to indicate that we hit /// a predicate during analysis if {@code seeThruPreds==false}. - static const size_t HIT_PRED = Token::INVALID_TYPE; +#if __cplusplus >= 201703L + static constexpr size_t HIT_PRED = Token::INVALID_TYPE; +#else + enum : size_t { + HIT_PRED = Token::INVALID_TYPE, + }; +#endif const atn::ATN &_atn; diff --git a/runtime/Cpp/runtime/src/atn/LexerATNSimulator.cpp b/runtime/Cpp/runtime/src/atn/LexerATNSimulator.cpp index c21ef0b2c..827c3d59f 100755 --- a/runtime/Cpp/runtime/src/atn/LexerATNSimulator.cpp +++ b/runtime/Cpp/runtime/src/atn/LexerATNSimulator.cpp @@ -422,7 +422,7 @@ Ref LexerATNSimulator::getEpsilonTarget(CharStream *input, const if (config->context == nullptr|| config->context->hasEmptyPath()) { // execute actions anywhere in the start rule for a token. // - // TO_DO: if the entry rule is invoked recursively, some + // TODO: if the entry rule is invoked recursively, some // actions may be executed during the recursive call. The // problem can appear when hasEmptyPath() is true but // isEmpty() is false. 
In this case, the config needs to be diff --git a/runtime/Cpp/runtime/src/atn/LexerATNSimulator.h b/runtime/Cpp/runtime/src/atn/LexerATNSimulator.h index fa113f849..c050d516c 100755 --- a/runtime/Cpp/runtime/src/atn/LexerATNSimulator.h +++ b/runtime/Cpp/runtime/src/atn/LexerATNSimulator.h @@ -38,8 +38,15 @@ namespace atn { public: - static const size_t MIN_DFA_EDGE = 0; - static const size_t MAX_DFA_EDGE = 127; // forces unicode to stay in ATN +#if __cplusplus >= 201703L + static constexpr size_t MIN_DFA_EDGE = 0; + static constexpr size_t MAX_DFA_EDGE = 127; // forces unicode to stay in ATN +#else + enum : size_t { + MIN_DFA_EDGE = 0, + MAX_DFA_EDGE = 127, // forces unicode to stay in ATN + }; +#endif protected: ///

diff --git a/runtime/Cpp/runtime/src/atn/ParserATNSimulator.cpp b/runtime/Cpp/runtime/src/atn/ParserATNSimulator.cpp index 7d6cd00f6..025c9c937 100755 --- a/runtime/Cpp/runtime/src/atn/ParserATNSimulator.cpp +++ b/runtime/Cpp/runtime/src/atn/ParserATNSimulator.cpp @@ -774,7 +774,7 @@ std::pair ParserATNSimulator::splitAccordingToSe BitSet ParserATNSimulator::evalSemanticContext(std::vector predPredictions, ParserRuleContext *outerContext, bool complete) { BitSet predictions; - for (auto prediction : predPredictions) { + for (auto *prediction : predPredictions) { if (prediction->pred == SemanticContext::NONE) { predictions.set(prediction->alt); if (!complete) { @@ -922,7 +922,7 @@ void ParserATNSimulator::closure_(Ref const& config, ATNConfigSet *co } } - configs->dipsIntoOuterContext = true; // TO_DO: can remove? only care when we add to set per middle of this method + configs->dipsIntoOuterContext = true; // TODO: can remove? only care when we add to set per middle of this method assert(newDepth > INT_MIN); newDepth--; @@ -1348,6 +1348,8 @@ Parser* ParserATNSimulator::getParser() { return parser; } +#pragma warning (disable:4996) // 'getenv': This function or variable may be unsafe. Consider using _dupenv_s instead. 
+ bool ParserATNSimulator::getLrLoopSetting() { char *var = std::getenv("TURN_OFF_LR_LOOP_ENTRY_BRANCH_OPT"); if (var == nullptr) @@ -1356,6 +1358,8 @@ bool ParserATNSimulator::getLrLoopSetting() { return value == "true" || value == "1"; } +#pragma warning (default:4996) + void ParserATNSimulator::InitializeInstanceFields() { _mode = PredictionMode::LL; _startIndex = 0; diff --git a/runtime/Cpp/runtime/src/atn/ParserATNSimulator.h b/runtime/Cpp/runtime/src/atn/ParserATNSimulator.h index 7e92c906b..6520a44bd 100755 --- a/runtime/Cpp/runtime/src/atn/ParserATNSimulator.h +++ b/runtime/Cpp/runtime/src/atn/ParserATNSimulator.h @@ -760,7 +760,7 @@ namespace atn { virtual bool evalSemanticContext(Ref const& pred, ParserRuleContext *parserCallStack, size_t alt, bool fullCtx); - /* TO_DO: If we are doing predicates, there is no point in pursuing + /* TODO: If we are doing predicates, there is no point in pursuing closure operations if we reach a DFA state that uniquely predicts alternative. We will not be caching that DFA state and it is a waste to pursue the closure. Might have to advance when we do diff --git a/runtime/Cpp/runtime/src/atn/PredicateTransition.h b/runtime/Cpp/runtime/src/atn/PredicateTransition.h index fed28bdf3..4d9b4205d 100755 --- a/runtime/Cpp/runtime/src/atn/PredicateTransition.h +++ b/runtime/Cpp/runtime/src/atn/PredicateTransition.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. * Use of this file is governed by the BSD 3-clause license that * can be found in the LICENSE.txt file in the project root. */ @@ -11,7 +11,7 @@ namespace antlr4 { namespace atn { - /// TO_DO: this is old comment: + /// TODO: this is old comment: /// A tree of semantic predicates from the grammar AST if label==SEMPRED. 
/// In the ATN, labels will always be exactly one predicate, but the DFA /// may have to combine a bunch of them as it collects predicates from diff --git a/runtime/Cpp/runtime/src/atn/PredictionContext.cpp b/runtime/Cpp/runtime/src/atn/PredictionContext.cpp index 597e083a6..860a18056 100755 --- a/runtime/Cpp/runtime/src/atn/PredictionContext.cpp +++ b/runtime/Cpp/runtime/src/atn/PredictionContext.cpp @@ -1,4 +1,4 @@ -/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. * Use of this file is governed by the BSD 3-clause license that * can be found in the LICENSE.txt file in the project root. */ @@ -335,7 +335,7 @@ Ref PredictionContext::mergeArrays(const Ref M = std::make_shared(mergedParents, mergedReturnStates); // if we created same array as a or b, return that instead - // TO_DO: track whether this is possible above during merge sort for speed + // TODO: track whether this is possible above during merge sort for speed if (*M == *a) { if (mergeCache != nullptr) { mergeCache->put(a, b, a); diff --git a/runtime/Cpp/runtime/src/atn/PredictionContext.h b/runtime/Cpp/runtime/src/atn/PredictionContext.h index 9a52e00e5..e8dfc23f7 100755 --- a/runtime/Cpp/runtime/src/atn/PredictionContext.h +++ b/runtime/Cpp/runtime/src/atn/PredictionContext.h @@ -30,10 +30,22 @@ namespace atn { // ml: originally Integer.MAX_VALUE, which would be -1 for us, but this is already used in places where // -1 is converted to unsigned, so we use a different value here. Any value does the job provided it doesn't // conflict with real return states. 
- static const size_t EMPTY_RETURN_STATE = static_cast(-10); // std::numeric_limits::max() - 9; +#if __cplusplus >= 201703L + static constexpr size_t EMPTY_RETURN_STATE = std::numeric_limits::max() - 9; +#else + enum : size_t { + EMPTY_RETURN_STATE = static_cast(-10), // std::numeric_limits::max() - 9; doesn't work in VS 2013 + }; +#endif private: - static const size_t INITIAL_HASH = 1; +#if __cplusplus >= 201703L + static constexpr size_t INITIAL_HASH = 1; +#else + enum : size_t { + INITIAL_HASH = 1, + }; +#endif public: static size_t globalNodeCount; diff --git a/runtime/Cpp/runtime/src/atn/ProfilingATNSimulator.cpp b/runtime/Cpp/runtime/src/atn/ProfilingATNSimulator.cpp index 6b2762022..62fc12f0a 100755 --- a/runtime/Cpp/runtime/src/atn/ProfilingATNSimulator.cpp +++ b/runtime/Cpp/runtime/src/atn/ProfilingATNSimulator.cpp @@ -99,7 +99,7 @@ std::unique_ptr ProfilingATNSimulator::computeReachSet(ATNConfigSe _decisions[_currentDecision].LL_ATNTransitions++; // count computation even if error if (reachConfigs != nullptr) { } else { // no reach on current lookahead symbol. ERROR. 
- // TO_DO: does not handle delayed errors per getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule() + // TODO: does not handle delayed errors per getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule() _decisions[_currentDecision].errors.push_back(ErrorInfo(_currentDecision, closure, _input, _startIndex, _llStopIndex, true)); } } else { diff --git a/runtime/Cpp/runtime/src/dfa/DFA.cpp b/runtime/Cpp/runtime/src/dfa/DFA.cpp index 223635491..3f83180af 100755 --- a/runtime/Cpp/runtime/src/dfa/DFA.cpp +++ b/runtime/Cpp/runtime/src/dfa/DFA.cpp @@ -46,7 +46,7 @@ DFA::DFA(DFA &&other) : atnStartState(other.atnStartState), decision(other.decis DFA::~DFA() { bool s0InList = (s0 == nullptr); - for (auto state : states) { + for (auto *state : states) { if (state == s0) s0InList = true; delete state; @@ -88,7 +88,7 @@ void DFA::setPrecedenceStartState(int precedence, DFAState *startState, SingleWr std::vector DFA::getStates() const { std::vector result; - for (auto state : states) + for (auto *state : states) result.push_back(state); std::sort(result.begin(), result.end(), [](DFAState *o1, DFAState *o2) -> bool { diff --git a/runtime/Cpp/runtime/src/dfa/DFASerializer.cpp b/runtime/Cpp/runtime/src/dfa/DFASerializer.cpp index 34c87a560..d27e53fa7 100755 --- a/runtime/Cpp/runtime/src/dfa/DFASerializer.cpp +++ b/runtime/Cpp/runtime/src/dfa/DFASerializer.cpp @@ -27,7 +27,7 @@ std::string DFASerializer::toString() const { std::stringstream ss; std::vector states = _dfa->getStates(); - for (auto s : states) { + for (auto *s : states) { for (size_t i = 0; i < s->edges.size(); i++) { DFAState *t = s->edges[i]; if (t != nullptr && t->stateNumber != INT32_MAX) { diff --git a/runtime/Cpp/runtime/src/dfa/DFAState.cpp b/runtime/Cpp/runtime/src/dfa/DFAState.cpp index a9118dc95..998fed3fb 100755 --- a/runtime/Cpp/runtime/src/dfa/DFAState.cpp +++ b/runtime/Cpp/runtime/src/dfa/DFAState.cpp @@ -42,7 +42,7 @@ DFAState::DFAState(std::unique_ptr configs_) : DFAState() { } 
DFAState::~DFAState() { - for (auto predicate : predicates) { + for (auto *predicate : predicates) { delete predicate; } } diff --git a/runtime/Cpp/runtime/src/misc/IntervalSet.cpp b/runtime/Cpp/runtime/src/misc/IntervalSet.cpp index 031b9ba5b..80182da00 100755 --- a/runtime/Cpp/runtime/src/misc/IntervalSet.cpp +++ b/runtime/Cpp/runtime/src/misc/IntervalSet.cpp @@ -112,7 +112,7 @@ void IntervalSet::add(const Interval &addition) { IntervalSet IntervalSet::Or(const std::vector &sets) { IntervalSet result; - for (auto &s : sets) { + for (const auto &s : sets) { result.addAll(s); } return result; @@ -271,7 +271,7 @@ bool IntervalSet::contains(ssize_t el) const { if (el < _intervals[0].a) // list is sorted and el is before first interval; not here return false; - for (auto &interval : _intervals) { + for (const auto &interval : _intervals) { if (el >= interval.a && el <= interval.b) { return true; // found in this interval } @@ -315,7 +315,7 @@ std::vector const& IntervalSet::getIntervals() const { size_t IntervalSet::hashCode() const { size_t hash = MurmurHash::initialize(); - for (auto &interval : _intervals) { + for (const auto &interval : _intervals) { hash = MurmurHash::update(hash, interval.a); hash = MurmurHash::update(hash, interval.b); } @@ -349,7 +349,7 @@ std::string IntervalSet::toString(bool elemAreChar) const { } bool firstEntry = true; - for (auto &interval : _intervals) { + for (const auto &interval : _intervals) { if (!firstEntry) ss << ", "; firstEntry = false; @@ -395,7 +395,7 @@ std::string IntervalSet::toString(const dfa::Vocabulary &vocabulary) const { } bool firstEntry = true; - for (auto &interval : _intervals) { + for (const auto &interval : _intervals) { if (!firstEntry) ss << ", "; firstEntry = false; @@ -436,7 +436,7 @@ std::string IntervalSet::elementName(const dfa::Vocabulary &vocabulary, ssize_t size_t IntervalSet::size() const { size_t result = 0; - for (auto &interval : _intervals) { + for (const auto &interval : _intervals) { result += 
size_t(interval.b - interval.a + 1); } return result; @@ -444,7 +444,7 @@ size_t IntervalSet::size() const { std::vector IntervalSet::toList() const { std::vector result; - for (auto &interval : _intervals) { + for (const auto &interval : _intervals) { ssize_t a = interval.a; ssize_t b = interval.b; for (ssize_t v = a; v <= b; v++) { @@ -456,7 +456,7 @@ std::vector IntervalSet::toList() const { std::set IntervalSet::toSet() const { std::set result; - for (auto &interval : _intervals) { + for (const auto &interval : _intervals) { ssize_t a = interval.a; ssize_t b = interval.b; for (ssize_t v = a; v <= b; v++) { @@ -468,7 +468,7 @@ std::set IntervalSet::toSet() const { ssize_t IntervalSet::get(size_t i) const { size_t index = 0; - for (auto &interval : _intervals) { + for (const auto &interval : _intervals) { ssize_t a = interval.a; ssize_t b = interval.b; for (ssize_t v = a; v <= b; v++) { diff --git a/runtime/Cpp/runtime/src/misc/MurmurHash.h b/runtime/Cpp/runtime/src/misc/MurmurHash.h index b8b5a5579..598e13d4c 100755 --- a/runtime/Cpp/runtime/src/misc/MurmurHash.h +++ b/runtime/Cpp/runtime/src/misc/MurmurHash.h @@ -13,7 +13,13 @@ namespace misc { class ANTLR4CPP_PUBLIC MurmurHash { private: - static const size_t DEFAULT_SEED = 0; +#if __cplusplus >= 201703L + static constexpr size_t DEFAULT_SEED = 0; +#else + enum : size_t { + DEFAULT_SEED = 0, + }; +#endif /// Initialize the hash using the default seed value. /// Returns the intermediate hash value. 
diff --git a/runtime/Cpp/runtime/src/support/Any.h b/runtime/Cpp/runtime/src/support/Any.h index 5db59f6e6..468db9894 100644 --- a/runtime/Cpp/runtime/src/support/Any.h +++ b/runtime/Cpp/runtime/src/support/Any.h @@ -79,7 +79,7 @@ struct ANTLR4CPP_PUBLIC Any if (_ptr == a._ptr) return *this; - auto old_ptr = _ptr; + auto * old_ptr = _ptr; _ptr = a.clone(); if (old_ptr) diff --git a/runtime/Cpp/runtime/src/support/Arrays.cpp b/runtime/Cpp/runtime/src/support/Arrays.cpp index 694e44c8a..b3c4f94f2 100644 --- a/runtime/Cpp/runtime/src/support/Arrays.cpp +++ b/runtime/Cpp/runtime/src/support/Arrays.cpp @@ -16,7 +16,7 @@ std::string Arrays::listToString(const std::vector &list, const std bool firstEntry = true; ss << '['; - for (auto &entry : list) { + for (const auto &entry : list) { ss << entry; if (firstEntry) { ss << separator; @@ -32,7 +32,7 @@ template <> std::string Arrays::toString(const std::vector &source) { std::string result = "["; bool firstEntry = true; - for (auto value : source) { + for (auto *value : source) { result += value->toStringTree(); if (firstEntry) { result += ", "; diff --git a/runtime/Cpp/runtime/src/support/CPPUtils.cpp b/runtime/Cpp/runtime/src/support/CPPUtils.cpp index 2ca43d34d..86a3751a2 100755 --- a/runtime/Cpp/runtime/src/support/CPPUtils.cpp +++ b/runtime/Cpp/runtime/src/support/CPPUtils.cpp @@ -46,7 +46,7 @@ namespace antlrcpp { case ' ': if (escapeSpaces) { - result += "·"; + result += "\u00B7"; break; } // else fall through diff --git a/runtime/Cpp/runtime/src/support/StringUtils.cpp b/runtime/Cpp/runtime/src/support/StringUtils.cpp index 552f1031a..15e7b8368 100644 --- a/runtime/Cpp/runtime/src/support/StringUtils.cpp +++ b/runtime/Cpp/runtime/src/support/StringUtils.cpp @@ -20,16 +20,26 @@ void replaceAll(std::string& str, std::string const& from, std::string const& to } std::string ws2s(std::wstring const& wstr) { +#ifndef USE_UTF8_INSTEAD_OF_CODECVT std::wstring_convert> converter; std::string narrow = 
converter.to_bytes(wstr); +#else + std::string narrow; + utf8::utf32to8(wstr.begin(), wstr.end(), std::back_inserter(narrow)); +#endif return narrow; } std::wstring s2ws(const std::string &str) { +#ifndef USE_UTF8_INSTEAD_OF_CODECVT std::wstring_convert> converter; std::wstring wide = converter.from_bytes(str); - +#else + std::wstring wide; + utf8::utf8to32(str.begin(), str.end(), std::back_inserter(wide)); +#endif + return wide; } diff --git a/runtime/Cpp/runtime/src/support/StringUtils.h b/runtime/Cpp/runtime/src/support/StringUtils.h index 49715287e..d00cc52d9 100644 --- a/runtime/Cpp/runtime/src/support/StringUtils.h +++ b/runtime/Cpp/runtime/src/support/StringUtils.h @@ -7,43 +7,65 @@ #include "antlr4-common.h" +#ifdef USE_UTF8_INSTEAD_OF_CODECVT +#include "utf8.h" +#endif + namespace antlrcpp { // For all conversions utf8 <-> utf32. + // I wouldn't prefer wstring_convert because: according to + // https://en.cppreference.com/w/cpp/locale/wstring_convert, + // wstring_convert is deprecated in C++17. + // utfcpp (https://github.com/nemtrif/utfcpp) is a substitution. +#ifndef USE_UTF8_INSTEAD_OF_CODECVT // VS 2015 and VS 2017 have different bugs in std::codecvt_utf8 (VS 2013 works fine). -#if defined(_MSC_VER) && _MSC_VER >= 1900 && _MSC_VER < 2000 - typedef std::wstring_convert, __int32> UTF32Converter; -#else - typedef std::wstring_convert, char32_t> UTF32Converter; + #if defined(_MSC_VER) && _MSC_VER >= 1900 && _MSC_VER < 2000 + typedef std::wstring_convert, __int32> UTF32Converter; + #else + typedef std::wstring_convert, char32_t> UTF32Converter; + #endif #endif - + // The conversion functions fails in VS2017, so we explicitly use a workaround. template inline std::string utf32_to_utf8(T const& data) { - // Don't make the converter static or we have to serialize access to it. - thread_local UTF32Converter converter; + #ifndef USE_UTF8_INSTEAD_OF_CODECVT + // Don't make the converter static or we have to serialize access to it. 
+ thread_local UTF32Converter converter; - #if defined(_MSC_VER) && _MSC_VER >= 1900 && _MSC_VER < 2000 - auto p = reinterpret_cast(data.data()); - return converter.to_bytes(p, p + data.size()); + #if defined(_MSC_VER) && _MSC_VER >= 1900 && _MSC_VER < 2000 + const auto p = reinterpret_cast(data.data()); + return converter.to_bytes(p, p + data.size()); + #else + return converter.to_bytes(data); + #endif #else - return converter.to_bytes(data); + std::string narrow; + utf8::utf32to8(data.begin(), data.end(), std::back_inserter(narrow)); + return narrow; #endif } inline UTF32String utf8_to_utf32(const char* first, const char* last) { - thread_local UTF32Converter converter; + #ifndef USE_UTF8_INSTEAD_OF_CODECVT + thread_local UTF32Converter converter; - #if defined(_MSC_VER) && _MSC_VER >= 1900 && _MSC_VER < 2000 - auto r = converter.from_bytes(first, last); - i32string s = reinterpret_cast(r.data()); + #if defined(_MSC_VER) && _MSC_VER >= 1900 && _MSC_VER < 2000 + auto r = converter.from_bytes(first, last); + i32string s = reinterpret_cast(r.data()); + return s; + #else + std::u32string s = converter.from_bytes(first, last); + return s; + #endif #else - std::u32string s = converter.from_bytes(first, last); + UTF32String wide; + utf8::utf8to32(first, last, std::back_inserter(wide)); + return wide; #endif - - return s; } void replaceAll(std::string &str, std::string const& from, std::string const& to); diff --git a/runtime/Cpp/runtime/src/tree/ParseTree.h b/runtime/Cpp/runtime/src/tree/ParseTree.h index ee50b8039..3b91be80e 100755 --- a/runtime/Cpp/runtime/src/tree/ParseTree.h +++ b/runtime/Cpp/runtime/src/tree/ParseTree.h @@ -39,12 +39,12 @@ namespace tree { /// Print out a whole tree, not just a node, in LISP format /// {@code (root child1 .. childN)}. Print just a node if this is a leaf. 
- virtual std::string toStringTree() = 0; + virtual std::string toStringTree(bool pretty = false) = 0; virtual std::string toString() = 0; /// Specialize toStringTree so that it can print out more information /// based upon the parser. - virtual std::string toStringTree(Parser *parser) = 0; + virtual std::string toStringTree(Parser *parser, bool pretty = false) = 0; virtual bool operator == (const ParseTree &other) const; @@ -88,7 +88,7 @@ namespace tree { } void reset() { - for (auto entry : _allocated) + for (auto * entry : _allocated) delete entry; _allocated.clear(); } diff --git a/runtime/Cpp/runtime/src/tree/ParseTreeWalker.h b/runtime/Cpp/runtime/src/tree/ParseTreeWalker.h index ca3e24180..166ad806b 100755 --- a/runtime/Cpp/runtime/src/tree/ParseTreeWalker.h +++ b/runtime/Cpp/runtime/src/tree/ParseTreeWalker.h @@ -15,15 +15,39 @@ namespace tree { static ParseTreeWalker &DEFAULT; virtual ~ParseTreeWalker(); - + + /** + * + * Performs a walk on the given parse tree starting at the root and going down recursively + * with depth-first search. On each node, is called before + * recursively walking down into child nodes, then + * is called after the recursive call to wind up. + * + * The listener used by the walker to process grammar rules + * The parse tree to be walked on + */ virtual void walk(ParseTreeListener *listener, ParseTree *t) const; protected: - /// The discovery of a rule node, involves sending two events: the generic - /// and a - /// -specific event. First we trigger the generic and then - /// the rule specific. We do them in reverse order upon finishing the node. 
+ + /** + * + * Enters a grammar rule by first triggering the generic event + * then by triggering the event specific to the given parse tree node + * + * The listener responding to the trigger events + * The grammar rule containing the rule context + */ virtual void enterRule(ParseTreeListener *listener, ParseTree *r) const; + + /** + * + * Exits a grammar rule by first triggering the event specific to the given parse tree node + * then by triggering the generic event + * + * The listener responding to the trigger events + * The grammar rule containing the rule context + */ virtual void exitRule(ParseTreeListener *listener, ParseTree *r) const; }; diff --git a/runtime/Cpp/runtime/src/tree/TerminalNodeImpl.cpp b/runtime/Cpp/runtime/src/tree/TerminalNodeImpl.cpp index 0f806d4e7..7ab121b73 100755 --- a/runtime/Cpp/runtime/src/tree/TerminalNodeImpl.cpp +++ b/runtime/Cpp/runtime/src/tree/TerminalNodeImpl.cpp @@ -41,7 +41,7 @@ std::string TerminalNodeImpl::getText() { return symbol->getText(); } -std::string TerminalNodeImpl::toStringTree(Parser * /*parser*/) { +std::string TerminalNodeImpl::toStringTree(Parser * /*parser*/, bool /*pretty*/) { return toString(); } @@ -52,6 +52,6 @@ std::string TerminalNodeImpl::toString() { return symbol->getText(); } -std::string TerminalNodeImpl::toStringTree() { +std::string TerminalNodeImpl::toStringTree(bool /*pretty*/) { return toString(); } diff --git a/runtime/Cpp/runtime/src/tree/TerminalNodeImpl.h b/runtime/Cpp/runtime/src/tree/TerminalNodeImpl.h index d865c58e8..6f65d8204 100755 --- a/runtime/Cpp/runtime/src/tree/TerminalNodeImpl.h +++ b/runtime/Cpp/runtime/src/tree/TerminalNodeImpl.h @@ -23,9 +23,9 @@ namespace tree { virtual antlrcpp::Any accept(ParseTreeVisitor *visitor) override; virtual std::string getText() override; - virtual std::string toStringTree(Parser *parser) override; + virtual std::string toStringTree(Parser *parser, bool pretty = false) override; virtual std::string toString() override; - virtual std::string 
toStringTree() override; + virtual std::string toStringTree(bool pretty = false) override; }; diff --git a/runtime/Cpp/runtime/src/tree/Trees.cpp b/runtime/Cpp/runtime/src/tree/Trees.cpp index fabad0141..34cfb74d7 100755 --- a/runtime/Cpp/runtime/src/tree/Trees.cpp +++ b/runtime/Cpp/runtime/src/tree/Trees.cpp @@ -25,17 +25,17 @@ using namespace antlrcpp; Trees::Trees() { } -std::string Trees::toStringTree(ParseTree *t) { - return toStringTree(t, nullptr); +std::string Trees::toStringTree(ParseTree *t, bool pretty) { + return toStringTree(t, nullptr, pretty); } -std::string Trees::toStringTree(ParseTree *t, Parser *recog) { +std::string Trees::toStringTree(ParseTree *t, Parser *recog, bool pretty) { if (recog == nullptr) - return toStringTree(t, std::vector()); - return toStringTree(t, recog->getRuleNames()); + return toStringTree(t, std::vector(), pretty); + return toStringTree(t, recog->getRuleNames(), pretty); } -std::string Trees::toStringTree(ParseTree *t, const std::vector &ruleNames) { +std::string Trees::toStringTree(ParseTree *t, const std::vector &ruleNames, bool pretty) { std::string temp = antlrcpp::escapeWhitespace(Trees::getNodeText(t, ruleNames), false); if (t->children.empty()) { return temp; @@ -48,6 +48,7 @@ std::string Trees::toStringTree(ParseTree *t, const std::vector &ru std::stack stack; size_t childIndex = 0; ParseTree *run = t; + size_t indentationLevel = 1; while (childIndex < run->children.size()) { if (childIndex > 0) { ss << ' '; @@ -59,6 +60,13 @@ std::string Trees::toStringTree(ParseTree *t, const std::vector &ru stack.push(childIndex); run = child; childIndex = 0; + if (pretty) { + ++indentationLevel; + ss << std::endl; + for (size_t i = 0; i < indentationLevel; ++i) { + ss << " "; + } + } ss << "(" << temp << " "; } else { ss << temp; @@ -68,6 +76,9 @@ std::string Trees::toStringTree(ParseTree *t, const std::vector &ru childIndex = stack.top(); stack.pop(); run = run->parent; + if (pretty) { + --indentationLevel; + } ss << ")"; } 
else { break; @@ -181,7 +192,7 @@ std::vector Trees::getDescendants(ParseTree *t) { std::size_t n = t->children.size(); for (size_t i = 0 ; i < n ; i++) { auto descentants = getDescendants(t->children[i]); - for (auto entry: descentants) { + for (auto *entry: descentants) { nodes.push_back(entry); } } diff --git a/runtime/Cpp/runtime/src/tree/Trees.h b/runtime/Cpp/runtime/src/tree/Trees.h index e6a1bb88e..d9d04624f 100755 --- a/runtime/Cpp/runtime/src/tree/Trees.h +++ b/runtime/Cpp/runtime/src/tree/Trees.h @@ -18,17 +18,17 @@ namespace tree { /// Print out a whole tree in LISP form. getNodeText is used on the /// node payloads to get the text for the nodes. Detect /// parse trees and extract data appropriately. - static std::string toStringTree(ParseTree *t); + static std::string toStringTree(ParseTree *t, bool pretty = false); /// Print out a whole tree in LISP form. getNodeText is used on the /// node payloads to get the text for the nodes. Detect /// parse trees and extract data appropriately. - static std::string toStringTree(ParseTree *t, Parser *recog); + static std::string toStringTree(ParseTree *t, Parser *recog, bool pretty = false); /// Print out a whole tree in LISP form. getNodeText is used on the /// node payloads to get the text for the nodes. Detect /// parse trees and extract data appropriately. - static std::string toStringTree(ParseTree *t, const std::vector &ruleNames); + static std::string toStringTree(ParseTree *t, const std::vector &ruleNames, bool pretty = false); static std::string getNodeText(ParseTree *t, Parser *recog); static std::string getNodeText(ParseTree *t, const std::vector &ruleNames); diff --git a/runtime/Cpp/runtime/src/tree/pattern/ParseTreeMatch.h b/runtime/Cpp/runtime/src/tree/pattern/ParseTreeMatch.h index 35cb90ae4..eefde46c8 100755 --- a/runtime/Cpp/runtime/src/tree/pattern/ParseTreeMatch.h +++ b/runtime/Cpp/runtime/src/tree/pattern/ParseTreeMatch.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2012-2017 The ANTLR Project. 
All rights reserved. +/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. * Use of this file is governed by the BSD 3-clause license that * can be found in the LICENSE.txt file in the project root. */ @@ -45,7 +45,6 @@ namespace pattern { const std::map> &labels, ParseTree *mismatchedNode); ParseTreeMatch(ParseTreeMatch const&) = default; virtual ~ParseTreeMatch(); - ParseTreeMatch& operator=(ParseTreeMatch const&) = default; /// /// Get the last node associated with a specific {@code label}. diff --git a/runtime/Cpp/runtime/src/tree/pattern/ParseTreePattern.cpp b/runtime/Cpp/runtime/src/tree/pattern/ParseTreePattern.cpp index cfa588f31..50f44c82c 100755 --- a/runtime/Cpp/runtime/src/tree/pattern/ParseTreePattern.cpp +++ b/runtime/Cpp/runtime/src/tree/pattern/ParseTreePattern.cpp @@ -37,7 +37,7 @@ std::vector ParseTreePattern::findAll(ParseTree *tree, const std xpath::XPath finder(_matcher->getParser(), xpath); std::vector subtrees = finder.evaluate(tree); std::vector matches; - for (auto t : subtrees) { + for (auto *t : subtrees) { ParseTreeMatch aMatch = match(t); if (aMatch.succeeded()) { matches.push_back(aMatch); diff --git a/runtime/Cpp/runtime/src/tree/pattern/ParseTreePattern.h b/runtime/Cpp/runtime/src/tree/pattern/ParseTreePattern.h index 3df2f633e..d5b86ff47 100755 --- a/runtime/Cpp/runtime/src/tree/pattern/ParseTreePattern.h +++ b/runtime/Cpp/runtime/src/tree/pattern/ParseTreePattern.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. * Use of this file is governed by the BSD 3-clause license that * can be found in the LICENSE.txt file in the project root. */ @@ -30,7 +30,6 @@ namespace pattern { ParseTree *patternTree); ParseTreePattern(ParseTreePattern const&) = default; virtual ~ParseTreePattern(); - ParseTreePattern& operator=(ParseTreePattern const&) = default; /// /// Match a specific parse tree against this tree pattern. 
diff --git a/runtime/Cpp/runtime/src/tree/xpath/XPath.cpp b/runtime/Cpp/runtime/src/tree/xpath/XPath.cpp index 6dd13832d..c0398962e 100755 --- a/runtime/Cpp/runtime/src/tree/xpath/XPath.cpp +++ b/runtime/Cpp/runtime/src/tree/xpath/XPath.cpp @@ -25,11 +25,10 @@ const std::string XPath::NOT = "!"; XPath::XPath(Parser *parser, const std::string &path) { _parser = parser; _path = path; - _elements = split(path); } -std::vector XPath::split(const std::string &path) { - ANTLRFileStream in(path); +std::vector> XPath::split(const std::string &path) { + ANTLRInputStream in(path); XPathLexer lexer(&in); lexer.removeErrorListeners(); XPathLexerErrorListener listener; @@ -44,7 +43,7 @@ std::vector XPath::split(const std::string &path) { } std::vector tokens = tokenStream.getTokens(); - std::vector elements; + std::vector> elements; size_t n = tokens.size(); size_t i = 0; bool done = false; @@ -62,9 +61,9 @@ std::vector XPath::split(const std::string &path) { i++; next = tokens[i]; } - XPathElement pathElement = getXPathElement(next, anywhere); - pathElement.setInvert(invert); - elements.push_back(pathElement); + std::unique_ptr pathElement = getXPathElement(next, anywhere); + pathElement->setInvert(invert); + elements.push_back(std::move(pathElement)); i++; break; @@ -81,25 +80,26 @@ std::vector XPath::split(const std::string &path) { break; default : - throw IllegalArgumentException("Unknow path element " + el->toString()); + throw IllegalArgumentException("Unknown path element " + el->toString()); } } return elements; } -XPathElement XPath::getXPathElement(Token *wordToken, bool anywhere) { +std::unique_ptr XPath::getXPathElement(Token *wordToken, bool anywhere) { if (wordToken->getType() == Token::EOF) { throw IllegalArgumentException("Missing path element at end of path"); } + std::string word = wordToken->getText(); size_t ttype = _parser->getTokenType(word); ssize_t ruleIndex = _parser->getRuleIndex(word); switch (wordToken->getType()) { case XPathLexer::WILDCARD : if 
(anywhere) - return XPathWildcardAnywhereElement(); - return XPathWildcardElement(); + return std::unique_ptr(new XPathWildcardAnywhereElement()); + return std::unique_ptr(new XPathWildcardElement()); case XPathLexer::TOKEN_REF: case XPathLexer::STRING : @@ -107,35 +107,42 @@ XPathElement XPath::getXPathElement(Token *wordToken, bool anywhere) { throw IllegalArgumentException(word + " at index " + std::to_string(wordToken->getStartIndex()) + " isn't a valid token name"); } if (anywhere) - return XPathTokenAnywhereElement(word, (int)ttype); - return XPathTokenElement(word, (int)ttype); + return std::unique_ptr(new XPathTokenAnywhereElement(word, (int)ttype)); + return std::unique_ptr(new XPathTokenElement(word, (int)ttype)); default : if (ruleIndex == -1) { throw IllegalArgumentException(word + " at index " + std::to_string(wordToken->getStartIndex()) + " isn't a valid rule name"); } if (anywhere) - return XPathRuleAnywhereElement(word, (int)ruleIndex); - return XPathRuleElement(word, (int)ruleIndex); + return std::unique_ptr(new XPathRuleAnywhereElement(word, (int)ruleIndex)); + return std::unique_ptr(new XPathRuleElement(word, (int)ruleIndex)); } } static ParserRuleContext dummyRoot; +std::vector XPath::findAll(ParseTree *tree, std::string const& xpath, Parser *parser) { + XPath p(parser, xpath); + return p.evaluate(tree); +} + std::vector XPath::evaluate(ParseTree *t) { dummyRoot.children = { t }; // don't set t's parent. std::vector work = { &dummyRoot }; size_t i = 0; - while (i < _elements.size()) { + std::vector> elements = split(_path); + + while (i < elements.size()) { std::vector next; - for (auto node : work) { + for (auto *node : work) { if (!node->children.empty()) { // only try to match next element if it has children // e.g., //func/*/stat might have a token node for which // we can't go looking for stat nodes. 
- auto matching = _elements[i].evaluate(node); + auto matching = elements[i]->evaluate(node); next.insert(next.end(), matching.begin(), matching.end()); } } diff --git a/runtime/Cpp/runtime/src/tree/xpath/XPath.h b/runtime/Cpp/runtime/src/tree/xpath/XPath.h index 07a35921c..e38d482d5 100755 --- a/runtime/Cpp/runtime/src/tree/xpath/XPath.h +++ b/runtime/Cpp/runtime/src/tree/xpath/XPath.h @@ -61,8 +61,10 @@ namespace xpath { XPath(Parser *parser, const std::string &path); virtual ~XPath() {} - // TO_DO: check for invalid token/rule names, bad syntax - virtual std::vector split(const std::string &path); + // TODO: check for invalid token/rule names, bad syntax + virtual std::vector> split(const std::string &path); + + static std::vector findAll(ParseTree *tree, std::string const& xpath, Parser *parser); /// Return a list of all nodes starting at {@code t} as root that satisfy the /// path. The root {@code /} is relative to the node passed to @@ -71,13 +73,12 @@ namespace xpath { protected: std::string _path; - std::vector _elements; Parser *_parser; /// Convert word like {@code *} or {@code ID} or {@code expr} to a path /// element. {@code anywhere} is {@code true} if {@code //} precedes the /// word. 
- virtual XPathElement getXPathElement(Token *wordToken, bool anywhere); + virtual std::unique_ptr getXPathElement(Token *wordToken, bool anywhere); }; } // namespace xpath diff --git a/runtime/Cpp/runtime/src/tree/xpath/XPathRuleElement.cpp b/runtime/Cpp/runtime/src/tree/xpath/XPathRuleElement.cpp index e446e8cbb..1d145fb57 100755 --- a/runtime/Cpp/runtime/src/tree/xpath/XPathRuleElement.cpp +++ b/runtime/Cpp/runtime/src/tree/xpath/XPathRuleElement.cpp @@ -18,7 +18,7 @@ XPathRuleElement::XPathRuleElement(const std::string &ruleName, size_t ruleIndex std::vector XPathRuleElement::evaluate(ParseTree *t) { // return all children of t that match nodeName std::vector nodes; - for (auto c : t->children) { + for (auto *c : t->children) { if (antlrcpp::is(c)) { ParserRuleContext *ctx = dynamic_cast(c); if ((ctx->getRuleIndex() == _ruleIndex && !_invert) || (ctx->getRuleIndex() != _ruleIndex && _invert)) { diff --git a/runtime/Cpp/runtime/src/tree/xpath/XPathTokenElement.cpp b/runtime/Cpp/runtime/src/tree/xpath/XPathTokenElement.cpp index 7d53dd5de..d52fc26af 100755 --- a/runtime/Cpp/runtime/src/tree/xpath/XPathTokenElement.cpp +++ b/runtime/Cpp/runtime/src/tree/xpath/XPathTokenElement.cpp @@ -21,7 +21,7 @@ XPathTokenElement::XPathTokenElement(const std::string &tokenName, size_t tokenT std::vector XPathTokenElement::evaluate(ParseTree *t) { // return all children of t that match nodeName std::vector nodes; - for (auto c : t->children) { + for (auto *c : t->children) { if (antlrcpp::is(c)) { TerminalNode *tnode = dynamic_cast(c); if ((tnode->getSymbol()->getType() == _tokenType && !_invert) || (tnode->getSymbol()->getType() != _tokenType && _invert)) { diff --git a/runtime/Dart/.gitignore b/runtime/Dart/.gitignore new file mode 100644 index 000000000..a45b12ec7 --- /dev/null +++ b/runtime/Dart/.gitignore @@ -0,0 +1,23 @@ +!lib + +# See https://www.dartlang.org/guides/libraries/private-files + +# Files and directories created by pub +.dart_tool/ +.packages +build/ +# If 
you're building an application, you may want to check-in your pubspec.lock +pubspec.lock + +# Directory created by dartdoc +# If you don't generate documentation locally you can remove this line. +doc/api/ + +# Avoid committing generated Javascript files: +*.dart.js +*.info.json # Produced by the --dump-info flag. +*.js # When generated by dart2js. Don't specify *.js if your + # project includes source files written in JavaScript. +*.js_ +*.js.deps +*.js.map \ No newline at end of file diff --git a/runtime/Dart/CHANGELOG.md b/runtime/Dart/CHANGELOG.md new file mode 100644 index 000000000..84522ce54 --- /dev/null +++ b/runtime/Dart/CHANGELOG.md @@ -0,0 +1,4 @@ + +## 4.8.0-dev.2 + +* Initial release \ No newline at end of file diff --git a/runtime/Dart/LICENSE b/runtime/Dart/LICENSE new file mode 100644 index 000000000..2042d1bda --- /dev/null +++ b/runtime/Dart/LICENSE @@ -0,0 +1,52 @@ +[The "BSD 3-clause license"] +Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: + + 1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + 3. Neither the name of the copyright holder nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR +IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES +OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
+IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, +INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT +NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF +THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +===== + +MIT License for codepointat.js from https://git.io/codepointat +MIT License for fromcodepoint.js from https://git.io/vDW1m + +Copyright Mathias Bynens + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/runtime/Dart/README.md b/runtime/Dart/README.md new file mode 100644 index 000000000..3b2b4a78b --- /dev/null +++ b/runtime/Dart/README.md @@ -0,0 +1,11 @@ +# Dart target for ANTLR 4 + +Dart runtime libraries for ANTLR 4 + +This runtime is available through [pub](https://pub.dev). 
The package name is 'antlr4'. + +See www.antlr.org for more information on ANTLR. + +See https://github.com/antlr/antlr4/blob/master/doc/dart-target.md for more information on using ANTLR in Dart. + + diff --git a/runtime/Dart/analysis_options.yaml b/runtime/Dart/analysis_options.yaml new file mode 100644 index 000000000..108d1058a --- /dev/null +++ b/runtime/Dart/analysis_options.yaml @@ -0,0 +1 @@ +include: package:pedantic/analysis_options.yaml diff --git a/runtime/Dart/lib/antlr4.dart b/runtime/Dart/lib/antlr4.dart new file mode 100644 index 000000000..0a4b4f60e --- /dev/null +++ b/runtime/Dart/lib/antlr4.dart @@ -0,0 +1,21 @@ +/* + * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ + +library antlr4; + +export 'src/atn/atn.dart'; +export 'src/dfa/dfa.dart'; +export 'src/tree/tree.dart'; +export 'src/error/error.dart'; +export 'src/rule_context.dart'; +export 'src/input_stream.dart'; +export 'src/token_stream.dart'; +export 'src/lexer.dart'; +export 'src/parser.dart'; +export 'src/parser_rule_context.dart'; +export 'src/vocabulary.dart'; +export 'src/runtime_meta_data.dart'; +export 'src/token.dart'; diff --git a/runtime/Dart/lib/src/atn/atn.dart b/runtime/Dart/lib/src/atn/atn.dart new file mode 100644 index 000000000..a0400f41d --- /dev/null +++ b/runtime/Dart/lib/src/atn/atn.dart @@ -0,0 +1,18 @@ +/* + * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. 
+ */ + +export 'src/atn.dart'; +export 'src/atn_config.dart'; +export 'src/atn_config_set.dart'; +export 'src/atn_deserializer.dart'; +export 'src/atn_simulator.dart'; +export 'src/atn_state.dart'; +export 'src/info.dart'; +export 'src/lexer_action_executor.dart'; +export 'src/lexer_atn_simulator.dart'; +export 'src/parser_atn_simulator.dart'; +export 'src/profiling_atn_simulator.dart'; +export 'src/transition.dart'; diff --git a/runtime/Dart/lib/src/atn/src/atn.dart b/runtime/Dart/lib/src/atn/src/atn.dart new file mode 100644 index 000000000..5c8c4a455 --- /dev/null +++ b/runtime/Dart/lib/src/atn/src/atn.dart @@ -0,0 +1,170 @@ +/* + * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ + +import '../../interval_set.dart'; +import '../../ll1_analyzer.dart'; +import '../../rule_context.dart'; +import '../../token.dart'; +import 'atn_state.dart'; +import 'atn_type.dart'; +import 'lexer_action.dart'; +import 'transition.dart'; + +class ATN { + static final INVALID_ALT_NUMBER = 0; + + List states = []; + + /// Each subrule/rule is a decision point and we must track them so we + /// can go back later and build DFA predictors for them. This includes + /// all the rules, subrules, optional blocks, ()+, ()* etc... + List decisionToState = []; + + /// Maps from rule index to starting state number. + List ruleToStartState; + + /// Maps from rule index to stop state number. + List ruleToStopState; + + Map modeNameToStartState = {}; + + /// The type of the ATN. + final ATNType grammarType; + + /// The maximum value for any symbol recognized by a transition in the ATN. + final int maxTokenType; + + /// For lexer ATNs, this maps the rule index to the resulting token type. 
+ /// For parser ATNs, this maps the rule index to the generated bypass token + /// type if the + /// {@link ATNDeserializationOptions#isGenerateRuleBypassTransitions} + /// deserialization option was specified; otherwise, this is null. + List ruleToTokenType; + + /// For lexer ATNs, this is an array of [LexerAction] objects which may + /// be referenced by action transitions in the ATN. + List lexerActions; + + List modeToStartState = []; + + /// Used for runtime deserialization of ATNs from strings */ + ATN(this.grammarType, this.maxTokenType); + + /// TODO merge doc comment + /// Compute the set of valid tokens that can occur starting in state [s]. + /// If [ctx] is null, the set of tokens will not include what can follow + /// the rule surrounding [s]. In other words, the set will be + /// restricted to tokens reachable staying within [s]'s rule. + /// + /// Compute the set of valid tokens that can occur starting in [s] and + /// staying in same rule. {@link Token#EPSILON} is in set if we reach end of + /// rule. 
+ IntervalSet nextTokens(ATNState s, [RuleContext ctx]) { + if (ctx != null) { + return LL1Analyzer(this).LOOK(s, ctx); + } + if (s.nextTokenWithinRule != null) return s.nextTokenWithinRule; + s.nextTokenWithinRule = LL1Analyzer(this).LOOK(s, null); + s.nextTokenWithinRule.setReadonly(true); + return s.nextTokenWithinRule; + } + + void addState(ATNState state) { + if (state != null) { + state.atn = this; + state.stateNumber = states.length; + } + + states.add(state); + } + + void removeState(ATNState state) { + states[state.stateNumber] = + null; // just free mem, don't shift states in list + } + + int defineDecisionState(DecisionState s) { + decisionToState.add(s); + s.decision = decisionToState.length - 1; + return s.decision; + } + + DecisionState getDecisionState(int decision) { + if (decisionToState.isNotEmpty) { + return decisionToState[decision]; + } + return null; + } + + int get numberOfDecisions { + return decisionToState.length; + } + + /// Computes the set of input symbols which could follow ATN state number + /// [stateNumber] in the specified full [context]. This method + /// considers the complete parser context, but does not evaluate semantic + /// predicates (i.e. all predicates encountered during the calculation are + /// assumed true). If a path in the ATN exists from the starting state to the + /// [RuleStopState] of the outermost context without matching any + /// symbols, {@link Token#EOF} is added to the returned set. + /// + ///

If [context] is null, it is treated as {@link ParserRuleContext#EMPTY}.

+ /// + /// Note that this does NOT give you the set of all tokens that could + /// appear at a given token position in the input phrase. In other words, + /// it does not answer: + /// + /// "Given a specific partial input phrase, return the set of all tokens + /// that can follow the last token in the input phrase." + /// + /// The big difference is that with just the input, the parser could + /// land right in the middle of a lookahead decision. Getting + /// all *possible* tokens given a partial input stream is a separate + /// computation. See https://github.com/antlr/antlr4/issues/1428 + /// + /// For this function, we are specifying an ATN state and call stack to compute + /// what token(s) can come next and specifically: outside of a lookahead decision. + /// That is what you want for error reporting and recovery upon parse error. + /// + /// @param stateNumber the ATN state number + /// @param context the full parse context + /// @return The set of potentially valid input symbols which could follow the + /// specified state in the specified context. 
+ /// @throws IllegalArgumentException if the ATN does not contain a state with + /// number [stateNumber] + IntervalSet getExpectedTokens(int stateNumber, RuleContext context) { + if (stateNumber < 0 || stateNumber >= states.length) { + throw RangeError.index(stateNumber, states, 'stateNumber'); + } + + var ctx = context; + final s = states[stateNumber]; + var following = nextTokens(s); + if (!following.contains(Token.EPSILON)) { + return following; + } + + final expected = IntervalSet(); + expected.addAll(following); + expected.remove(Token.EPSILON); + while (ctx != null && + ctx.invokingState >= 0 && + following.contains(Token.EPSILON)) { + final invokingState = states[ctx.invokingState]; + RuleTransition rt = invokingState.transition(0); + following = nextTokens(rt.followState); + expected.addAll(following); + expected.remove(Token.EPSILON); + ctx = ctx.parent; + } + + if (following.contains(Token.EPSILON)) { + expected.addOne(Token.EOF); + } + + return expected; + } +} diff --git a/runtime/Dart/lib/src/atn/src/atn_config.dart b/runtime/Dart/lib/src/atn/src/atn_config.dart new file mode 100644 index 000000000..5b7e9ba84 --- /dev/null +++ b/runtime/Dart/lib/src/atn/src/atn_config.dart @@ -0,0 +1,242 @@ +/* + * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ + +import '../../prediction_context.dart'; +import '../../recognizer.dart'; +import '../../util/murmur_hash.dart'; +import 'atn_state.dart'; +import 'lexer_action_executor.dart'; +import 'semantic_context.dart'; + +Map checkParams(params, isCfg) { + if (params == null) { + final result = { + 'state': null, + 'alt': null, + 'context': null, + 'semanticContext': null + }; + if (isCfg) { + result['reachesIntoOuterContext'] = 0; + } + return result; + } else { + final props = {}; + props['state'] = params.state; + props['alt'] = (params.alt == null) ? 
null : params.alt; + props['context'] = params.context; + props['semanticContext'] = params.semanticContext; + if (isCfg) { + props['reachesIntoOuterContext'] = params.reachesIntoOuterContext ?? 0; + props['precedenceFilterSuppressed'] = + params.precedenceFilterSuppressed ?? false; + } + return props; + } +} + +/// A tuple: (ATN state, predicted alt, syntactic, semantic context). +/// The syntactic context is a graph-structured stack node whose +/// path(s) to the root is the rule invocation(s) +/// chain used to arrive at the state. The semantic context is +/// the tree of semantic predicates encountered before reaching +/// an ATN state. +class ATNConfig { + /// This field stores the bit mask for implementing the + /// {@link #isPrecedenceFilterSuppressed} property as a bit within the + /// existing {@link #reachesIntoOuterContext} field. + static final int SUPPRESS_PRECEDENCE_FILTER = 0x40000000; + + /// The ATN state associated with this configuration */ + ATNState state; + + /// What alt (or lexer rule) is predicted by this configuration */ + int alt; + + /// The stack of invoking states leading to the rule/states associated + /// with this config. We track only those contexts pushed during + /// execution of the ATN simulator. + PredictionContext context; + + /// We cannot execute predicates dependent upon local context unless + /// we know for sure we are in the correct context. Because there is + /// no way to do this efficiently, we simply cannot evaluate + /// dependent predicates unless we are in the rule that initially + /// invokes the ATN simulator. + /// + ///

+ /// closure() tracks the depth of how far we dip into the outer context: + /// depth > 0. Note that it may not be totally accurate depth since I + /// don't ever decrement. TODO: make it a bool then

+ /// + ///

+ /// For memory efficiency, the {@link #isPrecedenceFilterSuppressed} method + /// is also backed by this field. Since the field is publicly accessible, the + /// highest bit which would not cause the value to become negative is used to + /// store this field. This choice minimizes the risk that code which only + /// compares this value to 0 would be affected by the new purpose of the + /// flag. It also ensures the performance of the existing [ATNConfig] + /// constructors as well as certain operations like + /// {@link ATNConfigSet#add(ATNConfig, DoubleKeyMap)} method are + /// completely unaffected by the change.

+ int reachesIntoOuterContext = 0; + + SemanticContext semanticContext; + + ATNConfig(this.state, this.alt, this.context, + [this.semanticContext = SemanticContext.NONE]); + + ATNConfig.dup(ATNConfig c, + {this.state, this.alt, this.context, this.semanticContext}) { + state = state ?? c.state; + alt = alt ?? c.alt; + context = context ?? c.context; + semanticContext = semanticContext ?? c.semanticContext; + reachesIntoOuterContext = + c.reachesIntoOuterContext ?? reachesIntoOuterContext; + } + + /// This method gets the value of the {@link #reachesIntoOuterContext} field + /// as it existed prior to the introduction of the + /// {@link #isPrecedenceFilterSuppressed} method. + int get outerContextDepth { + return reachesIntoOuterContext & ~SUPPRESS_PRECEDENCE_FILTER; + } + + bool isPrecedenceFilterSuppressed() { + return (reachesIntoOuterContext & SUPPRESS_PRECEDENCE_FILTER) != 0; + } + + void setPrecedenceFilterSuppressed(bool value) { + if (value) { + reachesIntoOuterContext |= 0x40000000; + } else { + reachesIntoOuterContext &= ~SUPPRESS_PRECEDENCE_FILTER; + } + } + + /// An ATN configuration is equal to another if both have + /// the same state, they predict the same alternative, and + /// syntactic/semantic contexts are the same. 
+ @override + bool operator ==(Object other) { + if (other is ATNConfig && other != null) { + return state.stateNumber == other.state.stateNumber && + alt == other.alt && + (context == other.context || + (context != null && context == other.context)) && + semanticContext == other.semanticContext && + isPrecedenceFilterSuppressed() == + other.isPrecedenceFilterSuppressed(); + } + return false; + } + + @override + int get hashCode { + var hashCode = MurmurHash.initialize(7); + hashCode = MurmurHash.update(hashCode, state.stateNumber); + hashCode = MurmurHash.update(hashCode, alt); + hashCode = MurmurHash.update(hashCode, context); + hashCode = MurmurHash.update(hashCode, semanticContext); + hashCode = MurmurHash.finish(hashCode, 4); + return hashCode; + } + + @override + String toString([Recognizer recog, bool showAlt = true]) { + final buf = StringBuffer(); + // if ( state.ruleIndex>=0 ) { + // if ( recog!=null ) buf.write(recog.ruleNames[state.ruleIndex]+":"); + // else buf.write(state.ruleIndex+":"); + // } + buf.write('('); + buf.write(state); + if (showAlt) { + buf.write(','); + buf.write(alt); + } + if (context != null) { + buf.write(',['); + buf.write(context.toString()); + buf.write(']'); + } + if (semanticContext != null && semanticContext != SemanticContext.NONE) { + buf.write(','); + buf.write(semanticContext); + } + if (outerContextDepth > 0) { + buf.write(',up='); + buf.write(outerContextDepth); + } + buf.write(')'); + return buf.toString(); + } +} + +class LexerATNConfig extends ATNConfig { + /// Gets the [LexerActionExecutor] capable of executing the embedded + /// action(s) for the current configuration. 
+ LexerActionExecutor lexerActionExecutor; + + bool passedThroughNonGreedyDecision = false; + + LexerATNConfig(ATNState state, int alt, PredictionContext context, + [this.lexerActionExecutor]) + : super(state, alt, context, SemanticContext.NONE) { + passedThroughNonGreedyDecision = false; + } + + LexerATNConfig.dup(LexerATNConfig c, ATNState state, + {this.lexerActionExecutor, PredictionContext context}) + : super.dup(c, state: state, context: context) { + lexerActionExecutor = lexerActionExecutor ?? c.lexerActionExecutor; + passedThroughNonGreedyDecision = checkNonGreedyDecision(c, state); + } + + bool hasPassedThroughNonGreedyDecision() { + return passedThroughNonGreedyDecision; + } + + @override + int get hashCode { + var hashCode = MurmurHash.initialize(7); + hashCode = MurmurHash.update(hashCode, state.stateNumber); + hashCode = MurmurHash.update(hashCode, alt); + hashCode = MurmurHash.update(hashCode, context); + hashCode = MurmurHash.update(hashCode, semanticContext); + hashCode = + MurmurHash.update(hashCode, passedThroughNonGreedyDecision ? 
1 : 0); + hashCode = MurmurHash.update(hashCode, lexerActionExecutor); + hashCode = MurmurHash.finish(hashCode, 6); + return hashCode; + } + + @override + bool operator ==(Object other) { + if (identical(this, other)) { + return true; + } else if (other is LexerATNConfig) { + final lexerOther = other; + if (passedThroughNonGreedyDecision != + lexerOther.passedThroughNonGreedyDecision) { + return false; + } + + if (lexerActionExecutor != lexerOther.lexerActionExecutor) { + return false; + } + + return super == other; + } + return false; + } + + static bool checkNonGreedyDecision(LexerATNConfig source, ATNState target) { + return source.passedThroughNonGreedyDecision || + target is DecisionState && target.nonGreedy; + } +} diff --git a/runtime/Dart/lib/src/atn/src/atn_config_set.dart b/runtime/Dart/lib/src/atn/src/atn_config_set.dart new file mode 100644 index 000000000..7a70dd925 --- /dev/null +++ b/runtime/Dart/lib/src/atn/src/atn_config_set.dart @@ -0,0 +1,283 @@ +/* + * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ + +import 'dart:collection'; +import 'dart:math'; + +import 'package:collection/collection.dart'; + +import '../../misc/pair.dart'; +import '../../prediction_context.dart'; +import '../../util/bit_set.dart'; +import '../../util/utils.dart'; +import 'atn.dart'; +import 'atn_config.dart'; +import 'atn_state.dart'; +import 'semantic_context.dart'; + +class ATNConfigSet extends Iterable { + /// Indicates that the set of configurations is read-only. Do not + /// allow any code to manipulate the set; DFA states will point at + /// the sets and they must not change. This does not protect the other + /// fields; in particular, conflictingAlts is set after + /// we've made this readonly. 
+ bool _readOnly = false; + + bool get readOnly => _readOnly; + + set readOnly(bool readOnly) { + _readOnly = readOnly; + if (readOnly) { + configLookup = null; // can't mod, no need for lookup cache + } + } + + /// The reason that we need this is because we don't want the hash map to use + /// the standard hash code and equals. We need all configurations with the same + /// {@code (s,i,_,semctx)} to be equal. Unfortunately, this key effectively doubles + /// the number of objects associated with ATNConfigs. The other solution is to + /// use a hash table that lets us specify the equals/hashcode operation. + /// + /// All configs but hashed by (s, i, _, pi) not including context. Wiped out + /// when we go readonly as this set becomes a DFA state. + Set configLookup = HashSet(equals: (a, b) { + if (a == null || b == null) return false; + return a.state.stateNumber == b.state.stateNumber && + a.alt == b.alt && + a.semanticContext == b.semanticContext; + }, hashCode: (ATNConfig o) { + var hashCode = 7; + hashCode = 31 * hashCode + o.state.stateNumber; + hashCode = 31 * hashCode + o.alt; + hashCode = 31 * hashCode + o.semanticContext.hashCode; + return hashCode; + }); + + /// Track the elements as they are added to the set; supports get(i) */ + final List configs = []; + + // TODO: these fields make me pretty uncomfortable but nice to pack up info together, saves recomputation + // TODO: can we track conflicts as they are added to save scanning configs later? + int uniqueAlt = 0; + + /// Currently this is only used when we detect SLL conflict; this does + /// not necessarily represent the ambiguous alternatives. In fact, + /// I should also point out that this seems to include predicated alternatives + /// that have predicates that evaluate to false. Computed in computeTargetState(). + BitSet conflictingAlts; + + // Used in parser and lexer. In lexer, it indicates we hit a pred + // while computing a closure operation. Don't make a DFA state from this. 
+ bool hasSemanticContext = false; + bool dipsIntoOuterContext = false; + + /// Indicates that this configuration set is part of a full context + /// LL prediction. It will be used to determine how to merge $. With SLL + /// it's a wildcard whereas it is not for LL context merge. + bool fullCtx; + + int cachedHashCode = -1; + + ATNConfigSet([this.fullCtx = true]); + + ATNConfigSet.dup(ATNConfigSet old) { + fullCtx = old.fullCtx; + addAll(old); + uniqueAlt = old.uniqueAlt; + conflictingAlts = old.conflictingAlts; + hasSemanticContext = old.hasSemanticContext; + dipsIntoOuterContext = old.dipsIntoOuterContext; + } + + /// Adding a new config means merging contexts with existing configs for + /// {@code (s, i, pi, _)}, where [s] is the + /// {@link ATNConfig#state}, [i] is the {@link ATNConfig#alt}, and + /// [pi] is the {@link ATNConfig#semanticContext}. We use + /// {@code (s,i,pi)} as key. + /// + ///

This method updates {@link #dipsIntoOuterContext} and + /// {@link #hasSemanticContext} when necessary.

+ bool add(ATNConfig config, + [Map, PredictionContext> + mergeCache]) { + if (readOnly) throw StateError('This set is readonly'); + if (config.semanticContext != SemanticContext.NONE) { + hasSemanticContext = true; + } + if (config.outerContextDepth > 0) { + dipsIntoOuterContext = true; + } + final existing = configLookup.lookup(config) ?? config; + if (identical(existing, config)) { + // we added this new one + cachedHashCode = -1; + configLookup.add(config); + configs.add(config); // track order here + return true; + } + // a previous (s,i,pi,_), merge with it and save result + final rootIsWildcard = !fullCtx; + final merged = PredictionContext.merge( + existing.context, config.context, rootIsWildcard, mergeCache); + // no need to check for existing.context, config.context in cache + // since only way to create new graphs is "call rule" and here. We + // cache at both places. + existing.reachesIntoOuterContext = + max(existing.reachesIntoOuterContext, config.reachesIntoOuterContext); + + // make sure to preserve the precedence filter suppression during the merge + if (config.isPrecedenceFilterSuppressed()) { + existing.setPrecedenceFilterSuppressed(true); + } + + existing.context = merged; // replace context; no need to alt mapping + return true; + } + + /// Return a List holding list of configs */ + List get elements { + return configs; + } + + Set get states { + final states = {}; + for (var i = 0; i < configs.length; i++) { + states.add(configs[i].state); + } + return states; + } + + /// Gets the complete set of represented alternatives for the configuration + /// set. 
+ /// + /// @return the set of represented alternatives in this configuration set + /// + /// @since 4.3 + BitSet get alts { + final alts = BitSet(); + for (var config in configs) { + alts.set(config.alt); + } + return alts; + } + + List get predicates { + final preds = []; + for (var c in configs) { + if (c.semanticContext != SemanticContext.NONE) { + preds.add(c.semanticContext); + } + } + return preds; + } + + ATNConfig get(int i) { + return configs[i]; + } + + void optimizeConfigs(interpreter) { + if (readOnly) throw StateError('This set is readonly'); + + if (configLookup.isEmpty) return; + + for (var config in configs) { +// int before = PredictionContext.getAllContextNodes(config.context).length; + config.context = interpreter.getCachedContext(config.context); +// int after = PredictionContext.getAllContextNodes(config.context).length; +// System.out.println("configs "+before+"->"+after); + } + } + + bool addAll(coll) { + for (ATNConfig c in coll) { + add(c); + } + return false; + } + + @override + bool operator ==(other) { + return identical(this, other) || + (other is ATNConfigSet && + other != null && + ListEquality().equals(configs, other.configs) && + fullCtx == other.fullCtx && + uniqueAlt == other.uniqueAlt && + conflictingAlts == other.conflictingAlts && + hasSemanticContext == other.hasSemanticContext && + dipsIntoOuterContext == other.dipsIntoOuterContext); + } + + @override + int get hashCode { + if (readOnly) { + if (cachedHashCode == -1) { + cachedHashCode = ListEquality().hash(configs); + } + + return cachedHashCode; + } + + return ListEquality().hash(configs); + } + + @override + int get length { + return configs.length; + } + + @override + bool get isEmpty => configs.isEmpty; + + void updateHashCode(hash) { + if (readOnly) { + if (cachedHashCode == -1) { + cachedHashCode = hashCode; + } + hash.update(cachedHashCode); + } else { + hash.update(hashCode); + } + } + + @override + bool contains(Object o) { + if (configLookup == null) { + throw 
UnsupportedError( + 'This method is not implemented for readonly sets.'); + } + + return configLookup.contains(o); + } + + @override + Iterator get iterator => configs.iterator; + + void clear() { + if (readOnly) throw StateError('This set is readonly'); + configs.clear(); + cachedHashCode = -1; + configLookup.clear(); + } + + @override + String toString() { + final buf = StringBuffer(); + buf.write(arrayToString(elements)); + if (hasSemanticContext) { + buf.write(',hasSemanticContext=$hasSemanticContext'); + } + if (uniqueAlt != ATN.INVALID_ALT_NUMBER) buf.write(',uniqueAlt=$uniqueAlt'); + if (conflictingAlts != null) buf.write(',conflictingAlts=$conflictingAlts'); + if (dipsIntoOuterContext) buf.write(',dipsIntoOuterContext'); + return buf.toString(); + } +} + +class OrderedATNConfigSet extends ATNConfigSet { + @override + final configLookup = {}; +} diff --git a/runtime/Dart/lib/src/atn/src/atn_deserializer.dart b/runtime/Dart/lib/src/atn/src/atn_deserializer.dart new file mode 100644 index 000000000..fbbec6b0c --- /dev/null +++ b/runtime/Dart/lib/src/atn/src/atn_deserializer.dart @@ -0,0 +1,809 @@ +/* + * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. 
+ */ + +import '../../interval_set.dart'; +import '../../misc/pair.dart'; +import '../../token.dart'; +import 'atn.dart'; +import 'atn_state.dart'; +import 'atn_type.dart'; +import 'lexer_action.dart'; +import 'transition.dart'; + +class ATNDeserializationOptions { + static final ATNDeserializationOptions defaultOptions = + ATNDeserializationOptions()..makeReadOnly(); + + bool readOnly; + bool verifyATN; + bool generateRuleBypassTransitions; + + ATNDeserializationOptions([ATNDeserializationOptions options]) { + if (options == null) { + verifyATN = true; + generateRuleBypassTransitions = false; + } else { + verifyATN = options.verifyATN; + generateRuleBypassTransitions = + options.generateRuleBypassTransitions; + } + } + + bool isReadOnly() { + return readOnly; + } + + void makeReadOnly() { + readOnly = true; + } + + bool isVerifyATN() { + return verifyATN; + } + + void setVerifyATN(bool verifyATN) { + throwIfReadOnly(); + this.verifyATN = verifyATN; + } + + bool isGenerateRuleBypassTransitions() { + return generateRuleBypassTransitions; + } + + void setGenerateRuleBypassTransitions(bool generateRuleBypassTransitions) { + throwIfReadOnly(); + this.generateRuleBypassTransitions = generateRuleBypassTransitions; + } + + void throwIfReadOnly() { + if (isReadOnly()) { + throw StateError('The object is read only.'); + } + } +} + +class ATNDeserializer { + /// This value should never change. Updates following this version are + /// reflected as change in the unique ID SERIALIZED_UUID. + static final SERIALIZED_VERSION = 3; + + /** WARNING: DO NOT MERGE THESE LINES. If UUIDs differ during a merge, + * resolve the conflict by generating a new ID! + */ + /// This is the earliest supported serialized UUID. + static final BASE_SERIALIZED_UUID = '33761B2D-78BB-4A43-8B0B-4F5BEE8AACF3'; + + /// This UUID indicates an extension of {@link BASE_SERIALIZED_UUID} for the + /// addition of precedence predicates. 
+ static final ADDED_PRECEDENCE_TRANSITIONS = + '1DA0C57D-6C06-438A-9B27-10BCB3CE0F61'; + + /// This UUID indicates an extension of {@link #ADDED_PRECEDENCE_TRANSITIONS} + /// for the addition of lexer actions encoded as a sequence of + /// [LexerAction] instances. + static final ADDED_LEXER_ACTIONS = 'AADB8D7E-AEEF-4415-AD2B-8204D6CF042E'; + + /// This UUID indicates the serialized ATN contains two sets of + /// IntervalSets, where the second set's values are encoded as + /// 32-bit integers to support the full Unicode SMP range up to U+10FFFF. + static final ADDED_UNICODE_SMP = '59627784-3BE5-417A-B9EB-8131A7286089'; + + /// This list contains all of the currently supported UUIDs, ordered by when + /// the feature first appeared in this branch. + static final SUPPORTED_UUIDS = [ + BASE_SERIALIZED_UUID, + ADDED_PRECEDENCE_TRANSITIONS, + ADDED_LEXER_ACTIONS, + ADDED_UNICODE_SMP + ]; + + /// This is the current serialized UUID. + static final SERIALIZED_UUID = ADDED_UNICODE_SMP; + + ATNDeserializationOptions deserializationOptions; + List data; + var pos; + String uuid; + + ATNDeserializer([options]) { + deserializationOptions = + options ?? ATNDeserializationOptions.defaultOptions; + } + + /// Determines if a particular serialized representation of an ATN supports + /// a particular feature, identified by the [UUID] used for serializing + /// the ATN at the time the feature was first introduced. + /// + /// @param feature The [UUID] marking the first time the feature was + /// supported in the serialized ATN. + /// @param actualUuid The [UUID] of the actual serialized ATN which is + /// currently being deserialized. + /// @return [true] if the [actualUuid] value represents a + /// serialized ATN at or after the feature identified by [feature] was + /// introduced; otherwise, [false]. 
+ bool isFeatureSupported(feature, actualUuid) { + final idx1 = SUPPORTED_UUIDS.indexOf(feature); + if (idx1 < 0) { + return false; + } + final idx2 = SUPPORTED_UUIDS.indexOf(actualUuid); + return idx2 >= idx1; + } + + ATN deserialize(List data) { + reset(data); + checkVersion(); + checkUUID(); + final atn = readATN(); + readStates(atn); + readRules(atn); + readModes(atn); + final sets = []; + // First, deserialize sets with 16-bit arguments <= U+FFFF. + readSets(atn, sets, () => readInt()); + // Next, if the ATN was serialized with the Unicode SMP feature, + // deserialize sets with 32-bit arguments <= U+10FFFF. + if (isFeatureSupported(ADDED_UNICODE_SMP, uuid)) { + readSets(atn, sets, () => readInt32()); + } + readEdges(atn, sets); + readDecisions(atn); + readLexerActions(atn); + markPrecedenceDecisions(atn); + verifyATN(atn); + if (deserializationOptions.generateRuleBypassTransitions && + atn.grammarType == ATNType.PARSER) { + generateRuleBypassTransitions(atn); + // re-verify after modification + verifyATN(atn); + } + return atn; + } + + /// Each char value in data is shifted by +2 at the entry to this method. + /// This is an encoding optimization targeting the serialized values 0 + /// and -1 (serialized to 0xFFFF), each of which are very common in the + /// serialized form of the ATN. In the modified UTF-8 that Java uses for + /// compiled string literals, these two character values have multi-byte + /// forms. By shifting each value by +2, they become characters 2 and 1 + /// prior to writing the string, each of which have single-byte + /// representations. Since the shift occurs in the tool during ATN + /// serialization, each target is responsible for adjusting the values + /// during deserialization. + /// + /// As a special case, note that the first element of data is not + /// adjusted because it contains the major version number of the + /// serialized ATN, which was fixed at 3 at the time the value shifting + /// was implemented. 
+ void reset(List data) { + final adjust = (int c) { + final v = c; + return v > 1 ? v - 2 : v + 65534; + }; + final temp = data.map(adjust).toList(); + // don't adjust the first value since that's the version number + temp[0] = data[0]; + this.data = temp; + pos = 0; + } + + void checkVersion() { + final version = readInt(); + if (version != SERIALIZED_VERSION) { + throw ('Could not deserialize ATN with version $version (expected $SERIALIZED_VERSION).'); + } + } + + void checkUUID() { + final uuid = readUUID(); + if (!SUPPORTED_UUIDS.contains(uuid)) { + throw ('Could not deserialize ATN with UUID: $uuid (expected $SERIALIZED_UUID or a legacy UUID).'); + } + this.uuid = uuid; + } + + ATN readATN() { + final grammarType = readInt(); + final maxTokenType = readInt(); + return ATN(ATNType.values[grammarType], maxTokenType); + } + + void readStates(ATN atn) { + final loopBackStateNumbers = >[]; + final endStateNumbers = >[]; + final nstates = readInt(); + for (var i = 0; i < nstates; i++) { + final stype = StateType.values[readInt()]; + // ignore bad type of states + if (stype == StateType.INVALID_TYPE) { + atn.addState(null); + continue; + } + + var ruleIndex = readInt(); + if (ruleIndex == 0xFFFF) { + ruleIndex = -1; + } + + final s = stateFactory(stype, ruleIndex); + if (s is LoopEndState) { + // special case + final loopBackStateNumber = readInt(); + loopBackStateNumbers.add(Pair(s, loopBackStateNumber)); + } else if (s is BlockStartState) { + final endStateNumber = readInt(); + endStateNumbers.add(Pair(s, endStateNumber)); + } + atn.addState(s); + } + + // delay the assignment of loop back and end states until we know all the state instances have been initialized + for (final pair in loopBackStateNumbers) { + pair.a.loopBackState = atn.states[pair.b]; + } + + for (final pair in endStateNumbers) { + pair.a.endState = atn.states[pair.b] as BlockEndState; + } + + final numNonGreedyStates = readInt(); + for (var i = 0; i < numNonGreedyStates; i++) { + final 
stateNumber = readInt(); + (atn.states[stateNumber] as DecisionState).nonGreedy = true; + } + if (isFeatureSupported(ADDED_PRECEDENCE_TRANSITIONS, uuid)) { + final numPrecedenceStates = readInt(); + for (var i = 0; i < numPrecedenceStates; i++) { + final stateNumber = readInt(); + (atn.states[stateNumber] as RuleStartState).isLeftRecursiveRule = true; + } + } + } + + void readRules(ATN atn) { + final nrules = readInt(); + if (atn.grammarType == ATNType.LEXER) { + atn.ruleToTokenType = List(nrules); + } + + atn.ruleToStartState = List(nrules); + for (var i = 0; i < nrules; i++) { + final s = readInt(); + RuleStartState startState = atn.states[s]; + atn.ruleToStartState[i] = startState; + if (atn.grammarType == ATNType.LEXER) { + var tokenType = readInt(); + if (tokenType == 0xFFFF) { + tokenType = Token.EOF; + } + + atn.ruleToTokenType[i] = tokenType; + + if (!isFeatureSupported(ADDED_LEXER_ACTIONS, uuid)) { + // this piece of unused metadata was serialized prior to the + // addition of LexerAction + final actionIndexIgnored = readInt(); + } + } + } + + atn.ruleToStopState = List(nrules); + for (var state in atn.states) { + if (!(state is RuleStopState)) { + continue; + } + + RuleStopState stopState = state; + atn.ruleToStopState[state.ruleIndex] = stopState; + atn.ruleToStartState[state.ruleIndex].stopState = stopState; + } + } + + void readModes(ATN atn) { + final nmodes = readInt(); + for (var i = 0; i < nmodes; i++) { + final s = readInt(); + atn.modeToStartState.add(atn.states[s] as TokensStartState); + } + } + + void readSets(ATN atn, List sets, readUnicode) { + final nsets = readInt(); + for (var i = 0; i < nsets; i++) { + final nintervals = readInt(); + final set = IntervalSet(); + sets.add(set); + + final containsEof = readInt() != 0; + if (containsEof) { + set.addOne(-1); + } + + for (var j = 0; j < nintervals; j++) { + int a = readUnicode(); + int b = readUnicode(); + set.addRange(a, b); + } + } + } + + void readEdges(ATN atn, sets) { + final nedges = 
readInt(); + for (var i = 0; i < nedges; i++) { + final src = readInt(); + final trg = readInt(); + final ttype = TransitionType.values[readInt()]; + final arg1 = readInt(); + final arg2 = readInt(); + final arg3 = readInt(); + final trans = + edgeFactory(atn, ttype, src, trg, arg1, arg2, arg3, sets); +// System.out.println("EDGE "+trans.getClass().getSimpleName()+" "+ +// src+"->"+trg+ +// " "+Transition.serializationNames[ttype]+ +// " "+arg1+","+arg2+","+arg3); + final srcState = atn.states[src]; + srcState.addTransition(trans); + } + + // edges for rule stop states can be derived, so they aren't serialized + for (var state in atn.states) { + for (var i = 0; i < state.numberOfTransitions; i++) { + final t = state.transition(i); + if (t is RuleTransition) { + final ruleTransition = t; + var outermostPrecedenceReturn = -1; + if (atn.ruleToStartState[ruleTransition.target.ruleIndex] + .isLeftRecursiveRule) { + if (ruleTransition.precedence == 0) { + outermostPrecedenceReturn = ruleTransition.target.ruleIndex; + } + } + + final returnTransition = EpsilonTransition( + ruleTransition.followState, outermostPrecedenceReturn); + atn.ruleToStopState[ruleTransition.target.ruleIndex] + .addTransition(returnTransition); + } + } + } + + for (var state in atn.states) { + if (state is BlockStartState) { + // we need to know the end state to set its start state + if (state.endState == null) { + throw StateError(''); + } + + // block end states can only be associated to a single block start state + if (state.endState.startState != null) { + throw StateError(''); + } + + state.endState.startState = state; + } + + if (state is PlusLoopbackState) { + final loopbackState = state; + for (var i = 0; i < loopbackState.numberOfTransitions; i++) { + final target = loopbackState.transition(i).target; + if (target is PlusBlockStartState) { + target.loopBackState = loopbackState; + } + } + } else if (state is StarLoopbackState) { + final loopbackState = state; + for (var i = 0; i < 
loopbackState.numberOfTransitions; i++) { + final target = loopbackState.transition(i).target; + if (target is StarLoopEntryState) { + target.loopBackState = loopbackState; + } + } + } + } + } + + void readDecisions(ATN atn) { + final ndecisions = readInt(); + for (var i = 1; i <= ndecisions; i++) { + final s = readInt(); + DecisionState decState = atn.states[s]; + atn.decisionToState.add(decState); + decState.decision = i - 1; + } + } + + void readLexerActions(ATN atn) { + if (atn.grammarType == ATNType.LEXER) { + if (isFeatureSupported(ADDED_LEXER_ACTIONS, uuid)) { + atn.lexerActions = List(readInt()); + for (var i = 0; i < atn.lexerActions.length; i++) { + final actionType = LexerActionType.values[readInt()]; + var data1 = readInt(); + if (data1 == 0xFFFF) { + data1 = -1; + } + + var data2 = readInt(); + if (data2 == 0xFFFF) { + data2 = -1; + } + final lexerAction = + lexerActionFactory(actionType, data1, data2); + + atn.lexerActions[i] = lexerAction; + } + } else { + // for compatibility with older serialized ATNs, convert the old + // serialized action index for action transitions to the new + // form, which is the index of a LexerCustomAction + final legacyLexerActions = []; + for (var state in atn.states) { + for (var i = 0; i < state.numberOfTransitions; i++) { + final transition = state.transition(i); + if (transition is ActionTransition) { + final ruleIndex = transition.ruleIndex; + final actionIndex = transition.actionIndex; + final lexerAction = + LexerCustomAction(ruleIndex, actionIndex); + state.setTransition( + i, + ActionTransition(transition.target, ruleIndex, + legacyLexerActions.length, false)); + legacyLexerActions.add(lexerAction); + } + } + } + + atn.lexerActions = legacyLexerActions; + } + } + } + + void generateRuleBypassTransitions(ATN atn) { + for (var i = 0; i < atn.ruleToStartState.length; i++) { + atn.ruleToTokenType[i] = atn.maxTokenType + i + 1; + } + for (var i = 0; i < atn.ruleToStartState.length; i++) { + 
generateRuleBypassTransition(atn, i); + } + } + + void generateRuleBypassTransition(ATN atn, int idx) { + final bypassStart = BasicBlockStartState(); + bypassStart.ruleIndex = idx; + atn.addState(bypassStart); + + final bypassStop = BlockEndState(); + bypassStop.ruleIndex = idx; + atn.addState(bypassStop); + + bypassStart.endState = bypassStop; + atn.defineDecisionState(bypassStart); + + bypassStop.startState = bypassStart; + + ATNState endState; + Transition excludeTransition; + if (atn.ruleToStartState[idx].isLeftRecursiveRule) { + // wrap from the beginning of the rule to the StarLoopEntryState + endState = null; + for (var state in atn.states) { + if (state.ruleIndex != idx) { + continue; + } + + if (!(state is StarLoopEntryState)) { + continue; + } + + final maybeLoopEndState = + state.transition(state.numberOfTransitions - 1).target; + if (!(maybeLoopEndState is LoopEndState)) { + continue; + } + + if (maybeLoopEndState.epsilonOnlyTransitions && + maybeLoopEndState.transition(0).target is RuleStopState) { + endState = state; + break; + } + } + + if (endState == null) { + throw UnsupportedError( + "Couldn't identify final state of the precedence rule prefix section."); + } + + excludeTransition = + (endState as StarLoopEntryState).loopBackState.transition(0); + } else { + endState = atn.ruleToStopState[idx]; + } + + // all non-excluded transitions that currently target end state need to target blockEnd instead + for (var state in atn.states) { + for (var transition in state.transitions) { + if (transition == excludeTransition) { + continue; + } + + if (transition.target == endState) { + transition.target = bypassStop; + } + } + } + + // all transitions leaving the rule start state need to leave blockStart instead + while (atn.ruleToStartState[idx].numberOfTransitions > 0) { + final transition = atn.ruleToStartState[idx].removeTransition( + atn.ruleToStartState[idx].numberOfTransitions - 1); + bypassStart.addTransition(transition); + } + + // link the new 
states + atn.ruleToStartState[idx].addTransition(EpsilonTransition(bypassStart)); + bypassStop.addTransition(EpsilonTransition(endState)); + + ATNState matchState = BasicState(); + atn.addState(matchState); + matchState.addTransition( + AtomTransition(bypassStop, atn.ruleToTokenType[idx])); + bypassStart.addTransition(EpsilonTransition(matchState)); + } + + /// Analyze the [StarLoopEntryState] states in the specified ATN to set + /// the {@link StarLoopEntryState#isPrecedenceDecision} field to the + /// correct value. + /// + /// @param atn The ATN. + void markPrecedenceDecisions(ATN atn) { + for (var state in atn.states) { + if (state is StarLoopEntryState) { + /* We analyze the ATN to determine if this ATN decision state is the + * decision for the closure block that determines whether a + * precedence rule should continue or complete. + */ + if (atn.ruleToStartState[state.ruleIndex].isLeftRecursiveRule) { + final maybeLoopEndState = + state.transition(state.numberOfTransitions - 1).target; + if (maybeLoopEndState is LoopEndState) { + if (maybeLoopEndState.epsilonOnlyTransitions && + maybeLoopEndState.transition(0).target is RuleStopState) { + state.isPrecedenceDecision = true; + } + } + } + } + } + } + + void verifyATN(ATN atn) { + // verify assumptions + for (var state in atn.states) { + if (state == null) { + continue; + } + + checkCondition(state.onlyHasEpsilonTransitions() || + state.numberOfTransitions <= 1); + + if (state is PlusBlockStartState) { + checkCondition(state.loopBackState != null); + } + + if (state is StarLoopEntryState) { + final starLoopEntryState = state; + checkCondition(starLoopEntryState.loopBackState != null); + checkCondition(starLoopEntryState.numberOfTransitions == 2); + + if (starLoopEntryState.transition(0).target is StarBlockStartState) { + checkCondition( + starLoopEntryState.transition(1).target is LoopEndState); + checkCondition(!starLoopEntryState.nonGreedy); + } else if (starLoopEntryState.transition(0).target is 
LoopEndState) { + checkCondition( + starLoopEntryState.transition(1).target is StarBlockStartState); + checkCondition(starLoopEntryState.nonGreedy); + } else { + throw StateError(''); + } + } + + if (state is StarLoopbackState) { + checkCondition(state.numberOfTransitions == 1); + checkCondition(state.transition(0).target is StarLoopEntryState); + } + + if (state is LoopEndState) { + checkCondition(state.loopBackState != null); + } + + if (state is RuleStartState) { + checkCondition(state.stopState != null); + } + + if (state is BlockStartState) { + checkCondition(state.endState != null); + } + + if (state is BlockEndState) { + checkCondition(state.startState != null); + } + + if (state is DecisionState) { + final decisionState = state; + checkCondition(decisionState.numberOfTransitions <= 1 || + decisionState.decision >= 0); + } else { + checkCondition( + state.numberOfTransitions <= 1 || state is RuleStopState); + } + } + } + + void checkCondition(bool condition, [String message = '']) { + if (!condition) { + throw StateError(message); + } + } + + int readInt() { + return data[pos++]; + } + + int readInt32() { + final low = readInt(); + final high = readInt(); + return low | (high << 16); + } + + int readLong() { + final low = readInt32(); + final high = readInt32(); + return (low & 0x00000000FFFFFFFF) | (high << 32); + } + + static final byteToHex = List.generate(256, (i) => i.toRadixString(16).padLeft(2, '0').toUpperCase()); + + String readUUID() { + final bb = List(16); + for (var i = 7; i >= 0; i--) { + final int = readInt(); + /* jshint bitwise: false */ + bb[(2 * i) + 1] = int & 0xFF; + bb[2 * i] = (int >> 8) & 0xFF; + } + return byteToHex[bb[0]] + byteToHex[bb[1]] + + byteToHex[bb[2]] + byteToHex[bb[3]] + '-' + + byteToHex[bb[4]] + byteToHex[bb[5]] + '-' + + byteToHex[bb[6]] + byteToHex[bb[7]] + '-' + + byteToHex[bb[8]] + byteToHex[bb[9]] + '-' + + byteToHex[bb[10]] + byteToHex[bb[11]] + + byteToHex[bb[12]] + byteToHex[bb[13]] + + byteToHex[bb[14]] + 
byteToHex[bb[15]]; + } + + Transition edgeFactory(ATN atn, TransitionType type, int src, int trg, + int arg1, int arg2, int arg3, List sets) { + final target = atn.states[trg]; + switch (type) { + case TransitionType.EPSILON: + return EpsilonTransition(target); + case TransitionType.RANGE: + return arg3 != 0 + ? RangeTransition(target, Token.EOF, arg2) + : RangeTransition(target, arg1, arg2); + case TransitionType.RULE: + final rt = + RuleTransition(atn.states[arg1], arg2, arg3, target); + return rt; + case TransitionType.PREDICATE: + final pt = + PredicateTransition(target, arg1, arg2, arg3 != 0); + return pt; + case TransitionType.PRECEDENCE: + return PrecedencePredicateTransition(target, arg1); + case TransitionType.ATOM: + return arg3 != 0 + ? AtomTransition(target, Token.EOF) + : AtomTransition(target, arg1); + case TransitionType.ACTION: + final a = + ActionTransition(target, arg1, arg2, arg3 != 0); + return a; + case TransitionType.SET: + return SetTransition(target, sets[arg1]); + case TransitionType.NOT_SET: + return NotSetTransition(target, sets[arg1]); + case TransitionType.WILDCARD: + return WildcardTransition(target); + case TransitionType.INVALID: + throw ArgumentError.value(type, 'transition type', 'not valid.'); + default: + throw ArgumentError.value(type, 'transition type', 'not valid.'); + } + } + + ATNState stateFactory(StateType type, int ruleIndex) { + ATNState s; + switch (type) { + case StateType.INVALID_TYPE: + return null; + case StateType.BASIC: + s = BasicState(); + break; + case StateType.RULE_START: + s = RuleStartState(); + break; + case StateType.BLOCK_START: + s = BasicBlockStartState(); + break; + case StateType.PLUS_BLOCK_START: + s = PlusBlockStartState(); + break; + case StateType.STAR_BLOCK_START: + s = StarBlockStartState(); + break; + case StateType.TOKEN_START: + s = TokensStartState(); + break; + case StateType.RULE_STOP: + s = RuleStopState(); + break; + case StateType.BLOCK_END: + s = BlockEndState(); + break; + case 
StateType.STAR_LOOP_BACK: + s = StarLoopbackState(); + break; + case StateType.STAR_LOOP_ENTRY: + s = StarLoopEntryState(); + break; + case StateType.PLUS_LOOP_BACK: + s = PlusLoopbackState(); + break; + case StateType.LOOP_END: + s = LoopEndState(); + break; + default: + throw ArgumentError.value(type, 'state type', 'not valid.'); + } + + s.ruleIndex = ruleIndex; + return s; + } + + LexerAction lexerActionFactory(LexerActionType type, int data1, int data2) { + switch (type) { + case LexerActionType.CHANNEL: + return LexerChannelAction(data1); + + case LexerActionType.CUSTOM: + return LexerCustomAction(data1, data2); + + case LexerActionType.MODE: + return LexerModeAction(data1); + + case LexerActionType.MORE: + return LexerMoreAction.INSTANCE; + + case LexerActionType.POP_MODE: + return LexerPopModeAction.INSTANCE; + + case LexerActionType.PUSH_MODE: + return LexerPushModeAction(data1); + + case LexerActionType.SKIP: + return LexerSkipAction.INSTANCE; + + case LexerActionType.TYPE: + return LexerTypeAction(data1); + default: + throw ArgumentError.value(type, 'lexer action type', 'not valid.'); + } + } +} diff --git a/runtime/Dart/lib/src/atn/src/atn_simulator.dart b/runtime/Dart/lib/src/atn/src/atn_simulator.dart new file mode 100644 index 000000000..0af0d4b16 --- /dev/null +++ b/runtime/Dart/lib/src/atn/src/atn_simulator.dart @@ -0,0 +1,95 @@ +/* + * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. 
+ */ + +import '../../dfa/dfa.dart'; +import '../../prediction_context.dart'; +import 'atn.dart'; +import 'atn_config_set.dart'; + +abstract class ATNSimulator { + /// Must distinguish between missing edge and edge we know leads nowhere */ + + static final DFAState ERROR = + DFAState(stateNumber: 0x7FFFFFFF, configs: ATNConfigSet()); + + final ATN atn; + + /// The context cache maps all PredictionContext objects that are equals() + /// to a single cached copy. This cache is shared across all contexts + /// in all ATNConfigs in all DFA states. We rebuild each ATNConfigSet + /// to use only cached nodes/graphs in addDFAState(). We don't want to + /// fill this during closure() since there are lots of contexts that + /// pop up but are not used ever again. It also greatly slows down closure(). + /// + ///

This cache makes a huge difference in memory and a little bit in speed. + /// For the Java grammar on java.*, it dropped the memory requirements + /// at the end from 25M to 16M. We don't store any of the full context + /// graphs in the DFA because they are limited to local context only, + /// but apparently there's a lot of repetition there as well. We optimize + /// the config contexts before storing the config set in the DFA states + /// by literally rebuilding them with cached subgraphs only.

+ /// + ///

I tried a cache for use during closure operations, that was + /// whacked after each adaptivePredict(). It cost a little bit + /// more time I think and doesn't save on the overall footprint + /// so it's not worth the complexity.

+ final PredictionContextCache sharedContextCache; + + ATNSimulator(this.atn, this.sharedContextCache); + + void reset(); + + /// Clear the DFA cache used by the current instance. Since the DFA cache may + /// be shared by multiple ATN simulators, this method may affect the + /// performance (but not accuracy) of other parsers which are being used + /// concurrently. + /// + /// @throws UnsupportedOperationException if the current instance does not + /// support clearing the DFA. + /// + /// @since 4.3 + void clearDFA() { + throw UnsupportedError( + 'This ATN simulator does not support clearing the DFA.'); + } + + PredictionContext getCachedContext(PredictionContext context) { + if (sharedContextCache == null) return context; + + final visited = {}; + return PredictionContext.getCachedContext( + context, sharedContextCache, visited); + } +} + +/// Used to cache [PredictionContext] objects. Its used for the shared +/// context cash associated with contexts in DFA states. This cache +/// can be used for both lexers and parsers. +class PredictionContextCache { + final cache = {}; + + /// Add a context to the cache and return it. If the context already exists, + /// return that one instead and do not add a new context to the cache. + /// Protect shared cache from unsafe thread access. 
+ PredictionContext add(PredictionContext ctx) { + if (ctx == PredictionContext.EMPTY) return PredictionContext.EMPTY; + final existing = cache[ctx]; + if (existing != null) { +// System.out.println(name+" reuses "+existing); + return existing; + } + cache[ctx] = ctx; + return ctx; + } + + PredictionContext operator [](PredictionContext ctx) { + return cache[ctx]; + } + + int get length { + return cache.length; + } +} diff --git a/runtime/Dart/lib/src/atn/src/atn_state.dart b/runtime/Dart/lib/src/atn/src/atn_state.dart new file mode 100644 index 000000000..340e97208 --- /dev/null +++ b/runtime/Dart/lib/src/atn/src/atn_state.dart @@ -0,0 +1,296 @@ +/* + * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ + +import 'dart:developer'; + +import 'package:logging/logging.dart'; + +import '../../interval_set.dart'; +import 'atn.dart'; +import 'transition.dart'; + +var INITIAL_NUM_TRANSITIONS = 4; + +enum StateType { + INVALID_TYPE, + BASIC, + RULE_START, + BLOCK_START, + PLUS_BLOCK_START, + STAR_BLOCK_START, + TOKEN_START, + RULE_STOP, + BLOCK_END, + STAR_LOOP_BACK, + STAR_LOOP_ENTRY, + PLUS_LOOP_BACK, + LOOP_END, +} + +/// The following images show the relation of states and +/// {@link ATNState#transitions} for various grammar constructs. +/// +///
    +/// +///
  • Solid edges marked with an ε indicate a required +/// [EpsilonTransition].
  • +/// +///
  • Dashed edges indicate locations where any transition derived from +/// [Transition] might appear.
  • +/// +///
  • Dashed nodes are place holders for either a sequence of linked +/// [BasicState] states or the inclusion of a block representing a nested +/// construct in one of the forms below.
  • +/// +///
  • Nodes showing multiple outgoing alternatives with a {@code ...} support +/// any number of alternatives (one or more). Nodes without the {@code ...} only +/// support the exact number of alternatives shown in the diagram.
  • +/// +///
+/// +///

Basic Blocks

+/// +///

Rule

+/// +/// +/// +///

Block of 1 or more alternatives

+/// +/// +/// +///

Greedy Loops

+/// +///

Greedy Closure: {@code (...)*}

+/// +/// +/// +///

Greedy Positive Closure: {@code (...)+}

+/// +/// +/// +///

Greedy Optional: {@code (...)?}

+/// +/// +/// +///

Non-Greedy Loops

+/// +///

Non-Greedy Closure: {@code (...)*?}

+/// +/// +/// +///

Non-Greedy Positive Closure: {@code (...)+?}

+/// +/// +/// +///

Non-Greedy Optional: {@code (...)??}

+/// +/// +abstract class ATNState { + static final int INITIAL_NUM_TRANSITIONS = 4; + + static final int INVALID_STATE_NUMBER = -1; + + /// Which ATN are we in? */ + ATN atn; + + int stateNumber = INVALID_STATE_NUMBER; + + int ruleIndex; // at runtime, we don't have Rule objects + + bool epsilonOnlyTransitions = false; + + /// Track the transitions emanating from this ATN state. */ + List transitions = []; + + /// Used to cache lookahead during parsing, not used during construction */ + IntervalSet nextTokenWithinRule; + + @override + int get hashCode { + return stateNumber; + } + + @override + bool operator ==(Object o) { + // are these states same object? + if (o is ATNState) return stateNumber == o.stateNumber; + return false; + } + + bool isNonGreedyExitState() { + return false; + } + + @override + String toString() { + return stateNumber.toString(); + } + + int get numberOfTransitions { + return transitions.length; + } + + void addTransition(Transition e) { + addTransitionAt(transitions.length, e); + } + + void addTransitionAt(int index, Transition e) { + if (transitions.isEmpty) { + epsilonOnlyTransitions = e.isEpsilon; + } else if (epsilonOnlyTransitions != e.isEpsilon) { + log('ATN state $stateNumber has both epsilon and non-epsilon transitions.\n', + level: Level.SEVERE.value); + epsilonOnlyTransitions = false; + } + + var alreadyPresent = false; + for (var t in transitions) { + if (t.target.stateNumber == e.target.stateNumber) { + if (t.label != null && e.label != null && t.label == e.label) { +// System.err.println("Repeated transition upon "+e.label()+" from "+stateNumber+"->"+t.target.stateNumber); + alreadyPresent = true; + break; + } else if (t.isEpsilon && e.isEpsilon) { +// System.err.println("Repeated epsilon transition from "+stateNumber+"->"+t.target.stateNumber); + alreadyPresent = true; + break; + } + } + } + if (!alreadyPresent) { + transitions.insert(index, e); + } + } + + Transition transition(int i) { + return transitions[i]; + } + + void 
setTransition(int i, Transition e) { + transitions[i] = e; + } + + Transition removeTransition(int index) { + return transitions.removeAt(index); + } + + StateType get stateType; + + bool onlyHasEpsilonTransitions() => epsilonOnlyTransitions; + + void setRuleIndex(int ruleIndex) { + this.ruleIndex = ruleIndex; + } +} + +class BasicState extends ATNState { + @override + StateType get stateType => StateType.BASIC; +} + +class RuleStartState extends ATNState { + var stopState; + var isLeftRecursiveRule = false; + + @override + StateType get stateType => StateType.RULE_START; +} + +abstract class DecisionState extends ATNState { + int decision = 0; + bool nonGreedy = false; +} + +// The start of a regular {@code (...)} block. +abstract class BlockStartState extends DecisionState { + BlockEndState endState; +} + +class BasicBlockStartState extends BlockStartState { + @override + StateType get stateType => StateType.BLOCK_START; +} + +/// Start of {@code (A|B|...)+} loop. Technically a decision state, but +/// we don't use for code generation; somebody might need it, so I'm defining +/// it for completeness. In reality, the [PlusLoopbackState] node is the +/// real decision-making note for {@code A+}. +class PlusBlockStartState extends BlockStartState { + PlusLoopbackState loopBackState; + + @override + StateType get stateType => StateType.PLUS_BLOCK_START; +} + +/// The block that begins a closure loop. +class StarBlockStartState extends BlockStartState { + @override + StateType get stateType => StateType.STAR_BLOCK_START; +} + +/// The Tokens rule start state linking to each lexer rule start state */ +class TokensStartState extends DecisionState { + @override + StateType get stateType => StateType.TOKEN_START; +} + +/// The last node in the ATN for a rule, unless that rule is the start symbol. +/// In that case, there is one transition to EOF. Later, we might encode +/// references to all calls to this rule to compute FOLLOW sets for +/// error handling. 
+class RuleStopState extends ATNState { + @override + StateType get stateType => StateType.RULE_STOP; +} + +/// Terminal node of a simple {@code (a|b|c)} block. +class BlockEndState extends ATNState { + BlockStartState startState; + + @override + StateType get stateType => StateType.BLOCK_END; +} + +class StarLoopbackState extends ATNState { + StarLoopEntryState get loopEntryState { + return transition(0).target; + } + + @override + StateType get stateType => StateType.STAR_LOOP_BACK; +} + +class StarLoopEntryState extends DecisionState { + StarLoopbackState loopBackState; + + /// Indicates whether this state can benefit from a precedence DFA during SLL + /// decision making. + /// + ///

This is a computed property that is calculated during ATN deserialization + /// and stored for use in [ParserATNSimulator] and + /// [ParserInterpreter].

+ /// + /// @see DFA#isPrecedenceDfa() + bool isPrecedenceDecision = false; + + @override + StateType get stateType => StateType.STAR_LOOP_ENTRY; +} + +/// Decision state for {@code A+} and {@code (A|B)+}. It has two transitions: +/// one to the loop back to start of the block and one to exit. +class PlusLoopbackState extends DecisionState { + @override + StateType get stateType => StateType.PLUS_LOOP_BACK; +} + +/// Mark the end of a * or + loop. +class LoopEndState extends ATNState { + ATNState loopBackState; + + @override + StateType get stateType => StateType.LOOP_END; +} diff --git a/runtime/Dart/lib/src/atn/src/atn_type.dart b/runtime/Dart/lib/src/atn/src/atn_type.dart new file mode 100644 index 000000000..fa1f56844 --- /dev/null +++ b/runtime/Dart/lib/src/atn/src/atn_type.dart @@ -0,0 +1,14 @@ +/* + * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ + +/// Represents the type of recognizer an ATN applies to. +enum ATNType { + /// A lexer grammar. + LEXER, + + /// A parser grammar. + PARSER +} diff --git a/runtime/Dart/lib/src/atn/src/info.dart b/runtime/Dart/lib/src/atn/src/info.dart new file mode 100644 index 000000000..368c372a3 --- /dev/null +++ b/runtime/Dart/lib/src/atn/src/info.dart @@ -0,0 +1,553 @@ +/* + * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ + +import '../../token_stream.dart'; +import '../../util/bit_set.dart'; +import 'atn_config_set.dart'; +import 'profiling_atn_simulator.dart'; +import 'semantic_context.dart'; + +/// This class represents profiling event information for a context sensitivity. 
+/// Context sensitivities are decisions where a particular input resulted in an +/// SLL conflict, but LL prediction produced a single unique alternative. +/// +///

+/// In some cases, the unique alternative identified by LL prediction is not +/// equal to the minimum represented alternative in the conflicting SLL +/// configuration set. Grammars and inputs which result in this scenario are +/// unable to use {@link PredictionMode#SLL}, which in turn means they cannot use +/// the two-stage parsing strategy to improve parsing performance for that +/// input.

+/// +/// @see ParserATNSimulator#reportContextSensitivity +/// @see ANTLRErrorListener#reportContextSensitivity +/// +/// @since 4.3 +class ContextSensitivityInfo extends DecisionEventInfo { + /// Constructs a new instance of the [ContextSensitivityInfo] class + /// with the specified detailed context sensitivity information. + /// + /// @param decision The decision number + /// @param configs The final configuration set containing the unique + /// alternative identified by full-context prediction + /// @param input The input token stream + /// @param startIndex The start index for the current prediction + /// @param stopIndex The index at which the context sensitivity was + /// identified during full-context prediction + ContextSensitivityInfo(int decision, ATNConfigSet configs, TokenStream input, + int startIndex, int stopIndex) + : super(decision, configs, input, startIndex, stopIndex, true); +} + +/// This is the base class for gathering detailed information about prediction +/// events which occur during parsing. +/// +/// Note that we could record the parser call stack at the time this event +/// occurred but in the presence of left recursive rules, the stack is kind of +/// meaningless. It's better to look at the individual configurations for their +/// individual stacks. Of course that is a [PredictionContext] object +/// not a parse tree node and so it does not have information about the extent +/// (start...stop) of the various subtrees. Examining the stack tops of all +/// configurations provide the return states for the rule invocations. +/// From there you can get the enclosing rule. +/// +/// @since 4.3 +class DecisionEventInfo { + /// The invoked decision number which this event is related to. 
+ /// + /// @see ATN#decisionToState + final int decision; + + /// The configuration set containing additional information relevant to the + /// prediction state when the current event occurred, or null if no + /// additional information is relevant or available. + final ATNConfigSet configs; + + /// The input token stream which is being parsed. + final TokenStream input; + + /// The token index in the input stream at which the current prediction was + /// originally invoked. + final int startIndex; + + /// The token index in the input stream at which the current event occurred. + final int stopIndex; + + /// [true] if the current event occurred during LL prediction; + /// otherwise, [false] if the input occurred during SLL prediction. + final bool fullCtx; + + DecisionEventInfo(this.decision, this.configs, this.input, this.startIndex, + this.stopIndex, this.fullCtx); +} + +/// This class contains profiling gathered for a particular decision. +/// +///

+/// Parsing performance in ANTLR 4 is heavily influenced by both static factors +/// (e.g. the form of the rules in the grammar) and dynamic factors (e.g. the +/// choice of input and the state of the DFA cache at the time profiling +/// operations are started). For best results, gather and use aggregate +/// statistics from a large sample of inputs representing the inputs expected in +/// production before using the results to make changes in the grammar.

+/// +/// @since 4.3 +class DecisionInfo { + /// The decision number, which is an index into {@link ATN#decisionToState}. + final int decision; + + /// The total number of times {@link ParserATNSimulator#adaptivePredict} was + /// invoked for this decision. + int invocations; + + /// The total time spent in {@link ParserATNSimulator#adaptivePredict} for + /// this decision, in nanoseconds. + /// + ///

+ /// The value of this field contains the sum of differential results obtained + /// by {@link System#nanoTime()}, and is not adjusted to compensate for JIT + /// and/or garbage collection overhead. For best accuracy, use a modern JVM + /// implementation that provides precise results from + /// {@link System#nanoTime()}, and perform profiling in a separate process + /// which is warmed up by parsing the input prior to profiling. If desired, + /// call {@link ATNSimulator#clearDFA} to reset the DFA cache to its initial + /// state before starting the profiling measurement pass.

+ int timeInPrediction; + + /// The sum of the lookahead required for SLL prediction for this decision. + /// Note that SLL prediction is used before LL prediction for performance + /// reasons even when {@link PredictionMode#LL} or + /// {@link PredictionMode#LL_EXACT_AMBIG_DETECTION} is used. + int SLL_TotalLook; + + /// Gets the minimum lookahead required for any single SLL prediction to + /// complete for this decision, by reaching a unique prediction, reaching an + /// SLL conflict state, or encountering a syntax error. + int SLL_MinLook; + + /// Gets the maximum lookahead required for any single SLL prediction to + /// complete for this decision, by reaching a unique prediction, reaching an + /// SLL conflict state, or encountering a syntax error. + int SLL_MaxLook; + + /// Gets the [LookaheadEventInfo] associated with the event where the + /// {@link #SLL_MaxLook} value was set. + LookaheadEventInfo SLL_MaxLookEvent; + + /// The sum of the lookahead required for LL prediction for this decision. + /// Note that LL prediction is only used when SLL prediction reaches a + /// conflict state. + int LL_TotalLook; + + /// Gets the minimum lookahead required for any single LL prediction to + /// complete for this decision. An LL prediction completes when the algorithm + /// reaches a unique prediction, a conflict state (for + /// {@link PredictionMode#LL}, an ambiguity state (for + /// {@link PredictionMode#LL_EXACT_AMBIG_DETECTION}, or a syntax error. + int LL_MinLook; + + /// Gets the maximum lookahead required for any single LL prediction to + /// complete for this decision. An LL prediction completes when the algorithm + /// reaches a unique prediction, a conflict state (for + /// {@link PredictionMode#LL}, an ambiguity state (for + /// {@link PredictionMode#LL_EXACT_AMBIG_DETECTION}, or a syntax error. + int LL_MaxLook; + + /// Gets the [LookaheadEventInfo] associated with the event where the + /// {@link #LL_MaxLook} value was set. 
+ LookaheadEventInfo LL_MaxLookEvent; + + /// A collection of [ContextSensitivityInfo] instances describing the + /// context sensitivities encountered during LL prediction for this decision. + /// + /// @see ContextSensitivityInfo + final List contextSensitivities = []; + + /// A collection of [ErrorInfo] instances describing the parse errors + /// identified during calls to {@link ParserATNSimulator#adaptivePredict} for + /// this decision. + /// + /// @see ErrorInfo + final List errors = []; + + /// A collection of [AmbiguityInfo] instances describing the + /// ambiguities encountered during LL prediction for this decision. + /// + /// @see AmbiguityInfo + final List ambiguities = []; + + /// A collection of [PredicateEvalInfo] instances describing the + /// results of evaluating individual predicates during prediction for this + /// decision. + /// + /// @see PredicateEvalInfo + final List predicateEvals = []; + + /// The total number of ATN transitions required during SLL prediction for + /// this decision. An ATN transition is determined by the number of times the + /// DFA does not contain an edge that is required for prediction, resulting + /// in on-the-fly computation of that edge. + /// + ///

+ /// If DFA caching of SLL transitions is employed by the implementation, ATN + /// computation may cache the computed edge for efficient lookup during + /// future parsing of this decision. Otherwise, the SLL parsing algorithm + /// will use ATN transitions exclusively.

+ /// + /// @see #SLL_ATNTransitions + /// @see ParserATNSimulator#computeTargetState + /// @see LexerATNSimulator#computeTargetState + int SLL_ATNTransitions; + + /// The total number of DFA transitions required during SLL prediction for + /// this decision. + /// + ///

If the ATN simulator implementation does not use DFA caching for SLL + /// transitions, this value will be 0.

+ /// + /// @see ParserATNSimulator#getExistingTargetState + /// @see LexerATNSimulator#getExistingTargetState + int SLL_DFATransitions; + + /// Gets the total number of times SLL prediction completed in a conflict + /// state, resulting in fallback to LL prediction. + /// + ///

Note that this value is not related to whether or not + /// {@link PredictionMode#SLL} may be used successfully with a particular + /// grammar. If the ambiguity resolution algorithm applied to the SLL + /// conflicts for this decision produce the same result as LL prediction for + /// this decision, {@link PredictionMode#SLL} would produce the same overall + /// parsing result as {@link PredictionMode#LL}.

+ int LL_Fallback; + + /// The total number of ATN transitions required during LL prediction for + /// this decision. An ATN transition is determined by the number of times the + /// DFA does not contain an edge that is required for prediction, resulting + /// in on-the-fly computation of that edge. + /// + ///

+ /// If DFA caching of LL transitions is employed by the implementation, ATN + /// computation may cache the computed edge for efficient lookup during + /// future parsing of this decision. Otherwise, the LL parsing algorithm will + /// use ATN transitions exclusively.

+ /// + /// @see #LL_DFATransitions + /// @see ParserATNSimulator#computeTargetState + /// @see LexerATNSimulator#computeTargetState + int LL_ATNTransitions; + + /// The total number of DFA transitions required during LL prediction for + /// this decision. + /// + ///

If the ATN simulator implementation does not use DFA caching for LL + /// transitions, this value will be 0.

+ /// + /// @see ParserATNSimulator#getExistingTargetState + /// @see LexerATNSimulator#getExistingTargetState + int LL_DFATransitions; + + /// Constructs a new instance of the [DecisionInfo] class to contain + /// statistics for a particular decision. + /// + /// @param decision The decision number + DecisionInfo(this.decision); + + @override + String toString() { + return '{' + 'decision=$decision' + ', contextSensitivities=${contextSensitivities.length}' + ', errors=${errors.length}' + ', ambiguities=${ambiguities.length}' + ', SLL_lookahead=$SLL_TotalLook' + ', SLL_ATNTransitions=$SLL_ATNTransitions, SLL_DFATransitions=$SLL_DFATransitions, LL_Fallback=$LL_Fallback, LL_lookahead=$LL_TotalLook, LL_ATNTransitions=$LL_ATNTransitions}'; + } +} + +/// This class represents profiling event information for an ambiguity. +/// Ambiguities are decisions where a particular input resulted in an SLL +/// conflict, followed by LL prediction also reaching a conflict state +/// (indicating a true ambiguity in the grammar). +/// +///

+/// This event may be reported during SLL prediction in cases where the +/// conflicting SLL configuration set provides sufficient information to +/// determine that the SLL conflict is truly an ambiguity. For example, if none +/// of the ATN configurations in the conflicting SLL configuration set have +/// traversed a global follow transition (i.e. +/// {@link ATNConfig#reachesIntoOuterContext} is 0 for all configurations), then +/// the result of SLL prediction for that input is known to be equivalent to the +/// result of LL prediction for that input.

+/// +///

+/// In some cases, the minimum represented alternative in the conflicting LL +/// configuration set is not equal to the minimum represented alternative in the +/// conflicting SLL configuration set. Grammars and inputs which result in this +/// scenario are unable to use {@link PredictionMode#SLL}, which in turn means +/// they cannot use the two-stage parsing strategy to improve parsing performance +/// for that input.

+/// +/// @see ParserATNSimulator#reportAmbiguity +/// @see ANTLRErrorListener#reportAmbiguity +/// +/// @since 4.3 +class AmbiguityInfo extends DecisionEventInfo { + /// The set of alternative numbers for this decision event that lead to a valid parse. */ + BitSet ambigAlts; + + /// Constructs a new instance of the [AmbiguityInfo] class with the + /// specified detailed ambiguity information. + /// + /// @param decision The decision number + /// @param configs The final configuration set identifying the ambiguous + /// alternatives for the current input + /// @param ambigAlts The set of alternatives in the decision that lead to a valid parse. + /// The predicted alt is the min(ambigAlts) + /// @param input The input token stream + /// @param startIndex The start index for the current prediction + /// @param stopIndex The index at which the ambiguity was identified during + /// prediction + /// @param fullCtx [true] if the ambiguity was identified during LL + /// prediction; otherwise, [false] if the ambiguity was identified + /// during SLL prediction + AmbiguityInfo(int decision, ATNConfigSet configs, this.ambigAlts, + TokenStream input, int startIndex, int stopIndex, bool fullCtx) + : super(decision, configs, input, startIndex, stopIndex, fullCtx); +} + +/// This class represents profiling event information for a syntax error +/// identified during prediction. Syntax errors occur when the prediction +/// algorithm is unable to identify an alternative which would lead to a +/// successful parse. +/// +/// @see Parser#notifyErrorListeners(Token, String, RecognitionException) +/// @see ANTLRErrorListener#syntaxError +/// +/// @since 4.3 +class ErrorInfo extends DecisionEventInfo { + /// Constructs a new instance of the [ErrorInfo] class with the + /// specified detailed syntax error information. 
+ /// + /// @param decision The decision number + /// @param configs The final configuration set reached during prediction + /// prior to reaching the {@link ATNSimulator#ERROR} state + /// @param input The input token stream + /// @param startIndex The start index for the current prediction + /// @param stopIndex The index at which the syntax error was identified + /// @param fullCtx [true] if the syntax error was identified during LL + /// prediction; otherwise, [false] if the syntax error was identified + /// during SLL prediction + ErrorInfo(int decision, ATNConfigSet configs, TokenStream input, + int startIndex, int stopIndex, bool fullCtx) + : super(decision, configs, input, startIndex, stopIndex, fullCtx); +} + +/// This class represents profiling event information for tracking the lookahead +/// depth required in order to make a prediction. +/// +/// @since 4.3 +class LookaheadEventInfo extends DecisionEventInfo { + /// The alternative chosen by adaptivePredict(), not necessarily + /// the outermost alt shown for a rule; left-recursive rules have + /// user-level alts that differ from the rewritten rule with a (...) block + /// and a (..)* loop. + int predictedAlt; + + /// Constructs a new instance of the [LookaheadEventInfo] class with + /// the specified detailed lookahead information. 
+ /// + /// @param decision The decision number + /// @param configs The final configuration set containing the necessary + /// information to determine the result of a prediction, or null if + /// the final configuration set is not available + /// @param input The input token stream + /// @param startIndex The start index for the current prediction + /// @param stopIndex The index at which the prediction was finally made + /// @param fullCtx [true] if the current lookahead is part of an LL + /// prediction; otherwise, [false] if the current lookahead is part of + /// an SLL prediction + LookaheadEventInfo(int decision, ATNConfigSet configs, this.predictedAlt, + TokenStream input, int startIndex, int stopIndex, bool fullCtx) + : super(decision, configs, input, startIndex, stopIndex, fullCtx); +} + +/// This class represents profiling event information for semantic predicate +/// evaluations which occur during prediction. +/// +/// @see ParserATNSimulator#evalSemanticContext +/// +/// @since 4.3 +class PredicateEvalInfo extends DecisionEventInfo { + /// The semantic context which was evaluated. + final SemanticContext semctx; + + /// The alternative number for the decision which is guarded by the semantic + /// context {@link #semctx}. Note that other ATN + /// configurations may predict the same alternative which are guarded by + /// other semantic contexts and/or {@link SemanticContext#NONE}. + final int predictedAlt; + + /// The result of evaluating the semantic context {@link #semctx}. + final bool evalResult; + + /// Constructs a new instance of the [PredicateEvalInfo] class with the + /// specified detailed predicate evaluation information. + /// + /// @param decision The decision number + /// @param input The input token stream + /// @param startIndex The start index for the current prediction + /// @param stopIndex The index at which the predicate evaluation was + /// triggered. 
Note that the input stream may be reset to other positions for + /// the actual evaluation of individual predicates. + /// @param semctx The semantic context which was evaluated + /// @param evalResult The results of evaluating the semantic context + /// @param predictedAlt The alternative number for the decision which is + /// guarded by the semantic context [semctx]. See {@link #predictedAlt} + /// for more information. + /// @param fullCtx [true] if the semantic context was + /// evaluated during LL prediction; otherwise, [false] if the semantic + /// context was evaluated during SLL prediction + /// + /// @see ParserATNSimulator#evalSemanticContext(SemanticContext, ParserRuleContext, int, boolean) + /// @see SemanticContext#eval(Recognizer, RuleContext) + PredicateEvalInfo( + int decision, + TokenStream input, + int startIndex, + int stopIndex, + this.semctx, + this.evalResult, + this.predictedAlt, + bool fullCtx) + : super(decision, ATNConfigSet(), input, startIndex, stopIndex, fullCtx); +} + +/// This class provides access to specific and aggregate statistics gathered +/// during profiling of a parser. +/// +/// @since 4.3 +class ParseInfo { + final ProfilingATNSimulator atnSimulator; + + ParseInfo(this.atnSimulator); + + /// Gets an array of [DecisionInfo] instances containing the profiling + /// information gathered for each decision in the ATN. + /// + /// @return An array of [DecisionInfo] instances, indexed by decision + /// number. + List get decisionInfo { + return atnSimulator.decisionInfo; + } + + /// Gets the decision numbers for decisions that required one or more + /// full-context predictions during parsing. These are decisions for which + /// {@link DecisionInfo#LL_Fallback} is non-zero. + /// + /// @return A list of decision numbers which required one or more + /// full-context predictions during parsing. 
+ List get llDecisions { + final decisions = atnSimulator.decisionInfo; + final LL = []; + for (var i = 0; i < decisions.length; i++) { + final fallBack = decisions[i].LL_Fallback; + if (fallBack > 0) LL.add(i); + } + return LL; + } + + /// Gets the total time spent during prediction across all decisions made + /// during parsing. This value is the sum of + /// {@link DecisionInfo#timeInPrediction} for all decisions. + int get totalTimeInPrediction { + final decisions = atnSimulator.decisionInfo; + var t = 0; + for (var i = 0; i < decisions.length; i++) { + t += decisions[i].timeInPrediction; + } + return t; + } + + /// Gets the total number of SLL lookahead operations across all decisions + /// made during parsing. This value is the sum of + /// {@link DecisionInfo#SLL_TotalLook} for all decisions. + int get totalSLLLookaheadOps { + final decisions = atnSimulator.decisionInfo; + var k = 0; + for (var i = 0; i < decisions.length; i++) { + k += decisions[i].SLL_TotalLook; + } + return k; + } + + /// Gets the total number of LL lookahead operations across all decisions + /// made during parsing. This value is the sum of + /// {@link DecisionInfo#LL_TotalLook} for all decisions. + int get totalLLLookaheadOps { + final decisions = atnSimulator.decisionInfo; + var k = 0; + for (var i = 0; i < decisions.length; i++) { + k += decisions[i].LL_TotalLook; + } + return k; + } + + /// Gets the total number of ATN lookahead operations for SLL prediction + /// across all decisions made during parsing. + int get totalSLLATNLookaheadOps { + final decisions = atnSimulator.decisionInfo; + var k = 0; + for (var i = 0; i < decisions.length; i++) { + k += decisions[i].SLL_ATNTransitions; + } + return k; + } + + /// Gets the total number of ATN lookahead operations for LL prediction + /// across all decisions made during parsing. 
+ int get totalLLATNLookaheadOps { + final decisions = atnSimulator.decisionInfo; + var k = 0; + for (var i = 0; i < decisions.length; i++) { + k += decisions[i].LL_ATNTransitions; + } + return k; + } + + /// Gets the total number of ATN lookahead operations for SLL and LL + /// prediction across all decisions made during parsing. + /// + ///

+ /// This value is the sum of {@link #getTotalSLLATNLookaheadOps} and + /// {@link #getTotalLLATNLookaheadOps}.

+ int get totalATNLookaheadOps { + final decisions = atnSimulator.decisionInfo; + var k = 0; + for (var i = 0; i < decisions.length; i++) { + k += decisions[i].SLL_ATNTransitions; + k += decisions[i].LL_ATNTransitions; + } + return k; + } + + /// Gets the total number of DFA states stored in the DFA cache for all + /// decisions in the ATN. + int get dfaSize { + var n = 0; + final decisionToDFA = atnSimulator.decisionToDFA; + for (var i = 0; i < decisionToDFA.length; i++) { + n += getDFASizeAt(i); + } + return n; + } + + /// Gets the total number of DFA states stored in the DFA cache for a + /// particular decision. + int getDFASizeAt(int decision) { + final decisionToDFA = atnSimulator.decisionToDFA[decision]; + return decisionToDFA.states.length; + } +} diff --git a/runtime/Dart/lib/src/atn/src/lexer_action.dart b/runtime/Dart/lib/src/atn/src/lexer_action.dart new file mode 100644 index 000000000..a8630671e --- /dev/null +++ b/runtime/Dart/lib/src/atn/src/lexer_action.dart @@ -0,0 +1,601 @@ +/* + * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ + +import '../../lexer.dart'; +import '../../util/murmur_hash.dart'; + +/// Represents the serialization type of a [LexerAction]. +/// +/// @since 4.2 +enum LexerActionType { + /// The type of a [LexerChannelAction] action. + CHANNEL, + /// The type of a [LexerCustomAction] action. + CUSTOM, + /// The type of a [LexerModeAction] action. + MODE, + /// The type of a [LexerMoreAction] action. + MORE, + /// The type of a [LexerPopModeAction] action. + POP_MODE, + /// The type of a [LexerPushModeAction] action. + PUSH_MODE, + /// The type of a [LexerSkipAction] action. + SKIP, + /// The type of a [LexerTypeAction] action. + TYPE, +} + +/// Represents a single action which can be executed following the successful +/// match of a lexer rule. 
Lexer actions are used for both embedded action syntax +/// and ANTLR 4's new lexer command syntax. +/// +/// @since 4.2 +abstract class LexerAction { + /// Gets the serialization type of the lexer action. + /// + /// @return The serialization type of the lexer action. + LexerActionType get actionType; + + /// Gets whether the lexer action is position-dependent. Position-dependent + /// actions may have different semantics depending on the [CharStream] + /// index at the time the action is executed. + /// + ///

Many lexer commands, including [type], [skip], and + /// [more], do not check the input index during their execution. + /// Actions like this are position-independent, and may be stored more + /// efficiently as part of the {@link LexerATNConfig#lexerActionExecutor}.

+ /// + /// @return [true] if the lexer action semantics can be affected by the + /// position of the input [CharStream] at the time it is executed; + /// otherwise, [false]. + bool get isPositionDependent; + + /// Execute the lexer action in the context of the specified [Lexer]. + /// + ///

For position-dependent actions, the input stream must already be + /// positioned correctly prior to calling this method.

+ /// + /// @param lexer The lexer instance. + void execute(Lexer lexer); +} + +/// Implements the [channel] lexer action by calling +/// {@link Lexer#setChannel} with the assigned channel. +/// +/// @since 4.2 +class LexerChannelAction implements LexerAction { + /// Gets the channel to use for the [Token] created by the lexer. + /// + /// @return The channel to use for the [Token] created by the lexer. + final int channel; + + /// Constructs a new [channel] action with the specified channel value. + /// @param channel The channel value to pass to {@link Lexer#setChannel}. + LexerChannelAction(this.channel); + + @override + LexerActionType get actionType => LexerActionType.CHANNEL; + + @override + bool get isPositionDependent => false; + + /// {@inheritDoc} + /// + ///

This action is implemented by calling {@link Lexer#setChannel} with the + /// value provided by {@link #getChannel}.

+ @override + void execute(Lexer lexer) { + lexer.channel = channel; + } + + @override + int get hashCode { + var hash = MurmurHash.initialize(); + hash = MurmurHash.update(hash, actionType.index); + hash = MurmurHash.update(hash, channel); + return MurmurHash.finish(hash, 2); + } + + @override + bool operator ==(Object obj) { + if (identical(obj, this)) { + return true; + } else if (obj is LexerChannelAction) { + return channel == obj.channel; + } + + return false; + } + + @override + String toString() { + return 'channel($channel)'; + } +} + +/// Executes a custom lexer action by calling {@link Recognizer#action} with the +/// rule and action indexes assigned to the custom action. The implementation of +/// a custom action is added to the generated code for the lexer in an override +/// of {@link Recognizer#action} when the grammar is compiled. +/// +///

This class may represent embedded actions created with the {...} +/// syntax in ANTLR 4, as well as actions created for lexer commands where the +/// command argument could not be evaluated when the grammar was compiled.

+/// +/// @since 4.2 +class LexerCustomAction implements LexerAction { + /// Gets the rule index to use for calls to {@link Recognizer#action}. + /// + /// @return The rule index for the custom action. + final int ruleIndex; + + /// Gets the action index to use for calls to {@link Recognizer#action}. + /// + /// @return The action index for the custom action. + final int actionIndex; + + /// Constructs a custom lexer action with the specified rule and action + /// indexes. + /// + /// @param ruleIndex The rule index to use for calls to + /// {@link Recognizer#action}. + /// @param actionIndex The action index to use for calls to + /// {@link Recognizer#action}. + LexerCustomAction(this.ruleIndex, this.actionIndex); + + /// {@inheritDoc} + /// + /// @return This method returns {@link LexerActionType#CUSTOM}. + + @override + LexerActionType get actionType => LexerActionType.CUSTOM; + + /// Gets whether the lexer action is position-dependent. Position-dependent + /// actions may have different semantics depending on the [CharStream] + /// index at the time the action is executed. + /// + ///

Custom actions are position-dependent since they may represent a + /// user-defined embedded action which makes calls to methods like + /// {@link Lexer#getText}.

+ /// + /// @return This method returns [true]. + + @override + bool get isPositionDependent => true; + + /// {@inheritDoc} + /// + ///

Custom actions are implemented by calling {@link Lexer#action} with the + /// appropriate rule and action indexes.

+ + @override + void execute(Lexer lexer) { + lexer.action(null, ruleIndex, actionIndex); + } + + @override + int get hashCode { + var hash = MurmurHash.initialize(); + hash = MurmurHash.update(hash, actionType.index); + hash = MurmurHash.update(hash, ruleIndex); + hash = MurmurHash.update(hash, actionIndex); + return MurmurHash.finish(hash, 3); + } + + @override + bool operator ==(Object obj) { + if (identical(obj, this)) { + return true; + } else if (obj is LexerCustomAction) { + return ruleIndex == obj.ruleIndex && actionIndex == obj.actionIndex; + } + return false; + } +} + +/// Implements the [mode] lexer action by calling {@link Lexer#mode} with +/// the assigned mode. +/// +/// @since 4.2 +class LexerModeAction implements LexerAction { + /// Get the lexer mode this action should transition the lexer to. + /// + /// @return The lexer mode for this [mode] command. + final int mode; + + /// Constructs a new [mode] action with the specified mode value. + /// @param mode The mode value to pass to {@link Lexer#mode}. + LexerModeAction(this.mode); + + /// {@inheritDoc} + /// @return This method returns {@link LexerActionType#MODE}. + + @override + LexerActionType get actionType => LexerActionType.MODE; + + /// {@inheritDoc} + /// @return This method returns [false]. + + @override + bool get isPositionDependent => false; + + /// {@inheritDoc} + /// + ///

This action is implemented by calling {@link Lexer#mode} with the + /// value provided by {@link #getMode}.

+ + @override + void execute(Lexer lexer) { + lexer.mode(mode); + } + + @override + int get hashCode { + var hash = MurmurHash.initialize(); + hash = MurmurHash.update(hash, actionType.index); + hash = MurmurHash.update(hash, mode); + return MurmurHash.finish(hash, 2); + } + + @override + bool operator ==(Object obj) { + if (identical(obj, this)) { + return true; + } else if (obj is LexerModeAction) { + return mode == obj.mode; + } + return false; + } + + @override + String toString() { + return 'mode($mode)'; + } +} + +/// Implements the [more] lexer action by calling {@link Lexer#more}. +/// +///

The [more] command does not have any parameters, so this action is +/// implemented as a singleton instance exposed by {@link #INSTANCE}.

+/// +/// @since 4.2 +class LexerMoreAction implements LexerAction { + /// Provides a singleton instance of this parameterless lexer action. + static final LexerMoreAction INSTANCE = LexerMoreAction(); + + /// {@inheritDoc} + /// @return This method returns {@link LexerActionType#MORE}. + @override + LexerActionType get actionType => LexerActionType.MORE; + + /// {@inheritDoc} + /// @return This method returns [false]. + + @override + bool get isPositionDependent => false; + + /// {@inheritDoc} + /// + ///

This action is implemented by calling {@link Lexer#more}.

+ + @override + void execute(Lexer lexer) { + lexer.more(); + } + + @override + int get hashCode { + var hash = MurmurHash.initialize(); + hash = MurmurHash.update(hash, actionType.index); + return MurmurHash.finish(hash, 1); + } + + @override + bool operator ==(Object obj) { + return identical(obj, this); + } + + @override + String toString() { + return 'more'; + } +} + +/// Implements the [popMode] lexer action by calling {@link Lexer#popMode}. +/// +///

The [popMode] command does not have any parameters, so this action is +/// implemented as a singleton instance exposed by {@link #INSTANCE}.

+/// +/// @since 4.2 +class LexerPopModeAction implements LexerAction { + /// Provides a singleton instance of this parameterless lexer action. + static final LexerPopModeAction INSTANCE = LexerPopModeAction(); + + /// {@inheritDoc} + /// @return This method returns {@link LexerActionType#POP_MODE}. + + @override + LexerActionType get actionType => LexerActionType.POP_MODE; + + /// {@inheritDoc} + /// @return This method returns [false]. + + @override + bool get isPositionDependent => false; + + /// {@inheritDoc} + /// + ///

This action is implemented by calling {@link Lexer#popMode}.

+ + @override + void execute(Lexer lexer) { + lexer.popMode(); + } + + @override + int get hashCode { + var hash = MurmurHash.initialize(); + hash = MurmurHash.update(hash, actionType.index); + return MurmurHash.finish(hash, 1); + } + + @override + bool operator ==(Object obj) { + return identical(obj, this); + } + + @override + String toString() { + return 'popMode'; + } +} + +/// Implements the [pushMode] lexer action by calling +/// {@link Lexer#pushMode} with the assigned mode. +/// +/// @since 4.2 +class LexerPushModeAction implements LexerAction { + /// Get the lexer mode this action should transition the lexer to. + /// + /// @return The lexer mode for this [pushMode] command. + final int mode; + + /// Constructs a new [pushMode] action with the specified mode value. + /// @param mode The mode value to pass to {@link Lexer#pushMode}. + LexerPushModeAction(this.mode); + + /// {@inheritDoc} + /// @return This method returns {@link LexerActionType#PUSH_MODE}. + + @override + LexerActionType get actionType => LexerActionType.PUSH_MODE; + + /// {@inheritDoc} + /// @return This method returns [false]. + + @override + bool get isPositionDependent => false; + + /// {@inheritDoc} + /// + ///

This action is implemented by calling {@link Lexer#pushMode} with the + /// value provided by {@link #getMode}.

+ + @override + void execute(Lexer lexer) { + lexer.pushMode(mode); + } + + @override + int get hashCode { + var hash = MurmurHash.initialize(); + hash = MurmurHash.update(hash, actionType.index); + hash = MurmurHash.update(hash, mode); + return MurmurHash.finish(hash, 2); + } + + @override + bool operator ==(Object obj) { + if (identical(obj, this)) { + return true; + } else if (obj is LexerPushModeAction) { + return mode == obj.mode; + } + return false; + } + + @override + String toString() { + return 'pushMode($mode)'; + } +} + +/// Implements the [skip] lexer action by calling {@link Lexer#skip}. +/// +///

The [skip] command does not have any parameters, so this action is +/// implemented as a singleton instance exposed by {@link #INSTANCE}.

+/// +/// @since 4.2 +class LexerSkipAction implements LexerAction { + /// Provides a singleton instance of this parameterless lexer action. + static final LexerSkipAction INSTANCE = LexerSkipAction(); + + /// {@inheritDoc} + /// @return This method returns {@link LexerActionType#SKIP}. + + @override + LexerActionType get actionType => LexerActionType.SKIP; + + /// {@inheritDoc} + /// @return This method returns [false]. + + @override + bool get isPositionDependent => false; + + /// {@inheritDoc} + /// + ///

This action is implemented by calling {@link Lexer#skip}.

+ @override + void execute(Lexer lexer) { + lexer.skip(); + } + + @override + int get hashCode { + var hash = MurmurHash.initialize(); + hash = MurmurHash.update(hash, actionType.index); + return MurmurHash.finish(hash, 1); + } + + @override + bool operator ==(Object obj) { + return identical(obj, this); + } + + @override + String toString() { + return 'skip'; + } +} + +/// Implements the [type] lexer action by calling {@link Lexer#setType} +/// with the assigned type. +/// +/// @since 4.2 +class LexerTypeAction implements LexerAction { + /// Gets the type to assign to a token created by the lexer. + /// @return The type to assign to a token created by the lexer. + final int type; + + /// Constructs a new [type] action with the specified token type value. + /// @param type The type to assign to the token using {@link Lexer#setType}. + LexerTypeAction(this.type); + + /// {@inheritDoc} + /// @return This method returns {@link LexerActionType#TYPE}. + @override + LexerActionType get actionType => LexerActionType.TYPE; + + /// {@inheritDoc} + /// @return This method returns [false]. + + @override + bool get isPositionDependent => false; + + /// {@inheritDoc} + /// + ///

This action is implemented by calling {@link Lexer#setType} with the + /// value provided by {@link #getType}.

+ + @override + void execute(Lexer lexer) { + lexer.type = type; + } + + @override + int get hashCode { + var hash = MurmurHash.initialize(); + hash = MurmurHash.update(hash, actionType.index); + hash = MurmurHash.update(hash, type); + return MurmurHash.finish(hash, 2); + } + + @override + bool operator ==(Object obj) { + if (identical(obj, this)) { + return true; + } else if (obj is LexerTypeAction) { + return type == obj.type; + } + return false; + } + + @override + String toString() { + return 'type($type)'; + } +} + +/// This implementation of [LexerAction] is used for tracking input offsets +/// for position-dependent actions within a [LexerActionExecutor]. +/// +///

This action is not serialized as part of the ATN, and is only required for +/// position-dependent lexer actions which appear at a location other than the +/// end of a rule. For more information about DFA optimizations employed for +/// lexer actions, see {@link LexerActionExecutor#append} and +/// {@link LexerActionExecutor#fixOffsetBeforeMatch}.

+/// +/// @since 4.2 +class LexerIndexedCustomAction implements LexerAction { + /// Gets the location in the input [CharStream] at which the lexer + /// action should be executed. The value is interpreted as an offset relative + /// to the token start index. + /// + /// @return The location in the input [CharStream] at which the lexer + /// action should be executed. + final int offset; + + /// Gets the lexer action to execute. + /// + /// @return A [LexerAction] object which executes the lexer action. + final LexerAction action; + + /// Constructs a new indexed custom action by associating a character offset + /// with a [LexerAction]. + /// + ///

Note: This class is only required for lexer actions for which + /// {@link LexerAction#isPositionDependent} returns [true].

+ /// + /// @param offset The offset into the input [CharStream], relative to + /// the token start index, at which the specified lexer action should be + /// executed. + /// @param action The lexer action to execute at a particular offset in the + /// input [CharStream]. + LexerIndexedCustomAction(this.offset, this.action); + + /// {@inheritDoc} + /// + /// @return This method returns the result of calling {@link #getActionType} + /// on the [LexerAction] returned by {@link #getAction}. + @override + LexerActionType get actionType => action.actionType; + + /// {@inheritDoc} + /// @return This method returns [true]. + + @override + bool get isPositionDependent => true; + + /// {@inheritDoc} + /// + ///

This method calls {@link #execute} on the result of {@link #getAction} + /// using the provided [lexer].

+ + @override + void execute(Lexer lexer) { +// assume the input stream position was properly set by the calling code + action.execute(lexer); + } + + @override + int get hashCode { + var hash = MurmurHash.initialize(); + hash = MurmurHash.update(hash, offset); + hash = MurmurHash.update(hash, action); + return MurmurHash.finish(hash, 2); + } + + @override + bool operator ==(Object obj) { + if (obj == this) { + return true; + } else if (obj is LexerIndexedCustomAction) { + return offset == obj.offset && action == obj.action; + } + return false; + } +} diff --git a/runtime/Dart/lib/src/atn/src/lexer_action_executor.dart b/runtime/Dart/lib/src/atn/src/lexer_action_executor.dart new file mode 100644 index 000000000..44957122f --- /dev/null +++ b/runtime/Dart/lib/src/atn/src/lexer_action_executor.dart @@ -0,0 +1,167 @@ +/* + * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ + +import 'package:collection/collection.dart'; + +import '../../input_stream.dart'; +import '../../lexer.dart'; +import '../../util/murmur_hash.dart'; +import 'lexer_action.dart'; + +/// Represents an executor for a sequence of lexer actions which traversed during +/// the matching operation of a lexer rule (token). +/// +///

The executor tracks position information for position-dependent lexer actions +/// efficiently, ensuring that actions appearing only at the end of the rule do +/// not cause bloating of the [DFA] created for the lexer.

+/// +/// @since 4.2 +class LexerActionExecutor { + /// Gets the lexer actions to be executed by this executor. + /// @return The lexer actions to be executed by this executor. + final List lexerActions; + + /// Caches the result of {@link #hashCode} since the hash code is an element + /// of the performance-critical {@link LexerATNConfig#hashCode} operation. + @override + int get hashCode { + var hash = MurmurHash.initialize(); + for (var lexerAction in lexerActions) { + hash = MurmurHash.update(hash, lexerAction); + } + + return MurmurHash.finish(hash, lexerActions.length); + } + + /// Constructs an executor for a sequence of [LexerAction] actions. + /// @param lexerActions The lexer actions to execute. + LexerActionExecutor(this.lexerActions); + + /// Creates a [LexerActionExecutor] which executes the actions for + /// the input [lexerActionExecutor] followed by a specified + /// [lexerAction]. + /// + /// @param lexerActionExecutor The executor for actions already traversed by + /// the lexer while matching a token within a particular + /// [LexerATNConfig]. If this is null, the method behaves as + /// though it were an empty executor. + /// @param lexerAction The lexer action to execute after the actions + /// specified in [lexerActionExecutor]. + /// + /// @return A [LexerActionExecutor] for executing the combine actions + /// of [lexerActionExecutor] and [lexerAction]. + static LexerActionExecutor append( + LexerActionExecutor lexerActionExecutor, LexerAction lexerAction) { + if (lexerActionExecutor == null) { + return LexerActionExecutor([lexerAction]); + } + + final lexerActions = + List.from(lexerActionExecutor.lexerActions); + lexerActions.add(lexerAction); + return LexerActionExecutor(lexerActions); + } + + /// Creates a [LexerActionExecutor] which encodes the current offset + /// for position-dependent lexer actions. + /// + ///

Normally, when the executor encounters lexer actions where + /// {@link LexerAction#isPositionDependent} returns [true], it calls + /// {@link IntStream#seek} on the input [CharStream] to set the input + /// position to the end of the current token. This behavior provides + /// for efficient DFA representation of lexer actions which appear at the end + /// of a lexer rule, even when the lexer rule matches a variable number of + /// characters.

+ /// + ///

Prior to traversing a match transition in the ATN, the current offset + /// from the token start index is assigned to all position-dependent lexer + /// actions which have not already been assigned a fixed offset. By storing + /// the offsets relative to the token start index, the DFA representation of + /// lexer actions which appear in the middle of tokens remains efficient due + /// to sharing among tokens of the same length, regardless of their absolute + /// position in the input stream.

+ /// + ///

If the current executor already has offsets assigned to all + /// position-dependent lexer actions, the method returns [this].

+ /// + /// @param offset The current offset to assign to all position-dependent + /// lexer actions which do not already have offsets assigned. + /// + /// @return A [LexerActionExecutor] which stores input stream offsets + /// for all position-dependent lexer actions. + LexerActionExecutor fixOffsetBeforeMatch(int offset) { + List updatedLexerActions; + for (var i = 0; i < lexerActions.length; i++) { + if (lexerActions[i].isPositionDependent && + !(lexerActions[i] is LexerIndexedCustomAction)) { + updatedLexerActions ??= List.from(lexerActions); + + updatedLexerActions[i] = + LexerIndexedCustomAction(offset, lexerActions[i]); + } + } + + if (updatedLexerActions == null) { + return this; + } + + return LexerActionExecutor(updatedLexerActions); + } + + /// Execute the actions encapsulated by this executor within the context of a + /// particular [Lexer]. + /// + ///

This method calls {@link IntStream#seek} to set the position of the + /// [input] [CharStream] prior to calling + /// {@link LexerAction#execute} on a position-dependent action. Before the + /// method returns, the input position will be restored to the same position + /// it was in when the method was invoked.

+ /// + /// @param lexer The lexer instance. + /// @param input The input stream which is the source for the current token. + /// When this method is called, the current {@link IntStream#index} for + /// [input] should be the start of the following token, i.e. 1 + /// character past the end of the current token. + /// @param startIndex The token start index. This value may be passed to + /// {@link IntStream#seek} to set the [input] position to the beginning + /// of the token. + void execute(Lexer lexer, CharStream input, int startIndex) { + var requiresSeek = false; + final stopIndex = input.index; + try { + for (var lexerAction in lexerActions) { + if (lexerAction is LexerIndexedCustomAction) { + final offset = (lexerAction as LexerIndexedCustomAction).offset; + input.seek(startIndex + offset); + lexerAction = (lexerAction as LexerIndexedCustomAction).action; + requiresSeek = (startIndex + offset) != stopIndex; + } else if (lexerAction.isPositionDependent) { + input.seek(stopIndex); + requiresSeek = false; + } + + lexerAction.execute(lexer); + } + } finally { + if (requiresSeek) { + input.seek(stopIndex); + } + } + } + + @override + bool operator ==(Object obj) { + if (identical(obj, this)) { + return true; + } else if (!(obj is LexerActionExecutor)) { + return false; + } + + LexerActionExecutor other = obj; + return hashCode == other.hashCode && + ListEquality().equals(lexerActions, other.lexerActions); + } +} diff --git a/runtime/Dart/lib/src/atn/src/lexer_atn_simulator.dart b/runtime/Dart/lib/src/atn/src/lexer_atn_simulator.dart new file mode 100644 index 000000000..5762d84c8 --- /dev/null +++ b/runtime/Dart/lib/src/atn/src/lexer_atn_simulator.dart @@ -0,0 +1,731 @@ +/* + * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. 
+ */ + +import 'dart:developer'; + +import 'package:logging/logging.dart'; + +import '../../dfa/dfa.dart'; +import '../../error/error.dart'; +import '../../input_stream.dart'; +import '../../interval_set.dart'; +import '../../lexer.dart'; +import '../../prediction_context.dart'; +import '../../token.dart'; +import 'atn.dart'; +import 'atn_config.dart'; +import 'atn_config_set.dart'; +import 'atn_simulator.dart'; +import 'atn_state.dart'; +import 'lexer_action_executor.dart'; +import 'transition.dart'; + +/// When we hit an accept state in either the DFA or the ATN, we +/// have to notify the character stream to start buffering characters +/// via {@link IntStream#mark} and record the current state. The current sim state +/// includes the current index into the input, the current line, +/// and current character position in that line. Note that the Lexer is +/// tracking the starting line and characterization of the token. These +/// variables track the "state" of the simulator when it hits an accept state. +/// +///

We track these variables separately for the DFA and ATN simulation +/// because the DFA simulation often has to fail over to the ATN +/// simulation. If the ATN simulation fails, we need the DFA to fall +/// back to its previously accepted state, if any. If the ATN succeeds, +/// then the ATN does the accept and the DFA simulator that invoked it +/// can simply return the predicted token type.

+class SimState { + int index = -1; + int line = 0; + int charPos = -1; + + DFAState dfaState; + + void reset() { + index = -1; + line = 0; + charPos = -1; + dfaState = null; + } +} + +/// "dup" of ParserInterpreter */ +class LexerATNSimulator extends ATNSimulator { + static final bool debug = true; + static final bool dfa_debug = true; + + static final int MIN_DFA_EDGE = 0; + static final int MAX_DFA_EDGE = 127; // forces unicode to stay in ATN + + final Lexer recog; + + /// The current token's starting index into the character stream. + /// Shared across DFA to ATN simulation in case the ATN fails and the + /// DFA did not have a previous accept state. In this case, we use the + /// ATN-generated exception object. + int startIndex = -1; + + /// line number 1..n within the input */ + int line = 1; + + /// The index of the character relative to the beginning of the line 0..n-1 */ + int charPositionInLine = 0; + + List decisionToDFA; + int mode = Lexer.DEFAULT_MODE; + + /// Used during DFA/ATN exec to record the most recent accept configuration info */ + + final SimState prevAccept = SimState(); + + LexerATNSimulator(ATN atn, this.decisionToDFA, + PredictionContextCache sharedContextCache, + {this.recog}) + : super(atn, sharedContextCache); + + void copyState(LexerATNSimulator simulator) { + charPositionInLine = simulator.charPositionInLine; + line = simulator.line; + mode = simulator.mode; + startIndex = simulator.startIndex; + } + + int match(CharStream input, int mode) { + this.mode = mode; + final mark = input.mark(); + try { + startIndex = input.index; + prevAccept.reset(); + final dfa = decisionToDFA[mode]; + if (dfa.s0 == null) { + return matchATN(input); + } else { + return execATN(input, dfa.s0); + } + } finally { + input.release(mark); + } + } + + @override + void reset() { + prevAccept.reset(); + startIndex = -1; + line = 1; + charPositionInLine = 0; + mode = Lexer.DEFAULT_MODE; + } + + @override + void clearDFA() { + for (var d = 0; d < 
decisionToDFA.length; d++) { + decisionToDFA[d] = DFA(atn.getDecisionState(d), d); + } + } + + int matchATN(CharStream input) { + ATNState startState = atn.modeToStartState[mode]; + + if (debug) { + log('matchATN mode $mode start: $startState\n', level: Level.FINE.value); + } + + final old_mode = mode; + + final s0_closure = computeStartState(input, startState); + final suppressEdge = s0_closure.hasSemanticContext; + s0_closure.hasSemanticContext = false; + + final next = addDFAState(s0_closure); + if (!suppressEdge) { + decisionToDFA[mode].s0 = next; + } + + final predict = execATN(input, next); + + if (debug) { + log('DFA after matchATN: ${decisionToDFA[old_mode].toLexerString()}\n', + level: Level.FINE.value); + } + + return predict; + } + + int execATN(CharStream input, DFAState ds0) { + //log("enter exec index "+input.index()+" from "+ds0.configs, level: Level.FINE.value); + if (debug) { + log('start state closure=${ds0.configs}\n', level: Level.FINE.value); + } + + if (ds0.isAcceptState) { + // allow zero-length tokens + captureSimState(prevAccept, input, ds0); + } + + var t = input.LA(1); + + var s = ds0; // s is current/from DFA state + + while (true) { + // while more work + if (debug) { + log('execATN loop starting closure: ${s.configs}\n', + level: Level.FINE.value); + } + + // As we move src->trg, src->trg, we keep track of the previous trg to + // avoid looking up the DFA state again, which is expensive. + // If the previous target was already part of the DFA, we might + // be able to avoid doing a reach operation upon t. If s!=null, + // it means that semantic predicates didn't prevent us from + // creating a DFA state. Once we know s!=null, we check to see if + // the DFA state has an edge already for t. If so, we can just reuse + // it's configuration set; there's no point in re-computing it. 
+ // This is kind of like doing DFA simulation within the ATN + // simulation because DFA simulation is really just a way to avoid + // computing reach/closure sets. Technically, once we know that + // we have a previously added DFA state, we could jump over to + // the DFA simulator. But, that would mean popping back and forth + // a lot and making things more complicated algorithmically. + // This optimization makes a lot of sense for loops within DFA. + // A character will take us back to an existing DFA state + // that already has lots of edges out of it. e.g., .* in comments. + var target = getExistingTargetState(s, t); + target ??= computeTargetState(input, s, t); + + if (target == ATNSimulator.ERROR) { + break; + } + + // If this is a consumable input element, make sure to consume before + // capturing the accept state so the input index, line, and char + // position accurately reflect the state of the interpreter at the + // end of the token. + if (t != IntStream.EOF) { + consume(input); + } + + if (target.isAcceptState) { + captureSimState(prevAccept, input, target); + if (t == IntStream.EOF) { + break; + } + } + + t = input.LA(1); + s = target; // flip; current DFA target becomes new src/from state + } + + return failOrAccept(prevAccept, input, s.configs, t); + } + + /// Get an existing target state for an edge in the DFA. If the target state + /// for the edge has not yet been computed or is otherwise not available, + /// this method returns null. 
+ /// + /// @param s The current DFA state + /// @param t The next input symbol + /// @return The existing target DFA state for the given input symbol + /// [t], or null if the target state for this edge is not + /// already cached + + DFAState getExistingTargetState(DFAState s, int t) { + if (s.edges == null || t < MIN_DFA_EDGE || t > MAX_DFA_EDGE) { + return null; + } + + final target = s.edges[t - MIN_DFA_EDGE]; + if (debug && target != null) { + log('reuse state ${s.stateNumber} edge to ${target.stateNumber}', + level: Level.FINE.value); + } + + return target; + } + + /// Compute a target state for an edge in the DFA, and attempt to add the + /// computed state and corresponding edge to the DFA. + /// + /// @param input The input stream + /// @param s The current DFA state + /// @param t The next input symbol + /// + /// @return The computed target DFA state for the given input symbol + /// [t]. If [t] does not lead to a valid DFA state, this method + /// returns {@link #ERROR}. + + DFAState computeTargetState(CharStream input, DFAState s, int t) { + ATNConfigSet reach = OrderedATNConfigSet(); + + // if we don't find an existing DFA state + // Fill reach starting from closure, following t transitions + getReachableConfigSet(input, s.configs, reach, t); + + if (reach.isEmpty) { + // we got nowhere on t from s + if (!reach.hasSemanticContext) { + // we got nowhere on t, don't throw out this knowledge; it'd + // cause a failover from DFA later. 
+ addDFAEdge(s, t, ATNSimulator.ERROR); + } + + // stop when we can't match any more char + return ATNSimulator.ERROR; + } + + // Add an edge from s to target DFA found/created for reach + return addDFAEdgeByConfig(s, t, reach); + } + + int failOrAccept( + SimState prevAccept, CharStream input, ATNConfigSet reach, int t) { + if (prevAccept.dfaState != null) { + final lexerActionExecutor = + prevAccept.dfaState.lexerActionExecutor; + accept(input, lexerActionExecutor, startIndex, prevAccept.index, + prevAccept.line, prevAccept.charPos); + return prevAccept.dfaState.prediction; + } else { + // if no accept and EOF is first char, return EOF + if (t == IntStream.EOF && input.index == startIndex) { + return Token.EOF; + } + + throw LexerNoViableAltException(recog, input, startIndex, reach); + } + } + + /// Given a starting configuration set, figure out all ATN configurations + /// we can reach upon input [t]. Parameter [reach] is a return + /// parameter. + void getReachableConfigSet( + CharStream input, ATNConfigSet configs, ATNConfigSet reach, int t) { + // this is used to skip processing for configs which have a lower priority + // than a config that already reached an accept state for the same rule + var skipAlt = ATN.INVALID_ALT_NUMBER; + for (var c in configs) { + final currentAltReachedAcceptState = c.alt == skipAlt; + if (currentAltReachedAcceptState && + (c as LexerATNConfig).hasPassedThroughNonGreedyDecision()) { + continue; + } + + if (debug) { + log('testing ${getTokenName(t)} at ${c.toString(recog, true)}\n', + level: Level.FINE.value); + } + + final n = c.state.numberOfTransitions; + for (var ti = 0; ti < n; ti++) { + // for each transition + final trans = c.state.transition(ti); + final target = getReachableTarget(trans, t); + if (target != null) { + var lexerActionExecutor = + (c as LexerATNConfig).lexerActionExecutor; + if (lexerActionExecutor != null) { + lexerActionExecutor = lexerActionExecutor + .fixOffsetBeforeMatch(input.index - startIndex); + } + 
+ final treatEofAsEpsilon = t == IntStream.EOF; + if (closure( + input, + LexerATNConfig.dup(c, target, + lexerActionExecutor: lexerActionExecutor), + reach, + currentAltReachedAcceptState, + true, + treatEofAsEpsilon)) { + // any remaining configs for this alt have a lower priority than + // the one that just reached an accept state. + skipAlt = c.alt; + break; + } + } + } + } + } + + void accept(CharStream input, LexerActionExecutor lexerActionExecutor, + int startIndex, int index, int line, int charPos) { + if (debug) { + log('ACTION $lexerActionExecutor\n', level: Level.FINE.value); + } + + // seek to after last char in token + input.seek(index); + this.line = line; + charPositionInLine = charPos; + + if (lexerActionExecutor != null && recog != null) { + lexerActionExecutor.execute(recog, input, startIndex); + } + } + + ATNState getReachableTarget(Transition trans, int t) { + if (trans.matches(t, Lexer.MIN_CHAR_VALUE, Lexer.MAX_CHAR_VALUE)) { + return trans.target; + } + + return null; + } + + ATNConfigSet computeStartState(CharStream input, ATNState p) { + PredictionContext initialContext = PredictionContext.EMPTY; + ATNConfigSet configs = OrderedATNConfigSet(); + for (var i = 0; i < p.numberOfTransitions; i++) { + final target = p.transition(i).target; + final c = LexerATNConfig(target, i + 1, initialContext); + closure(input, c, configs, false, false, false); + } + return configs; + } + + /// Since the alternatives within any lexer decision are ordered by + /// preference, this method stops pursuing the closure as soon as an accept + /// state is reached. After the first accept state is reached by depth-first + /// search from [config], all other (potentially reachable) states for + /// this rule would have a lower priority. + /// + /// @return [true] if an accept state is reached, otherwise + /// [false]. 
+ bool closure( + CharStream input, + LexerATNConfig config, + ATNConfigSet configs, + bool currentAltReachedAcceptState, + bool speculative, + bool treatEofAsEpsilon) { + if (debug) { + log('closure(' + config.toString(recog, true) + ')', + level: Level.FINE.value); + } + + if (config.state is RuleStopState) { + if (debug) { + if (recog != null) { + log('closure at ${recog.ruleNames[config.state.ruleIndex]} rule stop $config\n', + level: Level.FINE.value); + } else { + log('closure at rule stop $config\n', level: Level.FINE.value); + } + } + + if (config.context == null || config.context.hasEmptyPath()) { + if (config.context == null || config.context.isEmpty) { + configs.add(config); + return true; + } else { + configs.add(LexerATNConfig.dup(config, config.state, + context: PredictionContext.EMPTY)); + currentAltReachedAcceptState = true; + } + } + + if (config.context != null && !config.context.isEmpty) { + for (var i = 0; i < config.context.length; i++) { + if (config.context.getReturnState(i) != + PredictionContext.EMPTY_RETURN_STATE) { + final newContext = + config.context.getParent(i); // "pop" return state + final returnState = atn.states[config.context.getReturnState(i)]; + final c = LexerATNConfig.dup(config, returnState, + context: newContext); + currentAltReachedAcceptState = closure(input, c, configs, + currentAltReachedAcceptState, speculative, treatEofAsEpsilon); + } + } + } + + return currentAltReachedAcceptState; + } + + // optimization + if (!config.state.onlyHasEpsilonTransitions()) { + if (!currentAltReachedAcceptState || + !config.hasPassedThroughNonGreedyDecision()) { + configs.add(config); + } + } + + final p = config.state; + for (var i = 0; i < p.numberOfTransitions; i++) { + final t = p.transition(i); + final c = getEpsilonTarget( + input, config, t, configs, speculative, treatEofAsEpsilon); + if (c != null) { + currentAltReachedAcceptState = closure(input, c, configs, + currentAltReachedAcceptState, speculative, treatEofAsEpsilon); + } + 
} + + return currentAltReachedAcceptState; + } + + // side-effect: can alter configs.hasSemanticContext + + LexerATNConfig getEpsilonTarget( + CharStream input, + LexerATNConfig config, + Transition t, + ATNConfigSet configs, + bool speculative, + bool treatEofAsEpsilon) { + LexerATNConfig c; + switch (t.type) { + case TransitionType.RULE: + RuleTransition ruleTransition = t; + PredictionContext newContext = SingletonPredictionContext.create( + config.context, ruleTransition.followState.stateNumber); + c = LexerATNConfig.dup(config, t.target, context: newContext); + break; + + case TransitionType.PRECEDENCE: + throw UnsupportedError( + 'Precedence predicates are not supported in lexers.'); + case TransitionType.PREDICATE: + /* Track traversing semantic predicates. If we traverse, + we cannot add a DFA state for this "reach" computation + because the DFA would not test the predicate again in the + future. Rather than creating collections of semantic predicates + like v3 and testing them on prediction, v4 will test them on the + fly all the time using the ATN not the DFA. This is slower but + semantically it's not used that often. One of the key elements to + this predicate mechanism is not adding DFA states that see + predicates immediately afterwards in the ATN. For example, + + a : ID {p1}? | ID {p2}? ; + + should create the start state for rule 'a' (to save start state + competition), but should not create target of ID state. The + collection of ATN states the following ID references includes + states reached by traversing predicates. Since this is when we + test them, we cannot cash the DFA state target of ID. 
+ */ + PredicateTransition pt = t; + if (debug) { + log('EVAL rule ${pt.ruleIndex}:${pt.predIndex}', + level: Level.FINE.value); + } + configs.hasSemanticContext = true; + if (evaluatePredicate(input, pt.ruleIndex, pt.predIndex, speculative)) { + c = LexerATNConfig.dup(config, t.target); + } + break; + case TransitionType.ACTION: + if (config.context == null || config.context.hasEmptyPath()) { + // execute actions anywhere in the start rule for a token. + // + // TODO: if the entry rule is invoked recursively, some + // actions may be executed during the recursive call. The + // problem can appear when hasEmptyPath() is true but + // isEmpty is false. In this case, the config needs to be + // split into two contexts - one with just the empty path + // and another with everything but the empty path. + // Unfortunately, the current algorithm does not allow + // getEpsilonTarget to return two configurations, so + // additional modifications are needed before we can support + // the split operation. + final lexerActionExecutor = LexerActionExecutor.append( + config.lexerActionExecutor, + atn.lexerActions[(t as ActionTransition).actionIndex]); + c = LexerATNConfig.dup(config, t.target, + lexerActionExecutor: lexerActionExecutor); + } else { + // ignore actions in referenced rules + c = LexerATNConfig.dup(config, t.target); + } + break; + + case TransitionType.EPSILON: + c = LexerATNConfig.dup(config, t.target); + break; + + case TransitionType.ATOM: + case TransitionType.RANGE: + case TransitionType.SET: + if (treatEofAsEpsilon) { + if (t.matches( + IntStream.EOF, Lexer.MIN_CHAR_VALUE, Lexer.MAX_CHAR_VALUE)) { + c = LexerATNConfig.dup(config, t.target); + break; + } + } + break; + case TransitionType.NOT_SET: + break; + case TransitionType.WILDCARD: + break; + case TransitionType.INVALID: + throw ArgumentError.value(t.type, 'TransitionType'); + break; + } + + return c; + } + + /// Evaluate a predicate specified in the lexer. + /// + ///

If [speculative] is [true], this method was called before + /// {@link #consume} for the matched character. This method should call + /// {@link #consume} before evaluating the predicate to ensure position + /// sensitive values, including {@link Lexer#getText}, {@link Lexer#getLine}, + /// and {@link Lexer#getCharPositionInLine}, properly reflect the current + /// lexer state. This method should restore [input] and the simulator + /// to the original state before returning (i.e. undo the actions made by the + /// call to {@link #consume}).

+ /// + /// @param input The input stream. + /// @param ruleIndex The rule containing the predicate. + /// @param predIndex The index of the predicate within the rule. + /// @param speculative [true] if the current index in [input] is + /// one character before the predicate's location. + /// + /// @return [true] if the specified predicate evaluates to + /// [true]. + bool evaluatePredicate( + CharStream input, int ruleIndex, int predIndex, bool speculative) { + // assume true if no recognizer was provided + if (recog == null) { + return true; + } + + if (!speculative) { + return recog.sempred(null, ruleIndex, predIndex); + } + + final savedCharPositionInLine = charPositionInLine; + final savedLine = line; + final index = input.index; + final marker = input.mark(); + try { + consume(input); + return recog.sempred(null, ruleIndex, predIndex); + } finally { + charPositionInLine = savedCharPositionInLine; + line = savedLine; + input.seek(index); + input.release(marker); + } + } + + void captureSimState(SimState settings, CharStream input, DFAState dfaState) { + settings.index = input.index; + settings.line = line; + settings.charPos = charPositionInLine; + settings.dfaState = dfaState; + } + + DFAState addDFAEdgeByConfig(DFAState from, int t, ATNConfigSet q) { + /* leading to this call, ATNConfigSet.hasSemanticContext is used as a + * marker indicating dynamic predicate evaluation makes this edge + * dependent on the specific input sequence, so the static edge in the + * DFA should be omitted. The target DFAState is still created since + * execATN has the ability to resynchronize with the DFA state cache + * following the predicate evaluation step. + * + * TJP notes: next time through the DFA, we see a pred again and eval. + * If that gets us to a previously created (but dangling) DFA + * state, we can continue in pure DFA mode from there. 
+ */ + final suppressEdge = q.hasSemanticContext; + q.hasSemanticContext = false; + + final to = addDFAState(q); + + if (suppressEdge) { + return to; + } + + addDFAEdge(from, t, to); + return to; + } + + void addDFAEdge(DFAState p, int t, DFAState q) { + if (t < MIN_DFA_EDGE || t > MAX_DFA_EDGE) { + // Only track edges within the DFA bounds + return; + } + + if (debug) { + log('EDGE $p -> $q upon ${String.fromCharCode(t)}', + level: Level.FINE.value); + } + + p.edges ??= List(MAX_DFA_EDGE - MIN_DFA_EDGE + 1); + p.edges[t - MIN_DFA_EDGE] = q; // connect + } + + /// Add a new DFA state if there isn't one with this set of + /// configurations already. This method also detects the first + /// configuration containing an ATN rule stop state. Later, when + /// traversing the DFA, we will know which rule to accept. + DFAState addDFAState(ATNConfigSet configs) { + /* the lexer evaluates predicates on-the-fly; by this point configs + * should not contain any configurations with unevaluated predicates. + */ + assert(!configs.hasSemanticContext); + + final proposed = DFAState(configs: configs); + ATNConfig firstConfigWithRuleStopState; + for (var c in configs) { + if (c.state is RuleStopState) { + firstConfigWithRuleStopState = c; + break; + } + } + + if (firstConfigWithRuleStopState != null) { + proposed.isAcceptState = true; + proposed.lexerActionExecutor = + (firstConfigWithRuleStopState as LexerATNConfig) + .lexerActionExecutor; + proposed.prediction = + atn.ruleToTokenType[firstConfigWithRuleStopState.state.ruleIndex]; + } + + final dfa = decisionToDFA[mode]; + final existing = dfa.states[proposed]; + if (existing != null) return existing; + + final newState = proposed; + + newState.stateNumber = dfa.states.length; + configs.readOnly = true; + newState.configs = configs; + dfa.states[newState] = newState; + return newState; + } + + DFA getDFA(int mode) { + return decisionToDFA[mode]; + } + + /// Get the text matched so far for the current token. 
+ + String getText(CharStream input) { + // index is first lookahead char, don't include. + return input.getText(Interval.of(startIndex, input.index - 1)); + } + + void consume(CharStream input) { + final curChar = input.LA(1); + if (curChar == 10) { // Is new line + line++; + charPositionInLine = 0; + } else { + charPositionInLine++; + } + input.consume(); + } + + String getTokenName(int t) { + if (t == -1) return 'EOF'; + //if ( atn.g!=null ) return atn.g.getTokenDisplayName(t); + return "'${String.fromCharCode(t)}'"; + } +} diff --git a/runtime/Dart/lib/src/atn/src/parser_atn_simulator.dart b/runtime/Dart/lib/src/atn/src/parser_atn_simulator.dart new file mode 100644 index 000000000..8ed2a5122 --- /dev/null +++ b/runtime/Dart/lib/src/atn/src/parser_atn_simulator.dart @@ -0,0 +1,2630 @@ +/* + * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ + +import 'dart:collection'; +import 'dart:developer'; + +import 'package:logging/logging.dart'; + +import '../../dfa/dfa.dart'; +import '../../error/error.dart'; +import '../../input_stream.dart'; +import '../../interval_set.dart'; +import '../../misc/pair.dart'; +import '../../parser.dart'; +import '../../parser_rule_context.dart'; +import '../../prediction_context.dart'; +import '../../rule_context.dart'; +import '../../token.dart'; +import '../../token_stream.dart'; +import '../../util/bit_set.dart'; +import '../../util/murmur_hash.dart'; +import '../../vocabulary.dart'; +import 'atn.dart'; +import 'atn_config.dart'; +import 'atn_config_set.dart'; +import 'atn_simulator.dart'; +import 'atn_state.dart'; +import 'semantic_context.dart'; +import 'transition.dart'; + +/// The embodiment of the adaptive LL(*), ALL(*), parsing strategy. +/// +///

+/// The basic complexity of the adaptive strategy makes it harder to understand. +/// We begin with ATN simulation to build paths in a DFA. Subsequent prediction +/// requests go through the DFA first. If they reach a state without an edge for +/// the current symbol, the algorithm fails over to the ATN simulation to +/// complete the DFA path for the current input (until it finds a conflict state +/// or uniquely predicting state).

+/// +///

+/// All of that is done without using the outer context because we want to create +/// a DFA that is not dependent upon the rule invocation stack when we do a +/// prediction. One DFA works in all contexts. We avoid using context not +/// necessarily because it's slower, although it can be, but because of the DFA +/// caching problem. The closure routine only considers the rule invocation stack +/// created during prediction beginning in the decision rule. For example, if +/// prediction occurs without invoking another rule's ATN, there are no context +/// stacks in the configurations. When lack of context leads to a conflict, we +/// don't know if it's an ambiguity or a weakness in the strong LL(*) parsing +/// strategy (versus full LL(*)).

+/// +///

+/// When SLL yields a configuration set with conflict, we rewind the input and +/// retry the ATN simulation, this time using full outer context without adding +/// to the DFA. Configuration context stacks will be the full invocation stacks +/// from the start rule. If we get a conflict using full context, then we can +/// definitively say we have a true ambiguity for that input sequence. If we +/// don't get a conflict, it implies that the decision is sensitive to the outer +/// context. (It is not context-sensitive in the sense of context-sensitive +/// grammars.)

+/// +///

+/// The next time we reach this DFA state with an SLL conflict, through DFA +/// simulation, we will again retry the ATN simulation using full context mode. +/// This is slow because we can't save the results and have to "interpret" the +/// ATN each time we get that input.

+/// +///

+/// CACHING FULL CONTEXT PREDICTIONS

+/// +///

+/// We could cache results from full context to predicted alternative easily and +/// that saves a lot of time but doesn't work in presence of predicates. The set +/// of visible predicates from the ATN start state changes depending on the +/// context, because closure can fall off the end of a rule. I tried to cache +/// tuples (stack context, semantic context, predicted alt) but it was slower +/// than interpreting and much more complicated. Also required a huge amount of +/// memory. The goal is not to create the world's fastest parser anyway. I'd like +/// to keep this algorithm simple. By launching multiple threads, we can improve +/// the speed of parsing across a large number of files.

+/// +///

+/// There is no strict ordering between the amount of input used by SLL vs LL, +/// which makes it really hard to build a cache for full context. Let's say that +/// we have input A B C that leads to an SLL conflict with full context X. That +/// implies that using X we might only use A B but we could also use A B C D to +/// resolve conflict. Input A B C D could predict alternative 1 in one position +/// in the input and A B C E could predict alternative 2 in another position in +/// input. The conflicting SLL configurations could still be non-unique in the +/// full context prediction, which would lead us to requiring more input than the +/// original A B C. To make a prediction cache work, we have to track the exact +/// input used during the previous prediction. That amounts to a cache that maps +/// X to a specific DFA for that context.

+/// +///

+/// Something should be done for left-recursive expression predictions. They are +/// likely LL(1) + pred eval. Easier to do the whole SLL unless error and retry +/// with full LL thing Sam does.

+/// +///

+/// AVOIDING FULL CONTEXT PREDICTION

+/// +///

+/// We avoid doing full context retry when the outer context is empty, we did not +/// dip into the outer context by falling off the end of the decision state rule, +/// or when we force SLL mode.

+/// +///

+/// As an example of the not dip into outer context case, consider super +/// constructor calls versus function calls. One grammar might look like +/// this:

+/// +///
+/// ctorBody
+///   : '{' superCall? stat* '}'
+///   ;
+/// 
+/// +///

+/// Or, you might see something like

+/// +///
+/// stat
+///   : superCall ';'
+///   | expression ';'
+///   | ...
+///   ;
+/// 
+/// +///

+/// In both cases I believe that no closure operations will dip into the outer +/// context. In the first case ctorBody in the worst case will stop at the '}'. +/// In the 2nd case it should stop at the ';'. Both cases should stay within the +/// entry rule and not dip into the outer context.

+/// +///

+/// PREDICATES

+/// +///

+/// Predicates are always evaluated if present in either SLL or LL both. SLL and +/// LL simulation deals with predicates differently. SLL collects predicates as +/// it performs closure operations like ANTLR v3 did. It delays predicate +/// evaluation until it reaches an accept state. This allows us to cache the SLL +/// ATN simulation whereas, if we had evaluated predicates on-the-fly during +/// closure, the DFA state configuration sets would be different and we couldn't +/// build up a suitable DFA.

+/// +///

+/// When building a DFA accept state during ATN simulation, we evaluate any +/// predicates and return the sole semantically valid alternative. If there is +/// more than 1 alternative, we report an ambiguity. If there are 0 alternatives, +/// we throw an exception. Alternatives without predicates act like they have +/// true predicates. The simple way to think about it is to strip away all +/// alternatives with false predicates and choose the minimum alternative that +/// remains.

+/// +///

+/// When we start in the DFA and reach an accept state that's predicated, we test +/// those and return the minimum semantically viable alternative. If no +/// alternatives are viable, we throw an exception.

+/// +///

+/// During full LL ATN simulation, closure always evaluates predicates +/// on-the-fly. This is crucial to reducing the configuration set size during +/// closure. It hits a landmine when parsing with the Java grammar, for example, +/// without this on-the-fly evaluation.

+/// +///

+/// SHARING DFA

+/// +///

+/// All instances of the same parser share the same decision DFAs through a +/// static field. Each instance gets its own ATN simulator but they share the +/// same {@link #decisionToDFA} field. They also share a +/// [PredictionContextCache] object that makes sure that all +/// [PredictionContext] objects are shared among the DFA states. This makes +/// a big size difference.

+/// +///

+/// THREAD SAFETY

+/// +///

+/// The [ParserATNSimulator] locks on the {@link #decisionToDFA} field when +/// it adds a new DFA object to that array. {@link #addDFAEdge} +/// locks on the DFA for the current decision when setting the +/// {@link DFAState#edges} field. {@link #addDFAState} locks on +/// the DFA for the current decision when looking up a DFA state to see if it +/// already exists. We must make sure that all requests to add DFA states that +/// are equivalent result in the same shared DFA object. This is because lots of +/// threads will be trying to update the DFA at once. The +/// {@link #addDFAState} method also locks inside the DFA lock +/// but this time on the shared context cache when it rebuilds the +/// configurations' [PredictionContext] objects using cached +/// subgraphs/nodes. No other locking occurs, even during DFA simulation. This is +/// safe as long as we can guarantee that all threads referencing +/// {@code s.edge[t]} get the same physical target [DFAState], or +/// null. Once into the DFA, the DFA simulation does not reference the +/// {@link DFA#states} map. It follows the {@link DFAState#edges} field to new +/// targets. The DFA simulator will either find {@link DFAState#edges} to be +/// null, to be non-null and {@code dfa.edges[t]} null, or +/// {@code dfa.edges[t]} to be non-null. The +/// {@link #addDFAEdge} method could be racing to set the field +/// but in either case the DFA simulator works; if null, and requests ATN +/// simulation. It could also race trying to get {@code dfa.edges[t]}, but either +/// way it will work because it's not doing a test and set operation.

+/// +///

+/// Starting with SLL then failing to combined SLL/LL (Two-Stage +/// Parsing)

+/// +///

+/// Sam pointed out that if SLL does not give a syntax error, then there is no +/// point in doing full LL, which is slower. We only have to try LL if we get a +/// syntax error. For maximum speed, Sam starts the parser set to pure SLL +/// mode with the [BailErrorStrategy]:

+/// +///
+/// parser.{@link Parser#interpreter interpreter}.{@link #setPredictionMode setPredictionMode}{@code (}{@link PredictionMode#SLL}{@code )};
+/// parser.{@link Parser#setErrorHandler setErrorHandler}(new [BailErrorStrategy]());
+/// 
+/// +///

+/// If it does not get a syntax error, then we're done. If it does get a syntax +/// error, we need to retry with the combined SLL/LL strategy.

+/// +///

+/// The reason this works is as follows. If there are no SLL conflicts, then the +/// grammar is SLL (at least for that input set). If there is an SLL conflict, +/// the full LL analysis must yield a set of viable alternatives which is a +/// subset of the alternatives reported by SLL. If the LL set is a singleton, +/// then the grammar is LL but not SLL. If the LL set is the same size as the SLL +/// set, the decision is SLL. If the LL set has size > 1, then that decision +/// is truly ambiguous on the current input. If the LL set is smaller, then the +/// SLL conflict resolution might choose an alternative that the full LL would +/// rule out as a possibility based upon better context information. If that's +/// the case, then the SLL parse will definitely get an error because the full LL +/// analysis says it's not viable. If SLL conflict resolution chooses an +/// alternative within the LL set, then both SLL and LL would choose the same +/// alternative because they both choose the minimum of multiple conflicting +/// alternatives.

+/// +///

+/// Let's say we have a set of SLL conflicting alternatives {@code {1, 2, 3}} and +/// a smaller LL set called s. If s is {@code {2, 3}}, then SLL +/// parsing will get an error because SLL will pursue alternative 1. If +/// s is {@code {1, 2}} or {@code {1, 3}} then both SLL and LL will +/// choose the same alternative because alternative one is the minimum of either +/// set. If s is {@code {2}} or {@code {3}} then SLL will get a syntax +/// error. If s is {@code {1}} then SLL will succeed.

+/// +///

+/// Of course, if the input is invalid, then we will get an error for sure in +/// both SLL and LL parsing. Erroneous input will therefore require 2 passes over +/// the input.

+class ParserATNSimulator extends ATNSimulator { + static final bool debug = false; + static final bool debug_list_atn_decisions = false; + static final bool dfa_debug = false; + static final bool retry_debug = false; + + /// Just in case this optimization is bad, add an ENV variable to turn it off */ + static final bool TURN_OFF_LR_LOOP_ENTRY_BRANCH_OPT = + bool.fromEnvironment('TURN_OFF_LR_LOOP_ENTRY_BRANCH_OPT'); + + final Parser parser; + + final List decisionToDFA; + + /// SLL, LL, or LL + exact ambig detection? */ + + PredictionMode predictionMode = PredictionMode.LL; + + /// Each prediction operation uses a cache for merge of prediction contexts. + /// Don't keep around as it wastes huge amounts of memory. DoubleKeyMap + /// isn't synchronized but we're ok since two threads shouldn't reuse same + /// parser/atnsim object because it can only handle one input at a time. + /// This maps graphs a and b to merged result c. (a,b)→c. We can avoid + /// the merge if we ever see a and b again. Note that (b,a)→c should + /// also be examined during cache lookup. + Map, PredictionContext> mergeCache; + + // LAME globals to avoid parameters!!!!! 
I need these down deep in predTransition + TokenStream input; + int startIndex = 0; + ParserRuleContext _outerContext; + DFA _dfa; + + ParserATNSimulator(this.parser, ATN atn, this.decisionToDFA, + PredictionContextCache sharedContextCache) + : super(atn, sharedContextCache) { + // DOTGenerator dot = new DOTGenerator(null); + // log(dot.getDOT(atn.rules.get(0), parser.getRuleNames())); + // log(dot.getDOT(atn.rules.get(1), parser.getRuleNames())); + } + + @override + void reset() {} + + @override + void clearDFA() { + for (var d = 0; d < decisionToDFA.length; d++) { + decisionToDFA[d] = DFA(atn.getDecisionState(d), d); + } + } + + int adaptivePredict( + TokenStream input_, int decision, ParserRuleContext outerContext) { + if (debug || debug_list_atn_decisions) { + log('adaptivePredict decision $decision' ' exec LA(1)==' + + getLookaheadName(input_) + + ' line ${input_.LT(1).line}:${input_.LT(1).charPositionInLine}'); + } + + input = input_; + startIndex = input_.index; + _outerContext = outerContext; + final dfa = decisionToDFA[decision]; + _dfa = dfa; + + final m = input_.mark(); + final index = startIndex; + + // Now we are certain to have a specific decision's DFA + // But, do we still need an initial state? + try { + DFAState s0; + if (dfa.isPrecedenceDfa()) { + // the start state for a precedence DFA depends on the current + // parser precedence, and is provided by a DFA method. 
+ s0 = dfa.getPrecedenceStartState(parser.precedence); + } else { + // the start state for a "regular" DFA is just s0 + s0 = dfa.s0; + } + + if (s0 == null) { + outerContext ??= ParserRuleContext.EMPTY; + if (debug || debug_list_atn_decisions) { + log('predictATN decision ${dfa.decision}' ' exec LA(1)==' + + getLookaheadName(input_) + + ', outerContext=' + + outerContext.toString(recog: parser)); + } + + final fullCtx = false; + var s0_closure = computeStartState( + dfa.atnStartState, ParserRuleContext.EMPTY, fullCtx); + + if (dfa.isPrecedenceDfa()) { + /* If this is a precedence DFA, we use applyPrecedenceFilter + * to convert the computed start state to a precedence start + * state. We then use DFA.setPrecedenceStartState to set the + * appropriate start state for the precedence level rather + * than simply setting DFA.s0. + */ + dfa.s0.configs = + s0_closure; // not used for prediction but useful to know start configs anyway + s0_closure = applyPrecedenceFilter(s0_closure); + s0 = addDFAState(dfa, DFAState(configs: s0_closure)); + dfa.setPrecedenceStartState(parser.precedence, s0); + } else { + s0 = addDFAState(dfa, DFAState(configs: s0_closure)); + dfa.s0 = s0; + } + } + + final alt = execATN(dfa, s0, input_, index, outerContext); + if (debug) { + log('DFA after predictATN: ' + dfa.toString(parser.vocabulary)); + } + return alt; + } finally { + mergeCache = null; // wack cache after each prediction + _dfa = null; + input_.seek(index); + input_.release(m); + } + } + + /// Performs ATN simulation to compute a predicted alternative based + /// upon the remaining input, but also updates the DFA cache to avoid + /// having to traverse the ATN again for the same input sequence. + /// + /// There are some key conditions we're looking for after computing a new + /// set of ATN configs (proposed DFA state): + /// if the set is empty, there is no viable alternative for current symbol + /// does the state uniquely predict an alternative? 
+ /// does the state have a conflict that would prevent us from + /// putting it on the work list? + /// + /// We also have some key operations to do: + /// add an edge from previous DFA state to potentially new DFA state, D, + /// upon current symbol but only if adding to work list, which means in all + /// cases except no viable alternative (and possibly non-greedy decisions?) + /// collecting predicates and adding semantic context to DFA accept states + /// adding rule context to context-sensitive DFA accept states + /// consuming an input symbol + /// reporting a conflict + /// reporting an ambiguity + /// reporting a context sensitivity + /// reporting insufficient predicates + /// + /// cover these cases: + /// dead end + /// single alt + /// single alt + preds + /// conflict + /// conflict + preds + /// + int execATN(DFA dfa, DFAState s0, TokenStream input, int startIndex, + ParserRuleContext outerContext) { + if (debug || debug_list_atn_decisions) { + log('execATN decision ${dfa.decision}' ' exec LA(1)==' + + getLookaheadName(input) + + ' line ${input.LT(1).line}' + + ':${input.LT(1).charPositionInLine}'); + } + + var previousD = s0; + + if (debug) log('s0 = $s0'); + + var t = input.LA(1); + + while (true) { + // while more work + var D = getExistingTargetState(previousD, t); + D ??= computeTargetState(dfa, previousD, t); + + if (D == ATNSimulator.ERROR) { + // if any configs in previous dipped into outer context, that + // means that input up to t actually finished entry rule + // at least for SLL decision. Full LL doesn't dip into outer + // so don't need special case. + // We will get an error no matter what so delay until after + // decision; better error message. Also, no reachable target + // ATN states in SLL implies LL will also get nowhere. + // If conflict in states that dip out, choose min since we + // will get error no matter what. 
+ final e = + noViableAlt(input, outerContext, previousD.configs, startIndex); + input.seek(startIndex); + final alt = getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule( + previousD.configs, outerContext); + if (alt != ATN.INVALID_ALT_NUMBER) { + return alt; + } + throw e; + } + + if (D.requiresFullContext && predictionMode != PredictionMode.SLL) { + // IF PREDS, MIGHT RESOLVE TO SINGLE ALT => SLL (or syntax error) + var conflictingAlts = D.configs.conflictingAlts; + if (D.predicates != null) { + if (debug) log('DFA state has preds in DFA sim LL failover'); + final conflictIndex = input.index; + if (conflictIndex != startIndex) { + input.seek(startIndex); + } + + conflictingAlts = + evalSemanticContext(D.predicates, outerContext, true); + if (conflictingAlts.cardinality == 1) { + if (debug) log('Full LL avoided'); + return conflictingAlts.nextset(0); + } + + if (conflictIndex != startIndex) { + // restore the index so reporting the fallback to full + // context occurs with the index at the correct spot + input.seek(conflictIndex); + } + } + + if (dfa_debug) log('ctx sensitive state ${outerContext} in $D'); + final fullCtx = true; + final s0_closure = + computeStartState(dfa.atnStartState, outerContext, fullCtx); + reportAttemptingFullContext( + dfa, conflictingAlts, D.configs, startIndex, input.index); + final alt = execATNWithFullContext( + dfa, D, s0_closure, input, startIndex, outerContext); + return alt; + } + + if (D.isAcceptState) { + if (D.predicates == null) { + return D.prediction; + } + + final stopIndex = input.index; + input.seek(startIndex); + final alts = evalSemanticContext(D.predicates, outerContext, true); + switch (alts.cardinality) { + case 0: + throw noViableAlt(input, outerContext, D.configs, startIndex); + + case 1: + return alts.nextset(0); + + default: + // report ambiguity after predicate evaluation to make sure the correct + // set of ambig alts is reported. 
+ reportAmbiguity( + dfa, D, startIndex, stopIndex, false, alts, D.configs); + return alts.nextset(0); + } + } + + previousD = D; + + if (t != IntStream.EOF) { + input.consume(); + t = input.LA(1); + } + } + } + + /// Get an existing target state for an edge in the DFA. If the target state + /// for the edge has not yet been computed or is otherwise not available, + /// this method returns null. + /// + /// @param previousD The current DFA state + /// @param t The next input symbol + /// @return The existing target DFA state for the given input symbol + /// [t], or null if the target state for this edge is not + /// already cached + DFAState getExistingTargetState(DFAState previousD, int t) { + final edges = previousD.edges; + if (edges == null || t + 1 < 0 || t + 1 >= edges.length) { + return null; + } + + return edges[t + 1]; + } + + /// Compute a target state for an edge in the DFA, and attempt to add the + /// computed state and corresponding edge to the DFA. + /// + /// @param dfa The DFA + /// @param previousD The current DFA state + /// @param t The next input symbol + /// + /// @return The computed target DFA state for the given input symbol + /// [t]. If [t] does not lead to a valid DFA state, this method + /// returns {@link #ERROR}. 
+ DFAState computeTargetState(DFA dfa, DFAState previousD, int t) { + final reach = computeReachSet(previousD.configs, t, false); + if (reach == null) { + addDFAEdge(dfa, previousD, t, ATNSimulator.ERROR); + return ATNSimulator.ERROR; + } + + // create new target state; we'll add to DFA after it's complete + var D = DFAState(configs: reach); + + final predictedAlt = getUniqueAlt(reach); + + if (debug) { + final altSubSets = + PredictionModeExtension.getConflictingAltSubsets(reach); + log( + 'SLL altSubSets=$altSubSets' ', configs=$reach' ', predict=$predictedAlt, allSubsetsConflict=${PredictionModeExtension + .allSubsetsConflict( + altSubSets)}, conflictingAlts=${getConflictingAlts(reach)}'); + } + + if (predictedAlt != ATN.INVALID_ALT_NUMBER) { + // NO CONFLICT, UNIQUELY PREDICTED ALT + D.isAcceptState = true; + D.configs.uniqueAlt = predictedAlt; + D.prediction = predictedAlt; + } else if (PredictionModeExtension.hasSLLConflictTerminatingPrediction( + predictionMode, reach)) { + // MORE THAN ONE VIABLE ALTERNATIVE + D.configs.conflictingAlts = getConflictingAlts(reach); + D.requiresFullContext = true; + // in SLL-only mode, we will stop at this state and return the minimum alt + D.isAcceptState = true; + D.prediction = D.configs.conflictingAlts.nextset(0); + } + + if (D.isAcceptState && D.configs.hasSemanticContext) { + predicateDFAState(D, atn.getDecisionState(dfa.decision)); + if (D.predicates != null) { + D.prediction = ATN.INVALID_ALT_NUMBER; + } + } + + // all adds to dfa are done after we've created full D state + D = addDFAEdge(dfa, previousD, t, D); + return D; + } + + void predicateDFAState(DFAState dfaState, DecisionState decisionState) { + // We need to test all predicates, even in DFA states that + // uniquely predict alternative. 
+ final nalts = decisionState.numberOfTransitions; + // Update DFA so reach becomes accept state with (predicate,alt) + // pairs if preds found for conflicting alts + final altsToCollectPredsFrom = + getConflictingAltsOrUniqueAlt(dfaState.configs); + final altToPred = + getPredsForAmbigAlts(altsToCollectPredsFrom, dfaState.configs, nalts); + if (altToPred != null) { + dfaState.predicates = + getPredicatePredictions(altsToCollectPredsFrom, altToPred); + dfaState.prediction = ATN.INVALID_ALT_NUMBER; // make sure we use preds + } else { + // There are preds in configs but they might go away + // when OR'd together like {p}? || NONE == NONE. If neither + // alt has preds, resolve to min alt + dfaState.prediction = altsToCollectPredsFrom.nextset(0); + } + } + + // comes back with reach.uniqueAlt set to a valid alt + int execATNWithFullContext( + DFA dfa, + DFAState D, // how far we got in SLL DFA before failing over + ATNConfigSet s0, + TokenStream input, + int startIndex, + ParserRuleContext outerContext) { + if (debug || debug_list_atn_decisions) { + log('execATNWithFullContext $s0'); + } + final fullCtx = true; + var foundExactAmbig = false; + ATNConfigSet reach; + var previous = s0; + input.seek(startIndex); + var t = input.LA(1); + int predictedAlt; + while (true) { + // while more work +// log("LL REACH "+getLookaheadName(input)+ +// " from configs.size="+previous.length+ +// " line "+input.LT(1).getLine()+":"+input.LT(1).getCharPositionInLine()); + reach = computeReachSet(previous, t, fullCtx); + if (reach == null) { + // if any configs in previous dipped into outer context, that + // means that input up to t actually finished entry rule + // at least for LL decision. Full LL doesn't dip into outer + // so don't need special case. + // We will get an error no matter what so delay until after + // decision; better error message. Also, no reachable target + // ATN states in SLL implies LL will also get nowhere. 
+ // If conflict in states that dip out, choose min since we + // will get error no matter what. + final e = + noViableAlt(input, outerContext, previous, startIndex); + input.seek(startIndex); + final alt = getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule( + previous, outerContext); + if (alt != ATN.INVALID_ALT_NUMBER) { + return alt; + } + throw e; + } + + final altSubSets = + PredictionModeExtension.getConflictingAltSubsets(reach); + if (debug) { + log('LL altSubSets=$altSubSets' ', predict=${PredictionModeExtension.getUniqueAlt(altSubSets)}' ', resolvesToJustOneViableAlt=${PredictionModeExtension.resolvesToJustOneViableAlt(altSubSets)}'); + } + +// log("altSubSets: "+altSubSets); +// log("reach="+reach+", "+reach.conflictingAlts, level: Level.SEVERE.value); + reach.uniqueAlt = getUniqueAlt(reach); + // unique prediction? + if (reach.uniqueAlt != ATN.INVALID_ALT_NUMBER) { + predictedAlt = reach.uniqueAlt; + break; + } + if (predictionMode != PredictionMode.LL_EXACT_AMBIG_DETECTION) { + predictedAlt = + PredictionModeExtension.resolvesToJustOneViableAlt(altSubSets); + if (predictedAlt != ATN.INVALID_ALT_NUMBER) { + break; + } + } else { + // In exact ambiguity mode, we never try to terminate early. + // Just keeps scarfing until we know what the conflict is + if (PredictionModeExtension.allSubsetsConflict(altSubSets) && + PredictionModeExtension.allSubsetsEqual(altSubSets)) { + foundExactAmbig = true; + predictedAlt = PredictionModeExtension.getSingleViableAlt(altSubSets); + break; + } + // else there are multiple non-conflicting subsets or + // we're not sure what the ambiguity is yet. + // So, keep going. + } + + previous = reach; + if (t != IntStream.EOF) { + input.consume(); + t = input.LA(1); + } + } + + // If the configuration set uniquely predicts an alternative, + // without conflict, then we know that it's a full LL decision + // not SLL. 
+ if (reach.uniqueAlt != ATN.INVALID_ALT_NUMBER) { + reportContextSensitivity( + dfa, predictedAlt, reach, startIndex, input.index); + return predictedAlt; + } + + // We do not check predicates here because we have checked them + // on-the-fly when doing full context prediction. + + /* + In non-exact ambiguity detection mode, we might actually be able to + detect an exact ambiguity, but I'm not going to spend the cycles + needed to check. We only emit ambiguity warnings in exact ambiguity + mode. + + For example, we might know that we have conflicting configurations. + But, that does not mean that there is no way forward without a + conflict. It's possible to have nonconflicting alt subsets as in: + + LL altSubSets=[{1, 2}, {1, 2}, {1}, {1, 2}] + + from + + [(17,1,[5 $]), (13,1,[5 10 $]), (21,1,[5 10 $]), (11,1,[$]), + (13,2,[5 10 $]), (21,2,[5 10 $]), (11,2,[$])] + + In this case, (17,1,[5 $]) indicates there is some next sequence that + would resolve this without conflict to alternative 1. Any other viable + next sequence, however, is associated with a conflict. We stop + looking for input because no amount of further lookahead will alter + the fact that we should predict alternative 1. We just can't say for + sure that there is an ambiguity without looking further. + */ + reportAmbiguity(dfa, D, startIndex, input.index, foundExactAmbig, + reach.alts, reach); + + return predictedAlt; + } + + ATNConfigSet computeReachSet(ATNConfigSet config, int t, bool fullCtx) { + if (debug) log('in computeReachSet, starting closure: $config'); + + mergeCache ??= {}; + + final intermediate = ATNConfigSet(fullCtx); + + /* Configurations already in a rule stop state indicate reaching the end + * of the decision rule (local context) or end of the start rule (full + * context). Once reached, these configurations are never updated by a + * closure operation, so they are handled separately for the performance + * advantage of having a smaller intermediate set when calling closure. 
+ * + * For full-context reach operations, separate handling is required to + * ensure that the alternative matching the longest overall sequence is + * chosen when multiple such configurations can match the input. + */ + List skippedStopStates; + + // First figure out where we can reach on input t + for (var c in config) { + if (debug) log('testing ' + getTokenName(t) + ' at ' + c.toString()); + + if (c.state is RuleStopState) { + assert(c.context.isEmpty); + if (fullCtx || t == IntStream.EOF) { + skippedStopStates ??= []; + + skippedStopStates.add(c); + } + + continue; + } + + final n = c.state.numberOfTransitions; + for (var ti = 0; ti < n; ti++) { + // for each transition + final trans = c.state.transition(ti); + final target = getReachableTarget(trans, t); + if (target != null) { + intermediate.add(ATNConfig.dup(c, state: target), mergeCache); + } + } + } + + // Now figure out where the reach operation can take us... + + ATNConfigSet reach; + + /* This block optimizes the reach operation for intermediate sets which + * trivially indicate a termination state for the overall + * adaptivePredict operation. + * + * The conditions assume that intermediate + * contains all configurations relevant to the reach set, but this + * condition is not true when one or more configurations have been + * withheld in skippedStopStates, or when the current symbol is EOF. + */ + if (skippedStopStates == null && t != Token.EOF) { + if (intermediate.length == 1) { + // Don't pursue the closure if there is just one state. + // It can only have one alternative; just add to result + // Also don't pursue the closure if there is unique alternative + // among the configurations. + reach = intermediate; + } else if (getUniqueAlt(intermediate) != ATN.INVALID_ALT_NUMBER) { + // Also don't pursue the closure if there is unique alternative + // among the configurations. 
+ reach = intermediate; + } + } + + /* If the reach set could not be trivially determined, perform a closure + * operation on the intermediate set to compute its initial value. + */ + if (reach == null) { + reach = ATNConfigSet(fullCtx); + final closureBusy = {}; + final treatEofAsEpsilon = t == Token.EOF; + for (var c in intermediate) { + closure(c, reach, closureBusy, false, fullCtx, treatEofAsEpsilon); + } + } + + if (t == IntStream.EOF) { + /* After consuming EOF no additional input is possible, so we are + * only interested in configurations which reached the end of the + * decision rule (local context) or end of the start rule (full + * context). Update reach to contain only these configurations. This + * handles both explicit EOF transitions in the grammar and implicit + * EOF transitions following the end of the decision or start rule. + * + * When reach==intermediate, no closure operation was performed. In + * this case, removeAllConfigsNotInRuleStopState needs to check for + * reachable rule stop states as well as configurations already in + * a rule stop state. + * + * This is handled before the configurations in skippedStopStates, + * because any configurations potentially added from that list are + * already guaranteed to meet this condition whether or not it's + * required. + */ + reach = removeAllConfigsNotInRuleStopState(reach, reach == intermediate); + } + + /* If skippedStopStates is not null, then it contains at least one + * configuration. For full-context reach operations, these + * configurations reached the end of the start rule, in which case we + * only add them back to reach if no configuration during the current + * closure operation reached such a state. This ensures adaptivePredict + * chooses an alternative matching the longest overall sequence when + * multiple alternatives are viable. 
+ */ + if (skippedStopStates != null && + (!fullCtx || + !PredictionModeExtension.hasConfigInRuleStopState(reach))) { + assert(skippedStopStates.isNotEmpty); + for (var c in skippedStopStates) { + reach.add(c, mergeCache); + } + } + + if (reach.isEmpty) return null; + return reach; + } + + /// Return a configuration set containing only the configurations from + /// [configs] which are in a [RuleStopState]. If all + /// configurations in [configs] are already in a rule stop state, this + /// method simply returns [configs]. + /// + ///

When [lookToEndOfRule] is true, this method uses + /// {@link ATN#nextTokens} for each configuration in [configs] which is + /// not already in a rule stop state to see if a rule stop state is reachable + /// from the configuration via epsilon-only transitions.

+ /// + /// @param configs the configuration set to update + /// @param lookToEndOfRule when true, this method checks for rule stop states + /// reachable by epsilon-only transitions from each configuration in + /// [configs]. + /// + /// @return [configs] if all configurations in [configs] are in a + /// rule stop state, otherwise return a new configuration set containing only + /// the configurations from [configs] which are in a rule stop state + ATNConfigSet removeAllConfigsNotInRuleStopState( + ATNConfigSet configs, bool lookToEndOfRule) { + if (PredictionModeExtension.allConfigsInRuleStopStates(configs)) { + return configs; + } + + final result = ATNConfigSet(configs.fullCtx); + for (var config in configs) { + if (config.state is RuleStopState) { + result.add(config, mergeCache); + continue; + } + + if (lookToEndOfRule && config.state.onlyHasEpsilonTransitions()) { + final nextTokens = atn.nextTokens(config.state); + if (nextTokens.contains(Token.EPSILON)) { + ATNState endOfRuleState = atn.ruleToStopState[config.state.ruleIndex]; + result.add( + ATNConfig.dup(config, state: endOfRuleState), mergeCache); + } + } + } + + return result; + } + + ATNConfigSet computeStartState(ATNState p, RuleContext ctx, bool fullCtx) { + // always at least the implicit call to start rule + final initialContext = + PredictionContext.fromRuleContext(atn, ctx); + final configs = ATNConfigSet(fullCtx); + + for (var i = 0; i < p.numberOfTransitions; i++) { + final target = p.transition(i).target; + final c = ATNConfig(target, i + 1, initialContext); + final closureBusy = {}; + closure(c, configs, closureBusy, true, fullCtx, false); + } + + return configs; + } + + /* parrt internal source braindump that doesn't mess up + * external API spec. + context-sensitive in that they can only be properly evaluated + in the context of the proper prec argument. Without pruning, + these predicates are normal predicates evaluated when we reach + conflict state (or unique prediction). 
As we cannot evaluate + these predicates out of context, the resulting conflict leads + to full LL evaluation and nonlinear prediction which shows up + very clearly with fairly large expressions. + + Example grammar: + + e : e '*' e + | e '+' e + | INT + ; + + We convert that to the following: + + e[int prec] + : INT + ( {3>=prec}? '*' e[4] + | {2>=prec}? '+' e[3] + )* + ; + + The (..)* loop has a decision for the inner block as well as + an enter or exit decision, which is what concerns us here. At + the 1st + of input 1+2+3, the loop entry sees both predicates + and the loop exit also sees both predicates by falling off the + edge of e. This is because we have no stack information with + SLL and find the follow of e, which will hit the return states + inside the loop after e[4] and e[3], which brings it back to + the enter or exit decision. In this case, we know that we + cannot evaluate those predicates because we have fallen off + the edge of the stack and will in general not know which prec + parameter is the right one to use in the predicate. + + Because we have special information, that these are precedence + predicates, we can resolve them without failing over to full + LL despite their context sensitive nature. We make an + assumption that prec[-1] <= prec[0], meaning that the current + precedence level is greater than or equal to the precedence + level of recursive invocations above us in the stack. For + example, if predicate {3>=prec}? is true of the current prec, + then one option is to enter the loop to match it now. The + other option is to exit the loop and the left recursive rule + to match the current operator in rule invocation further up + the stack. But, we know that all of those prec are lower or + the same value and so we can decide to enter the loop instead + of matching it later. That means we can strip out the other + configuration for the exit branch. + + So imagine we have (14,1,$,{2>=prec}?) 
and then + (14,2,$-dipsIntoOuterContext,{2>=prec}?). The optimization + allows us to collapse these two configurations. We know that + if {2>=prec}? is true for the current prec parameter, it will + also be true for any prec from an invoking e call, indicated + by dipsIntoOuterContext. As the predicates are both true, we + have the option to evaluate them early in the decision start + state. We do this by stripping both predicates and choosing to + enter the loop as it is consistent with the notion of operator + precedence. It's also how the full LL conflict resolution + would work. + + The solution requires a different DFA start state for each + precedence level. + + The basic filter mechanism is to remove configurations of the + form (p, 2, pi) if (p, 1, pi) exists for the same p and pi. In + other words, for the same ATN state and predicate context, + remove any configuration associated with an exit branch if + there is a configuration associated with the enter branch. + + It's also the case that the filter evaluates precedence + predicates and resolves conflicts according to precedence + levels. For example, for input 1+2+3 at the first +, we see + prediction filtering + + [(11,1,[$],{3>=prec}?), (14,1,[$],{2>=prec}?), (5,2,[$],up=1), + (11,2,[$],up=1), (14,2,[$],up=1)],hasSemanticContext=true,dipsIntoOuterContext + + to + + [(11,1,[$]), (14,1,[$]), (5,2,[$],up=1)],dipsIntoOuterContext + + This filters because {3>=prec}? evals to true and collapses + (11,1,[$],{3>=prec}?) and (11,2,[$],up=1) since early conflict + resolution based upon rules of operator precedence fits with + our usual match first alt upon conflict. + + We noticed a problem where a recursive call resets precedence + to 0. Sam's fix: each config has flag indicating if it has + returned from an expr[0] call. then just don't filter any + config with that flag set. flag is carried along in + closure(). 
so to avoid adding field, set bit just under sign + bit of dipsIntoOuterContext (SUPPRESS_PRECEDENCE_FILTER). + With the change you filter "unless (p, 2, pi) was reached + after leaving the rule stop state of the LR rule containing + state p, corresponding to a rule invocation with precedence + level 0" + */ + + /// This method transforms the start state computed by + /// {@link #computeStartState} to the special start state used by a + /// precedence DFA for a particular precedence value. The transformation + /// process applies the following changes to the start state's configuration + /// set. + /// + ///
    + ///
  1. Evaluate the precedence predicates for each configuration using + /// {@link SemanticContext#evalPrecedence}.
  2. + ///
  3. When {@link ATNConfig#isPrecedenceFilterSuppressed} is [false], + /// remove all configurations which predict an alternative greater than 1, + /// for which another configuration that predicts alternative 1 is in the + /// same ATN state with the same prediction context. This transformation is + /// valid for the following reasons: + ///
      + ///
    • The closure block cannot contain any epsilon transitions which bypass + /// the body of the closure, so all states reachable via alternative 1 are + /// part of the precedence alternatives of the transformed left-recursive + /// rule.
    • + ///
    • The "primary" portion of a left recursive rule cannot contain an + /// epsilon transition, so the only way an alternative other than 1 can exist + /// in a state that is also reachable via alternative 1 is by nesting calls + /// to the left-recursive rule, with the outer calls not being at the + /// preferred precedence level. The + /// {@link ATNConfig#isPrecedenceFilterSuppressed} property marks ATN + /// configurations which do not meet this condition, and therefore are not + /// eligible for elimination during the filtering process.
    • + ///
    + ///
  4. + ///
+ /// + ///

+ /// The prediction context must be considered by this filter to address + /// situations like the following. + ///

+ /// + ///
+  /// grammar TA;
+  /// prog: statement* EOF;
+  /// statement: letterA | statement letterA 'b' ;
+  /// letterA: 'a';
+  /// 
+ ///
+ ///

+ /// If the above grammar, the ATN state immediately before the token + /// reference {@code 'a'} in [letterA] is reachable from the left edge + /// of both the primary and closure blocks of the left-recursive rule + /// [statement]. The prediction context associated with each of these + /// configurations distinguishes between them, and prevents the alternative + /// which stepped out to [prog] (and then back in to [statement] + /// from being eliminated by the filter. + ///

+ /// + /// @param configs The configuration set computed by + /// {@link #computeStartState} as the start state for the DFA. + /// @return The transformed configuration set representing the start state + /// for a precedence DFA at a particular precedence level (determined by + /// calling {@link Parser#getPrecedence}). + ATNConfigSet applyPrecedenceFilter(ATNConfigSet configs) { + final statesFromAlt1 = {}; + final configSet = ATNConfigSet(configs.fullCtx); + for (var config in configs) { + // handle alt 1 first + if (config.alt != 1) { + continue; + } + + final updatedContext = + config.semanticContext.evalPrecedence(parser, _outerContext); + if (updatedContext == null) { + // the configuration was eliminated + continue; + } + + statesFromAlt1[config.state.stateNumber] = config.context; + if (updatedContext != config.semanticContext) { + configSet.add( + ATNConfig.dup(config, semanticContext: updatedContext), + mergeCache); + } else { + configSet.add(config, mergeCache); + } + } + + for (var config in configs) { + if (config.alt == 1) { + // already handled + continue; + } + + if (!config.isPrecedenceFilterSuppressed()) { + /* In the future, this elimination step could be updated to also + * filter the prediction context for alternatives predicting alt>1 + * (basically a graph subtraction algorithm). + */ + final context = statesFromAlt1[config.state.stateNumber]; + if (context != null && context == config.context) { + // eliminated + continue; + } + } + + configSet.add(config, mergeCache); + } + + return configSet; + } + + ATNState getReachableTarget(Transition trans, int ttype) { + if (trans.matches(ttype, 0, atn.maxTokenType)) { + return trans.target; + } + + return null; + } + + List getPredsForAmbigAlts( + BitSet ambigAlts, ATNConfigSet configs, int nalts) { + // REACH=[1|1|[]|0:0, 1|2|[]|0:1] + /* altToPred starts as an array of all null contexts. The entry at index i + * corresponds to alternative i. altToPred[i] may have one of three values: + * 1. 
null: no ATNConfig c is found such that c.alt==i + * 2. SemanticContext.NONE: At least one ATNConfig c exists such that + * c.alt==i and c.semanticContext==SemanticContext.NONE. In other words, + * alt i has at least one unpredicated config. + * 3. Non-NONE Semantic Context: There exists at least one, and for all + * ATNConfig c such that c.alt==i, c.semanticContext!=SemanticContext.NONE. + * + * From this, it is clear that NONE||anything==NONE. + */ + var altToPred = List(nalts + 1); + for (var c in configs) { + if (ambigAlts[c.alt]) { + altToPred[c.alt] = + SemanticContext.or(altToPred[c.alt], c.semanticContext); + } + } + + var nPredAlts = 0; + for (var i = 1; i <= nalts; i++) { + if (altToPred[i] == null) { + altToPred[i] = SemanticContext.NONE; + } else if (altToPred[i] != SemanticContext.NONE) { + nPredAlts++; + } + } + +// // Optimize away p||p and p&&p TODO: optimize() was a no-op +// for (int i = 0; i < altToPred.length; i++) { +// altToPred[i] = altToPred[i].optimize(); +// } + + // nonambig alts are null in altToPred + if (nPredAlts == 0) altToPred = null; + if (debug) log('getPredsForAmbigAlts result $altToPred'); + return altToPred; + } + + List getPredicatePredictions( + BitSet ambigAlts, List altToPred) { + final pairs = []; + var containsPredicate = false; + for (var i = 1; i < altToPred.length; i++) { + final pred = altToPred[i]; + + // unpredicated is indicated by SemanticContext.NONE + assert(pred != null); + + if (ambigAlts != null && ambigAlts[i]) { + pairs.add(PredPrediction(pred, i)); + } + if (pred != SemanticContext.NONE) containsPredicate = true; + } + + if (!containsPredicate) { + return null; + } + +// log(Arrays.toString(altToPred)+"->"+pairs); + return pairs; + } + + /// This method is used to improve the localization of error messages by + /// choosing an alternative rather than throwing a + /// [NoViableAltException] in particular prediction scenarios where the + /// {@link #ERROR} state was reached during ATN simulation. + /// + ///

+ /// The default implementation of this method uses the following + /// algorithm to identify an ATN configuration which successfully parsed the + /// decision entry rule. Choosing such an alternative ensures that the + /// [ParserRuleContext] returned by the calling rule will be complete + /// and valid, and the syntax error will be reported later at a more + /// localized location.

+ /// + ///
    + ///
  • If a syntactically valid path or paths reach the end of the decision rule and + /// they are semantically valid if predicated, return the min associated alt.
  • + ///
  • Else, if a semantically invalid but syntactically valid path exist + /// or paths exist, return the minimum associated alt. + ///
  • + ///
  • Otherwise, return {@link ATN#INVALID_ALT_NUMBER}.
  • + ///
+ /// + ///

+ /// In some scenarios, the algorithm described above could predict an + /// alternative which will result in a [FailedPredicateException] in + /// the parser. Specifically, this could occur if the only configuration + /// capable of successfully parsing to the end of the decision rule is + /// blocked by a semantic predicate. By choosing this alternative within + /// {@link #adaptivePredict} instead of throwing a + /// [NoViableAltException], the resulting + /// [FailedPredicateException] in the parser will identify the specific + /// predicate which is preventing the parser from successfully parsing the + /// decision rule, which helps developers identify and correct logic errors + /// in semantic predicates. + ///

+ /// + /// @param configs The ATN configurations which were valid immediately before + /// the {@link #ERROR} state was reached + /// @param outerContext The is the \gamma_0 initial parser context from the paper + /// or the parser stack at the instant before prediction commences. + /// + /// @return The value to return from {@link #adaptivePredict}, or + /// {@link ATN#INVALID_ALT_NUMBER} if a suitable alternative was not + /// identified and {@link #adaptivePredict} should report an error instead. + int getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule( + ATNConfigSet configs, ParserRuleContext outerContext) { + final sets = + splitAccordingToSemanticValidity(configs, outerContext); + final semValidConfigs = sets.a; + final semInvalidConfigs = sets.b; + var alt = getAltThatFinishedDecisionEntryRule(semValidConfigs); + if (alt != ATN.INVALID_ALT_NUMBER) { + // semantically/syntactically viable path exists + return alt; + } + // Is there a syntactically valid path with a failed pred? + if (semInvalidConfigs.isNotEmpty) { + alt = getAltThatFinishedDecisionEntryRule(semInvalidConfigs); + if (alt != ATN.INVALID_ALT_NUMBER) { + // syntactically viable path exists + return alt; + } + } + return ATN.INVALID_ALT_NUMBER; + } + + int getAltThatFinishedDecisionEntryRule(ATNConfigSet configs) { + final alts = IntervalSet(); + for (var c in configs) { + if (c.outerContextDepth > 0 || + (c.state is RuleStopState && c.context.hasEmptyPath())) { + alts.addOne(c.alt); + } + } + if (alts.length == 0) return ATN.INVALID_ALT_NUMBER; + return alts.minElement; + } + + /// Walk the list of configurations and split them according to + /// those that have preds evaluating to true/false. If no pred, assume + /// true pred and include in succeeded set. Returns Pair of sets. + /// + /// Create a new set so as not to alter the incoming parameter. 
+ /// + /// Assumption: the input stream has been restored to the starting point + /// prediction, which is where predicates need to evaluate. + Pair splitAccordingToSemanticValidity( + ATNConfigSet configs, ParserRuleContext outerContext) { + final succeeded = ATNConfigSet(configs.fullCtx); + final failed = ATNConfigSet(configs.fullCtx); + for (var c in configs) { + if (c.semanticContext != SemanticContext.NONE) { + final predicateEvaluationResult = evalSemanticContextOne( + c.semanticContext, outerContext, c.alt, configs.fullCtx); + if (predicateEvaluationResult) { + succeeded.add(c); + } else { + failed.add(c); + } + } else { + succeeded.add(c); + } + } + return Pair(succeeded, failed); + } + + /// Look through a list of predicate/alt pairs, returning alts for the + /// pairs that win. A [NONE] predicate indicates an alt containing an + /// unpredicated config which behaves as "always true." If !complete + /// then we stop at the first predicate that evaluates to true. This + /// includes pairs with null predicates. + BitSet evalSemanticContext(List predPredictions, + ParserRuleContext outerContext, bool complete) { + final predictions = BitSet(); + for (var pair in predPredictions) { + if (pair.pred == SemanticContext.NONE) { + predictions.set(pair.alt); + if (!complete) { + break; + } + continue; + } + + final fullCtx = false; // in dfa + final predicateEvaluationResult = + evalSemanticContextOne(pair.pred, outerContext, pair.alt, fullCtx); + if (debug || dfa_debug) { + log('eval pred $pair=$predicateEvaluationResult'); + } + + if (predicateEvaluationResult) { + if (debug || dfa_debug) log('PREDICT ' + pair.alt); + predictions.set(pair.alt); + if (!complete) { + break; + } + } + } + + return predictions; + } + + /// Evaluate a semantic context within a specific parser context. + /// + ///

+ /// This method might not be called for every semantic context evaluated + /// during the prediction process. In particular, we currently do not + /// evaluate the following but it may change in the future:

+ /// + ///
    + ///
  • Precedence predicates (represented by + /// {@link SemanticContext.PrecedencePredicate}) are not currently evaluated + /// through this method.
  • + ///
  • Operator predicates (represented by {@link SemanticContext.AND} and + /// {@link SemanticContext.OR}) are evaluated as a single semantic + /// context, rather than evaluating the operands individually. + /// Implementations which require evaluation results from individual + /// predicates should override this method to explicitly handle evaluation of + /// the operands within operator predicates.
  • + ///
+ /// + /// @param pred The semantic context to evaluate + /// @param parserCallStack The parser context in which to evaluate the + /// semantic context + /// @param alt The alternative which is guarded by [pred] + /// @param fullCtx [true] if the evaluation is occurring during LL + /// prediction; otherwise, [false] if the evaluation is occurring + /// during SLL prediction + /// + /// @since 4.3 + bool evalSemanticContextOne(SemanticContext pred, + ParserRuleContext parserCallStack, int alt, bool fullCtx) { + return pred.eval(parser, parserCallStack); + } + + /* TODO: If we are doing predicates, there is no point in pursuing + closure operations if we reach a DFA state that uniquely predicts + alternative. We will not be caching that DFA state and it is a + waste to pursue the closure. Might have to advance when we do + ambig detection thought :( + */ + + void closure( + ATNConfig config, + ATNConfigSet configs, + Set closureBusy, + bool collectPredicates, + bool fullCtx, + bool treatEofAsEpsilon) { + final initialDepth = 0; + closureCheckingStopState(config, configs, closureBusy, collectPredicates, + fullCtx, initialDepth, treatEofAsEpsilon); + assert(!fullCtx || !configs.dipsIntoOuterContext); + } + + void closureCheckingStopState( + ATNConfig config, + ATNConfigSet configs, + Set closureBusy, + bool collectPredicates, + bool fullCtx, + int depth, + bool treatEofAsEpsilon) { + if (debug) log('closure(' + config.toString(parser, true) + ')'); + + if (config.state is RuleStopState) { + // We hit rule end. 
If we have context info, use it + // run thru all possible stack tops in ctx + if (!config.context.isEmpty) { + for (var i = 0; i < config.context.length; i++) { + if (config.context.getReturnState(i) == + PredictionContext.EMPTY_RETURN_STATE) { + if (fullCtx) { + configs.add( + ATNConfig.dup(config, + state: config.state, context: PredictionContext.EMPTY), + mergeCache); + continue; + } else { + // we have no context info, just chase follow links (if greedy) + if (debug) { + log('FALLING off rule ' + getRuleName(config.state.ruleIndex)); + } + closure_(config, configs, closureBusy, collectPredicates, fullCtx, + depth, treatEofAsEpsilon); + } + continue; + } + final returnState = atn.states[config.context.getReturnState(i)]; + final newContext = + config.context.getParent(i); // "pop" return state + final c = ATNConfig( + returnState, config.alt, newContext, config.semanticContext); + // While we have context to pop back from, we may have + // gotten that context AFTER having falling off a rule. + // Make sure we track that we are now out of context. + // + // This assignment also propagates the + // isPrecedenceFilterSuppressed() value to the new + // configuration. 
+ c.reachesIntoOuterContext = config.reachesIntoOuterContext; +// assert(depth > int.MIN_VALUE); + closureCheckingStopState(c, configs, closureBusy, collectPredicates, + fullCtx, depth - 1, treatEofAsEpsilon); + } + return; + } else if (fullCtx) { + // reached end of start rule + configs.add(config, mergeCache); + return; + } else { + // else if we have no context info, just chase follow links (if greedy) + if (debug) { + log('FALLING off rule ' + getRuleName(config.state.ruleIndex)); + } + } + } + + closure_(config, configs, closureBusy, collectPredicates, fullCtx, depth, + treatEofAsEpsilon); + } + + /// Do the actual work of walking epsilon edges */ + void closure_( + ATNConfig config, + ATNConfigSet configs, + Set closureBusy, + bool collectPredicates, + bool fullCtx, + int depth, + bool treatEofAsEpsilon) { + final p = config.state; + // optimization + if (!p.onlyHasEpsilonTransitions()) { + configs.add(config, mergeCache); + // make sure to not return here, because EOF transitions can act as + // both epsilon transitions and non-epsilon transitions. +// if ( debug ) log("added config "+configs); + } + + for (var i = 0; i < p.numberOfTransitions; i++) { + if (i == 0 && canDropLoopEntryEdgeInLeftRecursiveRule(config)) continue; + + final t = p.transition(i); + final continueCollecting = !(t is ActionTransition) && collectPredicates; + final c = getEpsilonTarget(config, t, continueCollecting, depth == 0, + fullCtx, treatEofAsEpsilon); + if (c != null) { + var newDepth = depth; + if (config.state is RuleStopState) { + assert(!fullCtx); + // target fell off end of rule; mark resulting c as having dipped into outer context + // We can't get here if incoming config was rule stop and we had context + // track how far we dip into outer context. Might + // come in handy and we avoid evaluating context dependent + // preds if this is > 0. 
+ + if (_dfa != null && _dfa.isPrecedenceDfa()) { + final outermostPrecedenceReturn = + (t as EpsilonTransition).outermostPrecedenceReturn; + if (outermostPrecedenceReturn == _dfa.atnStartState.ruleIndex) { + c.setPrecedenceFilterSuppressed(true); + } + } + + c.reachesIntoOuterContext++; + + if (!closureBusy.add(c)) { + // avoid infinite recursion for right-recursive rules + continue; + } + + // TODO: can remove? only care when we add to set per middle of this method + configs.dipsIntoOuterContext = true; +// assert(newDepth > int.MIN_VALUE); + newDepth--; + if (debug) log('dips into outer ctx: $c'); + } else { + if (!t.isEpsilon && !closureBusy.add(c)) { + // avoid infinite recursion for EOF* and EOF+ + continue; + } + + if (t is RuleTransition) { + // latch when newDepth goes negative - once we step out of the entry context we can't return + if (newDepth >= 0) { + newDepth++; + } + } + } + + closureCheckingStopState(c, configs, closureBusy, continueCollecting, + fullCtx, newDepth, treatEofAsEpsilon); + } + } + } + + /// Implements first-edge (loop entry) elimination as an optimization + /// during closure operations. See antlr/antlr4#1398. + /// + /// The optimization is to avoid adding the loop entry config when + /// the exit path can only lead back to the same + /// StarLoopEntryState after popping context at the rule end state + /// (traversing only epsilon edges, so we're still in closure, in + /// this same rule). + /// + /// We need to detect any state that can reach loop entry on + /// epsilon w/o exiting rule. We don't have to look at FOLLOW + /// links, just ensure that all stack tops for config refer to key + /// states in LR rule. + /// + /// To verify we are in the right situation we must first check + /// closure is at a StarLoopEntryState generated during LR removal. + /// Then we check that each stack top of context is a return state + /// from one of these cases: + /// + /// 1. 'not' expr, '(' type ')' expr. 
The return state points at loop entry state + /// 2. expr op expr. The return state is the block end of internal block of (...)* + /// 3. 'between' expr 'and' expr. The return state of 2nd expr reference. + /// That state points at block end of internal block of (...)*. + /// 4. expr '?' expr ':' expr. The return state points at block end, + /// which points at loop entry state. + /// + /// If any is true for each stack top, then closure does not add a + /// config to the current config set for edge[0], the loop entry branch. + /// + /// Conditions fail if any context for the current config is: + /// + /// a. empty (we'd fall out of expr to do a global FOLLOW which could + /// even be to some weird spot in expr) or, + /// b. lies outside of expr or, + /// c. lies within expr but at a state not the BlockEndState + /// generated during LR removal + /// + /// Do we need to evaluate predicates ever in closure for this case? + /// + /// No. Predicates, including precedence predicates, are only + /// evaluated when computing a DFA start state. I.e., only before + /// the lookahead (but not parser) consumes a token. + /// + /// There are no epsilon edges allowed in LR rule alt blocks or in + /// the "primary" part (ID here). If closure is in + /// StarLoopEntryState any lookahead operation will have consumed a + /// token as there are no epsilon-paths that lead to + /// StarLoopEntryState. We do not have to evaluate predicates + /// therefore if we are in the generated StarLoopEntryState of a LR + /// rule. Note that when making a prediction starting at that + /// decision point, decision d=2, compute-start-state performs + /// closure starting at edges[0], edges[1] emanating from + /// StarLoopEntryState. That means it is not performing closure on + /// StarLoopEntryState during compute-start-state. + /// + /// How do we know this always gives same prediction answer? 
  ///
  /// Without predicates, loop entry and exit paths are ambiguous
  /// upon remaining input +b (in, say, a+b). Either path leads to a
  /// valid parse. Closure can lead to consuming + immediately or by
  /// falling out of this call to expr back into expr and loop back
  /// again to StarLoopEntryState to match +b. In this special case,
  /// we choose the more efficient path, which is to take the bypass
  /// path.
  ///
  /// The lookahead language has not changed because closure chooses
  /// one path over the other. Both paths lead to consuming the same
  /// remaining input during a lookahead operation. If the next token
  /// is an operator, lookahead will enter the choice block with
  /// operators. If it is not, lookahead will exit expr. Same as if
  /// closure had chosen to enter the choice block immediately.
  ///
  /// Closure is examining one config (some loopentrystate, some alt,
  /// context) which means it is considering exactly one alt. Closure
  /// always copies the same alt to any derived configs.
  ///
  /// How do we know this optimization doesn't mess up precedence in
  /// our parse trees?
  ///
  /// Looking through expr from left edge of stat only has to confirm
  /// that an input, say, a+b+c; begins with any valid interpretation
  /// of an expression. The precedence actually doesn't matter when
  /// making a decision in stat seeing through expr. It is only when
  /// parsing rule expr that we must use the precedence to get the
  /// right interpretation and, hence, parse tree.
  ///
  /// @since 4.6
  bool canDropLoopEntryEdgeInLeftRecursiveRule(ATNConfig config) {
    // Escape hatch: the optimization can be disabled globally for debugging.
    if (TURN_OFF_LR_LOOP_ENTRY_BRANCH_OPT) return false;
    final p = config.state;
    // First check to see if we are in StarLoopEntryState generated during
    // left-recursion elimination. For efficiency, also check if
    // the context has an empty stack case. If so, it would mean
    // global FOLLOW so we can't perform optimization
    if (p.stateType != StateType.STAR_LOOP_ENTRY ||
        !(p as StarLoopEntryState)
            .isPrecedenceDecision || // Are we the special loop entry/exit state?
        config.context.isEmpty || // If SLL wildcard
        config.context.hasEmptyPath()) {
      return false;
    }

    // Require all return states to return back to the same rule
    // that p is in.
    final numCtxs = config.context.length;
    for (var i = 0; i < numCtxs; i++) {
      // for each stack context
      final returnState = atn.states[config.context.getReturnState(i)];
      if (returnState.ruleIndex != p.ruleIndex) return false;
    }

    // The decision block of the loop entry; its end state bounds the
    // left-recursive alternatives.
    BlockStartState decisionStartState = p.transition(0).target;
    final blockEndStateNum = decisionStartState.endState.stateNumber;
    BlockEndState blockEndState = atn.states[blockEndStateNum];

    // Verify that the top of each stack context leads to loop entry/exit
    // state through epsilon edges and w/o leaving rule.
    for (var i = 0; i < numCtxs; i++) {
      // for each stack context
      final returnStateNumber = config.context.getReturnState(i);
      final returnState = atn.states[returnStateNumber];
      // all states must have single outgoing epsilon edge
      if (returnState.numberOfTransitions != 1 ||
          !returnState.transition(0).isEpsilon) {
        return false;
      }
      // Look for prefix op case like 'not expr', (' type ')' expr
      final returnStateTarget = returnState.transition(0).target;
      if (returnState.stateType == StateType.BLOCK_END &&
          returnStateTarget == p) {
        continue;
      }
      // Look for 'expr op expr' or case where expr's return state is block end
      // of (...)* internal block; the block end points to loop back
      // which points to p but we don't need to check that
      if (returnState == blockEndState) {
        continue;
      }
      // Look for ternary expr ? expr : expr. The return state points at block end,
      // which points at loop entry state
      if (returnStateTarget == blockEndState) {
        continue;
      }
      // Look for complex prefix 'between expr and expr' case where 2nd expr's
      // return state points at block end state of (...)* internal block
      if (returnStateTarget.stateType == StateType.BLOCK_END &&
          returnStateTarget.numberOfTransitions == 1 &&
          returnStateTarget.transition(0).isEpsilon &&
          returnStateTarget.transition(0).target == p) {
        continue;
      }

      // anything else ain't conforming
      return false;
    }

    return true;
  }

  /// Returns the name of rule [index], or the empty string when no parser
  /// is attached or the index is invalid. Used for debug output only.
  String getRuleName(int index) {
    if (parser != null && index >= 0) return parser.ruleNames[index];
    return '';
  }

  /// Compute the config reached by taking the (effectively) epsilon
  /// transition [t] from [config], dispatching on the transition type.
  /// Returns null when [t] does not act as epsilon here (e.g. a token
  /// edge when [treatEofAsEpsilon] does not apply).
  ATNConfig getEpsilonTarget(
      ATNConfig config,
      Transition t,
      bool collectPredicates,
      bool inContext,
      bool fullCtx,
      bool treatEofAsEpsilon) {
    switch (t.type) {
      case TransitionType.RULE:
        return ruleTransition(config, t);

      case TransitionType.PRECEDENCE:
        return precedenceTransition(
            config, t, collectPredicates, inContext, fullCtx);

      case TransitionType.PREDICATE:
        return predTransition(config, t, collectPredicates, inContext, fullCtx);
      case TransitionType.ACTION:
        return actionTransition(config, t);

      case TransitionType.EPSILON:
        return ATNConfig.dup(config, state: t.target);

      case TransitionType.ATOM:
      case TransitionType.RANGE:
      case TransitionType.SET:
        // EOF transitions act like epsilon transitions after the first EOF
        // transition is traversed
        if (treatEofAsEpsilon) {
          if (t.matches(Token.EOF, 0, 1)) {
            return ATNConfig.dup(config, state: t.target);
          }
        }

        return null;

      default:
        return null;
    }
  }

  /// Actions are not executed during prediction; simply step over the edge.
  ATNConfig actionTransition(ATNConfig config, ActionTransition t) {
    if (debug) log('ACTION edge ${t.ruleIndex}:${t.actionIndex}');
    return ATNConfig.dup(config, state: t.target);
  }

  /// Handle a precedence-predicate edge. In full-context mode the predicate
  /// is evaluated immediately (returning null when it fails); in SLL mode it
  /// is accumulated into the config's semantic context for later evaluation.
  ATNConfig precedenceTransition(
      ATNConfig config,
      PrecedencePredicateTransition pt,
      bool collectPredicates,
      bool inContext,
      bool fullCtx) {
    if (debug) {
      log('PRED (collectPredicates=$collectPredicates) ${pt.precedence}>=_p, ctx dependent=true');
      if (parser != null) {
        log('context surrounding pred is ${parser.getRuleInvocationStack()}');
      }
    }

    ATNConfig c;
    if (collectPredicates && inContext) {
      if (fullCtx) {
        // In full context mode, we can evaluate predicates on-the-fly
        // during closure, which dramatically reduces the size of
        // the config sets. It also obviates the need to test predicates
        // later during conflict resolution.
        final currentPosition = input.index;
        input.seek(startIndex);
        final predSucceeds = evalSemanticContextOne(
            pt.predicate, _outerContext, config.alt, fullCtx);
        input.seek(currentPosition);
        if (predSucceeds) {
          c = ATNConfig.dup(config, state: pt.target); // no pred context
        }
      } else {
        final newSemCtx =
            SemanticContext.and(config.semanticContext, pt.predicate);
        c = ATNConfig.dup(config,
            state: pt.target, semanticContext: newSemCtx);
      }
    } else {
      c = ATNConfig.dup(config, state: pt.target);
    }

    if (debug) log('config from pred transition=$c');
    return c;
  }

  /// Handle a semantic-predicate edge; mirrors [precedenceTransition] but
  /// also honors the predicate's context dependence: context-dependent
  /// predicates are only collected when [inContext] is true.
  ATNConfig predTransition(ATNConfig config, PredicateTransition pt,
      bool collectPredicates, bool inContext, bool fullCtx) {
    if (debug) {
      log('PRED (collectPredicates=$collectPredicates) ' '${pt.ruleIndex}:${pt.predIndex}' ', ctx dependent=${pt.isCtxDependent}');
      if (parser != null) {
        log('context surrounding pred is ${parser.getRuleInvocationStack()}');
      }
    }

    ATNConfig c;
    if (collectPredicates &&
        (!pt.isCtxDependent || (pt.isCtxDependent && inContext))) {
      if (fullCtx) {
        // In full context mode, we can evaluate predicates on-the-fly
        // during closure, which dramatically reduces the size of
        // the config sets. It also obviates the need to test predicates
        // later during conflict resolution.
        final currentPosition = input.index;
        input.seek(startIndex);
        final predSucceeds = evalSemanticContextOne(
            pt.predicate, _outerContext, config.alt, fullCtx);
        input.seek(currentPosition);
        if (predSucceeds) {
          c = ATNConfig.dup(config, state: pt.target); // no pred context
        }
      } else {
        final newSemCtx =
            SemanticContext.and(config.semanticContext, pt.predicate);
        c = ATNConfig.dup(config,
            state: pt.target, semanticContext: newSemCtx);
      }
    } else {
      c = ATNConfig.dup(config, state: pt.target);
    }

    if (debug) log('config from pred transition=$c');
    return c;
  }

  /// Follow a rule-call edge: push the follow state onto the prediction
  /// context stack and move to the called rule's start state.
  ATNConfig ruleTransition(ATNConfig config, RuleTransition t) {
    if (debug) {
      log('CALL rule ' +
          getRuleName(t.target.ruleIndex) +
          ', ctx=${config.context}');
    }

    final returnState = t.followState;
    PredictionContext newContext = SingletonPredictionContext.create(
        config.context, returnState.stateNumber);
    return ATNConfig.dup(config, state: t.target, context: newContext);
  }

  /// Gets a [BitSet] containing the alternatives in [configs]
  /// which are part of one or more conflicting alternative subsets.
  ///
  /// @param configs The [ATNConfigSet] to analyze.
  /// @return The alternatives in [configs] which are part of one or more
  /// conflicting alternative subsets. If [configs] does not contain any
  /// conflicting subsets, this method returns an empty [BitSet].
  BitSet getConflictingAlts(ATNConfigSet configs) {
    final altsets =
        PredictionModeExtension.getConflictingAltSubsets(configs);
    return PredictionModeExtension.getAlts(altsets);
  }

  /// Sam pointed out a problem with the previous definition, v3, of
  /// ambiguous states. If we have another state associated with conflicting
  /// alternatives, we should keep going. For example, the following grammar
  ///
  /// s : (ID | ID ID?) ';' ;
  ///
  /// When the ATN simulation reaches the state before ';', it has a DFA
  /// state that looks like: [12|1|[], 6|2|[], 12|2|[]]. Naturally
  /// 12|1|[] and 12|2|[] conflict, but we cannot stop processing this node
  /// because alternative two has another way to continue, via [6|2|[]].
  /// The key is that we have a single state that has config's only associated
  /// with a single alternative, 2, and crucially the state transitions
  /// among the configurations are all non-epsilon transitions. That means
  /// we don't consider any conflicts that include alternative 2. So, we
  /// ignore the conflict between alts 1 and 2. We ignore a set of
  /// conflicting alts when there is an intersection with an alternative
  /// associated with a single alt state in the state→config-list map.
  ///
  /// It's also the case that we might have two conflicting configurations but
  /// also a 3rd nonconflicting configuration for a different alternative:
  /// [1|1|[], 1|2|[], 8|3|[]]. This can come about from grammar:
  ///
  /// a : A | A | A B ;
  ///
  /// After matching input A, we reach the stop state for rule A, state 1.
  /// State 8 is the state right before B. Clearly alternatives 1 and 2
  /// conflict and no amount of further lookahead will separate the two.
  /// However, alternative 3 will be able to continue and so we do not
  /// stop working on this state. In the previous example, we're concerned
  /// with states associated with the conflicting alternatives. Here alt
  /// 3 is not associated with the conflicting configs, but since we can continue
  /// looking for input reasonably, I don't declare the state done. We
  /// ignore a set of conflicting alts when we have an alternative
  /// that we still need to pursue.
+ BitSet getConflictingAltsOrUniqueAlt(ATNConfigSet configs) { + BitSet conflictingAlts; + if (configs.uniqueAlt != ATN.INVALID_ALT_NUMBER) { + conflictingAlts = BitSet(); + conflictingAlts.set(configs.uniqueAlt); + } else { + conflictingAlts = configs.conflictingAlts; + } + return conflictingAlts; + } + + String getTokenName(int t) { + if (t == Token.EOF) { + return 'EOF'; + } + + final vocabulary = parser != null + ? parser.vocabulary + : VocabularyImpl.EMPTY_VOCABULARY; + final displayName = vocabulary.getDisplayName(t); + if (displayName == t.toString()) { + return displayName; + } + + return displayName + '<$t>'; + } + + String getLookaheadName(TokenStream input) { + return getTokenName(input.LA(1)); + } + + /// Used for debugging in adaptivePredict around execATN but I cut + /// it out for clarity now that alg. works well. We can leave this + /// "dead" code for a bit. + void dumpDeadEndConfigs(NoViableAltException nvae) { + log('dead end configs: ', level: Level.SEVERE.value); + for (var c in nvae.deadEndConfigs) { + var trans = 'no edges'; + if (c.state.numberOfTransitions > 0) { + final t = c.state.transition(0); + if (t is AtomTransition) { + final at = t; + trans = 'Atom ' + getTokenName(at.atomLabel); + } else if (t is SetTransition) { + final st = t; + final not = st is NotSetTransition; + trans = (not ? 
'~' : '') + 'Set ' + st.label.toString(); + } + } + log(c.toString(parser, true) + ':' + trans, level: Level.SEVERE.value); + } + } + + NoViableAltException noViableAlt(TokenStream input, + ParserRuleContext outerContext, ATNConfigSet configs, int startIndex) { + return NoViableAltException(parser, input, input.get(startIndex), + input.LT(1), configs, outerContext); + } + + static int getUniqueAlt(ATNConfigSet configs) { + var alt = ATN.INVALID_ALT_NUMBER; + for (var c in configs) { + if (alt == ATN.INVALID_ALT_NUMBER) { + alt = c.alt; // found first alt + } else if (c.alt != alt) { + return ATN.INVALID_ALT_NUMBER; + } + } + return alt; + } + + /// Add an edge to the DFA, if possible. This method calls + /// {@link #addDFAState} to ensure the [to] state is present in the + /// DFA. If [from] is null, or if [t] is outside the + /// range of edges that can be represented in the DFA tables, this method + /// returns without adding the edge to the DFA. + /// + ///

If [to] is null, this method returns null. + /// Otherwise, this method returns the [DFAState] returned by calling + /// {@link #addDFAState} for the [to] state.

+ /// + /// @param dfa The DFA + /// @param from The source state for the edge + /// @param t The input symbol + /// @param to The target state for the edge + /// + /// @return If [to] is null, this method returns null; + /// otherwise this method returns the result of calling {@link #addDFAState} + /// on [to] + DFAState addDFAEdge(DFA dfa, DFAState from, int t, DFAState to) { + if (debug) { + log('EDGE $from -> $to upon ' + getTokenName(t)); + } + + if (to == null) { + return null; + } + + to = addDFAState(dfa, to); // used existing if possible not incoming + if (from == null || t < -1 || t > atn.maxTokenType) { + return to; + } + + from.edges ??= List(atn.maxTokenType + 1 + 1); + + from.edges[t + 1] = to; // connect + + if (debug) { + log('DFA=\n' + + dfa.toString(parser != null + ? parser.vocabulary + : VocabularyImpl.EMPTY_VOCABULARY)); + } + + return to; + } + + /// Add state [D] to the DFA if it is not already present, and return + /// the actual instance stored in the DFA. If a state equivalent to [D] + /// is already in the DFA, the existing state is returned. Otherwise this + /// method returns [D] after adding it to the DFA. + /// + ///

If [D] is {@link #ERROR}, this method returns {@link #ERROR} and + /// does not change the DFA.

  ///
  /// @param dfa The dfa
  /// @param D The DFA state to add
  /// @return The state stored in the DFA. This will be either the existing
  /// state if [D] is already in the DFA, or [D] itself if the
  /// state was not already present.
  DFAState addDFAState(DFA dfa, DFAState D) {
    // The shared error sentinel is never interned into a DFA.
    if (D == ATNSimulator.ERROR) {
      return D;
    }

    final existing = dfa.states[D];
    if (existing != null) return existing;

    D.stateNumber = dfa.states.length;
    // Freeze the config set before publishing the state so it is safe to
    // share as a map key.
    if (!D.configs.readOnly) {
      D.configs.optimizeConfigs(this);
      D.configs.readOnly = true;
    }
    dfa.states[D] = D;
    if (debug) log('adding new DFA state: $D');
    return D;
  }

  /// Notify listeners that SLL prediction conflicted and full-context (LL)
  /// prediction is about to be attempted for this decision.
  void reportAttemptingFullContext(DFA dfa, BitSet conflictingAlts,
      ATNConfigSet configs, int startIndex, int stopIndex) {
    if (debug || retry_debug) {
      final interval = Interval.of(startIndex, stopIndex);
      log('reportAttemptingFullContext decision=${dfa.decision}:$configs' ', input=' +
          parser.tokenStream.getText(interval));
    }
    if (parser != null) {
      parser.errorListenerDispatch.reportAttemptingFullContext(
          parser, dfa, startIndex, stopIndex, conflictingAlts, configs);
    }
  }

  /// Notify listeners that full-context prediction chose a different
  /// alternative than SLL would have (a context-sensitive decision).
  void reportContextSensitivity(DFA dfa, int prediction, ATNConfigSet configs,
      int startIndex, int stopIndex) {
    if (debug || retry_debug) {
      final interval = Interval.of(startIndex, stopIndex);
      log('reportContextSensitivity decision=${dfa.decision}:$configs' ', input=' +
          parser.tokenStream.getText(interval));
    }
    if (parser != null) {
      parser.errorListenerDispatch.reportContextSensitivity(
          parser, dfa, startIndex, stopIndex, prediction, configs);
    }
  }

  /// If context sensitive parsing, we know it's ambiguity not conflict.
  void reportAmbiguity(
      DFA dfa,
      DFAState D, // the DFA state from execATN() that had SLL conflicts
      int startIndex,
      int stopIndex,
      bool exact,
      BitSet ambigAlts,
      ATNConfigSet configs) // configs that LL not SLL considered conflicting
  {
    if (debug || retry_debug) {
      final interval = Interval.of(startIndex, stopIndex);
      log('reportAmbiguity $ambigAlts:$configs' ', input=' +
          parser.tokenStream.getText(interval));
    }
    if (parser != null) {
      parser.errorListenerDispatch.reportAmbiguity(
          parser, dfa, startIndex, stopIndex, exact, ambigAlts, configs);
    }
  }
}

/// This enumeration defines the prediction modes available in ANTLR 4 along with
/// utility methods for analyzing configuration sets for conflicts and/or
/// ambiguities.
enum PredictionMode {
  /// The SLL(*) prediction mode. This prediction mode ignores the current
  /// parser context when making predictions. This is the fastest prediction
  /// mode, and provides correct results for many grammars. This prediction
  /// mode is more powerful than the prediction mode provided by ANTLR 3, but
  /// may result in syntax errors for grammar and input combinations which are
  /// not SLL.
  ///
  /// When using this prediction mode, the parser will either return a correct
  /// parse tree (i.e. the same parse tree that would be returned with the
  /// [LL] prediction mode), or it will report a syntax error. If a
  /// syntax error is encountered when using the [SLL] prediction mode,
  /// it may be due to either an actual syntax error in the input or indicate
  /// that the particular combination of grammar and input requires the more
  /// powerful [LL] prediction abilities to complete successfully.
  ///
  /// This prediction mode does not provide any guarantees for prediction
  /// behavior for syntactically-incorrect inputs.
  SLL,

  /// The LL(*) prediction mode. This prediction mode allows the current parser
  /// context to be used for resolving SLL conflicts that occur during
  /// prediction. This is the fastest prediction mode that guarantees correct
  /// parse results for all combinations of grammars with syntactically correct
  /// inputs.
  ///
  /// When using this prediction mode, the parser will make correct decisions
  /// for all syntactically-correct grammar and input combinations. However, in
  /// cases where the grammar is truly ambiguous this prediction mode might not
  /// report a precise answer for exactly which alternatives are
  /// ambiguous.
  ///
  /// This prediction mode does not provide any guarantees for prediction
  /// behavior for syntactically-incorrect inputs.
  LL,

  /// The LL(*) prediction mode with exact ambiguity detection. In addition to
  /// the correctness guarantees provided by the [LL] prediction mode,
  /// this prediction mode instructs the prediction algorithm to determine the
  /// complete and exact set of ambiguous alternatives for every ambiguous
  /// decision encountered while parsing.
  ///
  /// This prediction mode may be used for diagnosing ambiguities during
  /// grammar development. Due to the performance overhead of calculating sets
  /// of ambiguous alternatives, this prediction mode should be avoided when
  /// the exact results are not necessary.
  ///
  /// This prediction mode does not provide any guarantees for prediction
  /// behavior for syntactically-incorrect inputs.
  LL_EXACT_AMBIG_DETECTION,
}

extension PredictionModeExtension on PredictionMode {
  /// Computes the SLL prediction termination condition.
  ///
  /// This method computes the SLL prediction termination condition for both of
  /// the following cases.
  ///
  /// - The usual SLL+LL fallback upon SLL conflict
  /// - Pure SLL without LL fallback
  ///
  /// **COMBINED SLL+LL PARSING**
  ///
  /// When LL-fallback is enabled upon SLL conflict, correct predictions are
  /// ensured regardless of how the termination condition is computed by this
  /// method. Due to the substantially higher cost of LL prediction, the
  /// prediction should only fall back to LL when the additional lookahead
  /// cannot lead to a unique SLL prediction.
  ///
  /// Assuming combined SLL+LL parsing, an SLL configuration set with only
  /// conflicting subsets should fall back to full LL, even if the
  /// configuration sets don't resolve to the same alternative (e.g.
  /// `{1,2}` and `{3,4}`). If there is at least one non-conflicting
  /// configuration, SLL could continue with the hopes that more lookahead will
  /// resolve via one of those non-conflicting configurations.
  ///
  /// Here's the prediction termination rule then: SLL (for SLL+LL parsing)
  /// stops when it sees only conflicting configuration subsets. In contrast,
  /// full LL keeps going when there is uncertainty.
  ///
  /// **HEURISTIC**
  ///
  /// As a heuristic, we stop prediction when we see any conflicting subset
  /// unless we see a state that only has one alternative associated with it.
  /// The single-alt-state thing lets prediction continue upon rules like
  /// (otherwise, it would admit defeat too soon):
  ///
  /// `[12|1|[], 6|2|[], 12|2|[]]. s : (ID | ID ID?) ';' ;`
  ///
  /// When the ATN simulation reaches the state before `';'`, it has a
  /// DFA state that looks like: `[12|1|[], 6|2|[], 12|2|[]]`. Naturally
  /// `12|1|[]` and `12|2|[]` conflict, but we cannot stop
  /// processing this node because alternative two has another way to continue,
  /// via `[6|2|[]]`.
  ///
  /// It also lets us continue for this rule:
  ///
  /// `[1|1|[], 1|2|[], 8|3|[]] a : A | A | A B ;`
  ///
  /// After matching input A, we reach the stop state for rule A, state 1.
  /// State 8 is the state right before B. Clearly alternatives 1 and 2
  /// conflict and no amount of further lookahead will separate the two.
  /// However, alternative 3 will be able to continue and so we do not stop
  /// working on this state. In the previous example, we're concerned with
  /// states associated with the conflicting alternatives. Here alt 3 is not
  /// associated with the conflicting configs, but since we can continue
  /// looking for input reasonably, don't declare the state done.
  ///
  /// **PURE SLL PARSING**
  ///
  /// To handle pure SLL parsing, all we have to do is make sure that we
  /// combine stack contexts for configurations that differ only by semantic
  /// predicate. From there, we can do the usual SLL termination heuristic.
  ///
  /// **PREDICATES IN SLL+LL PARSING**
  ///
  /// SLL decisions don't evaluate predicates until after they reach DFA stop
  /// states because they need to create the DFA cache that works in all
  /// semantic situations. In contrast, full LL evaluates predicates collected
  /// during start state computation so it can ignore predicates thereafter.
  /// This means that SLL termination detection can totally ignore semantic
  /// predicates.
  ///
  /// Implementation-wise, [ATNConfigSet] combines stack contexts but not
  /// semantic predicate contexts so we might see two configurations like the
  /// following.
  ///
  /// `(s, 1, x, {}), (s, 1, x', {p})`
  ///
  /// Before testing these configurations against others, we have to merge
  /// `x` and `x'` (without modifying the existing configurations).
  /// For example, we test `(x+x')==x''` when looking for conflicts in
  /// the following configurations.
  ///
  /// `(s, 1, x, {}), (s, 1, x', {p}), (s, 2, x'', {})`
  ///
  /// If the configuration set has predicates (as indicated by
  /// {@link ATNConfigSet#hasSemanticContext}), this algorithm makes a copy of
  /// the configurations to strip out all of the predicates so that a standard
  /// [ATNConfigSet] will merge everything ignoring predicates.
  static bool hasSLLConflictTerminatingPrediction(
      PredictionMode mode, ATNConfigSet configs) {
    /* Configs in rule stop states indicate reaching the end of the decision
     * rule (local context) or end of start rule (full context). If all
     * configs meet this condition, then none of the configurations is able
     * to match additional input so we terminate prediction.
     */
    if (allConfigsInRuleStopStates(configs)) {
      return true;
    }

    // pure SLL mode parsing
    if (mode == PredictionMode.SLL) {
      // Don't bother with combining configs from different semantic
      // contexts if we can fail over to full LL; costs more time
      // since we'll often fail over anyway.
      if (configs.hasSemanticContext) {
        // dup configs, tossing out semantic predicates
        final dup = ATNConfigSet();
        for (var c in configs) {
          c = ATNConfig.dup(c, semanticContext: SemanticContext.NONE);
          dup.add(c);
        }
        configs = dup;
      }
      // now we have combined contexts for configs with dissimilar preds
    }

    // pure SLL or combined SLL+LL mode parsing

    final altsets = getConflictingAltSubsets(configs);
    final heuristic =
        hasConflictingAltSet(altsets) && !hasStateAssociatedWithOneAlt(configs);
    return heuristic;
  }

  /// Checks if any configuration in [configs] is in a
  /// [RuleStopState]. Configurations meeting this condition have reached
  /// the end of the decision rule (local context) or end of start rule (full
  /// context).
  ///
  /// @param configs the configuration set to test
  /// @return [true] if any configuration in [configs] is in a
  /// [RuleStopState], otherwise [false]
  static bool hasConfigInRuleStopState(ATNConfigSet configs) {
    for (var c in configs) {
      if (c.state is RuleStopState) {
        return true;
      }
    }

    return false;
  }

  /// Checks if all configurations in [configs] are in a
  /// [RuleStopState]. Configurations meeting this condition have reached
  /// the end of the decision rule (local context) or end of start rule (full
  /// context).
  ///
  /// @param configs the configuration set to test
  /// @return [true] if all configurations in [configs] are in a
  /// [RuleStopState], otherwise [false]
  static bool allConfigsInRuleStopStates(ATNConfigSet configs) {
    for (var config in configs) {
      if (!(config.state is RuleStopState)) {
        return false;
      }
    }

    return true;
  }

  /// Full LL prediction termination.
  ///
  /// Can we stop looking ahead during ATN simulation or is there some
  /// uncertainty as to which alternative we will ultimately pick, after
  /// consuming more input? Even if there are partial conflicts, we might know
  /// that everything is going to resolve to the same minimum alternative. That
  /// means we can stop since no more lookahead will change that fact. On the
  /// other hand, there might be multiple conflicts that resolve to different
  /// minimums. That means we need more look ahead to decide which of those
  /// alternatives we should predict.
  ///
  /// The basic idea is to split the set of configurations `C`, into
  /// conflicting subsets `(s, _, ctx, _)` and singleton subsets with
  /// non-conflicting configurations. Two configurations conflict if they have
  /// identical {@link ATNConfig#state} and {@link ATNConfig#context} values
  /// but different {@link ATNConfig#alt} value, e.g. `(s, i, ctx, _)`
  /// and `(s, j, ctx, _)` for `i!=j`.
  ///
  /// Reduce these configuration subsets to the set of possible alternatives.
  /// You can compute the alternative subsets in one pass as follows:
  ///
  /// `A_s,ctx = {i | (s, i, ctx, _)}` for each configuration in
  /// `C` holding `s` and `ctx` fixed.
  ///
  /// Or in pseudo-code, for each configuration `c` in `C`:
  ///
  ///     map[c] U= c.alt  # map hash/equals uses s and x, not
  ///                      # alt and not pred
  ///
  /// The values in `map` are the set of `A_s,ctx` sets.
  ///
  /// If `|A_s,ctx|=1` then there is no conflict associated with
  /// `s` and `ctx`.
  ///
  /// Reduce the subsets to singletons by choosing a minimum of each subset. If
  /// the union of these alternative subsets is a singleton, then no amount of
  /// more lookahead will help us. We will always pick that alternative. If,
  /// however, there is more than one alternative, then we are uncertain which
  /// alternative to predict and must continue looking for resolution. We may
  /// or may not discover an ambiguity in the future, even if there are no
  /// conflicting subsets this round.
  ///
  /// The biggest sin is to terminate early because it means we've made a
  /// decision but were uncertain as to the eventual outcome. We haven't used
  /// enough lookahead. On the other hand, announcing a conflict too late is no
  /// big deal; you will still have the conflict. It's just inefficient. It
  /// might even look until the end of file.
  ///
  /// No special consideration for semantic predicates is required because
  /// predicates are evaluated on-the-fly for full LL prediction, ensuring that
  /// no configuration contains a semantic context during the termination
  /// check.
  ///
  /// **CONFLICTING CONFIGS**
  ///
  /// Two configurations `(s, i, x)` and `(s, j, x')`, conflict
  /// when `i!=j` but `x=x'`. Because we merge all
  /// `(s, i, _)` configurations together, that means that there are at
  /// most `n` configurations associated with state `s` for
  /// `n` possible alternatives in the decision. The merged stacks
  /// complicate the comparison of configuration contexts `x` and
  /// `x'`. Sam checks to see if one is a subset of the other by calling
  /// merge and checking to see if the merged result is either `x` or
  /// `x'`. If the `x` associated with lowest alternative `i`
  /// is the superset, then `i` is the only possible prediction since the
  /// others resolve to `min(i)` as well. However, if `x` is
  /// associated with `j>i` then at least one stack configuration for
  /// `j` is not in conflict with alternative `i`. The algorithm
  /// should keep going, looking for more lookahead due to the uncertainty.
  ///
  /// For simplicity, I'm doing a equality check between `x` and
  /// `x'` that lets the algorithm continue to consume lookahead longer
  /// than necessary. The reason I like the equality is of course the
  /// simplicity but also because that is the test you need to detect the
  /// alternatives that are actually in conflict.
  ///
  /// **CONTINUE/STOP RULE**
  ///
  /// Continue if union of resolved alternative sets from non-conflicting and
  /// conflicting alternative subsets has more than one alternative. We are
  /// uncertain about which alternative to predict.
  ///
  /// The complete set of alternatives, `[i for (_,i,_)]`, tells us which
  /// alternatives are still in the running for the amount of input we've
  /// consumed at this point. The conflicting sets let us to strip away
  /// configurations that won't lead to more states because we resolve
  /// conflicts to the configuration with a minimum alternate for the
  /// conflicting set.
  ///
  /// **CASES**
  ///
  /// - no conflicts and more than 1 alternative in set => continue
  /// - `(s, 1, x)`, `(s, 2, x)`, `(s, 3, z)`,
  ///   `(s', 1, y)`, `(s', 2, y)` yields non-conflicting set
  ///   `{3}` U conflicting sets `min({1,2})` U `min({1,2})` =
  ///   `{1,3}` => continue
  /// - `(s, 1, x)`, `(s, 2, x)`, `(s', 1, y)`,
  ///   `(s', 2, y)`, `(s'', 1, z)` yields non-conflicting set
  ///   `{1}` U conflicting sets `min({1,2})` U `min({1,2})` =
  ///   `{1}` => stop and predict 1
  /// - `(s, 1, x)`, `(s, 2, x)`, `(s', 1, y)`,
  ///   `(s', 2, y)` yields conflicting, reduced sets `{1}` U
  ///   `{1}` = `{1}` => stop and predict 1, can announce
  ///   ambiguity `{1,2}`
  /// - `(s, 1, x)`, `(s, 2, x)`, `(s', 2, y)`,
  ///   `(s', 3, y)` yields conflicting, reduced sets `{1}` U
  ///   `{2}` = `{1,2}` => continue
  /// - `(s, 1, x)`, `(s, 2, x)`, `(s', 3, y)`,
  ///   `(s', 4, y)` yields conflicting, reduced sets `{1}` U
  ///   `{3}` = `{1,3}` => continue
  ///
  /// **EXACT AMBIGUITY DETECTION**
  ///
  /// If all states report the same conflicting set of alternatives, then we
  /// know we have the exact ambiguity set.
  ///
  /// `|A_i|>1` and `A_i = A_j` for all `i`, `j`.
  ///
  /// In other words, we continue examining lookahead until all `A_i`
  /// have more than one alternative and all `A_i` are the same. If
  /// `A={{1,2}, {1,3}}`, then regular LL prediction would terminate
  /// because the resolved set is `{1}`. To determine what the real
  /// ambiguity is, we have to know whether the ambiguity is between one and
  /// two or one and three so we keep going. We can only stop prediction when
  /// we need exact ambiguity detection when the sets look like
  /// `A={{1,2}}` or `{{1,2},{1,2}}`, etc...
+ static int resolvesToJustOneViableAlt(List altsets) { + return getSingleViableAlt(altsets); + } + + /// Determines if every alternative subset in [altsets] contains more + /// than one alternative. + /// + /// @param altsets a collection of alternative subsets + /// @return [true] if every [BitSet] in [altsets] has + /// {@link BitSet#cardinality cardinality} > 1, otherwise [false] + static bool allSubsetsConflict(List altsets) { + return !hasNonConflictingAltSet(altsets); + } + + /// Determines if any single alternative subset in [altsets] contains + /// exactly one alternative. + /// + /// @param altsets a collection of alternative subsets + /// @return [true] if [altsets] contains a [BitSet] with + /// {@link BitSet#cardinality cardinality} 1, otherwise [false] + static bool hasNonConflictingAltSet(List altsets) { + for (var alts in altsets) { + if (alts.cardinality == 1) { + return true; + } + } + return false; + } + + /// Determines if any single alternative subset in [altsets] contains + /// more than one alternative. + /// + /// @param altsets a collection of alternative subsets + /// @return [true] if [altsets] contains a [BitSet] with + /// {@link BitSet#cardinality cardinality} > 1, otherwise [false] + static bool hasConflictingAltSet(List altsets) { + for (var alts in altsets) { + if (alts.cardinality > 1) { + return true; + } + } + return false; + } + + /// Determines if every alternative subset in [altsets] is equivalent. + /// + /// @param altsets a collection of alternative subsets + /// @return [true] if every member of [altsets] is equal to the + /// others, otherwise [false] + static bool allSubsetsEqual(List altsets) { + final first = altsets.first; + return altsets.every((e) => e == first); + } + + /// Returns the unique alternative predicted by all alternative subsets in + /// [altsets]. If no such alternative exists, this method returns + /// {@link ATN#INVALID_ALT_NUMBER}. 
+ /// + /// @param altsets a collection of alternative subsets + static int getUniqueAlt(List altsets) { + final all = getAlts(altsets); + if (all.cardinality == 1) return all.nextset(0); + return ATN.INVALID_ALT_NUMBER; + } + + /// Gets the complete set of represented alternatives for a collection of + /// alternative subsets. This method returns the union of each [BitSet] + /// in [altsets]. + /// + /// @param altsets a collection of alternative subsets + /// @return the set of represented alternatives in [altsets] + static BitSet getAlts(List altsets) { + final all = BitSet(); + for (var alts in altsets) { + all.or(alts); + } + return all; + } + + /// Get union of all alts from configs. + /// + /// @since 4.5.1 + static BitSet getAltsFromConfigs(ATNConfigSet configs) { + final alts = BitSet(); + for (var config in configs) { + alts.set(config.alt); + } + return alts; + } + + /// This function gets the conflicting alt subsets from a configuration set. + /// For each configuration [c] in [configs]: + /// + ///
+  /// map[c] U= c.{@link ATNConfig#alt alt} # map hash/equals uses s and x, not
+  /// alt and not pred
+  /// 
+ static List getConflictingAltSubsets(ATNConfigSet configs) { + final configToAlts = + HashMap(equals: (ATNConfig a, ATNConfig b) { + if (identical(a, b)) return true; + if (a == null || b == null) return false; + return a.state.stateNumber == b.state.stateNumber && + a.context == b.context; + }, hashCode: (ATNConfig o) { + /** + * The hash code is only a function of the {@link ATNState#stateNumber} + * and {@link ATNConfig#context}. + */ + var hashCode = MurmurHash.initialize(7); + hashCode = MurmurHash.update(hashCode, o.state.stateNumber); + hashCode = MurmurHash.update(hashCode, o.context); + hashCode = MurmurHash.finish(hashCode, 2); + return hashCode; + }); + for (var c in configs) { + var alts = configToAlts[c]; + if (alts == null) { + alts = BitSet(); + configToAlts[c] = alts; + } + alts.set(c.alt); + } + return configToAlts.values.toList(); + } + + /// Get a map from state to alt subset from a configuration set. For each + /// configuration [c] in [configs]: + /// + ///
+  /// map[c.{@link ATNConfig#state state}] U= c.{@link ATNConfig#alt alt}
+  /// 
+ static Map getStateToAltMap(ATNConfigSet configs) { + final m = {}; + for (var c in configs) { + var alts = m[c.state]; + if (alts == null) { + alts = BitSet(); + m[c.state] = alts; + } + alts.set(c.alt); + } + return m; + } + + static bool hasStateAssociatedWithOneAlt(ATNConfigSet configs) { + final x = getStateToAltMap(configs); + for (var alts in x.values) { + if (alts.cardinality == 1) return true; + } + return false; + } + + static int getSingleViableAlt(List altsets) { + final viableAlts = BitSet(); + for (var alts in altsets) { + final minAlt = alts.nextset(0); + viableAlts.set(minAlt); + if (viableAlts.cardinality > 1) { + // more than 1 viable alt + return ATN.INVALID_ALT_NUMBER; + } + } + return viableAlts.nextset(0); + } +} diff --git a/runtime/Dart/lib/src/atn/src/profiling_atn_simulator.dart b/runtime/Dart/lib/src/atn/src/profiling_atn_simulator.dart new file mode 100644 index 000000000..a796acb61 --- /dev/null +++ b/runtime/Dart/lib/src/atn/src/profiling_atn_simulator.dart @@ -0,0 +1,229 @@ +/* + * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ + +import 'dart:math'; + +import '../../dfa/dfa.dart'; +import '../../parser.dart'; +import '../../parser_rule_context.dart'; +import '../../token_stream.dart'; +import '../../util/bit_set.dart'; +import 'atn_config_set.dart'; +import 'atn_simulator.dart'; +import 'info.dart'; +import 'parser_atn_simulator.dart'; +import 'semantic_context.dart'; + +class ProfilingATNSimulator extends ParserATNSimulator { + List decisions; + int numDecisions; + + int _sllStopIndex; + int _llStopIndex; + + int currentDecision; + DFAState currentState; + + /// At the point of LL failover, we record how SLL would resolve the conflict so that + /// we can determine whether or not a decision / input pair is context-sensitive. 
+ /// If LL gives a different result than SLL's predicted alternative, we have a + /// context sensitivity for sure. The converse is not necessarily true, however. + /// It's possible that after conflict resolution chooses minimum alternatives, + /// SLL could get the same answer as LL. Regardless of whether or not the result indicates + /// an ambiguity, it is not treated as a context sensitivity because LL prediction + /// was not required in order to produce a correct prediction for this decision and input sequence. + /// It may in fact still be a context sensitivity but we don't know by looking at the + /// minimum alternatives for the current input. + int conflictingAltResolvedBySLL; + + ProfilingATNSimulator(Parser parser) + : super(parser, parser.interpreter.atn, parser.interpreter.decisionToDFA, + parser.interpreter.sharedContextCache) { + numDecisions = atn.decisionToState.length; + decisions = List(numDecisions); + for (var i = 0; i < numDecisions; i++) { + decisions[i] = DecisionInfo(i); + } + } + + @override + int adaptivePredict( + TokenStream input, int decision, ParserRuleContext outerContext) { + try { + _sllStopIndex = -1; + _llStopIndex = -1; + currentDecision = decision; + + final start = + DateTime.now(); // TODO get nano seconds expensive but useful info + final alt = super.adaptivePredict(input, decision, outerContext); + final stop = DateTime.now(); + decisions[decision].timeInPrediction += + (stop.difference(start)).inMicroseconds; + decisions[decision].invocations++; + + final SLL_k = _sllStopIndex - startIndex + 1; + decisions[decision].SLL_TotalLook += SLL_k; + decisions[decision].SLL_MinLook = decisions[decision].SLL_MinLook == 0 + ? 
SLL_k + : min(decisions[decision].SLL_MinLook, SLL_k); + if (SLL_k > decisions[decision].SLL_MaxLook) { + decisions[decision].SLL_MaxLook = SLL_k; + decisions[decision].SLL_MaxLookEvent = LookaheadEventInfo( + decision, null, alt, input, startIndex, _sllStopIndex, false); + } + + if (_llStopIndex >= 0) { + final LL_k = _llStopIndex - startIndex + 1; + decisions[decision].LL_TotalLook += LL_k; + decisions[decision].LL_MinLook = decisions[decision].LL_MinLook == 0 + ? LL_k + : min(decisions[decision].LL_MinLook, LL_k); + if (LL_k > decisions[decision].LL_MaxLook) { + decisions[decision].LL_MaxLook = LL_k; + decisions[decision].LL_MaxLookEvent = LookaheadEventInfo( + decision, null, alt, input, startIndex, _llStopIndex, true); + } + } + + return alt; + } finally { + currentDecision = -1; + } + } + + @override + DFAState getExistingTargetState(DFAState previousD, int t) { + // this method is called after each time the input position advances + // during SLL prediction + _sllStopIndex = input.index; + + final existingTargetState = super.getExistingTargetState(previousD, t); + if (existingTargetState != null) { + decisions[currentDecision] + .SLL_DFATransitions++; // count only if we transition over a DFA state + if (existingTargetState == ATNSimulator.ERROR) { + decisions[currentDecision].errors.add(ErrorInfo(currentDecision, + previousD.configs, input, startIndex, _sllStopIndex, false)); + } + } + + currentState = existingTargetState; + return existingTargetState; + } + + @override + DFAState computeTargetState(DFA dfa, DFAState previousD, int t) { + final state = super.computeTargetState(dfa, previousD, t); + currentState = state; + return state; + } + + @override + ATNConfigSet computeReachSet(ATNConfigSet closure, int t, bool fullCtx) { + if (fullCtx) { + // this method is called after each time the input position advances + // during full context prediction + _llStopIndex = input.index; + } + + final reachConfigs = super.computeReachSet(closure, t, fullCtx); + if 
(fullCtx) { + decisions[currentDecision] + .LL_ATNTransitions++; // count computation even if error + if (reachConfigs != null) { + } else { + // no reach on current lookahead symbol. ERROR. + // TODO: does not handle delayed errors per getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule() + decisions[currentDecision].errors.add(ErrorInfo( + currentDecision, closure, input, startIndex, _llStopIndex, true)); + } + } else { + decisions[currentDecision].SLL_ATNTransitions++; + if (reachConfigs != null) { + } else { + // no reach on current lookahead symbol. ERROR. + decisions[currentDecision].errors.add(ErrorInfo( + currentDecision, closure, input, startIndex, _sllStopIndex, false)); + } + } + return reachConfigs; + } + + @override + bool evalSemanticContextOne(SemanticContext pred, + ParserRuleContext parserCallStack, int alt, bool fullCtx) { + final result = + super.evalSemanticContextOne(pred, parserCallStack, alt, fullCtx); + if (!(pred is PrecedencePredicate)) { + final fullContext = _llStopIndex >= 0; + final stopIndex = fullContext ? 
_llStopIndex : _sllStopIndex; + decisions[currentDecision].predicateEvals.add(PredicateEvalInfo( + currentDecision, + input, + startIndex, + stopIndex, + pred, + result, + alt, + fullCtx)); + } + + return result; + } + + @override + void reportAttemptingFullContext(DFA dfa, BitSet conflictingAlts, + ATNConfigSet configs, int startIndex, int stopIndex) { + if (conflictingAlts != null) { + conflictingAltResolvedBySLL = conflictingAlts.nextset(0); + } else { + conflictingAltResolvedBySLL = configs.alts.nextset(0); + } + decisions[currentDecision].LL_Fallback++; + super.reportAttemptingFullContext( + dfa, conflictingAlts, configs, startIndex, stopIndex); + } + + @override + void reportContextSensitivity(DFA dfa, int prediction, ATNConfigSet configs, + int startIndex, int stopIndex) { + if (prediction != conflictingAltResolvedBySLL) { + decisions[currentDecision].contextSensitivities.add( + ContextSensitivityInfo( + currentDecision, configs, input, startIndex, stopIndex)); + } + super.reportContextSensitivity( + dfa, prediction, configs, startIndex, stopIndex); + } + + @override + void reportAmbiguity(DFA dfa, DFAState D, int startIndex, int stopIndex, + bool exact, BitSet ambigAlts, ATNConfigSet configs) { + final prediction = + ambigAlts != null ? ambigAlts.nextset(0) : configs.alts.nextset(0); + if (configs.fullCtx && prediction != conflictingAltResolvedBySLL) { + // Even though this is an ambiguity we are reporting, we can + // still detect some context sensitivities. Both SLL and LL + // are showing a conflict, hence an ambiguity, but if they resolve + // to different minimum alternatives we have also identified a + // context sensitivity. 
+ decisions[currentDecision].contextSensitivities.add( + ContextSensitivityInfo( + currentDecision, configs, input, startIndex, stopIndex)); + } + decisions[currentDecision].ambiguities.add(AmbiguityInfo(currentDecision, + configs, ambigAlts, input, startIndex, stopIndex, configs.fullCtx)); + super.reportAmbiguity( + dfa, D, startIndex, stopIndex, exact, ambigAlts, configs); + } + + // --------------------------------------------------------------------- + + List get decisionInfo { + return decisions; + } +} diff --git a/runtime/Dart/lib/src/atn/src/semantic_context.dart b/runtime/Dart/lib/src/atn/src/semantic_context.dart new file mode 100644 index 000000000..8ea747964 --- /dev/null +++ b/runtime/Dart/lib/src/atn/src/semantic_context.dart @@ -0,0 +1,404 @@ +/* + * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ + +import 'package:collection/collection.dart'; + +import '../../recognizer.dart'; +import '../../rule_context.dart'; +import '../../util/murmur_hash.dart'; + +/// A tree structure used to record the semantic context in which +/// an ATN configuration is valid. It's either a single predicate, +/// a conjunction {@code p1&&p2}, or a sum of products {@code p1||p2}. +/// +///

I have scoped the [AND], [OR], and [Predicate] subclasses of +/// [SemanticContext] within the scope of this outer class.

+abstract class SemanticContext { + /// The default [SemanticContext], which is semantically equivalent to + /// a predicate of the form {@code {true}?}. + static const SemanticContext NONE = Predicate(); + + const SemanticContext(); + + /// For context independent predicates, we evaluate them without a local + /// context (i.e., null context). That way, we can evaluate them without + /// having to create proper rule-specific context during prediction (as + /// opposed to the parser, which creates them naturally). In a practical + /// sense, this avoids a cast exception from RuleContext to myruleContext. + /// + ///

For context dependent predicates, we must pass in a local context so that + /// references such as $arg evaluate properly as _localctx.arg. We only + /// capture context dependent predicates in the context in which we begin + /// prediction, so we passed in the outer context here in case of context + /// dependent predicate evaluation.

+ bool eval(Recognizer parser, RuleContext parserCallStack); + + /// Evaluate the precedence predicates for the context and reduce the result. + /// + /// @param parser The parser instance. + /// @param parserCallStack + /// @return The simplified semantic context after precedence predicates are + /// evaluated, which will be one of the following values. + ///
    + ///
  • {@link #NONE}: if the predicate simplifies to [true] after + /// precedence predicates are evaluated.
  • + ///
  • null: if the predicate simplifies to [false] after + /// precedence predicates are evaluated.
  • + ///
  • [this]: if the semantic context is not changed as a result of + /// precedence predicate evaluation.
  • + ///
  • A non-null [SemanticContext]: the new simplified + /// semantic context after precedence predicates are evaluated.
  • + ///
+ SemanticContext evalPrecedence(Recognizer parser, + RuleContext parserCallStack) { + return this; + } + + static SemanticContext and(SemanticContext a, SemanticContext b) { + if (a == null || a == NONE) return b; + if (b == null || b == NONE) return a; + final result = AND(a, b); + if (result.opnds.length == 1) { + return result.opnds[0]; + } + + return result; + } + + /// + /// @see ParserATNSimulator#getPredsForAmbigAlts + static SemanticContext or(SemanticContext a, SemanticContext b) { + if (a == null) return b; + if (b == null) return a; + if (a == NONE || b == NONE) return NONE; + final result = OR(a, b); + if (result.opnds.length == 1) { + return result.opnds[0]; + } + + return result; + } + + static Iterable filterPrecedencePredicates( + Iterable collection) { + return collection.whereType(); + } + + static Iterable filterNonPrecedencePredicates( + Iterable collection) { + return collection.where((e) => !(e is PrecedencePredicate)); + } +} + +class Predicate extends SemanticContext { + final int ruleIndex; + final int predIndex; + final bool isCtxDependent; // e.g., $i ref in pred + + const Predicate( + [this.ruleIndex = -1, this.predIndex = -1, this.isCtxDependent = false]); + + @override + bool eval(Recognizer parser, RuleContext parserCallStack) { + final localctx = isCtxDependent ? parserCallStack : null; + return parser.sempred(localctx, ruleIndex, predIndex); + } + + @override + int get hashCode { + var hashCode = MurmurHash.initialize(); + hashCode = MurmurHash.update(hashCode, ruleIndex); + hashCode = MurmurHash.update(hashCode, predIndex); + hashCode = MurmurHash.update(hashCode, isCtxDependent ? 
1 : 0); + hashCode = MurmurHash.finish(hashCode, 3); + return hashCode; + } + + @override + bool operator ==(Object obj) { + return obj is Predicate && + ruleIndex == obj.ruleIndex && + predIndex == obj.predIndex && + isCtxDependent == obj.isCtxDependent; + } + + @override + String toString() { + return '{$ruleIndex:$predIndex}?'; + } +} + +class PrecedencePredicate extends SemanticContext + implements Comparable { + final int precedence; + + PrecedencePredicate([this.precedence = 0]); + + @override + bool eval(Recognizer parser, RuleContext parserCallStack) { + return parser.precpred(parserCallStack, precedence); + } + + @override + SemanticContext evalPrecedence(Recognizer parser, + RuleContext parserCallStack) { + if (parser.precpred(parserCallStack, precedence)) { + return SemanticContext.NONE; + } else { + return null; + } + } + + @override + int compareTo(PrecedencePredicate o) { + return precedence - o.precedence; + } + + @override + int get hashCode { + var hashCode = 1; + hashCode = 31 * hashCode + precedence; + return hashCode; + } + + @override + bool operator ==(Object obj) { + if (!(obj is PrecedencePredicate)) { + return false; + } + PrecedencePredicate other = obj; + return precedence == other.precedence; + } + +// precedence >= _precedenceStack.peek() + @override + String toString() { + return '{$precedence>=prec}?'; + } +} + +/// This is the base class for semantic context "operators", which operate on +/// a collection of semantic context "operands". +/// +/// @since 4.3 +abstract class Operator extends SemanticContext { + /// Gets the operands for the semantic context operator. + /// + /// @return a collection of [SemanticContext] operands for the + /// operator. + /// + /// @since 4.3 + List get operands; +} + +/// A semantic context which is true whenever none of the contained contexts +/// is false. 
+ +class AND extends Operator { + List opnds; + + AND(SemanticContext a, SemanticContext b) { + var operands = {}; + if (a is AND) { + operands.addAll(a.opnds); + } else { + operands.add(a); + } + if (b is AND) { + operands.addAll(b.opnds); + } else { + operands.add(b); + } + + final precedencePredicates = + SemanticContext.filterPrecedencePredicates(operands); + + operands = SemanticContext.filterNonPrecedencePredicates(operands).toSet(); + if (precedencePredicates.isNotEmpty) { + // interested in the transition with the lowest precedence + final reduced = + precedencePredicates.reduce((a, b) => a.compareTo(b) <= 0 ? a : b); + operands.add(reduced); + } + + opnds = operands.toList(); + } + + @override + List get operands { + return opnds; + } + + @override + bool operator ==(Object obj) { + if (!(obj is AND)) return false; + AND other = obj; + return ListEquality().equals(opnds, other.opnds); + } + + @override + int get hashCode { + return MurmurHash.getHashCode(opnds, runtimeType.hashCode); + } + + /// {@inheritDoc} + /// + ///

+ /// The evaluation of predicates by this context is short-circuiting, but + /// unordered.

+ + @override + bool eval(Recognizer parser, RuleContext parserCallStack) { + for (var opnd in opnds) { + if (!opnd.eval(parser, parserCallStack)) return false; + } + return true; + } + + @override + SemanticContext evalPrecedence(Recognizer parser, + RuleContext parserCallStack) { + var differs = false; + final operands = []; + for (var context in opnds) { + final evaluated = + context.evalPrecedence(parser, parserCallStack); + differs |= (evaluated != context); + if (evaluated == null) { + // The AND context is false if any element is false + return null; + } else if (evaluated != SemanticContext.NONE) { + // Reduce the result by skipping true elements + operands.add(evaluated); + } + } + + if (!differs) { + return this; + } + + if (operands.isEmpty) { + // all elements were true, so the AND context is true + return SemanticContext.NONE; + } + + var result = operands[0]; + for (var i = 1; i < operands.length; i++) { + result = SemanticContext.and(result, operands[i]); + } + + return result; + } + + @override + String toString() { + return opnds.join('&&'); + } +} + +/// A semantic context which is true whenever at least one of the contained +/// contexts is true. +class OR extends Operator { + List opnds; + + OR(SemanticContext a, SemanticContext b) { + var operands = {}; + if (a is OR) { + operands.addAll(a.opnds); + } else { + operands.add(a); + } + if (b is OR) { + operands.addAll(b.opnds); + } else { + operands.add(b); + } + + final precedencePredicates = + SemanticContext.filterPrecedencePredicates(operands); + + operands = SemanticContext.filterNonPrecedencePredicates(operands).toSet(); + if (precedencePredicates.isNotEmpty) { + // interested in the transition with the highest precedence + final reduced = + precedencePredicates.reduce((a, b) => a.compareTo(b) >= 0 ? 
a : b); + operands.add(reduced); + } + + opnds = operands.toList(); + } + + @override + List get operands { + return opnds; + } + + @override + bool operator ==(Object obj) { + if (!(obj is OR)) return false; + OR other = obj; + return ListEquality().equals(opnds, other.opnds); + } + + @override + int get hashCode { + return MurmurHash.getHashCode(opnds, runtimeType.hashCode); + } + + /// {@inheritDoc} + /// + ///

+ /// The evaluation of predicates by this context is short-circuiting, but + /// unordered.

+ + @override + bool eval(Recognizer parser, RuleContext parserCallStack) { + for (var opnd in opnds) { + if (opnd.eval(parser, parserCallStack)) return true; + } + return false; + } + + @override + SemanticContext evalPrecedence(Recognizer parser, + RuleContext parserCallStack) { + var differs = false; + final operands = []; + for (var context in opnds) { + final evaluated = + context.evalPrecedence(parser, parserCallStack); + differs |= (evaluated != context); + if (evaluated == SemanticContext.NONE) { + // The OR context is true if any element is true + return SemanticContext.NONE; + } else if (evaluated != null) { + // Reduce the result by skipping false elements + operands.add(evaluated); + } + } + + if (!differs) { + return this; + } + + if (operands.isEmpty) { + // all elements were false, so the OR context is false + return null; + } + + var result = operands[0]; + for (var i = 1; i < operands.length; i++) { + result = SemanticContext.or(result, operands[i]); + } + + return result; + } + + @override + String toString() { + return opnds.join('||'); + } +} diff --git a/runtime/Dart/lib/src/atn/src/transition.dart b/runtime/Dart/lib/src/atn/src/transition.dart new file mode 100644 index 000000000..06d873844 --- /dev/null +++ b/runtime/Dart/lib/src/atn/src/transition.dart @@ -0,0 +1,305 @@ +/* + * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ + +import '../../interval_set.dart'; +import '../../token.dart'; +import 'atn_state.dart'; +import 'semantic_context.dart'; + +enum TransitionType { + INVALID, // 0 is not used + EPSILON, + RANGE, + RULE, + PREDICATE, // e.g., {isType(input.LT(1))}? + ATOM, + ACTION, + SET, // ~(A|B) or ~atom, wildcard, which convert to next 2 + NOT_SET, + WILDCARD, + PRECEDENCE, +} + +/// An ATN transition between any two ATN states. 
Subclasses define +/// atom, set, epsilon, action, predicate, rule transitions. +/// +///

This is a one way link. It emanates from a state (usually via a list of +/// transitions) and has a target state.

+/// +///

Since we never have to change the ATN transitions once we construct it, +/// we can fix these transitions as specific classes. The DFA transitions +/// on the other hand need to update the labels as it adds transitions to +/// the states. We'll use the term Edge for the DFA to distinguish them from +/// ATN transitions.

+abstract class Transition { + /// The target of this transition. */ + ATNState target; + + Transition(this.target) { + if (target == null) { + throw ArgumentError.notNull('target cannot be null.'); + } + } + + TransitionType get type; + + /// Determines if the transition is an "epsilon" transition. + /// + ///

The default implementation returns [false].

  ///
  /// @return [true] if traversing this transition in the ATN does not
  /// consume an input symbol; otherwise, [false] if traversing this
  /// transition consumes (matches) an input symbol.
  bool get isEpsilon => false;

  /// The set of input symbols this transition can match, or null when the
  /// transition is not label-driven (epsilon, action, predicate, ...).
  IntervalSet get label => null;

  bool matches(int symbol, int minVocabSymbol, int maxVocabSymbol);
}

/// An epsilon transition: traversed without consuming input.
class EpsilonTransition extends Transition {
  /// @return the rule index of a precedence rule for which this transition is
  /// returning from, where the precedence value is 0; otherwise, -1.
  ///
  /// @see ATNConfig#isPrecedenceFilterSuppressed()
  /// @see ParserATNSimulator#applyPrecedenceFilter(ATNConfigSet)
  /// @since 4.4.1
  final int outermostPrecedenceReturn;

  EpsilonTransition(ATNState target, [this.outermostPrecedenceReturn = -1])
      : super(target);

  @override
  bool get isEpsilon => true;

  @override
  bool matches(int symbol, int minVocabSymbol, int maxVocabSymbol) {
    return false;
  }

  @override
  String toString() {
    return 'epsilon';
  }

  @override
  TransitionType get type => TransitionType.EPSILON;
}

/// Matches any symbol in the inclusive range [from]..[to].
class RangeTransition extends Transition {
  final int from;
  final int to;

  RangeTransition(ATNState target, this.from, this.to) : super(target);

  @override
  IntervalSet get label {
    return IntervalSet.ofRange(from, to);
  }

  @override
  bool matches(int symbol, int minVocabSymbol, int maxVocabSymbol) {
    return symbol >= from && symbol <= to;
  }

  @override
  String toString() {
    return "'$from..$to'";
  }

  @override
  TransitionType get type => TransitionType.RANGE;
}

/// An epsilon transition into the start state of another rule; [followState]
/// is where the simulator resumes after the invoked rule completes.
class RuleTransition extends Transition {
  /// Ptr to the rule definition object for this rule ref.
  final int ruleIndex; // no Rule object at runtime

  final int precedence;

  /// What node to begin computations following ref to rule.
  ATNState followState;

  RuleTransition(RuleStartState ruleStart, this.ruleIndex, this.precedence,
      this.followState)
      : super(ruleStart);

  @override
  bool get isEpsilon => true;

  @override
  bool matches(int symbol, int minVocabSymbol, int maxVocabSymbol) {
    return false;
  }

  @override
  TransitionType get type => TransitionType.RULE;
}

abstract class AbstractPredicateTransition extends Transition {
  AbstractPredicateTransition(ATNState target) : super(target);
}

/// An epsilon transition guarded by a semantic predicate.
class PredicateTransition extends AbstractPredicateTransition {
  final int ruleIndex;
  final int predIndex;
  final bool isCtxDependent; // e.g., $i ref in pred

  PredicateTransition(
      target, this.ruleIndex, this.predIndex, this.isCtxDependent)
      : super(target);

  @override
  bool get isEpsilon => true;

  @override
  bool matches(symbol, minVocabSymbol, maxVocabSymbol) {
    return false;
  }

  Predicate get predicate => Predicate(ruleIndex, predIndex, isCtxDependent);

  @override
  String toString() {
    return 'pred_$ruleIndex:$predIndex';
  }

  @override
  TransitionType get type => TransitionType.PREDICATE;
}

/// TODO: make all transitions sets? no, should remove set edges.
class AtomTransition extends Transition {
  /// The token type or character value; or, signifies special label.
  final int atomLabel;

  AtomTransition(ATNState target, this.atomLabel) : super(target);

  @override
  IntervalSet get label {
    return IntervalSet.ofOne(atomLabel);
  }

  @override
  bool matches(int symbol, int minVocabSymbol, int maxVocabSymbol) {
    return atomLabel == symbol;
  }

  @override
  String toString() {
    return label.toString();
  }

  @override
  TransitionType get type => TransitionType.ATOM;
}

/// An epsilon transition that triggers an embedded lexer action.
class ActionTransition extends Transition {
  final int ruleIndex;
  final int actionIndex;
  final bool isCtxDependent; // e.g., $i ref in pred

  ActionTransition(target, this.ruleIndex,
      [this.actionIndex = -1, this.isCtxDependent = false])
      : super(target);

  @override
  bool get isEpsilon =>
      true; // we are to be ignored by analysis 'cept for predicates

  @override
  bool matches(symbol, minVocabSymbol, maxVocabSymbol) => false;

  @override
  String toString() {
    return 'action_$ruleIndex:$actionIndex';
  }

  @override
  TransitionType get type => TransitionType.ACTION;
}

// A transition containing a set of values.
class SetTransition extends Transition {
  @override
  IntervalSet label;

  SetTransition(ATNState target, [IntervalSet st]) : super(target) {
    // Default to a singleton set holding the invalid token type when no set
    // is supplied.
    label = st ?? IntervalSet.ofOne(Token.INVALID_TYPE);
  }

  @override
  bool matches(symbol, minVocabSymbol, maxVocabSymbol) {
    return label.contains(symbol);
  }

  @override
  String toString() {
    return label.toString();
  }

  @override
  TransitionType get type => TransitionType.SET;
}

/// Matches any in-vocabulary symbol NOT in the wrapped set.
class NotSetTransition extends SetTransition {
  NotSetTransition(target, st) : super(target, st);

  @override
  bool matches(symbol, minVocabSymbol, maxVocabSymbol) {
    return symbol >= minVocabSymbol &&
        symbol <= maxVocabSymbol &&
        !super.matches(symbol, minVocabSymbol, maxVocabSymbol);
  }

  @override
  String toString() {
    return '~' + super.toString();
  }

  @override
  TransitionType get type => TransitionType.NOT_SET;
}

/// Matches any symbol within the vocabulary bounds.
class WildcardTransition extends Transition {
  WildcardTransition(target) : super(target);

  @override
  bool matches(symbol, minVocabSymbol, maxVocabSymbol) {
    return symbol >= minVocabSymbol && symbol <= maxVocabSymbol;
  }

  @override
  String toString() {
    return '.';
  }

  @override
  TransitionType get type => TransitionType.WILDCARD;
}

/// An epsilon transition guarded by a precedence comparison (`precedence >= _p`).
class PrecedencePredicateTransition extends AbstractPredicateTransition {
  final int precedence;

  PrecedencePredicateTransition(target, this.precedence) : super(target);

  @override
  bool get isEpsilon => true;

  @override
  bool matches(symbol, minVocabSymbol, maxVocabSymbol) => false;

  PrecedencePredicate get predicate {
    return PrecedencePredicate(precedence);
  }

  @override
  String toString() => '$precedence >= _p';

  @override
  TransitionType get type => TransitionType.PRECEDENCE;
}
diff --git a/runtime/Dart/lib/src/dfa/dfa.dart b/runtime/Dart/lib/src/dfa/dfa.dart
new file mode 100644
index 000000000..552ddef7d
--- /dev/null
+++ b/runtime/Dart/lib/src/dfa/dfa.dart
@@ -0,0 +1,8 @@
/*
 * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
 * Use of this file is governed by the BSD 3-clause license that
 * can be found in the LICENSE.txt file in the project root.
 */

export 'src/dfa.dart';
export 'src/dfa_state.dart';
diff --git a/runtime/Dart/lib/src/dfa/src/dfa.dart b/runtime/Dart/lib/src/dfa/src/dfa.dart
new file mode 100644
index 000000000..30aa2af36
--- /dev/null
+++ b/runtime/Dart/lib/src/dfa/src/dfa.dart
@@ -0,0 +1,138 @@
/*
 * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
 * Use of this file is governed by the BSD 3-clause license that
 * can be found in the LICENSE.txt file in the project root.
 */

import '../../vocabulary.dart';
import '../../atn/atn.dart';
import 'dfa_serializer.dart';
import 'dfa_state.dart';

class DFA {
  /// A set of all DFA states. Use [Map] so we can get old state back
  /// ([Set] only allows you to see if it's there).
  Map states = {};

  /// The start state of this DFA; null until the simulator creates it.
  DFAState s0;

  final int decision;

  /// From which ATN state did we create this DFA?
  DecisionState atnStartState;

  /// [true] if this DFA is for a precedence decision; otherwise,
  /// [false]. This is the backing field for {@link #isPrecedenceDfa}.
  bool precedenceDfa;

  DFA(this.atnStartState, [this.decision]) {
    // A precedence DFA gets a synthetic s0 whose outgoing edges index the
    // real start states by precedence value; s0 is not stored in [states].
    var precedenceDfa = false;
    if (atnStartState is StarLoopEntryState) {
      if ((atnStartState as StarLoopEntryState).isPrecedenceDecision) {
        precedenceDfa = true;
        final precedenceState = DFAState(configs: ATNConfigSet());
        precedenceState.edges = [];
        precedenceState.isAcceptState = false;
        precedenceState.requiresFullContext = false;
        s0 = precedenceState;
      }
    }

    this.precedenceDfa = precedenceDfa;
  }

  /// Gets whether this DFA is a precedence DFA. Precedence DFAs use a special
  /// start state {@link #s0} which is not stored in {@link #states}. The
  /// {@link DFAState#edges} array for this start state contains outgoing edges
  /// supplying individual start states corresponding to specific precedence
  /// values.
  ///
  /// @return [true] if this is a precedence DFA; otherwise,
  /// [false].
  /// @see Parser#getPrecedence()
  bool isPrecedenceDfa() {
    return precedenceDfa;
  }

  /// Get the start state for a specific precedence value.
  ///
  /// @param precedence The current precedence.
  /// @return The start state corresponding to the specified precedence, or
  /// null if no start state exists for the specified precedence.
  ///
  /// @throws IllegalStateException if this is not a precedence DFA.
  /// @see #isPrecedenceDfa()
  DFAState getPrecedenceStartState(int precedence) {
    if (!isPrecedenceDfa()) {
      throw StateError(
          'Only precedence DFAs may contain a precedence start state.');
    }

    // s0.edges is never null for a precedence DFA
    if (precedence < 0 || precedence >= s0.edges.length) {
      return null;
    }

    return s0.edges[precedence];
  }

  /// Set the start state for a specific precedence value.
  ///
  /// @param precedence The current precedence.
  /// @param startState The start state corresponding to the specified
  /// precedence.
  ///
  /// @throws IllegalStateException if this is not a precedence DFA.
  /// @see #isPrecedenceDfa()
  void setPrecedenceStartState(int precedence, DFAState startState) {
    if (!isPrecedenceDfa()) {
      throw StateError(
          'Only precedence DFAs may contain a precedence start state.');
    }

    if (precedence < 0) {
      return;
    }

    // synchronization on s0 here is ok. when the DFA is turned into a
    // precedence DFA, s0 will be initialized once and not updated again
    // s0.edges is never null for a precedence DFA
    if (precedence >= s0.edges.length) {
      // Grow the edge array, preserving existing precedence start states.
      final original = s0.edges;
      s0.edges = List(precedence + 1);
      List.copyRange(s0.edges, 0, original);
    }

    s0.edges[precedence] = startState;
  }

  /// Return a list of all states in this DFA, ordered by state number.
  List getStates() {
    final result = states.keys.toList();
    result.sort((DFAState o1, DFAState o2) {
      return o1.stateNumber - o2.stateNumber;
    });

    return result;
  }

  @override
  String toString([Vocabulary vocabulary]) {
    vocabulary = vocabulary ?? VocabularyImpl.EMPTY_VOCABULARY;
    if (s0 == null) {
      return '';
    }

    final serializer = DFASerializer(this, vocabulary);
    return serializer.toString();
  }

  String toLexerString() {
    if (s0 == null) return '';
    DFASerializer serializer = LexerDFASerializer(this);
    return serializer.toString();
  }
}
diff --git a/runtime/Dart/lib/src/dfa/src/dfa_serializer.dart b/runtime/Dart/lib/src/dfa/src/dfa_serializer.dart
new file mode 100644
index 000000000..7ffb42bbb
--- /dev/null
+++ b/runtime/Dart/lib/src/dfa/src/dfa_serializer.dart
@@ -0,0 +1,76 @@
/*
 * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
 * Use of this file is governed by the BSD 3-clause license that
 * can be found in the LICENSE.txt file in the project root.
 */

import '../../util/utils.dart';
import '../../vocabulary.dart';
import 'dfa.dart';
import 'dfa_state.dart';

/// A DFA walker that knows how to dump them to serialized strings.
class DFASerializer {
  final DFA dfa;

  final Vocabulary vocabulary;

  DFASerializer(this.dfa, this.vocabulary);

  @override
  String toString() {
    // NOTE(review): returns null (not '') when the DFA has no start state,
    // mirroring the Java runtime's DFASerializer.
    if (dfa.s0 == null) return null;
    final buf = StringBuffer();
    final states = dfa.getStates();
    for (var s in states) {
      var n = 0;
      if (s.edges != null) n = s.edges.length;
      for (var i = 0; i < n; i++) {
        final t = s.edges[i];
        if (t != null && t.stateNumber != 0x7FFFFFFF) {
          buf.write(getStateString(s));
          final label = getEdgeLabel(i);
          buf.write('-');
          buf.write(label);
          buf.write('->');
          buf.write(getStateString(t));
          buf.write('\n');
        }
      }
    }

    final output = buf.toString();
    if (output.isEmpty) return null;
    //return Utils.sortLinesInString(output);
    return output;
  }

  /// Edge index i is shifted by 1 (edges[0] is EOF), hence i - 1 here.
  String getEdgeLabel(int i) {
    return vocabulary.getDisplayName(i - 1);
  }

  /// Render a state as e.g. ":s5^" — ':' marks an accept state, '^' marks
  /// requiresFullContext; accept states also show their prediction.
  String getStateString(DFAState s) {
    final n = s.stateNumber;
    final baseStateStr = (s.isAcceptState ? ':' : '') +
        's$n' +
        (s.requiresFullContext ? '^' : '');
    if (s.isAcceptState) {
      if (s.predicates != null) {
        return baseStateStr + '=>${arrayToString(s.predicates)}';
      } else {
        return baseStateStr + '=>${s.prediction}';
      }
    } else {
      return baseStateStr;
    }
  }
}

class LexerDFASerializer extends DFASerializer {
  LexerDFASerializer(dfa) : super(dfa, VocabularyImpl.EMPTY_VOCABULARY);

  @override
  String getEdgeLabel(i) {
    return "'" + String.fromCharCode(i) + "'";
  }
}
diff --git a/runtime/Dart/lib/src/dfa/src/dfa_state.dart b/runtime/Dart/lib/src/dfa/src/dfa_state.dart
new file mode 100644
index 000000000..17a2f4f7d
--- /dev/null
+++ b/runtime/Dart/lib/src/dfa/src/dfa_state.dart
@@ -0,0 +1,151 @@
/*
 * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
 * Use of this file is governed by the BSD 3-clause license that
 * can be found in the LICENSE.txt file in the project root.
 */

import '../../atn/atn.dart';
import '../../util/murmur_hash.dart';
import '../../util/utils.dart';

/// Map a predicate to a predicted alternative.
class PredPrediction {
  final alt;
  final pred;

  PredPrediction(this.pred, this.alt);

  @override
  String toString() {
    return '($pred, $alt)';
  }
}

/// A DFA state represents a set of possible ATN configurations.
/// As Aho, Sethi, Ullman p. 117 says "The DFA uses its state
/// to keep track of all possible states the ATN can be in after
/// reading each input symbol. That is to say, after reading
/// input a1a2..an, the DFA is in a state that represents the
/// subset T of the states of the ATN that are reachable from the
/// ATN's start state along some path labeled a1a2..an."
/// In conventional NFA→DFA conversion, therefore, the subset T
/// would be a bitset representing the set of states the
/// ATN could be in. We need to track the alt predicted by each
/// state as well, however. More importantly, we need to maintain
/// a stack of states, tracking the closure operations as they
/// jump from rule to rule, emulating rule invocations (method calls).
/// I have to add a stack to simulate the proper lookahead sequences for
/// the underlying LL grammar from which the ATN was derived.
///
/// I use a set of ATNConfig objects not simple states. An ATNConfig
/// is both a state (ala normal conversion) and a RuleContext describing
/// the chain of rules (if any) followed to arrive at that state.
///
/// A DFA state may have multiple references to a particular state,
/// but with different ATN contexts (with same or different alts)
/// meaning that state was reached via a different set of rule invocations.
class DFAState {
  int stateNumber = -1;

  ATNConfigSet configs = ATNConfigSet();

  /// {@code edges[symbol]} points to target of symbol. Shift up by 1 so (-1)
  /// {@link Token#EOF} maps to {@code edges[0]}.
  List edges;

  bool isAcceptState = false;

  /// if accept state, what ttype do we match or alt do we predict?
  /// This is set to {@link ATN#INVALID_ALT_NUMBER} when {@link #predicates}{@code !=null} or
  /// {@link #requiresFullContext}.
  int prediction = 0;

  LexerActionExecutor lexerActionExecutor;

  /// Indicates that this state was created during SLL prediction that
  /// discovered a conflict between the configurations in the state. Future
  /// {@link ParserATNSimulator#execATN} invocations immediately jumped doing
  /// full context prediction if this field is true.
  bool requiresFullContext = false;

  /// During SLL parsing, this is a list of predicates associated with the
  /// ATN configurations of the DFA state. When we have predicates,
  /// {@link #requiresFullContext} is [false] since full context prediction evaluates predicates
  /// on-the-fly. If this is not null, then {@link #prediction} is
  /// {@link ATN#INVALID_ALT_NUMBER}.
  ///
  /// We only use these for non-{@link #requiresFullContext} but conflicting states. That
  /// means we know from the context (it's $ or we don't dip into outer
  /// context) that it's an ambiguity not a conflict.
  ///
  /// This list is computed by {@link ParserATNSimulator#predicateDFAState}.
  List predicates;

  DFAState({this.stateNumber, this.configs});

  /// Get the set of all alts mentioned by all ATN configurations in this
  /// DFA state.
  Set get altSet {
    final alts = {};
    if (configs != null) {
      for (var c in configs) {
        alts.add(c.alt);
      }
    }
    // Returns null (not an empty set) when no alts are present.
    if (alts.isEmpty) return null;
    return alts;
  }

  @override
  int get hashCode {
    // Hash is derived solely from the configuration set, matching
    // operator == below.
    var hash = MurmurHash.initialize(7);
    hash = MurmurHash.update(hash, configs.hashCode);
    hash = MurmurHash.finish(hash, 1);
    return hash;
  }

  /// Two [DFAState] instances are equal if their ATN configuration sets
  /// are the same. This method is used to see if a state already exists.
  ///
  /// Because the number of alternatives and number of ATN configurations are
  /// finite, there is a finite number of DFA states that can be processed.
  /// This is necessary to show that the algorithm terminates.
  ///
  /// Cannot test the DFA state numbers here because in
  /// {@link ParserATNSimulator#addDFAState} we need to know if any other state
  /// exists that has this exact set of ATN configurations. The
  /// {@link #stateNumber} is irrelevant.
  @override
  bool operator ==(Object o) {
    // compare set of ATN configurations in this set with other
    if (identical(this, o)) return true;

    if (!(o is DFAState)) {
      return false;
    }

    DFAState other = o;
    // TODO (sam): what to do when configs==null?
    final sameSet = configs == other.configs;
//		System.out.println("DFAState.equals: "+configs+(sameSet?"==":"!=")+other.configs);
    return sameSet;
  }

  @override
  String toString() {
    final buf = StringBuffer();
    buf.write('$stateNumber:$configs');
    if (isAcceptState) {
      buf.write('=>');
      if (predicates != null) {
        buf.write(arrayToString(predicates));
      } else {
        buf.write(prediction);
      }
    }
    return buf.toString();
  }
}
diff --git a/runtime/Dart/lib/src/error/error.dart b/runtime/Dart/lib/src/error/error.dart
new file mode 100644
index 000000000..8b96c26b9
--- /dev/null
+++ b/runtime/Dart/lib/src/error/error.dart
@@ -0,0 +1,10 @@
/*
 * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
 * Use of this file is governed by the BSD 3-clause license that
 * can be found in the LICENSE.txt file in the project root.
 */

export 'src/diagnostic_error_listener.dart';
export 'src/error_listener.dart';
export 'src/error_strategy.dart';
export 'src/errors.dart';
diff --git a/runtime/Dart/lib/src/error/src/diagnostic_error_listener.dart b/runtime/Dart/lib/src/error/src/diagnostic_error_listener.dart
new file mode 100644
index 000000000..7ea39ab9f
--- /dev/null
+++ b/runtime/Dart/lib/src/error/src/diagnostic_error_listener.dart
@@ -0,0 +1,116 @@
/*
 * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
 * Use of this file is governed by the BSD 3-clause license that
 * can be found in the LICENSE.txt file in the project root.
 */

import '../../atn/atn.dart';
import '../../dfa/dfa.dart';
import '../../interval_set.dart';
import '../../parser.dart';
import '../../util/bit_set.dart';
import 'error_listener.dart';

/// This implementation of [ANTLRErrorListener] can be used to identify
/// certain potential correctness and performance problems in grammars. "Reports"
/// are made by calling {@link Parser#notifyErrorListeners} with the appropriate
/// message.
///
/// * Ambiguities: These are cases where more than one path through the
///   grammar can match the input.
/// * Weak context sensitivity: These are cases where full-context
///   prediction resolved an SLL conflict to a unique alternative which equaled the
///   minimum alternative of the SLL conflict.
/// * Strong (forced) context sensitivity: These are cases where the
///   full-context prediction resolved an SLL conflict to a unique alternative,
///   and the minimum alternative of the SLL conflict was found to not be
///   a truly viable alternative. Two-stage parsing cannot be used for inputs where
///   this situation occurs.
class DiagnosticErrorListener extends BaseErrorListener {
  /// When [true], only exactly known ambiguities are reported.
  final bool exactOnly;

  /// Initializes a new instance of [DiagnosticErrorListener], specifying
  /// whether all ambiguities or only exact ambiguities are reported.
  ///
  /// @param exactOnly [true] to report only exact ambiguities, otherwise
  /// [false] to report all ambiguities.
  DiagnosticErrorListener([this.exactOnly = true]);

  @override
  void reportAmbiguity(Parser recognizer, DFA dfa, int startIndex,
      int stopIndex, bool exact, BitSet ambigAlts, ATNConfigSet configs) {
    if (exactOnly && !exact) {
      return;
    }

    final decision = getDecisionDescription(recognizer, dfa);
    final conflictingAlts = getConflictingAlts(ambigAlts, configs);
    final text =
        recognizer.tokenStream.getText(Interval.of(startIndex, stopIndex));
    final message =
        "reportAmbiguity d=$decision: ambigAlts=$conflictingAlts, input='$text'";
    recognizer.notifyErrorListeners(message);
  }

  @override
  void reportAttemptingFullContext(Parser recognizer, DFA dfa, int startIndex,
      int stopIndex, BitSet conflictingAlts, ATNConfigSet configs) {
    final decision = getDecisionDescription(recognizer, dfa);
    final text =
        recognizer.tokenStream.getText(Interval.of(startIndex, stopIndex));
    final message = "reportAttemptingFullContext d=$decision, input='$text'";
    recognizer.notifyErrorListeners(message);
  }

  @override
  void reportContextSensitivity(Parser recognizer, DFA dfa, int startIndex,
      int stopIndex, int prediction, ATNConfigSet configs) {
    final decision = getDecisionDescription(recognizer, dfa);
    final text =
        recognizer.tokenStream.getText(Interval.of(startIndex, stopIndex));
    final message = "reportContextSensitivity d=$decision, input='$text'";
    recognizer.notifyErrorListeners(message);
  }

  /// Describe a decision as "<decision> (<ruleName>)" when the rule name is
  /// known, otherwise just the decision number.
  String getDecisionDescription(Parser recognizer, DFA dfa) {
    final decision = dfa.decision;
    final ruleIndex = dfa.atnStartState.ruleIndex;

    final ruleNames = recognizer.ruleNames;
    if (ruleIndex < 0 || ruleIndex >= ruleNames.length) {
      return decision.toString();
    }

    final ruleName = ruleNames[ruleIndex];
    if (ruleName == null || ruleName.isEmpty) {
      return decision.toString();
    }

    return '$decision ($ruleName)';
  }

  /// Computes the set of conflicting or ambiguous alternatives from a
  /// configuration set, if that information was not already provided by the
  /// parser.
  ///
  /// @param reportedAlts The set of conflicting or ambiguous alternatives, as
  /// reported by the parser.
  /// @param configs The conflicting or ambiguous configuration set.
  /// @return Returns [reportedAlts] if it is not null, otherwise
  /// returns the set of alternatives represented in [configs].
  BitSet getConflictingAlts(BitSet reportedAlts, ATNConfigSet configs) {
    if (reportedAlts != null) {
      return reportedAlts;
    }

    final result = BitSet();
    for (var config in configs) {
      result.set(config.alt);
    }

    return result;
  }
}
diff --git a/runtime/Dart/lib/src/error/src/error_listener.dart b/runtime/Dart/lib/src/error/src/error_listener.dart
new file mode 100644
index 000000000..e2458434a
--- /dev/null
+++ b/runtime/Dart/lib/src/error/src/error_listener.dart
@@ -0,0 +1,241 @@
/*
 * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
 * Use of this file is governed by the BSD 3-clause license that
 * can be found in the LICENSE.txt file in the project root.
 */

import 'dart:io';

import '../../atn/atn.dart';
import '../../dfa/dfa.dart';
import '../../parser.dart';
import '../../recognizer.dart';
import '../../util/bit_set.dart';
import 'errors.dart';

abstract class ErrorListener {
  /// Upon syntax error, notify any interested parties. This is not how to
  /// recover from errors or compute error messages. [ANTLRErrorStrategy]
  /// specifies how to recover from syntax errors and how to compute error
  /// messages.
  /// This listener's job is simply to emit a computed message,
  /// though it has enough information to create its own message in many cases.
  ///
  /// The [RecognitionException] is non-null for all syntax errors except
  /// when we discover mismatched token errors that we can recover from
  /// in-line, without returning from the surrounding rule (via the single
  /// token insertion and deletion mechanism).
  ///
  /// @param recognizer
  ///        What parser got the error. From this
  ///        object, you can access the context as well
  ///        as the input stream.
  /// @param offendingSymbol
  ///        The offending token in the input token
  ///        stream, unless recognizer is a lexer (then it's null). If
  ///        no viable alternative error, [e] has token at which we
  ///        started production for the decision.
  /// @param line
  ///        The line number in the input where the error occurred.
  /// @param charPositionInLine
  ///        The character position within that line where the error occurred.
  /// @param msg
  ///        The message to emit.
  /// @param e
  ///        The exception generated by the parser that led to
  ///        the reporting of an error. It is null in the case where
  ///        the parser was able to recover in line without exiting the
  ///        surrounding rule.
  void syntaxError(Recognizer recognizer, Object offendingSymbol, int line,
      int charPositionInLine, String msg, RecognitionException e);

  /// This method is called by the parser when a full-context prediction
  /// results in an ambiguity.
  ///
  /// Each full-context prediction which does not result in a syntax error
  /// will call either {@link #reportContextSensitivity} or
  /// {@link #reportAmbiguity}.
  ///
  /// When [ambigAlts] is not null, it contains the set of potentially
  /// viable alternatives identified by the prediction algorithm. When
  /// [ambigAlts] is null, use {@link ATNConfigSet#getAlts} to obtain the
  /// represented alternatives from the [configs] argument.
  ///
  /// When [exact] is [true], all of the potentially
  /// viable alternatives are truly viable, i.e. this is reporting an exact
  /// ambiguity. When [exact] is [false], at least two of
  /// the potentially viable alternatives are viable for the current input, but
  /// the prediction algorithm terminated as soon as it determined that at
  /// least the minimum potentially viable alternative is truly
  /// viable.
  ///
  /// When the {@link PredictionMode#LL_EXACT_AMBIG_DETECTION} prediction
  /// mode is used, the parser is required to identify exact ambiguities so
  /// [exact] will always be [true].
  ///
  /// This method is not used by lexers.
  ///
  /// @param recognizer the parser instance
  /// @param dfa the DFA for the current decision
  /// @param startIndex the input index where the decision started
  /// @param stopIndex the input input where the ambiguity was identified
  /// @param exact [true] if the ambiguity is exactly known, otherwise
  /// [false]. This is always [true] when
  /// {@link PredictionMode#LL_EXACT_AMBIG_DETECTION} is used.
  /// @param ambigAlts the potentially ambiguous alternatives, or null
  /// to indicate that the potentially ambiguous alternatives are the complete
  /// set of represented alternatives in [configs]
  /// @param configs the ATN configuration set where the ambiguity was
  /// identified
  void reportAmbiguity(Parser recognizer, DFA dfa, int startIndex,
      int stopIndex, bool exact, BitSet ambigAlts, ATNConfigSet configs);

  /// This method is called when an SLL conflict occurs and the parser is about
  /// to use the full context information to make an LL decision.
  ///
  /// If one or more configurations in [configs] contains a semantic
  /// predicate, the predicates are evaluated before this method is called. The
  /// subset of alternatives which are still viable after predicates are
  /// evaluated is reported in [conflictingAlts].
  ///
  /// This method is not used by lexers.
  ///
  /// @param recognizer the parser instance
  /// @param dfa the DFA for the current decision
  /// @param startIndex the input index where the decision started
  /// @param stopIndex the input index where the SLL conflict occurred
  /// @param conflictingAlts The specific conflicting alternatives. If this is
  /// null, the conflicting alternatives are all alternatives
  /// represented in [configs]. At the moment, conflictingAlts is non-null
  /// (for the reference implementation, but Sam's optimized version can see this
  /// as null).
  /// @param configs the ATN configuration set where the SLL conflict was
  /// detected
  void reportAttemptingFullContext(Parser recognizer, DFA dfa, int startIndex,
      int stopIndex, BitSet conflictingAlts, ATNConfigSet configs);

  /// This method is called by the parser when a full-context prediction has a
  /// unique result.
  ///
  /// Each full-context prediction which does not result in a syntax error
  /// will call either {@link #reportContextSensitivity} or
  /// {@link #reportAmbiguity}.
  ///
  /// For prediction implementations that only evaluate full-context
  /// predictions when an SLL conflict is found (including the default
  /// [ParserATNSimulator] implementation), this method reports cases
  /// where SLL conflicts were resolved to unique full-context predictions,
  /// i.e. the decision was context-sensitive. This report does not necessarily
  /// indicate a problem, and it may appear even in completely unambiguous
  /// grammars.
  ///
  /// [configs] may have more than one represented alternative if the
  /// full-context prediction algorithm does not evaluate predicates before
  /// beginning the full-context prediction. In all cases, the final prediction
  /// is passed as the [prediction] argument.
  ///
  /// Note that the definition of "context sensitivity" in this method
  /// differs from the concept in {@link DecisionInfo#contextSensitivities}.
  /// This method reports all instances where an SLL conflict occurred but LL
  /// parsing produced a unique result, whether or not that unique result
  /// matches the minimum alternative in the SLL conflicting set.
  ///
  /// This method is not used by lexers.
  ///
  /// @param recognizer the parser instance
  /// @param dfa the DFA for the current decision
  /// @param startIndex the input index where the decision started
  /// @param stopIndex the input index where the context sensitivity was
  /// finally determined
  /// @param prediction the unambiguous result of the full-context prediction
  /// @param configs the ATN configuration set where the unambiguous prediction
  /// was determined
  void reportContextSensitivity(Parser recognizer, DFA dfa, int startIndex,
      int stopIndex, int prediction, ATNConfigSet configs);
}

/// No-op base implementation so concrete listeners only override the
/// callbacks they care about.
class BaseErrorListener extends ErrorListener {
  @override
  void reportAmbiguity(Parser recognizer, DFA dfa, int startIndex,
      int stopIndex, bool exact, BitSet ambigAlts, ATNConfigSet configs) {}

  @override
  void reportAttemptingFullContext(Parser recognizer, DFA dfa, int startIndex,
      int stopIndex, BitSet conflictingAlts, ATNConfigSet configs) {}

  @override
  void reportContextSensitivity(Parser recognizer, DFA dfa, int startIndex,
      int stopIndex, int prediction, ATNConfigSet configs) {}

  @override
  void syntaxError(Recognizer recognizer, Object offendingSymbol,
      int line, int charPositionInLine, String msg, RecognitionException e) {}
}

class ConsoleErrorListener extends BaseErrorListener {
  /// Provides a default instance of [ConsoleErrorListener].
  static final INSTANCE = ConsoleErrorListener();

  /// {@inheritDoc}
  ///
  /// This implementation prints messages to {@link System//err} containing the
  /// values of [line], [charPositionInLine], and [msg] using
  /// the following format.
  ///
  ///     line line:charPositionInLine msg
  @override
  void syntaxError(recognizer, offendingSymbol, line, column, msg, e) {
    stderr.writeln('line $line:$column $msg');
  }
}

/// This implementation of [ErrorListener] dispatches all calls to a
/// collection of delegate listeners. This reduces the effort required to support multiple
/// listeners.
class ProxyErrorListener implements ErrorListener {
  final List delegates;

  ProxyErrorListener(this.delegates) {
    if (delegates == null) {
      throw ArgumentError.notNull('delegates');
    }
  }

  @override
  void syntaxError(Recognizer recognizer, Object offendingSymbol, int line,
      int charPositionInLine, String msg, RecognitionException e) {
    for (final listener in delegates) {
      listener.syntaxError(
          recognizer, offendingSymbol, line, charPositionInLine, msg, e);
    }
  }

  @override
  void reportAmbiguity(Parser recognizer, DFA dfa, int startIndex,
      int stopIndex, bool exact, BitSet ambigAlts, ATNConfigSet configs) {
    for (final listener in delegates) {
      listener.reportAmbiguity(
          recognizer, dfa, startIndex, stopIndex, exact, ambigAlts, configs);
    }
  }

  @override
  void reportAttemptingFullContext(Parser recognizer, DFA dfa, int startIndex,
      int stopIndex, BitSet conflictingAlts, ATNConfigSet configs) {
    for (final listener in delegates) {
      listener.reportAttemptingFullContext(
          recognizer, dfa, startIndex, stopIndex, conflictingAlts, configs);
    }
  }

  @override
  void reportContextSensitivity(Parser recognizer, DFA dfa, int startIndex,
      int stopIndex, int prediction, ATNConfigSet configs) {
    for (final listener in delegates) {
      listener.reportContextSensitivity(
          recognizer, dfa, startIndex, stopIndex, prediction, configs);
    }
  }
}
diff --git a/runtime/Dart/lib/src/error/src/error_strategy.dart b/runtime/Dart/lib/src/error/src/error_strategy.dart
new file mode 100644
index 000000000..c5d02ef20
--- /dev/null
+++ b/runtime/Dart/lib/src/error/src/error_strategy.dart
@@ -0,0 +1,902 @@
/*
 * Copyright (c) 2012-2017 The
ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ + +import 'dart:developer'; + +import 'package:logging/logging.dart'; + +import '../../atn/atn.dart'; +import '../../interval_set.dart'; +import '../../misc/pair.dart'; +import '../../parser.dart'; +import '../../parser_rule_context.dart'; +import '../../rule_context.dart'; +import '../../token.dart'; +import '../../tree/tree.dart'; +import 'errors.dart'; + +/// The interface for defining strategies to deal with syntax errors encountered +/// during a parse by ANTLR-generated parsers. We distinguish between three +/// different kinds of errors: +/// +///
///
/// * The parser could not figure out which path to take in the ATN (none of
///   the available alternatives could possibly match).
/// * The current input does not match what we were looking for.
/// * A predicate evaluated to false.
///
/// Implementations of this interface report syntax errors by calling
/// {@link Parser#notifyErrorListeners}.
///
/// TODO: what to do about lexers
+abstract class ErrorStrategy { + /// Reset the error handler state for the specified [recognizer]. + /// @param recognizer the parser instance + void reset(Parser recognizer); + + /// This method is called when an unexpected symbol is encountered during an + /// inline match operation, such as {@link Parser#match}. If the error + /// strategy successfully recovers from the match failure, this method + /// returns the [Token] instance which should be treated as the + /// successful result of the match. + /// + ///

This method handles the consumption of any tokens - the caller should + /// not call {@link Parser#consume} after a successful recovery.

+ /// + ///

Note that the calling code will not report an error if this method + /// returns successfully. The error strategy implementation is responsible + /// for calling {@link Parser#notifyErrorListeners} as appropriate.

+ /// + /// @param recognizer the parser instance + /// @ if the error strategy was not able to + /// recover from the unexpected input symbol + Token recoverInline(Parser recognizer); + + /// This method is called to recover from exception [e]. This method is + /// called after {@link #reportError} by the default exception handler + /// generated for a rule method. + /// + /// @see #reportError + /// + /// @param recognizer the parser instance + /// @param e the recognition exception to recover from + /// @ if the error strategy could not recover from + /// the recognition exception + void recover(Parser recognizer, RecognitionException e); + + /// This method provides the error handler with an opportunity to handle + /// syntactic or semantic errors in the input stream before they result in a + /// [RecognitionException]. + /// + ///

The generated code currently contains calls to {@link #sync} after + /// entering the decision state of a closure block ({@code (...)*} or + /// {@code (...)+}).

+ /// + ///

For an implementation based on Jim Idle's "magic sync" mechanism, see + /// {@link DefaultErrorStrategy#sync}.

+ /// + /// @see DefaultErrorStrategy#sync + /// + /// @param recognizer the parser instance + /// @ if an error is detected by the error + /// strategy but cannot be automatically recovered at the current state in + /// the parsing process + void sync(Parser recognizer); + + /// Tests whether or not [recognizer] is in the process of recovering + /// from an error. In error recovery mode, {@link Parser#consume} adds + /// symbols to the parse tree by calling + /// {@link Parser#createErrorNode(ParserRuleContext, Token)} then + /// {@link ParserRuleContext#addErrorNode(ErrorNode)} instead of + /// {@link Parser#createTerminalNode(ParserRuleContext, Token)}. + /// + /// @param recognizer the parser instance + /// @return [true] if the parser is currently recovering from a parse + /// error, otherwise [false] + bool inErrorRecoveryMode(Parser recognizer); + + /// This method is called by when the parser successfully matches an input + /// symbol. + /// + /// @param recognizer the parser instance + void reportMatch(Parser recognizer); + + /// Report any kind of [RecognitionException]. This method is called by + /// the default exception handler generated for a rule method. + /// + /// @param recognizer the parser instance + /// @param e the recognition exception to report + void reportError(Parser recognizer, RecognitionException e); +} + +/// This is the default implementation of [ANTLRErrorStrategy] used for +/// error reporting and recovery in ANTLR parsers. +class DefaultErrorStrategy implements ErrorStrategy { + /// Indicates whether the error strategy is currently "recovering from an + /// error". This is used to suppress reporting multiple error messages while + /// attempting to recover from a detected syntax error. + /// + /// @see #inErrorRecoveryMode + bool errorRecoveryMode = false; + + /// The index into the input stream where the last error occurred. 
+ /// This is used to prevent infinite loops where an error is found + /// but no token is consumed during recovery...another error is found, + /// ad nauseum. This is a failsafe mechanism to guarantee that at least + /// one token/tree node is consumed for two errors. + int lastErrorIndex = -1; + + IntervalSet lastErrorStates; + + /// This field is used to propagate information about the lookahead following + /// the previous match. Since prediction prefers completing the current rule + /// to error recovery efforts, error reporting may occur later than the + /// original point where it was discoverable. The original context is used to + /// compute the true expected sets as though the reporting occurred as early + /// as possible. + ParserRuleContext nextTokensContext; + + /// @see #nextTokensContext + int nextTokensState; + + /// {@inheritDoc} + /// + ///

The default implementation simply calls {@link #endErrorCondition} to + /// ensure that the handler is not in error recovery mode.

+ + @override + void reset(Parser recognizer) { + endErrorCondition(recognizer); + } + + /// This method is called to enter error recovery mode when a recognition + /// exception is reported. + /// + /// @param recognizer the parser instance + void beginErrorCondition(Parser recognizer) { + errorRecoveryMode = true; + } + + /// {@inheritDoc} + + @override + bool inErrorRecoveryMode(Parser recognizer) { + return errorRecoveryMode; + } + + /// This method is called to leave error recovery mode after recovering from + /// a recognition exception. + /// + /// @param recognizer + void endErrorCondition(Parser recognizer) { + errorRecoveryMode = false; + lastErrorStates = null; + lastErrorIndex = -1; + } + + /// {@inheritDoc} + /// + ///

The default implementation simply calls {@link #endErrorCondition}.

+ + @override + void reportMatch(Parser recognizer) { + endErrorCondition(recognizer); + } + + /// {@inheritDoc} + /// + ///

The default implementation returns immediately if the handler is already + /// in error recovery mode. Otherwise, it calls {@link #beginErrorCondition} + /// and dispatches the reporting task based on the runtime type of [e] + /// according to the following table.

+ /// + ///
    + ///
  • [NoViableAltException]: Dispatches the call to + /// {@link #reportNoViableAlternative}
  • + ///
  • [InputMismatchException]: Dispatches the call to + /// {@link #reportInputMismatch}
  • + ///
  • [FailedPredicateException]: Dispatches the call to + /// {@link #reportFailedPredicate}
  • + ///
  • All other types: calls {@link Parser#notifyErrorListeners} to report + /// the exception
  • + ///
+ + @override + void reportError(Parser recognizer, RecognitionException e) { + // if we've already reported an error and have not matched a token + // yet successfully, don't report any errors. + if (inErrorRecoveryMode(recognizer)) { +// System.err.print("[SPURIOUS] "); + return; // don't report spurious errors + } + beginErrorCondition(recognizer); + if (e is NoViableAltException) { + reportNoViableAlternative(recognizer, e); + } else if (e is InputMismatchException) { + reportInputMismatch(recognizer, e); + } else if (e is FailedPredicateException) { + reportFailedPredicate(recognizer, e); + } else { + log('unknown recognition error type: ${e.runtimeType}', + level: Level.SEVERE.value); + recognizer.notifyErrorListeners(e.message, e.offendingToken, e); + } + } + + /// {@inheritDoc} + /// + ///

The default implementation resynchronizes the parser by consuming tokens + /// until we find one in the resynchronization set--loosely the set of tokens + /// that can follow the current rule.

+ + @override + void recover(Parser recognizer, RecognitionException e) { +// System.out.println("recover in "+recognizer.getRuleInvocationStack()+ +// " index="+recognizer.inputStream.index()+ +// ", lastErrorIndex="+ +// lastErrorIndex+ +// ", states="+lastErrorStates); + if (lastErrorIndex == recognizer.inputStream.index && + lastErrorStates != null && + lastErrorStates.contains(recognizer.state)) { + // uh oh, another error at same token index and previously-visited + // state in ATN; must be a case where LT(1) is in the recovery + // token set so nothing got consumed. Consume a single token + // at least to prevent an infinite loop; this is a failsafe. +// log("seen error condition before index=, level: Level.SEVERE.value"+ +// lastErrorIndex+", states="+lastErrorStates); +// log("FAILSAFE consumes "+recognizer.getTokenNames()[recognizer.inputStream.LA(1)], level: Level.SEVERE.value); + recognizer.consume(); + } + lastErrorIndex = recognizer.inputStream.index; + lastErrorStates ??= IntervalSet(); lastErrorStates.addOne(recognizer.state); + final followSet = getErrorRecoverySet(recognizer); + consumeUntil(recognizer, followSet); + } + + /// The default implementation of {@link ANTLRErrorStrategy#sync} makes sure + /// that the current lookahead symbol is consistent with what were expecting + /// at this point in the ATN. You can call this anytime but ANTLR only + /// generates code to check before subrules/loops and each iteration. + /// + ///

Implements Jim Idle's magic sync mechanism in closures and optional + /// subrules. E.g.,

+ /// + ///
+  /// a : sync ( stuff sync )* ;
+  /// sync : {consume to what can follow sync} ;
+  /// 
+ /// + /// At the start of a sub rule upon error, {@link #sync} performs single + /// token deletion, if possible. If it can't do that, it bails on the current + /// rule and uses the default error recovery, which consumes until the + /// resynchronization set of the current rule. + /// + ///

If the sub rule is optional ({@code (...)?}, {@code (...)*}, or block + /// with an empty alternative), then the expected set includes what follows + /// the subrule.

+ /// + ///

During loop iteration, it consumes until it sees a token that can start a + /// sub rule or what follows loop. Yes, that is pretty aggressive. We opt to + /// stay in the loop as long as possible.

+ /// + ///

ORIGINS

+ /// + ///

Previous versions of ANTLR did a poor job of their recovery within loops. + /// A single mismatch token or missing token would force the parser to bail + /// out of the entire rules surrounding the loop. So, for rule

+ /// + ///
+  /// classDef : 'class' ID '{' member* '}'
+  /// 
+ /// + /// input with an extra token between members would force the parser to + /// consume until it found the next class definition rather than the next + /// member definition of the current class. + /// + ///

This functionality cost a little bit of effort because the parser has to + /// compare token set at the start of the loop and at each iteration. If for + /// some reason speed is suffering for you, you can turn off this + /// functionality by simply overriding this method as a blank { }.

+ + @override + void sync(Parser recognizer) { + final s = recognizer.interpreter.atn.states[recognizer.state]; +// log("sync @ "+s.stateNumber+"="+s.getClass().getSimpleName(), level: Level.SEVERE.value); + // If already recovering, don't try to sync + if (inErrorRecoveryMode(recognizer)) { + return; + } + + final tokens = recognizer.inputStream; + final la = tokens.LA(1); + + // try cheaper subset first; might get lucky. seems to shave a wee bit off + final nextTokens = recognizer.getATN().nextTokens(s); + if (nextTokens.contains(la)) { + // We are sure the token matches + nextTokensContext = null; + nextTokensState = ATNState.INVALID_STATE_NUMBER; + return; + } + + if (nextTokens.contains(Token.EPSILON)) { + if (nextTokensContext == null) { + // It's possible the next token won't match; information tracked + // by sync is restricted for performance. + nextTokensContext = recognizer.context; + nextTokensState = recognizer.state; + } + return; + } + + switch (s.stateType) { + case StateType.BLOCK_START: + case StateType.STAR_BLOCK_START: + case StateType.PLUS_BLOCK_START: + case StateType.STAR_LOOP_ENTRY: + // report error and recover if possible + if (singleTokenDeletion(recognizer) != null) { + return; + } + + throw InputMismatchException(recognizer); + + case StateType.PLUS_LOOP_BACK: + case StateType.STAR_LOOP_BACK: +// log("at loop back: "+s.getClass().getSimpleName(), level: Level.SEVERE.value); + reportUnwantedToken(recognizer); + final expecting = recognizer.expectedTokens; + final whatFollowsLoopIterationOrRule = + expecting | getErrorRecoverySet(recognizer); + consumeUntil(recognizer, whatFollowsLoopIterationOrRule); + break; + + default: + // do nothing if we can't identify the exact kind of ATN state + break; + } + } + + /// This is called by {@link #reportError} when the exception is a + /// [NoViableAltException]. 
+ /// + /// @see #reportError + /// + /// @param recognizer the parser instance + /// @param e the recognition exception + void reportNoViableAlternative(Parser recognizer, NoViableAltException e) { + final tokens = recognizer.inputStream; + String input; + if (tokens != null) { + if (e.startToken.type == Token.EOF) { + input = ''; + } else { + input = tokens.getTextRange(e.startToken, e.offendingToken); + } + } else { + input = ''; + } + final msg = 'no viable alternative at input ' + escapeWSAndQuote(input); + recognizer.notifyErrorListeners(msg, e.offendingToken, e); + } + + /// This is called by {@link #reportError} when the exception is an + /// [InputMismatchException]. + /// + /// @see #reportError + /// + /// @param recognizer the parser instance + /// @param e the recognition exception + void reportInputMismatch(Parser recognizer, InputMismatchException e) { + final msg = 'mismatched input ' + + getTokenErrorDisplay(e.offendingToken) + + ' expecting ' + + e.expectedTokens.toString(vocabulary: recognizer.vocabulary); + recognizer.notifyErrorListeners(msg, e.offendingToken, e); + } + + /// This is called by {@link #reportError} when the exception is a + /// [FailedPredicateException]. + /// + /// @see #reportError + /// + /// @param recognizer the parser instance + /// @param e the recognition exception + void reportFailedPredicate(Parser recognizer, FailedPredicateException e) { + final ruleName = + recognizer.ruleNames[recognizer.context.ruleIndex]; + final msg = 'rule ' + ruleName + ' ' + e.message; + recognizer.notifyErrorListeners(msg, e.offendingToken, e); + } + + /// This method is called to report a syntax error which requires the removal + /// of a token from the input stream. At the time this method is called, the + /// erroneous symbol is current {@code LT(1)} symbol and has not yet been + /// removed from the input stream. When this method returns, + /// [recognizer] is in error recovery mode. + /// + ///

This method is called when {@link #singleTokenDeletion} identifies + /// single-token deletion as a viable recovery strategy for a mismatched + /// input error.

+ /// + ///

The default implementation simply returns if the handler is already in + /// error recovery mode. Otherwise, it calls {@link #beginErrorCondition} to + /// enter error recovery mode, followed by calling + /// {@link Parser#notifyErrorListeners}.

+ /// + /// @param recognizer the parser instance + void reportUnwantedToken(Parser recognizer) { + if (inErrorRecoveryMode(recognizer)) { + return; + } + + beginErrorCondition(recognizer); + + final t = recognizer.currentToken; + final tokenName = getTokenErrorDisplay(t); + final expecting = getExpectedTokens(recognizer); + final msg = 'extraneous input ' + + tokenName + + ' expecting ' + + expecting.toString(vocabulary: recognizer.vocabulary); + recognizer.notifyErrorListeners(msg, t, null); + } + + /// This method is called to report a syntax error which requires the + /// insertion of a missing token into the input stream. At the time this + /// method is called, the missing token has not yet been inserted. When this + /// method returns, [recognizer] is in error recovery mode. + /// + ///

This method is called when {@link #singleTokenInsertion} identifies + /// single-token insertion as a viable recovery strategy for a mismatched + /// input error.

+ /// + ///

The default implementation simply returns if the handler is already in + /// error recovery mode. Otherwise, it calls {@link #beginErrorCondition} to + /// enter error recovery mode, followed by calling + /// {@link Parser#notifyErrorListeners}.

+ /// + /// @param recognizer the parser instance + void reportMissingToken(Parser recognizer) { + if (inErrorRecoveryMode(recognizer)) { + return; + } + + beginErrorCondition(recognizer); + + final t = recognizer.currentToken; + final expecting = getExpectedTokens(recognizer); + final msg = 'missing ' + + expecting.toString(vocabulary: recognizer.vocabulary) + + ' at ' + + getTokenErrorDisplay(t); + + recognizer.notifyErrorListeners(msg, t, null); + } + + /// {@inheritDoc} + /// + ///

The default implementation attempts to recover from the mismatched input + /// by using single token insertion and deletion as described below. If the + /// recovery attempt fails, this method throws an + /// [InputMismatchException].

+ /// + ///

EXTRA TOKEN (single token deletion)

+ /// + ///

{@code LA(1)} is not what we are looking for. If {@code LA(2)} has the + /// right token, however, then assume {@code LA(1)} is some extra spurious + /// token and delete it. Then consume and return the next token (which was + /// the {@code LA(2)} token) as the successful result of the match operation.

+ /// + ///

This recovery strategy is implemented by {@link #singleTokenDeletion}.

+ /// + ///

MISSING TOKEN (single token insertion)

+ /// + ///

If current token (at {@code LA(1)}) is consistent with what could come + /// after the expected {@code LA(1)} token, then assume the token is missing + /// and use the parser's [TokenFactory] to create it on the fly. The + /// "insertion" is performed by returning the created token as the successful + /// result of the match operation.

+ /// + ///

This recovery strategy is implemented by {@link #singleTokenInsertion}.

+ /// + ///

EXAMPLE

+ /// + ///

For example, Input {@code i=(3;} is clearly missing the {@code ')'}. When + /// the parser returns from the nested call to [expr], it will have + /// call chain:

+ /// + ///
+  /// stat → expr → atom
+  /// 
+ /// + /// and it will be trying to match the {@code ')'} at this point in the + /// derivation: + /// + ///
+  /// => ID '=' '(' INT ')' ('+' atom)* ';'
+  ///                    ^
+  /// 
+ /// + /// The attempt to match {@code ')'} will fail when it sees {@code ';'} and + /// call {@link #recoverInline}. To recover, it sees that {@code LA(1)==';'} + /// is in the set of tokens that can follow the {@code ')'} token reference + /// in rule [atom]. It can assume that you forgot the {@code ')'}. + + @override + Token recoverInline(Parser recognizer) { +// SINGLE TOKEN DELETION + final matchedSymbol = singleTokenDeletion(recognizer); + if (matchedSymbol != null) { +// we have deleted the extra token. +// now, move past ttype token as if all were ok + recognizer.consume(); + return matchedSymbol; + } + +// SINGLE TOKEN INSERTION + if (singleTokenInsertion(recognizer)) { + return getMissingSymbol(recognizer); + } + +// even that didn't work; must throw the exception + InputMismatchException e; + if (nextTokensContext == null) { + e = InputMismatchException(recognizer); + } else { + e = InputMismatchException( + recognizer, nextTokensState, nextTokensContext); + } + + throw e; + } + + /// This method implements the single-token insertion inline error recovery + /// strategy. It is called by {@link #recoverInline} if the single-token + /// deletion strategy fails to recover from the mismatched input. If this + /// method returns [true], [recognizer] will be in error recovery + /// mode. + /// + ///

This method determines whether or not single-token insertion is viable by + /// checking if the {@code LA(1)} input symbol could be successfully matched + /// if it were instead the {@code LA(2)} symbol. If this method returns + /// [true], the caller is responsible for creating and inserting a + /// token with the correct type to produce this behavior.

+ /// + /// @param recognizer the parser instance + /// @return [true] if single-token insertion is a viable recovery + /// strategy for the current mismatched input, otherwise [false] + bool singleTokenInsertion(Parser recognizer) { + final currentSymbolType = recognizer.inputStream.LA(1); + // if current token is consistent with what could come after current + // ATN state, then we know we're missing a token; error recovery + // is free to conjure up and insert the missing token + final currentState = + recognizer.interpreter.atn.states[recognizer.state]; + final next = currentState.transition(0).target; + final atn = recognizer.interpreter.atn; + final expectingAtLL2 = atn.nextTokens(next, recognizer.context); +// System.out.println("LT(2) set="+expectingAtLL2.toString(recognizer.getTokenNames())); + if (expectingAtLL2.contains(currentSymbolType)) { + reportMissingToken(recognizer); + return true; + } + return false; + } + + /// This method implements the single-token deletion inline error recovery + /// strategy. It is called by {@link #recoverInline} to attempt to recover + /// from mismatched input. If this method returns null, the parser and error + /// handler state will not have changed. If this method returns non-null, + /// [recognizer] will not be in error recovery mode since the + /// returned token was a successful match. + /// + ///

If the single-token deletion is successful, this method calls + /// {@link #reportUnwantedToken} to report the error, followed by + /// {@link Parser#consume} to actually "delete" the extraneous token. Then, + /// before returning {@link #reportMatch} is called to signal a successful + /// match.

+ /// + /// @param recognizer the parser instance + /// @return the successfully matched [Token] instance if single-token + /// deletion successfully recovers from the mismatched input, otherwise + /// null + Token singleTokenDeletion(Parser recognizer) { + final nextTokenType = recognizer.inputStream.LA(2); + final expecting = getExpectedTokens(recognizer); + if (expecting.contains(nextTokenType)) { + reportUnwantedToken(recognizer); + /* + log("recoverFromMismatchedToken deleting , level: Level.SEVERE.value"+ + ((TokenStream)recognizer.inputStream).LT(1)+ + " since "+((TokenStream)recognizer.inputStream).LT(2)+ + " is what we want"); + */ + recognizer.consume(); // simply delete extra token + // we want to return the token we're actually matching + final matchedSymbol = recognizer.currentToken; + reportMatch(recognizer); // we know current token is correct + return matchedSymbol; + } + return null; + } + + /// Conjure up a missing token during error recovery. + /// + /// The recognizer attempts to recover from single missing + /// symbols. But, actions might refer to that missing symbol. + /// For example, x=ID {f($x);}. The action clearly assumes + /// that there has been an identifier matched previously and that + /// $x points at that token. If that token is missing, but + /// the next token in the stream is what we want we assume that + /// this token is missing and we keep going. Because we + /// have to return some token to replace the missing token, + /// we have to conjure one up. This method gives the user control + /// over the tokens returned for missing tokens. Mostly, + /// you will want to create something special for identifier + /// tokens. For literals such as '{' and ',', the default + /// action in the parser or tree parser works. It simply creates + /// a CommonToken of the appropriate type. The text will be the token. + /// If you change what tokens must be created by the lexer, + /// override this method to create the appropriate tokens. 
+ Token getMissingSymbol(Parser recognizer) { + final currentSymbol = recognizer.currentToken; + final expecting = getExpectedTokens(recognizer); + var expectedTokenType = Token.INVALID_TYPE; + if (!expecting.isNil) { + expectedTokenType = expecting.minElement; // get any element + } + String tokenText; + if (expectedTokenType == Token.EOF) { + tokenText = ''; + } else { + tokenText = ''; + } + var current = currentSymbol; + final lookback = recognizer.inputStream.LT(-1); + if (current.type == Token.EOF && lookback != null) { + current = lookback; + } + return recognizer.tokenFactory.create( + expectedTokenType, + tokenText, + Pair(current.tokenSource, current.tokenSource.inputStream), + Token.DEFAULT_CHANNEL, + -1, + -1, + current.line, + current.charPositionInLine); + } + + IntervalSet getExpectedTokens(Parser recognizer) { + return recognizer.expectedTokens; + } + + /// How should a token be displayed in an error message? The default + /// is to display just the text, but during development you might + /// want to have a lot of information spit out. Override in that case + /// to use t.toString() (which, for CommonToken, dumps everything about + /// the token). This is better than forcing you to override a method in + /// your token objects because you don't have to go modify your lexer + /// so that it creates a new Java type. + String getTokenErrorDisplay(Token t) { + if (t == null) return ''; + var s = getSymbolText(t); + if (s == null) { + if (getSymbolType(t) == Token.EOF) { + s = ''; + } else { + s = '<${getSymbolType(t)}>'; + } + } + return escapeWSAndQuote(s); + } + + String getSymbolText(Token symbol) { + return symbol.text; + } + + int getSymbolType(Token symbol) { + return symbol.type; + } + + String escapeWSAndQuote(String s) { +// if ( s==null ) return s; + s = s.replaceAll('\n', r'\n'); + s = s.replaceAll('\r', r'\r'); + s = s.replaceAll('\t', r'\t'); + return "'" + s + "'"; + } + +/* Compute the error recovery set for the current rule. 
During + * rule invocation, the parser pushes the set of tokens that can + * follow that rule reference on the stack; this amounts to + * computing FIRST of what follows the rule reference in the + * enclosing rule. See LinearApproximator.FIRST(). + * This local follow set only includes tokens + * from within the rule; i.e., the FIRST computation done by + * ANTLR stops at the end of a rule. + * + * EXAMPLE + * + * When you find a "no viable alt exception", the input is not + * consistent with any of the alternatives for rule r. The best + * thing to do is to consume tokens until you see something that + * can legally follow a call to r *or* any rule that called r. + * You don't want the exact set of viable next tokens because the + * input might just be missing a token--you might consume the + * rest of the input looking for one of the missing tokens. + * + * Consider grammar: + * + * a : '[' b ']' + * | '(' b ')' + * ; + * b : c '^' INT ; + * c : ID + * | INT + * ; + * + * At each rule invocation, the set of tokens that could follow + * that rule is pushed on a stack. Here are the various + * context-sensitive follow sets: + * + * FOLLOW(b1_in_a) = FIRST(']') = ']' + * FOLLOW(b2_in_a) = FIRST(')') = ')' + * FOLLOW(c_in_b) = FIRST('^') = '^' + * + * Upon erroneous input "[]", the call chain is + * + * a -> b -> c + * + * and, hence, the follow context stack is: + * + * depth follow set start of rule execution + * 0 a (from main()) + * 1 ']' b + * 2 '^' c + * + * Notice that ')' is not included, because b would have to have + * been called from a different context in rule a for ')' to be + * included. + * + * For error recovery, we cannot consider FOLLOW(c) + * (context-sensitive or otherwise). We need the combined set of + * all context-sensitive FOLLOW sets--the set of all tokens that + * could follow any reference in the call chain. We need to + * resync to one of those tokens. Note that FOLLOW(c)='^' and if + * we resync'd to that token, we'd consume until EOF. 
We need to + * sync to context-sensitive FOLLOWs for a, b, and c: {']','^'}. + * In this case, for input "[]", LA(1) is ']' and in the set, so we would + * not consume anything. After printing an error, rule c would + * return normally. Rule b would not find the required '^' though. + * At this point, it gets a mismatched token error and throws an + * exception (since LA(1) is not in the viable following token + * set). The rule exception handler tries to recover, but finds + * the same recovery set and doesn't consume anything. Rule b + * exits normally returning to rule a. Now it finds the ']' (and + * with the successful match exits errorRecovery mode). + * + * So, you can see that the parser walks up the call chain looking + * for the token that was a member of the recovery set. + * + * Errors are not generated in errorRecovery mode. + * + * ANTLR's error recovery mechanism is based upon original ideas: + * + * "Algorithms + Data Structures = Programs" by Niklaus Wirth + * + * and + * + * "A note on error recovery in recursive descent parsers": + * http://portal.acm.org/citation.cfm?id=947902.947905 + * + * Later, Josef Grosch had some good ideas: + * + * "Efficient and Comfortable Error Recovery in Recursive Descent + * Parsers": + * ftp://www.cocolab.com/products/cocktail/doca4.ps/ell.ps.zip + * + * Like Grosch I implement context-sensitive FOLLOW sets that are combined + * at run-time upon error to avoid overhead during parsing. 
+ */ + IntervalSet getErrorRecoverySet(Parser recognizer) { + final atn = recognizer.interpreter.atn; + RuleContext ctx = recognizer.context; + final recoverSet = IntervalSet(); + while (ctx != null && ctx.invokingState >= 0) { + // compute what follows who invoked us + final invokingState = atn.states[ctx.invokingState]; + RuleTransition rt = invokingState.transition(0); + final follow = atn.nextTokens(rt.followState); + recoverSet.addAll(follow); + ctx = ctx.parent; + } + recoverSet.remove(Token.EPSILON); +// System.out.println("recover set "+recoverSet.toString(recognizer.getTokenNames())); + return recoverSet; + } + + /// Consume tokens until one matches the given token set. */ + void consumeUntil(Parser recognizer, IntervalSet set) { +// log("consumeUntil("+set.toString(recognizer.getTokenNames())+")", level: Level.SEVERE.value); + var ttype = recognizer.inputStream.LA(1); + while (ttype != Token.EOF && !set.contains(ttype)) { + //System.out.println("consume during recover LA(1)="+getTokenNames()[input.LA(1)]); +// recognizer.inputStream.consume(); + recognizer.consume(); + ttype = recognizer.inputStream.LA(1); + } + } +} + +/// This implementation of [ANTLRErrorStrategy] responds to syntax errors +/// by immediately canceling the parse operation with a +/// [ParseCancellationException]. The implementation ensures that the +/// {@link ParserRuleContext#exception} field is set for all parse tree nodes +/// that were not completed prior to encountering the error. +/// +///

+/// This error strategy is useful in the following scenarios.

+///
+/// <ul>
+/// <li>Two-stage parsing: This error strategy allows the first
+/// stage of two-stage parsing to immediately terminate if an error is
+/// encountered, and immediately fall back to the second stage. In addition to
+/// avoiding wasted work by attempting to recover from errors here, the empty
+/// implementation of {@link BailErrorStrategy#sync} improves the performance of
+/// the first stage.</li>
+/// <li>Silent validation: When syntax errors are not being
+/// reported or logged, and the parse result is simply ignored if errors occur,
+/// the [BailErrorStrategy] avoids wasting work on recovering from errors
+/// when the result will be ignored either way.</li>
+/// </ul>
+///

+/// {@code myparser.setErrorHandler(new BailErrorStrategy());}

+/// +/// @see Parser#setErrorHandler(ANTLRErrorStrategy) +class BailErrorStrategy extends DefaultErrorStrategy { + /// Instead of recovering from exception [e], re-throw it wrapped + /// in a [ParseCancellationException] so it is not caught by the + /// rule function catches. Use {@link Exception#getCause()} to get the + /// original [RecognitionException]. + + @override + void recover(Parser recognizer, RecognitionException e) { + for (var context = recognizer.context; + context != null; + context = context.parent) { + context.exception = e; + } + + throw ParseCancellationException(e.message); + } + + /// Make sure we don't attempt to recover inline; if the parser + /// successfully recovers, it won't throw an exception. + + @override + Token recoverInline(Parser recognizer) { + final e = InputMismatchException(recognizer); + for (var context = recognizer.context; + context != null; + context = context.parent) { + context.exception = e; + } + + throw ParseCancellationException(e.message); + } + + /// Make sure we don't attempt to recover from problems in subrules. */ + + @override + void sync(Parser recognizer) {} +} diff --git a/runtime/Dart/lib/src/error/src/errors.dart b/runtime/Dart/lib/src/error/src/errors.dart new file mode 100644 index 000000000..653905ec6 --- /dev/null +++ b/runtime/Dart/lib/src/error/src/errors.dart @@ -0,0 +1,204 @@ +/* + * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ + +import '../../atn/atn.dart'; +import '../../input_stream.dart'; +import '../../interval_set.dart'; +import '../../lexer.dart'; +import '../../parser.dart'; +import '../../parser_rule_context.dart'; +import '../../recognizer.dart'; +import '../../rule_context.dart'; +import '../../token.dart'; +import '../../token_stream.dart'; +import '../../util/utils.dart'; + +/// The root of the ANTLR exception hierarchy. 
In general, ANTLR tracks just +/// 3 kinds of errors: prediction errors, failed predicate errors, and +/// mismatched input errors. In each case, the parser knows where it is +/// in the input, where it is in the ATN, the rule invocation stack, +/// and what kind of problem occurred. +class RecognitionException extends StateError { + /// Gets the [Recognizer] where this exception occurred. + /// + ///

If the recognizer is not available, this method returns null.

+ /// + /// @return The recognizer where this exception occurred, or null if + /// the recognizer is not available. + final Recognizer recognizer; + + /// Gets the [RuleContext] at the time this exception was thrown. + /// + ///

If the context is not available, this method returns null.

+ /// + /// @return The [RuleContext] at the time this exception was thrown. + /// If the context is not available, this method returns null. + final RuleContext ctx; + + /// Gets the input stream which is the symbol source for the recognizer where + /// this exception was thrown. + /// + ///

If the input stream is not available, this method returns null.

+ /// + /// @return The input stream which is the symbol source for the recognizer + /// where this exception was thrown, or null if the stream is not + /// available. + final IntStream inputStream; + + /// The current [Token] when an error occurred. Since not all streams + /// support accessing symbols by index, we have to track the [Token] + /// instance itself. + Token offendingToken; + + /// Get the ATN state number the parser was in at the time the error + /// occurred. For [NoViableAltException] and + /// [LexerNoViableAltException] exceptions, this is the + /// [DecisionState] number. For others, it is the state whose outgoing + /// edge we couldn't match. + /// + ///

If the state number is not known, this method returns -1.

+ int offendingState = -1; + + RecognitionException(this.recognizer, this.inputStream, this.ctx, + [String message = '']) + : super(message) { + if (recognizer != null) offendingState = recognizer.state; + } + + /// Gets the set of input symbols which could potentially follow the + /// previously matched symbol at the time this exception was thrown. + /// + ///

If the set of expected tokens is not known and could not be computed, + /// this method returns null.

+ /// + /// @return The set of token types that could potentially follow the current + /// state in the ATN, or null if the information is not available. + IntervalSet get expectedTokens { + if (recognizer != null) { + return recognizer.getATN().getExpectedTokens(offendingState, ctx); + } + return null; + } +} + +class LexerNoViableAltException extends RecognitionException { + /// Matching attempted at what input index? */ + final int startIndex; + + /// Which configurations did we try at input.index() that couldn't match input.LA(1)? */ + final ATNConfigSet deadEndConfigs; + + LexerNoViableAltException( + Lexer lexer, CharStream input, this.startIndex, this.deadEndConfigs) + : super(lexer, input, null); + + @override + CharStream get inputStream { + return super.inputStream; + } + + @override + String toString() { + var symbol = ''; + if (startIndex >= 0 && startIndex < inputStream.size) { + symbol = inputStream.getText(Interval.of(startIndex, startIndex)); + symbol = escapeWhitespace(symbol); + } + + return "${LexerNoViableAltException}('${symbol}')"; + } +} + +/// Indicates that the parser could not decide which of two or more paths +/// to take based upon the remaining input. It tracks the starting token +/// of the offending input and also knows where the parser was +/// in the various paths when the error. Reported by reportNoViableAlternative() +class NoViableAltException extends RecognitionException { + /// Which configurations did we try at input.index() that couldn't match input.LT(1)? */ + + final ATNConfigSet deadEndConfigs; + + /// The token object at the start index; the input stream might + /// not be buffering tokens so get a reference to it. (At the + /// time the error occurred, of course the stream needs to keep a + /// buffer all of the tokens but later we might not have access to those.) 
+ + final Token startToken; + +// NoViableAltException(Parser recognizer) { // LL(1) error +// this(recognizer, +// recognizer.inputStream, +// recognizer.getCurrentToken(), +// recognizer.getCurrentToken(), +// null, +// recognizer._ctx); +// } + + NoViableAltException._(Parser recognizer, TokenStream input, this.startToken, + Token offendingToken, this.deadEndConfigs, ParserRuleContext ctx) + : super(recognizer, input, ctx) { + this.offendingToken = offendingToken; + } + + NoViableAltException(Parser recognizer, + [TokenStream input, + Token startToken, + Token offendingToken, + ATNConfigSet deadEndConfigs, + ParserRuleContext ctx]) + : this._( + recognizer, + input ?? recognizer.inputStream, + startToken ?? recognizer.currentToken, + offendingToken ?? recognizer.currentToken, + deadEndConfigs, + ctx ?? recognizer.context); +} + +/// This signifies any kind of mismatched input exceptions such as +/// when the current input does not match the expected token. +class InputMismatchException extends RecognitionException { + InputMismatchException(Parser recognizer, + [int state = -1, ParserRuleContext ctx]) + : super(recognizer, recognizer.inputStream, ctx ?? recognizer.context) { + if (state != -1 && ctx != null) { + offendingState = state; + } + offendingToken = recognizer.currentToken; + } +} + +/// A semantic predicate failed during validation. Validation of predicates +/// occurs when normally parsing the alternative just like matching a token. +/// Disambiguating predicate evaluation occurs when we test a predicate during +/// prediction. 
+class FailedPredicateException extends RecognitionException { + int ruleIndex; + int predIndex; + final String predicate; + + FailedPredicateException(Parser recognizer, + [this.predicate, String message]) + : super(recognizer, recognizer.inputStream, recognizer.context, + formatMessage(predicate, message)) { + final s = recognizer.interpreter.atn.states[recognizer.state]; + + AbstractPredicateTransition trans = s.transition(0); + if (trans is PredicateTransition) { + ruleIndex = trans.ruleIndex; + predIndex = trans.predIndex; + } + offendingToken = recognizer.currentToken; + } + + static String formatMessage(String predicate, String message) { + if (message != null) { + return message; + } + + return 'failed predicate: {$predicate}?'; + } +} diff --git a/runtime/Dart/lib/src/input_stream.dart b/runtime/Dart/lib/src/input_stream.dart new file mode 100644 index 000000000..63dbd6089 --- /dev/null +++ b/runtime/Dart/lib/src/input_stream.dart @@ -0,0 +1,335 @@ +/* + * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ + +import 'dart:async'; +import 'dart:convert'; +import 'dart:io'; +import 'dart:math'; + +import 'interval_set.dart'; +import 'token.dart'; + +/// A simple stream of symbols whose values are represented as integers. This +/// interface provides marked ranges with support for a minimum level +/// of buffering necessary to implement arbitrary lookahead during prediction. +/// For more information on marked ranges, see {@link #mark}. +/// +///

+/// <p>Initializing Methods: Some methods in this interface have
+/// unspecified behavior if no call to an initializing method has occurred after
+/// the stream was constructed. The following is a list of initializing methods:</p>
+///
+/// <ul>
+/// <li>{@link #LA}</li>
+/// <li>{@link #consume}</li>
+/// <li>{@link #size}</li>
+/// </ul>
+abstract class IntStream { + /// The value returned by {@link #LA LA()} when the end of the stream is + /// reached. + static const int EOF = -1; + + /// The value returned by {@link #getSourceName} when the actual name of the + /// underlying source is not known. + static const UNKNOWN_SOURCE_NAME = ''; + + /// Consumes the current symbol in the stream. This method has the following + /// effects: + /// + ///
    + ///
+ /// <ul>
+ /// <li>Forward movement: The value of {@link #index index()}
+ /// before calling this method is less than the value of {@code index()}
+ /// after calling this method.</li>
+ /// <li>Ordered lookahead: The value of {@code LA(1)} before
+ /// calling this method becomes the value of {@code LA(-1)} after calling
+ /// this method.</li>
+ /// </ul>
+ /// + /// Note that calling this method does not guarantee that {@code index()} is + /// incremented by exactly 1, as that would preclude the ability to implement + /// filtering streams (e.g. [CommonTokenStream] which distinguishes + /// between "on-channel" and "off-channel" tokens). + /// + /// @throws IllegalStateException if an attempt is made to consume the + /// end of the stream (i.e. if {@code LA(1)==}{@link #EOF EOF} before calling + /// [consume]). + void consume(); + + /// Gets the value of the symbol at offset [i] from the current + /// position. When {@code i==1}, this method returns the value of the current + /// symbol in the stream (which is the next symbol to be consumed). When + /// {@code i==-1}, this method returns the value of the previously read + /// symbol in the stream. It is not valid to call this method with + /// {@code i==0}, but the specific behavior is unspecified because this + /// method is frequently called from performance-critical code. + /// + ///

This method is guaranteed to succeed if any of the following are true:

+ /// + ///
    + ///
+ /// <ul>
+ /// <li>{@code i>0}</li>
+ /// <li>{@code i==-1} and {@link #index index()} returns a value greater
+ /// than the value of {@code index()} after the stream was constructed
+ /// and {@code LA(1)} was called in that order. Specifying the current
+ /// {@code index()} relative to the index after the stream was created
+ /// allows for filtering implementations that do not return every symbol
+ /// from the underlying source. Specifying the call to {@code LA(1)}
+ /// allows for lazily initialized streams.</li>
+ /// <li>{@code LA(i)} refers to a symbol consumed within a marked region
+ /// that has not yet been released.</li>
+ /// </ul>
+ /// + ///

If [i] represents a position at or beyond the end of the stream, + /// this method returns {@link #EOF}.

+ /// + ///

The return value is unspecified if {@code i<0} and fewer than {@code -i} + /// calls to {@link #consume consume()} have occurred from the beginning of + /// the stream before calling this method.

+ /// + /// @throws UnsupportedOperationException if the stream does not support + /// retrieving the value of the specified symbol + int LA(int i); + + /// A mark provides a guarantee that {@link #seek seek()} operations will be + /// valid over a "marked range" extending from the index where {@code mark()} + /// was called to the current {@link #index index()}. This allows the use of + /// streaming input sources by specifying the minimum buffering requirements + /// to support arbitrary lookahead during prediction. + /// + ///

The returned mark is an opaque handle (type [int]) which is passed + /// to {@link #release release()} when the guarantees provided by the marked + /// range are no longer necessary. When calls to + /// {@code mark()}/{@code release()} are nested, the marks must be released + /// in reverse order of which they were obtained. Since marked regions are + /// used during performance-critical sections of prediction, the specific + /// behavior of invalid usage is unspecified (i.e. a mark is not released, or + /// a mark is released twice, or marks are not released in reverse order from + /// which they were created).

+ /// + ///

The behavior of this method is unspecified if no call to an + /// {@link IntStream initializing method} has occurred after this stream was + /// constructed.

+ /// + ///

This method does not change the current position in the input stream.

+ /// + ///

The following example shows the use of {@link #mark mark()}, + /// {@link #release release(mark)}, {@link #index index()}, and + /// {@link #seek seek(index)} as part of an operation to safely work within a + /// marked region, then restore the stream position to its original value and + /// release the mark.

+ ///
+  /// IntStream stream = ...;
+  /// int index = -1;
+  /// int mark = stream.mark();
+  /// try {
+  ///   index = stream.index();
+  ///   // perform work here...
+  /// } finally {
+  ///   if (index != -1) {
+  ///     stream.seek(index);
+  ///   }
+  ///   stream.release(mark);
+  /// }
+  /// 
+ /// + /// @return An opaque marker which should be passed to + /// {@link #release release()} when the marked range is no longer required. + int mark(); + + /// This method releases a marked range created by a call to + /// {@link #mark mark()}. Calls to {@code release()} must appear in the + /// reverse order of the corresponding calls to {@code mark()}. If a mark is + /// released twice, or if marks are not released in reverse order of the + /// corresponding calls to {@code mark()}, the behavior is unspecified. + /// + ///

For more information and an example, see {@link #mark}.

+ /// + /// @param marker A marker returned by a call to {@code mark()}. + /// @see #mark + void release(int marker); + + /// Return the index into the stream of the input symbol referred to by + /// {@code LA(1)}. + /// + ///

The behavior of this method is unspecified if no call to an + /// {@link IntStream initializing method} has occurred after this stream was + /// constructed.

+ int get index; + + /// Set the input cursor to the position indicated by [index]. If the + /// specified index lies past the end of the stream, the operation behaves as + /// though [index] was the index of the EOF symbol. After this method + /// returns without throwing an exception, then at least one of the following + /// will be true. + /// + ///
    + ///
+ /// <ul>
+ /// <li>{@link #index index()} will return the index of the first symbol
+ /// appearing at or after the specified [index]. Specifically,
+ /// implementations which filter their sources should automatically
+ /// adjust [index] forward the minimum amount required for the
+ /// operation to target a non-ignored symbol.</li>
+ /// <li>{@code LA(1)} returns {@link #EOF}</li>
+ /// </ul>
+ /// + /// This operation is guaranteed to not throw an exception if [index] + /// lies within a marked region. For more information on marked regions, see + /// {@link #mark}. The behavior of this method is unspecified if no call to + /// an {@link IntStream initializing method} has occurred after this stream + /// was constructed. + /// + /// @param index The absolute index to seek to. + /// + /// @throws IllegalArgumentException if [index] is less than 0 + /// @throws UnsupportedOperationException if the stream does not support + /// seeking to the specified index + void seek(int index); + + /// Returns the total number of symbols in the stream, including a single EOF + /// symbol. + /// + /// @throws UnsupportedOperationException if the size of the stream is + /// unknown. + int get size; + + /// Gets the name of the underlying symbol source. This method returns a + /// non-null, non-empty string. If such a name is not known, this method + /// returns {@link #UNKNOWN_SOURCE_NAME}. + + String get sourceName; +} + +/// A source of characters for an ANTLR lexer. */ +abstract class CharStream extends IntStream { + /// This method returns the text for a range of characters within this input + /// stream. This method is guaranteed to not throw an exception if the + /// specified [interval] lies entirely within a marked range. For more + /// information about marked ranges, see {@link IntStream#mark}. 
+ /// + /// @param interval an interval within the stream + /// @return the text of the specified interval + /// + /// @throws NullPointerException if [interval] is null + /// @throws IllegalArgumentException if {@code interval.a < 0}, or if + /// {@code interval.b < interval.a - 1}, or if {@code interval.b} lies at or + /// past the end of the stream + /// @throws UnsupportedOperationException if the stream does not support + /// getting the text of the specified interval + String getText(Interval interval); +} + +// Vacuum all input from a string and then treat it like a buffer. +class InputStream extends CharStream { + final name = ''; + List data; + int _index = 0; + bool decodeToUnicodeCodePoints = false; + + InputStream(List data) { + this.data = data; + } + + InputStream.fromString(String data) { + this.data = data.runes.toList(growable: false); + } + + static Future fromStringStream(Stream stream) async { + final data = StringBuffer(); + await stream.listen((buf) { + data.write(buf); + }).asFuture(); + return InputStream.fromString(data.toString()); + } + + static Future fromStream(Stream> stream, + {Encoding encoding = utf8}) { + final data = stream.transform(encoding.decoder); + return fromStringStream(data); + } + + static Future fromPath(String path, {Encoding encoding = utf8}) { + return fromStream(File(path).openRead()); + } + + @override + int get index { + return _index; + } + + @override + int get size { + return data.length; + } + + /// Reset the stream so that it's in the same state it was + /// when the object was created *except* the data array is not + /// touched. 
+ void reset() { + _index = 0; + } + + @override + void consume() { + if (_index >= size) { + // assert this.LA(1) == Token.EOF + throw ('cannot consume EOF'); + } + _index += 1; + } + + @override + int LA(int offset) { + if (offset == 0) { + return 0; // undefined + } + if (offset < 0) { + offset += 1; // e.g., translate LA(-1) to use offset=0 + } + final pos = _index + offset - 1; + if (pos < 0 || pos >= size) { + // invalid + return Token.EOF; + } + return data[pos]; + } + + /// mark/release do nothing; we have entire buffer + @override + int mark() { + return -1; + } + + @override + void release(int marker) {} + + /// consume() ahead until p==_index; can't just set p=_index as we must + /// update line and column. If we seek backwards, just set p + @override + void seek(int _index) { + if (_index <= this._index) { + this._index = _index; // just jump; don't update stream state (line, + // ...) + return; + } + // seek forward + this._index = min(_index, size); + } + + @override + String getText(Interval interval) { + final startIdx = min(interval.a, size); + final len = min(interval.b - interval.a + 1, size - startIdx); + return String.fromCharCodes(data, startIdx, startIdx + len); + } + + @override + String toString() { + return String.fromCharCodes(data); + } + + @override + String get sourceName { + // TODO: implement getSourceName + return IntStream.UNKNOWN_SOURCE_NAME; + } +} diff --git a/runtime/Dart/lib/src/interval_set.dart b/runtime/Dart/lib/src/interval_set.dart new file mode 100644 index 000000000..09cdba533 --- /dev/null +++ b/runtime/Dart/lib/src/interval_set.dart @@ -0,0 +1,735 @@ +/* + * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. 
+ */ + +import 'dart:math'; + +import 'package:collection/collection.dart'; + +import 'lexer.dart'; +import 'token.dart'; +import 'util/murmur_hash.dart'; +import 'vocabulary.dart'; + +/// An immutable inclusive interval a..b */ +class Interval { + static final int INTERVAL_POOL_MAX_VALUE = 1000; + + static final Interval INVALID = Interval(-1, -2); + + static List cache = List(INTERVAL_POOL_MAX_VALUE + 1); + + int a; + int b; + + static int creates = 0; + static int misses = 0; + static int hits = 0; + static int outOfRange = 0; + + Interval(this.a, this.b); + + /// Interval objects are used readonly so share all with the + /// same single value a==b up to some max size. Use an array as a perfect hash. + /// Return shared object for 0..INTERVAL_POOL_MAX_VALUE or a new + /// Interval object with a..a in it. On Java.g4, 218623 IntervalSets + /// have a..a (set with 1 element). + static Interval of(int a, int b) { + // cache just a..a + if (a != b || a < 0 || a > INTERVAL_POOL_MAX_VALUE) { + return Interval(a, b); + } + if (cache[a] == null) { + cache[a] = Interval(a, a); + } + return cache[a]; + } + + /// return number of elements between a and b inclusively. x..x is length 1. + /// if b < a, then length is 0. 9..10 has length 2. + int get length { + if (b < a) return 0; + return b - a + 1; + } + + @override + bool operator ==(Object o) { + if (o == null || !(o is Interval)) { + return false; + } + Interval other = o; + return a == other.a && b == other.b; + } + + @override + int get hashCode { + var hash = 23; + hash = hash * 31 + a; + hash = hash * 31 + b; + return hash; + } + + /// Does this start completely before other? Disjoint */ + bool startsBeforeDisjoint(Interval other) { + return a < other.a && b < other.a; + } + + /// Does this start at or before other? Nondisjoint */ + bool startsBeforeNonDisjoint(Interval other) { + return a <= other.a && b >= other.a; + } + + /// Does this.a start after other.b? 
May or may not be disjoint */ + bool startsAfter(Interval other) { + return a > other.a; + } + + /// Does this start completely after other? Disjoint */ + bool startsAfterDisjoint(Interval other) { + return a > other.b; + } + + /// Does this start after other? NonDisjoint */ + bool startsAfterNonDisjoint(Interval other) { + return a > other.a && a <= other.b; // this.b>=other.b implied + } + + /// Are both ranges disjoint? I.e., no overlap? */ + bool disjoint(Interval other) { + return startsBeforeDisjoint(other) || startsAfterDisjoint(other); + } + + /// Are two intervals adjacent such as 0..41 and 42..42? */ + bool adjacent(Interval other) { + return a == other.b + 1 || b == other.a - 1; + } + + bool properlyContains(Interval other) { + return other.a >= a && other.b <= b; + } + + /// Return the interval computed from combining this and other */ + Interval union(Interval other) { + return Interval.of(min(a, other.a), max(b, other.b)); + } + + /// Return the interval in common between this and o */ + Interval intersection(Interval other) { + return Interval.of(max(a, other.a), min(b, other.b)); + } + + /// Return the interval with elements from this not in other; + /// other must not be totally enclosed (properly contained) + /// within this, which would result in two disjoint intervals + /// instead of the single one returned by this method. + Interval differenceNotProperlyContained(Interval other) { + Interval diff; + // other.a to left of this.a (or same) + if (other.startsBeforeNonDisjoint(this)) { + diff = Interval.of(max(a, other.b + 1), b); + } + + // other.a to right of this.a + else if (other.startsAfterNonDisjoint(this)) { + diff = Interval.of(a, other.a - 1); + } + return diff; + } + + @override + String toString() { + return '$a..$b'; + } +} + +/// This class implements the [IntervalSet] backed by a sorted array of +/// non-overlapping intervals. 
It is particularly efficient for representing +/// large collections of numbers, where the majority of elements appear as part +/// of a sequential range of numbers that are all part of the set. For example, +/// the set { 1, 2, 3, 4, 7, 8 } may be represented as { [1, 4], [7, 8] }. +/// +///

+/// This class is able to represent sets containing any combination of values in +/// the range {@link int#MIN_VALUE} to {@link int#MAX_VALUE} +/// (inclusive).

+class IntervalSet { + static final IntervalSet COMPLETE_CHAR_SET = + IntervalSet.ofRange(Lexer.MIN_CHAR_VALUE, Lexer.MAX_CHAR_VALUE) + ..setReadonly(true); + + static final IntervalSet EMPTY_SET = IntervalSet([])..setReadonly(true); + + /// The list of sorted, disjoint intervals. */ + List intervals = []; + + bool readonly = false; + + IntervalSet([List intervals]) { + this.intervals = intervals ?? []; + } + + IntervalSet.ofSet(IntervalSet set) { + addAll(set); + } + +// TODO +// IntervalSet(int... els) { +//if ( els==null ) { +//intervals = new ArrayList(2); // most sets are 1 or 2 elements +//} +//else { +//intervals = new ArrayList(els.length); +//for (int e : els) add(e); +//} +//} + + /// Create a set with a single element, el. */ + + IntervalSet.ofOne(int a) { + addOne(a); + } + + /// Create a set with all ints within range [a..b] (inclusive) */ + static IntervalSet ofRange(int a, int b) { + final s = IntervalSet(); + s.addRange(a, b); + return s; + } + + void clear() { + if (readonly) throw StateError("can't alter readonly IntervalSet"); + intervals.clear(); + } + + /// Add a single element to the set. An isolated element is stored + /// as a range el..el. + + void addOne(int el) { + if (readonly) throw StateError("can't alter readonly IntervalSet"); + addRange(el, el); + } + + /// Add interval; i.e., add all integers from a to b to set. + /// If b<a, do nothing. + /// Keep list in sorted order (by left range value). + /// If overlap, combine ranges. For example, + /// If this is {1..5, 10..20}, adding 6..7 yields + /// {1..5, 6..7, 10..20}. Adding 4..8 yields {1..8, 10..20}. 
+ void addRange(int a, int b) { + add(Interval.of(a, b)); + } + + // copy on write so we can cache a..a intervals and sets of that + void add(Interval addition) { + if (readonly) throw StateError("can't alter readonly IntervalSet"); + //System.out.println("add "+addition+" to "+intervals.toString()); + if (addition.b < addition.a) { + return; + } + for (var i = 0; i < intervals.length; i++) { + final r = intervals[i]; + if (addition == r) { + return; + } + if (addition.adjacent(r) || !addition.disjoint(r)) { + // next to each other, make a single larger interval + final bigger = addition.union(r); + intervals[i] = bigger; + + // make sure we didn't just create an interval that + // should be merged with next interval in list + for (i++; i < intervals.length; i++) { + final next = intervals[i]; + if (!bigger.adjacent(next) && bigger.disjoint(next)) { + break; + } + + // if we bump up against or overlap next, merge + intervals.removeAt(i); // remove this one + intervals[i - 1] = + bigger.union(next); // set previous to 3 merged ones + } + return; + } + if (addition.startsBeforeDisjoint(r)) { + // insert before r + intervals.insert(i, addition); + return; + } + // if disjoint and after r, a future iteration will handle it + + } + // ok, must be after last interval (and disjoint from last interval) + // just add it + intervals.add(addition); + } + + /// combine all sets in the array returned the or'd value */ + static IntervalSet or(List sets) { + final r = IntervalSet(); + for (final s in sets) { + r.addAll(s); + } + return r; + } + + IntervalSet operator |(IntervalSet a) { + final o = IntervalSet(); + o.addAll(this); + o.addAll(a); + return o; + } + + IntervalSet addAll(IntervalSet set) { + if (set == null) { + return this; + } + + if (set is IntervalSet) { + final other = set; + // walk set and add each interval + final n = other.intervals.length; + for (var i = 0; i < n; i++) { + final I = other.intervals[i]; + addRange(I.a, I.b); + } + } else { + for (final value 
in set.toList()) { + addOne(value); + } + } + + return this; + } + + IntervalSet complementRange(int minElement, int maxElement) { + return complement(IntervalSet.ofRange(minElement, maxElement)); + } + + /// {@inheritDoc} */ + IntervalSet complement(IntervalSet vocabulary) { + if (vocabulary == null || vocabulary.isNil) { + return null; // nothing in common with null set + } + IntervalSet vocabularyIS; + if (vocabulary is IntervalSet) { + vocabularyIS = vocabulary; + } else { + vocabularyIS = IntervalSet(); + vocabularyIS.addAll(vocabulary); + } + + return vocabularyIS - this; + } + + IntervalSet operator -(IntervalSet a) { + if (a == null || a.isNil) { + return IntervalSet.ofSet(this); + } + + if (a is IntervalSet) { + return subtract(this, a); + } + + final other = IntervalSet(); + other.addAll(a); + return subtract(this, other); + } + + /// Compute the set difference between two interval sets. The specific + /// operation is {@code left - right}. If either of the input sets is + /// null, it is treated as though it was an empty set. 
+ static IntervalSet subtract(IntervalSet left, IntervalSet right) { + if (left == null || left.isNil) { + return IntervalSet(); + } + + final result = IntervalSet.ofSet(left); + if (right == null || right.isNil) { + // right set has no elements; just return the copy of the current set + return result; + } + + var resultI = 0; + var rightI = 0; + while ( + resultI < result.intervals.length && rightI < right.intervals.length) { + final resultInterval = result.intervals[resultI]; + final rightInterval = right.intervals[rightI]; + +// operation: (resultInterval - rightInterval) and update indexes + + if (rightInterval.b < resultInterval.a) { + rightI++; + continue; + } + + if (rightInterval.a > resultInterval.b) { + resultI++; + continue; + } + + Interval beforeCurrent; + Interval afterCurrent; + if (rightInterval.a > resultInterval.a) { + beforeCurrent = Interval(resultInterval.a, rightInterval.a - 1); + } + + if (rightInterval.b < resultInterval.b) { + afterCurrent = Interval(rightInterval.b + 1, resultInterval.b); + } + + if (beforeCurrent != null) { + if (afterCurrent != null) { +// split the current interval into two + result.intervals[resultI] = beforeCurrent; + result.intervals.insert(resultI + 1, afterCurrent); + resultI++; + rightI++; + continue; + } else { +// replace the current interval + result.intervals[resultI] = beforeCurrent; + resultI++; + continue; + } + } else { + if (afterCurrent != null) { +// replace the current interval + result.intervals[resultI] = afterCurrent; + rightI++; + continue; + } else { +// remove the current interval (thus no need to increment resultI) + result.intervals.removeAt(resultI); + continue; + } + } + } + +// If rightI reached right.intervals.length, no more intervals to subtract from result. +// If resultI reached result.intervals.length, we would be subtracting from an empty set. +// Either way, we are done. 
+ return result; + } + + /// {@inheritDoc} */ + IntervalSet operator +(IntervalSet other) { + if (other == null) { + //|| !(other is IntervalSet) ) { + return null; // nothing in common with null set + } + + final myIntervals = intervals; + final theirIntervals = (other).intervals; + IntervalSet intersection; + final mySize = myIntervals.length; + final theirSize = theirIntervals.length; + var i = 0; + var j = 0; +// iterate down both interval lists looking for nondisjoint intervals + while (i < mySize && j < theirSize) { + final mine = myIntervals[i]; + final theirs = theirIntervals[j]; +//System.out.println("mine="+mine+" and theirs="+theirs); + if (mine.startsBeforeDisjoint(theirs)) { +// move this iterator looking for interval that might overlap + i++; + } else if (theirs.startsBeforeDisjoint(mine)) { +// move other iterator looking for interval that might overlap + j++; + } else if (mine.properlyContains(theirs)) { +// overlap, add intersection, get next theirs + intersection ??= IntervalSet(); intersection.add(mine.intersection(theirs)); + j++; + } else if (theirs.properlyContains(mine)) { +// overlap, add intersection, get next mine + intersection ??= IntervalSet(); intersection.add(mine.intersection(theirs)); + i++; + } else if (!mine.disjoint(theirs)) { +// overlap, add intersection + intersection ??= IntervalSet(); intersection.add(mine.intersection(theirs)); +// Move the iterator of lower range [a..b], but not +// the upper range as it may contain elements that will collide +// with the next iterator. So, if mine=[0..115] and +// theirs=[115..200], then intersection is 115 and move mine +// but not theirs as theirs may collide with the next range +// in thisIter. 
+// move both iterators to next ranges + if (mine.startsAfterNonDisjoint(theirs)) { + j++; + } else if (theirs.startsAfterNonDisjoint(mine)) { + i++; + } + } + } + if (intersection == null) { + return IntervalSet(); + } + return intersection; + } + + /// {@inheritDoc} */ + + bool contains(int el) { + final n = intervals.length; + var l = 0; + var r = n - 1; +// Binary search for the element in the (sorted, +// disjoint) array of intervals. + while (l <= r) { + final m = ((l + r) / 2).floor(); + final I = intervals[m]; + final a = I.a; + final b = I.b; + if (b < el) { + l = m + 1; + } else if (a > el) { + r = m - 1; + } else { + // el >= a && el <= b + return true; + } + } + return false; + } + + /// {@inheritDoc} */ + + bool get isNil { + return intervals == null || intervals.isEmpty; + } + + /// Returns the maximum value contained in the set if not isNil(). + /// + /// @return the maximum value contained in the set. + /// @throws RuntimeException if set is empty + int get maxElement { + if (isNil) { + throw StateError('set is empty'); + } + return intervals.last.b; + } + + /// Returns the minimum value contained in the set if not isNil(). + /// + /// @return the minimum value contained in the set. + /// @throws RuntimeException if set is empty + int get minElement { + if (isNil) { + throw StateError('set is empty'); + } + + return intervals.first.a; + } + + @override + int get hashCode { + var hash = MurmurHash.initialize(); + for (final I in intervals) { + hash = MurmurHash.update(hash, I.a); + hash = MurmurHash.update(hash, I.b); + } + + hash = MurmurHash.finish(hash, intervals.length * 2); + return hash; + } + + /// Are two IntervalSets equal? Because all intervals are sorted + /// and disjoint, equals is a simple linear walk over both lists + /// to make sure they are the same. Interval.equals() is used + /// by the List.equals() method to check the ranges. 
+ + @override + bool operator ==(Object obj) { + if (obj == null || !(obj is IntervalSet)) { + return false; + } + IntervalSet other = obj; + return ListEquality().equals(intervals, other?.intervals); + } + + @override + String toString({bool elemAreChar = false, Vocabulary vocabulary}) { + if (intervals == null || intervals.isEmpty) { + return '{}'; + } + + final elemStr = intervals.map((Interval I) { + final buf = StringBuffer(); + final a = I.a; + final b = I.b; + if (a == b) { + if (vocabulary != null) { + buf.write(elementName(vocabulary, a)); + } else { + if (a == Token.EOF) { + buf.write(''); + } else if (elemAreChar) { + buf.write("'"); + buf.writeCharCode(a); + buf.write("'"); + } else { + buf.write(a); + } + } + } else { + if (vocabulary != null) { + for (var i = a; i <= b; i++) { + if (i > a) buf.write(', '); + buf.write(elementName(vocabulary, i)); + } + } else { + if (elemAreChar) { + buf.write("'"); + buf.writeCharCode(a); + buf.write("'..'"); + buf.writeCharCode(b); + buf.write("'"); + } else { + buf.write(a); + buf.write('..'); + buf.write(b); + } + } + } + return buf; + }).join(', '); + if (length > 1) { + return '{$elemStr}'; + } + return elemStr; + } + + String elementName(Vocabulary vocabulary, int a) { + if (a == Token.EOF) { + return ''; + } else if (a == Token.EPSILON) { + return ''; + } else { + return vocabulary.getDisplayName(a); + } + } + + int get length { + var n = 0; + final numIntervals = intervals.length; + if (numIntervals == 1) { + final firstInterval = intervals[0]; + return firstInterval.b - firstInterval.a + 1; + } + for (var i = 0; i < numIntervals; i++) { + final I = intervals[i]; + n += (I.b - I.a + 1); + } + return n; + } + + List toIntegerList() { + final values = List(length); + final n = intervals.length; + for (var i = 0; i < n; i++) { + final I = intervals[i]; + final a = I.a; + final b = I.b; + for (var v = a; v <= b; v++) { + values.add(v); + } + } + return values; + } + + List toList() { + final values = []; + final 
n = intervals.length; + for (var i = 0; i < n; i++) { + final I = intervals[i]; + final a = I.a; + final b = I.b; + for (var v = a; v <= b; v++) { + values.add(v); + } + } + return values; + } + + Set toSet() { + final s = {}; + for (final I in intervals) { + final a = I.a; + final b = I.b; + for (var v = a; v <= b; v++) { + s.add(v); + } + } + return s; + } + + /// Get the ith element of ordered set. Used only by RandomPhrase so + /// don't bother to implement if you're not doing that for a new + /// ANTLR code gen target. + int get(int i) { + final n = intervals.length; + var index = 0; + for (var j = 0; j < n; j++) { + final I = intervals[j]; + final a = I.a; + final b = I.b; + for (var v = a; v <= b; v++) { + if (index == i) { + return v; + } + index++; + } + } + return -1; + } + + void remove(int el) { + if (readonly) throw StateError("can't alter readonly IntervalSet"); + final n = intervals.length; + for (var i = 0; i < n; i++) { + final I = intervals[i]; + final a = I.a; + final b = I.b; + if (el < a) { + break; // list is sorted and el is before this interval; not here + } +// if whole interval x..x, rm + if (el == a && el == b) { + intervals.removeAt(i); + break; + } +// if on left edge x..b, adjust left + if (el == a) { + I.a++; + break; + } +// if on right edge a..x, adjust right + if (el == b) { + I.b--; + break; + } +// if in middle a..x..b, split interval + if (el > a && el < b) { + // found in this interval + final oldb = I.b; + I.b = el - 1; // [a..x-1] + addRange(el + 1, oldb); // add [x+1..b] + } + } + } + + bool isReadonly() { + return readonly; + } + + void setReadonly(bool readonly) { + if (this.readonly && !readonly) { + throw StateError("can't alter readonly IntervalSet"); + } + this.readonly = readonly; + } +} diff --git a/runtime/Dart/lib/src/lexer.dart b/runtime/Dart/lib/src/lexer.dart new file mode 100644 index 000000000..90dcc3660 --- /dev/null +++ b/runtime/Dart/lib/src/lexer.dart @@ -0,0 +1,343 @@ +/* + * Copyright (c) 2012-2017 The 
ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ + +import 'dart:developer'; + +import 'atn/atn.dart'; +import 'error/error.dart'; +import 'input_stream.dart'; +import 'interval_set.dart'; +import 'misc/pair.dart'; +import 'recognizer.dart'; +import 'token.dart'; +import 'token_factory.dart'; +import 'token_source.dart'; +import 'util/utils.dart'; + +abstract class Lexer extends Recognizer + implements TokenSource { + static final DEFAULT_MODE = 0; + static final MORE = -2; + static final SKIP = -3; + + static final DEFAULT_TOKEN_CHANNEL = Token.DEFAULT_CHANNEL; + static final HIDDEN = Token.HIDDEN_CHANNEL; + static final MIN_CHAR_VALUE = 0x0000; + static final MAX_CHAR_VALUE = 0x10FFFF; + + CharStream _input; + + Pair _tokenFactorySourcePair; + @override + TokenFactory tokenFactory = CommonTokenFactory.DEFAULT; + + /// The goal of all lexer rules/methods is to create a token object. + /// this is an instance variable as multiple rules may collaborate to + /// create a single token. nextToken will return this object after + /// matching lexer rule(s). If you subclass to allow multiple token + /// emissions, then set this to the last token to be matched or + /// something nonnull so that the auto token emit mechanism will not + /// emit another token. + Token _token; + + /// What character index in the stream did the current token start at? + /// Needed, for example, to get the text for current token. Set at + /// the start of nextToken. + int tokenStartCharIndex = -1; + + /// The line on which the first character of the token resides + int tokenStartLine = -1; + + /// The character position of first character within the line + int tokenStartCharPositionInLine = -1; + + /// Once we see EOF on char stream, next token will be EOF. + /// If you have DONE : EOF ; then you see DONE EOF. 
+ bool _hitEOF = false; + + /// The channel number for the current token + int channel = Token.DEFAULT_CHANNEL; + + /// The token type for the current token + int type = Token.INVALID_TYPE; + + final List _modeStack = []; + int mode_ = Lexer.DEFAULT_MODE; + + /// You can set the text for the current token to override what is in + /// the input char buffer. Use setText() or can set this instance var. + String _text; + + Lexer(CharStream input) { + _input = input; + _tokenFactorySourcePair = Pair(this, input); + } + + void reset() { + // wack Lexer state variables + if (_input != null) { + _input.seek(0); // rewind the input + } + _token = null; + type = Token.INVALID_TYPE; + channel = Token.DEFAULT_CHANNEL; + tokenStartCharIndex = -1; + tokenStartCharPositionInLine = -1; + tokenStartLine = -1; + _text = null; + + _hitEOF = false; + mode_ = Lexer.DEFAULT_MODE; + _modeStack.clear(); + + interpreter.reset(); + } + + /// Return a token from this source; i.e., match a token on the char stream. 
+ @override + Token nextToken() { + if (_input == null) { + throw StateError('nextToken requires a non-null input stream.'); + } + + // Mark start location in char stream so unbuffered streams are + // guaranteed at least have text of current token + final tokenStartMarker = _input.mark(); + try { + outer: + while (true) { + if (_hitEOF) { + emitEOF(); + return _token; + } + + _token = null; + channel = Token.DEFAULT_CHANNEL; + tokenStartCharIndex = _input.index; + tokenStartCharPositionInLine = interpreter.charPositionInLine; + tokenStartLine = interpreter.line; + _text = null; + do { + type = Token.INVALID_TYPE; +// System.out.println("nextToken line "+tokenStartLine+" at "+((char)input.LA(1))+ +// " in mode "+mode+ +// " at index "+input.index()); + int ttype; + try { + ttype = interpreter.match(_input, mode_); + } on LexerNoViableAltException catch (e) { + notifyListeners(e); // report error + recover(e); + ttype = SKIP; + } + if (_input.LA(1) == IntStream.EOF) { + _hitEOF = true; + } + if (type == Token.INVALID_TYPE) type = ttype; + if (type == SKIP) { + continue outer; + } + } while (type == MORE); + if (_token == null) emit(); + return _token; + } + } finally { + // make sure we release marker after match or + // unbuffered char stream will keep buffering + _input.release(tokenStartMarker); + } + } + + /// Instruct the lexer to skip creating a token for current lexer rule + /// and look for another token. nextToken() knows to keep looking when + /// a lexer rule finishes with token set to SKIP_TOKEN. Recall that + /// if token==null at end of any token rule, it creates one for you + /// and emits it. 
+ void skip() { + type = Lexer.SKIP; + } + + void more() { + type = Lexer.MORE; + } + + void mode(int m) { + mode_ = m; + } + + void pushMode(int m) { + if (LexerATNSimulator.debug) { + log('pushMode $m'); + } + _modeStack.add(mode_); + mode(m); + } + + int popMode() { + if (_modeStack.isEmpty) throw StateError(''); + if (LexerATNSimulator.debug) log('popMode back to ${_modeStack.last}'); + mode(_modeStack.removeLast()); + return mode_; + } + + /// Set the char stream and reset the lexer + @override + set inputStream(IntStream input) { + _input = null; + _tokenFactorySourcePair = + Pair(this, _input); + reset(); + _input = input; + _tokenFactorySourcePair = + Pair(this, _input); + } + + @override + String get sourceName { + return _input.sourceName; + } + + @override + CharStream get inputStream { + return _input; + } + + /// By default does not support multiple emits per nextToken invocation + /// for efficiency reasons. Subclass and override this method, nextToken, + /// and getToken (to push tokens into a list and pull from that list + /// rather than a single variable as this implementation does). + void emitToken(Token token) { + //System.err.println("emit "+token); + _token = token; + } + + /// The standard method called to automatically emit a token at the + /// outermost lexical rule. The token object should point into the + /// char buffer start..stop. If there is a text override in 'text', + /// use that to set the token's text. Override this method to emit + /// custom Token objects or provide a new factory. 
+ Token emit() { + final t = tokenFactory.create( + type, + _text, + _tokenFactorySourcePair, + channel, + tokenStartCharIndex, + charIndex - 1, + tokenStartLine, + tokenStartCharPositionInLine); + emitToken(t); + return t; + } + + Token emitEOF() { + final cpos = charPositionInLine; + final eof = tokenFactory.create(Token.EOF, null, _tokenFactorySourcePair, + Token.DEFAULT_CHANNEL, _input.index, _input.index - 1, line, cpos); + emitToken(eof); + return eof; + } + + @override + int get charPositionInLine { + return interpreter.charPositionInLine; + } + + @override + int get line { + return interpreter.line; + } + + set line(int line) { + interpreter.line = line; + } + + set charPositionInLine(int charPositionInLine) { + interpreter.charPositionInLine = charPositionInLine; + } + + /// What is the index of the current character of lookahead? + int get charIndex { + return _input.index; + } + + /// Return the text matched so far for the current token or any + /// text override. + String get text { + if (_text != null) { + return _text; + } + return interpreter.getText(_input); + } + + /// Set the complete text of this token; it wipes any previous + /// changes to the text. + set text(String text) { + _text = text; + } + + /// Override if emitting multiple tokens. + Token get token { + return _token; + } + + void setToken(Token _token) { + this._token = _token; + } + + List get channelNames => null; + + List get modeNames => null; + + /// Return a list of all Token objects in input char stream. + /// Forces load of all tokens. Does not include EOF token. 
+ List get allTokens { + final tokens = []; + var t = nextToken(); + while (t.type != Token.EOF) { + tokens.add(t); + t = nextToken(); + } + return tokens; + } + + void notifyListeners(LexerNoViableAltException e) { + final text = + _input.getText(Interval.of(tokenStartCharIndex, _input.index)); + final msg = "token recognition error at: '" + getErrorDisplay(text) + "'"; + + final listener = errorListenerDispatch; + listener.syntaxError( + this, null, tokenStartLine, tokenStartCharPositionInLine, msg, e); + } + + String getErrorDisplay(String s) { + return escapeWhitespace(s); + } + + String getCharErrorDisplay(int c) { + final s = getErrorDisplay(String.fromCharCode(c)); + return "'$s'"; + } + + /// Lexers can normally match any char in it's vocabulary after matching + /// a token, so do the easy thing and just kill a character and hope + /// it all works out. You can instead use the rule invocation stack + /// to do sophisticated error recovery if you are in a fragment rule. + void recover(RecognitionException re) { + if (re is LexerNoViableAltException) { + if (_input.LA(1) != IntStream.EOF) { + // skip a char and try again + interpreter.consume(_input); + } + } else { + //System.out.println("consuming char "+(char)input.LA(1)+" during recovery"); + //re.printStackTrace(); + // TODO: Do we lose character or line position information? + _input.consume(); + } + } +} diff --git a/runtime/Dart/lib/src/ll1_analyzer.dart b/runtime/Dart/lib/src/ll1_analyzer.dart new file mode 100644 index 000000000..773fa8213 --- /dev/null +++ b/runtime/Dart/lib/src/ll1_analyzer.dart @@ -0,0 +1,204 @@ +/* + * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. 
+ */ + +import './util/bit_set.dart'; +import 'atn/atn.dart'; +import 'interval_set.dart'; +import 'prediction_context.dart'; +import 'rule_context.dart'; +import 'token.dart'; +import 'util/bit_set.dart'; + +class LL1Analyzer { + /// Special value added to the lookahead sets to indicate that we hit + /// a predicate during analysis if {@code seeThruPreds==false}. + static final int HIT_PRED = Token.INVALID_TYPE; + + final ATN atn; + + LL1Analyzer(this.atn); + + /// Calculates the SLL(1) expected lookahead set for each outgoing transition + /// of an [ATNState]. The returned array has one element for each + /// outgoing transition in [s]. If the closure from transition + /// i leads to a semantic predicate before matching a symbol, the + /// element at index i of the result will be null. + /// + /// @param s the ATN state + /// @return the expected symbols for each outgoing transition of [s]. + List getDecisionLookahead(ATNState s) { +// System.out.println("LOOK("+s.stateNumber+")"); + if (s == null) { + return null; + } + + final look = List(s.numberOfTransitions); + for (var alt = 0; alt < s.numberOfTransitions; alt++) { + look[alt] = IntervalSet(); + final lookBusy = {}; + final seeThruPreds = false; // fail to get lookahead upon pred + _LOOK(s.transition(alt).target, null, PredictionContext.EMPTY, look[alt], + lookBusy, BitSet(), seeThruPreds, false); + // Wipe out lookahead for this alternative if we found nothing + // or we had a predicate when we !seeThruPreds + if (look[alt].length == 0 || look[alt].contains(HIT_PRED)) { + look[alt] = null; + } + } + return look; + } + + /// Compute set of tokens that can follow [s] in the ATN in the + /// specified [ctx]. + /// + ///

If [ctx] is null and the end of the rule containing + /// [s] is reached, {@link Token#EPSILON} is added to the result set. + /// If [ctx] is not null and the end of the outermost rule is + /// reached, {@link Token#EOF} is added to the result set.

+ /// + /// @param s the ATN state + /// @param stopState the ATN state to stop at. This can be a + /// [BlockEndState] to detect epsilon paths through a closure. + /// @param ctx the complete parser context, or null if the context + /// should be ignored + /// + /// @return The set of tokens that can follow [s] in the ATN in the + /// specified [ctx]. + + IntervalSet LOOK(ATNState s, RuleContext ctx, [ATNState stopState]) { + final r = IntervalSet(); + final seeThruPreds = true; // ignore preds; get all lookahead + final lookContext = + ctx != null ? PredictionContext.fromRuleContext(s.atn, ctx) : null; + _LOOK( + s, stopState, lookContext, r, {}, BitSet(), seeThruPreds, true); + return r; + } + + /// Compute set of tokens that can follow [s] in the ATN in the + /// specified [ctx]. + /// + ///

If [ctx] is null and [stopState] or the end of the + /// rule containing [s] is reached, {@link Token#EPSILON} is added to + /// the result set. If [ctx] is not null and [addEOF] is + /// [true] and [stopState] or the end of the outermost rule is + /// reached, {@link Token#EOF} is added to the result set.

+ /// + /// @param s the ATN state. + /// @param stopState the ATN state to stop at. This can be a + /// [BlockEndState] to detect epsilon paths through a closure. + /// @param ctx The outer context, or null if the outer context should + /// not be used. + /// @param look The result lookahead set. + /// @param lookBusy A set used for preventing epsilon closures in the ATN + /// from causing a stack overflow. Outside code should pass + /// {@code new HashSet} for this argument. + /// @param calledRuleStack A set used for preventing left recursion in the + /// ATN from causing a stack overflow. Outside code should pass + /// {@code new BitSet()} for this argument. + /// @param seeThruPreds [true] to treat semantic predicates as + /// implicitly [true] and "see through them", otherwise [false] + /// to treat semantic predicates as opaque and add {@link #HIT_PRED} to the + /// result if one is encountered. + /// @param addEOF Add {@link Token#EOF} to the result if the end of the + /// outermost context is reached. This parameter has no effect if [ctx] + /// is null. 
+ void _LOOK( + ATNState s, + ATNState stopState, + PredictionContext ctx, + IntervalSet look, + Set lookBusy, + BitSet calledRuleStack, + bool seeThruPreds, + bool addEOF) { +// System.out.println("_LOOK("+s.stateNumber+", ctx="+ctx); + final c = ATNConfig(s, 0, ctx); + if (!lookBusy.add(c)) return; + + if (s == stopState) { + if (ctx == null) { + look.addOne(Token.EPSILON); + return; + } else if (ctx.isEmpty && addEOF) { + look.addOne(Token.EOF); + return; + } + } + + if (s is RuleStopState) { + if (ctx == null) { + look.addOne(Token.EPSILON); + return; + } else if (ctx.isEmpty && addEOF) { + look.addOne(Token.EOF); + return; + } + + if (ctx != PredictionContext.EMPTY) { + // run thru all possible stack tops in ctx + final removed = calledRuleStack[s.ruleIndex]; + try { + calledRuleStack.clear(s.ruleIndex); + for (var i = 0; i < ctx.length; i++) { + final returnState = atn.states[ctx.getReturnState(i)]; +// System.out.println("popping back to "+retState); + _LOOK(returnState, stopState, ctx.getParent(i), look, lookBusy, + calledRuleStack, seeThruPreds, addEOF); + } + } finally { + if (removed) { + calledRuleStack.set(s.ruleIndex); + } + } + return; + } + } + + for (var i = 0; i < s.numberOfTransitions; i++) { + final t = s.transition(i); + if (t is RuleTransition) { + if (calledRuleStack[t.target.ruleIndex]) { + continue; + } + + PredictionContext newContext = + SingletonPredictionContext.create(ctx, t.followState.stateNumber); + + try { + calledRuleStack.set(t.target.ruleIndex); + _LOOK(t.target, stopState, newContext, look, lookBusy, + calledRuleStack, seeThruPreds, addEOF); + } finally { + calledRuleStack.clear(t.target.ruleIndex); + } + } else if (t is AbstractPredicateTransition) { + if (seeThruPreds) { + _LOOK(t.target, stopState, ctx, look, lookBusy, calledRuleStack, + seeThruPreds, addEOF); + } else { + look.addOne(HIT_PRED); + } + } else if (t.isEpsilon) { + _LOOK(t.target, stopState, ctx, look, lookBusy, calledRuleStack, + seeThruPreds, addEOF); + } 
else if (t is WildcardTransition) { + look.addAll( + IntervalSet.ofRange(Token.MIN_USER_TOKEN_TYPE, atn.maxTokenType)); + } else { +// System.out.println("adding "+ t); + var set = t.label; + if (set != null) { + if (t is NotSetTransition) { + set = set.complement(IntervalSet.ofRange( + Token.MIN_USER_TOKEN_TYPE, atn.maxTokenType)); + } + look.addAll(set); + } + } + } + } +} diff --git a/runtime/Dart/lib/src/misc/multi_map.dart b/runtime/Dart/lib/src/misc/multi_map.dart new file mode 100644 index 000000000..274eda939 --- /dev/null +++ b/runtime/Dart/lib/src/misc/multi_map.dart @@ -0,0 +1,32 @@ +/* + * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ + +import 'package:collection/collection.dart'; + +import 'pair.dart'; + +class MultiMap extends DelegatingMap> { + MultiMap() : super({}); + + void put(K key, V value) { + var elementsForKey = this[key]; + if (elementsForKey == null) { + elementsForKey = []; + this[key] = elementsForKey; + } + elementsForKey.add(value); + } + + List> get pairs { + final pairs = >[]; + for (var key in keys) { + for (var value in this[key]) { + pairs.add(Pair(key, value)); + } + } + return pairs; + } +} diff --git a/runtime/Dart/lib/src/misc/pair.dart b/runtime/Dart/lib/src/misc/pair.dart new file mode 100644 index 000000000..6d3e0d551 --- /dev/null +++ b/runtime/Dart/lib/src/misc/pair.dart @@ -0,0 +1,34 @@ +/* + * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. 
+ */ + +import '../util/murmur_hash.dart'; + +class Pair { + final A a; + final B b; + + const Pair(this.a, this.b); + + @override + bool operator ==(other) { + return identical(this, other) || other is Pair && a == other.a && b == other.b; + } + + @override + String toString() { + return '($a, $b)'; + } + + @override + int get hashCode { + MurmurHash.initialize(); + + var hash = MurmurHash.initialize(); + hash = MurmurHash.update(hash, a); + hash = MurmurHash.update(hash, b); + return MurmurHash.finish(hash, 2); + } +} diff --git a/runtime/Dart/lib/src/parser.dart b/runtime/Dart/lib/src/parser.dart new file mode 100644 index 000000000..bc9c7001d --- /dev/null +++ b/runtime/Dart/lib/src/parser.dart @@ -0,0 +1,777 @@ +/* + * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ + +import 'dart:io'; + +import 'atn/atn.dart'; +import 'error/error.dart'; +import 'input_stream.dart'; +import 'interval_set.dart'; +import 'lexer.dart'; +import 'parser_rule_context.dart'; +import 'recognizer.dart'; +import 'rule_context.dart'; +import 'token.dart'; +import 'token_factory.dart'; +import 'token_stream.dart'; +import 'tree/tree.dart'; + +/// This is all the parsing support code essentially; most of it is error recovery stuff. */ +abstract class Parser extends Recognizer { + /// This field maps from the serialized ATN string to the deserialized [ATN] with + /// bypass alternatives. + /// + /// @see ATNDeserializationOptions#isGenerateRuleBypassTransitions() + static final Map bypassAltsAtnCache = {}; + + /// The error handling strategy for the parser. The default value is a new + /// instance of [DefaultErrorStrategy]. + /// + /// @see #getErrorHandler + /// @see #setErrorHandler + + ErrorStrategy errorHandler = DefaultErrorStrategy(); + + /// The input stream. 
+ /// + /// @see #getInputStream + /// @see #setInputStream + TokenStream _input; + + final List _precedenceStack = [0]; + + /// The [ParserRuleContext] object for the currently executing rule. + /// This is always non-null during the parsing process. + ParserRuleContext _ctx; + + /// Specifies whether or not the parser should construct a parse tree during + /// the parsing process. The default value is [true]. + /// + /// @see #getBuildParseTree + /// @see #setBuildParseTree + bool _buildParseTrees = true; + + /// When {@link #setTrace}{@code (true)} is called, a reference to the + /// [TraceListener] is stored here so it can be easily removed in a + /// later call to {@link #setTrace}{@code (false)}. The listener itself is + /// implemented as a parser listener so this field is not directly used by + /// other parser methods. + TraceListener _tracer; + + /// The list of [ParseTreeListener] listeners registered to receive + /// events during the parse. + /// + /// @see #addParseListener + List _parseListeners; + + /// The number of syntax errors reported during parsing. This value is + /// incremented each time {@link #notifyErrorListeners} is called. + int _syntaxErrors = 0; + + /// Indicates parser has match()ed EOF token. See {@link #exitRule()}. */ + bool matchedEOF = false; + + Parser(TokenStream input) { + inputStream = input; + } + + /// reset the parser's state */ + void reset() { + if (inputStream != null) inputStream.seek(0); + errorHandler.reset(this); + _ctx = null; + _syntaxErrors = 0; + matchedEOF = false; + setTrace(false); + _precedenceStack.clear(); + _precedenceStack.add(0); + if (interpreter != null) { + interpreter.reset(); + } + } + + /// Match current input symbol against [ttype]. If the symbol type + /// matches, {@link ANTLRErrorStrategy#reportMatch} and {@link #consume} are + /// called to complete the match process. + /// + ///

If the symbol type does not match, + /// {@link ANTLRErrorStrategy#recoverInline} is called on the current error + /// strategy to attempt recovery. If {@link #getBuildParseTree} is + /// [true] and the token index of the symbol returned by + /// {@link ANTLRErrorStrategy#recoverInline} is -1, the symbol is added to + /// the parse tree by calling {@link #createErrorNode(ParserRuleContext, Token)} then + /// {@link ParserRuleContext#addErrorNode(ErrorNode)}.

+ /// + /// @param ttype the token type to match + /// @return the matched symbol + /// @throws RecognitionException if the current input symbol did not match + /// [ttype] and the error strategy could not recover from the + /// mismatched symbol + Token match(int ttype) { + var t = currentToken; + if (t.type == ttype) { + if (ttype == Token.EOF) { + matchedEOF = true; + } + errorHandler.reportMatch(this); + consume(); + } else { + t = errorHandler.recoverInline(this); + if (_buildParseTrees && t.tokenIndex == -1) { + // we must have conjured up a new token during single token insertion + // if it's not the current symbol + _ctx.addErrorNode(createErrorNode(_ctx, t)); + } + } + return t; + } + + /// Match current input symbol as a wildcard. If the symbol type matches + /// (i.e. has a value greater than 0), {@link ANTLRErrorStrategy#reportMatch} + /// and {@link #consume} are called to complete the match process. + /// + ///

If the symbol type does not match, + /// {@link ANTLRErrorStrategy#recoverInline} is called on the current error + /// strategy to attempt recovery. If {@link #getBuildParseTree} is + /// [true] and the token index of the symbol returned by + /// {@link ANTLRErrorStrategy#recoverInline} is -1, the symbol is added to + /// the parse tree by calling {@link Parser#createErrorNode(ParserRuleContext, Token)}. then + /// {@link ParserRuleContext#addErrorNode(ErrorNode)}

+ /// + /// @return the matched symbol + /// @throws RecognitionException if the current input symbol did not match + /// a wildcard and the error strategy could not recover from the mismatched + /// symbol + Token matchWildcard() { + var t = currentToken; + if (t.type > 0) { + errorHandler.reportMatch(this); + consume(); + } else { + t = errorHandler.recoverInline(this); + if (_buildParseTrees && t.tokenIndex == -1) { + // we must have conjured up a new token during single token insertion + // if it's not the current symbol + _ctx.addErrorNode(createErrorNode(_ctx, t)); + } + } + + return t; + } + + /// Track the [ParserRuleContext] objects during the parse and hook + /// them up using the {@link ParserRuleContext#children} list so that it + /// forms a parse tree. The [ParserRuleContext] returned from the start + /// rule represents the root of the parse tree. + /// + ///

Note that if we are not building parse trees, rule contexts only point + /// upwards. When a rule exits, it returns the context but that gets garbage + /// collected if nobody holds a reference. It points upwards but nobody + /// points at it.

+ /// + ///

When we build parse trees, we are adding all of these contexts to + /// {@link ParserRuleContext#children} list. Contexts are then not candidates + /// for garbage collection.

+ set buildParseTree(bool buildParseTrees) { + _buildParseTrees = buildParseTrees; + } + + /// Gets whether or not a complete parse tree will be constructed while + /// parsing. This property is [true] for a newly constructed parser. + /// + /// @return [true] if a complete parse tree will be constructed while + /// parsing, otherwise [false] + bool get buildParseTree { + return _buildParseTrees; + } + + /// Trim the internal lists of the parse tree during parsing to conserve memory. + /// This property is set to [false] by default for a newly constructed parser. + /// + /// @param trimParseTrees [true] to trim the capacity of the {@link ParserRuleContext#children} + /// list to its size after a rule is parsed. + set trimParseTree(bool trimParseTrees) { + if (trimParseTrees) { + if (trimParseTree) return; + addParseListener(TrimToSizeListener.INSTANCE); + } else { + removeParseListener(TrimToSizeListener.INSTANCE); + } + } + + /// @return [true] if the {@link ParserRuleContext#children} list is trimmed + /// using the default {@link Parser.TrimToSizeListener} during the parse process. + bool get trimParseTree { + return parseListeners.contains(TrimToSizeListener.INSTANCE); + } + + List get parseListeners => _parseListeners; + + /// Registers [listener] to receive events during the parsing process. + /// + ///

To support output-preserving grammar transformations (including but not + /// limited to left-recursion removal, automated left-factoring, and + /// optimized code generation), calls to listener methods during the parse + /// may differ substantially from calls made by + /// {@link ParseTreeWalker#DEFAULT} used after the parse is complete. In + /// particular, rule entry and exit events may occur in a different order + /// during the parse than after the parser. In addition, calls to certain + /// rule entry methods may be omitted.

+ /// + ///

With the following specific exceptions, calls to listener events are + /// deterministic, i.e. for identical input the calls to listener + /// methods will be the same.

+ /// + ///
    + ///
  • Alterations to the grammar used to generate code may change the + /// behavior of the listener calls.
  • + ///
  • Alterations to the command line options passed to ANTLR 4 when + /// generating the parser may change the behavior of the listener calls.
  • + ///
  • Changing the version of the ANTLR Tool used to generate the parser + /// may change the behavior of the listener calls.
  • + ///
+ /// + /// @param listener the listener to add + /// + /// @throws NullPointerException if {@code} listener is null + void addParseListener(ParseTreeListener listener) { + if (listener == null) { + throw ArgumentError.notNull('listener'); + } + + _parseListeners ??= []; + + _parseListeners.add(listener); + } + + /// Remove [listener] from the list of parse listeners. + /// + ///

If [listener] is null or has not been added as a parse + /// listener, this method does nothing.

+ /// + /// @see #addParseListener + /// + /// @param listener the listener to remove + void removeParseListener(ParseTreeListener listener) { + if (_parseListeners != null) { + if (_parseListeners.remove(listener)) { + if (_parseListeners.isEmpty) { + _parseListeners = null; + } + } + } + } + + /// Remove all parse listeners. + /// + /// @see #addParseListener + void removeParseListeners() { + _parseListeners = null; + } + + /// Notify any parse listeners of an enter rule event. + /// + /// @see #addParseListener + void triggerEnterRuleEvent() { + for (var listener in _parseListeners) { + listener.enterEveryRule(_ctx); + _ctx.enterRule(listener); + } + } + + /// Notify any parse listeners of an exit rule event. + /// + /// @see #addParseListener + void triggerExitRuleEvent() { + // reverse order walk of listeners + for (var i = _parseListeners.length - 1; i >= 0; i--) { + final listener = _parseListeners[i]; + _ctx.exitRule(listener); + listener.exitEveryRule(_ctx); + } + } + + /// Gets the number of syntax errors reported during parsing. This value is + /// incremented each time {@link #notifyErrorListeners} is called. + /// + /// @see #notifyErrorListeners + int get numberOfSyntaxErrors { + return _syntaxErrors; + } + + @override + TokenFactory get tokenFactory { + return _input.tokenSource.tokenFactory; + } + + /// Tell our token source and error strategy about a new way to create tokens. */ + @override + set tokenFactory(TokenFactory factory) { + _input.tokenSource.tokenFactory = factory; + } + + /// The ATN with bypass alternatives is expensive to create so we create it + /// lazily. + /// + /// @throws UnsupportedOperationException if the current parser does not + /// implement the {@link #getSerializedATN()} method. 
+ ATN get ATNWithBypassAlts { + final serializedAtn = serializedATN; + if (serializedAtn == null) { + throw UnsupportedError( + 'The current parser does not support an ATN with bypass alternatives.'); + } + + var result = bypassAltsAtnCache[serializedAtn]; + if (result == null) { + final deserializationOptions = + ATNDeserializationOptions(); + deserializationOptions.setGenerateRuleBypassTransitions(true); + result = ATNDeserializer(deserializationOptions) + .deserialize(serializedAtn.codeUnits); + bypassAltsAtnCache[serializedAtn] = result; + } + + return result; + } + + /// The preferred method of getting a tree pattern. For example, here's a + /// sample use: + /// + ///
+  /// ParseTree t = parser.expr();
+  /// ParseTreePattern p = parser.compileParseTreePattern("<ID>+0", MyParser.RULE_expr);
+  /// ParseTreeMatch m = p.match(t);
+  /// String id = m.get("ID");
+  /// 
+ ParseTreePattern compileParseTreePattern(String pattern, int patternRuleIndex, + [Lexer lexer]) { + if (lexer == null) { + final tokenSource = tokenStream?.tokenSource; + if (tokenSource == null || !(tokenSource is Lexer)) { + throw UnsupportedError("Parser can't discover a lexer to use"); + } + lexer = tokenSource; + } + + final m = ParseTreePatternMatcher(lexer, this); + return m.compile(pattern, patternRuleIndex); + } + + @override + TokenStream get inputStream => tokenStream; + + @override + set inputStream(IntStream input) { + setTokenStream(input); + } + + TokenStream get tokenStream => _input; + + /// Set the token stream and reset the parser. */ + void setTokenStream(TokenStream input) { + _input = null; + reset(); + _input = input; + } + + /// Match needs to return the current input symbol, which gets put + /// into the label for the associated token ref; e.g., x=ID. + + Token get currentToken { + return _input.LT(1); + } + + void notifyErrorListeners(String msg, + [Token offendingToken, RecognitionException e]) { + offendingToken = offendingToken ?? currentToken; + _syntaxErrors++; + var line = -1; + var charPositionInLine = -1; + line = offendingToken.line; + charPositionInLine = offendingToken.charPositionInLine; + + final listener = errorListenerDispatch; + listener.syntaxError( + this, offendingToken, line, charPositionInLine, msg, e); + } + + /// Consume and return the {@linkplain #getCurrentToken current symbol}. + /// + ///

E.g., given the following input with [A] being the current + /// lookahead symbol, this function moves the cursor to [B] and returns + /// [A].

+ /// + ///
+  ///  A B
+  ///  ^
+  /// 
+ /// + /// If the parser is not in error recovery mode, the consumed symbol is added + /// to the parse tree using {@link ParserRuleContext#addChild}, and + /// {@link ParseTreeListener#visitTerminal} is called on any parse listeners. + /// If the parser is in error recovery mode, the consumed symbol is + /// added to the parse tree using {@link #createErrorNode(ParserRuleContext, Token)} then + /// {@link ParserRuleContext#addErrorNode(ErrorNode)} and + /// {@link ParseTreeListener#visitErrorNode} is called on any parse + /// listeners. + Token consume() { + final o = currentToken; + if (o.type != IntStream.EOF) { + inputStream.consume(); + } + final hasListener = _parseListeners != null && _parseListeners.isNotEmpty; + if (_buildParseTrees || hasListener) { + if (errorHandler.inErrorRecoveryMode(this)) { + final node = _ctx.addErrorNode(createErrorNode(_ctx, o)); + if (_parseListeners != null) { + for (var listener in _parseListeners) { + listener.visitErrorNode(node); + } + } + } else { + final node = _ctx.addChild(createTerminalNode(_ctx, o)); + if (_parseListeners != null) { + for (var listener in _parseListeners) { + listener.visitTerminal(node); + } + } + } + } + return o; + } + + /// How to create a token leaf node associated with a parent. + /// Typically, the terminal node to create is not a function of the parent. + /// + /// @since 4.7 + TerminalNode createTerminalNode(ParserRuleContext parent, Token t) { + return TerminalNodeImpl(t); + } + + /// How to create an error node, given a token, associated with a parent. + /// Typically, the error node to create is not a function of the parent. + /// + /// @since 4.7 + ErrorNode createErrorNode(ParserRuleContext parent, Token t) { + return ErrorNodeImpl(t); + } + + void addContextToParseTree() { + final parent = _ctx.parent; + // add current context to parent if we have a parent + if (parent != null) { + parent.addAnyChild(_ctx); + } + } + + /// Always called by generated parsers upon entry to a rule. 
Access field + /// {@link #_ctx} get the current context. + void enterRule(ParserRuleContext localctx, int state, int ruleIndex) { + this.state = state; + _ctx = localctx; + _ctx.start = _input.LT(1); + if (_buildParseTrees) addContextToParseTree(); + if (_parseListeners != null) triggerEnterRuleEvent(); + } + + void exitRule() { + if (matchedEOF) { + // if we have matched EOF, it cannot consume past EOF so we use LT(1) here + _ctx.stop = _input.LT(1); // LT(1) will be end of file + } else { + _ctx.stop = _input.LT(-1); // stop node is what we just matched + } + // trigger event on _ctx, before it reverts to parent + if (_parseListeners != null) triggerExitRuleEvent(); + state = _ctx.invokingState; + _ctx = _ctx.parent; + } + + void enterOuterAlt(ParserRuleContext localctx, int altNum) { + localctx.altNumber = altNum; + // if we have new localctx, make sure we replace existing ctx + // that is previous child of parse tree + if (_buildParseTrees && _ctx != localctx) { + final parent = _ctx.parent; + if (parent != null) { + parent.removeLastChild(); + parent.addAnyChild(localctx); + } + } + _ctx = localctx; + } + + /// Get the precedence level for the top-most precedence rule. + /// + /// @return The precedence level for the top-most precedence rule, or -1 if + /// the parser context is not nested within a precedence rule. + int get precedence { + if (_precedenceStack.isEmpty) { + return -1; + } + + return _precedenceStack.last; + } + + void enterRecursionRule( + ParserRuleContext localctx, int state, int ruleIndex, int precedence) { + this.state = state; + _precedenceStack.add(precedence); + _ctx = localctx; + _ctx.start = _input.LT(1); + if (_parseListeners != null) { + triggerEnterRuleEvent(); // simulates rule entry for left-recursive rules + } + } + + /// Like {@link #enterRule} but for recursive rules. + /// Make the current context the child of the incoming localctx. 
+ void pushNewRecursionContext( + ParserRuleContext localctx, int state, int ruleIndex) { + final previous = _ctx; + previous.parent = localctx; + previous.invokingState = state; + previous.stop = _input.LT(-1); + + _ctx = localctx; + _ctx.start = previous.start; + if (_buildParseTrees) { + _ctx.addAnyChild(previous); + } + + if (_parseListeners != null) { + triggerEnterRuleEvent(); // simulates rule entry for left-recursive rules + } + } + + void unrollRecursionContexts(ParserRuleContext _parentctx) { + _precedenceStack.removeLast(); + _ctx.stop = _input.LT(-1); + final retctx = _ctx; // save current ctx (return value) + + // unroll so _ctx is as it was before call to recursive method + if (_parseListeners != null) { + while (_ctx != _parentctx) { + triggerExitRuleEvent(); + _ctx = _ctx.parent; + } + } else { + _ctx = _parentctx; + } + + // hook into tree + retctx.parent = _parentctx; + + if (_buildParseTrees && _parentctx != null) { + // add return ctx into invoking rule's tree + _parentctx.addAnyChild(retctx); + } + } + + ParserRuleContext getInvokingContext(int ruleIndex) { + var p = _ctx; + while (p != null) { + if (p.ruleIndex == ruleIndex) return p; + p = p.parent; + } + return null; + } + + ParserRuleContext get context { + return _ctx; + } + + set context(ParserRuleContext ctx) { + _ctx = ctx; + } + + @override + bool precpred(RuleContext localctx, int precedence) { + return precedence >= _precedenceStack.last; + } + + bool inContext(String context) { + // TODO: useful in parser? + return false; + } + + /// Checks whether or not [symbol] can follow the current state in the + /// ATN. The behavior of this method is equivalent to the following, but is + /// implemented such that the complete context-sensitive follow set does not + /// need to be explicitly constructed. + /// + ///
+  /// return expectedTokens.contains(symbol);
+  /// 
+ /// + /// @param symbol the symbol type to check + /// @return [true] if [symbol] can follow the current state in + /// the ATN, otherwise [false]. + bool isExpectedToken(int symbol) { +// return interpreter.atn.nextTokens(_ctx); + final atn = interpreter.atn; + var ctx = _ctx; + final s = atn.states[state]; + var following = atn.nextTokens(s); + if (following.contains(symbol)) { + return true; + } +// log("following "+s+"="+following); + if (!following.contains(Token.EPSILON)) return false; + + while (ctx != null && + ctx.invokingState >= 0 && + following.contains(Token.EPSILON)) { + final invokingState = atn.states[ctx.invokingState]; + RuleTransition rt = invokingState.transition(0); + following = atn.nextTokens(rt.followState); + if (following.contains(symbol)) { + return true; + } + + ctx = ctx.parent; + } + + if (following.contains(Token.EPSILON) && symbol == Token.EOF) { + return true; + } + + return false; + } + + bool isMatchedEOF() { + return matchedEOF; + } + + /// Computes the set of input symbols which could follow the current parser + /// state and context, as given by {@link #getState} and {@link #getContext}, + /// respectively. + /// + /// @see ATN#getExpectedTokens(int, RuleContext) + IntervalSet get expectedTokens { + return getATN().getExpectedTokens(state, context); + } + + IntervalSet get expectedTokensWithinCurrentRule { + final atn = interpreter.atn; + final s = atn.states[state]; + return atn.nextTokens(s); + } + + /// Get a rule's index (i.e., {@code RULE_ruleName} field) or -1 if not found. */ + int getRuleIndex(String ruleName) { + final ruleIndex = ruleIndexMap[ruleName]; + if (ruleIndex != null) return ruleIndex; + return -1; + } + + ParserRuleContext get ruleContext { + return _ctx; + } + + List get ruleInvocationStack => getRuleInvocationStack(); + + /// Return List<String> of the rule names in your parser instance + /// leading up to a call to the current rule. 
You could override if + /// you want more details such as the file/line info of where + /// in the ATN a rule is invoked. + /// + /// This is very useful for error messages. + List getRuleInvocationStack([RuleContext p]) { + p = p ?? _ctx; + final _ruleNames = ruleNames; + final stack = []; + while (p != null) { + // compute what follows who invoked us + final ruleIndex = p.ruleIndex; + if (ruleIndex < 0) { + stack.add('n/a'); + } else { + stack.add(_ruleNames[ruleIndex]); + } + p = p.parent; + } + return stack; + } + + /// For debugging and other purposes. */ + List get dfaStrings { + final s = []; + for (var d = 0; d < interpreter.decisionToDFA.length; d++) { + final dfa = interpreter.decisionToDFA[d]; + s.add(dfa.toString(vocabulary)); + } + return s; + } + + /// For debugging and other purposes. */ + void dumpDFA() { + var seenOne = false; + for (var d = 0; d < interpreter.decisionToDFA.length; d++) { + final dfa = interpreter.decisionToDFA[d]; + if (dfa.states.isNotEmpty) { + if (seenOne) print(''); + print('Decision ${dfa.decision}:'); + stdout.write(dfa.toString(vocabulary)); + seenOne = true; + } + } + } + + String get sourceName { + return _input.sourceName; + } + + @override + ParseInfo get parseInfo { + final interp = interpreter; + if (interp is ProfilingATNSimulator) { + return ParseInfo(interp); + } + return null; + } + + /// @since 4.3 + void setProfile(bool profile) { + final interp = interpreter; + final saveMode = interp.predictionMode; + if (profile) { + if (!(interp is ProfilingATNSimulator)) { + interpreter = ProfilingATNSimulator(this); + } + } else if (interp is ProfilingATNSimulator) { + final sim = ParserATNSimulator( + this, getATN(), interp.decisionToDFA, interp.sharedContextCache); + interpreter = sim; + } + interpreter.predictionMode = saveMode; + } + + /// During a parse is sometimes useful to listen in on the rule entry and exit + /// events as well as token matches. This is for quick and dirty debugging. 
+ void setTrace(bool trace) { + if (!trace) { + removeParseListener(_tracer); + _tracer = null; + } else { + if (_tracer != null) { + removeParseListener(_tracer); + } else { + _tracer = TraceListener(this); + } + addParseListener(_tracer); + } + } + + /// Gets whether a [TraceListener] is registered as a parse listener + /// for the parser. + /// + /// @see #setTrace(bool) + bool isTrace() { + return _tracer != null; + } +} diff --git a/runtime/Dart/lib/src/parser_interpreter.dart b/runtime/Dart/lib/src/parser_interpreter.dart new file mode 100644 index 000000000..e14c547d5 --- /dev/null +++ b/runtime/Dart/lib/src/parser_interpreter.dart @@ -0,0 +1,393 @@ +/* + * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ + +import 'dart:collection'; + +import 'atn/atn.dart'; +import 'dfa/dfa.dart'; +import 'error/error.dart'; +import 'misc/pair.dart'; +import 'parser.dart'; +import 'parser_rule_context.dart'; +import 'token.dart'; +import 'token_stream.dart'; +import 'vocabulary.dart'; + +/// A parser simulator that mimics what ANTLR's generated +/// parser code does. A ParserATNSimulator is used to make +/// predictions via adaptivePredict but this class moves a pointer through the +/// ATN to simulate parsing. ParserATNSimulator just +/// makes us efficient rather than having to backtrack, for example. +/// +/// This properly creates parse trees even for left recursive rules. +/// +/// We rely on the left recursive rule invocation and special predicate +/// transitions to make left recursive rules work. +/// +/// See TestParserInterpreter for examples. 
/// A parser simulator that mimics what ANTLR's generated parser code does.
///
/// Instead of running compiled rule functions it moves a pointer through the
/// ATN and asks a [ParserATNSimulator] for adaptive predictions at decision
/// states, building [InterpreterRuleContext] nodes as it goes.
class ParserInterpreter extends Parser {
  @override
  final String grammarFileName;

  final ATN atn;

  /// Not shared like it is for generated parsers.
  List<DFA> decisionToDFA;

  final PredictionContextCache sharedContextCache = PredictionContextCache();

  @override
  final List<String> ruleNames;

  @override
  final Vocabulary vocabulary;

  /// This stack corresponds to the _parentctx, _parentState pair of locals
  /// that would exist on call stack frames with a recursive descent parser;
  /// in the generated function for a left-recursive rule you'd see:
  ///
  ///     EContext e(int _p) throws RecognitionException {
  ///       ParserRuleContext _parentctx = context; // Pair.a
  ///       int _parentState = state;               // Pair.b
  ///       ...
  ///     }
  ///
  /// Those values are used to create new recursive rule invocation contexts
  /// associated with the left operand of an alt like "expr '*' expr".
  final DoubleLinkedQueue<Pair<ParserRuleContext, int>> _parentContextStack =
      DoubleLinkedQueue();

  /// We need a map from (decision,inputIndex)->forced alt for computing
  /// ambiguous parse trees. For now, we allow exactly one override.
  int overrideDecision = -1;
  int overrideDecisionInputIndex = -1;
  int overrideDecisionAlt = -1;

  /// Latch so we only override once; an error might trigger an infinite loop
  /// otherwise.
  bool overrideDecisionReached = false;

  /// What is the current context when we override a decision? This tells
  /// us what the root of the parse tree is when using override
  /// for an ambiguity/lookahead check.
  InterpreterRuleContext overrideDecisionRoot;

  /// Return the root of the parse, which can be useful if the parser
  /// bails out. You still can access the top node. Note that,
  /// because of the way left recursive rules add children, it's possible
  /// that the root will not have any children if the start rule immediately
  /// called a left recursive rule that fails.
  ///
  /// @since 4.5.1
  InterpreterRuleContext rootContext;

  ParserInterpreter(this.grammarFileName, this.vocabulary, this.ruleNames,
      this.atn, TokenStream input)
      : super(input) {
    // init decision DFA
    final numberOfDecisions = atn.numberOfDecisions;
    decisionToDFA = List<DFA>(numberOfDecisions);
    for (var i = 0; i < numberOfDecisions; i++) {
      final decisionState = atn.getDecisionState(i);
      decisionToDFA[i] = DFA(decisionState, i);
    }

    // get atn simulator that knows how to do predictions
    interpreter =
        ParserATNSimulator(this, atn, decisionToDFA, sharedContextCache);
  }

  @override
  void reset() {
    super.reset();
    overrideDecisionReached = false;
    overrideDecisionRoot = null;
  }

  @override
  ATN getATN() {
    return atn;
  }

  /// Begin parsing at [startRuleIndex].
  ParserRuleContext parse(int startRuleIndex) {
    final startRuleStartState = atn.ruleToStartState[startRuleIndex];

    rootContext = createInterpreterRuleContext(
        null, ATNState.INVALID_STATE_NUMBER, startRuleIndex);
    if (startRuleStartState.isLeftRecursiveRule) {
      enterRecursionRule(
          rootContext, startRuleStartState.stateNumber, startRuleIndex, 0);
    } else {
      enterRule(rootContext, startRuleStartState.stateNumber, startRuleIndex);
    }

    while (true) {
      final p = atnState;
      switch (p.stateType) {
        case StateType.RULE_STOP:
          // pop; return from rule
          if (context.isEmpty) {
            if (startRuleStartState.isLeftRecursiveRule) {
              final result = context;
              final parentContext = _parentContextStack.removeLast();
              unrollRecursionContexts(parentContext.a);
              return result;
            } else {
              exitRule();
              return rootContext;
            }
          }

          visitRuleStopState(p);
          break;

        default:
          try {
            visitState(p);
          } on RecognitionException catch (e) {
            // Jump to the rule's stop state, record the failure, report it,
            // then attempt resynchronization before continuing the walk.
            state = atn.ruleToStopState[p.ruleIndex].stateNumber;
            context.exception = e;
            errorHandler.reportError(this, e);
            recover(e);
          }

          break;
      }
    }
  }

  @override
  void enterRecursionRule(
      ParserRuleContext localctx, int state, int ruleIndex, int precedence) {
    final pair = Pair<ParserRuleContext, int>(context, localctx.invokingState);
    _parentContextStack.add(pair);
    super.enterRecursionRule(localctx, state, ruleIndex, precedence);
  }

  /// The ATN state the interpreter is currently in.
  ATNState get atnState {
    return atn.states[state];
  }

  /// Execute the single transition chosen out of [p]: match tokens,
  /// enter/exit rules, and evaluate predicates/actions as dictated by
  /// the transition type.
  void visitState(ATNState p) {
    var predictedAlt = 1;
    if (p is DecisionState) {
      predictedAlt = visitDecisionState(p);
    }

    final transition = p.transition(predictedAlt - 1);
    switch (transition.type) {
      case TransitionType.EPSILON:
        if (p.stateType == StateType.STAR_LOOP_ENTRY &&
            (p as StarLoopEntryState).isPrecedenceDecision &&
            transition.target is! LoopEndState) {
          // We are at the start of a left recursive rule's (...)* loop
          // and we're not taking the exit branch of loop.
          final localctx = createInterpreterRuleContext(
              _parentContextStack.last.a,
              _parentContextStack.last.b,
              context.ruleIndex);
          pushNewRecursionContext(localctx,
              atn.ruleToStartState[p.ruleIndex].stateNumber, context.ruleIndex);
        }
        break;

      case TransitionType.ATOM:
        match((transition as AtomTransition).atomLabel);
        break;

      case TransitionType.RANGE:
      case TransitionType.SET:
      case TransitionType.NOT_SET:
        if (!transition.matches(
            inputStream.LA(1), Token.MIN_USER_TOKEN_TYPE, 65535)) {
          recoverInline();
        }
        matchWildcard();
        break;

      case TransitionType.WILDCARD:
        matchWildcard();
        break;

      case TransitionType.RULE:
        RuleStartState ruleStartState = transition.target;
        final ruleIndex = ruleStartState.ruleIndex;
        final newctx =
            createInterpreterRuleContext(context, p.stateNumber, ruleIndex);
        if (ruleStartState.isLeftRecursiveRule) {
          enterRecursionRule(newctx, ruleStartState.stateNumber, ruleIndex,
              (transition as RuleTransition).precedence);
        } else {
          enterRule(newctx, transition.target.stateNumber, ruleIndex);
        }
        break;

      case TransitionType.PREDICATE:
        PredicateTransition predicateTransition = transition;
        if (!sempred(context, predicateTransition.ruleIndex,
            predicateTransition.predIndex)) {
          throw FailedPredicateException(this);
        }

        break;

      case TransitionType.ACTION:
        ActionTransition actionTransition = transition;
        action(
            context, actionTransition.ruleIndex, actionTransition.actionIndex);
        break;

      case TransitionType.PRECEDENCE:
        if (!precpred(context,
            (transition as PrecedencePredicateTransition).precedence)) {
          throw FailedPredicateException(this,
              'precpred(context, ${(transition as PrecedencePredicateTransition).precedence})');
        }
        break;

      default:
        throw UnsupportedError('Unrecognized ATN transition type.');
    }

    state = transition.target.stateNumber;
  }

  /// Method visitDecisionState() is called when the interpreter reaches
  /// a decision state (instance of DecisionState). It gives an opportunity
  /// for subclasses to track interesting things.
  int visitDecisionState(DecisionState p) {
    var predictedAlt = 1;
    if (p.numberOfTransitions > 1) {
      errorHandler.sync(this);
      final decision = p.decision;
      if (decision == overrideDecision &&
          inputStream.index == overrideDecisionInputIndex &&
          !overrideDecisionReached) {
        predictedAlt = overrideDecisionAlt;
        overrideDecisionReached = true;
      } else {
        predictedAlt =
            interpreter.adaptivePredict(inputStream, decision, context);
      }
    }
    return predictedAlt;
  }

  /// Provide simple "factory" for InterpreterRuleContext's.
  ///
  /// @since 4.5.1
  InterpreterRuleContext createInterpreterRuleContext(
      ParserRuleContext parent, int invokingStateNumber, int ruleIndex) {
    return InterpreterRuleContext(parent, invokingStateNumber, ruleIndex);
  }

  void visitRuleStopState(ATNState p) {
    final ruleStartState = atn.ruleToStartState[p.ruleIndex];
    if (ruleStartState.isLeftRecursiveRule) {
      final parentContext = _parentContextStack.removeLast();
      unrollRecursionContexts(parentContext.a);
      state = parentContext.b;
    } else {
      exitRule();
    }

    RuleTransition ruleTransition = atn.states[state].transition(0);
    state = ruleTransition.followState.stateNumber;
  }

  /// Override this parser interpreter's normal decision-making process
  /// at a particular decision and input token index. Instead of
  /// allowing the adaptive prediction mechanism to choose the
  /// first alternative within a block that leads to a successful parse,
  /// force it to take the alternative, 1..n for n alternatives.
  ///
  /// As an implementation limitation right now, you can only specify one
  /// override. This is sufficient to allow construction of different
  /// parse trees for ambiguous input. It means re-parsing the entire input
  /// in general because you're never sure where an ambiguous sequence would
  /// live in the various parse trees. For example, in one interpretation,
  /// an ambiguous input sequence would be matched completely in expression
  /// but in another it could match all the way back to the root.
  ///
  ///     s : e '!'? ;
  ///     e : ID
  ///       | ID '!'
  ///       ;
  ///
  /// Here, x! can be matched as (s (e ID) !) or (s (e ID !)). In the first
  /// case, the ambiguous sequence is fully contained only by the root.
  /// In the second case, the ambiguous sequence is fully contained within
  /// just e, as in: (e ID !).
  ///
  /// Rather than trying to optimize this and make
  /// some intelligent decisions for optimization purposes, I settled on
  /// just re-parsing the whole input and then using
  /// {@link Trees#getRootOfSubtreeEnclosingRegion} to find the minimal
  /// subtree that contains the ambiguous sequence. I originally tried to
  /// record the call stack at the point the parser detected an ambiguity but
  /// left recursive rules create a parse tree stack that does not reflect
  /// the actual call stack. That impedance mismatch was enough to make
  /// it challenging to restart the parser at a deeply nested rule
  /// invocation.
  ///
  /// Only parser interpreters can override decisions so as to avoid inserting
  /// override checking code in the critical ALL(*) prediction execution path.
  ///
  /// @since 4.5.1
  void addDecisionOverride(int decision, int tokenIndex, int forcedAlt) {
    overrideDecision = decision;
    overrideDecisionInputIndex = tokenIndex;
    overrideDecisionAlt = forcedAlt;
  }

  /// Rely on the error handler for this parser but, if no tokens are consumed
  /// to recover, add an error node. Otherwise, nothing is seen in the parse
  /// tree.
  void recover(RecognitionException e) {
    final i = inputStream.index;
    errorHandler.recover(this, e);
    if (inputStream.index == i) {
      // no input consumed, better add an error node
      if (e is InputMismatchException) {
        final ime = e;
        final tok = e.offendingToken;
        var expectedTokenType = Token.INVALID_TYPE;
        if (!ime.expectedTokens.isNil) {
          expectedTokenType = ime.expectedTokens.minElement; // get any element
        }
        final errToken = tokenFactory.create(
            expectedTokenType,
            tok.text,
            Pair(tok.tokenSource, tok.tokenSource.inputStream),
            Token.DEFAULT_CHANNEL,
            -1,
            -1,
            // invalid start/stop
            tok.line,
            tok.charPositionInLine);
        context.addErrorNode(createErrorNode(context, errToken));
      } else {
        // NoViableAlt
        final tok = e.offendingToken;
        final errToken = tokenFactory.create(
            Token.INVALID_TYPE,
            tok.text,
            Pair(tok.tokenSource, tok.tokenSource.inputStream),
            Token.DEFAULT_CHANNEL,
            -1,
            -1,
            // invalid start/stop
            tok.line,
            tok.charPositionInLine);
        context.addErrorNode(createErrorNode(context, errToken));
      }
    }
  }

  Token recoverInline() {
    return errorHandler.recoverInline(this);
  }
}
diff --git a/runtime/Dart/lib/src/parser_rule_context.dart b/runtime/Dart/lib/src/parser_rule_context.dart
new file mode 100644
index 000000000..c6c594704
--- /dev/null
+++ b/runtime/Dart/lib/src/parser_rule_context.dart
@@ -0,0 +1,275 @@
/*
 * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
 * Use of this file is governed by the BSD 3-clause license that
 * can be found in the LICENSE.txt file in the project root.
 */

import 'error/error.dart';
import 'interval_set.dart';
import 'parser.dart';
import 'rule_context.dart';
import 'token.dart';
import 'tree/tree.dart';

/// A rule invocation record for parsing.
///
/// Contains all of the information about the current rule not stored in the
/// RuleContext.
/// It handles parse tree children list, any ATN state
/// tracing, and the default values available for rule invocations:
/// start, stop, rule index, current alt number.
///
/// Subclasses made for each rule and grammar track the parameters,
/// return values, locals, and labels specific to that rule. These
/// are the objects that are returned from rules.
///
/// Note text is not an actual field of a rule return value; it is computed
/// from start and stop using the input stream's toString() method. I
/// could add a ctor to this so that we can pass in and store the input
/// stream, but I'm not sure we want to do that. It would seem to be undefined
/// to get the .text property anyway if the rule matches tokens from multiple
/// input streams.
///
/// I do not use getters for fields of objects that are used simply to
/// group values such as this aggregate. The getters/setters are there to
/// satisfy the superclass interface.
class ParserRuleContext extends RuleContext {
  /// If we are debugging or building a parse tree for a visitor,
  /// we need to track all of the tokens and rule invocations associated
  /// with this rule's context. This is empty for parsing w/o tree constr.
  /// operation because we don't need to track the details about
  /// how we parse this rule.
  List<ParseTree> children;

  /// Get the initial/final token in this context.
  /// Note that the range from start to stop is inclusive, so for rules that
  /// do not consume anything (for example, zero length or error productions)
  /// this token may exceed stop.
  Token start, stop;

  /// The exception that forced this rule to return. If the rule successfully
  /// completed, this is null.
  RecognitionException exception;

  ParserRuleContext([RuleContext parent, int invokingStateNumber])
      : super(parent: parent, invokingState: invokingStateNumber);

  /// COPY a ctx (I'm deliberately not using copy constructor) to avoid
  /// confusion with creating node with parent. Does not copy children
  /// (except error leaves).
  ///
  /// This is used in the generated parser code to flip a generic XContext
  /// node for rule X to a YContext for alt label Y. In that sense, it is
  /// not really a generic copy function.
  ///
  /// If we do an error sync() at start of a rule, we might add error nodes
  /// to the generic XContext so this function must copy those nodes to
  /// the YContext as well else they are lost!
  void copyFrom(ParserRuleContext ctx) {
    parent = ctx.parent;
    invokingState = ctx.invokingState;

    start = ctx.start;
    stop = ctx.stop;

    // copy any error nodes to alt label node
    if (ctx.children != null) {
      children = [];
      // reset parent pointer for any error nodes
      for (var child in ctx.children) {
        if (child is ErrorNode) {
          addChild(child);
        }
      }
    }
  }

  // Double dispatch methods for listeners

  void enterRule(ParseTreeListener listener) {}

  void exitRule(ParseTreeListener listener) {}

  /// Add a parse tree node to this as a child. Works for
  /// internal and leaf nodes. Does not set parent link;
  /// other add methods must do that. Other addChild methods
  /// call this.
  ///
  /// We cannot set the parent pointer of the incoming node
  /// because the existing interfaces do not have a setParent()
  /// method and I don't want to break backward compatibility for this.
  ///
  /// @since 4.7
  T addAnyChild<T extends ParseTree>(T t) {
    children ??= [];
    children.add(t);
    return t;
  }

  /// Add a token leaf node child and force its parent to be this node.
  TerminalNode addChild(TerminalNode t) {
    t.parent = this;
    return addAnyChild(t);
  }

  /// Add an error node child and force its parent to be this node.
  ///
  /// @since 4.7
  ErrorNode addErrorNode(ErrorNode errorNode) {
    errorNode.parent = this;
    return addAnyChild(errorNode);
  }

  /// Used by enterOuterAlt to toss out a RuleContext previously added as
  /// we entered a rule. If we have # label, we will need to remove
  /// generic ruleContext object.
  void removeLastChild() {
    if (children != null) {
      children.removeLast();
    }
  }

  // Override to make type more specific
  @override
  ParserRuleContext get parent {
    return super.parent;
  }

  /// Return the i-th child, optionally filtered by the type argument [T].
  // NOTE(review): `T == null` is never true for a Dart type argument (an
  // omitted T is `dynamic`, not null); this fast path mirrors the Java
  // original's null ctxType check — confirm against RuleContext.getChild.
  @override
  ParseTree getChild<T>(int i) {
    if (children == null || i < 0 || i >= children.length) {
      return null;
    }

    if (T == null) {
      return children[i];
    }
    var j = -1; // what element have we found with ctxType?
    for (var o in children) {
      if (o is T) {
        j++;
        if (j == i) {
          return o;
        }
      }
    }
    return null;
  }

  /// Return the i-th terminal child whose token type is [ttype], or null.
  TerminalNode getToken(int ttype, int i) {
    if (children == null || i < 0 || i >= children.length) {
      return null;
    }

    var j = -1; // what token with ttype have we found?
    for (var o in children) {
      if (o is TerminalNode) {
        final tnode = o;
        final symbol = tnode.symbol;
        if (symbol.type == ttype) {
          j++;
          if (j == i) {
            return tnode;
          }
        }
      }
    }

    return null;
  }

  /// All terminal children whose token type is [ttype]; empty list if none.
  List<TerminalNode> getTokens(int ttype) {
    if (children == null) {
      return [];
    }

    List<TerminalNode> tokens;
    for (var o in children) {
      if (o is TerminalNode) {
        final tnode = o;
        final symbol = tnode.symbol;
        if (symbol.type == ttype) {
          tokens ??= [];
          tokens.add(tnode);
        }
      }
    }

    if (tokens == null) {
      return [];
    }

    return tokens;
  }

  T getRuleContext<T extends ParserRuleContext>(int i) {
    return getChild<T>(i);
  }

  /// All children that are rule contexts of type [T]; empty list if none.
  List<T> getRuleContexts<T extends ParserRuleContext>() {
    if (children == null) {
      return [];
    }

    List<T> contexts;
    for (var o in children) {
      if (o is T) {
        contexts ??= [];

        contexts.add(o);
      }
    }

    if (contexts == null) {
      return [];
    }

    return contexts;
  }

  @override
  int get childCount => children?.length ?? 0;

  @override
  Interval get sourceInterval {
    if (start == null) {
      return Interval.INVALID;
    }
    if (stop == null || stop.tokenIndex < start.tokenIndex) {
      return Interval(start.tokenIndex, start.tokenIndex - 1); // empty
    }
    return Interval(start.tokenIndex, stop.tokenIndex);
  }

  /// Used for rule context info debugging during parse-time, not so much for
  /// ATN debugging.
  // Fixed: the original ended the string with a stray `'` (a mistranslation
  // of Java's '}' char literal).
  String toInfoString(Parser recognizer) {
    final rules = recognizer.getRuleInvocationStack(this);

    return 'ParserRuleContext${rules.reversed}{start=$start, stop=$stop}';
  }

  static final EMPTY = ParserRuleContext();
}

/// This class extends [ParserRuleContext] by allowing the value of
/// {@link #getRuleIndex} to be explicitly set for the context.
///

/// [ParserRuleContext] does not include field storage for the rule index
/// since the context classes created by the code generator override the
/// {@link #getRuleIndex} method to return the correct value for that context.
/// Since the parser interpreter does not use the context classes generated
/// for a parser, this class (with slightly more memory overhead per node) is
/// used to provide equivalent functionality.
class InterpreterRuleContext extends ParserRuleContext {
  @override
  int ruleIndex = -1;

  /// Constructs a new [InterpreterRuleContext] with the specified
  /// parent, invoking state, and rule index.
  ///
  /// @param parent The parent context.
  /// @param invokingStateNumber The invoking state number.
  /// @param ruleIndex The rule index for the current context.
  InterpreterRuleContext(
      ParserRuleContext parent, int invokingStateNumber, this.ruleIndex)
      : super(parent, invokingStateNumber);
}
diff --git a/runtime/Dart/lib/src/prediction_context.dart b/runtime/Dart/lib/src/prediction_context.dart
new file mode 100644
index 000000000..dcee2e385
--- /dev/null
+++ b/runtime/Dart/lib/src/prediction_context.dart
@@ -0,0 +1,877 @@
/*
 * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
 * Use of this file is governed by the BSD 3-clause license that
 * can be found in the LICENSE.txt file in the project root.
 */

import 'package:collection/collection.dart';

import 'atn/atn.dart';
import 'misc/pair.dart';
import 'recognizer.dart';
import 'rule_context.dart';
import 'util/murmur_hash.dart';

/// A graph node in the prediction-context graphs used by ALL(*) prediction
/// to represent (merged) parser call stacks.
abstract class PredictionContext {
  /// Represents {@code $} in local context prediction, which means wildcard.
  /// {@code *+x = *}.
  static final EmptyPredictionContext EMPTY = EmptyPredictionContext();

  /// Represents {@code $} in an array in full context mode, when {@code $}
  /// doesn't mean wildcard: {@code $ + x = [$,x]}. Here,
  /// {@code $} = {@link #EMPTY_RETURN_STATE}.
  static final int EMPTY_RETURN_STATE = 0x7FFFFFFF;

  static final int INITIAL_HASH = 1;

  // Monotonically increasing ids used only for debugging/DOT output ordering.
  static int globalNodeCount = 0;
  int id = globalNodeCount++;

  /// Stores the computed hash code of this [PredictionContext]. The hash
  /// code is computed in parts to match the following reference algorithm:
  ///
+  ///   int referenceHashCode() {
+  ///      int hash = {@link MurmurHash#initialize MurmurHash.initialize}({@link #INITIAL_HASH});
+  ///
+  ///      for (int i = 0; i < {@link #size()}; i++) {
+  ///          hash = {@link MurmurHash#update MurmurHash.update}(hash, {@link #getParent getParent}(i));
+  ///      }
+  ///
+  ///      for (int i = 0; i < {@link #size()}; i++) {
+  ///          hash = {@link MurmurHash#update MurmurHash.update}(hash, {@link #getReturnState getReturnState}(i));
+  ///      }
+  ///
+  ///      hash = {@link MurmurHash#finish MurmurHash.finish}(hash, 2 * {@link #size()});
+  ///      return hash;
+  ///  }
+  /// 
+ final int cachedHashCode; + + PredictionContext(this.cachedHashCode); + + /// Convert a [RuleContext] tree to a [PredictionContext] graph. + /// Return {@link #EMPTY} if [outerContext] is empty or null. + static PredictionContext fromRuleContext(ATN atn, RuleContext outerContext) { + outerContext ??= RuleContext.EMPTY; + + // if we are in RuleContext of start rule, s, then PredictionContext + // is EMPTY. Nobody called us. (if we are empty, return empty) + if (outerContext.parent == null || outerContext == RuleContext.EMPTY) { + return PredictionContext.EMPTY; + } + + // If we have a parent, convert it to a PredictionContext graph + PredictionContext parent = EMPTY; + parent = PredictionContext.fromRuleContext(atn, outerContext.parent); + + final state = atn.states[outerContext.invokingState]; + RuleTransition transition = state.transition(0); + return SingletonPredictionContext.create( + parent, transition.followState.stateNumber); + } + + int get length; + + PredictionContext getParent(int index); + + int getReturnState(int index); + + /// This means only the {@link #EMPTY} (wildcard? not sure) context is in set. 
*/ + bool get isEmpty { + return this == EMPTY; + } + + bool hasEmptyPath() { + // since EMPTY_RETURN_STATE can only appear in the last position, we check last one + return getReturnState(length - 1) == EMPTY_RETURN_STATE; + } + + @override + int get hashCode { + return cachedHashCode; + } + + @override + bool operator ==(Object obj); + + static int calculateEmptyHashCode() { + var hash = MurmurHash.initialize(INITIAL_HASH); + hash = MurmurHash.finish(hash, 0); + return hash; + } + + static int calculateHashCode( + List parents, List returnStates) { + var hash = MurmurHash.initialize(INITIAL_HASH); + + for (var parent in parents) { + hash = MurmurHash.update(hash, parent); + } + + for (var returnState in returnStates) { + hash = MurmurHash.update(hash, returnState); + } + + hash = MurmurHash.finish(hash, 2 * parents.length); + return hash; + } + + // dispatch + static PredictionContext merge( + PredictionContext a, + PredictionContext b, + bool rootIsWildcard, + Map, PredictionContext> + mergeCache) { + assert(a != null && b != null); // must be empty context, never null + + // share same graph if both same + if (a == b || a == b) return a; + + if (a is SingletonPredictionContext && b is SingletonPredictionContext) { + return mergeSingletons(a, b, rootIsWildcard, mergeCache); + } + + // At least one of a or b is array + // If one is $ and rootIsWildcard, return $ as * wildcard + if (rootIsWildcard) { + if (a is EmptyPredictionContext) return a; + if (b is EmptyPredictionContext) return b; + } + + // convert singleton so both are arrays to normalize + if (a is SingletonPredictionContext) { + a = ArrayPredictionContext.of(a); + } + if (b is SingletonPredictionContext) { + b = ArrayPredictionContext.of(b); + } + return mergeArrays(a, b, rootIsWildcard, mergeCache); + } + + /// Merge two [SingletonPredictionContext] instances. + /// + ///

Stack tops equal, parents merge is same; return left graph.
+ ///

+ /// + ///

Same stack top, parents differ; merge parents giving array node, then + /// remainders of those graphs. A new root node is created to point to the + /// merged parents.
+ ///

+ /// + ///

Different stack tops pointing to same parent. Make array node for the + /// root where both element in the root point to the same (original) + /// parent.
+ ///

+ /// + ///

Different stack tops pointing to different parents. Make array node for + /// the root where each element points to the corresponding original + /// parent.
+ ///

+ /// + /// @param a the first [SingletonPredictionContext] + /// @param b the second [SingletonPredictionContext] + /// @param rootIsWildcard [true] if this is a local-context merge, + /// otherwise false to indicate a full-context merge + /// @param mergeCache + static PredictionContext mergeSingletons( + SingletonPredictionContext a, + SingletonPredictionContext b, + bool rootIsWildcard, + Map, PredictionContext> + mergeCache) { + if (mergeCache != null) { + var previous = mergeCache[Pair(a, b)]; + if (previous != null) return previous; + previous = mergeCache[Pair(b, a)]; + if (previous != null) return previous; + } + + final rootMerge = mergeRoot(a, b, rootIsWildcard); + if (rootMerge != null) { + if (mergeCache != null) mergeCache[Pair(a, b)] = rootMerge; + return rootMerge; + } + + if (a.returnState == b.returnState) { + // a == b + final parent = + merge(a.parent, b.parent, rootIsWildcard, mergeCache); + // if parent is same as existing a or b parent or reduced to a parent, return it + if (parent == a.parent) return a; // ax + bx = ax, if a=b + if (parent == b.parent) return b; // ax + bx = bx, if a=b + // else: ax + ay = a'[x,y] + // merge parents x and y, giving array node with x,y then remainders + // of those graphs. 
dup a, a' points at merged array + // new joined parent so create new singleton pointing to it, a' + PredictionContext a_ = + SingletonPredictionContext.create(parent, a.returnState); + if (mergeCache != null) mergeCache[Pair(a, b)] = a_; + return a_; + } else { + // a != b payloads differ + // see if we can collapse parents due to $+x parents if local ctx + PredictionContext singleParent; + if (a == b || (a.parent != null && a.parent == b.parent)) { + // ax + bx = [a,b]x + singleParent = a.parent; + } + if (singleParent != null) { + // parents are same + // sort payloads and use same parent + final payloads = [a.returnState, b.returnState]; + if (a.returnState > b.returnState) { + payloads[0] = b.returnState; + payloads[1] = a.returnState; + } + final parents = [singleParent, singleParent]; + PredictionContext a_ = ArrayPredictionContext(parents, payloads); + if (mergeCache != null) mergeCache[Pair(a, b)] = a_; + return a_; + } + // parents differ and can't merge them. Just pack together + // into array; can't merge. + // ax + by = [ax,by] + final payloads = [a.returnState, b.returnState]; + var parents = [a.parent, b.parent]; + if (a.returnState > b.returnState) { + // sort by payload + payloads[0] = b.returnState; + payloads[1] = a.returnState; + parents = [b.parent, a.parent]; + } + PredictionContext a_ = ArrayPredictionContext(parents, payloads); + if (mergeCache != null) mergeCache[Pair(a, b)] = a_; + return a_; + } + } + + /// Handle case where at least one of [a] or [b] is + /// {@link #EMPTY}. In the following diagrams, the symbol {@code $} is used + /// to represent {@link #EMPTY}. + /// + ///

Local-Context Merges

+ /// + ///

These local-context merge operations are used when [rootIsWildcard] + /// is true.

+ /// + ///

{@link #EMPTY} is superset of any graph; return {@link #EMPTY}.
+ ///

+ /// + ///

{@link #EMPTY} and anything is {@code #EMPTY}, so merged parent is + /// {@code #EMPTY}; return left graph.
+ ///

+ /// + ///

Special case of last merge if local context.
+ ///

+ /// + ///

Full-Context Merges

+ /// + ///

These full-context merge operations are used when [rootIsWildcard] + /// is false.

+ /// + ///

+ /// + ///

Must keep all contexts; {@link #EMPTY} in array is a special value (and + /// null parent).
+ ///

+ /// + ///

+ /// + /// @param a the first [SingletonPredictionContext] + /// @param b the second [SingletonPredictionContext] + /// @param rootIsWildcard [true] if this is a local-context merge, + /// otherwise false to indicate a full-context merge + static PredictionContext mergeRoot(SingletonPredictionContext a, + SingletonPredictionContext b, bool rootIsWildcard) { + if (rootIsWildcard) { + if (a == EMPTY) return EMPTY; // * + b = * + if (b == EMPTY) return EMPTY; // a + * = * + } else { + if (a == EMPTY && b == EMPTY) return EMPTY; // $ + $ = $ + if (a == EMPTY) { + // $ + x = [x,$] + final payloads = [b.returnState, EMPTY_RETURN_STATE]; + final parents = [b.parent, null]; + PredictionContext joined = + ArrayPredictionContext(parents, payloads); + return joined; + } + if (b == EMPTY) { + // x + $ = [x,$] ($ is always last if present) + final payloads = [a.returnState, EMPTY_RETURN_STATE]; + final parents = [a.parent, null]; + PredictionContext joined = + ArrayPredictionContext(parents, payloads); + return joined; + } + } + return null; + } + + /// Merge two [ArrayPredictionContext] instances. + /// + ///

Different tops, different parents.
+ ///

+ /// + ///

Shared top, same parents.
+ ///

+ /// + ///

Shared top, different parents.
+ ///

+ /// + ///

Shared top, all shared parents.
+ ///

+ /// + ///

Equal tops, merge parents and reduce top to + /// [SingletonPredictionContext].
+ ///

+ static PredictionContext mergeArrays( + ArrayPredictionContext a, + ArrayPredictionContext b, + bool rootIsWildcard, + Map, PredictionContext> + mergeCache) { + if (mergeCache != null) { + var previous = mergeCache[Pair(a, b)]; + if (previous != null) return previous; + previous = mergeCache[Pair(b, a)]; + if (previous != null) return previous; + } + + // merge sorted payloads a + b => M + var i = 0; // walks a + var j = 0; // walks b + var k = 0; // walks target M array + + var mergedReturnStates = List( + a.returnStates.length + b.returnStates.length); // TODO Will it grow? + var mergedParents = List( + a.returnStates.length + b.returnStates.length); // TODO Will it grow? + // walk and merge to yield mergedParents, mergedReturnStates + while (i < a.returnStates.length && j < b.returnStates.length) { + final a_parent = a.parents[i]; + final b_parent = b.parents[j]; + if (a.returnStates[i] == b.returnStates[j]) { + // same payload (stack tops are equal), must yield merged singleton + final payload = a.returnStates[i]; + // $+$ = $ + final both$ = payload == EMPTY_RETURN_STATE && + a_parent == null && + b_parent == null; + final ax_ax = (a_parent != null && b_parent != null) && + a_parent == b_parent; // ax+ax -> ax + if (both$ || ax_ax) { + mergedParents[k] = a_parent; // choose left + mergedReturnStates[k] = payload; + } else { + // ax+ay -> a'[x,y] + final mergedParent = + merge(a_parent, b_parent, rootIsWildcard, mergeCache); + mergedParents[k] = mergedParent; + mergedReturnStates[k] = payload; + } + i++; // hop over left one as usual + j++; // but also skip one in right side since we merge + } else if (a.returnStates[i] < b.returnStates[j]) { + // copy a[i] to M + mergedParents[k] = a_parent; + mergedReturnStates[k] = a.returnStates[i]; + i++; + } else { + // b > a, copy b[j] to M + mergedParents[k] = b_parent; + mergedReturnStates[k] = b.returnStates[j]; + j++; + } + k++; + } + + // copy over any payloads remaining in either array + if (i < 
a.returnStates.length) { + for (var p = i; p < a.returnStates.length; p++) { + mergedParents[k] = a.parents[p]; + mergedReturnStates[k] = a.returnStates[p]; + k++; + } + } else { + for (var p = j; p < b.returnStates.length; p++) { + mergedParents[k] = b.parents[p]; + mergedReturnStates[k] = b.returnStates[p]; + k++; + } + } + + // trim merged if we combined a few that had same stack tops + if (k < mergedParents.length) { + // write index < last position; trim + if (k == 1) { + // for just one merged element, return singleton top + PredictionContext a_ = SingletonPredictionContext.create( + mergedParents[0], mergedReturnStates[0]); + if (mergeCache != null) mergeCache[Pair(a, b)] = a_; + return a_; + } + mergedParents = List(k)..setRange(0, k, mergedParents); + mergedReturnStates = List(k)..setRange(0, k, mergedReturnStates); + } + + PredictionContext M = + ArrayPredictionContext(mergedParents, mergedReturnStates); + + // if we created same array as a or b, return that instead + // TODO: track whether this is possible above during merge sort for speed + if (M == a) { + if (mergeCache != null) mergeCache[Pair(a, b)] = a; + return a; + } + if (M == b) { + if (mergeCache != null) mergeCache[Pair(a, b)] = b; + return b; + } + + combineCommonParents(mergedParents); + + if (mergeCache != null) mergeCache[Pair(a, b)] = M; + return M; + } + + /// Make pass over all M [parents]; merge any {@code equals()} + /// ones. 
+ static void combineCommonParents(List parents) { + final uniqueParents = + {}; + + for (var p = 0; p < parents.length; p++) { + final parent = parents[p]; + if (!uniqueParents.containsKey(parent)) { + // don't replace + uniqueParents[parent] = parent; + } + } + + for (var p = 0; p < parents.length; p++) { + parents[p] = uniqueParents[parents[p]]; + } + } + + static String toDOTString(PredictionContext context) { + if (context == null) return ''; + final buf = StringBuffer(); + buf.write('digraph G {\n'); + buf.write('rankdir=LR;\n'); + + final nodes = getAllContextNodes(context); + nodes.sort((PredictionContext o1, PredictionContext o2) { + return o1.id - o2.id; + }); + + for (var current in nodes) { + if (current is SingletonPredictionContext) { + final s = current.id.toString(); + buf.write(' s'); + buf.write(s); + var returnState = current.getReturnState(0).toString(); + if (current is EmptyPredictionContext) returnState = r'$'; + buf.write(' [label=\"'); + buf.write(returnState); + buf.write('\"];\n'); + continue; + } + ArrayPredictionContext arr = current; + buf.write(' s'); + buf.write(arr.id); + buf.write(' [shape=box, label=\"'); + buf.write('['); + var first = true; + for (var inv in arr.returnStates) { + if (!first) buf.write(', '); + if (inv == EMPTY_RETURN_STATE) { + buf.write(r'$'); + } else { + buf.write(inv); + } + first = false; + } + buf.write(']'); + buf.write('\"];\n'); + } + + for (var current in nodes) { + if (current == EMPTY) continue; + for (var i = 0; i < current.length; i++) { + if (current.getParent(i) == null) continue; + final s = current.id.toString(); + buf.write(' s'); + buf.write(s); + buf.write('->'); + buf.write('s'); + buf.write(current.getParent(i).id); + if (current.length > 1) { + buf.write(' [label=\"parent[$i]\"];\n'); + } else { + buf.write(';\n'); + } + } + } + + buf.write('}\n'); + return buf.toString(); + } + + // From Sam + static PredictionContext getCachedContext( + PredictionContext context, + 
PredictionContextCache contextCache, + Map visited) { + if (context.isEmpty) { + return context; + } + + var existing = visited[context]; + if (existing != null) { + return existing; + } + + existing = contextCache[context]; + if (existing != null) { + visited[context] = existing; + return existing; + } + + var changed = false; + var parents = List(context.length); + for (var i = 0; i < parents.length; i++) { + final parent = + getCachedContext(context.getParent(i), contextCache, visited); + if (changed || parent != context.getParent(i)) { + if (!changed) { + parents = List(context.length); + for (var j = 0; j < context.length; j++) { + parents[j] = context.getParent(j); + } + + changed = true; + } + + parents[i] = parent; + } + } + + if (!changed) { + contextCache.add(context); + visited[context] = context; + return context; + } + + PredictionContext updated; + if (parents.isEmpty) { + updated = EMPTY; + } else if (parents.length == 1) { + updated = SingletonPredictionContext.create( + parents[0], context.getReturnState(0)); + } else { + ArrayPredictionContext arrayPredictionContext = context; + updated = ArrayPredictionContext( + parents, arrayPredictionContext.returnStates); + } + + contextCache.add(updated); + visited[updated] = updated; + visited[context] = updated; + + return updated; + } + +// // extra structures, but cut/paste/morphed works, so leave it. 
+// // seems to do a breadth-first walk +// static List getAllNodes(PredictionContext context) { +// Map visited = +// new IdentityHashMap(); +// Deque workList = new ArrayDeque(); +// workList.add(context); +// visited.put(context, context); +// List nodes = new ArrayList(); +// while (!workList.isEmpty) { +// PredictionContext current = workList.pop(); +// nodes.add(current); +// for (int i = 0; i < current.length; i++) { +// PredictionContext parent = current.getParent(i); +// if ( parent!=null && visited.put(parent, parent) == null) { +// workList.push(parent); +// } +// } +// } +// return nodes; +// } + + // ter's recursive version of Sam's getAllNodes() + static List getAllContextNodes(PredictionContext context) { + final nodes = []; + final visited = + {}; + getAllContextNodes_(context, nodes, visited); + return nodes; + } + + static void getAllContextNodes_( + PredictionContext context, + List nodes, + Map visited) { + if (context == null || visited.containsKey(context)) return; + visited[context] = context; + nodes.add(context); + for (var i = 0; i < context.length; i++) { + getAllContextNodes_(context.getParent(i), nodes, visited); + } + } + + // FROM SAM + List toStrings( + Recognizer recognizer, PredictionContext stop, int currentState) { + final result = []; + + outer: + for (var perm = 0;; perm++) { + var offset = 0; + var last = true; + var p = this; + var stateNumber = currentState; + final localBuffer = StringBuffer(); + localBuffer.write('['); + while (!p.isEmpty && p != stop) { + var index = 0; + if (p.length > 0) { + var bits = 1; + while ((1 << bits) < p.length) { + bits++; + } + + final mask = (1 << bits) - 1; + index = (perm >> offset) & mask; + last &= index >= p.length - 1; + if (index >= p.length) { + continue outer; + } + offset += bits; + } + + if (recognizer != null) { + if (localBuffer.length > 1) { + // first char is '[', if more than that this isn't the first rule + localBuffer.write(' '); + } + + final atn = recognizer.getATN(); + 
final s = atn.states[stateNumber]; + final ruleName = recognizer.ruleNames[s.ruleIndex]; + localBuffer.write(ruleName); + } else if (p.getReturnState(index) != EMPTY_RETURN_STATE) { + if (!p.isEmpty) { + if (localBuffer.length > 1) { + // first char is '[', if more than that this isn't the first rule + localBuffer.write(' '); + } + + localBuffer.write(p.getReturnState(index)); + } + } + stateNumber = p.getReturnState(index); + p = p.getParent(index); + } + localBuffer.write(']'); + result.add(localBuffer.toString()); + + if (last) { + break; + } + } + + return result; + } +} + +class SingletonPredictionContext extends PredictionContext { + final PredictionContext parent; + final int returnState; + + SingletonPredictionContext(this.parent, this.returnState) + : super(parent != null + ? PredictionContext.calculateHashCode([parent], [returnState]) + : PredictionContext.calculateEmptyHashCode()) { + assert(returnState != ATNState.INVALID_STATE_NUMBER); + } + + static SingletonPredictionContext create( + PredictionContext parent, int returnState) { + if (returnState == PredictionContext.EMPTY_RETURN_STATE && parent == null) { + // someone can pass in the bits of an array ctx that mean $ + return PredictionContext.EMPTY; + } + return SingletonPredictionContext(parent, returnState); + } + + @override + int get length { + return 1; + } + + @override + PredictionContext getParent(int index) { + assert(index == 0); + return parent; + } + + @override + int getReturnState(int index) { + assert(index == 0); + return returnState; + } + + @override + bool operator ==(Object o) { + if (identical(this, o)) { + return true; + } else if (o is SingletonPredictionContext) { + if (hashCode != o.hashCode) { + return false; // can't be same if hash is different + } + + final s = o; + return returnState == s.returnState && + (parent != null && parent == s.parent); + } + return false; + } + + @override + String toString() { + final up = parent != null ? 
parent.toString() : ''; + if (up.isEmpty) { + if (returnState == PredictionContext.EMPTY_RETURN_STATE) { + return r'$'; + } + return returnState.toString(); + } + return '$returnState $up'; + } +} + +class EmptyPredictionContext extends SingletonPredictionContext { + EmptyPredictionContext() : super(null, PredictionContext.EMPTY_RETURN_STATE); + + @override + bool get isEmpty { + return true; + } + + @override + int get length { + return 1; + } + + @override + PredictionContext getParent(int index) { + return null; + } + + @override + int getReturnState(int index) { + return returnState; + } + + @override + String toString() { + return r'$'; + } +} + +class ArrayPredictionContext extends PredictionContext { + /// Parent can be null only if full ctx mode and we make an array + /// from {@link #EMPTY} and non-empty. We merge {@link #EMPTY} by using null parent and + /// returnState == {@link #EMPTY_RETURN_STATE}. + List parents; + + /// Sorted for merge, no duplicates; if present, + /// {@link #EMPTY_RETURN_STATE} is always last. 
+ List returnStates; + + ArrayPredictionContext.of(SingletonPredictionContext a) + : this([a.parent], [a.returnState]); + + ArrayPredictionContext( + List parents, List returnStates) + : super(PredictionContext.calculateHashCode(parents, returnStates)) { + assert(parents != null && parents.isNotEmpty); + assert(returnStates != null && returnStates.isNotEmpty); +// System.err.println("CREATE ARRAY: "+Arrays.toString(parents)+", "+Arrays.toString(returnStates)); + this.parents = parents; + this.returnStates = returnStates; + } + + @override + bool get isEmpty { + // since EMPTY_RETURN_STATE can only appear in the last position, we + // don't need to verify that size==1 + return returnStates[0] == PredictionContext.EMPTY_RETURN_STATE; + } + + @override + int get length { + return returnStates.length; + } + + @override + PredictionContext getParent(int index) { + return parents[index]; + } + + @override + int getReturnState(int index) { + return returnStates[index]; + } + +// int findReturnState(int returnState) { +// return Arrays.binarySearch(returnStates, returnState); +// } + + @override + bool operator ==(Object o) { + if (identical(this, o)) { + return true; + } else if (o is ArrayPredictionContext) { + if (hashCode != o.hashCode) { + return false; // can't be same if hash is different + } + + final a = o; + return ListEquality().equals(returnStates, a.returnStates) && + ListEquality().equals(parents, a.parents); + } + return false; + } + + @override + String toString() { + if (isEmpty) return '[]'; + final buf = StringBuffer(); + buf.write('['); + for (var i = 0; i < returnStates.length; i++) { + if (i > 0) buf.write(', '); + if (returnStates[i] == PredictionContext.EMPTY_RETURN_STATE) { + buf.write(r'$'); + continue; + } + buf.write(returnStates[i]); + if (parents[i] != null) { + buf.write(' '); + buf.write(parents[i].toString()); + } else { + buf.write('null'); + } + } + buf.write(']'); + return buf.toString(); + } +} diff --git 
a/runtime/Dart/lib/src/recognizer.dart b/runtime/Dart/lib/src/recognizer.dart new file mode 100644 index 000000000..78180b53c --- /dev/null +++ b/runtime/Dart/lib/src/recognizer.dart @@ -0,0 +1,182 @@ +/* + * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ + +import 'vocabulary.dart'; +import 'atn/atn.dart'; +import 'error/error.dart'; +import 'input_stream.dart'; +import 'rule_context.dart'; +import 'token.dart'; +import 'token_factory.dart'; +import 'util/utils.dart'; + +abstract class Recognizer { + static const EOF = -1; + + static final Map> tokenTypeMapCache = {}; + static final Map, Map> ruleIndexMapCache = {}; + final List _listeners = [ConsoleErrorListener.INSTANCE]; + + /// The ATN interpreter used by the recognizer for prediction. + ATNInterpreter interpreter; + int _stateNumber = -1; + + List get ruleNames; + + /// Get the vocabulary used by the recognizer. + /// + /// @return A [Vocabulary] instance providing information about the + /// vocabulary used by the grammar. + Vocabulary get vocabulary; + + /// Get a map from token names to token types. + /// + ///

<p>Used for XPath and tree pattern compilation.</p>

+ Map get tokenTypeMap { + final _vocabulary = vocabulary; + + var result = tokenTypeMapCache[_vocabulary]; + if (result == null) { + result = {}; + for (var i = 0; i <= getATN().maxTokenType; i++) { + final literalName = _vocabulary.getLiteralName(i); + if (literalName != null) { + result[literalName] = i; + } + + final symbolicName = _vocabulary.getSymbolicName(i); + if (symbolicName != null) { + result[symbolicName] = i; + } + } + + result['EOF'] = Token.EOF; + result = Map.unmodifiable(result); + tokenTypeMapCache[_vocabulary] = result; + } + + return result; + } + + /// Get a map from rule names to rule indexes. + /// + ///

<p>Used for XPath and tree pattern compilation.</p>

+ Map get ruleIndexMap { + final _ruleNames = ruleNames; + if (_ruleNames == null) { + throw UnsupportedError( + 'The current recognizer does not provide a list of rule names.'); + } + + var result = ruleIndexMapCache[_ruleNames]; + if (result == null) { + result = Map.unmodifiable(toMap(_ruleNames)); + ruleIndexMapCache[_ruleNames] = result; + } + + return result; + } + + int getTokenType(String tokenName) { + final ttype = tokenTypeMap[tokenName]; + if (ttype != null) return ttype; + return Token.INVALID_TYPE; + } + + /// If this recognizer was generated, it will have a serialized ATN + /// representation of the grammar. + /// + ///

<p>For interpreters, we don't know their serialized ATN despite having + /// created the interpreter from it.</p>

+ String get serializedATN { + throw UnsupportedError('there is no serialized ATN'); + } + + /// For debugging and other purposes, might want the grammar name. + /// Have ANTLR generate an implementation for this method. + String get grammarFileName; + + /// Get the [ATN] used by the recognizer for prediction. + /// + /// @return The [ATN] used by the recognizer for prediction. + ATN getATN(); + + /// If profiling during the parse/lex, this will return DecisionInfo records + /// for each decision in recognizer in a ParseInfo object. + /// + /// @since 4.3 + ParseInfo get parseInfo { + return null; + } + + /// What is the error header, normally line/character position information? */ + String getErrorHeader(RecognitionException e) { + final line = e.offendingToken.line; + final charPositionInLine = e.offendingToken.charPositionInLine; + return 'line $line:$charPositionInLine'; + } + + /// @exception NullPointerException if [listener] is null. + void addErrorListener(ErrorListener listener) { + if (listener == null) { + throw ArgumentError.notNull('listener'); + } + + _listeners.add(listener); + } + + void removeErrorListener(ErrorListener listener) { + _listeners.remove(listener); + } + + void removeErrorListeners() { + _listeners.clear(); + } + + List get errorListeners { + return _listeners; + } + + ErrorListener get errorListenerDispatch { + return ProxyErrorListener(errorListeners); + } + + // subclass needs to override these if there are sempreds or actions + // that the ATN interp needs to execute + bool sempred(RuleContext _localctx, int ruleIndex, int actionIndex) { + return true; + } + + bool precpred(RuleContext localctx, int precedence) { + return true; + } + + void action(RuleContext _localctx, int ruleIndex, int actionIndex) {} + + int get state { + return _stateNumber; + } + + /// Indicate that the recognizer has changed internal state that is + /// consistent with the ATN state passed in. 
This way we always know + /// where we are in the ATN as the parser goes along. The rule + /// context objects form a stack that lets us see the stack of + /// invoking rules. Combine this and we have complete ATN + /// configuration information. + set state(int atnState) { +// System.err.println("setState "+atnState); + _stateNumber = atnState; +// if ( traceATNStates ) _ctx.trace(atnState); + } + + IntStream get inputStream; + + set inputStream(IntStream input); + + TokenFactory get tokenFactory; + + set tokenFactory(TokenFactory input); +} diff --git a/runtime/Dart/lib/src/rule_context.dart b/runtime/Dart/lib/src/rule_context.dart new file mode 100644 index 000000000..1932aa059 --- /dev/null +++ b/runtime/Dart/lib/src/rule_context.dart @@ -0,0 +1,192 @@ +/* + * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ + +import 'atn/atn.dart'; +import 'interval_set.dart'; +import 'parser.dart'; +import 'parser_rule_context.dart'; +import 'recognizer.dart'; +import 'tree/tree.dart'; + +/// A rule context is a record of a single rule invocation. +/// +/// We form a stack of these context objects using the parent +/// pointer. A parent pointer of null indicates that the current +/// context is the bottom of the stack. The ParserRuleContext subclass +/// as a children list so that we can turn this data structure into a +/// tree. +/// +/// The root node always has a null pointer and invokingState of -1. +/// +/// Upon entry to parsing, the first invoked rule function creates a +/// context object (a subclass specialized for that rule such as +/// SContext) and makes it the root of a parse tree, recorded by field +/// Parser._ctx. +/// +/// public final SContext s() throws RecognitionException { +/// SContext _localctx = new SContext(_ctx, getState()); <-- create new node +/// enterRule(_localctx, 0, RULE_s); <-- push it +/// ... 
+/// exitRule(); <-- pop back to _localctx +/// return _localctx; +/// } +/// +/// A subsequent rule invocation of r from the start rule s pushes a +/// new context object for r whose parent points at s and use invoking +/// state is the state with r emanating as edge label. +/// +/// The invokingState fields from a context object to the root +/// together form a stack of rule indication states where the root +/// (bottom of the stack) has a -1 sentinel value. If we invoke start +/// symbol s then call r1, which calls r2, the would look like +/// this: +/// +/// SContext[-1] <- root node (bottom of the stack) +/// R1Context[p] <- p in rule s called r1 +/// R2Context[q] <- q in rule r1 called r2 +/// +/// So the top of the stack, _ctx, represents a call to the current +/// rule and it holds the return address from another rule that invoke +/// to this rule. To invoke a rule, we must always have a current context. +/// +/// The parent contexts are useful for computing lookahead sets and +/// getting error information. +/// +/// These objects are used during parsing and prediction. +/// For the special case of parsers, we use the subclass +/// ParserRuleContext. +/// +/// @see ParserRuleContext +abstract class RuleContext extends RuleNode { + /// What context invoked this rule? + @override + RuleContext parent; + + /// What state invoked the rule associated with this context? + /// The "return address" is the followState of invokingState + /// If parent is null, this should be -1. + int invokingState; + + RuleContext({this.parent, this.invokingState}) { + invokingState = invokingState ?? -1; + } + + int depth() { + var n = 0; + var p = this; + while (p != null) { + p = p.parent; + n++; + } + return n; + } + + /// A context is empty if there is no invoking state; meaning nobody call + /// current context. 
+ bool get isEmpty => invokingState == -1; + + /// satisfy the ParseTree / SyntaxTree interface + @override + Interval get sourceInterval => Interval.INVALID; + + @override + RuleContext get ruleContext => this; + + @override + RuleContext get payload => this; + + /// Return the combined text of all child nodes. This method only considers + /// tokens which have been added to the parse tree. + ///

+ /// Since tokens on hidden channels (e.g. whitespace or comments) are not + /// added to the parse trees, they will not appear in the output of this + /// method. + @override + String get text { + if (childCount == 0) { + return ''; + } + + final builder = StringBuffer(); + for (var i = 0; i < childCount; i++) { + builder.write(getChild(i).text); + } + + return builder.toString(); + } + + int get ruleIndex => -1; + + /// For rule associated with this parse tree internal node, return + /// the outer alternative number used to match the input. Default + /// implementation does not compute nor store this alt num. Create + /// a subclass of ParserRuleContext with backing field and set + /// option contextSuperClass. + /// to set it. + int get altNumber => ATN.INVALID_ALT_NUMBER; + + /// Set the outer alternative number for this context node. Default + /// implementation does nothing to avoid backing field overhead for + /// trees that don't need it. Create + /// a subclass of ParserRuleContext with backing field and set + /// option contextSuperClass. + set altNumber(int altNumber) {} + + @override + ParseTree getChild(int i) { + return null; + } + + @override + int get childCount => 0; + + @override + T accept(ParseTreeVisitor visitor) { + return visitor.visitChildren(this); + } + + /// Print out a whole tree, not just a node, in LISP format + /// (root child1 .. childN). Print just a node if this is a leaf. + /// + @override + String toStringTree({List ruleNames, Parser parser}) { + return Trees.toStringTree(this, ruleNames: ruleNames, recog: parser); + } + + @override + String toString( + {List ruleNames, Recognizer recog, RuleContext stop}) { + ruleNames = ruleNames ?? 
recog?.ruleNames; + final buf = StringBuffer(); + var p = this; + buf.write('['); + while (p != null && p != stop) { + if (ruleNames == null) { + if (!p.isEmpty) { + buf.write(p.invokingState); + } + } else { + final ruleIndex = p.ruleIndex; + final ruleName = ruleIndex >= 0 && ruleIndex < ruleNames.length + ? ruleNames[ruleIndex] + : ruleIndex.toString(); + buf.write(ruleName); + } + + if (p.parent != null && + (ruleNames != null || !p.parent.isEmpty)) { + buf.write(' '); + } + + p = p.parent; + } + + buf.write(']'); + return buf.toString(); + } + + static final EMPTY = ParserRuleContext(); +} diff --git a/runtime/Dart/lib/src/runtime_meta_data.dart b/runtime/Dart/lib/src/runtime_meta_data.dart new file mode 100644 index 000000000..37232e4de --- /dev/null +++ b/runtime/Dart/lib/src/runtime_meta_data.dart @@ -0,0 +1,188 @@ +/* + * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ + +import 'dart:developer'; +import 'dart:math' as math; + +import 'package:logging/logging.dart'; + +/// This class provides access to the current version of the ANTLR 4 runtime +/// library as compile-time and runtime constants, along with methods for +/// checking for matching version numbers and notifying listeners in the case +/// where a version mismatch is detected. +/// +///

+/// The runtime version information is provided by {@link #VERSION} and +/// {@link #getRuntimeVersion()}. Detailed information about these values is +/// provided in the documentation for each member.

+/// +///

+/// The runtime version check is implemented by {@link #checkVersion}. Detailed +/// information about incorporating this call into user code, as well as its use +/// in generated code, is provided in the documentation for the method.

+/// +///

+/// Version strings x.y and x.y.z are considered "compatible" and no error +/// would be generated. Likewise, version strings x.y-SNAPSHOT and x.y.z are +/// considered "compatible" because the major and minor components x.y +/// are the same in each.

+/// +///

+/// To trap any error messages issued by this code, use System.setErr() +/// in your main() startup code. +///

+/// +/// @since 4.3 +class RuntimeMetaData { + /// A compile-time constant containing the current version of the ANTLR 4 + /// runtime library. + /// + ///

+ /// This compile-time constant value allows generated parsers and other + /// libraries to include a literal reference to the version of the ANTLR 4 + /// runtime library the code was compiled against. At each release, we + /// change this value.

+ /// + ///

Version numbers are assumed to have the form + /// + /// major.minor.patch.revision-suffix, + /// + /// with the individual components defined as follows.

+ /// + ///
    + ///
  • major is a required non-negative integer, and is equal to + /// {@code 4} for ANTLR 4.
  • + ///
  • minor is a required non-negative integer.
  • + ///
  • patch is an optional non-negative integer. When + /// patch is omitted, the {@code .} (dot) appearing before it is + /// also omitted.
  • + ///
  • revision is an optional non-negative integer, and may only + /// be included when patch is also included. When revision + /// is omitted, the {@code .} (dot) appearing before it is also omitted.
  • + ///
  • suffix is an optional string. When suffix is + /// omitted, the {@code -} (hyphen-minus) appearing before it is also + /// omitted.
  • + ///
+ static final String VERSION = '4.8'; + + /// Gets the currently executing version of the ANTLR 4 runtime library. + /// + ///

+ /// This method provides runtime access to the [VERSION] field, as + /// opposed to directly referencing the field as a compile-time constant.

+ /// + /// @return The currently executing version of the ANTLR 4 library + static String get runtimeVersion { + return VERSION; + } + + /// This method provides the ability to detect mismatches between the version + /// of ANTLR 4 used to generate a parser, the version of the ANTLR runtime a + /// parser was compiled against, and the version of the ANTLR runtime which + /// is currently executing. + /// + ///

+ /// The version check is designed to detect the following two specific + /// scenarios.

+ /// + ///
    + ///
  • The ANTLR Tool version used for code generation does not match the + /// currently executing runtime version.
  • + ///
  • The ANTLR Runtime version referenced at the time a parser was + /// compiled does not match the currently executing runtime version.
  • + ///
+ /// + ///

+ /// Starting with ANTLR 4.3, the code generator emits a call to this method + /// using two constants in each generated lexer and parser: a hard-coded + /// constant indicating the version of the tool used to generate the parser + /// and a reference to the compile-time constant {@link #VERSION}. At + /// runtime, this method is called during the initialization of the generated + /// parser to detect mismatched versions, and notify the registered listeners + /// prior to creating instances of the parser.

+ /// + ///

+ /// This method does not perform any detection or filtering of semantic + /// changes between tool and runtime versions. It simply checks for a + /// version match and emits an error to stderr if a difference + /// is detected.

+ /// + ///

+ /// Note that some breaking changes between releases could result in other + /// types of runtime exceptions, such as a [LinkageError], prior to + /// calling this method. In these cases, the underlying version mismatch will + /// not be reported here. This method is primarily intended to + /// notify users of potential semantic changes between releases that do not + /// result in binary compatibility problems which would be detected by the + /// class loader. As with semantic changes, changes that break binary + /// compatibility between releases are mentioned in the release notes + /// accompanying the affected release.

+ /// + ///

+ /// Additional note for target developers: The version check + /// implemented by this class is designed to address specific compatibility + /// concerns that may arise during the execution of Java applications. Other + /// targets should consider the implementation of this method in the context + /// of that target's known execution environment, which may or may not + /// resemble the design provided for the Java target.

+ /// + /// @param generatingToolVersion The version of the tool used to generate a parser. + /// This value may be null when called from user code that was not generated + /// by, and does not reference, the ANTLR 4 Tool itself. + /// @param compileTimeVersion The version of the runtime the parser was + /// compiled against. This should always be passed using a direct reference + /// to [VERSION]. + static void checkVersion( + String generatingToolVersion, String compileTimeVersion) { + final runtimeVersion = VERSION; + var runtimeConflictsWithGeneratingTool = false; + var runtimeConflictsWithCompileTimeTool = false; + + if (generatingToolVersion != null) { + runtimeConflictsWithGeneratingTool = + !(runtimeVersion == generatingToolVersion) && + !(getMajorMinorVersion(runtimeVersion) == + getMajorMinorVersion(generatingToolVersion)); + } + + runtimeConflictsWithCompileTimeTool = + !(runtimeVersion == compileTimeVersion) && + !(getMajorMinorVersion(runtimeVersion) == + getMajorMinorVersion(compileTimeVersion)); + + if (runtimeConflictsWithGeneratingTool) { + log('ANTLR Tool version $generatingToolVersion used for code generation does not match the current runtime version $runtimeVersion', + level: Level.SEVERE.value); + } + if (runtimeConflictsWithCompileTimeTool) { + log('ANTLR Runtime version $compileTimeVersion used for parser compilation does not match the current runtime version $runtimeVersion', + level: Level.SEVERE.value); + } + } + + /// Gets the major and minor version numbers from a version string. For + /// details about the syntax of the input [version]. + /// E.g., from x.y.z return x.y. + /// + /// @param version The complete version string. + /// @return A string of the form major.minor containing + /// only the major and minor components of the version string. + static String getMajorMinorVersion(String version) { + final firstDot = version.indexOf('.'); + final secondDot = firstDot >= 0 ? 
version.indexOf('.', firstDot + 1) : -1; + final firstDash = version.indexOf('-'); + var referenceLength = version.length; + if (secondDot >= 0) { + referenceLength = math.min(referenceLength, secondDot); + } + + if (firstDash >= 0) { + referenceLength = math.min(referenceLength, firstDash); + } + + return version.substring(0, referenceLength); + } +} diff --git a/runtime/Dart/lib/src/token.dart b/runtime/Dart/lib/src/token.dart new file mode 100644 index 000000000..49c1b43cb --- /dev/null +++ b/runtime/Dart/lib/src/token.dart @@ -0,0 +1,431 @@ +/* + * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ + +import 'input_stream.dart'; +import 'interval_set.dart'; +import 'misc/pair.dart'; +import 'recognizer.dart'; +import 'token_source.dart'; + +/// A token has properties: text, type, line, character position in the line +/// (so we can ignore tabs), token channel, index, and source from which +/// we obtained this token. +abstract class Token { + static const int INVALID_TYPE = 0; + + /// During lookahead operations, this "token" signifies we hit rule end ATN state + /// and did not follow it despite needing to. + static const int EPSILON = -2; + + static const int MIN_USER_TOKEN_TYPE = 1; + + static const int EOF = IntStream.EOF; + + /// All tokens go to the parser (unless skip() is called in that rule) + /// on a particular "channel". The parser tunes to a particular channel + /// so that whitespace etc... can go to the parser on a "hidden" channel. + static const int DEFAULT_CHANNEL = 0; + + /// Anything on different channel than DEFAULT_CHANNEL is not parsed + /// by parser. + static const int HIDDEN_CHANNEL = 1; + + /// This is the minimum constant value which can be assigned to a + /// user-defined token channel. + /// + ///

+ /// The non-negative numbers less than {@link #MIN_USER_CHANNEL_VALUE} are + /// assigned to the predefined channels {@link #DEFAULT_CHANNEL} and + /// {@link #HIDDEN_CHANNEL}.

+ /// + /// @see Token#getChannel() + static const int MIN_USER_CHANNEL_VALUE = 2; + + /// Get the text of the token. + String get text; + + /// Get the token type of the token */ + int get type; + + /// The line number on which the 1st character of this token was matched, + /// line=1..n + int get line; + + /// The index of the first character of this token relative to the + /// beginning of the line at which it occurs, 0..n-1 + int get charPositionInLine; + + /// Return the channel this token. Each token can arrive at the parser + /// on a different channel, but the parser only "tunes" to a single channel. + /// The parser ignores everything not on DEFAULT_CHANNEL. + int get channel; + + /// An index from 0..n-1 of the token object in the input stream. + /// This must be valid in order to print token streams and + /// use TokenRewriteStream. + /// + /// Return -1 to indicate that this token was conjured up since + /// it doesn't have a valid index. + int get tokenIndex; + + /// The starting character index of the token + /// This method is optional; return -1 if not implemented. + int get startIndex; + + /// The last character index of the token. + /// This method is optional; return -1 if not implemented. + int get stopIndex; + + /// Gets the [TokenSource] which created this token. + TokenSource get tokenSource; + + /// Gets the [CharStream] from which this token was derived. + CharStream get inputStream; +} + +abstract class WritableToken extends Token { + set text(String text); + + set type(int ttype); + + set line(int line); + + set charPositionInLine(int pos); + + set channel(int channel); + + set tokenIndex(int index); +} + +class CommonToken extends WritableToken { + /// An empty [Pair] which is used as the default value of + /// {@link #source} for tokens that do not have a source. 
+ static const Pair EMPTY_SOURCE = + Pair(null, null); + + @override + int type; + + @override + int line; + + @override + int charPositionInLine = -1; // set to invalid position + + @override + int channel = Token.DEFAULT_CHANNEL; + + /// These properties share a field to reduce the memory footprint of + /// [CommonToken]. Tokens created by a [CommonTokenFactory] from + /// the same source and input stream share a reference to the same + /// [Pair] containing these values.

+ Pair source; + + /// This is the backing field for {@link #getText} when the token text is + /// explicitly set in the constructor or via {@link #setText}. + /// + /// @see #getText() + String _text; + + @override + int tokenIndex = -1; + + @override + int startIndex; + + @override + int stopIndex; + + /// Constructs a new [CommonToken] with the specified token type and + /// text. + /// + /// @param type The token type. + /// @param text The text of the token. + CommonToken(this.type, + {this.source = EMPTY_SOURCE, + this.channel = Token.DEFAULT_CHANNEL, + this.startIndex, + this.stopIndex, + text}) { + _text = text; + if (source.a != null) { + line = source.a.line; + charPositionInLine = source.a.charPositionInLine; + } + } + + /// Constructs a new [CommonToken] as a copy of another [Token]. + /// + ///

+ /// If [oldToken] is also a [CommonToken] instance, the newly + /// constructed token will share a reference to the {@link #text} field and + /// the [Pair] stored in {@link #source}. Otherwise, {@link #text} will + /// be assigned the result of calling {@link #getText}, and {@link #source} + /// will be constructed from the result of {@link Token#getTokenSource} and + /// {@link Token#getInputStream}.

+ /// + /// @param oldToken The token to copy. + CommonToken.copy(Token oldToken) { + type = oldToken.type; + line = oldToken.line; + tokenIndex = oldToken.tokenIndex; + charPositionInLine = oldToken.charPositionInLine; + channel = oldToken.channel; + startIndex = oldToken.startIndex; + stopIndex = oldToken.stopIndex; + + if (oldToken is CommonToken) { + _text = oldToken.text; + source = oldToken.source; + } else { + _text = oldToken.text; + source = Pair( + oldToken.tokenSource, oldToken.inputStream); + } + } + + @override + String get text { + if (_text != null) { + return _text; + } + + final input = inputStream; + if (input == null) return null; + final n = input.size; + if (startIndex < n && stopIndex < n) { + return input.getText(Interval.of(startIndex, stopIndex)); + } else { + return ''; + } + } + + /// Explicitly set the text for this token. If {code text} is not + /// null, then {@link #getText} will return this value rather than + /// extracting the text from the input. + /// + /// @param text The explicit text of the token, or null if the text + /// should be obtained from the input along with the start and stop indexes + /// of the token. + @override + set text(String text) { + _text = text; + } + + @override + TokenSource get tokenSource { + return source.a; + } + + @override + CharStream get inputStream { + return source.b; + } + + @override + String toString([Recognizer r]) { + var txt = text; + if (txt != null) { + txt = txt + .replaceAll('\n', r'\n') + .replaceAll('\r', r'\r') + .replaceAll('\t', r'\t'); + } else { + txt = ''; + } + return "[@$tokenIndex,$startIndex:$stopIndex='$txt',<$type>" + + (channel > 0 ? ',channel=$channel' : '') + + ',$line:$charPositionInLine]'; + } +} + +/// A [Token] object representing an entire subtree matched by a parser +/// rule; e.g., {@code }. These tokens are created for [TagChunk] +/// chunks where the tag corresponds to a parser rule. 
class RuleTagToken implements Token {
  /// The name of the parser rule associated with this rule tag.
  final String ruleName;

  /// The token type for the current token. This is the token type assigned
  /// to the bypass alternative for the rule during ATN deserialization.
  final int bypassTokenType;

  /// The name of the label associated with the rule tag, or null if this is
  /// an unlabeled rule tag.
  final String label;

  /// Constructs a new instance of [RuleTagToken] with the specified rule
  /// name, bypass token type, and label.
  ///
  /// @param ruleName The name of the parser rule this rule tag matches.
  /// @param bypassTokenType The bypass token type assigned to the parser rule.
  /// @param label The label associated with the rule tag, or null if
  /// the rule tag is unlabeled.
  ///
  /// @exception ArgumentError if [ruleName] is null or empty.
  RuleTagToken(this.ruleName, this.bypassTokenType, [this.label]) {
    if (ruleName == null || ruleName.isEmpty) {
      throw ArgumentError.value(
          ruleName, 'ruleName', 'cannot be null or empty.');
    }
  }

  /// Rule tag tokens are always placed on the [Token.DEFAULT_CHANNEL].
  @override
  int get channel {
    return Token.DEFAULT_CHANNEL;
  }

  /// This method returns the rule tag formatted with `<` and `>`
  /// delimiters.
  @override
  String get text {
    if (label != null) {
      return '<' + label + ':' + ruleName + '>';
    }

    return '<' + ruleName + '>';
  }

  /// Rule tag tokens have types assigned according to the rule bypass
  /// transitions created during ATN deserialization.
  @override
  int get type {
    return bypassTokenType;
  }

  /// The implementation for [RuleTagToken] always returns 0.
  @override
  int get line {
    return 0;
  }

  /// The implementation for [RuleTagToken] always returns -1.
  @override
  int get charPositionInLine {
    return -1;
  }

  /// The implementation for [RuleTagToken] always returns -1.
  @override
  int get tokenIndex {
    return -1;
  }

  /// The implementation for [RuleTagToken] always returns -1.
  @override
  int get startIndex {
    return -1;
  }

  /// The implementation for [RuleTagToken] always returns -1.
  @override
  int get stopIndex {
    return -1;
  }

  /// The implementation for [RuleTagToken] always returns null.
  @override
  TokenSource get tokenSource {
    return null;
  }

  /// The implementation for [RuleTagToken] always returns null.
  @override
  CharStream get inputStream {
    return null;
  }

  /// The implementation for [RuleTagToken] returns a string of the form
  /// `ruleName:bypassTokenType`.
  @override
  String toString() {
    return ruleName + ':$bypassTokenType';
  }
}

/// A [Token] object representing a token of a particular type; e.g.,
/// `<ID>`. These tokens are created for [TagChunk] chunks where the
/// tag corresponds to a lexer rule or token type.
class TokenTagToken extends CommonToken {
  /// The token name.
  final String tokenName;

  /// The name of the label associated with the token tag, or null if this
  /// is an unlabeled token tag.
  final String label;

  /// Constructs a new instance of [TokenTagToken] with the specified
  /// token name, type, and label.
  ///
  /// @param tokenName The token name.
  /// @param type The token type.
  /// @param label The label associated with the token tag, or null if
  /// the token tag is unlabeled.
  TokenTagToken(this.tokenName, type, [this.label]) : super(type);

  /// The implementation for [TokenTagToken] returns the token tag
  /// formatted with `<` and `>` delimiters.
  @override
  String get text {
    if (label != null) {
      return '<' + label + ':' + tokenName + '>';
    }

    return '<' + tokenName + '>';
  }

  /// The implementation for [TokenTagToken] returns a string of the form
  /// `tokenName:type`.
  @override
  String toString([recognizer]) {
    return tokenName + ':$type';
  }
}
diff --git a/runtime/Dart/lib/src/token_factory.dart b/runtime/Dart/lib/src/token_factory.dart
new file mode 100644
index 000000000..cbfe820f2
--- /dev/null
+++ b/runtime/Dart/lib/src/token_factory.dart
@@ -0,0 +1,88 @@
/*
 * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
 * Use of this file is governed by the BSD 3-clause license that
 * can be found in the LICENSE.txt file in the project root.
 */

import 'input_stream.dart';
import 'interval_set.dart';
import 'misc/pair.dart';
import 'token.dart';
import 'token_source.dart';

/// The default mechanism for creating tokens. It's used by default in Lexer
/// and the error handling strategy (to create missing tokens). Notifying the
/// parser of a new factory means that it notifies its token source and error
/// strategy.
abstract class TokenFactory<Symbol extends Token> {
  /// This is the method used to create tokens in the lexer and in the
  /// error handling strategy. If text!=null, then the start and stop
  /// positions are wiped to -1 in the text override is set in the
  /// CommonToken.
  Symbol create(int type, String text,
      [Pair<TokenSource, CharStream> source,
      int channel,
      int start,
      int stop,
      int line,
      int charPositionInLine]);
}

/// This default implementation of [TokenFactory] creates
/// [CommonToken] objects.
class CommonTokenFactory implements TokenFactory<CommonToken> {
  /// The default [CommonTokenFactory] instance.
  ///
+ /// This token factory does not explicitly copy token text when constructing + /// tokens.

+ static final TokenFactory DEFAULT = CommonTokenFactory(); + + /// Indicates whether {@link CommonToken#setText} should be called after + /// constructing tokens to explicitly set the text. This is useful for cases + /// where the input stream might not be able to provide arbitrary substrings + /// of text from the input after the lexer creates a token (e.g. the + /// implementation of {@link CharStream#getText} in + /// [UnbufferedCharStream] throws an + /// [UnsupportedOperationException]). Explicitly setting the token text + /// allows {@link Token#getText} to be called at any time regardless of the + /// input stream implementation. + /// + ///

+ /// The default value is [false] to avoid the performance and memory + /// overhead of copying text for every token unless explicitly requested.

+ final bool copyText; + + /// Constructs a [CommonTokenFactory] with the specified value for + /// {@link #copyText}. + /// + ///

+ /// When [copyText] is [false], the {@link #DEFAULT} instance + /// should be used instead of constructing a new instance.

+ /// + /// @param copyText The value for {@link #copyText}. + CommonTokenFactory([this.copyText = false]); + + @override + CommonToken create(int type, String text, + [Pair source, + int channel, + int start, + int stop, + int line, + int charPositionInLine]) { + if (source == null) { + return CommonToken(type, text: text); + } + + final t = CommonToken(type, + source: source, channel: channel, startIndex: start, stopIndex: stop); + t.line = line; + t.charPositionInLine = charPositionInLine; + if (text != null) { + t.text = text; + } else if (copyText && source.b != null) { + t.text = source.b.getText(Interval.of(start, stop)); + } + + return t; + } +} diff --git a/runtime/Dart/lib/src/token_source.dart b/runtime/Dart/lib/src/token_source.dart new file mode 100644 index 000000000..05cfabe03 --- /dev/null +++ b/runtime/Dart/lib/src/token_source.dart @@ -0,0 +1,241 @@ +/* + * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ + +import 'dart:math'; + +import 'input_stream.dart'; +import 'misc/pair.dart'; +import 'token.dart'; +import 'token_factory.dart'; + +/// A source of tokens must provide a sequence of tokens via {@link #nextToken()} +/// and also must reveal it's source of characters; [CommonToken]'s text is +/// computed from a [CharStream]; it only store indices into the char +/// stream. +/// +///

/// Errors from the lexer are never passed to the parser. Either you want to
/// keep going or you do not upon token recognition error. If you do not want
/// to continue lexing then you do not want to continue parsing. Just throw
/// an exception not under [RecognitionException] and Dart will naturally
/// toss you all the way out of the recognizers. If you want to continue
/// lexing then you should not throw an exception to the parser--it has
/// already requested a token. Keep lexing until you get a valid one. Just
/// report errors and keep going, looking for a valid token.
abstract class TokenSource {
  /// Return a [Token] object from your input stream (usually a
  /// [CharStream]). Do not fail/return upon lexing error; keep chewing
  /// on the characters until you get a good one; errors are not passed
  /// through to the parser.
  Token nextToken();

  /// The line number for the current position in the input stream, or
  /// 0 if the current token source does not track line numbers. The first
  /// line in the input is line 1.
  int get line;

  /// The index into the current line for the current position in the input
  /// stream, or -1 if the current token source does not track character
  /// positions. The first character on a line has position 0.
  int get charPositionInLine;

  /// The [CharStream] associated with the current position in the input,
  /// or null if no input stream is available for the token source.
  CharStream get inputStream;

  /// The name of the underlying input source. This method returns a
  /// non-null, non-empty string. If such a name is not known, this method
  /// returns [IntStream.UNKNOWN_SOURCE_NAME].
  String get sourceName;

  /// Set the [TokenFactory] this token source should use for creating
  /// [Token] objects from the input.
  ///
  /// @param factory The [TokenFactory] to use for creating tokens.
  set tokenFactory(TokenFactory factory);

  /// The [TokenFactory] this token source is currently using for creating
  /// [Token] objects from the input.
  TokenFactory get tokenFactory;
}

/// Provides an implementation of [TokenSource] as a wrapper around a list
/// of [Token] objects.
///
/// If the final token in the list is a [Token.EOF] token, it will be used
/// as the EOF token for every call to [nextToken] after the end of the
/// list is reached. Otherwise, an EOF token will be created.
class ListTokenSource implements TokenSource {
  /// The wrapped collection of [Token] objects to return.
  final List<Token> tokens;

  /// Optional explicit source name; see [sourceName].
  final String _sourceName;

  /// The index into [tokens] of the token to return by the next call to
  /// [nextToken]. The end of the input is indicated by this value being
  /// greater than or equal to the number of items in [tokens].
  ///
  /// Initialized to 0 so the first [nextToken] call starts at the head of
  /// the list (an uninitialized field would be null and crash the first
  /// comparison in pre-null-safety Dart).
  int i = 0;

  /// This field caches the EOF token for the token source.
  Token eofToken;

  /// The factory used to synthesize the EOF token when the list runs out.
  @override
  TokenFactory tokenFactory = CommonTokenFactory.DEFAULT;

  /// Constructs a new [ListTokenSource] instance from the specified
  /// collection of [Token] objects and an optional source name.
  ///
  /// @param tokens The collection of [Token] objects to provide as a
  /// [TokenSource].
  /// @param sourceName The name of the [TokenSource]. If this value is
  /// null, [sourceName] will attempt to infer the name from the next
  /// [Token] (or the previous token if the end of the input has been
  /// reached).
  ///
  /// @exception ArgumentError if [tokens] is null.
  ListTokenSource(this.tokens, [this._sourceName]) {
    if (tokens == null) {
      throw ArgumentError.notNull('tokens');
    }
  }

  @override
  int get charPositionInLine {
    if (i < tokens.length) {
      return tokens[i].charPositionInLine;
    } else if (eofToken != null) {
      return eofToken.charPositionInLine;
    } else if (tokens.isNotEmpty) {
      // have to calculate the result from the line/column of the previous
      // token, along with the text of the token.
      final lastToken = tokens[tokens.length - 1];
      final tokenText = lastToken.text;
      if (tokenText != null) {
        final lastNewLine = tokenText.lastIndexOf('\n');
        if (lastNewLine >= 0) {
          return tokenText.length - lastNewLine - 1;
        }
      }

      return lastToken.charPositionInLine +
          lastToken.stopIndex -
          lastToken.startIndex +
          1;
    }

    // only reach this if tokens is empty, meaning EOF occurs at the first
    // position in the input
    return 0;
  }

  @override
  Token nextToken() {
    if (i >= tokens.length) {
      if (eofToken == null) {
        var start = -1;
        if (tokens.isNotEmpty) {
          final previousStop = tokens[tokens.length - 1].stopIndex;
          if (previousStop != -1) {
            start = previousStop + 1;
          }
        }

        final stop = max(-1, start - 1);
        eofToken = tokenFactory.create(
            Token.EOF,
            'EOF',
            Pair<TokenSource, CharStream>(this, inputStream),
            Token.DEFAULT_CHANNEL,
            start,
            stop,
            line,
            charPositionInLine);
      }

      return eofToken;
    }

    final t = tokens[i];
    if (i == tokens.length - 1 && t.type == Token.EOF) {
      eofToken = t;
    }

    i++;
    return t;
  }

  @override
  int get line {
    if (i < tokens.length) {
      return tokens[i].line;
    } else if (eofToken != null) {
      return eofToken.line;
    } else if (tokens.isNotEmpty) {
      // have to calculate the result from the line/column of the previous
      // token, along with the text of the token.
      final lastToken = tokens[tokens.length - 1];
      var line = lastToken.line;

      final tokenText = lastToken.text;
      if (tokenText != null) {
        for (var i = 0; i < tokenText.length; i++) {
          if (tokenText[i] == '\n') {
            line++;
          }
        }
      }

      // if no text is available, assume the token did not contain any
      // newline characters.
      return line;
    }

    // only reach this if tokens is empty, meaning EOF occurs at the first
    // position in the input
    return 1;
  }

  @override
  CharStream get inputStream {
    if (i < tokens.length) {
      return tokens[i].inputStream;
    } else if (eofToken != null) {
      return eofToken.inputStream;
    } else if (tokens.isNotEmpty) {
      return tokens[tokens.length - 1].inputStream;
    }

    // no input stream information is available
    return null;
  }

  /// The name of the input source. When no explicit name was supplied at
  /// construction time, the name is inferred from the input stream of the
  /// current (or last) token; falls back to 'List'.
  @override
  String get sourceName => _sourceName ?? inputStream?.sourceName ?? 'List';
}
diff --git a/runtime/Dart/lib/src/token_stream.dart b/runtime/Dart/lib/src/token_stream.dart
new file mode 100644
index 000000000..e434b54a7
--- /dev/null
+++ b/runtime/Dart/lib/src/token_stream.dart
@@ -0,0 +1,627 @@
/*
 * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
 * Use of this file is governed by the BSD 3-clause license that
 * can be found in the LICENSE.txt file in the project root.
 */
import 'input_stream.dart';
import 'interval_set.dart';
import 'lexer.dart';
import 'rule_context.dart';
import 'token.dart';
import 'token_source.dart';

/// An [IntStream] whose symbols are [Token] instances.
abstract class TokenStream extends IntStream {
  /// Get the [Token] instance associated with the value returned by
  /// `LA(k)`. This method has the same pre- and post-conditions as
  /// [IntStream.LA]. In addition, when the preconditions of this method
  /// are met, the return value is non-null and the value of
  /// `LT(k).type == LA(k)`.
  ///
  /// @see IntStream#LA
  Token LT(int k);

  /// Gets the [Token] at the specified [index] in the stream.
When + /// the preconditions of this method are met, the return value is non-null. + /// + ///

The preconditions for this method are the same as the preconditions of + /// {@link IntStream#seek}. If the behavior of {@code seek(index)} is + /// unspecified for the current state and given [index], then the + /// behavior of this method is also unspecified.

+ /// + ///

The symbol referred to by [index] differs from {@code seek()} only + /// in the case of filtering streams where [index] lies before the end + /// of the stream. Unlike {@code seek()}, this method does not adjust + /// [index] to point to a non-ignored symbol.

+ /// + /// @throws IllegalArgumentException if {code index} is less than 0 + /// @throws UnsupportedOperationException if the stream does not support + /// retrieving the token at the specified index + Token get(int index); + + /// Gets the underlying [TokenSource] which provides tokens for this + /// stream. + TokenSource get tokenSource; + + /// Return the text of all tokens within the specified [interval]. This + /// method behaves like the following code (including potential exceptions + /// for violating preconditions of {@link #get}, but may be optimized by the + /// specific implementation. + /// + ///
+  /// TokenStream stream = ...;
+  /// String text = "";
+  /// for (int i = interval.a; i <= interval.b; i++) {
+  ///   text += stream.get(i).getText();
+  /// }
+  /// 
+ /// + ///
+  /// TokenStream stream = ...;
+  /// String text = stream.getText(new Interval(0, stream.length));
+  /// 
+ /// + ///
+  /// TokenStream stream = ...;
+  /// String text = stream.getText(ctx.getSourceInterval());
+  /// 
+ /// + /// @param interval The interval of tokens within this stream to get text + /// for. + /// @return The text of all tokens / within the specified interval in this + /// stream. + String getText([Interval interval]); + + String get text; + + /// Return the text of all tokens in the source interval of the specified + /// context. This method behaves like the following code, including potential + /// exceptions from the call to {@link #getText(Interval)}, but may be + /// optimized by the specific implementation. + /// + ///

If {@code ctx.getSourceInterval()} does not return a valid interval of + /// tokens provided by this stream, the behavior is unspecified.

+ /// + /// @param ctx The context providing the source interval of tokens to get + /// text for. + /// @return The text of all tokens within the source interval of [ctx]. + String getTextFromCtx(RuleContext ctx); + + /// Return the text of all tokens in this stream between [start] and + /// [stop] (inclusive). + /// + ///

If the specified [start] or [stop] token was not provided by + /// this stream, or if the [stop] occurred before the [start] + /// token, the behavior is unspecified.

+ /// + ///

For streams which ensure that the {@link Token#getTokenIndex} method is + /// accurate for all of its provided tokens, this method behaves like the + /// following code. Other streams may implement this method in other ways + /// provided the behavior is consistent with this at a high level.

+ /// + ///
+  /// TokenStream stream = ...;
+  /// String text = "";
+  /// for (int i = start.getTokenIndex(); i <= stop.getTokenIndex(); i++) {
+  ///   text += stream.get(i).getText();
+  /// }
+  /// 
+ /// + /// @param start The first token in the interval to get text for. + /// @param stop The last token in the interval to get text for (inclusive). + /// @return The text of all tokens lying between the specified [start] + /// and [stop] tokens. + /// + /// @throws UnsupportedOperationException if this stream does not support + /// this method for the specified tokens + String getTextRange(Token start, Token stop); +} + +/// This implementation of [TokenStream] loads tokens from a +/// [TokenSource] on-demand, and places the tokens in a buffer to provide +/// access to any previous token by index. +/// +///

+/// This token stream ignores the value of {@link Token#getChannel}. If your +/// parser requires the token stream filter tokens to only those on a particular +/// channel, such as {@link Token#DEFAULT_CHANNEL} or +/// {@link Token#HIDDEN_CHANNEL}, use a filtering token stream such a +/// [CommonTokenStream].

class BufferedTokenStream implements TokenStream {
  /// The [TokenSource] from which tokens for this stream are fetched.
  TokenSource _tokenSource;

  /// A collection of all tokens fetched from the token source. The list is
  /// considered a complete view of the input once [fetchedEOF] is set
  /// to true.
  List<Token> tokens = <Token>[];

  /// The index into [tokens] of the current token (the next token to
  /// [consume]). `tokens[p]` should be `LT(1)`.
  ///
  /// This field is set to -1 when the stream is first constructed or when
  /// [tokenSource] is set, indicating that the first token has not yet
  /// been fetched from the token source. For additional information, see
  /// the documentation of [IntStream] for a description of Initializing
  /// Methods.
  int p = -1;

  /// Indicates whether the [Token.EOF] token has been fetched from
  /// [tokenSource] and added to [tokens]. This field improves performance
  /// for the following cases:
  ///
  /// * [consume]: The lookahead check in [consume] to prevent consuming
  ///   the EOF symbol is optimized by checking the values of [fetchedEOF]
  ///   and [p] instead of calling [LA].
  /// * [fetch]: The check to prevent adding multiple EOF symbols into
  ///   [tokens] is trivial with this field.
  bool fetchedEOF = false;

  BufferedTokenStream(this._tokenSource) {
    if (_tokenSource == null) {
      throw ArgumentError.notNull('tokenSource');
    }
  }

  @override
  int get index => p;

  @override
  int mark() {
    return 0;
  }

  @override
  void release(int marker) {
    // no resources to release
  }

  @override
  void seek(int index) {
    lazyInit();
    p = adjustSeekIndex(index);
  }

  @override
  int get size {
    return tokens.length;
  }

  @override
  void consume() {
    bool skipEofCheck;
    if (p >= 0) {
      if (fetchedEOF) {
        // the last token in tokens is EOF. skip check if p indexes any
        // fetched token except the last.
        skipEofCheck = p < tokens.length - 1;
      } else {
        // no EOF token in tokens. skip check if p indexes a fetched token.
        skipEofCheck = p < tokens.length;
      }
    } else {
      // not yet initialized
      skipEofCheck = false;
    }

    if (!skipEofCheck && LA(1) == IntStream.EOF) {
      throw StateError('cannot consume EOF');
    }

    if (sync(p + 1)) {
      p = adjustSeekIndex(p + 1);
    }
  }

  /// Make sure index [i] in tokens has a token.
  ///
  /// @return true if a token is located at index [i], otherwise false.
  bool sync(int i) {
    assert(i >= 0);
    final n = i - tokens.length + 1; // how many more elements we need?
    if (n > 0) {
      final fetched = fetch(n);
      return fetched >= n;
    }

    return true;
  }

  /// Add [n] elements to the buffer.
  ///
  /// @return The actual number of elements added to the buffer.
  int fetch(int n) {
    if (fetchedEOF) {
      return 0;
    }

    for (var i = 0; i < n; i++) {
      final t = tokenSource.nextToken();
      if (t is WritableToken) {
        t.tokenIndex = tokens.length;
      }
      tokens.add(t);
      if (t.type == Token.EOF) {
        fetchedEOF = true;
        return i + 1;
      }
    }

    return n;
  }

  @override
  Token get(int i) {
    if (i < 0 || i >= tokens.length) {
      throw RangeError.index(i, tokens);
    }
    return tokens[i];
  }

  /// Get all tokens from start..stop inclusively.
  List<Token> getRange(int start, [int stop]) {
    if (start < 0 || stop < 0) return null;
    lazyInit();
    final subset = <Token>[];
    if (stop >= tokens.length) stop = tokens.length - 1;
    for (var i = start; i <= stop; i++) {
      final t = tokens[i];
      if (t.type == Token.EOF) break;
      subset.add(t);
    }
    return subset;
  }

  @override
  int LA(int i) {
    return LT(i).type;
  }

  Token LB(int k) {
    if ((p - k) < 0) return null;
    return tokens[p - k];
  }

  @override
  Token LT(int k) {
    lazyInit();
    if (k == 0) return null;
    if (k < 0) return LB(-k);

    final i = p + k - 1;
    sync(i);
    if (i >= tokens.length) {
      // return EOF token
      // EOF must be last token
      return tokens.last;
    }
    return tokens[i];
  }

  /// Allows derived classes to modify the behavior of operations which
  /// change the current stream position by adjusting the target token
  /// index of a seek operation. The default implementation simply returns
  /// [i]. If an exception is thrown in this method, the current stream
  /// index should not be changed.
  ///
  /// For example, [CommonTokenStream] overrides this method to ensure that
  /// the seek target is always an on-channel token.
  ///
  /// @param i The target token index.
  /// @return The adjusted target token index.
  int adjustSeekIndex(int i) {
    return i;
  }

  void lazyInit() {
    if (p == -1) {
      setup();
    }
  }

  void setup() {
    sync(0);
    p = adjustSeekIndex(0);
  }

  @override
  TokenSource get tokenSource => _tokenSource;

  /// Reset this token stream by setting its token source.
  set tokenSource(TokenSource tokenSource) {
    _tokenSource = tokenSource;
    tokens.clear();
    p = -1;
    fetchedEOF = false;
  }

  /// Given a start and stop index, return a list of all tokens whose type
  /// is in [types]. Return null if no tokens were found. This method looks
  /// at both on and off channel tokens.
  List<Token> getTokens([int start, int stop, Set<int> types]) {
    if (start == null && stop == null) {
      return tokens;
    }
    lazyInit();
    if (start < 0 || start >= tokens.length) {
      throw RangeError.index(start, tokens);
    } else if (stop < 0 || stop >= tokens.length) {
      throw RangeError.index(stop, tokens);
    }
    if (start > stop) return null;

    List<Token> filteredTokens = <Token>[];
    for (var i = start; i <= stop; i++) {
      final t = tokens[i];
      if (types == null || types.contains(t.type)) {
        filteredTokens.add(t);
      }
    }
    if (filteredTokens.isEmpty) {
      filteredTokens = null;
    }
    return filteredTokens;
  }

  /// Given a starting index, return the index of the next token on channel.
  /// Return [i] if `tokens[i]` is on channel. Return the index of the EOF
  /// token if there are no tokens on channel between [i] and EOF.
  int nextTokenOnChannel(int i, int channel) {
    sync(i);
    if (i >= size) {
      return size - 1;
    }

    var token = tokens[i];
    while (token.channel != channel) {
      if (token.type == Token.EOF) {
        return i;
      }

      i++;
      sync(i);
      token = tokens[i];
    }

    return i;
  }

  /// Given a starting index, return the index of the previous token on
  /// channel. Return [i] if `tokens[i]` is on channel. Return -1 if there
  /// are no tokens on channel between [i] and 0.
  ///
  /// If [i] specifies an index at or after the EOF token, the EOF token
  /// index is returned. This is due to the fact that the EOF token is
  /// treated as though it were on every channel.
  int previousTokenOnChannel(int i, int channel) {
    sync(i);
    if (i >= size) {
      // the EOF token is on every channel
      return size - 1;
    }

    while (i >= 0) {
      final token = tokens[i];
      if (token.type == Token.EOF || token.channel == channel) {
        return i;
      }

      i--;
    }

    return i;
  }

  /// Collect all tokens on the specified channel to the right of
  /// the current token up until we see a token on DEFAULT_TOKEN_CHANNEL or
  /// EOF. If channel is -1, find any non-default channel token.
  List<Token> getHiddenTokensToRight(int tokenIndex, [int channel = -1]) {
    lazyInit();
    if (tokenIndex < 0 || tokenIndex >= tokens.length) {
      throw RangeError.index(tokenIndex, tokens);
    }

    final nextOnChannel =
        nextTokenOnChannel(tokenIndex + 1, Lexer.DEFAULT_TOKEN_CHANNEL);
    // if none onchannel to right, nextOnChannel=-1 so set to = last token
    final to = nextOnChannel == -1 ? size - 1 : nextOnChannel;
    final from = tokenIndex + 1;

    return filterForChannel(from, to, channel);
  }

  /// Collect all tokens on the specified channel to the left of
  /// the current token up until we see a token on DEFAULT_TOKEN_CHANNEL.
  /// If channel is -1, find any non-default channel token.
  List<Token> getHiddenTokensToLeft(int tokenIndex, [int channel = -1]) {
    lazyInit();
    if (tokenIndex < 0 || tokenIndex >= tokens.length) {
      throw RangeError.index(tokenIndex, tokens);
    }

    if (tokenIndex == 0) {
      // obviously no tokens can appear before the first token
      return null;
    }

    final prevOnChannel =
        previousTokenOnChannel(tokenIndex - 1, Lexer.DEFAULT_TOKEN_CHANNEL);
    if (prevOnChannel == tokenIndex - 1) return null;
    // if none onchannel to left, prevOnChannel=-1 then from=0
    final from = prevOnChannel + 1;
    final to = tokenIndex - 1;

    return filterForChannel(from, to, channel);
  }

  /// Return the tokens in [from]..[to] (inclusive) that are on [channel],
  /// or any non-default channel when [channel] is -1. Returns null when no
  /// matching token exists.
  List<Token> filterForChannel(int from, int to, int channel) {
    final hidden = <Token>[];
    for (var i = from; i <= to; i++) {
      final t = tokens[i];
      if (channel == -1) {
        if (t.channel != Lexer.DEFAULT_TOKEN_CHANNEL) hidden.add(t);
      } else {
        if (t.channel == channel) hidden.add(t);
      }
    }
    if (hidden.isEmpty) return null;
    return hidden;
  }

  @override
  String get sourceName => tokenSource.sourceName;

  @override
  String get text => getText();

  @override
  String getText([Interval interval]) {
    // Default to the text of all tokens in this buffer.
    interval = interval ?? Interval.of(0, size - 1);
    final start = interval.a;
    var stop = interval.b;
    if (start < 0 || stop < 0) return '';
    fill();
    if (stop >= tokens.length) stop = tokens.length - 1;

    final buf = StringBuffer();
    for (var i = start; i <= stop; i++) {
      final t = tokens[i];
      if (t.type == Token.EOF) break;
      buf.write(t.text);
    }
    return buf.toString();
  }

  @override
  String getTextFromCtx(RuleContext ctx) {
    return getText(ctx.sourceInterval);
  }

  @override
  String getTextRange(Token start, Token stop) {
    if (start != null && stop != null) {
      return getText(Interval.of(start.tokenIndex, stop.tokenIndex));
    }

    return '';
  }

  /// Get all tokens from lexer until EOF.
  void fill() {
    lazyInit();
    const blockSize = 1000;
    while (true) {
      final fetched = fetch(blockSize);
      if (fetched < blockSize) {
        return;
      }
    }
  }
}

/// This class extends [BufferedTokenStream] with functionality to filter
/// token streams to tokens on a particular channel (tokens where
/// [Token.channel] returns a particular value).
///
/// This token stream provides access to all tokens by index or when calling
/// methods like [getText]. The channel filtering is only used for code
/// accessing tokens via the lookahead methods [LA], [LT], and [LB].
///
/// By default, tokens are placed on the default channel
/// ([Token.DEFAULT_CHANNEL]), but may be reassigned by using the
/// `->channel(HIDDEN)` lexer command, or by using an embedded action to
/// call [Lexer.setChannel].
///
/// Note: lexer rules which use the `->skip` lexer command or call
/// [Lexer.skip] do not produce tokens at all, so input text matched by
/// such a rule will not be available as part of the token stream,
/// regardless of channel.
      we +class CommonTokenStream extends BufferedTokenStream { + /// Specifies the channel to use for filtering tokens. + /// + ///

      + /// The default value is {@link Token#DEFAULT_CHANNEL}, which matches the + /// default channel assigned to tokens created by the lexer.

      + int channel; + + /// Constructs a new [CommonTokenStream] using the specified token + /// source and filtering tokens to the specified channel. Only tokens whose + /// {@link Token#getChannel} matches [channel] or have the + /// {@link Token#getType} equal to {@link Token#EOF} will be returned by the + /// token stream lookahead methods. + /// + /// @param tokenSource The token source. + /// @param channel The channel to use for filtering tokens. + CommonTokenStream(TokenSource tokenSource, + [this.channel = Token.DEFAULT_CHANNEL]) + : super(tokenSource); + + @override + int adjustSeekIndex(int i) { + return nextTokenOnChannel(i, channel); + } + + @override + Token LB(int k) { + if (k == 0 || (p - k) < 0) return null; + + var i = p; + var n = 1; + // find k good tokens looking backwards + while (n <= k && i > 0) { + // skip off-channel tokens + i = previousTokenOnChannel(i - 1, channel); + n++; + } + if (i < 0) return null; + return tokens[i]; + } + + @override + Token LT(int k) { + //System.out.println("enter LT("+k+")"); + lazyInit(); + if (k == 0) return null; + if (k < 0) return LB(-k); + var i = p; + var n = 1; // we know tokens[p] is a good one + // find k good tokens + while (n < k) { + // skip off-channel tokens, but make sure to not look past EOF + if (sync(i + 1)) { + i = nextTokenOnChannel(i + 1, channel); + } + n++; + } +// if ( i>range ) range = i; + return tokens[i]; + } + + /// Count EOF just once. */ + int get numberOfOnChannelTokens { + var n = 0; + fill(); + for (var i = 0; i < tokens.length; i++) { + final t = tokens[i]; + if (t.channel == channel) n++; + if (t.type == Token.EOF) break; + } + return n; + } +} diff --git a/runtime/Dart/lib/src/tree/src/pattern/chunk.dart b/runtime/Dart/lib/src/tree/src/pattern/chunk.dart new file mode 100644 index 000000000..bf2d3f474 --- /dev/null +++ b/runtime/Dart/lib/src/tree/src/pattern/chunk.dart @@ -0,0 +1,90 @@ +/* + * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. 
+ * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ + +/// A chunk is either a token tag, a rule tag, or a span of literal text within a +/// tree pattern. +/// +///

      The method {@link ParseTreePatternMatcher#split(String)} returns a list of +/// chunks in preparation for creating a token stream by +/// {@link ParseTreePatternMatcher#tokenize(String)}. From there, we get a parse +/// tree from with {@link ParseTreePatternMatcher#compile(String, int)}. These +/// chunks are converted to [RuleTagToken], [TokenTagToken], or the +/// regular tokens of the text surrounding the tags.

      +abstract class Chunk {} + +/// Represents a placeholder tag in a tree pattern. A tag can have any of the +/// following forms. +/// +///
        +///
      • [expr]: An unlabeled placeholder for a parser rule [expr].
      • +///
      • [ID]: An unlabeled placeholder for a token of type [ID].
      • +///
      • {@code e:expr}: A labeled placeholder for a parser rule [expr].
      • +///
      • {@code id:ID}: A labeled placeholder for a token of type [ID].
      • +///
      +/// +/// This class does not perform any validation on the tag or label names aside +/// from ensuring that the tag is a non-null, non-empty string. +class TagChunk extends Chunk { + /// The tag for the chunk. + final String tag; + + /// The label assigned to this chunk, or null if no label is + /// assigned to the chunk. + final String label; + + /// Construct a new instance of [TagChunk] using the specified label + /// and tag. + /// + /// @param label The label for the tag. If this is null, the + /// [TagChunk] represents an unlabeled tag. + /// @param tag The tag, which should be the name of a parser rule or token + /// type. + /// + /// @exception ArgumentError if [tag] is null or empty. + TagChunk(this.tag, {this.label}) { + if (tag == null || tag.isEmpty) { + throw ArgumentError.value(tag, 'tag', 'cannot be null or empty'); + } + } + + /// This method returns a text representation of the tag chunk. Labeled tags + /// are returned in the form {@code label:tag}, and unlabeled tags are + /// returned as just the tag name. + @override + String toString() { + if (label != null) { + return label + ':' + tag; + } + + return tag; + } +} + +/// Represents a span of raw text (concrete syntax) between tags in a tree +/// pattern string. +class TextChunk extends Chunk { + /// The text of the chunk. + final String text; + + /// Constructs a new instance of [TextChunk] with the specified text. + /// + /// @param text The text of this chunk. + /// @exception IllegalArgumentException if [text] is null. + TextChunk(this.text) { + if (text == null) { + throw ArgumentError.notNull('text'); + } + } + + /// {@inheritDoc} + /// + ///

      The implementation for [TextChunk] returns the result of + /// {@link #getText()} in single quotes.

      + @override + String toString() { + return "'" + text + "'"; + } +} diff --git a/runtime/Dart/lib/src/tree/src/pattern/parse_tree_match.dart b/runtime/Dart/lib/src/tree/src/pattern/parse_tree_match.dart new file mode 100644 index 000000000..175517c97 --- /dev/null +++ b/runtime/Dart/lib/src/tree/src/pattern/parse_tree_match.dart @@ -0,0 +1,635 @@ +/* + * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ + +import '../../../error/error.dart'; +import '../../../input_stream.dart'; +import '../../../lexer.dart'; +import '../../../misc/multi_map.dart'; +import '../../../parser.dart'; +import '../../../parser_interpreter.dart'; +import '../../../parser_rule_context.dart'; +import '../../../token.dart'; +import '../../../token_source.dart'; +import '../../../token_stream.dart'; +import '../../../util/utils.dart'; +import '../tree.dart'; +import 'chunk.dart'; + +/// Represents the result of matching a [ParseTree] against a tree pattern. +class ParseTreeMatch { + /// Get the parse tree we are trying to match to a pattern. + /// + /// @return The [ParseTree] we are trying to match to a pattern. + final ParseTree tree; + + /// Get the tree pattern we are matching against. + /// + /// @return The tree pattern we are matching against. + final ParseTreePattern pattern; + + + /// Return a mapping from label → [list of nodes]. + /// + ///

      The map includes special entries corresponding to the names of rules and + /// tokens referenced in tags in the original pattern. For additional + /// information, see the description of {@link #getAll(String)}.

      + /// + /// @return A mapping from labels to parse tree nodes. If the parse tree + /// pattern did not contain any rule or token tags, this map will be empty. + final MultiMap labels; + + /// Get the node at which we first detected a mismatch. + /// + /// @return the node at which we first detected a mismatch, or null + /// if the match was successful. + final ParseTree mismatchedNode; + + /// Constructs a new instance of [ParseTreeMatch] from the specified + /// parse tree and pattern. + /// + /// @param tree The parse tree to match against the pattern. + /// @param pattern The parse tree pattern. + /// @param labels A mapping from label names to collections of + /// [ParseTree] objects located by the tree pattern matching process. + /// @param mismatchedNode The first node which failed to match the tree + /// pattern during the matching process. + /// + /// @exception ArgumentError.notNull) if [tree] is null + /// @exception ArgumentError.notNull) if [pattern] is null + /// @exception ArgumentError.notNull) if [labels] is null + ParseTreeMatch(this.tree, this.pattern, this.labels, this.mismatchedNode) { + if (tree == null) { + throw ArgumentError.notNull('tree'); + } + + if (pattern == null) { + throw ArgumentError.notNull('pattern'); + } + + if (labels == null) { + throw ArgumentError.notNull('labels'); + } + } + + /// Get the last node associated with a specific [label]. + /// + ///

      For example, for pattern {@code }, {@code get("id")} returns the + /// node matched for that [ID]. If more than one node + /// matched the specified label, only the last is returned. If there is + /// no node associated with the label, this returns null.

      + /// + ///

      Pattern tags like {@code } and {@code } without labels are + /// considered to be labeled with [ID] and [expr], respectively.

      + /// + /// @param label The label to check. + /// + /// @return The last [ParseTree] to match a tag with the specified + /// label, or null if no parse tree matched a tag with the label. + + ParseTree get(String label) { + final parseTrees = labels[label]; + if (parseTrees == null || parseTrees.isEmpty) { + return null; + } + + return parseTrees[parseTrees.length - 1]; // return last if multiple + } + + /// Return all nodes matching a rule or token tag with the specified label. + /// + ///

      If the [label] is the name of a parser rule or token in the + /// grammar, the resulting list will contain both the parse trees matching + /// rule or tags explicitly labeled with the label and the complete set of + /// parse trees matching the labeled and unlabeled tags in the pattern for + /// the parser rule or token. For example, if [label] is {@code "foo"}, + /// the result will contain all of the following.

      + /// + ///
        + ///
      • Parse tree nodes matching tags of the form {@code } and + /// {@code }.
      • + ///
      • Parse tree nodes matching tags of the form {@code }.
      • + ///
      • Parse tree nodes matching tags of the form {@code }.
      • + ///
      + /// + /// @param label The label. + /// + /// @return A collection of all [ParseTree] nodes matching tags with + /// the specified [label]. If no nodes matched the label, an empty list + /// is returned. + + List getAll(String label) { + final nodes = labels[label]; + if (nodes == null) { + return []; + } + + return nodes; + } + + /// Gets a value indicating whether the match operation succeeded. + /// + /// @return [true] if the match operation succeeded; otherwise, + /// [false]. + bool get succeeded => mismatchedNode == null; + + /// {@inheritDoc} + @override + String toString() { + return "Match ${succeeded ? "succeeded" : "failed"}; found ${labels.length} labels"; + } +} + +/// A pattern like {@code = ;} converted to a [ParseTree] by +/// {@link ParseTreePatternMatcher#compile(String, int)}. +class ParseTreePattern { + /// Get the parser rule which serves as the outermost rule for the tree + /// pattern. + /// + /// @return The parser rule which serves as the outermost rule for the tree + /// pattern. + final int patternRuleIndex; + + /// Get the tree pattern in concrete syntax form. + /// + /// @return The tree pattern in concrete syntax form. + final String pattern; + + + /// Get the tree pattern as a [ParseTree]. The rule and token tags from + /// the pattern are present in the parse tree as terminal nodes with a symbol + /// of type [RuleTagToken] or [TokenTagToken]. + /// + /// @return The tree pattern as a [ParseTree]. + final ParseTree patternTree; + + /// Get the [ParseTreePatternMatcher] which created this tree pattern. + /// + /// @return The [ParseTreePatternMatcher] which created this tree + /// pattern. + final ParseTreePatternMatcher matcher; + + /// Construct a new instance of the [ParseTreePattern] class. + /// + /// @param matcher The [ParseTreePatternMatcher] which created this + /// tree pattern. + /// @param pattern The tree pattern in concrete syntax form. 
+ /// @param patternRuleIndex The parser rule which serves as the root of the + /// tree pattern. + /// @param patternTree The tree pattern in [ParseTree] form. + ParseTreePattern( + this.matcher, this.pattern, this.patternRuleIndex, this.patternTree); + + /// Match a specific parse tree against this tree pattern. + /// + /// @param tree The parse tree to match against this tree pattern. + /// @return A [ParseTreeMatch] object describing the result of the + /// match operation. The {@link ParseTreeMatch#succeeded()} method can be + /// used to determine whether or not the match was successful. + + ParseTreeMatch match(ParseTree tree) { + return matcher.match(tree, pattern: this); + } + + /// Determine whether or not a parse tree matches this tree pattern. + /// + /// @param tree The parse tree to match against this tree pattern. + /// @return [true] if [tree] is a match for the current tree + /// pattern; otherwise, [false]. + bool matches(ParseTree tree) { + return matcher.match(tree, pattern: this).succeeded; + } +} + +/// A tree pattern matching mechanism for ANTLR [ParseTree]s. +/// +///

      Patterns are strings of source input text with special tags representing +/// token or rule references such as:

      +/// +///

      {@code = ;}

      +/// +///

      Given a pattern start rule such as [statement], this object constructs +/// a [ParseTree] with placeholders for the [ID] and [expr] +/// subtree. Then the {@link #match} routines can compare an actual +/// [ParseTree] from a parse with this pattern. Tag {@code } matches +/// any [ID] token and tag {@code } references the result of the +/// [expr] rule (generally an instance of [ExprContext].

      +/// +///

      Pattern {@code x = 0;} is a similar pattern that matches the same pattern +/// except that it requires the identifier to be [x] and the expression to +/// be {@code 0}.

      +/// +///

      The {@link #matches} routines return [true] or [false] based +/// upon a match for the tree rooted at the parameter sent in. The +/// {@link #match} routines return a [ParseTreeMatch] object that +/// contains the parse tree, the parse tree pattern, and a map from tag name to +/// matched nodes (more below). A subtree that fails to match, returns with +/// {@link ParseTreeMatch#mismatchedNode} set to the first tree node that did not +/// match.

      +/// +///

      For efficiency, you can compile a tree pattern in string form to a +/// [ParseTreePattern] object.

      +/// +///

      See [TestParseTreeMatcher] for lots of examples. +/// [ParseTreePattern] has two static helper methods: +/// {@link ParseTreePattern#findAll} and {@link ParseTreePattern#match} that +/// are easy to use but not super efficient because they create new +/// [ParseTreePatternMatcher] objects each time and have to compile the +/// pattern in string form before using it.

      +/// +///

      The lexer and parser that you pass into the [ParseTreePatternMatcher] +/// constructor are used to parse the pattern in string form. The lexer converts +/// the {@code = ;} into a sequence of four tokens (assuming lexer +/// throws out whitespace or puts it on a hidden channel). Be aware that the +/// input stream is reset for the lexer (but not the parser; a +/// [ParserInterpreter] is created to parse the input.). Any user-defined +/// fields you have put into the lexer might get changed when this mechanism asks +/// it to scan the pattern string.

      +/// +///

      Normally a parser does not accept token {@code } as a valid +/// [expr] but, from the parser passed in, we create a special version of +/// the underlying grammar representation (an [ATN]) that allows imaginary +/// tokens representing rules ({@code }) to match entire rules. We call +/// these bypass alternatives.

      +/// +///

      Delimiters are {@code <} and {@code >}, with {@code \} as the escape string +/// by default, but you can set them to whatever you want using +/// {@link #setDelimiters}. You must escape both start and stop strings +/// {@code \<} and {@code \>}.

      +class ParseTreePatternMatcher { + /// Used to convert the tree pattern string into a series of tokens. The + /// input stream is reset. + final Lexer lexer; + + /// Used to collect to the grammar file name, token names, rule names for + /// used to parse the pattern into a parse tree. + final Parser parser; + + String start = '<'; + String stop = '>'; + String escape = '\\'; // e.g., \< and \> must escape BOTH! + + /// Constructs a [ParseTreePatternMatcher] or from a [Lexer] and + /// [Parser] object. The lexer input stream is altered for tokenizing + /// the tree patterns. The parser is used as a convenient mechanism to get + /// the grammar name, plus token, rule names. + ParseTreePatternMatcher(this.lexer, this.parser); + + /// Set the delimiters used for marking rule and token tags within concrete + /// syntax used by the tree pattern parser. + /// + /// @param start The start delimiter. + /// @param stop The stop delimiter. + /// @param escapeLeft The escape sequence to use for escaping a start or stop delimiter. + /// + /// @exception ArgumentError if [start] is null or empty. + /// @exception ArgumentError if [stop] is null or empty. + void setDelimiters(String start, String stop, String escapeLeft) { + if (start == null || start.isEmpty) { + throw ArgumentError.value(start, 'start', 'cannot be null or empty'); + } + + if (stop == null || stop.isEmpty) { + throw ArgumentError.value(stop, 'stop', 'cannot be null or empty'); + } + + this.start = start; + this.stop = stop; + escape = escapeLeft; + } + + /// Does [pattern] matched as rule patternRuleIndex match tree? Pass in a + /// compiled pattern instead of a string representation of a tree pattern. 
+ bool matches(ParseTree tree, + {ParseTreePattern pattern, String patternStr, int patternRuleIndex}) { + pattern ??= compile(patternStr, patternRuleIndex); + + final labels = MultiMap(); + final mismatchedNode = + matchImpl(tree, pattern.patternTree, labels); + return mismatchedNode == null; + } + + /// Compare [pattern] matched against [tree] and return a + /// [ParseTreeMatch] object that contains the matched elements, or the + /// node at which the match failed. Pass in a compiled pattern instead of a + /// string representation of a tree pattern. + + ParseTreeMatch match(ParseTree tree, + {ParseTreePattern pattern, String patternStr, int patternRuleIndex}) { + pattern ??= compile(patternStr, patternRuleIndex); + + final labels = MultiMap(); + final mismatchedNode = + matchImpl(tree, pattern.patternTree, labels); + return ParseTreeMatch(tree, pattern, labels, mismatchedNode); + } + + /// For repeated use of a tree pattern, compile it to a + /// [ParseTreePattern] using this method. + ParseTreePattern compile(String pattern, int patternRuleIndex) { + final tokenList = tokenize(pattern); + final tokenSrc = ListTokenSource(tokenList); + final tokens = CommonTokenStream(tokenSrc); + + final parserInterp = ParserInterpreter( + parser.grammarFileName, + parser.vocabulary, + parser.ruleNames, + parser.ATNWithBypassAlts, + tokens); + + ParseTree tree; + try { + parserInterp.errorHandler = BailErrorStrategy(); + tree = parserInterp.parse(patternRuleIndex); +// System.out.println("pattern tree = "+tree.toStringTree(parserInterp)); + } on ParseCancellationException { + rethrow; + } on RecognitionException { + rethrow; + } catch (e) { + throw CannotInvokeStartRule(e); + } + + // Make sure tree pattern compilation checks for a complete parse + if (tokens.LA(1) != Token.EOF) { + throw StartRuleDoesNotConsumeFullPattern(); + } + + return ParseTreePattern(this, pattern, patternRuleIndex, tree); + } + + // ---- SUPPORT CODE ---- + + /// Recursively walk [tree] against 
[patternTree], filling + /// {@code match.}{@link ParseTreeMatch#labels labels}. + /// + /// @return the first node encountered in [tree] which does not match + /// a corresponding node in [patternTree], or null if the match + /// was successful. The specific node returned depends on the matching + /// algorithm used by the implementation, and may be overridden. + + ParseTree matchImpl(ParseTree tree, ParseTree patternTree, + MultiMap labels) { + if (tree == null) { + throw ArgumentError('tree cannot be null'); + } + + if (patternTree == null) { + throw ArgumentError('patternTree cannot be null'); + } + + // x and , x and y, or x and x; or could be mismatched types + if (tree is TerminalNode && patternTree is TerminalNode) { + final t1 = tree; + final t2 = patternTree; + ParseTree mismatchedNode; + // both are tokens and they have same type + if (t1.symbol.type == t2.symbol.type) { + if (t2.symbol is TokenTagToken) { + // x and + TokenTagToken tokenTagToken = t2.symbol; + // track label->list-of-nodes for both token name and label (if any) + labels.put(tokenTagToken.tokenName, tree); + if (tokenTagToken.label != null) { + labels.put(tokenTagToken.label, tree); + } + } else if (t1.text == t2.text) { + // x and x + } else { + // x and y + mismatchedNode ??= t1; + } + } else { + mismatchedNode ??= t1; + } + + return mismatchedNode; + } + + if (tree is ParserRuleContext && patternTree is ParserRuleContext) { + final r1 = tree; + final r2 = patternTree; + ParseTree mismatchedNode; + // (expr ...) and + final ruleTagToken = getRuleTagToken(r2); + if (ruleTagToken != null) { + if (r1.ruleContext.ruleIndex == r2.ruleContext.ruleIndex) { + // track label->list-of-nodes for both rule name and label (if any) + labels.put(ruleTagToken.ruleName, tree); + if (ruleTagToken.label != null) { + labels.put(ruleTagToken.label, tree); + } + } else { + mismatchedNode ??= r1; + } + + return mismatchedNode; + } + + // (expr ...) and (expr ...) 
+ if (r1.childCount != r2.childCount) { + mismatchedNode ??= r1; + + return mismatchedNode; + } + + final n = r1.childCount; + for (var i = 0; i < n; i++) { + final childMatch = + matchImpl(r1.getChild(i), patternTree.getChild(i), labels); + if (childMatch != null) { + return childMatch; + } + } + + return mismatchedNode; + } + + // if nodes aren't both tokens or both rule nodes, can't match + return tree; + } + + /// Is [t] {@code (expr )} subtree? */ + RuleTagToken getRuleTagToken(ParseTree t) { + if (t is RuleNode) { + final r = t; + if (r.childCount == 1 && r.getChild(0) is TerminalNode) { + TerminalNode c = r.getChild(0); + if (c.symbol is RuleTagToken) { +// System.out.println("rule tag subtree "+t.toStringTree(parser)); + return c.symbol; + } + } + } + return null; + } + + List tokenize(String pattern) { + // split pattern into chunks: sea (raw input) and islands (, ) + final chunks = split(pattern); + + // create token stream from text and tags + final tokens = []; + for (var chunk in chunks) { + if (chunk is TagChunk) { + final tagChunk = chunk; + // add special rule token or conjure up new token from name + if (isUpperCase(tagChunk.tag[0])) { + final ttype = parser.getTokenType(tagChunk.tag); + if (ttype == Token.INVALID_TYPE) { + throw ArgumentError('Unknown token ' + + tagChunk.tag + + ' in pattern: ' + + pattern); + } + final t = + TokenTagToken(tagChunk.tag, ttype, tagChunk.label); + tokens.add(t); + } else if (isLowerCase(tagChunk.tag[0])) { + final ruleIndex = parser.getRuleIndex(tagChunk.tag); + if (ruleIndex == -1) { + throw ArgumentError('Unknown rule ' + + tagChunk.tag + + ' in pattern: ' + + pattern); + } + final ruleImaginaryTokenType = + parser.ATNWithBypassAlts.ruleToTokenType[ruleIndex]; + tokens.add(RuleTagToken( + tagChunk.tag, ruleImaginaryTokenType, tagChunk.label)); + } else { + throw ArgumentError( + 'invalid tag: ' + tagChunk.tag + ' in pattern: ' + pattern); + } + } else { + TextChunk textChunk = chunk; + final inputStream = + 
InputStream.fromString(textChunk.text); + lexer.inputStream = inputStream; + var t = lexer.nextToken(); + while (t.type != Token.EOF) { + tokens.add(t); + t = lexer.nextToken(); + } + } + } + +// System.out.println("tokens="+tokens); + return tokens; + } + + /// Split {@code = ;} into 4 chunks for tokenizing by {@link #tokenize}. */ + List split(String pattern) { + var p = 0; + final n = pattern.length; + final chunks = []; + // find all start and stop indexes first, then collect + final starts = []; + final stops = []; + while (p < n) { + if (p == pattern.indexOf(escape + start, p)) { + p += escape.length + start.length; + } else if (p == pattern.indexOf(escape + stop, p)) { + p += escape.length + stop.length; + } else if (p == pattern.indexOf(start, p)) { + starts.add(p); + p += start.length; + } else if (p == pattern.indexOf(stop, p)) { + stops.add(p); + p += stop.length; + } else { + p++; + } + } + +// System.out.println(""); +// System.out.println(starts); +// System.out.println(stops); + if (starts.length > stops.length) { + throw ArgumentError('unterminated tag in pattern: ' + pattern); + } + + if (starts.length < stops.length) { + throw ArgumentError('missing start tag in pattern: ' + pattern); + } + + final ntags = starts.length; + for (var i = 0; i < ntags; i++) { + if (starts[i] >= stops[i]) { + throw ArgumentError( + 'tag delimiters out of order in pattern: ' + pattern); + } + } + + // collect into chunks now + if (ntags == 0) { + final text = pattern.substring(0, n); + chunks.add(TextChunk(text)); + } + + if (ntags > 0 && starts[0] > 0) { + // copy text up to first tag into chunks + final text = pattern.substring(0, starts[0]); + chunks.add(TextChunk(text)); + } + for (var i = 0; i < ntags; i++) { + // copy inside of + final tag = pattern.substring(starts[i] + start.length, stops[i]); + var ruleOrToken = tag; + String label; + final colon = tag.indexOf(':'); + if (colon >= 0) { + label = tag.substring(0, colon); + ruleOrToken = tag.substring(colon + 1, 
tag.length); + } + chunks.add(TagChunk(ruleOrToken, label: label)); + if (i + 1 < ntags) { + // copy from end of to start of next + final text = pattern.substring(stops[i] + stop.length, starts[i + 1]); + chunks.add(TextChunk(text)); + } + } + if (ntags > 0) { + final afterLastTag = stops[ntags - 1] + stop.length; + if (afterLastTag < n) { + // copy text from end of last tag to end + final text = pattern.substring(afterLastTag, n); + chunks.add(TextChunk(text)); + } + } + + // strip out the escape sequences from text chunks but not tags + for (var i = 0; i < chunks.length; i++) { + final c = chunks[i]; + if (c is TextChunk) { + final tc = c; + final unescaped = tc.text.replaceAll(escape, ''); + if (unescaped.length < tc.text.length) { + chunks[i] = TextChunk(unescaped); + } + } + } + + return chunks; + } +} + +class CannotInvokeStartRule extends StateError { + CannotInvokeStartRule(String message) : super(message); +} + +// Fixes https://github.com/antlr/antlr4/issues/413 +// "Tree pattern compilation doesn't check for a complete parse" +class StartRuleDoesNotConsumeFullPattern extends Error {} + +/// This exception is thrown to cancel a parsing operation. This exception does +/// not extend [RecognitionException], allowing it to bypass the standard +/// error recovery mechanisms. [BailErrorStrategy] throws this exception in +/// response to a parse error. +class ParseCancellationException extends StateError { + ParseCancellationException(String message) : super(message); +} diff --git a/runtime/Dart/lib/src/tree/src/tree.dart b/runtime/Dart/lib/src/tree/src/tree.dart new file mode 100644 index 000000000..f2eb32313 --- /dev/null +++ b/runtime/Dart/lib/src/tree/src/tree.dart @@ -0,0 +1,370 @@ +/* + * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. 
+ */ + +import 'dart:developer'; + +import '../../interval_set.dart'; +import '../../parser.dart'; +import '../../parser_rule_context.dart'; +import '../../rule_context.dart'; +import '../../token.dart'; + +/// The basic notion of a tree has a parent, a payload, and a list of children. +/// It is the most abstract interface for all the trees used by ANTLR. +abstract class Tree { + Tree get parent; + + dynamic get payload; + + Tree getChild(int i); + +// Tree getChild(int i); + + int get childCount; + + String toStringTree(); +} + +abstract class SyntaxTree extends Tree { + /// Return an [Interval] indicating the index in the + /// [TokenStream] of the first and last token associated with this + /// subtree. If this node is a leaf, then the interval represents a single + /// token and has interval i..i for token index i. + /// + ///

      An interval of i..i-1 indicates an empty interval at position + /// i in the input stream, where 0 <= i <= the size of the input + /// token stream. Currently, the code base can only have i=0..n-1 but + /// in concept one could have an empty interval after EOF.

      + /// + ///

      If source interval is unknown, this returns {@link Interval#INVALID}.

      + /// + ///

      As a weird special case, the source interval for rules matched after + /// EOF is unspecified.

      + Interval get sourceInterval; +} + +abstract class ParseTree extends SyntaxTree { + // the following methods narrow the return type; they are not additional methods + @override + ParseTree get parent; + + @override + ParseTree getChild(int i); + + /// Set the parent for this node. + /// + /// This is not backward compatible as it changes + /// the interface but no one was able to create custom + /// nodes anyway so I'm adding as it improves internal + /// code quality. + /// + /// One could argue for a restructuring of + /// the class/interface hierarchy so that + /// setParent, addChild are moved up to Tree + /// but that's a major change. So I'll do the + /// minimal change, which is to add this method. + /// + /// @since 4.7 + set parent(RuleContext parent); + + /// The [ParseTreeVisitor] needs a double dispatch method. */ + T accept(ParseTreeVisitor visitor); + + /// Return the combined text of all leaf nodes. Does not get any + /// off-channel tokens (if any) so won't return whitespace and + /// comments if they are sent to parser on hidden channel. + String get text; + + /// Specialize toStringTree so that it can print out more information + /// based upon the parser. + @override + String toStringTree({Parser parser}); +} + +abstract class RuleNode extends ParseTree { + RuleContext get ruleContext; +} + +abstract class TerminalNode extends ParseTree { + Token get symbol; +} + +abstract class ErrorNode extends TerminalNode {} + +abstract class ParseTreeVisitor { + /// {@inheritDoc} + /// + ///

      The default implementation calls {@link ParseTree#accept} on the + /// specified tree.

      + T visit(ParseTree tree) { + return tree.accept(this); + } + + /// {@inheritDoc} + /// + ///

      The default implementation initializes the aggregate result to + /// {@link #defaultResult defaultResult()}. Before visiting each child, it + /// calls {@link #shouldVisitNextChild shouldVisitNextChild}; if the result + /// is [false] no more children are visited and the current aggregate + /// result is returned. After visiting a child, the aggregate result is + /// updated by calling {@link #aggregateResult aggregateResult} with the + /// previous aggregate result and the result of visiting the child.

      + /// + ///

      The default implementation is not safe for use in visitors that modify + /// the tree structure. Visitors that modify the tree should override this + /// method to behave properly in respect to the specific algorithm in use.

      + T visitChildren(RuleNode node) { + var result = defaultResult(); + final n = node.childCount; + for (var i = 0; i < n; i++) { + if (!shouldVisitNextChild(node, result)) { + break; + } + + final c = node.getChild(i); + final childResult = c.accept(this); + result = aggregateResult(result, childResult); + } + + return result; + } + + /// {@inheritDoc} + /// + ///

      The default implementation returns the result of + /// {@link #defaultResult defaultResult}.

      + + T visitTerminal(TerminalNode node) { + return defaultResult(); + } + + /// {@inheritDoc} + /// + ///

      The default implementation returns the result of + /// {@link #defaultResult defaultResult}.

      + + T visitErrorNode(ErrorNode node) { + return defaultResult(); + } + + /// Gets the default value returned by visitor methods. This value is + /// returned by the default implementations of + /// {@link #visitTerminal visitTerminal}, {@link #visitErrorNode visitErrorNode}. + /// The default implementation of {@link #visitChildren visitChildren} + /// initializes its aggregate result to this value. + /// + ///

      The base implementation returns null.

      + /// + /// @return The default value returned by visitor methods. + T defaultResult() { + return null; + } + + /// Aggregates the results of visiting multiple children of a node. After + /// either all children are visited or {@link #shouldVisitNextChild} returns + /// [false], the aggregate value is returned as the result of + /// {@link #visitChildren}. + /// + ///

      The default implementation returns [nextResult], meaning + /// {@link #visitChildren} will return the result of the last child visited + /// (or return the initial value if the node has no children).

      + /// + /// @param aggregate The previous aggregate value. In the default + /// implementation, the aggregate value is initialized to + /// {@link #defaultResult}, which is passed as the [aggregate] argument + /// to this method after the first child node is visited. + /// @param nextResult The result of the immediately preceeding call to visit + /// a child node. + /// + /// @return The updated aggregate result. + T aggregateResult(T aggregate, T nextResult) => nextResult; + + /// This method is called after visiting each child in + /// {@link #visitChildren}. This method is first called before the first + /// child is visited; at that point [currentResult] will be the initial + /// value (in the default implementation, the initial value is returned by a + /// call to {@link #defaultResult}. This method is not called after the last + /// child is visited. + /// + ///

      The default implementation always returns [true], indicating that + /// [visitChildren] should only return after all children are visited. + /// One reason to override this method is to provide a "short circuit" + /// evaluation option for situations where the result of visiting a single + /// child has the potential to determine the result of the visit operation as + /// a whole.

      + /// + /// @param node The [RuleNode] whose children are currently being + /// visited. + /// @param currentResult The current aggregate result of the children visited + /// to the current point. + /// + /// @return [true] to continue visiting children. Otherwise return + /// [false] to stop visiting children and immediately return the + /// current aggregate result from {@link #visitChildren}. + bool shouldVisitNextChild(RuleNode node, T currentResult) => true; +} + +abstract class ParseTreeListener { + void visitTerminal(TerminalNode node); + + void visitErrorNode(ErrorNode node); + + void enterEveryRule(ParserRuleContext node); + + void exitEveryRule(ParserRuleContext node); +} + +class TraceListener implements ParseTreeListener { + final Parser parser; + + TraceListener(this.parser); + + @override + void enterEveryRule(ParserRuleContext ctx) { + log('enter ' + + parser.ruleNames[ctx.ruleIndex] + + ', LT(1)=${parser.inputStream.LT(1).text}'); + } + + @override + void visitTerminal(TerminalNode node) { + log('consume ${node.symbol} rule ' + + parser.ruleNames[parser.context.ruleIndex]); + } + + @override + void visitErrorNode(ErrorNode node) {} + + @override + void exitEveryRule(ParserRuleContext ctx) { + log('exit ${parser.ruleNames[ctx.ruleIndex]}' ', LT(1)=' + + parser.inputStream.LT(1).text); + } +} + +class TrimToSizeListener implements ParseTreeListener { + static final TrimToSizeListener INSTANCE = TrimToSizeListener(); + + @override + void enterEveryRule(ParserRuleContext ctx) {} + + @override + void visitTerminal(TerminalNode node) {} + + @override + void visitErrorNode(ErrorNode node) {} + + @override + void exitEveryRule(ParserRuleContext ctx) { + // TODO trim dart List's size +// if (ctx.children is List) { +// (ctx.children).trimToSize(); +// } + } +} + +class TerminalNodeImpl extends TerminalNode { + @override + Token symbol; + @override + ParseTree parent; + + TerminalNodeImpl(this.symbol); + + @override + ParseTree getChild(i) { + return 
null; + } + + @override + Token get payload => symbol; + + @override + Interval get sourceInterval { + if (symbol == null) return Interval.INVALID; + + final tokenIndex = symbol.tokenIndex; + return Interval(tokenIndex, tokenIndex); + } + + @override + int get childCount { + return 0; + } + + @override + T accept(ParseTreeVisitor visitor) { + return visitor.visitTerminal(this); + } + + @override + String get text { + return symbol.text; + } + + @override + String toStringTree({Parser parser}) { + return toString(); + } + + @override + String toString() { + if (symbol.type == Token.EOF) return ''; + return symbol.text; + } +} + +/// Represents a token that was consumed during resynchronization +/// rather than during a valid match operation. For example, +/// we will create this kind of a node during single token insertion +/// and deletion as well as during "consume until error recovery set" +/// upon no viable alternative exceptions. +class ErrorNodeImpl extends TerminalNodeImpl implements ErrorNode { + ErrorNodeImpl(token) : super(token); + + bool isErrorNode() => true; + + @override + T accept(ParseTreeVisitor visitor) { + return visitor.visitErrorNode(this); + } +} + +class ParseTreeWalker { + void walk(ParseTreeListener listener, ParseTree t) { + if (t is ErrorNode) { + listener.visitErrorNode(t); + return; + } else if (t is TerminalNode) { + listener.visitTerminal(t); + return; + } + RuleNode r = t; + enterRule(listener, r); + for (var i = 0; i < r.childCount; i++) { + walk(listener, r.getChild(i)); + } + exitRule(listener, r); + } + + /// The discovery of a rule node, involves sending two events: the generic + /// {@link ParseTreeListener#enterEveryRule} and a + /// [RuleContext]-specific event. First we trigger the generic and then + /// the rule specific. We to them in reverse order upon finishing the node. 
+ void enterRule(ParseTreeListener listener, RuleNode r) { + ParserRuleContext ctx = r.ruleContext; + listener.enterEveryRule(ctx); + ctx.enterRule(listener); + } + + void exitRule(ParseTreeListener listener, RuleNode r) { + ParserRuleContext ctx = r.ruleContext; + ctx.exitRule(listener); + listener.exitEveryRule(ctx); + } + + static final DEFAULT = ParseTreeWalker(); +} diff --git a/runtime/Dart/lib/src/tree/src/trees.dart b/runtime/Dart/lib/src/tree/src/trees.dart new file mode 100644 index 000000000..80a447238 --- /dev/null +++ b/runtime/Dart/lib/src/tree/src/trees.dart @@ -0,0 +1,226 @@ +/* + * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ + +import 'dart:core'; + +import '../../atn/atn.dart'; +import '../../parser.dart'; +import '../../parser_rule_context.dart'; +import '../../rule_context.dart'; +import '../../token.dart'; +import '../../util/utils.dart'; +import 'tree.dart'; + +/// A set of utility routines useful for all kinds of ANTLR trees. */ +class Trees { + /// Print out a whole tree in LISP form. {@link #getNodeText} is used on the + /// node payloads to get the text for the nodes. Detect + /// parse trees and extract data appropriately. 
+ static String toStringTree(Tree t, {Parser recog, List ruleNames}) { + ruleNames ??= recog?.ruleNames; + var s = escapeWhitespace(getNodeText(t, ruleNames: ruleNames), false); + if (t.childCount == 0) return s; + final buf = StringBuffer(); + buf.write('('); + s = escapeWhitespace(getNodeText(t, ruleNames: ruleNames), false); + buf.write(s); + buf.write(' '); + for (var i = 0; i < t.childCount; i++) { + if (i > 0) buf.write(' '); + buf.write(toStringTree(t.getChild(i), ruleNames: ruleNames)); + } + buf.write(')'); + return buf.toString(); + } + + static String getNodeText(Tree t, {Parser recog, List ruleNames}) { + ruleNames ??= recog?.ruleNames; + if (ruleNames != null) { + if (t is RuleContext) { + final ruleIndex = t.ruleContext.ruleIndex; + final ruleName = ruleNames[ruleIndex]; + final altNumber = t.altNumber; + if (altNumber != ATN.INVALID_ALT_NUMBER) { + return ruleName + ':$altNumber'; + } + return ruleName; + } else if (t is ErrorNode) { + return t.toString(); + } else if (t is TerminalNode) { + final symbol = (t).symbol; + if (symbol != null) { + final s = symbol.text; + return s; + } + } + } + // no recog for rule names + Object payload = t.payload; + if (payload is Token) { + return payload.text; + } + return t.payload.toString(); + } + + /// Return ordered list of all children of this node */ + static List getChildren(Tree t) { + final kids = []; + for (var i = 0; i < t.childCount; i++) { + kids.add(t.getChild(i)); + } + return kids; + } + + /// Return a list of all ancestors of this node. The first node of + /// list is the root and the last is the parent of this node. + /// + /// @since 4.5.1 + static List getAncestors(Tree t) { + if (t.parent == null) return []; + final ancestors = []; + t = t.parent; + while (t != null) { + ancestors.insert(0, t); // insert at start + t = t.parent; + } + return ancestors; + } + + /// Return true if t is u's parent or a node on path to root from u. + /// Use == not equals(). 
+ /// + /// @since 4.5.1 + static bool isAncestorOf(Tree t, Tree u) { + if (t == null || u == null || t.parent == null) return false; + var p = u.parent; + while (p != null) { + if (t == p) return true; + p = p.parent; + } + return false; + } + + static List findAllTokenNodes(ParseTree t, int ttype) { + return findAllNodes(t, ttype, true); + } + + static List findAllRuleNodes(ParseTree t, int ruleIndex) { + return findAllNodes(t, ruleIndex, false); + } + + static List findAllNodes(ParseTree t, int index, bool findTokens) { + final nodes = []; + _findAllNodes(t, index, findTokens, nodes); + return nodes; + } + + static void _findAllNodes( + ParseTree t, int index, bool findTokens, List nodes) { + // check this node (the root) first + if (findTokens && t is TerminalNode) { + final tnode = t; + if (tnode.symbol.type == index) nodes.add(t); + } else if (!findTokens && t is ParserRuleContext) { + final ctx = t; + if (ctx.ruleIndex == index) nodes.add(t); + } + // check children + for (var i = 0; i < t.childCount; i++) { + _findAllNodes(t.getChild(i), index, findTokens, nodes); + } + } + + /// Get all descendents; includes t itself. + /// + /// @since 4.5.1 + static List getDescendants(ParseTree t) { + final nodes = []; + nodes.add(t); + + final n = t.childCount; + for (var i = 0; i < n; i++) { + nodes.addAll(getDescendants(t.getChild(i))); + } + return nodes; + } + + /// @deprecated */ + static List descendants(ParseTree t) { + return getDescendants(t); + } + + /// Find smallest subtree of t enclosing range startTokenIndex..stopTokenIndex + /// inclusively using postorder traversal. Recursive depth-first-search. 
+ /// + /// @since 4.5.1 + static ParserRuleContext getRootOfSubtreeEnclosingRegion( + ParseTree t, + int startTokenIndex, // inclusive + int stopTokenIndex) // inclusive + { + final n = t.childCount; + for (var i = 0; i < n; i++) { + final child = t.getChild(i); + final r = getRootOfSubtreeEnclosingRegion( + child, startTokenIndex, stopTokenIndex); + if (r != null) return r; + } + if (t is ParserRuleContext) { + final r = t; + if (startTokenIndex >= + r.start.tokenIndex && // is range fully contained in t? + (r.stop == null || stopTokenIndex <= r.stop.tokenIndex)) { + // note: r.getStop()==null likely implies that we bailed out of parser and there's nothing to the right + return r; + } + } + return null; + } + + /// Replace any subtree siblings of root that are completely to left + /// or right of lookahead range with a CommonToken(Token.INVALID_TYPE,"...") + /// node. The source interval for t is not altered to suit smaller range! + /// + /// WARNING: destructive to t. + /// + /// @since 4.5.1 + static void stripChildrenOutOfRange(ParserRuleContext t, + ParserRuleContext root, int startIndex, int stopIndex) { + if (t == null) return; + for (var i = 0; i < t.childCount; i++) { + final child = t.getChild(i); + final range = child.sourceInterval; + if (child is ParserRuleContext && + (range.b < startIndex || range.a > stopIndex)) { + if (isAncestorOf(child, root)) { + // replace only if subtree doesn't have displayed root + final abbrev = CommonToken(Token.INVALID_TYPE, text: '...'); + t.children[i] = TerminalNodeImpl(abbrev); + } + } + } + } + + /// Return first node satisfying the pred + /// + /// @since 4.5.1 + static Tree findNodeSuchThat(Tree t, Predicate pred) { + if (pred.test(t)) return t; + + if (t == null) return null; + + final n = t.childCount; + for (var i = 0; i < n; i++) { + final u = findNodeSuchThat(t.getChild(i), pred); + if (u != null) return u; + } + return null; + } +} + +abstract class Predicate { + bool test(T t); +} diff --git 
a/runtime/Dart/lib/src/tree/tree.dart b/runtime/Dart/lib/src/tree/tree.dart new file mode 100644 index 000000000..9010020ea --- /dev/null +++ b/runtime/Dart/lib/src/tree/tree.dart @@ -0,0 +1,10 @@ +/* + * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ + +export 'src/pattern/chunk.dart'; +export 'src/pattern/parse_tree_match.dart'; +export 'src/tree.dart'; +export 'src/trees.dart'; diff --git a/runtime/Dart/lib/src/util/bit_set.dart b/runtime/Dart/lib/src/util/bit_set.dart new file mode 100644 index 000000000..087584945 --- /dev/null +++ b/runtime/Dart/lib/src/util/bit_set.dart @@ -0,0 +1,307 @@ +/* + * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ + +import 'dart:math'; +import 'dart:typed_data'; + +class BitSet { + static final Uint32List EmptyBits = Uint32List(0); + static const BitsPerElement = 8 * 32; + + Uint32List _data = EmptyBits; + + BitSet([int nbits = 0]) { + if (nbits == 0) { + return; + } + if (nbits < 0) throw RangeError('nbits'); + + if (nbits > 0) { + final length = ((nbits + BitsPerElement - 1) / BitsPerElement).floor(); + _data = Uint32List(length); + } + } + + static int getBitCount(Uint32List value) { + var data = 0; + final size = value.length; + const m1 = 0x5555555555555555; + const m2 = 0x3333333333333333; + const m4 = 0x0F0F0F0F0F0F0F0F; + const m8 = 0x00FF00FF00FF00FF; + const m16 = 0x0000FFFF0000FFFF; + const h01 = 0x0101010101010101; + + var bitCount = 0; + final limit30 = size - size % 30; + + // 64-bit tree merging (merging3) + for (var i = 0; i < limit30; i += 30, data += 30) { + var acc = 0; + for (var j = 0; j < 30; j += 3) { + var count1 = value[data + j]; + var count2 = value[data + j + 1]; + var half1 = value[data + j + 2]; + var 
half2 = half1; + half1 &= m1; + half2 = (half2 >> 1) & m1; + count1 -= (count1 >> 1) & m1; + count2 -= (count2 >> 1) & m1; + count1 += half1; + count2 += half2; + count1 = (count1 & m2) + ((count1 >> 2) & m2); + count1 += (count2 & m2) + ((count2 >> 2) & m2); + acc += (count1 & m4) + ((count1 >> 4) & m4); + } + + acc = (acc & m8) + ((acc >> 8) & m8); + acc = (acc + (acc >> 16)) & m16; + acc = acc + (acc >> 32); + bitCount += acc; + } + + // count the bits of the remaining bytes (MAX 29*8) using + // "Counting bits set, in parallel" from the "Bit Twiddling Hacks", + // the code uses wikipedia's 64-bit popcount_3() implementation: + // http://en.wikipedia.org/wiki/Hamming_weight#Efficient_implementation + for (var i = 0; i < size - limit30; i++) { + var x = value[data + i]; + x = x - ((x >> 1) & m1); + x = (x & m2) + ((x >> 2) & m2); + x = (x + (x >> 4)) & m4; + bitCount += ((x * h01) >> 56); + } + + return bitCount; + } + + static final List index64 = [ + 0, + 47, + 1, + 56, + 48, + 27, + 2, + 60, + 57, + 49, + 41, + 37, + 28, + 16, + 3, + 61, + 54, + 58, + 35, + 52, + 50, + 42, + 21, + 44, + 38, + 32, + 29, + 23, + 17, + 11, + 4, + 62, + 46, + 55, + 26, + 59, + 40, + 36, + 15, + 53, + 34, + 51, + 20, + 43, + 31, + 22, + 10, + 45, + 25, + 39, + 14, + 33, + 19, + 30, + 9, + 24, + 13, + 18, + 8, + 12, + 7, + 6, + 5, + 63 + ]; + + static int BitScanForward(int value) { + if (value == 0) return -1; + + const debruijn64 = 0x03f79d71b4cb0a89; + return index64[(((value ^ (value - 1)) * debruijn64) >> 58) % 64]; + } + + BitSet clone() { + final result = BitSet(); + result._data = List.from(_data); + return result; + } + + void clear(int index) { + if (index < 0) throw RangeError('index'); + + final element = (index / BitsPerElement).floor(); + if (element >= _data.length) return; + + _data[element] &= ~(1 << (index % BitsPerElement)); + } + + bool operator [](int index) { + return get(index); + } + + bool get(int index) { + if (index < 0) throw RangeError('index'); + + 
final element = (index / BitsPerElement).floor(); + if (element >= _data.length) return false; + + return (_data[element] & (1 << (index % BitsPerElement))) != 0; + } + + void set(int index) { + if (index < 0) throw RangeError('index'); + + final element = (index / BitsPerElement).floor(); + if (element >= _data.length) { + final newList = Uint32List(max(_data.length * 2, element + 1)) + ..setRange(0, _data.length, _data); + _data = newList; + } + _data[element] |= 1 << (index % BitsPerElement); + } + + bool get isEmpty { + for (var i = 0; i < _data.length; i++) { + if (_data[i] != 0) return false; + } + + return true; + } + + int get cardinality { + return getBitCount(_data); + } + + int nextset(int fromIndex) { + if (fromIndex < 0) throw RangeError('fromIndex'); + + if (isEmpty) return -1; + + var i = (fromIndex / BitsPerElement).floor(); + if (i >= _data.length) return -1; + + var current = _data[i] & ~((1 << (fromIndex % BitsPerElement)) - 1); + + while (true) { + final bit = BitScanForward(current); + if (bit >= 0) return bit + i * BitsPerElement; + + i++; + if (i >= _data.length) break; + + current = _data[i]; + } + + return -1; + } + + void and(BitSet set) { + if (set == null) throw ArgumentError.notNull('set'); + + final length = min(_data.length, set._data.length); + for (var i = 0; i < length; i++) { + _data[i] &= set._data[i]; + } + + for (var i = length; i < _data.length; i++) { + _data[i] = 0; + } + } + + void or(BitSet set) { + if (set == null) throw ArgumentError.notNull('set'); + + if (set._data.length > _data.length) { + final newList = Uint32List(set._data.length) + ..setRange(0, _data.length, _data); + _data = newList; + } + + for (var i = 0; i < set._data.length; i++) { + _data[i] |= set._data[i]; + } + } + + @override + bool operator ==(obj) { + final other = obj as BitSet; + if (other == null) return false; + + if (isEmpty) return other.isEmpty; + + final minlength = min(_data.length, other._data.length); + for (var i = 0; i < minlength; i++) 
{ + if (_data[i] != other._data[i]) return false; + } + + for (var i = minlength; i < _data.length; i++) { + if (_data[i] != 0) return false; + } + + for (var i = minlength; i < other._data.length; i++) { + if (other._data[i] != 0) return false; + } + + return true; + } + + @override + int get hashCode { + var result = 1; + for (var i = 0; i < _data.length; i++) { + if (_data[i] != 0) { + result = result * 31 ^ i; + result = result * 31 ^ _data[i]; + } + } + + return result.hashCode; + } + + @override + String toString() { + final builder = StringBuffer(); + builder.write('{'); + + for (var i = nextset(0); i >= 0; i = nextset(i + 1)) { + if (builder.length > 1) builder.write(', '); + + builder.write(i); + } + + builder.write('}'); + return builder.toString(); + } +} diff --git a/runtime/Dart/lib/src/util/murmur_hash.dart b/runtime/Dart/lib/src/util/murmur_hash.dart new file mode 100644 index 000000000..daffd0cd2 --- /dev/null +++ b/runtime/Dart/lib/src/util/murmur_hash.dart @@ -0,0 +1,77 @@ +/* + * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ + +class MurmurHash { + static final int DEFAULT_SEED = 0; + + /// Initialize the hash using the specified [seed]. + /// + /// @param seed the seed + /// @return the intermediate hash value + static int initialize([int seed]) { + return seed ?? DEFAULT_SEED; + } + + /// Update the intermediate hash value for the next input [value]. + /// + /// @param hash the intermediate hash value + /// @param value the value to add to the current hash + /// @return the updated intermediate hash value + static int update(int hash, [value]) { + final c1 = 0xCC9E2D51; + final c2 = 0x1B873593; + final r1 = 15; + final r2 = 13; + final m = 5; + final n = 0xE6546B64; + + var k = value is int ? value : value?.hashCode ?? 
0; + + k = k * c1; + k = (k << r1) | (k >> (32 - r1)); + k = k * c2; + + hash = hash ^ k; + hash = (hash << r2) | (hash >> (32 - r2)); + hash = hash * m + n; + + return hash; + } + + /// Apply the final computation steps to the intermediate value [hash] + /// to form the final result of the MurmurHash 3 hash function. + /// + /// @param hash the intermediate hash value + /// @param numberOfWords the number of integer values added to the hash + /// @return the final hash result + static int finish(int hash, int numberOfWords) { + hash = hash ^ (numberOfWords * 4); + hash = hash ^ (hash >> 16); + hash = hash * 0x85EBCA6B; + hash = hash ^ (hash >> 13); + hash = hash * 0xC2B2AE35; + hash = hash ^ (hash >> 16); + return hash; + } + + /// Utility function to compute the hash code of an array using the + /// MurmurHash algorithm. + /// + /// @param the array element type + /// @param data the array data + /// @param seed the seed for the MurmurHash algorithm + /// @return the hash code of the data + static int getHashCode(List data, int seed) { + var hash = initialize(seed); + + for (var value in data) { + hash = update(hash, value); + } + + hash = finish(hash, data.length); + return hash; + } +} diff --git a/runtime/Dart/lib/src/util/utils.dart b/runtime/Dart/lib/src/util/utils.dart new file mode 100644 index 000000000..1b3e7f7e0 --- /dev/null +++ b/runtime/Dart/lib/src/util/utils.dart @@ -0,0 +1,31 @@ +/* + * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ + +/// Convert array of strings to string→index map. Useful for +/// converting rulenames to name→ruleindex map. 
+Map toMap(List keys) { + final m = {}; + for (var i = 0; i < keys.length; i++) { + m[keys[i]] = i; + } + return m; +} + +String arrayToString(a) { + return '[' + a.join(', ') + ']'; +} + +String escapeWhitespace(String s, [bool escapeSpaces = false]) { + if (escapeSpaces) s = s.replaceAll(' ', '\u00B7'); + s = s.replaceAll('\n', r'\n'); + s = s.replaceAll('\r', r'\r'); + s = s.replaceAll('\t', r'\t'); + return s; +} + +bool isLowerCase(String s) => s.toLowerCase() == s; + +bool isUpperCase(String s) => s.toUpperCase() == s; diff --git a/runtime/Dart/lib/src/vocabulary.dart b/runtime/Dart/lib/src/vocabulary.dart new file mode 100644 index 000000000..a0f170045 --- /dev/null +++ b/runtime/Dart/lib/src/vocabulary.dart @@ -0,0 +1,254 @@ +/* + * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ + +import 'dart:math'; + +import 'token.dart'; + +/// This interface provides information about the vocabulary used by a +/// recognizer. +/// +/// @see Recognizer#getVocabulary() +abstract class Vocabulary { + /// Returns the highest token type value. It can be used to iterate from + /// zero to that number, inclusively, thus querying all stored entries. + /// @return the highest token type value + int get maxTokenType; + + /// Gets the string literal associated with a token type. The string returned + /// by this method, when not null, can be used unaltered in a parser + /// grammar to represent this token type. + /// + ///

      The following table shows examples of lexer rules and the literal + /// names assigned to the corresponding token types.

      + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + ///
      RuleLiteral NameJava String Literal
      {@code THIS : 'this';}{@code 'this'}{@code "'this'"}
      {@code SQUOTE : '\'';}{@code '\''}{@code "'\\''"}
      {@code ID : [A-Z]+;}n/anull
      + /// + /// @param tokenType The token type. + /// + /// @return The string literal associated with the specified token type, or + /// null if no string literal is associated with the type. + String getLiteralName(int tokenType); + + /// Gets the symbolic name associated with a token type. The string returned + /// by this method, when not null, can be used unaltered in a parser + /// grammar to represent this token type. + /// + ///

      This method supports token types defined by any of the following + /// methods:

      + /// + ///
        + ///
      • Tokens created by lexer rules.
      • + ///
      • Tokens defined in a tokens{} block in a lexer or parser + /// grammar.
      • + ///
      • The implicitly defined [EOF] token, which has the token type + /// {@link Token#EOF}.
      • + ///
      + /// + ///

      The following table shows examples of lexer rules and the literal + /// names assigned to the corresponding token types.

      + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + ///
      RuleSymbolic Name
      {@code THIS : 'this';}[THIS]
      {@code SQUOTE : '\'';}[SQUOTE]
      {@code ID : [A-Z]+;}[ID]
      + /// + /// @param tokenType The token type. + /// + /// @return The symbolic name associated with the specified token type, or + /// null if no symbolic name is associated with the type. + String getSymbolicName(int tokenType); + + /// Gets the display name of a token type. + /// + ///

      ANTLR provides a default implementation of this method, but + /// applications are free to override the behavior in any manner which makes + /// sense for the application. The default implementation returns the first + /// result from the following list which produces a non-null + /// result.

      + /// + ///
        + ///
      1. The result of {@link #getLiteralName}
      2. + ///
      3. The result of {@link #getSymbolicName}
      4. + ///
      5. The result of {@link Integer#toString}
      6. + ///
      + /// + /// @param tokenType The token type. + /// + /// @return The display name of the token type, for use in error reporting or + /// other user-visible messages which reference specific token types. + String getDisplayName(int tokenType); +} + +/// This class provides a default implementation of the [Vocabulary] +/// interface. +class VocabularyImpl implements Vocabulary { + static const List EMPTY_NAMES = []; + + /// Gets an empty [Vocabulary] instance. + /// + ///

      + /// No literal or symbol names are assigned to token types, so + /// {@link #getDisplayName(int)} returns the numeric value for all tokens + /// except {@link Token#EOF}.

      + static final VocabularyImpl EMPTY_VOCABULARY = + VocabularyImpl(EMPTY_NAMES, EMPTY_NAMES, EMPTY_NAMES); + + final List literalNames; + + final List symbolicNames; + + final List displayNames; + + @override + int maxTokenType; + + /// Constructs a new instance of [VocabularyImpl] from the specified + /// literal, symbolic, and display token names. + /// + /// @param literalNames The literal names assigned to tokens, or null + /// if no literal names are assigned. + /// @param symbolicNames The symbolic names assigned to tokens, or + /// null if no symbolic names are assigned. + /// @param displayNames The display names assigned to tokens, or null + /// to use the values in [literalNames] and [symbolicNames] as + /// the source of display names, as described in + /// {@link #getDisplayName(int)}. + /// + /// @see #getLiteralName(int) + /// @see #getSymbolicName(int) + /// @see #getDisplayName(int) + VocabularyImpl(this.literalNames, this.symbolicNames, + [this.displayNames = EMPTY_NAMES]) { + // See note here on -1 part: https://github.com/antlr/antlr4/pull/1146 + maxTokenType = max(displayNames.length, + max(literalNames.length, symbolicNames.length)) - + 1; + } + + /// Returns a [VocabularyImpl] instance from the specified set of token + /// names. This method acts as a compatibility layer for the single + /// [tokenNames] array generated by previous releases of ANTLR. + /// + ///

      The resulting vocabulary instance returns null for + /// {@link #getLiteralName(int)} and {@link #getSymbolicName(int)}, and the + /// value from [tokenNames] for the display names.

      + /// + /// @param tokenNames The token names, or null if no token names are + /// available. + /// @return A [Vocabulary] instance which uses [tokenNames] for + /// the display names of tokens. + static Vocabulary fromTokenNames(List tokenNames) { + if (tokenNames == null || tokenNames.isEmpty) { + return EMPTY_VOCABULARY; + } + + final literalNames = List.from(tokenNames); + final symbolicNames = List.from(tokenNames); + for (var i = 0; i < tokenNames.length; i++) { + final tokenName = tokenNames[i]; + if (tokenName == null) { + continue; + } + + if (tokenName.isNotEmpty) { + final firstChar = tokenName[0]; + if (firstChar == '\'') { + symbolicNames[i] = null; + continue; + } else if (firstChar.toUpperCase() == firstChar) { + literalNames[i] = null; + continue; + } + } + + // wasn't a literal or symbolic name + literalNames[i] = null; + symbolicNames[i] = null; + } + + return VocabularyImpl(literalNames, symbolicNames, tokenNames); + } + + @override + String getLiteralName(int tokenType) { + if (tokenType >= 0 && tokenType < literalNames.length) { + return literalNames[tokenType]; + } + + return null; + } + + @override + String getSymbolicName(int tokenType) { + if (tokenType >= 0 && tokenType < symbolicNames.length) { + return symbolicNames[tokenType]; + } + + if (tokenType == Token.EOF) { + return 'EOF'; + } + + return null; + } + + @override + String getDisplayName(int tokenType) { + if (tokenType >= 0 && tokenType < displayNames.length) { + final displayName = displayNames[tokenType]; + if (displayName != null) { + return displayName; + } + } + + final literalName = getLiteralName(tokenType); + if (literalName != null) { + return literalName; + } + + final symbolicName = getSymbolicName(tokenType); + if (symbolicName != null) { + return symbolicName; + } + + return tokenType.toString(); + } +} diff --git a/runtime/Dart/pubspec.yaml b/runtime/Dart/pubspec.yaml new file mode 100644 index 000000000..3a5c79645 --- /dev/null +++ b/runtime/Dart/pubspec.yaml 
@@ -0,0 +1,13 @@ +name: "antlr4" +version: "4.8.0-dev.2" +description: "New Dart runtime for ANTLR4." +homepage: "https://github.com/antlr/antlr4" +license: "BSD-3-Clause" +dependencies: + logging: ^0.11.4 + collection: ^1.14.12 +dev_dependencies: + pedantic: ^1.0.0 + +environment: + sdk: ">=2.7.0 <3.0.0" diff --git a/runtime/Go/antlr/lexer.go b/runtime/Go/antlr/lexer.go index 02deaf99c..b04f04572 100644 --- a/runtime/Go/antlr/lexer.go +++ b/runtime/Go/antlr/lexer.go @@ -278,7 +278,8 @@ func (b *BaseLexer) inputStream() CharStream { return b.input } -func (b *BaseLexer) setInputStream(input CharStream) { +// SetInputStream resets the lexer input stream and associated lexer state. +func (b *BaseLexer) SetInputStream(input CharStream) { b.input = nil b.tokenFactorySourcePair = &TokenSourceCharStreamPair{b, b.input} b.reset() diff --git a/runtime/Go/antlr/recognizer.go b/runtime/Go/antlr/recognizer.go index 9ea9f6f59..d114800f4 100644 --- a/runtime/Go/antlr/recognizer.go +++ b/runtime/Go/antlr/recognizer.go @@ -49,7 +49,7 @@ var tokenTypeMapCache = make(map[string]int) var ruleIndexMapCache = make(map[string]int) func (b *BaseRecognizer) checkVersion(toolVersion string) { - runtimeVersion := "4.7.2" + runtimeVersion := "4.8" if runtimeVersion != toolVersion { fmt.Println("ANTLR runtime and generated code versions disagree: " + runtimeVersion + "!=" + toolVersion) } diff --git a/runtime/Go/antlr/tree.go b/runtime/Go/antlr/tree.go index ad0eabf00..bdeb6d788 100644 --- a/runtime/Go/antlr/tree.go +++ b/runtime/Go/antlr/tree.go @@ -214,6 +214,10 @@ func NewParseTreeWalker() *ParseTreeWalker { return new(ParseTreeWalker) } +// Performs a walk on the given parse tree starting at the root and going down recursively +// with depth-first search. On each node, EnterRule is called before +// recursively walking down into child nodes, then +// ExitRule is called after the recursive call to wind up. 
func (p *ParseTreeWalker) Walk(listener ParseTreeListener, t Tree) { switch tt := t.(type) { case ErrorNode: @@ -231,10 +235,8 @@ func (p *ParseTreeWalker) Walk(listener ParseTreeListener, t Tree) { } // -// The discovery of a rule node, involves sending two events: the generic -// {@link ParseTreeListener//EnterEveryRule} and a -// {@link RuleContext}-specific event. First we trigger the generic and then -// the rule specific. We to them in reverse order upon finishing the node. +// Enters a grammar rule by first triggering the generic event {@link ParseTreeListener//EnterEveryRule} +// then by triggering the event specific to the given parse tree node // func (p *ParseTreeWalker) EnterRule(listener ParseTreeListener, r RuleNode) { ctx := r.GetRuleContext().(ParserRuleContext) @@ -242,6 +244,9 @@ func (p *ParseTreeWalker) EnterRule(listener ParseTreeListener, r RuleNode) { ctx.EnterRule(listener) } +// Exits a grammar rule by first triggering the event specific to the given parse tree node +// then by triggering the generic event {@link ParseTreeListener//ExitEveryRule} +// func (p *ParseTreeWalker) ExitRule(listener ParseTreeListener, r RuleNode) { ctx := r.GetRuleContext().(ParserRuleContext) ctx.ExitRule(listener) diff --git a/runtime/Java/pom.xml b/runtime/Java/pom.xml index 92605583d..ce1e2063a 100644 --- a/runtime/Java/pom.xml +++ b/runtime/Java/pom.xml @@ -9,7 +9,7 @@ org.antlr antlr4-master - 4.7.3-SNAPSHOT + 4.8-2-SNAPSHOT ../../pom.xml antlr4-runtime diff --git a/runtime/Java/src/org/antlr/v4/runtime/RuntimeMetaData.java b/runtime/Java/src/org/antlr/v4/runtime/RuntimeMetaData.java index 05e81441b..4b38bded8 100644 --- a/runtime/Java/src/org/antlr/v4/runtime/RuntimeMetaData.java +++ b/runtime/Java/src/org/antlr/v4/runtime/RuntimeMetaData.java @@ -67,7 +67,7 @@ public class RuntimeMetaData { * omitted. *
    */ - public static final String VERSION = "4.7.2"; + public static final String VERSION = "4.8"; /** * Gets the currently executing version of the ANTLR 4 runtime library. diff --git a/runtime/Java/src/org/antlr/v4/runtime/tree/ParseTreeWalker.java b/runtime/Java/src/org/antlr/v4/runtime/tree/ParseTreeWalker.java index ede5febe8..e36befd34 100644 --- a/runtime/Java/src/org/antlr/v4/runtime/tree/ParseTreeWalker.java +++ b/runtime/Java/src/org/antlr/v4/runtime/tree/ParseTreeWalker.java @@ -7,12 +7,20 @@ package org.antlr.v4.runtime.tree; import org.antlr.v4.runtime.ParserRuleContext; -import org.antlr.v4.runtime.RuleContext; public class ParseTreeWalker { public static final ParseTreeWalker DEFAULT = new ParseTreeWalker(); - public void walk(ParseTreeListener listener, ParseTree t) { + + /** + * Performs a walk on the given parse tree starting at the root and going down recursively + * with depth-first search. On each node, {@link ParseTreeWalker#enterRule} is called before + * recursively walking down into child nodes, then + * {@link ParseTreeWalker#exitRule} is called after the recursive call to wind up. + * @param listener The listener used by the walker to process grammar rules + * @param t The parse tree to be walked on + */ + public void walk(ParseTreeListener listener, ParseTree t) { if ( t instanceof ErrorNode) { listener.visitErrorNode((ErrorNode)t); return; @@ -31,10 +39,10 @@ public class ParseTreeWalker { } /** - * The discovery of a rule node, involves sending two events: the generic - * {@link ParseTreeListener#enterEveryRule} and a - * {@link RuleContext}-specific event. First we trigger the generic and then - * the rule specific. We to them in reverse order upon finishing the node. 
+ * Enters a grammar rule by first triggering the generic event {@link ParseTreeListener#enterEveryRule} + * then by triggering the event specific to the given parse tree node + * @param listener The listener responding to the trigger events + * @param r The grammar rule containing the rule context */ protected void enterRule(ParseTreeListener listener, RuleNode r) { ParserRuleContext ctx = (ParserRuleContext)r.getRuleContext(); @@ -42,7 +50,14 @@ public class ParseTreeWalker { ctx.enterRule(listener); } - protected void exitRule(ParseTreeListener listener, RuleNode r) { + + /** + * Exits a grammar rule by first triggering the event specific to the given parse tree node + * then by triggering the generic event {@link ParseTreeListener#exitEveryRule} + * @param listener The listener responding to the trigger events + * @param r The grammar rule containing the rule context + */ + protected void exitRule(ParseTreeListener listener, RuleNode r) { ParserRuleContext ctx = (ParserRuleContext)r.getRuleContext(); ctx.exitRule(listener); listener.exitEveryRule(ctx); diff --git a/runtime/Java/src/org/antlr/v4/runtime/tree/Tree.java b/runtime/Java/src/org/antlr/v4/runtime/tree/Tree.java index 0a1bed64f..48b726b0a 100644 --- a/runtime/Java/src/org/antlr/v4/runtime/tree/Tree.java +++ b/runtime/Java/src/org/antlr/v4/runtime/tree/Tree.java @@ -19,7 +19,7 @@ public interface Tree { Tree getParent(); /** - * This method returns whatever object represents the data at this note. For + * This method returns whatever object represents the data at this node. For * example, for parse trees, the payload can be a {@link Token} representing * a leaf node or a {@link RuleContext} object representing a rule * invocation. 
For abstract syntax trees (ASTs), this is a {@link Token} diff --git a/runtime/JavaScript/.babelrc b/runtime/JavaScript/.babelrc new file mode 100644 index 000000000..1320b9a32 --- /dev/null +++ b/runtime/JavaScript/.babelrc @@ -0,0 +1,3 @@ +{ + "presets": ["@babel/preset-env"] +} diff --git a/runtime/JavaScript/.gitignore b/runtime/JavaScript/.gitignore new file mode 100644 index 000000000..27e212f08 --- /dev/null +++ b/runtime/JavaScript/.gitignore @@ -0,0 +1,2 @@ +/dist/ +/node_modules diff --git a/runtime/JavaScript/README.md b/runtime/JavaScript/README.md index 5205fd423..0cce215fd 100644 --- a/runtime/JavaScript/README.md +++ b/runtime/JavaScript/README.md @@ -8,6 +8,28 @@ This runtime has been tested in Node.js, Safari, Firefox, Chrome and IE. See www.antlr.org for more information on ANTLR -See https://github.com/antlr/antlr4/blob/master/doc/javascript-target.md for more information on using ANTLR in JavaScript +See [Javascript Target](https://github.com/antlr/antlr4/blob/master/doc/javascript-target.md) +for more information on using ANTLR in JavaScript + + +## publishing + +The JavaScript itself is tested using npm, so assumption is npm is already installed. +The current npm version used is 3.10.9. + +### to npm + +The publishing itself relies on the information in package.json. +To publish run `npm login` from Terminal, then `npm publish antlr4` + +That's it! + +### to browser + +To publish antlr4 for browser usage you need to bundle it into a single +file with `npm run build`. This will create `dist/antlr4.js` file. Upload it +to your favourite server. + +That's it! 
diff --git a/runtime/JavaScript/package-lock.json b/runtime/JavaScript/package-lock.json new file mode 100644 index 000000000..bbedfca60 --- /dev/null +++ b/runtime/JavaScript/package-lock.json @@ -0,0 +1,5471 @@ +{ + "name": "antlr4", + "version": "4.8.0", + "lockfileVersion": 1, + "requires": true, + "dependencies": { + "@babel/code-frame": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.8.3.tgz", + "integrity": "sha512-a9gxpmdXtZEInkCSHUJDLHZVBgb1QS0jhss4cPP93EW7s+uC5bikET2twEF3KV+7rDblJcmNvTR7VJejqd2C2g==", + "dev": true, + "requires": { + "@babel/highlight": "^7.8.3" + } + }, + "@babel/compat-data": { + "version": "7.8.5", + "resolved": "https://registry.npmjs.org/@babel/compat-data/-/compat-data-7.8.5.tgz", + "integrity": "sha512-jWYUqQX/ObOhG1UiEkbH5SANsE/8oKXiQWjj7p7xgj9Zmnt//aUvyz4dBkK0HNsS8/cbyC5NmmH87VekW+mXFg==", + "dev": true, + "requires": { + "browserslist": "^4.8.5", + "invariant": "^2.2.4", + "semver": "^5.5.0" + } + }, + "@babel/core": { + "version": "7.8.4", + "resolved": "https://registry.npmjs.org/@babel/core/-/core-7.8.4.tgz", + "integrity": "sha512-0LiLrB2PwrVI+a2/IEskBopDYSd8BCb3rOvH7D5tzoWd696TBEduBvuLVm4Nx6rltrLZqvI3MCalB2K2aVzQjA==", + "dev": true, + "requires": { + "@babel/code-frame": "^7.8.3", + "@babel/generator": "^7.8.4", + "@babel/helpers": "^7.8.4", + "@babel/parser": "^7.8.4", + "@babel/template": "^7.8.3", + "@babel/traverse": "^7.8.4", + "@babel/types": "^7.8.3", + "convert-source-map": "^1.7.0", + "debug": "^4.1.0", + "gensync": "^1.0.0-beta.1", + "json5": "^2.1.0", + "lodash": "^4.17.13", + "resolve": "^1.3.2", + "semver": "^5.4.1", + "source-map": "^0.5.0" + }, + "dependencies": { + "json5": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/json5/-/json5-2.1.1.tgz", + "integrity": "sha512-l+3HXD0GEI3huGq1njuqtzYK8OYJyXMkOLtQ53pjWh89tvWS2h6l+1zMkYWqlb57+SiQodKZyvMEFb2X+KrFhQ==", + "dev": true, + "requires": { + "minimist": "^1.2.0" + } + }, + "minimist": { + 
"version": "1.2.0", + "resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.0.tgz", + "integrity": "sha1-o1AIsg9BOD7sH7kU9M1d95omQoQ=", + "dev": true + } + } + }, + "@babel/generator": { + "version": "7.8.4", + "resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.8.4.tgz", + "integrity": "sha512-PwhclGdRpNAf3IxZb0YVuITPZmmrXz9zf6fH8lT4XbrmfQKr6ryBzhv593P5C6poJRciFCL/eHGW2NuGrgEyxA==", + "dev": true, + "requires": { + "@babel/types": "^7.8.3", + "jsesc": "^2.5.1", + "lodash": "^4.17.13", + "source-map": "^0.5.0" + } + }, + "@babel/helper-annotate-as-pure": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/helper-annotate-as-pure/-/helper-annotate-as-pure-7.8.3.tgz", + "integrity": "sha512-6o+mJrZBxOoEX77Ezv9zwW7WV8DdluouRKNY/IR5u/YTMuKHgugHOzYWlYvYLpLA9nPsQCAAASpCIbjI9Mv+Uw==", + "dev": true, + "requires": { + "@babel/types": "^7.8.3" + } + }, + "@babel/helper-builder-binary-assignment-operator-visitor": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/helper-builder-binary-assignment-operator-visitor/-/helper-builder-binary-assignment-operator-visitor-7.8.3.tgz", + "integrity": "sha512-5eFOm2SyFPK4Rh3XMMRDjN7lBH0orh3ss0g3rTYZnBQ+r6YPj7lgDyCvPphynHvUrobJmeMignBr6Acw9mAPlw==", + "dev": true, + "requires": { + "@babel/helper-explode-assignable-expression": "^7.8.3", + "@babel/types": "^7.8.3" + } + }, + "@babel/helper-call-delegate": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/helper-call-delegate/-/helper-call-delegate-7.8.3.tgz", + "integrity": "sha512-6Q05px0Eb+N4/GTyKPPvnkig7Lylw+QzihMpws9iiZQv7ZImf84ZsZpQH7QoWN4n4tm81SnSzPgHw2qtO0Zf3A==", + "dev": true, + "requires": { + "@babel/helper-hoist-variables": "^7.8.3", + "@babel/traverse": "^7.8.3", + "@babel/types": "^7.8.3" + } + }, + "@babel/helper-compilation-targets": { + "version": "7.8.4", + "resolved": "https://registry.npmjs.org/@babel/helper-compilation-targets/-/helper-compilation-targets-7.8.4.tgz", + 
"integrity": "sha512-3k3BsKMvPp5bjxgMdrFyq0UaEO48HciVrOVF0+lon8pp95cyJ2ujAh0TrBHNMnJGT2rr0iKOJPFFbSqjDyf/Pg==", + "dev": true, + "requires": { + "@babel/compat-data": "^7.8.4", + "browserslist": "^4.8.5", + "invariant": "^2.2.4", + "levenary": "^1.1.1", + "semver": "^5.5.0" + } + }, + "@babel/helper-create-regexp-features-plugin": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/helper-create-regexp-features-plugin/-/helper-create-regexp-features-plugin-7.8.3.tgz", + "integrity": "sha512-Gcsm1OHCUr9o9TcJln57xhWHtdXbA2pgQ58S0Lxlks0WMGNXuki4+GLfX0p+L2ZkINUGZvfkz8rzoqJQSthI+Q==", + "dev": true, + "requires": { + "@babel/helper-regex": "^7.8.3", + "regexpu-core": "^4.6.0" + } + }, + "@babel/helper-define-map": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/helper-define-map/-/helper-define-map-7.8.3.tgz", + "integrity": "sha512-PoeBYtxoZGtct3md6xZOCWPcKuMuk3IHhgxsRRNtnNShebf4C8YonTSblsK4tvDbm+eJAw2HAPOfCr+Q/YRG/g==", + "dev": true, + "requires": { + "@babel/helper-function-name": "^7.8.3", + "@babel/types": "^7.8.3", + "lodash": "^4.17.13" + } + }, + "@babel/helper-explode-assignable-expression": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/helper-explode-assignable-expression/-/helper-explode-assignable-expression-7.8.3.tgz", + "integrity": "sha512-N+8eW86/Kj147bO9G2uclsg5pwfs/fqqY5rwgIL7eTBklgXjcOJ3btzS5iM6AitJcftnY7pm2lGsrJVYLGjzIw==", + "dev": true, + "requires": { + "@babel/traverse": "^7.8.3", + "@babel/types": "^7.8.3" + } + }, + "@babel/helper-function-name": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/helper-function-name/-/helper-function-name-7.8.3.tgz", + "integrity": "sha512-BCxgX1BC2hD/oBlIFUgOCQDOPV8nSINxCwM3o93xP4P9Fq6aV5sgv2cOOITDMtCfQ+3PvHp3l689XZvAM9QyOA==", + "dev": true, + "requires": { + "@babel/helper-get-function-arity": "^7.8.3", + "@babel/template": "^7.8.3", + "@babel/types": "^7.8.3" + } + }, + "@babel/helper-get-function-arity": { + 
"version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/helper-get-function-arity/-/helper-get-function-arity-7.8.3.tgz", + "integrity": "sha512-FVDR+Gd9iLjUMY1fzE2SR0IuaJToR4RkCDARVfsBBPSP53GEqSFjD8gNyxg246VUyc/ALRxFaAK8rVG7UT7xRA==", + "dev": true, + "requires": { + "@babel/types": "^7.8.3" + } + }, + "@babel/helper-hoist-variables": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/helper-hoist-variables/-/helper-hoist-variables-7.8.3.tgz", + "integrity": "sha512-ky1JLOjcDUtSc+xkt0xhYff7Z6ILTAHKmZLHPxAhOP0Nd77O+3nCsd6uSVYur6nJnCI029CrNbYlc0LoPfAPQg==", + "dev": true, + "requires": { + "@babel/types": "^7.8.3" + } + }, + "@babel/helper-member-expression-to-functions": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/helper-member-expression-to-functions/-/helper-member-expression-to-functions-7.8.3.tgz", + "integrity": "sha512-fO4Egq88utkQFjbPrSHGmGLFqmrshs11d46WI+WZDESt7Wu7wN2G2Iu+NMMZJFDOVRHAMIkB5SNh30NtwCA7RA==", + "dev": true, + "requires": { + "@babel/types": "^7.8.3" + } + }, + "@babel/helper-module-imports": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/helper-module-imports/-/helper-module-imports-7.8.3.tgz", + "integrity": "sha512-R0Bx3jippsbAEtzkpZ/6FIiuzOURPcMjHp+Z6xPe6DtApDJx+w7UYyOLanZqO8+wKR9G10s/FmHXvxaMd9s6Kg==", + "dev": true, + "requires": { + "@babel/types": "^7.8.3" + } + }, + "@babel/helper-module-transforms": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/helper-module-transforms/-/helper-module-transforms-7.8.3.tgz", + "integrity": "sha512-C7NG6B7vfBa/pwCOshpMbOYUmrYQDfCpVL/JCRu0ek8B5p8kue1+BCXpg2vOYs7w5ACB9GTOBYQ5U6NwrMg+3Q==", + "dev": true, + "requires": { + "@babel/helper-module-imports": "^7.8.3", + "@babel/helper-simple-access": "^7.8.3", + "@babel/helper-split-export-declaration": "^7.8.3", + "@babel/template": "^7.8.3", + "@babel/types": "^7.8.3", + "lodash": "^4.17.13" + } + }, + "@babel/helper-optimise-call-expression": 
{ + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/helper-optimise-call-expression/-/helper-optimise-call-expression-7.8.3.tgz", + "integrity": "sha512-Kag20n86cbO2AvHca6EJsvqAd82gc6VMGule4HwebwMlwkpXuVqrNRj6CkCV2sKxgi9MyAUnZVnZ6lJ1/vKhHQ==", + "dev": true, + "requires": { + "@babel/types": "^7.8.3" + } + }, + "@babel/helper-plugin-utils": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/helper-plugin-utils/-/helper-plugin-utils-7.8.3.tgz", + "integrity": "sha512-j+fq49Xds2smCUNYmEHF9kGNkhbet6yVIBp4e6oeQpH1RUs/Ir06xUKzDjDkGcaaokPiTNs2JBWHjaE4csUkZQ==", + "dev": true + }, + "@babel/helper-regex": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/helper-regex/-/helper-regex-7.8.3.tgz", + "integrity": "sha512-BWt0QtYv/cg/NecOAZMdcn/waj/5P26DR4mVLXfFtDokSR6fyuG0Pj+e2FqtSME+MqED1khnSMulkmGl8qWiUQ==", + "dev": true, + "requires": { + "lodash": "^4.17.13" + } + }, + "@babel/helper-remap-async-to-generator": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/helper-remap-async-to-generator/-/helper-remap-async-to-generator-7.8.3.tgz", + "integrity": "sha512-kgwDmw4fCg7AVgS4DukQR/roGp+jP+XluJE5hsRZwxCYGg+Rv9wSGErDWhlI90FODdYfd4xG4AQRiMDjjN0GzA==", + "dev": true, + "requires": { + "@babel/helper-annotate-as-pure": "^7.8.3", + "@babel/helper-wrap-function": "^7.8.3", + "@babel/template": "^7.8.3", + "@babel/traverse": "^7.8.3", + "@babel/types": "^7.8.3" + } + }, + "@babel/helper-replace-supers": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/helper-replace-supers/-/helper-replace-supers-7.8.3.tgz", + "integrity": "sha512-xOUssL6ho41U81etpLoT2RTdvdus4VfHamCuAm4AHxGr+0it5fnwoVdwUJ7GFEqCsQYzJUhcbsN9wB9apcYKFA==", + "dev": true, + "requires": { + "@babel/helper-member-expression-to-functions": "^7.8.3", + "@babel/helper-optimise-call-expression": "^7.8.3", + "@babel/traverse": "^7.8.3", + "@babel/types": "^7.8.3" + } + }, + "@babel/helper-simple-access": { + 
"version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/helper-simple-access/-/helper-simple-access-7.8.3.tgz", + "integrity": "sha512-VNGUDjx5cCWg4vvCTR8qQ7YJYZ+HBjxOgXEl7ounz+4Sn7+LMD3CFrCTEU6/qXKbA2nKg21CwhhBzO0RpRbdCw==", + "dev": true, + "requires": { + "@babel/template": "^7.8.3", + "@babel/types": "^7.8.3" + } + }, + "@babel/helper-split-export-declaration": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/helper-split-export-declaration/-/helper-split-export-declaration-7.8.3.tgz", + "integrity": "sha512-3x3yOeyBhW851hroze7ElzdkeRXQYQbFIb7gLK1WQYsw2GWDay5gAJNw1sWJ0VFP6z5J1whqeXH/WCdCjZv6dA==", + "dev": true, + "requires": { + "@babel/types": "^7.8.3" + } + }, + "@babel/helper-wrap-function": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/helper-wrap-function/-/helper-wrap-function-7.8.3.tgz", + "integrity": "sha512-LACJrbUET9cQDzb6kG7EeD7+7doC3JNvUgTEQOx2qaO1fKlzE/Bf05qs9w1oXQMmXlPO65lC3Tq9S6gZpTErEQ==", + "dev": true, + "requires": { + "@babel/helper-function-name": "^7.8.3", + "@babel/template": "^7.8.3", + "@babel/traverse": "^7.8.3", + "@babel/types": "^7.8.3" + } + }, + "@babel/helpers": { + "version": "7.8.4", + "resolved": "https://registry.npmjs.org/@babel/helpers/-/helpers-7.8.4.tgz", + "integrity": "sha512-VPbe7wcQ4chu4TDQjimHv/5tj73qz88o12EPkO2ValS2QiQS/1F2SsjyIGNnAD0vF/nZS6Cf9i+vW6HIlnaR8w==", + "dev": true, + "requires": { + "@babel/template": "^7.8.3", + "@babel/traverse": "^7.8.4", + "@babel/types": "^7.8.3" + } + }, + "@babel/highlight": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/highlight/-/highlight-7.8.3.tgz", + "integrity": "sha512-PX4y5xQUvy0fnEVHrYOarRPXVWafSjTW9T0Hab8gVIawpl2Sj0ORyrygANq+KjcNlSSTw0YCLSNA8OyZ1I4yEg==", + "dev": true, + "requires": { + "chalk": "^2.0.0", + "esutils": "^2.0.2", + "js-tokens": "^4.0.0" + } + }, + "@babel/parser": { + "version": "7.8.4", + "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.8.4.tgz", + 
"integrity": "sha512-0fKu/QqildpXmPVaRBoXOlyBb3MC+J0A66x97qEfLOMkn3u6nfY5esWogQwi/K0BjASYy4DbnsEWnpNL6qT5Mw==", + "dev": true + }, + "@babel/plugin-proposal-async-generator-functions": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-async-generator-functions/-/plugin-proposal-async-generator-functions-7.8.3.tgz", + "integrity": "sha512-NZ9zLv848JsV3hs8ryEh7Uaz/0KsmPLqv0+PdkDJL1cJy0K4kOCFa8zc1E3mp+RHPQcpdfb/6GovEsW4VDrOMw==", + "dev": true, + "requires": { + "@babel/helper-plugin-utils": "^7.8.3", + "@babel/helper-remap-async-to-generator": "^7.8.3", + "@babel/plugin-syntax-async-generators": "^7.8.0" + } + }, + "@babel/plugin-proposal-dynamic-import": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-dynamic-import/-/plugin-proposal-dynamic-import-7.8.3.tgz", + "integrity": "sha512-NyaBbyLFXFLT9FP+zk0kYlUlA8XtCUbehs67F0nnEg7KICgMc2mNkIeu9TYhKzyXMkrapZFwAhXLdnt4IYHy1w==", + "dev": true, + "requires": { + "@babel/helper-plugin-utils": "^7.8.3", + "@babel/plugin-syntax-dynamic-import": "^7.8.0" + } + }, + "@babel/plugin-proposal-json-strings": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-json-strings/-/plugin-proposal-json-strings-7.8.3.tgz", + "integrity": "sha512-KGhQNZ3TVCQG/MjRbAUwuH+14y9q0tpxs1nWWs3pbSleRdDro9SAMMDyye8HhY1gqZ7/NqIc8SKhya0wRDgP1Q==", + "dev": true, + "requires": { + "@babel/helper-plugin-utils": "^7.8.3", + "@babel/plugin-syntax-json-strings": "^7.8.0" + } + }, + "@babel/plugin-proposal-nullish-coalescing-operator": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-nullish-coalescing-operator/-/plugin-proposal-nullish-coalescing-operator-7.8.3.tgz", + "integrity": "sha512-TS9MlfzXpXKt6YYomudb/KU7nQI6/xnapG6in1uZxoxDghuSMZsPb6D2fyUwNYSAp4l1iR7QtFOjkqcRYcUsfw==", + "dev": true, + "requires": { + "@babel/helper-plugin-utils": "^7.8.3", + "@babel/plugin-syntax-nullish-coalescing-operator": 
"^7.8.0" + } + }, + "@babel/plugin-proposal-object-rest-spread": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-object-rest-spread/-/plugin-proposal-object-rest-spread-7.8.3.tgz", + "integrity": "sha512-8qvuPwU/xxUCt78HocNlv0mXXo0wdh9VT1R04WU8HGOfaOob26pF+9P5/lYjN/q7DHOX1bvX60hnhOvuQUJdbA==", + "dev": true, + "requires": { + "@babel/helper-plugin-utils": "^7.8.3", + "@babel/plugin-syntax-object-rest-spread": "^7.8.0" + } + }, + "@babel/plugin-proposal-optional-catch-binding": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-optional-catch-binding/-/plugin-proposal-optional-catch-binding-7.8.3.tgz", + "integrity": "sha512-0gkX7J7E+AtAw9fcwlVQj8peP61qhdg/89D5swOkjYbkboA2CVckn3kiyum1DE0wskGb7KJJxBdyEBApDLLVdw==", + "dev": true, + "requires": { + "@babel/helper-plugin-utils": "^7.8.3", + "@babel/plugin-syntax-optional-catch-binding": "^7.8.0" + } + }, + "@babel/plugin-proposal-optional-chaining": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-optional-chaining/-/plugin-proposal-optional-chaining-7.8.3.tgz", + "integrity": "sha512-QIoIR9abkVn+seDE3OjA08jWcs3eZ9+wJCKSRgo3WdEU2csFYgdScb+8qHB3+WXsGJD55u+5hWCISI7ejXS+kg==", + "dev": true, + "requires": { + "@babel/helper-plugin-utils": "^7.8.3", + "@babel/plugin-syntax-optional-chaining": "^7.8.0" + } + }, + "@babel/plugin-proposal-unicode-property-regex": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-unicode-property-regex/-/plugin-proposal-unicode-property-regex-7.8.3.tgz", + "integrity": "sha512-1/1/rEZv2XGweRwwSkLpY+s60za9OZ1hJs4YDqFHCw0kYWYwL5IFljVY1MYBL+weT1l9pokDO2uhSTLVxzoHkQ==", + "dev": true, + "requires": { + "@babel/helper-create-regexp-features-plugin": "^7.8.3", + "@babel/helper-plugin-utils": "^7.8.3" + } + }, + "@babel/plugin-syntax-async-generators": { + "version": "7.8.4", + "resolved": 
"https://registry.npmjs.org/@babel/plugin-syntax-async-generators/-/plugin-syntax-async-generators-7.8.4.tgz", + "integrity": "sha512-tycmZxkGfZaxhMRbXlPXuVFpdWlXpir2W4AMhSJgRKzk/eDlIXOhb2LHWoLpDF7TEHylV5zNhykX6KAgHJmTNw==", + "dev": true, + "requires": { + "@babel/helper-plugin-utils": "^7.8.0" + } + }, + "@babel/plugin-syntax-dynamic-import": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-dynamic-import/-/plugin-syntax-dynamic-import-7.8.3.tgz", + "integrity": "sha512-5gdGbFon+PszYzqs83S3E5mpi7/y/8M9eC90MRTZfduQOYW76ig6SOSPNe41IG5LoP3FGBn2N0RjVDSQiS94kQ==", + "dev": true, + "requires": { + "@babel/helper-plugin-utils": "^7.8.0" + } + }, + "@babel/plugin-syntax-json-strings": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-json-strings/-/plugin-syntax-json-strings-7.8.3.tgz", + "integrity": "sha512-lY6kdGpWHvjoe2vk4WrAapEuBR69EMxZl+RoGRhrFGNYVK8mOPAW8VfbT/ZgrFbXlDNiiaxQnAtgVCZ6jv30EA==", + "dev": true, + "requires": { + "@babel/helper-plugin-utils": "^7.8.0" + } + }, + "@babel/plugin-syntax-nullish-coalescing-operator": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-nullish-coalescing-operator/-/plugin-syntax-nullish-coalescing-operator-7.8.3.tgz", + "integrity": "sha512-aSff4zPII1u2QD7y+F8oDsz19ew4IGEJg9SVW+bqwpwtfFleiQDMdzA/R+UlWDzfnHFCxxleFT0PMIrR36XLNQ==", + "dev": true, + "requires": { + "@babel/helper-plugin-utils": "^7.8.0" + } + }, + "@babel/plugin-syntax-object-rest-spread": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-object-rest-spread/-/plugin-syntax-object-rest-spread-7.8.3.tgz", + "integrity": "sha512-XoqMijGZb9y3y2XskN+P1wUGiVwWZ5JmoDRwx5+3GmEplNyVM2s2Dg8ILFQm8rWM48orGy5YpI5Bl8U1y7ydlA==", + "dev": true, + "requires": { + "@babel/helper-plugin-utils": "^7.8.0" + } + }, + "@babel/plugin-syntax-optional-catch-binding": { + "version": "7.8.3", + "resolved": 
"https://registry.npmjs.org/@babel/plugin-syntax-optional-catch-binding/-/plugin-syntax-optional-catch-binding-7.8.3.tgz", + "integrity": "sha512-6VPD0Pc1lpTqw0aKoeRTMiB+kWhAoT24PA+ksWSBrFtl5SIRVpZlwN3NNPQjehA2E/91FV3RjLWoVTglWcSV3Q==", + "dev": true, + "requires": { + "@babel/helper-plugin-utils": "^7.8.0" + } + }, + "@babel/plugin-syntax-optional-chaining": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-optional-chaining/-/plugin-syntax-optional-chaining-7.8.3.tgz", + "integrity": "sha512-KoK9ErH1MBlCPxV0VANkXW2/dw4vlbGDrFgz8bmUsBGYkFRcbRwMh6cIJubdPrkxRwuGdtCk0v/wPTKbQgBjkg==", + "dev": true, + "requires": { + "@babel/helper-plugin-utils": "^7.8.0" + } + }, + "@babel/plugin-syntax-top-level-await": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-top-level-await/-/plugin-syntax-top-level-await-7.8.3.tgz", + "integrity": "sha512-kwj1j9lL/6Wd0hROD3b/OZZ7MSrZLqqn9RAZ5+cYYsflQ9HZBIKCUkr3+uL1MEJ1NePiUbf98jjiMQSv0NMR9g==", + "dev": true, + "requires": { + "@babel/helper-plugin-utils": "^7.8.3" + } + }, + "@babel/plugin-transform-arrow-functions": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-arrow-functions/-/plugin-transform-arrow-functions-7.8.3.tgz", + "integrity": "sha512-0MRF+KC8EqH4dbuITCWwPSzsyO3HIWWlm30v8BbbpOrS1B++isGxPnnuq/IZvOX5J2D/p7DQalQm+/2PnlKGxg==", + "dev": true, + "requires": { + "@babel/helper-plugin-utils": "^7.8.3" + } + }, + "@babel/plugin-transform-async-to-generator": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-async-to-generator/-/plugin-transform-async-to-generator-7.8.3.tgz", + "integrity": "sha512-imt9tFLD9ogt56Dd5CI/6XgpukMwd/fLGSrix2httihVe7LOGVPhyhMh1BU5kDM7iHD08i8uUtmV2sWaBFlHVQ==", + "dev": true, + "requires": { + "@babel/helper-module-imports": "^7.8.3", + "@babel/helper-plugin-utils": "^7.8.3", + "@babel/helper-remap-async-to-generator": "^7.8.3" + } + }, + 
"@babel/plugin-transform-block-scoped-functions": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-block-scoped-functions/-/plugin-transform-block-scoped-functions-7.8.3.tgz", + "integrity": "sha512-vo4F2OewqjbB1+yaJ7k2EJFHlTP3jR634Z9Cj9itpqNjuLXvhlVxgnjsHsdRgASR8xYDrx6onw4vW5H6We0Jmg==", + "dev": true, + "requires": { + "@babel/helper-plugin-utils": "^7.8.3" + } + }, + "@babel/plugin-transform-block-scoping": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-block-scoping/-/plugin-transform-block-scoping-7.8.3.tgz", + "integrity": "sha512-pGnYfm7RNRgYRi7bids5bHluENHqJhrV4bCZRwc5GamaWIIs07N4rZECcmJL6ZClwjDz1GbdMZFtPs27hTB06w==", + "dev": true, + "requires": { + "@babel/helper-plugin-utils": "^7.8.3", + "lodash": "^4.17.13" + } + }, + "@babel/plugin-transform-classes": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-classes/-/plugin-transform-classes-7.8.3.tgz", + "integrity": "sha512-SjT0cwFJ+7Rbr1vQsvphAHwUHvSUPmMjMU/0P59G8U2HLFqSa082JO7zkbDNWs9kH/IUqpHI6xWNesGf8haF1w==", + "dev": true, + "requires": { + "@babel/helper-annotate-as-pure": "^7.8.3", + "@babel/helper-define-map": "^7.8.3", + "@babel/helper-function-name": "^7.8.3", + "@babel/helper-optimise-call-expression": "^7.8.3", + "@babel/helper-plugin-utils": "^7.8.3", + "@babel/helper-replace-supers": "^7.8.3", + "@babel/helper-split-export-declaration": "^7.8.3", + "globals": "^11.1.0" + } + }, + "@babel/plugin-transform-computed-properties": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-computed-properties/-/plugin-transform-computed-properties-7.8.3.tgz", + "integrity": "sha512-O5hiIpSyOGdrQZRQ2ccwtTVkgUDBBiCuK//4RJ6UfePllUTCENOzKxfh6ulckXKc0DixTFLCfb2HVkNA7aDpzA==", + "dev": true, + "requires": { + "@babel/helper-plugin-utils": "^7.8.3" + } + }, + "@babel/plugin-transform-destructuring": { + "version": "7.8.3", + "resolved": 
"https://registry.npmjs.org/@babel/plugin-transform-destructuring/-/plugin-transform-destructuring-7.8.3.tgz", + "integrity": "sha512-H4X646nCkiEcHZUZaRkhE2XVsoz0J/1x3VVujnn96pSoGCtKPA99ZZA+va+gK+92Zycd6OBKCD8tDb/731bhgQ==", + "dev": true, + "requires": { + "@babel/helper-plugin-utils": "^7.8.3" + } + }, + "@babel/plugin-transform-dotall-regex": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-dotall-regex/-/plugin-transform-dotall-regex-7.8.3.tgz", + "integrity": "sha512-kLs1j9Nn4MQoBYdRXH6AeaXMbEJFaFu/v1nQkvib6QzTj8MZI5OQzqmD83/2jEM1z0DLilra5aWO5YpyC0ALIw==", + "dev": true, + "requires": { + "@babel/helper-create-regexp-features-plugin": "^7.8.3", + "@babel/helper-plugin-utils": "^7.8.3" + } + }, + "@babel/plugin-transform-duplicate-keys": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-duplicate-keys/-/plugin-transform-duplicate-keys-7.8.3.tgz", + "integrity": "sha512-s8dHiBUbcbSgipS4SMFuWGqCvyge5V2ZeAWzR6INTVC3Ltjig/Vw1G2Gztv0vU/hRG9X8IvKvYdoksnUfgXOEQ==", + "dev": true, + "requires": { + "@babel/helper-plugin-utils": "^7.8.3" + } + }, + "@babel/plugin-transform-exponentiation-operator": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-exponentiation-operator/-/plugin-transform-exponentiation-operator-7.8.3.tgz", + "integrity": "sha512-zwIpuIymb3ACcInbksHaNcR12S++0MDLKkiqXHl3AzpgdKlFNhog+z/K0+TGW+b0w5pgTq4H6IwV/WhxbGYSjQ==", + "dev": true, + "requires": { + "@babel/helper-builder-binary-assignment-operator-visitor": "^7.8.3", + "@babel/helper-plugin-utils": "^7.8.3" + } + }, + "@babel/plugin-transform-for-of": { + "version": "7.8.4", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-for-of/-/plugin-transform-for-of-7.8.4.tgz", + "integrity": "sha512-iAXNlOWvcYUYoV8YIxwS7TxGRJcxyl8eQCfT+A5j8sKUzRFvJdcyjp97jL2IghWSRDaL2PU2O2tX8Cu9dTBq5A==", + "dev": true, + "requires": { + "@babel/helper-plugin-utils": "^7.8.3" + } + }, + 
"@babel/plugin-transform-function-name": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-function-name/-/plugin-transform-function-name-7.8.3.tgz", + "integrity": "sha512-rO/OnDS78Eifbjn5Py9v8y0aR+aSYhDhqAwVfsTl0ERuMZyr05L1aFSCJnbv2mmsLkit/4ReeQ9N2BgLnOcPCQ==", + "dev": true, + "requires": { + "@babel/helper-function-name": "^7.8.3", + "@babel/helper-plugin-utils": "^7.8.3" + } + }, + "@babel/plugin-transform-literals": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-literals/-/plugin-transform-literals-7.8.3.tgz", + "integrity": "sha512-3Tqf8JJ/qB7TeldGl+TT55+uQei9JfYaregDcEAyBZ7akutriFrt6C/wLYIer6OYhleVQvH/ntEhjE/xMmy10A==", + "dev": true, + "requires": { + "@babel/helper-plugin-utils": "^7.8.3" + } + }, + "@babel/plugin-transform-member-expression-literals": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-member-expression-literals/-/plugin-transform-member-expression-literals-7.8.3.tgz", + "integrity": "sha512-3Wk2EXhnw+rP+IDkK6BdtPKsUE5IeZ6QOGrPYvw52NwBStw9V1ZVzxgK6fSKSxqUvH9eQPR3tm3cOq79HlsKYA==", + "dev": true, + "requires": { + "@babel/helper-plugin-utils": "^7.8.3" + } + }, + "@babel/plugin-transform-modules-amd": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-amd/-/plugin-transform-modules-amd-7.8.3.tgz", + "integrity": "sha512-MadJiU3rLKclzT5kBH4yxdry96odTUwuqrZM+GllFI/VhxfPz+k9MshJM+MwhfkCdxxclSbSBbUGciBngR+kEQ==", + "dev": true, + "requires": { + "@babel/helper-module-transforms": "^7.8.3", + "@babel/helper-plugin-utils": "^7.8.3", + "babel-plugin-dynamic-import-node": "^2.3.0" + } + }, + "@babel/plugin-transform-modules-commonjs": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-commonjs/-/plugin-transform-modules-commonjs-7.8.3.tgz", + "integrity": 
"sha512-JpdMEfA15HZ/1gNuB9XEDlZM1h/gF/YOH7zaZzQu2xCFRfwc01NXBMHHSTT6hRjlXJJs5x/bfODM3LiCk94Sxg==", + "dev": true, + "requires": { + "@babel/helper-module-transforms": "^7.8.3", + "@babel/helper-plugin-utils": "^7.8.3", + "@babel/helper-simple-access": "^7.8.3", + "babel-plugin-dynamic-import-node": "^2.3.0" + } + }, + "@babel/plugin-transform-modules-systemjs": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-systemjs/-/plugin-transform-modules-systemjs-7.8.3.tgz", + "integrity": "sha512-8cESMCJjmArMYqa9AO5YuMEkE4ds28tMpZcGZB/jl3n0ZzlsxOAi3mC+SKypTfT8gjMupCnd3YiXCkMjj2jfOg==", + "dev": true, + "requires": { + "@babel/helper-hoist-variables": "^7.8.3", + "@babel/helper-module-transforms": "^7.8.3", + "@babel/helper-plugin-utils": "^7.8.3", + "babel-plugin-dynamic-import-node": "^2.3.0" + } + }, + "@babel/plugin-transform-modules-umd": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-umd/-/plugin-transform-modules-umd-7.8.3.tgz", + "integrity": "sha512-evhTyWhbwbI3/U6dZAnx/ePoV7H6OUG+OjiJFHmhr9FPn0VShjwC2kdxqIuQ/+1P50TMrneGzMeyMTFOjKSnAw==", + "dev": true, + "requires": { + "@babel/helper-module-transforms": "^7.8.3", + "@babel/helper-plugin-utils": "^7.8.3" + } + }, + "@babel/plugin-transform-named-capturing-groups-regex": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-named-capturing-groups-regex/-/plugin-transform-named-capturing-groups-regex-7.8.3.tgz", + "integrity": "sha512-f+tF/8UVPU86TrCb06JoPWIdDpTNSGGcAtaD9mLP0aYGA0OS0j7j7DHJR0GTFrUZPUU6loZhbsVZgTh0N+Qdnw==", + "dev": true, + "requires": { + "@babel/helper-create-regexp-features-plugin": "^7.8.3" + } + }, + "@babel/plugin-transform-new-target": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-new-target/-/plugin-transform-new-target-7.8.3.tgz", + "integrity": 
"sha512-QuSGysibQpyxexRyui2vca+Cmbljo8bcRckgzYV4kRIsHpVeyeC3JDO63pY+xFZ6bWOBn7pfKZTqV4o/ix9sFw==", + "dev": true, + "requires": { + "@babel/helper-plugin-utils": "^7.8.3" + } + }, + "@babel/plugin-transform-object-super": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-object-super/-/plugin-transform-object-super-7.8.3.tgz", + "integrity": "sha512-57FXk+gItG/GejofIyLIgBKTas4+pEU47IXKDBWFTxdPd7F80H8zybyAY7UoblVfBhBGs2EKM+bJUu2+iUYPDQ==", + "dev": true, + "requires": { + "@babel/helper-plugin-utils": "^7.8.3", + "@babel/helper-replace-supers": "^7.8.3" + } + }, + "@babel/plugin-transform-parameters": { + "version": "7.8.4", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-parameters/-/plugin-transform-parameters-7.8.4.tgz", + "integrity": "sha512-IsS3oTxeTsZlE5KqzTbcC2sV0P9pXdec53SU+Yxv7o/6dvGM5AkTotQKhoSffhNgZ/dftsSiOoxy7evCYJXzVA==", + "dev": true, + "requires": { + "@babel/helper-call-delegate": "^7.8.3", + "@babel/helper-get-function-arity": "^7.8.3", + "@babel/helper-plugin-utils": "^7.8.3" + } + }, + "@babel/plugin-transform-property-literals": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-property-literals/-/plugin-transform-property-literals-7.8.3.tgz", + "integrity": "sha512-uGiiXAZMqEoQhRWMK17VospMZh5sXWg+dlh2soffpkAl96KAm+WZuJfa6lcELotSRmooLqg0MWdH6UUq85nmmg==", + "dev": true, + "requires": { + "@babel/helper-plugin-utils": "^7.8.3" + } + }, + "@babel/plugin-transform-regenerator": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-regenerator/-/plugin-transform-regenerator-7.8.3.tgz", + "integrity": "sha512-qt/kcur/FxrQrzFR432FGZznkVAjiyFtCOANjkAKwCbt465L6ZCiUQh2oMYGU3Wo8LRFJxNDFwWn106S5wVUNA==", + "dev": true, + "requires": { + "regenerator-transform": "^0.14.0" + } + }, + "@babel/plugin-transform-reserved-words": { + "version": "7.8.3", + "resolved": 
"https://registry.npmjs.org/@babel/plugin-transform-reserved-words/-/plugin-transform-reserved-words-7.8.3.tgz", + "integrity": "sha512-mwMxcycN3omKFDjDQUl+8zyMsBfjRFr0Zn/64I41pmjv4NJuqcYlEtezwYtw9TFd9WR1vN5kiM+O0gMZzO6L0A==", + "dev": true, + "requires": { + "@babel/helper-plugin-utils": "^7.8.3" + } + }, + "@babel/plugin-transform-shorthand-properties": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-shorthand-properties/-/plugin-transform-shorthand-properties-7.8.3.tgz", + "integrity": "sha512-I9DI6Odg0JJwxCHzbzW08ggMdCezoWcuQRz3ptdudgwaHxTjxw5HgdFJmZIkIMlRymL6YiZcped4TTCB0JcC8w==", + "dev": true, + "requires": { + "@babel/helper-plugin-utils": "^7.8.3" + } + }, + "@babel/plugin-transform-spread": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-spread/-/plugin-transform-spread-7.8.3.tgz", + "integrity": "sha512-CkuTU9mbmAoFOI1tklFWYYbzX5qCIZVXPVy0jpXgGwkplCndQAa58s2jr66fTeQnA64bDox0HL4U56CFYoyC7g==", + "dev": true, + "requires": { + "@babel/helper-plugin-utils": "^7.8.3" + } + }, + "@babel/plugin-transform-sticky-regex": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-sticky-regex/-/plugin-transform-sticky-regex-7.8.3.tgz", + "integrity": "sha512-9Spq0vGCD5Bb4Z/ZXXSK5wbbLFMG085qd2vhL1JYu1WcQ5bXqZBAYRzU1d+p79GcHs2szYv5pVQCX13QgldaWw==", + "dev": true, + "requires": { + "@babel/helper-plugin-utils": "^7.8.3", + "@babel/helper-regex": "^7.8.3" + } + }, + "@babel/plugin-transform-template-literals": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-template-literals/-/plugin-transform-template-literals-7.8.3.tgz", + "integrity": "sha512-820QBtykIQOLFT8NZOcTRJ1UNuztIELe4p9DCgvj4NK+PwluSJ49we7s9FB1HIGNIYT7wFUJ0ar2QpCDj0escQ==", + "dev": true, + "requires": { + "@babel/helper-annotate-as-pure": "^7.8.3", + "@babel/helper-plugin-utils": "^7.8.3" + } + }, + "@babel/plugin-transform-typeof-symbol": { + 
"version": "7.8.4", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-typeof-symbol/-/plugin-transform-typeof-symbol-7.8.4.tgz", + "integrity": "sha512-2QKyfjGdvuNfHsb7qnBBlKclbD4CfshH2KvDabiijLMGXPHJXGxtDzwIF7bQP+T0ysw8fYTtxPafgfs/c1Lrqg==", + "dev": true, + "requires": { + "@babel/helper-plugin-utils": "^7.8.3" + } + }, + "@babel/plugin-transform-unicode-regex": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-unicode-regex/-/plugin-transform-unicode-regex-7.8.3.tgz", + "integrity": "sha512-+ufgJjYdmWfSQ+6NS9VGUR2ns8cjJjYbrbi11mZBTaWm+Fui/ncTLFF28Ei1okavY+xkojGr1eJxNsWYeA5aZw==", + "dev": true, + "requires": { + "@babel/helper-create-regexp-features-plugin": "^7.8.3", + "@babel/helper-plugin-utils": "^7.8.3" + } + }, + "@babel/preset-env": { + "version": "7.8.4", + "resolved": "https://registry.npmjs.org/@babel/preset-env/-/preset-env-7.8.4.tgz", + "integrity": "sha512-HihCgpr45AnSOHRbS5cWNTINs0TwaR8BS8xIIH+QwiW8cKL0llV91njQMpeMReEPVs+1Ao0x3RLEBLtt1hOq4w==", + "dev": true, + "requires": { + "@babel/compat-data": "^7.8.4", + "@babel/helper-compilation-targets": "^7.8.4", + "@babel/helper-module-imports": "^7.8.3", + "@babel/helper-plugin-utils": "^7.8.3", + "@babel/plugin-proposal-async-generator-functions": "^7.8.3", + "@babel/plugin-proposal-dynamic-import": "^7.8.3", + "@babel/plugin-proposal-json-strings": "^7.8.3", + "@babel/plugin-proposal-nullish-coalescing-operator": "^7.8.3", + "@babel/plugin-proposal-object-rest-spread": "^7.8.3", + "@babel/plugin-proposal-optional-catch-binding": "^7.8.3", + "@babel/plugin-proposal-optional-chaining": "^7.8.3", + "@babel/plugin-proposal-unicode-property-regex": "^7.8.3", + "@babel/plugin-syntax-async-generators": "^7.8.0", + "@babel/plugin-syntax-dynamic-import": "^7.8.0", + "@babel/plugin-syntax-json-strings": "^7.8.0", + "@babel/plugin-syntax-nullish-coalescing-operator": "^7.8.0", + "@babel/plugin-syntax-object-rest-spread": "^7.8.0", + 
"@babel/plugin-syntax-optional-catch-binding": "^7.8.0", + "@babel/plugin-syntax-optional-chaining": "^7.8.0", + "@babel/plugin-syntax-top-level-await": "^7.8.3", + "@babel/plugin-transform-arrow-functions": "^7.8.3", + "@babel/plugin-transform-async-to-generator": "^7.8.3", + "@babel/plugin-transform-block-scoped-functions": "^7.8.3", + "@babel/plugin-transform-block-scoping": "^7.8.3", + "@babel/plugin-transform-classes": "^7.8.3", + "@babel/plugin-transform-computed-properties": "^7.8.3", + "@babel/plugin-transform-destructuring": "^7.8.3", + "@babel/plugin-transform-dotall-regex": "^7.8.3", + "@babel/plugin-transform-duplicate-keys": "^7.8.3", + "@babel/plugin-transform-exponentiation-operator": "^7.8.3", + "@babel/plugin-transform-for-of": "^7.8.4", + "@babel/plugin-transform-function-name": "^7.8.3", + "@babel/plugin-transform-literals": "^7.8.3", + "@babel/plugin-transform-member-expression-literals": "^7.8.3", + "@babel/plugin-transform-modules-amd": "^7.8.3", + "@babel/plugin-transform-modules-commonjs": "^7.8.3", + "@babel/plugin-transform-modules-systemjs": "^7.8.3", + "@babel/plugin-transform-modules-umd": "^7.8.3", + "@babel/plugin-transform-named-capturing-groups-regex": "^7.8.3", + "@babel/plugin-transform-new-target": "^7.8.3", + "@babel/plugin-transform-object-super": "^7.8.3", + "@babel/plugin-transform-parameters": "^7.8.4", + "@babel/plugin-transform-property-literals": "^7.8.3", + "@babel/plugin-transform-regenerator": "^7.8.3", + "@babel/plugin-transform-reserved-words": "^7.8.3", + "@babel/plugin-transform-shorthand-properties": "^7.8.3", + "@babel/plugin-transform-spread": "^7.8.3", + "@babel/plugin-transform-sticky-regex": "^7.8.3", + "@babel/plugin-transform-template-literals": "^7.8.3", + "@babel/plugin-transform-typeof-symbol": "^7.8.4", + "@babel/plugin-transform-unicode-regex": "^7.8.3", + "@babel/types": "^7.8.3", + "browserslist": "^4.8.5", + "core-js-compat": "^3.6.2", + "invariant": "^2.2.2", + "levenary": "^1.1.1", + "semver": 
"^5.5.0" + } + }, + "@babel/template": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/template/-/template-7.8.3.tgz", + "integrity": "sha512-04m87AcQgAFdvuoyiQ2kgELr2tV8B4fP/xJAVUL3Yb3bkNdMedD3d0rlSQr3PegP0cms3eHjl1F7PWlvWbU8FQ==", + "dev": true, + "requires": { + "@babel/code-frame": "^7.8.3", + "@babel/parser": "^7.8.3", + "@babel/types": "^7.8.3" + } + }, + "@babel/traverse": { + "version": "7.8.4", + "resolved": "https://registry.npmjs.org/@babel/traverse/-/traverse-7.8.4.tgz", + "integrity": "sha512-NGLJPZwnVEyBPLI+bl9y9aSnxMhsKz42so7ApAv9D+b4vAFPpY013FTS9LdKxcABoIYFU52HcYga1pPlx454mg==", + "dev": true, + "requires": { + "@babel/code-frame": "^7.8.3", + "@babel/generator": "^7.8.4", + "@babel/helper-function-name": "^7.8.3", + "@babel/helper-split-export-declaration": "^7.8.3", + "@babel/parser": "^7.8.4", + "@babel/types": "^7.8.3", + "debug": "^4.1.0", + "globals": "^11.1.0", + "lodash": "^4.17.13" + } + }, + "@babel/types": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.8.3.tgz", + "integrity": "sha512-jBD+G8+LWpMBBWvVcdr4QysjUE4mU/syrhN17o1u3gx0/WzJB1kwiVZAXRtWbsIPOwW8pF/YJV5+nmetPzepXg==", + "dev": true, + "requires": { + "esutils": "^2.0.2", + "lodash": "^4.17.13", + "to-fast-properties": "^2.0.0" + } + }, + "@webassemblyjs/ast": { + "version": "1.8.5", + "resolved": "https://registry.npmjs.org/@webassemblyjs/ast/-/ast-1.8.5.tgz", + "integrity": "sha512-aJMfngIZ65+t71C3y2nBBg5FFG0Okt9m0XEgWZ7Ywgn1oMAT8cNwx00Uv1cQyHtidq0Xn94R4TAywO+LCQ+ZAQ==", + "dev": true, + "requires": { + "@webassemblyjs/helper-module-context": "1.8.5", + "@webassemblyjs/helper-wasm-bytecode": "1.8.5", + "@webassemblyjs/wast-parser": "1.8.5" + } + }, + "@webassemblyjs/floating-point-hex-parser": { + "version": "1.8.5", + "resolved": "https://registry.npmjs.org/@webassemblyjs/floating-point-hex-parser/-/floating-point-hex-parser-1.8.5.tgz", + "integrity": 
"sha512-9p+79WHru1oqBh9ewP9zW95E3XAo+90oth7S5Re3eQnECGq59ly1Ri5tsIipKGpiStHsUYmY3zMLqtk3gTcOtQ==", + "dev": true + }, + "@webassemblyjs/helper-api-error": { + "version": "1.8.5", + "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-api-error/-/helper-api-error-1.8.5.tgz", + "integrity": "sha512-Za/tnzsvnqdaSPOUXHyKJ2XI7PDX64kWtURyGiJJZKVEdFOsdKUCPTNEVFZq3zJ2R0G5wc2PZ5gvdTRFgm81zA==", + "dev": true + }, + "@webassemblyjs/helper-buffer": { + "version": "1.8.5", + "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-buffer/-/helper-buffer-1.8.5.tgz", + "integrity": "sha512-Ri2R8nOS0U6G49Q86goFIPNgjyl6+oE1abW1pS84BuhP1Qcr5JqMwRFT3Ah3ADDDYGEgGs1iyb1DGX+kAi/c/Q==", + "dev": true + }, + "@webassemblyjs/helper-code-frame": { + "version": "1.8.5", + "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-code-frame/-/helper-code-frame-1.8.5.tgz", + "integrity": "sha512-VQAadSubZIhNpH46IR3yWO4kZZjMxN1opDrzePLdVKAZ+DFjkGD/rf4v1jap744uPVU6yjL/smZbRIIJTOUnKQ==", + "dev": true, + "requires": { + "@webassemblyjs/wast-printer": "1.8.5" + } + }, + "@webassemblyjs/helper-fsm": { + "version": "1.8.5", + "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-fsm/-/helper-fsm-1.8.5.tgz", + "integrity": "sha512-kRuX/saORcg8se/ft6Q2UbRpZwP4y7YrWsLXPbbmtepKr22i8Z4O3V5QE9DbZK908dh5Xya4Un57SDIKwB9eow==", + "dev": true + }, + "@webassemblyjs/helper-module-context": { + "version": "1.8.5", + "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-module-context/-/helper-module-context-1.8.5.tgz", + "integrity": "sha512-/O1B236mN7UNEU4t9X7Pj38i4VoU8CcMHyy3l2cV/kIF4U5KoHXDVqcDuOs1ltkac90IM4vZdHc52t1x8Yfs3g==", + "dev": true, + "requires": { + "@webassemblyjs/ast": "1.8.5", + "mamacro": "^0.0.3" + } + }, + "@webassemblyjs/helper-wasm-bytecode": { + "version": "1.8.5", + "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-wasm-bytecode/-/helper-wasm-bytecode-1.8.5.tgz", + "integrity": 
"sha512-Cu4YMYG3Ddl72CbmpjU/wbP6SACcOPVbHN1dI4VJNJVgFwaKf1ppeFJrwydOG3NDHxVGuCfPlLZNyEdIYlQ6QQ==", + "dev": true + }, + "@webassemblyjs/helper-wasm-section": { + "version": "1.8.5", + "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-wasm-section/-/helper-wasm-section-1.8.5.tgz", + "integrity": "sha512-VV083zwR+VTrIWWtgIUpqfvVdK4ff38loRmrdDBgBT8ADXYsEZ5mPQ4Nde90N3UYatHdYoDIFb7oHzMncI02tA==", + "dev": true, + "requires": { + "@webassemblyjs/ast": "1.8.5", + "@webassemblyjs/helper-buffer": "1.8.5", + "@webassemblyjs/helper-wasm-bytecode": "1.8.5", + "@webassemblyjs/wasm-gen": "1.8.5" + } + }, + "@webassemblyjs/ieee754": { + "version": "1.8.5", + "resolved": "https://registry.npmjs.org/@webassemblyjs/ieee754/-/ieee754-1.8.5.tgz", + "integrity": "sha512-aaCvQYrvKbY/n6wKHb/ylAJr27GglahUO89CcGXMItrOBqRarUMxWLJgxm9PJNuKULwN5n1csT9bYoMeZOGF3g==", + "dev": true, + "requires": { + "@xtuc/ieee754": "^1.2.0" + } + }, + "@webassemblyjs/leb128": { + "version": "1.8.5", + "resolved": "https://registry.npmjs.org/@webassemblyjs/leb128/-/leb128-1.8.5.tgz", + "integrity": "sha512-plYUuUwleLIziknvlP8VpTgO4kqNaH57Y3JnNa6DLpu/sGcP6hbVdfdX5aHAV716pQBKrfuU26BJK29qY37J7A==", + "dev": true, + "requires": { + "@xtuc/long": "4.2.2" + } + }, + "@webassemblyjs/utf8": { + "version": "1.8.5", + "resolved": "https://registry.npmjs.org/@webassemblyjs/utf8/-/utf8-1.8.5.tgz", + "integrity": "sha512-U7zgftmQriw37tfD934UNInokz6yTmn29inT2cAetAsaU9YeVCveWEwhKL1Mg4yS7q//NGdzy79nlXh3bT8Kjw==", + "dev": true + }, + "@webassemblyjs/wasm-edit": { + "version": "1.8.5", + "resolved": "https://registry.npmjs.org/@webassemblyjs/wasm-edit/-/wasm-edit-1.8.5.tgz", + "integrity": "sha512-A41EMy8MWw5yvqj7MQzkDjU29K7UJq1VrX2vWLzfpRHt3ISftOXqrtojn7nlPsZ9Ijhp5NwuODuycSvfAO/26Q==", + "dev": true, + "requires": { + "@webassemblyjs/ast": "1.8.5", + "@webassemblyjs/helper-buffer": "1.8.5", + "@webassemblyjs/helper-wasm-bytecode": "1.8.5", + "@webassemblyjs/helper-wasm-section": "1.8.5", + 
"@webassemblyjs/wasm-gen": "1.8.5", + "@webassemblyjs/wasm-opt": "1.8.5", + "@webassemblyjs/wasm-parser": "1.8.5", + "@webassemblyjs/wast-printer": "1.8.5" + } + }, + "@webassemblyjs/wasm-gen": { + "version": "1.8.5", + "resolved": "https://registry.npmjs.org/@webassemblyjs/wasm-gen/-/wasm-gen-1.8.5.tgz", + "integrity": "sha512-BCZBT0LURC0CXDzj5FXSc2FPTsxwp3nWcqXQdOZE4U7h7i8FqtFK5Egia6f9raQLpEKT1VL7zr4r3+QX6zArWg==", + "dev": true, + "requires": { + "@webassemblyjs/ast": "1.8.5", + "@webassemblyjs/helper-wasm-bytecode": "1.8.5", + "@webassemblyjs/ieee754": "1.8.5", + "@webassemblyjs/leb128": "1.8.5", + "@webassemblyjs/utf8": "1.8.5" + } + }, + "@webassemblyjs/wasm-opt": { + "version": "1.8.5", + "resolved": "https://registry.npmjs.org/@webassemblyjs/wasm-opt/-/wasm-opt-1.8.5.tgz", + "integrity": "sha512-HKo2mO/Uh9A6ojzu7cjslGaHaUU14LdLbGEKqTR7PBKwT6LdPtLLh9fPY33rmr5wcOMrsWDbbdCHq4hQUdd37Q==", + "dev": true, + "requires": { + "@webassemblyjs/ast": "1.8.5", + "@webassemblyjs/helper-buffer": "1.8.5", + "@webassemblyjs/wasm-gen": "1.8.5", + "@webassemblyjs/wasm-parser": "1.8.5" + } + }, + "@webassemblyjs/wasm-parser": { + "version": "1.8.5", + "resolved": "https://registry.npmjs.org/@webassemblyjs/wasm-parser/-/wasm-parser-1.8.5.tgz", + "integrity": "sha512-pi0SYE9T6tfcMkthwcgCpL0cM9nRYr6/6fjgDtL6q/ZqKHdMWvxitRi5JcZ7RI4SNJJYnYNaWy5UUrHQy998lw==", + "dev": true, + "requires": { + "@webassemblyjs/ast": "1.8.5", + "@webassemblyjs/helper-api-error": "1.8.5", + "@webassemblyjs/helper-wasm-bytecode": "1.8.5", + "@webassemblyjs/ieee754": "1.8.5", + "@webassemblyjs/leb128": "1.8.5", + "@webassemblyjs/utf8": "1.8.5" + } + }, + "@webassemblyjs/wast-parser": { + "version": "1.8.5", + "resolved": "https://registry.npmjs.org/@webassemblyjs/wast-parser/-/wast-parser-1.8.5.tgz", + "integrity": "sha512-daXC1FyKWHF1i11obK086QRlsMsY4+tIOKgBqI1lxAnkp9xe9YMcgOxm9kLe+ttjs5aWV2KKE1TWJCN57/Btsg==", + "dev": true, + "requires": { + "@webassemblyjs/ast": "1.8.5", + 
"@webassemblyjs/floating-point-hex-parser": "1.8.5", + "@webassemblyjs/helper-api-error": "1.8.5", + "@webassemblyjs/helper-code-frame": "1.8.5", + "@webassemblyjs/helper-fsm": "1.8.5", + "@xtuc/long": "4.2.2" + } + }, + "@webassemblyjs/wast-printer": { + "version": "1.8.5", + "resolved": "https://registry.npmjs.org/@webassemblyjs/wast-printer/-/wast-printer-1.8.5.tgz", + "integrity": "sha512-w0U0pD4EhlnvRyeJzBqaVSJAo9w/ce7/WPogeXLzGkO6hzhr4GnQIZ4W4uUt5b9ooAaXPtnXlj0gzsXEOUNYMg==", + "dev": true, + "requires": { + "@webassemblyjs/ast": "1.8.5", + "@webassemblyjs/wast-parser": "1.8.5", + "@xtuc/long": "4.2.2" + } + }, + "@xtuc/ieee754": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/@xtuc/ieee754/-/ieee754-1.2.0.tgz", + "integrity": "sha512-DX8nKgqcGwsc0eJSqYt5lwP4DH5FlHnmuWWBRy7X0NcaGR0ZtuyeESgMwTYVEtxmsNGY+qit4QYT/MIYTOTPeA==", + "dev": true + }, + "@xtuc/long": { + "version": "4.2.2", + "resolved": "https://registry.npmjs.org/@xtuc/long/-/long-4.2.2.tgz", + "integrity": "sha512-NuHqBY1PB/D8xU6s/thBgOAiAP7HOYDQ32+BFZILJ8ivkUkAHQnWfn6WhL79Owj1qmUnoN/YPhktdIoucipkAQ==", + "dev": true + }, + "ajv": { + "version": "6.11.0", + "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.11.0.tgz", + "integrity": "sha512-nCprB/0syFYy9fVYU1ox1l2KN8S9I+tziH8D4zdZuLT3N6RMlGSGt5FSTpAiHB/Whv8Qs1cWHma1aMKZyaHRKA==", + "dev": true, + "requires": { + "fast-deep-equal": "^3.1.1", + "fast-json-stable-stringify": "^2.0.0", + "json-schema-traverse": "^0.4.1", + "uri-js": "^4.2.2" + } + }, + "ajv-errors": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/ajv-errors/-/ajv-errors-1.0.1.tgz", + "integrity": "sha512-DCRfO/4nQ+89p/RK43i8Ezd41EqdGIU4ld7nGF8OQ14oc/we5rEntLCUa7+jrn3nn83BosfwZA0wb4pon2o8iQ==", + "dev": true + }, + "ajv-keywords": { + "version": "3.4.1", + "resolved": "https://registry.npmjs.org/ajv-keywords/-/ajv-keywords-3.4.1.tgz", + "integrity": "sha512-RO1ibKvd27e6FEShVFfPALuHI3WjSVNeK5FIsmme/LYRNxjKuNj+Dt7bucLa6NdSv3JcVTyMlm9kGR84z1XpaQ==", + 
"dev": true + }, + "ansi-styles": { + "version": "3.2.1", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz", + "integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==", + "dev": true, + "requires": { + "color-convert": "^1.9.0" + } + }, + "anymatch": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/anymatch/-/anymatch-2.0.0.tgz", + "integrity": "sha512-5teOsQWABXHHBFP9y3skS5P3d/WfWXpv3FUpy+LorMrNYaT9pI4oLMQX7jzQ2KklNpGpWHzdCXTDT2Y3XGlZBw==", + "dev": true, + "requires": { + "micromatch": "^3.1.4", + "normalize-path": "^2.1.1" + }, + "dependencies": { + "normalize-path": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/normalize-path/-/normalize-path-2.1.1.tgz", + "integrity": "sha1-GrKLVW4Zg2Oowab35vogE3/mrtk=", + "dev": true, + "requires": { + "remove-trailing-separator": "^1.0.1" + } + } + } + }, + "aproba": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/aproba/-/aproba-1.2.0.tgz", + "integrity": "sha512-Y9J6ZjXtoYh8RnXVCMOU/ttDmk1aBjunq9vO0ta5x85WDQiQfUF9sIPBITdbiiIVcBo03Hi3jMxigBtsddlXRw==", + "dev": true + }, + "arr-diff": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/arr-diff/-/arr-diff-4.0.0.tgz", + "integrity": "sha1-1kYQdP6/7HHn4VI1dhoyml3HxSA=", + "dev": true + }, + "arr-flatten": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/arr-flatten/-/arr-flatten-1.1.0.tgz", + "integrity": "sha512-L3hKV5R/p5o81R7O02IGnwpDmkp6E982XhtbuwSe3O4qOtMMMtodicASA1Cny2U+aCXcNpml+m4dPsvsJ3jatg==", + "dev": true + }, + "arr-union": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/arr-union/-/arr-union-3.1.0.tgz", + "integrity": "sha1-45sJrqne+Gao8gbiiK9jkZuuOcQ=", + "dev": true + }, + "array-unique": { + "version": "0.3.2", + "resolved": "https://registry.npmjs.org/array-unique/-/array-unique-0.3.2.tgz", + "integrity": "sha1-qJS3XUvE9s1nnvMkSp/Y9Gri1Cg=", + "dev": true + }, + "asn1.js": { 
+ "version": "4.10.1", + "resolved": "https://registry.npmjs.org/asn1.js/-/asn1.js-4.10.1.tgz", + "integrity": "sha512-p32cOF5q0Zqs9uBiONKYLm6BClCoBCM5O9JfeUSlnQLBTxYdTK+pW+nXflm8UkKd2UYlEbYz5qEi0JuZR9ckSw==", + "dev": true, + "requires": { + "bn.js": "^4.0.0", + "inherits": "^2.0.1", + "minimalistic-assert": "^1.0.0" + } + }, + "assert": { + "version": "1.5.0", + "resolved": "https://registry.npmjs.org/assert/-/assert-1.5.0.tgz", + "integrity": "sha512-EDsgawzwoun2CZkCgtxJbv392v4nbk9XDD06zI+kQYoBM/3RBWLlEyJARDOmhAAosBjWACEkKL6S+lIZtcAubA==", + "dev": true, + "requires": { + "object-assign": "^4.1.1", + "util": "0.10.3" + }, + "dependencies": { + "inherits": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.1.tgz", + "integrity": "sha1-sX0I0ya0Qj5Wjv9xn5GwscvfafE=", + "dev": true + }, + "util": { + "version": "0.10.3", + "resolved": "https://registry.npmjs.org/util/-/util-0.10.3.tgz", + "integrity": "sha1-evsa/lCAUkZInj23/g7TeTNqwPk=", + "dev": true, + "requires": { + "inherits": "2.0.1" + } + } + } + }, + "assign-symbols": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/assign-symbols/-/assign-symbols-1.0.0.tgz", + "integrity": "sha1-WWZ/QfrdTyDMvCu5a41Pf3jsA2c=", + "dev": true + }, + "async-each": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/async-each/-/async-each-1.0.3.tgz", + "integrity": "sha512-z/WhQ5FPySLdvREByI2vZiTWwCnF0moMJ1hK9YQwDTHKh6I7/uSckMetoRGb5UBZPC1z0jlw+n/XCgjeH7y1AQ==", + "dev": true + }, + "atob": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/atob/-/atob-2.1.2.tgz", + "integrity": "sha512-Wm6ukoaOGJi/73p/cl2GvLjTI5JM1k/O14isD73YML8StrH/7/lRFgmg8nICZgD3bZZvjwCGxtMOD3wWNAu8cg==", + "dev": true + }, + "babel-loader": { + "version": "8.0.6", + "resolved": "https://registry.npmjs.org/babel-loader/-/babel-loader-8.0.6.tgz", + "integrity": "sha512-4BmWKtBOBm13uoUwd08UwjZlaw3O9GWf456R9j+5YykFZ6LUIjIKLc0zEZf+hauxPOJs96C8k6FvYD09vWzhYw==", + "dev": true, + 
"requires": { + "find-cache-dir": "^2.0.0", + "loader-utils": "^1.0.2", + "mkdirp": "^0.5.1", + "pify": "^4.0.1" + } + }, + "babel-plugin-dynamic-import-node": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/babel-plugin-dynamic-import-node/-/babel-plugin-dynamic-import-node-2.3.0.tgz", + "integrity": "sha512-o6qFkpeQEBxcqt0XYlWzAVxNCSCZdUgcR8IRlhD/8DylxjjO4foPcvTW0GGKa/cVt3rvxZ7o5ippJ+/0nvLhlQ==", + "dev": true, + "requires": { + "object.assign": "^4.1.0" + } + }, + "balanced-match": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.0.tgz", + "integrity": "sha1-ibTRmasr7kneFk6gK4nORi1xt2c=", + "dev": true + }, + "base": { + "version": "0.11.2", + "resolved": "https://registry.npmjs.org/base/-/base-0.11.2.tgz", + "integrity": "sha512-5T6P4xPgpp0YDFvSWwEZ4NoE3aM4QBQXDzmVbraCkFj8zHM+mba8SyqB5DbZWyR7mYHo6Y7BdQo3MoA4m0TeQg==", + "dev": true, + "requires": { + "cache-base": "^1.0.1", + "class-utils": "^0.3.5", + "component-emitter": "^1.2.1", + "define-property": "^1.0.0", + "isobject": "^3.0.1", + "mixin-deep": "^1.2.0", + "pascalcase": "^0.1.1" + }, + "dependencies": { + "define-property": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/define-property/-/define-property-1.0.0.tgz", + "integrity": "sha1-dp66rz9KY6rTr56NMEybvnm/sOY=", + "dev": true, + "requires": { + "is-descriptor": "^1.0.0" + } + }, + "is-accessor-descriptor": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/is-accessor-descriptor/-/is-accessor-descriptor-1.0.0.tgz", + "integrity": "sha512-m5hnHTkcVsPfqx3AKlyttIPb7J+XykHvJP2B9bZDjlhLIoEq4XoK64Vg7boZlVWYK6LUY94dYPEE7Lh0ZkZKcQ==", + "dev": true, + "requires": { + "kind-of": "^6.0.0" + } + }, + "is-data-descriptor": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/is-data-descriptor/-/is-data-descriptor-1.0.0.tgz", + "integrity": "sha512-jbRXy1FmtAoCjQkVmIVYwuuqDFUbaOeDjmed1tOGPrsMhtJA4rD9tkgA0F1qJ3gRFRXcHYVkdeaP50Q5rE/jLQ==", + 
"dev": true, + "requires": { + "kind-of": "^6.0.0" + } + }, + "is-descriptor": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/is-descriptor/-/is-descriptor-1.0.2.tgz", + "integrity": "sha512-2eis5WqQGV7peooDyLmNEPUrps9+SXX5c9pL3xEB+4e9HnGuDa7mB7kHxHw4CbqS9k1T2hOH3miL8n8WtiYVtg==", + "dev": true, + "requires": { + "is-accessor-descriptor": "^1.0.0", + "is-data-descriptor": "^1.0.0", + "kind-of": "^6.0.2" + } + } + } + }, + "base64-js": { + "version": "1.3.1", + "resolved": "https://registry.npmjs.org/base64-js/-/base64-js-1.3.1.tgz", + "integrity": "sha512-mLQ4i2QO1ytvGWFWmcngKO//JXAQueZvwEKtjgQFM4jIK0kU+ytMfplL8j+n5mspOfjHwoAg+9yhb7BwAHm36g==", + "dev": true + }, + "big.js": { + "version": "5.2.2", + "resolved": "https://registry.npmjs.org/big.js/-/big.js-5.2.2.tgz", + "integrity": "sha512-vyL2OymJxmarO8gxMr0mhChsO9QGwhynfuu4+MHTAW6czfq9humCB7rKpUjDd9YUiDPU4mzpyupFSvOClAwbmQ==", + "dev": true + }, + "binary-extensions": { + "version": "1.13.1", + "resolved": "https://registry.npmjs.org/binary-extensions/-/binary-extensions-1.13.1.tgz", + "integrity": "sha512-Un7MIEDdUC5gNpcGDV97op1Ywk748MpHcFTHoYs6qnj1Z3j7I53VG3nwZhKzoBZmbdRNnb6WRdFlwl7tSDuZGw==", + "dev": true + }, + "bindings": { + "version": "1.5.0", + "resolved": "https://registry.npmjs.org/bindings/-/bindings-1.5.0.tgz", + "integrity": "sha512-p2q/t/mhvuOj/UeLlV6566GD/guowlr0hHxClI0W9m7MWYkL1F0hLo+0Aexs9HSPCtR1SXQ0TD3MMKrXZajbiQ==", + "dev": true, + "optional": true, + "requires": { + "file-uri-to-path": "1.0.0" + } + }, + "bluebird": { + "version": "3.7.2", + "resolved": "https://registry.npmjs.org/bluebird/-/bluebird-3.7.2.tgz", + "integrity": "sha512-XpNj6GDQzdfW+r2Wnn7xiSAd7TM3jzkxGXBGTtWKuSXv1xUV+azxAm8jdWZN06QTQk+2N2XB9jRDkvbmQmcRtg==", + "dev": true + }, + "bn.js": { + "version": "4.11.8", + "resolved": "https://registry.npmjs.org/bn.js/-/bn.js-4.11.8.tgz", + "integrity": "sha512-ItfYfPLkWHUjckQCk8xC+LwxgK8NYcXywGigJgSwOP8Y2iyWT4f2vsZnoOXTTbo+o5yXmIUJ4gn5538SO5S3gA==", + "dev": true + 
}, + "brace-expansion": { + "version": "1.1.11", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", + "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", + "dev": true, + "requires": { + "balanced-match": "^1.0.0", + "concat-map": "0.0.1" + } + }, + "braces": { + "version": "2.3.2", + "resolved": "https://registry.npmjs.org/braces/-/braces-2.3.2.tgz", + "integrity": "sha512-aNdbnj9P8PjdXU4ybaWLK2IF3jc/EoDYbC7AazW6to3TRsfXxscC9UXOB5iDiEQrkyIbWp2SLQda4+QAa7nc3w==", + "dev": true, + "requires": { + "arr-flatten": "^1.1.0", + "array-unique": "^0.3.2", + "extend-shallow": "^2.0.1", + "fill-range": "^4.0.0", + "isobject": "^3.0.1", + "repeat-element": "^1.1.2", + "snapdragon": "^0.8.1", + "snapdragon-node": "^2.0.1", + "split-string": "^3.0.2", + "to-regex": "^3.0.1" + }, + "dependencies": { + "extend-shallow": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-2.0.1.tgz", + "integrity": "sha1-Ua99YUrZqfYQ6huvu5idaxxWiQ8=", + "dev": true, + "requires": { + "is-extendable": "^0.1.0" + } + } + } + }, + "brorand": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/brorand/-/brorand-1.1.0.tgz", + "integrity": "sha1-EsJe/kCkXjwyPrhnWgoM5XsiNx8=", + "dev": true + }, + "browserify-aes": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/browserify-aes/-/browserify-aes-1.2.0.tgz", + "integrity": "sha512-+7CHXqGuspUn/Sl5aO7Ea0xWGAtETPXNSAjHo48JfLdPWcMng33Xe4znFvQweqc/uzk5zSOI3H52CYnjCfb5hA==", + "dev": true, + "requires": { + "buffer-xor": "^1.0.3", + "cipher-base": "^1.0.0", + "create-hash": "^1.1.0", + "evp_bytestokey": "^1.0.3", + "inherits": "^2.0.1", + "safe-buffer": "^5.0.1" + } + }, + "browserify-cipher": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/browserify-cipher/-/browserify-cipher-1.0.1.tgz", + "integrity": 
"sha512-sPhkz0ARKbf4rRQt2hTpAHqn47X3llLkUGn+xEJzLjwY8LRs2p0v7ljvI5EyoRO/mexrNunNECisZs+gw2zz1w==", + "dev": true, + "requires": { + "browserify-aes": "^1.0.4", + "browserify-des": "^1.0.0", + "evp_bytestokey": "^1.0.0" + } + }, + "browserify-des": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/browserify-des/-/browserify-des-1.0.2.tgz", + "integrity": "sha512-BioO1xf3hFwz4kc6iBhI3ieDFompMhrMlnDFC4/0/vd5MokpuAc3R+LYbwTA9A5Yc9pq9UYPqffKpW2ObuwX5A==", + "dev": true, + "requires": { + "cipher-base": "^1.0.1", + "des.js": "^1.0.0", + "inherits": "^2.0.1", + "safe-buffer": "^5.1.2" + } + }, + "browserify-rsa": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/browserify-rsa/-/browserify-rsa-4.0.1.tgz", + "integrity": "sha1-IeCr+vbyApzy+vsTNWenAdQTVSQ=", + "dev": true, + "requires": { + "bn.js": "^4.1.0", + "randombytes": "^2.0.1" + } + }, + "browserify-sign": { + "version": "4.0.4", + "resolved": "https://registry.npmjs.org/browserify-sign/-/browserify-sign-4.0.4.tgz", + "integrity": "sha1-qk62jl17ZYuqa/alfmMMvXqT0pg=", + "dev": true, + "requires": { + "bn.js": "^4.1.1", + "browserify-rsa": "^4.0.0", + "create-hash": "^1.1.0", + "create-hmac": "^1.1.2", + "elliptic": "^6.0.0", + "inherits": "^2.0.1", + "parse-asn1": "^5.0.0" + } + }, + "browserify-zlib": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/browserify-zlib/-/browserify-zlib-0.2.0.tgz", + "integrity": "sha512-Z942RysHXmJrhqk88FmKBVq/v5tqmSkDz7p54G/MGyjMnCFFnC79XWNbg+Vta8W6Wb2qtSZTSxIGkJrRpCFEiA==", + "dev": true, + "requires": { + "pako": "~1.0.5" + } + }, + "browserslist": { + "version": "4.8.7", + "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-4.8.7.tgz", + "integrity": "sha512-gFOnZNYBHrEyUML0xr5NJ6edFaaKbTFX9S9kQHlYfCP0Rit/boRIz4G+Avq6/4haEKJXdGGUnoolx+5MWW2BoA==", + "dev": true, + "requires": { + "caniuse-lite": "^1.0.30001027", + "electron-to-chromium": "^1.3.349", + "node-releases": "^1.1.49" + } + }, + "buffer": { + "version": 
"4.9.2", + "resolved": "https://registry.npmjs.org/buffer/-/buffer-4.9.2.tgz", + "integrity": "sha512-xq+q3SRMOxGivLhBNaUdC64hDTQwejJ+H0T/NB1XMtTVEwNTrfFF3gAxiyW0Bu/xWEGhjVKgUcMhCrUy2+uCWg==", + "dev": true, + "requires": { + "base64-js": "^1.0.2", + "ieee754": "^1.1.4", + "isarray": "^1.0.0" + } + }, + "buffer-from": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/buffer-from/-/buffer-from-1.1.1.tgz", + "integrity": "sha512-MQcXEUbCKtEo7bhqEs6560Hyd4XaovZlO/k9V3hjVUF/zwW7KBVdSK4gIt/bzwS9MbR5qob+F5jusZsb0YQK2A==", + "dev": true + }, + "buffer-xor": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/buffer-xor/-/buffer-xor-1.0.3.tgz", + "integrity": "sha1-JuYe0UIvtw3ULm42cp7VHYVf6Nk=", + "dev": true + }, + "builtin-status-codes": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/builtin-status-codes/-/builtin-status-codes-3.0.0.tgz", + "integrity": "sha1-hZgoeOIbmOHGZCXgPQF0eI9Wnug=", + "dev": true + }, + "cacache": { + "version": "12.0.3", + "resolved": "https://registry.npmjs.org/cacache/-/cacache-12.0.3.tgz", + "integrity": "sha512-kqdmfXEGFepesTuROHMs3MpFLWrPkSSpRqOw80RCflZXy/khxaArvFrQ7uJxSUduzAufc6G0g1VUCOZXxWavPw==", + "dev": true, + "requires": { + "bluebird": "^3.5.5", + "chownr": "^1.1.1", + "figgy-pudding": "^3.5.1", + "glob": "^7.1.4", + "graceful-fs": "^4.1.15", + "infer-owner": "^1.0.3", + "lru-cache": "^5.1.1", + "mississippi": "^3.0.0", + "mkdirp": "^0.5.1", + "move-concurrently": "^1.0.1", + "promise-inflight": "^1.0.1", + "rimraf": "^2.6.3", + "ssri": "^6.0.1", + "unique-filename": "^1.1.1", + "y18n": "^4.0.0" + } + }, + "cache-base": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/cache-base/-/cache-base-1.0.1.tgz", + "integrity": "sha512-AKcdTnFSWATd5/GCPRxr2ChwIJ85CeyrEyjRHlKxQ56d4XJMGym0uAiKn0xbLOGOl3+yRpOTi484dVCEc5AUzQ==", + "dev": true, + "requires": { + "collection-visit": "^1.0.0", + "component-emitter": "^1.2.1", + "get-value": "^2.0.6", + "has-value": "^1.0.0", + 
"isobject": "^3.0.1", + "set-value": "^2.0.0", + "to-object-path": "^0.3.0", + "union-value": "^1.0.0", + "unset-value": "^1.0.0" + } + }, + "camelcase": { + "version": "5.3.1", + "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-5.3.1.tgz", + "integrity": "sha512-L28STB170nwWS63UjtlEOE3dldQApaJXZkOI1uMFfzf3rRuPegHaHesyee+YxQ+W6SvRDQV6UrdOdRiR153wJg==", + "dev": true + }, + "caniuse-lite": { + "version": "1.0.30001028", + "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001028.tgz", + "integrity": "sha512-Vnrq+XMSHpT7E+LWoIYhs3Sne8h9lx9YJV3acH3THNCwU/9zV93/ta4xVfzTtnqd3rvnuVpVjE3DFqf56tr3aQ==", + "dev": true + }, + "chalk": { + "version": "2.4.2", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-2.4.2.tgz", + "integrity": "sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ==", + "dev": true, + "requires": { + "ansi-styles": "^3.2.1", + "escape-string-regexp": "^1.0.5", + "supports-color": "^5.3.0" + } + }, + "chokidar": { + "version": "2.1.8", + "resolved": "https://registry.npmjs.org/chokidar/-/chokidar-2.1.8.tgz", + "integrity": "sha512-ZmZUazfOzf0Nve7duiCKD23PFSCs4JPoYyccjUFF3aQkQadqBhfzhjkwBH2mNOG9cTBwhamM37EIsIkZw3nRgg==", + "dev": true, + "requires": { + "anymatch": "^2.0.0", + "async-each": "^1.0.1", + "braces": "^2.3.2", + "fsevents": "^1.2.7", + "glob-parent": "^3.1.0", + "inherits": "^2.0.3", + "is-binary-path": "^1.0.0", + "is-glob": "^4.0.0", + "normalize-path": "^3.0.0", + "path-is-absolute": "^1.0.0", + "readdirp": "^2.2.1", + "upath": "^1.1.1" + }, + "dependencies": { + "glob-parent": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-3.1.0.tgz", + "integrity": "sha1-nmr2KZ2NO9K9QEMIMr0RPfkGxa4=", + "dev": true, + "requires": { + "is-glob": "^3.1.0", + "path-dirname": "^1.0.0" + }, + "dependencies": { + "is-glob": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-3.1.0.tgz", + "integrity": 
"sha1-e6WuJCF4BKxwcHuWkiVnSGzD6Eo=", + "dev": true, + "requires": { + "is-extglob": "^2.1.0" + } + } + } + } + } + }, + "chownr": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/chownr/-/chownr-1.1.3.tgz", + "integrity": "sha512-i70fVHhmV3DtTl6nqvZOnIjbY0Pe4kAUjwHj8z0zAdgBtYrJyYwLKCCuRBQ5ppkyL0AkN7HKRnETdmdp1zqNXw==", + "dev": true + }, + "chrome-trace-event": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/chrome-trace-event/-/chrome-trace-event-1.0.2.tgz", + "integrity": "sha512-9e/zx1jw7B4CO+c/RXoCsfg/x1AfUBioy4owYH0bJprEYAx5hRFLRhWBqHAG57D0ZM4H7vxbP7bPe0VwhQRYDQ==", + "dev": true, + "requires": { + "tslib": "^1.9.0" + } + }, + "cipher-base": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/cipher-base/-/cipher-base-1.0.4.tgz", + "integrity": "sha512-Kkht5ye6ZGmwv40uUDZztayT2ThLQGfnj/T71N/XzeZeo3nf8foyW7zGTsPYkEya3m5f3cAypH+qe7YOrM1U2Q==", + "dev": true, + "requires": { + "inherits": "^2.0.1", + "safe-buffer": "^5.0.1" + } + }, + "class-utils": { + "version": "0.3.6", + "resolved": "https://registry.npmjs.org/class-utils/-/class-utils-0.3.6.tgz", + "integrity": "sha512-qOhPa/Fj7s6TY8H8esGu5QNpMMQxz79h+urzrNYN6mn+9BnxlDGf5QZ+XeCDsxSjPqsSR56XOZOJmpeurnLMeg==", + "dev": true, + "requires": { + "arr-union": "^3.1.0", + "define-property": "^0.2.5", + "isobject": "^3.0.0", + "static-extend": "^0.1.1" + }, + "dependencies": { + "define-property": { + "version": "0.2.5", + "resolved": "https://registry.npmjs.org/define-property/-/define-property-0.2.5.tgz", + "integrity": "sha1-w1se+RjsPJkPmlvFe+BKrOxcgRY=", + "dev": true, + "requires": { + "is-descriptor": "^0.1.0" + } + } + } + }, + "cliui": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/cliui/-/cliui-5.0.0.tgz", + "integrity": "sha512-PYeGSEmmHM6zvoef2w8TPzlrnNpXIjTipYK780YswmIP9vjxmd6Y2a3CB2Ks6/AU8NHjZugXvo8w3oWM2qnwXA==", + "dev": true, + "requires": { + "string-width": "^3.1.0", + "strip-ansi": "^5.2.0", + "wrap-ansi": "^5.1.0" + }, + 
"dependencies": { + "string-width": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-3.1.0.tgz", + "integrity": "sha512-vafcv6KjVZKSgz06oM/H6GDBrAtz8vdhQakGjFIvNrHA6y3HCF1CInLy+QLq8dTJPQ1b+KDUqDFctkdRW44e1w==", + "dev": true, + "requires": { + "emoji-regex": "^7.0.1", + "is-fullwidth-code-point": "^2.0.0", + "strip-ansi": "^5.1.0" + } + } + } + }, + "collection-visit": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/collection-visit/-/collection-visit-1.0.0.tgz", + "integrity": "sha1-S8A3PBZLwykbTTaMgpzxqApZ3KA=", + "dev": true, + "requires": { + "map-visit": "^1.0.0", + "object-visit": "^1.0.0" + } + }, + "color-convert": { + "version": "1.9.3", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-1.9.3.tgz", + "integrity": "sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg==", + "dev": true, + "requires": { + "color-name": "1.1.3" + } + }, + "color-name": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.3.tgz", + "integrity": "sha1-p9BVi9icQveV3UIyj3QIMcpTvCU=", + "dev": true + }, + "commander": { + "version": "2.20.3", + "resolved": "https://registry.npmjs.org/commander/-/commander-2.20.3.tgz", + "integrity": "sha512-GpVkmM8vF2vQUkj2LvZmD35JxeJOLCwJ9cUkugyk2nuhbv3+mJvpLYYt+0+USMxE+oj+ey/lJEnhZw75x/OMcQ==", + "dev": true + }, + "commondir": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/commondir/-/commondir-1.0.1.tgz", + "integrity": "sha1-3dgA2gxmEnOTzKWVDqloo6rxJTs=", + "dev": true + }, + "component-emitter": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/component-emitter/-/component-emitter-1.3.0.tgz", + "integrity": "sha512-Rd3se6QB+sO1TwqZjscQrurpEPIfO0/yYnSin6Q/rD3mOutHvUrCAhJub3r90uNb+SESBuE0QYoB90YdfatsRg==", + "dev": true + }, + "concat-map": { + "version": "0.0.1", + "resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz", + 
"integrity": "sha1-2Klr13/Wjfd5OnMDajug1UBdR3s=", + "dev": true + }, + "concat-stream": { + "version": "1.6.2", + "resolved": "https://registry.npmjs.org/concat-stream/-/concat-stream-1.6.2.tgz", + "integrity": "sha512-27HBghJxjiZtIk3Ycvn/4kbJk/1uZuJFfuPEns6LaEvpvG1f0hTea8lilrouyo9mVc2GWdcEZ8OLoGmSADlrCw==", + "dev": true, + "requires": { + "buffer-from": "^1.0.0", + "inherits": "^2.0.3", + "readable-stream": "^2.2.2", + "typedarray": "^0.0.6" + } + }, + "console-browserify": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/console-browserify/-/console-browserify-1.2.0.tgz", + "integrity": "sha512-ZMkYO/LkF17QvCPqM0gxw8yUzigAOZOSWSHg91FH6orS7vcEj5dVZTidN2fQ14yBSdg97RqhSNwLUXInd52OTA==", + "dev": true + }, + "constants-browserify": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/constants-browserify/-/constants-browserify-1.0.0.tgz", + "integrity": "sha1-wguW2MYXdIqvHBYCF2DNJ/y4y3U=", + "dev": true + }, + "convert-source-map": { + "version": "1.7.0", + "resolved": "https://registry.npmjs.org/convert-source-map/-/convert-source-map-1.7.0.tgz", + "integrity": "sha512-4FJkXzKXEDB1snCFZlLP4gpC3JILicCpGbzG9f9G7tGqGCzETQ2hWPrcinA9oU4wtf2biUaEH5065UnMeR33oA==", + "dev": true, + "requires": { + "safe-buffer": "~5.1.1" + } + }, + "copy-concurrently": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/copy-concurrently/-/copy-concurrently-1.0.5.tgz", + "integrity": "sha512-f2domd9fsVDFtaFcbaRZuYXwtdmnzqbADSwhSWYxYB/Q8zsdUUFMXVRwXGDMWmbEzAn1kdRrtI1T/KTFOL4X2A==", + "dev": true, + "requires": { + "aproba": "^1.1.1", + "fs-write-stream-atomic": "^1.0.8", + "iferr": "^0.1.5", + "mkdirp": "^0.5.1", + "rimraf": "^2.5.4", + "run-queue": "^1.0.0" + } + }, + "copy-descriptor": { + "version": "0.1.1", + "resolved": "https://registry.npmjs.org/copy-descriptor/-/copy-descriptor-0.1.1.tgz", + "integrity": "sha1-Z29us8OZl8LuGsOpJP1hJHSPV40=", + "dev": true + }, + "core-js-compat": { + "version": "3.6.4", + "resolved": 
"https://registry.npmjs.org/core-js-compat/-/core-js-compat-3.6.4.tgz", + "integrity": "sha512-zAa3IZPvsJ0slViBQ2z+vgyyTuhd3MFn1rBQjZSKVEgB0UMYhUkCj9jJUVPgGTGqWvsBVmfnruXgTcNyTlEiSA==", + "dev": true, + "requires": { + "browserslist": "^4.8.3", + "semver": "7.0.0" + }, + "dependencies": { + "semver": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/semver/-/semver-7.0.0.tgz", + "integrity": "sha512-+GB6zVA9LWh6zovYQLALHwv5rb2PHGlJi3lfiqIHxR0uuwCgefcOJc59v9fv1w8GbStwxuuqqAjI9NMAOOgq1A==", + "dev": true + } + } + }, + "core-util-is": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/core-util-is/-/core-util-is-1.0.2.tgz", + "integrity": "sha1-tf1UIgqivFq1eqtxQMlAdUUDwac=", + "dev": true + }, + "create-ecdh": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/create-ecdh/-/create-ecdh-4.0.3.tgz", + "integrity": "sha512-GbEHQPMOswGpKXM9kCWVrremUcBmjteUaQ01T9rkKCPDXfUHX0IoP9LpHYo2NPFampa4e+/pFDc3jQdxrxQLaw==", + "dev": true, + "requires": { + "bn.js": "^4.1.0", + "elliptic": "^6.0.0" + } + }, + "create-hash": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/create-hash/-/create-hash-1.2.0.tgz", + "integrity": "sha512-z00bCGNHDG8mHAkP7CtT1qVu+bFQUPjYq/4Iv3C3kWjTFV10zIjfSoeqXo9Asws8gwSHDGj/hl2u4OGIjapeCg==", + "dev": true, + "requires": { + "cipher-base": "^1.0.1", + "inherits": "^2.0.1", + "md5.js": "^1.3.4", + "ripemd160": "^2.0.1", + "sha.js": "^2.4.0" + } + }, + "create-hmac": { + "version": "1.1.7", + "resolved": "https://registry.npmjs.org/create-hmac/-/create-hmac-1.1.7.tgz", + "integrity": "sha512-MJG9liiZ+ogc4TzUwuvbER1JRdgvUFSB5+VR/g5h82fGaIRWMWddtKBHi7/sVhfjQZ6SehlyhvQYrcYkaUIpLg==", + "dev": true, + "requires": { + "cipher-base": "^1.0.3", + "create-hash": "^1.1.0", + "inherits": "^2.0.1", + "ripemd160": "^2.0.0", + "safe-buffer": "^5.0.1", + "sha.js": "^2.4.8" + } + }, + "cross-spawn": { + "version": "6.0.5", + "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-6.0.5.tgz", + 
"integrity": "sha512-eTVLrBSt7fjbDygz805pMnstIs2VTBNkRm0qxZd+M7A5XDdxVRWO5MxGBXZhjY4cqLYLdtrGqRf8mBPmzwSpWQ==", + "dev": true, + "requires": { + "nice-try": "^1.0.4", + "path-key": "^2.0.1", + "semver": "^5.5.0", + "shebang-command": "^1.2.0", + "which": "^1.2.9" + }, + "dependencies": { + "semver": { + "version": "5.7.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-5.7.1.tgz", + "integrity": "sha512-sauaDf/PZdVgrLTNYHRtpXa1iRiKcaebiKQ1BJdpQlWH2lCvexQdX55snPFyK7QzpudqbCI0qXFfOasHdyNDGQ==", + "dev": true + } + } + }, + "crypto-browserify": { + "version": "3.12.0", + "resolved": "https://registry.npmjs.org/crypto-browserify/-/crypto-browserify-3.12.0.tgz", + "integrity": "sha512-fz4spIh+znjO2VjL+IdhEpRJ3YN6sMzITSBijk6FK2UvTqruSQW+/cCZTSNsMiZNvUeq0CqurF+dAbyiGOY6Wg==", + "dev": true, + "requires": { + "browserify-cipher": "^1.0.0", + "browserify-sign": "^4.0.0", + "create-ecdh": "^4.0.0", + "create-hash": "^1.1.0", + "create-hmac": "^1.1.0", + "diffie-hellman": "^5.0.0", + "inherits": "^2.0.1", + "pbkdf2": "^3.0.3", + "public-encrypt": "^4.0.0", + "randombytes": "^2.0.0", + "randomfill": "^1.0.3" + } + }, + "cyclist": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/cyclist/-/cyclist-1.0.1.tgz", + "integrity": "sha1-WW6WmP0MgOEgOMK4LW6xs1tiJNk=", + "dev": true + }, + "debug": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.1.1.tgz", + "integrity": "sha512-pYAIzeRo8J6KPEaJ0VWOh5Pzkbw/RetuzehGM7QRRX5he4fPHx2rdKMB256ehJCkX+XRQm16eZLqLNS8RSZXZw==", + "dev": true, + "requires": { + "ms": "^2.1.1" + } + }, + "decamelize": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/decamelize/-/decamelize-1.2.0.tgz", + "integrity": "sha1-9lNNFRSCabIDUue+4m9QH5oZEpA=", + "dev": true + }, + "decode-uri-component": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/decode-uri-component/-/decode-uri-component-0.2.0.tgz", + "integrity": "sha1-6zkTMzRYd1y4TNGh+uBiEGu4dUU=", + "dev": true + }, + 
"define-properties": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/define-properties/-/define-properties-1.1.3.tgz", + "integrity": "sha512-3MqfYKj2lLzdMSf8ZIZE/V+Zuy+BgD6f164e8K2w7dgnpKArBDerGYpM46IYYcjnkdPNMjPk9A6VFB8+3SKlXQ==", + "dev": true, + "requires": { + "object-keys": "^1.0.12" + } + }, + "define-property": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/define-property/-/define-property-2.0.2.tgz", + "integrity": "sha512-jwK2UV4cnPpbcG7+VRARKTZPUWowwXA8bzH5NP6ud0oeAxyYPuGZUAC7hMugpCdz4BeSZl2Dl9k66CHJ/46ZYQ==", + "dev": true, + "requires": { + "is-descriptor": "^1.0.2", + "isobject": "^3.0.1" + }, + "dependencies": { + "is-accessor-descriptor": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/is-accessor-descriptor/-/is-accessor-descriptor-1.0.0.tgz", + "integrity": "sha512-m5hnHTkcVsPfqx3AKlyttIPb7J+XykHvJP2B9bZDjlhLIoEq4XoK64Vg7boZlVWYK6LUY94dYPEE7Lh0ZkZKcQ==", + "dev": true, + "requires": { + "kind-of": "^6.0.0" + } + }, + "is-data-descriptor": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/is-data-descriptor/-/is-data-descriptor-1.0.0.tgz", + "integrity": "sha512-jbRXy1FmtAoCjQkVmIVYwuuqDFUbaOeDjmed1tOGPrsMhtJA4rD9tkgA0F1qJ3gRFRXcHYVkdeaP50Q5rE/jLQ==", + "dev": true, + "requires": { + "kind-of": "^6.0.0" + } + }, + "is-descriptor": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/is-descriptor/-/is-descriptor-1.0.2.tgz", + "integrity": "sha512-2eis5WqQGV7peooDyLmNEPUrps9+SXX5c9pL3xEB+4e9HnGuDa7mB7kHxHw4CbqS9k1T2hOH3miL8n8WtiYVtg==", + "dev": true, + "requires": { + "is-accessor-descriptor": "^1.0.0", + "is-data-descriptor": "^1.0.0", + "kind-of": "^6.0.2" + } + } + } + }, + "des.js": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/des.js/-/des.js-1.0.1.tgz", + "integrity": "sha512-Q0I4pfFrv2VPd34/vfLrFOoRmlYj3OV50i7fskps1jZWK1kApMWWT9G6RRUeYedLcBDIhnSDaUvJMb3AhUlaEA==", + "dev": true, + "requires": { + "inherits": "^2.0.1", + 
"minimalistic-assert": "^1.0.0" + } + }, + "detect-file": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/detect-file/-/detect-file-1.0.0.tgz", + "integrity": "sha1-8NZtA2cqglyxtzvbP+YjEMjlUrc=", + "dev": true + }, + "diffie-hellman": { + "version": "5.0.3", + "resolved": "https://registry.npmjs.org/diffie-hellman/-/diffie-hellman-5.0.3.tgz", + "integrity": "sha512-kqag/Nl+f3GwyK25fhUMYj81BUOrZ9IuJsjIcDE5icNM9FJHAVm3VcUDxdLPoQtTuUylWm6ZIknYJwwaPxsUzg==", + "dev": true, + "requires": { + "bn.js": "^4.1.0", + "miller-rabin": "^4.0.0", + "randombytes": "^2.0.0" + } + }, + "domain-browser": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/domain-browser/-/domain-browser-1.2.0.tgz", + "integrity": "sha512-jnjyiM6eRyZl2H+W8Q/zLMA481hzi0eszAaBUzIVnmYVDBbnLxVNnfu1HgEBvCbL+71FrxMl3E6lpKH7Ge3OXA==", + "dev": true + }, + "duplexify": { + "version": "3.7.1", + "resolved": "https://registry.npmjs.org/duplexify/-/duplexify-3.7.1.tgz", + "integrity": "sha512-07z8uv2wMyS51kKhD1KsdXJg5WQ6t93RneqRxUHnskXVtlYYkLqM0gqStQZ3pj073g687jPCHrqNfCzawLYh5g==", + "dev": true, + "requires": { + "end-of-stream": "^1.0.0", + "inherits": "^2.0.1", + "readable-stream": "^2.0.0", + "stream-shift": "^1.0.0" + } + }, + "electron-to-chromium": { + "version": "1.3.359", + "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.3.359.tgz", + "integrity": "sha512-ewZp4BQftbLclBwmFYoTrlyiLMXQTiYeqh1hn24sWao9bGhUHzQtpytymN8JsenWlQ9SbBWynTPvfghb0Ipn1Q==", + "dev": true + }, + "elliptic": { + "version": "6.5.2", + "resolved": "https://registry.npmjs.org/elliptic/-/elliptic-6.5.2.tgz", + "integrity": "sha512-f4x70okzZbIQl/NSRLkI/+tteV/9WqL98zx+SQ69KbXxmVrmjwsNUPn/gYJJ0sHvEak24cZgHIPegRePAtA/xw==", + "dev": true, + "requires": { + "bn.js": "^4.4.0", + "brorand": "^1.0.1", + "hash.js": "^1.0.0", + "hmac-drbg": "^1.0.0", + "inherits": "^2.0.1", + "minimalistic-assert": "^1.0.0", + "minimalistic-crypto-utils": "^1.0.0" + } + }, + "emoji-regex": { + 
"version": "7.0.3", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-7.0.3.tgz", + "integrity": "sha512-CwBLREIQ7LvYFB0WyRvwhq5N5qPhc6PMjD6bYggFlI5YyDgl+0vxq5VHbMOFqLg7hfWzmu8T5Z1QofhmTIhItA==", + "dev": true + }, + "emojis-list": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/emojis-list/-/emojis-list-2.1.0.tgz", + "integrity": "sha1-TapNnbAPmBmIDHn6RXrlsJof04k=", + "dev": true + }, + "end-of-stream": { + "version": "1.4.4", + "resolved": "https://registry.npmjs.org/end-of-stream/-/end-of-stream-1.4.4.tgz", + "integrity": "sha512-+uw1inIHVPQoaVuHzRyXd21icM+cnt4CzD5rW+NC1wjOUSTOs+Te7FOv7AhN7vS9x/oIyhLP5PR1H+phQAHu5Q==", + "dev": true, + "requires": { + "once": "^1.4.0" + } + }, + "enhanced-resolve": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/enhanced-resolve/-/enhanced-resolve-4.1.1.tgz", + "integrity": "sha512-98p2zE+rL7/g/DzMHMTF4zZlCgeVdJ7yr6xzEpJRYwFYrGi9ANdn5DnJURg6RpBkyk60XYDnWIv51VfIhfNGuA==", + "dev": true, + "requires": { + "graceful-fs": "^4.1.2", + "memory-fs": "^0.5.0", + "tapable": "^1.0.0" + }, + "dependencies": { + "memory-fs": { + "version": "0.5.0", + "resolved": "https://registry.npmjs.org/memory-fs/-/memory-fs-0.5.0.tgz", + "integrity": "sha512-jA0rdU5KoQMC0e6ppoNRtpp6vjFq6+NY7r8hywnC7V+1Xj/MtHwGIbB1QaK/dunyjWteJzmkpd7ooeWg10T7GA==", + "dev": true, + "requires": { + "errno": "^0.1.3", + "readable-stream": "^2.0.1" + } + } + } + }, + "errno": { + "version": "0.1.7", + "resolved": "https://registry.npmjs.org/errno/-/errno-0.1.7.tgz", + "integrity": "sha512-MfrRBDWzIWifgq6tJj60gkAwtLNb6sQPlcFrSOflcP1aFmmruKQ2wRnze/8V6kgyz7H3FF8Npzv78mZ7XLLflg==", + "dev": true, + "requires": { + "prr": "~1.0.1" + } + }, + "escape-string-regexp": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz", + "integrity": "sha1-G2HAViGQqN/2rjuyzwIAyhMLhtQ=", + "dev": true + }, + "esrecurse": { + "version": "4.2.1", + "resolved": 
"https://registry.npmjs.org/esrecurse/-/esrecurse-4.2.1.tgz", + "integrity": "sha512-64RBB++fIOAXPw3P9cy89qfMlvZEXZkqqJkjqqXIvzP5ezRZjW+lPWjw35UX/3EhUPFYbg5ER4JYgDw4007/DQ==", + "dev": true, + "requires": { + "estraverse": "^4.1.0" + } + }, + "estraverse": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-4.3.0.tgz", + "integrity": "sha512-39nnKffWz8xN1BU/2c79n9nB9HDzo0niYUqx6xyqUnyoAnQyyWpOTdZEeiCch8BBu515t4wp9ZmgVfVhn9EBpw==", + "dev": true + }, + "esutils": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/esutils/-/esutils-2.0.3.tgz", + "integrity": "sha512-kVscqXk4OCp68SZ0dkgEKVi6/8ij300KBWTJq32P/dYeWTSwK41WyTxalN1eRmA5Z9UU/LX9D7FWSmV9SAYx6g==", + "dev": true + }, + "events": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/events/-/events-3.1.0.tgz", + "integrity": "sha512-Rv+u8MLHNOdMjTAFeT3nCjHn2aGlx435FP/sDHNaRhDEMwyI/aB22Kj2qIN8R0cw3z28psEQLYwxVKLsKrMgWg==", + "dev": true + }, + "evp_bytestokey": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/evp_bytestokey/-/evp_bytestokey-1.0.3.tgz", + "integrity": "sha512-/f2Go4TognH/KvCISP7OUsHn85hT9nUkxxA9BEWxFn+Oj9o8ZNLm/40hdlgSLyuOimsrTKLUMEorQexp/aPQeA==", + "dev": true, + "requires": { + "md5.js": "^1.3.4", + "safe-buffer": "^5.1.1" + } + }, + "execa": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/execa/-/execa-1.0.0.tgz", + "integrity": "sha512-adbxcyWV46qiHyvSp50TKt05tB4tK3HcmF7/nxfAdhnox83seTDbwnaqKO4sXRy7roHAIFqJP/Rw/AuEbX61LA==", + "dev": true, + "requires": { + "cross-spawn": "^6.0.0", + "get-stream": "^4.0.0", + "is-stream": "^1.1.0", + "npm-run-path": "^2.0.0", + "p-finally": "^1.0.0", + "signal-exit": "^3.0.0", + "strip-eof": "^1.0.0" + } + }, + "expand-brackets": { + "version": "2.1.4", + "resolved": "https://registry.npmjs.org/expand-brackets/-/expand-brackets-2.1.4.tgz", + "integrity": "sha1-t3c14xXOMPa27/D4OwQVGiJEliI=", + "dev": true, + "requires": { + "debug": "^2.3.3", + 
"define-property": "^0.2.5", + "extend-shallow": "^2.0.1", + "posix-character-classes": "^0.1.0", + "regex-not": "^1.0.0", + "snapdragon": "^0.8.1", + "to-regex": "^3.0.1" + }, + "dependencies": { + "debug": { + "version": "2.6.9", + "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", + "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", + "dev": true, + "requires": { + "ms": "2.0.0" + } + }, + "define-property": { + "version": "0.2.5", + "resolved": "https://registry.npmjs.org/define-property/-/define-property-0.2.5.tgz", + "integrity": "sha1-w1se+RjsPJkPmlvFe+BKrOxcgRY=", + "dev": true, + "requires": { + "is-descriptor": "^0.1.0" + } + }, + "extend-shallow": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-2.0.1.tgz", + "integrity": "sha1-Ua99YUrZqfYQ6huvu5idaxxWiQ8=", + "dev": true, + "requires": { + "is-extendable": "^0.1.0" + } + }, + "ms": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", + "integrity": "sha1-VgiurfwAvmwpAd9fmGF4jeDVl8g=", + "dev": true + } + } + }, + "expand-tilde": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/expand-tilde/-/expand-tilde-2.0.2.tgz", + "integrity": "sha1-l+gBqgUt8CRU3kawK/YhZCzchQI=", + "dev": true, + "requires": { + "homedir-polyfill": "^1.0.1" + } + }, + "extend-shallow": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-3.0.2.tgz", + "integrity": "sha1-Jqcarwc7OfshJxcnRhMcJwQCjbg=", + "dev": true, + "requires": { + "assign-symbols": "^1.0.0", + "is-extendable": "^1.0.1" + }, + "dependencies": { + "is-extendable": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/is-extendable/-/is-extendable-1.0.1.tgz", + "integrity": "sha512-arnXMxT1hhoKo9k1LZdmlNyJdDDfy2v0fXjFlmok4+i8ul/6WlbVge9bhM74OpNPQPMGUToDtz+KXa1PneJxOA==", + "dev": true, + "requires": { + "is-plain-object": "^2.0.4" + } + } 
+ } + }, + "extglob": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/extglob/-/extglob-2.0.4.tgz", + "integrity": "sha512-Nmb6QXkELsuBr24CJSkilo6UHHgbekK5UiZgfE6UHD3Eb27YC6oD+bhcT+tJ6cl8dmsgdQxnWlcry8ksBIBLpw==", + "dev": true, + "requires": { + "array-unique": "^0.3.2", + "define-property": "^1.0.0", + "expand-brackets": "^2.1.4", + "extend-shallow": "^2.0.1", + "fragment-cache": "^0.2.1", + "regex-not": "^1.0.0", + "snapdragon": "^0.8.1", + "to-regex": "^3.0.1" + }, + "dependencies": { + "define-property": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/define-property/-/define-property-1.0.0.tgz", + "integrity": "sha1-dp66rz9KY6rTr56NMEybvnm/sOY=", + "dev": true, + "requires": { + "is-descriptor": "^1.0.0" + } + }, + "extend-shallow": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-2.0.1.tgz", + "integrity": "sha1-Ua99YUrZqfYQ6huvu5idaxxWiQ8=", + "dev": true, + "requires": { + "is-extendable": "^0.1.0" + } + }, + "is-accessor-descriptor": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/is-accessor-descriptor/-/is-accessor-descriptor-1.0.0.tgz", + "integrity": "sha512-m5hnHTkcVsPfqx3AKlyttIPb7J+XykHvJP2B9bZDjlhLIoEq4XoK64Vg7boZlVWYK6LUY94dYPEE7Lh0ZkZKcQ==", + "dev": true, + "requires": { + "kind-of": "^6.0.0" + } + }, + "is-data-descriptor": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/is-data-descriptor/-/is-data-descriptor-1.0.0.tgz", + "integrity": "sha512-jbRXy1FmtAoCjQkVmIVYwuuqDFUbaOeDjmed1tOGPrsMhtJA4rD9tkgA0F1qJ3gRFRXcHYVkdeaP50Q5rE/jLQ==", + "dev": true, + "requires": { + "kind-of": "^6.0.0" + } + }, + "is-descriptor": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/is-descriptor/-/is-descriptor-1.0.2.tgz", + "integrity": "sha512-2eis5WqQGV7peooDyLmNEPUrps9+SXX5c9pL3xEB+4e9HnGuDa7mB7kHxHw4CbqS9k1T2hOH3miL8n8WtiYVtg==", + "dev": true, + "requires": { + "is-accessor-descriptor": "^1.0.0", + "is-data-descriptor": 
"^1.0.0", + "kind-of": "^6.0.2" + } + } + } + }, + "fast-deep-equal": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-3.1.1.tgz", + "integrity": "sha512-8UEa58QDLauDNfpbrX55Q9jrGHThw2ZMdOky5Gl1CDtVeJDPVrG4Jxx1N8jw2gkWaff5UUuX1KJd+9zGe2B+ZA==", + "dev": true + }, + "fast-json-stable-stringify": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/fast-json-stable-stringify/-/fast-json-stable-stringify-2.1.0.tgz", + "integrity": "sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw==", + "dev": true + }, + "figgy-pudding": { + "version": "3.5.1", + "resolved": "https://registry.npmjs.org/figgy-pudding/-/figgy-pudding-3.5.1.tgz", + "integrity": "sha512-vNKxJHTEKNThjfrdJwHc7brvM6eVevuO5nTj6ez8ZQ1qbXTvGthucRF7S4vf2cr71QVnT70V34v0S1DyQsti0w==", + "dev": true + }, + "file-uri-to-path": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/file-uri-to-path/-/file-uri-to-path-1.0.0.tgz", + "integrity": "sha512-0Zt+s3L7Vf1biwWZ29aARiVYLx7iMGnEUl9x33fbB/j3jR81u/O2LbqK+Bm1CDSNDKVtJ/YjwY7TUd5SkeLQLw==", + "dev": true, + "optional": true + }, + "fill-range": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-4.0.0.tgz", + "integrity": "sha1-1USBHUKPmOsGpj3EAtJAPDKMOPc=", + "dev": true, + "requires": { + "extend-shallow": "^2.0.1", + "is-number": "^3.0.0", + "repeat-string": "^1.6.1", + "to-regex-range": "^2.1.0" + }, + "dependencies": { + "extend-shallow": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-2.0.1.tgz", + "integrity": "sha1-Ua99YUrZqfYQ6huvu5idaxxWiQ8=", + "dev": true, + "requires": { + "is-extendable": "^0.1.0" + } + } + } + }, + "find-cache-dir": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/find-cache-dir/-/find-cache-dir-2.1.0.tgz", + "integrity": 
"sha512-Tq6PixE0w/VMFfCgbONnkiQIVol/JJL7nRMi20fqzA4NRs9AfeqMGeRdPi3wIhYkxjeBaWh2rxwapn5Tu3IqOQ==", + "dev": true, + "requires": { + "commondir": "^1.0.1", + "make-dir": "^2.0.0", + "pkg-dir": "^3.0.0" + }, + "dependencies": { + "find-up": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/find-up/-/find-up-3.0.0.tgz", + "integrity": "sha512-1yD6RmLI1XBfxugvORwlck6f75tYL+iR0jqwsOrOxMZyGYqUuDhJ0l4AXdO1iX/FTs9cBAMEk1gWSEx1kSbylg==", + "dev": true, + "requires": { + "locate-path": "^3.0.0" + } + }, + "locate-path": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-3.0.0.tgz", + "integrity": "sha512-7AO748wWnIhNqAuaty2ZWHkQHRSNfPVIsPIfwEOWO22AmaoVrWavlOcMR5nzTLNYvp36X220/maaRsrec1G65A==", + "dev": true, + "requires": { + "p-locate": "^3.0.0", + "path-exists": "^3.0.0" + } + }, + "p-limit": { + "version": "2.2.2", + "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-2.2.2.tgz", + "integrity": "sha512-WGR+xHecKTr7EbUEhyLSh5Dube9JtdiG78ufaeLxTgpudf/20KqyMioIUZJAezlTIi6evxuoUs9YXc11cU+yzQ==", + "dev": true, + "requires": { + "p-try": "^2.0.0" + } + }, + "p-locate": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-3.0.0.tgz", + "integrity": "sha512-x+12w/To+4GFfgJhBEpiDcLozRJGegY+Ei7/z0tSLkMmxGZNybVMSfWj9aJn8Z5Fc7dBUNJOOVgPv2H7IwulSQ==", + "dev": true, + "requires": { + "p-limit": "^2.0.0" + } + }, + "p-try": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/p-try/-/p-try-2.2.0.tgz", + "integrity": "sha512-R4nPAVTAU0B9D35/Gk3uJf/7XYbQcyohSKdvAxIRSNghFl4e71hVoGnBNQz9cWaXxO2I10KTC+3jMdvvoKw6dQ==", + "dev": true + }, + "pkg-dir": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/pkg-dir/-/pkg-dir-3.0.0.tgz", + "integrity": "sha512-/E57AYkoeQ25qkxMj5PBOVgF8Kiu/h7cYS30Z5+R7WaiCCBfLq58ZI/dSeaEKb9WVJV5n/03QwrN3IeWIFllvw==", + "dev": true, + "requires": { + "find-up": "^3.0.0" + } + } + } + }, + "findup-sync": { + "version": "3.0.0", + "resolved": 
"https://registry.npmjs.org/findup-sync/-/findup-sync-3.0.0.tgz", + "integrity": "sha512-YbffarhcicEhOrm4CtrwdKBdCuz576RLdhJDsIfvNtxUuhdRet1qZcsMjqbePtAseKdAnDyM/IyXbu7PRPRLYg==", + "dev": true, + "requires": { + "detect-file": "^1.0.0", + "is-glob": "^4.0.0", + "micromatch": "^3.0.4", + "resolve-dir": "^1.0.1" + } + }, + "flush-write-stream": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/flush-write-stream/-/flush-write-stream-1.1.1.tgz", + "integrity": "sha512-3Z4XhFZ3992uIq0XOqb9AreonueSYphE6oYbpt5+3u06JWklbsPkNv3ZKkP9Bz/r+1MWCaMoSQ28P85+1Yc77w==", + "dev": true, + "requires": { + "inherits": "^2.0.3", + "readable-stream": "^2.3.6" + } + }, + "for-in": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/for-in/-/for-in-1.0.2.tgz", + "integrity": "sha1-gQaNKVqBQuwKxybG4iAMMPttXoA=", + "dev": true + }, + "fragment-cache": { + "version": "0.2.1", + "resolved": "https://registry.npmjs.org/fragment-cache/-/fragment-cache-0.2.1.tgz", + "integrity": "sha1-QpD60n8T6Jvn8zeZxrxaCr//DRk=", + "dev": true, + "requires": { + "map-cache": "^0.2.2" + } + }, + "from2": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/from2/-/from2-2.3.0.tgz", + "integrity": "sha1-i/tVAr3kpNNs/e6gB/zKIdfjgq8=", + "dev": true, + "requires": { + "inherits": "^2.0.1", + "readable-stream": "^2.0.0" + } + }, + "fs-write-stream-atomic": { + "version": "1.0.10", + "resolved": "https://registry.npmjs.org/fs-write-stream-atomic/-/fs-write-stream-atomic-1.0.10.tgz", + "integrity": "sha1-tH31NJPvkR33VzHnCp3tAYnbQMk=", + "dev": true, + "requires": { + "graceful-fs": "^4.1.2", + "iferr": "^0.1.5", + "imurmurhash": "^0.1.4", + "readable-stream": "1 || 2" + } + }, + "fs.realpath": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz", + "integrity": "sha1-FQStJSMVjKpA20onh8sBQRmU6k8=", + "dev": true + }, + "fsevents": { + "version": "1.2.11", + "resolved": 
"https://registry.npmjs.org/fsevents/-/fsevents-1.2.11.tgz", + "integrity": "sha512-+ux3lx6peh0BpvY0JebGyZoiR4D+oYzdPZMKJwkZ+sFkNJzpL7tXc/wehS49gUAxg3tmMHPHZkA8JU2rhhgDHw==", + "dev": true, + "optional": true, + "requires": { + "bindings": "^1.5.0", + "nan": "^2.12.1", + "node-pre-gyp": "*" + }, + "dependencies": { + "abbrev": { + "version": "1.1.1", + "bundled": true, + "dev": true, + "optional": true + }, + "ansi-regex": { + "version": "2.1.1", + "bundled": true, + "dev": true, + "optional": true + }, + "aproba": { + "version": "1.2.0", + "bundled": true, + "dev": true, + "optional": true + }, + "are-we-there-yet": { + "version": "1.1.5", + "bundled": true, + "dev": true, + "optional": true, + "requires": { + "delegates": "^1.0.0", + "readable-stream": "^2.0.6" + } + }, + "balanced-match": { + "version": "1.0.0", + "bundled": true, + "dev": true, + "optional": true + }, + "brace-expansion": { + "version": "1.1.11", + "bundled": true, + "dev": true, + "optional": true, + "requires": { + "balanced-match": "^1.0.0", + "concat-map": "0.0.1" + } + }, + "chownr": { + "version": "1.1.3", + "bundled": true, + "dev": true, + "optional": true + }, + "code-point-at": { + "version": "1.1.0", + "bundled": true, + "dev": true, + "optional": true + }, + "concat-map": { + "version": "0.0.1", + "bundled": true, + "dev": true, + "optional": true + }, + "console-control-strings": { + "version": "1.1.0", + "bundled": true, + "dev": true, + "optional": true + }, + "core-util-is": { + "version": "1.0.2", + "bundled": true, + "dev": true, + "optional": true + }, + "debug": { + "version": "3.2.6", + "bundled": true, + "dev": true, + "optional": true, + "requires": { + "ms": "^2.1.1" + } + }, + "deep-extend": { + "version": "0.6.0", + "bundled": true, + "dev": true, + "optional": true + }, + "delegates": { + "version": "1.0.0", + "bundled": true, + "dev": true, + "optional": true + }, + "detect-libc": { + "version": "1.0.3", + "bundled": true, + "dev": true, + "optional": true + }, + 
"fs-minipass": { + "version": "1.2.7", + "bundled": true, + "dev": true, + "optional": true, + "requires": { + "minipass": "^2.6.0" + } + }, + "fs.realpath": { + "version": "1.0.0", + "bundled": true, + "dev": true, + "optional": true + }, + "gauge": { + "version": "2.7.4", + "bundled": true, + "dev": true, + "optional": true, + "requires": { + "aproba": "^1.0.3", + "console-control-strings": "^1.0.0", + "has-unicode": "^2.0.0", + "object-assign": "^4.1.0", + "signal-exit": "^3.0.0", + "string-width": "^1.0.1", + "strip-ansi": "^3.0.1", + "wide-align": "^1.1.0" + } + }, + "glob": { + "version": "7.1.6", + "bundled": true, + "dev": true, + "optional": true, + "requires": { + "fs.realpath": "^1.0.0", + "inflight": "^1.0.4", + "inherits": "2", + "minimatch": "^3.0.4", + "once": "^1.3.0", + "path-is-absolute": "^1.0.0" + } + }, + "has-unicode": { + "version": "2.0.1", + "bundled": true, + "dev": true, + "optional": true + }, + "iconv-lite": { + "version": "0.4.24", + "bundled": true, + "dev": true, + "optional": true, + "requires": { + "safer-buffer": ">= 2.1.2 < 3" + } + }, + "ignore-walk": { + "version": "3.0.3", + "bundled": true, + "dev": true, + "optional": true, + "requires": { + "minimatch": "^3.0.4" + } + }, + "inflight": { + "version": "1.0.6", + "bundled": true, + "dev": true, + "optional": true, + "requires": { + "once": "^1.3.0", + "wrappy": "1" + } + }, + "inherits": { + "version": "2.0.4", + "bundled": true, + "dev": true, + "optional": true + }, + "ini": { + "version": "1.3.5", + "bundled": true, + "dev": true, + "optional": true + }, + "is-fullwidth-code-point": { + "version": "1.0.0", + "bundled": true, + "dev": true, + "optional": true, + "requires": { + "number-is-nan": "^1.0.0" + } + }, + "isarray": { + "version": "1.0.0", + "bundled": true, + "dev": true, + "optional": true + }, + "minimatch": { + "version": "3.0.4", + "bundled": true, + "dev": true, + "optional": true, + "requires": { + "brace-expansion": "^1.1.7" + } + }, + "minimist": { + 
"version": "0.0.8", + "bundled": true, + "dev": true, + "optional": true + }, + "minipass": { + "version": "2.9.0", + "bundled": true, + "dev": true, + "optional": true, + "requires": { + "safe-buffer": "^5.1.2", + "yallist": "^3.0.0" + } + }, + "minizlib": { + "version": "1.3.3", + "bundled": true, + "dev": true, + "optional": true, + "requires": { + "minipass": "^2.9.0" + } + }, + "mkdirp": { + "version": "0.5.1", + "bundled": true, + "dev": true, + "optional": true, + "requires": { + "minimist": "0.0.8" + } + }, + "ms": { + "version": "2.1.2", + "bundled": true, + "dev": true, + "optional": true + }, + "needle": { + "version": "2.4.0", + "bundled": true, + "dev": true, + "optional": true, + "requires": { + "debug": "^3.2.6", + "iconv-lite": "^0.4.4", + "sax": "^1.2.4" + } + }, + "node-pre-gyp": { + "version": "0.14.0", + "bundled": true, + "dev": true, + "optional": true, + "requires": { + "detect-libc": "^1.0.2", + "mkdirp": "^0.5.1", + "needle": "^2.2.1", + "nopt": "^4.0.1", + "npm-packlist": "^1.1.6", + "npmlog": "^4.0.2", + "rc": "^1.2.7", + "rimraf": "^2.6.1", + "semver": "^5.3.0", + "tar": "^4.4.2" + } + }, + "nopt": { + "version": "4.0.1", + "bundled": true, + "dev": true, + "optional": true, + "requires": { + "abbrev": "1", + "osenv": "^0.1.4" + } + }, + "npm-bundled": { + "version": "1.1.1", + "bundled": true, + "dev": true, + "optional": true, + "requires": { + "npm-normalize-package-bin": "^1.0.1" + } + }, + "npm-normalize-package-bin": { + "version": "1.0.1", + "bundled": true, + "dev": true, + "optional": true + }, + "npm-packlist": { + "version": "1.4.7", + "bundled": true, + "dev": true, + "optional": true, + "requires": { + "ignore-walk": "^3.0.1", + "npm-bundled": "^1.0.1" + } + }, + "npmlog": { + "version": "4.1.2", + "bundled": true, + "dev": true, + "optional": true, + "requires": { + "are-we-there-yet": "~1.1.2", + "console-control-strings": "~1.1.0", + "gauge": "~2.7.3", + "set-blocking": "~2.0.0" + } + }, + "number-is-nan": { + "version": 
"1.0.1", + "bundled": true, + "dev": true, + "optional": true + }, + "object-assign": { + "version": "4.1.1", + "bundled": true, + "dev": true, + "optional": true + }, + "once": { + "version": "1.4.0", + "bundled": true, + "dev": true, + "optional": true, + "requires": { + "wrappy": "1" + } + }, + "os-homedir": { + "version": "1.0.2", + "bundled": true, + "dev": true, + "optional": true + }, + "os-tmpdir": { + "version": "1.0.2", + "bundled": true, + "dev": true, + "optional": true + }, + "osenv": { + "version": "0.1.5", + "bundled": true, + "dev": true, + "optional": true, + "requires": { + "os-homedir": "^1.0.0", + "os-tmpdir": "^1.0.0" + } + }, + "path-is-absolute": { + "version": "1.0.1", + "bundled": true, + "dev": true, + "optional": true + }, + "process-nextick-args": { + "version": "2.0.1", + "bundled": true, + "dev": true, + "optional": true + }, + "rc": { + "version": "1.2.8", + "bundled": true, + "dev": true, + "optional": true, + "requires": { + "deep-extend": "^0.6.0", + "ini": "~1.3.0", + "minimist": "^1.2.0", + "strip-json-comments": "~2.0.1" + }, + "dependencies": { + "minimist": { + "version": "1.2.0", + "bundled": true, + "dev": true, + "optional": true + } + } + }, + "readable-stream": { + "version": "2.3.6", + "bundled": true, + "dev": true, + "optional": true, + "requires": { + "core-util-is": "~1.0.0", + "inherits": "~2.0.3", + "isarray": "~1.0.0", + "process-nextick-args": "~2.0.0", + "safe-buffer": "~5.1.1", + "string_decoder": "~1.1.1", + "util-deprecate": "~1.0.1" + } + }, + "rimraf": { + "version": "2.7.1", + "bundled": true, + "dev": true, + "optional": true, + "requires": { + "glob": "^7.1.3" + } + }, + "safe-buffer": { + "version": "5.1.2", + "bundled": true, + "dev": true, + "optional": true + }, + "safer-buffer": { + "version": "2.1.2", + "bundled": true, + "dev": true, + "optional": true + }, + "sax": { + "version": "1.2.4", + "bundled": true, + "dev": true, + "optional": true + }, + "semver": { + "version": "5.7.1", + "bundled": 
true, + "dev": true, + "optional": true + }, + "set-blocking": { + "version": "2.0.0", + "bundled": true, + "dev": true, + "optional": true + }, + "signal-exit": { + "version": "3.0.2", + "bundled": true, + "dev": true, + "optional": true + }, + "string-width": { + "version": "1.0.2", + "bundled": true, + "dev": true, + "optional": true, + "requires": { + "code-point-at": "^1.0.0", + "is-fullwidth-code-point": "^1.0.0", + "strip-ansi": "^3.0.0" + } + }, + "string_decoder": { + "version": "1.1.1", + "bundled": true, + "dev": true, + "optional": true, + "requires": { + "safe-buffer": "~5.1.0" + } + }, + "strip-ansi": { + "version": "3.0.1", + "bundled": true, + "dev": true, + "optional": true, + "requires": { + "ansi-regex": "^2.0.0" + } + }, + "strip-json-comments": { + "version": "2.0.1", + "bundled": true, + "dev": true, + "optional": true + }, + "tar": { + "version": "4.4.13", + "bundled": true, + "dev": true, + "optional": true, + "requires": { + "chownr": "^1.1.1", + "fs-minipass": "^1.2.5", + "minipass": "^2.8.6", + "minizlib": "^1.2.1", + "mkdirp": "^0.5.0", + "safe-buffer": "^5.1.2", + "yallist": "^3.0.3" + } + }, + "util-deprecate": { + "version": "1.0.2", + "bundled": true, + "dev": true, + "optional": true + }, + "wide-align": { + "version": "1.1.3", + "bundled": true, + "dev": true, + "optional": true, + "requires": { + "string-width": "^1.0.2 || 2" + } + }, + "wrappy": { + "version": "1.0.2", + "bundled": true, + "dev": true, + "optional": true + }, + "yallist": { + "version": "3.1.1", + "bundled": true, + "dev": true, + "optional": true + } + } + }, + "function-bind": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.1.tgz", + "integrity": "sha512-yIovAzMX49sF8Yl58fSCWJ5svSLuaibPxXQJFLmBObTuCr0Mf1KiPopGM9NiFjiYBCbfaa2Fh6breQ6ANVTI0A==", + "dev": true + }, + "gensync": { + "version": "1.0.0-beta.1", + "resolved": "https://registry.npmjs.org/gensync/-/gensync-1.0.0-beta.1.tgz", + "integrity": 
"sha512-r8EC6NO1sngH/zdD9fiRDLdcgnbayXah+mLgManTaIZJqEC1MZstmnox8KpnI2/fxQwrp5OpCOYWLp4rBl4Jcg==", + "dev": true + }, + "get-caller-file": { + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/get-caller-file/-/get-caller-file-2.0.5.tgz", + "integrity": "sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg==", + "dev": true + }, + "get-stream": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-4.1.0.tgz", + "integrity": "sha512-GMat4EJ5161kIy2HevLlr4luNjBgvmj413KaQA7jt4V8B4RDsfpHk7WQ9GVqfYyyx8OS/L66Kox+rJRNklLK7w==", + "dev": true, + "requires": { + "pump": "^3.0.0" + } + }, + "get-value": { + "version": "2.0.6", + "resolved": "https://registry.npmjs.org/get-value/-/get-value-2.0.6.tgz", + "integrity": "sha1-3BXKHGcjh8p2vTesCjlbogQqLCg=", + "dev": true + }, + "glob": { + "version": "7.1.6", + "resolved": "https://registry.npmjs.org/glob/-/glob-7.1.6.tgz", + "integrity": "sha512-LwaxwyZ72Lk7vZINtNNrywX0ZuLyStrdDtabefZKAY5ZGJhVtgdznluResxNmPitE0SAO+O26sWTHeKSI2wMBA==", + "dev": true, + "requires": { + "fs.realpath": "^1.0.0", + "inflight": "^1.0.4", + "inherits": "2", + "minimatch": "^3.0.4", + "once": "^1.3.0", + "path-is-absolute": "^1.0.0" + } + }, + "global-modules": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/global-modules/-/global-modules-2.0.0.tgz", + "integrity": "sha512-NGbfmJBp9x8IxyJSd1P+otYK8vonoJactOogrVfFRIAEY1ukil8RSKDz2Yo7wh1oihl51l/r6W4epkeKJHqL8A==", + "dev": true, + "requires": { + "global-prefix": "^3.0.0" + }, + "dependencies": { + "global-prefix": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/global-prefix/-/global-prefix-3.0.0.tgz", + "integrity": "sha512-awConJSVCHVGND6x3tmMaKcQvwXLhjdkmomy2W+Goaui8YPgYgXJZewhg3fWC+DlfqqQuWg8AwqjGTD2nAPVWg==", + "dev": true, + "requires": { + "ini": "^1.3.5", + "kind-of": "^6.0.2", + "which": "^1.3.1" + } + } + } + }, + "global-prefix": { + "version": "1.0.2", + "resolved": 
"https://registry.npmjs.org/global-prefix/-/global-prefix-1.0.2.tgz", + "integrity": "sha1-2/dDxsFJklk8ZVVoy2btMsASLr4=", + "dev": true, + "requires": { + "expand-tilde": "^2.0.2", + "homedir-polyfill": "^1.0.1", + "ini": "^1.3.4", + "is-windows": "^1.0.1", + "which": "^1.2.14" + } + }, + "globals": { + "version": "11.12.0", + "resolved": "https://registry.npmjs.org/globals/-/globals-11.12.0.tgz", + "integrity": "sha512-WOBp/EEGUiIsJSp7wcv/y6MO+lV9UoncWqxuFfm8eBwzWNgyfBd6Gz+IeKQ9jCmyhoH99g15M3T+QaVHFjizVA==", + "dev": true + }, + "graceful-fs": { + "version": "4.2.3", + "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.3.tgz", + "integrity": "sha512-a30VEBm4PEdx1dRB7MFK7BejejvCvBronbLjht+sHuGYj8PHs7M/5Z+rt5lw551vZ7yfTCj4Vuyy3mSJytDWRQ==", + "dev": true + }, + "has-flag": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-3.0.0.tgz", + "integrity": "sha1-tdRU3CGZriJWmfNGfloH87lVuv0=", + "dev": true + }, + "has-symbols": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.0.1.tgz", + "integrity": "sha512-PLcsoqu++dmEIZB+6totNFKq/7Do+Z0u4oT0zKOJNl3lYK6vGwwu2hjHs+68OEZbTjiUE9bgOABXbP/GvrS0Kg==", + "dev": true + }, + "has-value": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/has-value/-/has-value-1.0.0.tgz", + "integrity": "sha1-GLKB2lhbHFxR3vJMkw7SmgvmsXc=", + "dev": true, + "requires": { + "get-value": "^2.0.6", + "has-values": "^1.0.0", + "isobject": "^3.0.0" + } + }, + "has-values": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/has-values/-/has-values-1.0.0.tgz", + "integrity": "sha1-lbC2P+whRmGab+V/51Yo1aOe/k8=", + "dev": true, + "requires": { + "is-number": "^3.0.0", + "kind-of": "^4.0.0" + }, + "dependencies": { + "kind-of": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-4.0.0.tgz", + "integrity": "sha1-IIE989cSkosgc3hpGkUGb65y3Vc=", + "dev": true, + "requires": { + "is-buffer": "^1.1.5" + } 
+ } + } + }, + "hash-base": { + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/hash-base/-/hash-base-3.0.4.tgz", + "integrity": "sha1-X8hoaEfs1zSZQDMZprCj8/auSRg=", + "dev": true, + "requires": { + "inherits": "^2.0.1", + "safe-buffer": "^5.0.1" + } + }, + "hash.js": { + "version": "1.1.7", + "resolved": "https://registry.npmjs.org/hash.js/-/hash.js-1.1.7.tgz", + "integrity": "sha512-taOaskGt4z4SOANNseOviYDvjEJinIkRgmp7LbKP2YTTmVxWBl87s/uzK9r+44BclBSp2X7K1hqeNfz9JbBeXA==", + "dev": true, + "requires": { + "inherits": "^2.0.3", + "minimalistic-assert": "^1.0.1" + } + }, + "hmac-drbg": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/hmac-drbg/-/hmac-drbg-1.0.1.tgz", + "integrity": "sha1-0nRXAQJabHdabFRXk+1QL8DGSaE=", + "dev": true, + "requires": { + "hash.js": "^1.0.3", + "minimalistic-assert": "^1.0.0", + "minimalistic-crypto-utils": "^1.0.1" + } + }, + "homedir-polyfill": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/homedir-polyfill/-/homedir-polyfill-1.0.3.tgz", + "integrity": "sha512-eSmmWE5bZTK2Nou4g0AI3zZ9rswp7GRKoKXS1BLUkvPviOqs4YTN1djQIqrXy9k5gEtdLPy86JjRwsNM9tnDcA==", + "dev": true, + "requires": { + "parse-passwd": "^1.0.0" + } + }, + "https-browserify": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/https-browserify/-/https-browserify-1.0.0.tgz", + "integrity": "sha1-7AbBDgo0wPL68Zn3/X/Hj//QPHM=", + "dev": true + }, + "ieee754": { + "version": "1.1.13", + "resolved": "https://registry.npmjs.org/ieee754/-/ieee754-1.1.13.tgz", + "integrity": "sha512-4vf7I2LYV/HaWerSo3XmlMkp5eZ83i+/CDluXi/IGTs/O1sejBNhTtnxzmRZfvOUqj7lZjqHkeTvpgSFDlWZTg==", + "dev": true + }, + "iferr": { + "version": "0.1.5", + "resolved": "https://registry.npmjs.org/iferr/-/iferr-0.1.5.tgz", + "integrity": "sha1-xg7taebY/bazEEofy8ocGS3FtQE=", + "dev": true + }, + "import-local": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/import-local/-/import-local-2.0.0.tgz", + "integrity": 
"sha512-b6s04m3O+s3CGSbqDIyP4R6aAwAeYlVq9+WUWep6iHa8ETRf9yei1U48C5MmfJmV9AiLYYBKPMq/W+/WRpQmCQ==", + "dev": true, + "requires": { + "pkg-dir": "^3.0.0", + "resolve-cwd": "^2.0.0" + }, + "dependencies": { + "find-up": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/find-up/-/find-up-3.0.0.tgz", + "integrity": "sha512-1yD6RmLI1XBfxugvORwlck6f75tYL+iR0jqwsOrOxMZyGYqUuDhJ0l4AXdO1iX/FTs9cBAMEk1gWSEx1kSbylg==", + "dev": true, + "requires": { + "locate-path": "^3.0.0" + } + }, + "locate-path": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-3.0.0.tgz", + "integrity": "sha512-7AO748wWnIhNqAuaty2ZWHkQHRSNfPVIsPIfwEOWO22AmaoVrWavlOcMR5nzTLNYvp36X220/maaRsrec1G65A==", + "dev": true, + "requires": { + "p-locate": "^3.0.0", + "path-exists": "^3.0.0" + } + }, + "p-limit": { + "version": "2.2.2", + "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-2.2.2.tgz", + "integrity": "sha512-WGR+xHecKTr7EbUEhyLSh5Dube9JtdiG78ufaeLxTgpudf/20KqyMioIUZJAezlTIi6evxuoUs9YXc11cU+yzQ==", + "dev": true, + "requires": { + "p-try": "^2.0.0" + } + }, + "p-locate": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-3.0.0.tgz", + "integrity": "sha512-x+12w/To+4GFfgJhBEpiDcLozRJGegY+Ei7/z0tSLkMmxGZNybVMSfWj9aJn8Z5Fc7dBUNJOOVgPv2H7IwulSQ==", + "dev": true, + "requires": { + "p-limit": "^2.0.0" + } + }, + "p-try": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/p-try/-/p-try-2.2.0.tgz", + "integrity": "sha512-R4nPAVTAU0B9D35/Gk3uJf/7XYbQcyohSKdvAxIRSNghFl4e71hVoGnBNQz9cWaXxO2I10KTC+3jMdvvoKw6dQ==", + "dev": true + }, + "pkg-dir": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/pkg-dir/-/pkg-dir-3.0.0.tgz", + "integrity": "sha512-/E57AYkoeQ25qkxMj5PBOVgF8Kiu/h7cYS30Z5+R7WaiCCBfLq58ZI/dSeaEKb9WVJV5n/03QwrN3IeWIFllvw==", + "dev": true, + "requires": { + "find-up": "^3.0.0" + } + } + } + }, + "imurmurhash": { + "version": "0.1.4", + "resolved": 
"https://registry.npmjs.org/imurmurhash/-/imurmurhash-0.1.4.tgz", + "integrity": "sha1-khi5srkoojixPcT7a21XbyMUU+o=", + "dev": true + }, + "infer-owner": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/infer-owner/-/infer-owner-1.0.4.tgz", + "integrity": "sha512-IClj+Xz94+d7irH5qRyfJonOdfTzuDaifE6ZPWfx0N0+/ATZCbuTPq2prFl526urkQd90WyUKIh1DfBQ2hMz9A==", + "dev": true + }, + "inflight": { + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/inflight/-/inflight-1.0.6.tgz", + "integrity": "sha1-Sb1jMdfQLQwJvJEKEHW6gWW1bfk=", + "dev": true, + "requires": { + "once": "^1.3.0", + "wrappy": "1" + } + }, + "inherits": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz", + "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==", + "dev": true + }, + "ini": { + "version": "1.3.5", + "resolved": "https://registry.npmjs.org/ini/-/ini-1.3.5.tgz", + "integrity": "sha512-RZY5huIKCMRWDUqZlEi72f/lmXKMvuszcMBduliQ3nnWbx9X/ZBQO7DijMEYS9EhHBb2qacRUMtC7svLwe0lcw==", + "dev": true + }, + "interpret": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/interpret/-/interpret-1.2.0.tgz", + "integrity": "sha512-mT34yGKMNceBQUoVn7iCDKDntA7SC6gycMAWzGx1z/CMCTV7b2AAtXlo3nRyHZ1FelRkQbQjprHSYGwzLtkVbw==", + "dev": true + }, + "invariant": { + "version": "2.2.4", + "resolved": "https://registry.npmjs.org/invariant/-/invariant-2.2.4.tgz", + "integrity": "sha512-phJfQVBuaJM5raOpJjSfkiD6BpbCE4Ns//LaXl6wGYtUBY83nWS6Rf9tXm2e8VaK60JEjYldbPif/A2B1C2gNA==", + "dev": true, + "requires": { + "loose-envify": "^1.0.0" + } + }, + "invert-kv": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/invert-kv/-/invert-kv-2.0.0.tgz", + "integrity": "sha512-wPVv/y/QQ/Uiirj/vh3oP+1Ww+AWehmi1g5fFWGPF6IpCBCDVrhgHRMvrLfdYcwDh3QJbGXDW4JAuzxElLSqKA==", + "dev": true + }, + "is-accessor-descriptor": { + "version": "0.1.6", + "resolved": 
"https://registry.npmjs.org/is-accessor-descriptor/-/is-accessor-descriptor-0.1.6.tgz", + "integrity": "sha1-qeEss66Nh2cn7u84Q/igiXtcmNY=", + "dev": true, + "requires": { + "kind-of": "^3.0.2" + }, + "dependencies": { + "kind-of": { + "version": "3.2.2", + "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-3.2.2.tgz", + "integrity": "sha1-MeohpzS6ubuw8yRm2JOupR5KPGQ=", + "dev": true, + "requires": { + "is-buffer": "^1.1.5" + } + } + } + }, + "is-binary-path": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/is-binary-path/-/is-binary-path-1.0.1.tgz", + "integrity": "sha1-dfFmQrSA8YenEcgUFh/TpKdlWJg=", + "dev": true, + "requires": { + "binary-extensions": "^1.0.0" + } + }, + "is-buffer": { + "version": "1.1.6", + "resolved": "https://registry.npmjs.org/is-buffer/-/is-buffer-1.1.6.tgz", + "integrity": "sha512-NcdALwpXkTm5Zvvbk7owOUSvVvBKDgKP5/ewfXEznmQFfs4ZRmanOeKBTjRVjka3QFoN6XJ+9F3USqfHqTaU5w==", + "dev": true + }, + "is-data-descriptor": { + "version": "0.1.4", + "resolved": "https://registry.npmjs.org/is-data-descriptor/-/is-data-descriptor-0.1.4.tgz", + "integrity": "sha1-C17mSDiOLIYCgueT8YVv7D8wG1Y=", + "dev": true, + "requires": { + "kind-of": "^3.0.2" + }, + "dependencies": { + "kind-of": { + "version": "3.2.2", + "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-3.2.2.tgz", + "integrity": "sha1-MeohpzS6ubuw8yRm2JOupR5KPGQ=", + "dev": true, + "requires": { + "is-buffer": "^1.1.5" + } + } + } + }, + "is-descriptor": { + "version": "0.1.6", + "resolved": "https://registry.npmjs.org/is-descriptor/-/is-descriptor-0.1.6.tgz", + "integrity": "sha512-avDYr0SB3DwO9zsMov0gKCESFYqCnE4hq/4z3TdUlukEy5t9C0YRq7HLrsN52NAcqXKaepeCD0n+B0arnVG3Hg==", + "dev": true, + "requires": { + "is-accessor-descriptor": "^0.1.6", + "is-data-descriptor": "^0.1.4", + "kind-of": "^5.0.0" + }, + "dependencies": { + "kind-of": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-5.1.0.tgz", + "integrity": 
"sha512-NGEErnH6F2vUuXDh+OlbcKW7/wOcfdRHaZ7VWtqCztfHri/++YKmP51OdWeGPuqCOba6kk2OTe5d02VmTB80Pw==", + "dev": true + } + } + }, + "is-extendable": { + "version": "0.1.1", + "resolved": "https://registry.npmjs.org/is-extendable/-/is-extendable-0.1.1.tgz", + "integrity": "sha1-YrEQ4omkcUGOPsNqYX1HLjAd/Ik=", + "dev": true + }, + "is-extglob": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/is-extglob/-/is-extglob-2.1.1.tgz", + "integrity": "sha1-qIwCU1eR8C7TfHahueqXc8gz+MI=", + "dev": true + }, + "is-fullwidth-code-point": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-2.0.0.tgz", + "integrity": "sha1-o7MKXE8ZkYMWeqq5O+764937ZU8=", + "dev": true + }, + "is-glob": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-4.0.1.tgz", + "integrity": "sha512-5G0tKtBTFImOqDnLB2hG6Bp2qcKEFduo4tZu9MT/H6NQv/ghhy30o55ufafxJ/LdH79LLs2Kfrn85TLKyA7BUg==", + "dev": true, + "requires": { + "is-extglob": "^2.1.1" + } + }, + "is-number": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/is-number/-/is-number-3.0.0.tgz", + "integrity": "sha1-JP1iAaR4LPUFYcgQJ2r8fRLXEZU=", + "dev": true, + "requires": { + "kind-of": "^3.0.2" + }, + "dependencies": { + "kind-of": { + "version": "3.2.2", + "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-3.2.2.tgz", + "integrity": "sha1-MeohpzS6ubuw8yRm2JOupR5KPGQ=", + "dev": true, + "requires": { + "is-buffer": "^1.1.5" + } + } + } + }, + "is-plain-object": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/is-plain-object/-/is-plain-object-2.0.4.tgz", + "integrity": "sha512-h5PpgXkWitc38BBMYawTYMWJHFZJVnBquFE57xFpjB8pJFiF6gZ+bU+WyI/yqXiFR5mdLsgYNaPe8uao6Uv9Og==", + "dev": true, + "requires": { + "isobject": "^3.0.1" + } + }, + "is-stream": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/is-stream/-/is-stream-1.1.0.tgz", + "integrity": "sha1-EtSj3U5o4Lec6428hBc66A2RykQ=", + "dev": true + 
}, + "is-windows": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/is-windows/-/is-windows-1.0.2.tgz", + "integrity": "sha512-eXK1UInq2bPmjyX6e3VHIzMLobc4J94i4AWn+Hpq3OU5KkrRC96OAcR3PRJ/pGu6m8TRnBHP9dkXQVsT/COVIA==", + "dev": true + }, + "is-wsl": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/is-wsl/-/is-wsl-1.1.0.tgz", + "integrity": "sha1-HxbkqiKwTRM2tmGIpmrzxgDDpm0=", + "dev": true + }, + "isarray": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/isarray/-/isarray-1.0.0.tgz", + "integrity": "sha1-u5NdSFgsuhaMBoNJV6VKPgcSTxE=", + "dev": true + }, + "isexe": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz", + "integrity": "sha1-6PvzdNxVb/iUehDcsFctYz8s+hA=", + "dev": true + }, + "isobject": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/isobject/-/isobject-3.0.1.tgz", + "integrity": "sha1-TkMekrEalzFjaqH5yNHMvP2reN8=", + "dev": true + }, + "js-tokens": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-4.0.0.tgz", + "integrity": "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==", + "dev": true + }, + "jsesc": { + "version": "2.5.2", + "resolved": "https://registry.npmjs.org/jsesc/-/jsesc-2.5.2.tgz", + "integrity": "sha512-OYu7XEzjkCQ3C5Ps3QIZsQfNpqoJyZZA99wd9aWd05NCtC5pWOkShK2mkL6HXQR6/Cy2lbNdPlZBpuQHXE63gA==", + "dev": true + }, + "json-parse-better-errors": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/json-parse-better-errors/-/json-parse-better-errors-1.0.2.tgz", + "integrity": "sha512-mrqyZKfX5EhL7hvqcV6WG1yYjnjeuYDzDhhcAAUrq8Po85NBQBJP+ZDUT75qZQ98IkUoBqdkExkukOU7Ts2wrw==", + "dev": true + }, + "json-schema-traverse": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz", + "integrity": 
"sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==", + "dev": true + }, + "json5": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/json5/-/json5-1.0.1.tgz", + "integrity": "sha512-aKS4WQjPenRxiQsC93MNfjx+nbF4PAdYzmd/1JIj8HYzqfbu86beTuNgXDzPknWk0n0uARlyewZo4s++ES36Ow==", + "dev": true, + "requires": { + "minimist": "^1.2.0" + }, + "dependencies": { + "minimist": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.0.tgz", + "integrity": "sha1-o1AIsg9BOD7sH7kU9M1d95omQoQ=", + "dev": true + } + } + }, + "kind-of": { + "version": "6.0.3", + "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-6.0.3.tgz", + "integrity": "sha512-dcS1ul+9tmeD95T+x28/ehLgd9mENa3LsvDTtzm3vyBEO7RPptvAD+t44WVXaUjTBRcrpFeFlC8WCruUR456hw==", + "dev": true + }, + "lcid": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/lcid/-/lcid-2.0.0.tgz", + "integrity": "sha512-avPEb8P8EGnwXKClwsNUgryVjllcRqtMYa49NTsbQagYuT1DcXnl1915oxWjoyGrXR6zH/Y0Zc96xWsPcoDKeA==", + "dev": true, + "requires": { + "invert-kv": "^2.0.0" + } + }, + "leven": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/leven/-/leven-3.1.0.tgz", + "integrity": "sha512-qsda+H8jTaUaN/x5vzW2rzc+8Rw4TAQ/4KjB46IwK5VH+IlVeeeje/EoZRpiXvIqjFgK84QffqPztGI3VBLG1A==", + "dev": true + }, + "levenary": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/levenary/-/levenary-1.1.1.tgz", + "integrity": "sha512-mkAdOIt79FD6irqjYSs4rdbnlT5vRonMEvBVPVb3XmevfS8kgRXwfes0dhPdEtzTWD/1eNE/Bm/G1iRt6DcnQQ==", + "dev": true, + "requires": { + "leven": "^3.1.0" + } + }, + "loader-runner": { + "version": "2.4.0", + "resolved": "https://registry.npmjs.org/loader-runner/-/loader-runner-2.4.0.tgz", + "integrity": "sha512-Jsmr89RcXGIwivFY21FcRrisYZfvLMTWx5kOLc+JTxtpBOG6xML0vzbc6SEQG2FO9/4Fc3wW4LVcB5DmGflaRw==", + "dev": true + }, + "loader-utils": { + "version": "1.2.3", + "resolved": 
"https://registry.npmjs.org/loader-utils/-/loader-utils-1.2.3.tgz", + "integrity": "sha512-fkpz8ejdnEMG3s37wGL07iSBDg99O9D5yflE9RGNH3hRdx9SOwYfnGYdZOUIZitN8E+E2vkq3MUMYMvPYl5ZZA==", + "dev": true, + "requires": { + "big.js": "^5.2.2", + "emojis-list": "^2.0.0", + "json5": "^1.0.1" + } + }, + "lodash": { + "version": "4.17.15", + "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.15.tgz", + "integrity": "sha512-8xOcRHvCjnocdS5cpwXQXVzmmh5e5+saE2QGoeQmbKmRS6J3VQppPOIt0MnmE+4xlZoumy0GPG0D0MVIQbNA1A==", + "dev": true + }, + "loose-envify": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/loose-envify/-/loose-envify-1.4.0.tgz", + "integrity": "sha512-lyuxPGr/Wfhrlem2CL/UcnUc1zcqKAImBDzukY7Y5F/yQiNdko6+fRLevlw1HgMySw7f611UIY408EtxRSoK3Q==", + "dev": true, + "requires": { + "js-tokens": "^3.0.0 || ^4.0.0" + } + }, + "lru-cache": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-5.1.1.tgz", + "integrity": "sha512-KpNARQA3Iwv+jTA0utUVVbrh+Jlrr1Fv0e56GGzAFOXN7dk/FviaDW8LHmK52DlcH4WP2n6gI8vN1aesBFgo9w==", + "dev": true, + "requires": { + "yallist": "^3.0.2" + } + }, + "make-dir": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/make-dir/-/make-dir-2.1.0.tgz", + "integrity": "sha512-LS9X+dc8KLxXCb8dni79fLIIUA5VyZoyjSMCwTluaXA0o27cCK0bhXkpgw+sTXVpPy/lSO57ilRixqk0vDmtRA==", + "dev": true, + "requires": { + "pify": "^4.0.1", + "semver": "^5.6.0" + }, + "dependencies": { + "pify": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/pify/-/pify-4.0.1.tgz", + "integrity": "sha512-uB80kBFb/tfd68bVleG9T5GGsGPjJrLAUpR5PZIrhBnIaRTQRjqdJSsIKkOP6OAIFbj7GOrcudc5pNjZ+geV2g==", + "dev": true + }, + "semver": { + "version": "5.7.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-5.7.1.tgz", + "integrity": "sha512-sauaDf/PZdVgrLTNYHRtpXa1iRiKcaebiKQ1BJdpQlWH2lCvexQdX55snPFyK7QzpudqbCI0qXFfOasHdyNDGQ==", + "dev": true + } + } + }, + "mamacro": { + "version": "0.0.3", + "resolved": 
"https://registry.npmjs.org/mamacro/-/mamacro-0.0.3.tgz", + "integrity": "sha512-qMEwh+UujcQ+kbz3T6V+wAmO2U8veoq2w+3wY8MquqwVA3jChfwY+Tk52GZKDfACEPjuZ7r2oJLejwpt8jtwTA==", + "dev": true + }, + "map-age-cleaner": { + "version": "0.1.3", + "resolved": "https://registry.npmjs.org/map-age-cleaner/-/map-age-cleaner-0.1.3.tgz", + "integrity": "sha512-bJzx6nMoP6PDLPBFmg7+xRKeFZvFboMrGlxmNj9ClvX53KrmvM5bXFXEWjbz4cz1AFn+jWJ9z/DJSz7hrs0w3w==", + "dev": true, + "requires": { + "p-defer": "^1.0.0" + } + }, + "map-cache": { + "version": "0.2.2", + "resolved": "https://registry.npmjs.org/map-cache/-/map-cache-0.2.2.tgz", + "integrity": "sha1-wyq9C9ZSXZsFFkW7TyasXcmKDb8=", + "dev": true + }, + "map-visit": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/map-visit/-/map-visit-1.0.0.tgz", + "integrity": "sha1-7Nyo8TFE5mDxtb1B8S80edmN+48=", + "dev": true, + "requires": { + "object-visit": "^1.0.0" + } + }, + "md5.js": { + "version": "1.3.5", + "resolved": "https://registry.npmjs.org/md5.js/-/md5.js-1.3.5.tgz", + "integrity": "sha512-xitP+WxNPcTTOgnTJcrhM0xvdPepipPSf3I8EIpGKeFLjt3PlJLIDG3u8EX53ZIubkb+5U2+3rELYpEhHhzdkg==", + "dev": true, + "requires": { + "hash-base": "^3.0.0", + "inherits": "^2.0.1", + "safe-buffer": "^5.1.2" + } + }, + "mem": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/mem/-/mem-4.3.0.tgz", + "integrity": "sha512-qX2bG48pTqYRVmDB37rn/6PT7LcR8T7oAX3bf99u1Tt1nzxYfxkgqDwUwolPlXweM0XzBOBFzSx4kfp7KP1s/w==", + "dev": true, + "requires": { + "map-age-cleaner": "^0.1.1", + "mimic-fn": "^2.0.0", + "p-is-promise": "^2.0.0" + }, + "dependencies": { + "mimic-fn": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/mimic-fn/-/mimic-fn-2.1.0.tgz", + "integrity": "sha512-OqbOk5oEQeAZ8WXWydlu9HJjz9WVdEIvamMCcXmuqUYjTknH/sqsWvhQ3vgwKFRR1HpjvNBKQ37nbJgYzGqGcg==", + "dev": true + } + } + }, + "memory-fs": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/memory-fs/-/memory-fs-0.4.1.tgz", + "integrity": 
"sha1-OpoguEYlI+RHz7x+i7gO1me/xVI=", + "dev": true, + "requires": { + "errno": "^0.1.3", + "readable-stream": "^2.0.1" + } + }, + "micromatch": { + "version": "3.1.10", + "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-3.1.10.tgz", + "integrity": "sha512-MWikgl9n9M3w+bpsY3He8L+w9eF9338xRl8IAO5viDizwSzziFEyUzo2xrrloB64ADbTf8uA8vRqqttDTOmccg==", + "dev": true, + "requires": { + "arr-diff": "^4.0.0", + "array-unique": "^0.3.2", + "braces": "^2.3.1", + "define-property": "^2.0.2", + "extend-shallow": "^3.0.2", + "extglob": "^2.0.4", + "fragment-cache": "^0.2.1", + "kind-of": "^6.0.2", + "nanomatch": "^1.2.9", + "object.pick": "^1.3.0", + "regex-not": "^1.0.0", + "snapdragon": "^0.8.1", + "to-regex": "^3.0.2" + } + }, + "miller-rabin": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/miller-rabin/-/miller-rabin-4.0.1.tgz", + "integrity": "sha512-115fLhvZVqWwHPbClyntxEVfVDfl9DLLTuJvq3g2O/Oxi8AiNouAHvDSzHS0viUJc+V5vm3eq91Xwqn9dp4jRA==", + "dev": true, + "requires": { + "bn.js": "^4.0.0", + "brorand": "^1.0.1" + } + }, + "minimalistic-assert": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/minimalistic-assert/-/minimalistic-assert-1.0.1.tgz", + "integrity": "sha512-UtJcAD4yEaGtjPezWuO9wC4nwUnVH/8/Im3yEHQP4b67cXlD/Qr9hdITCU1xDbSEXg2XKNaP8jsReV7vQd00/A==", + "dev": true + }, + "minimalistic-crypto-utils": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/minimalistic-crypto-utils/-/minimalistic-crypto-utils-1.0.1.tgz", + "integrity": "sha1-9sAMHAsIIkblxNmd+4x8CDsrWCo=", + "dev": true + }, + "minimatch": { + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.0.4.tgz", + "integrity": "sha512-yJHVQEhyqPLUTgt9B83PXu6W3rx4MvvHvSUvToogpwoGDOUQ+yDrR0HRot+yOCdCO7u4hX3pWft6kWBBcqh0UA==", + "dev": true, + "requires": { + "brace-expansion": "^1.1.7" + } + }, + "minimist": { + "version": "0.0.8", + "resolved": "https://registry.npmjs.org/minimist/-/minimist-0.0.8.tgz", + "integrity": 
"sha1-hX/Kv8M5fSYluCKCYuhqp6ARsF0=", + "dev": true + }, + "mississippi": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/mississippi/-/mississippi-3.0.0.tgz", + "integrity": "sha512-x471SsVjUtBRtcvd4BzKE9kFC+/2TeWgKCgw0bZcw1b9l2X3QX5vCWgF+KaZaYm87Ss//rHnWryupDrgLvmSkA==", + "dev": true, + "requires": { + "concat-stream": "^1.5.0", + "duplexify": "^3.4.2", + "end-of-stream": "^1.1.0", + "flush-write-stream": "^1.0.0", + "from2": "^2.1.0", + "parallel-transform": "^1.1.0", + "pump": "^3.0.0", + "pumpify": "^1.3.3", + "stream-each": "^1.1.0", + "through2": "^2.0.0" + } + }, + "mixin-deep": { + "version": "1.3.2", + "resolved": "https://registry.npmjs.org/mixin-deep/-/mixin-deep-1.3.2.tgz", + "integrity": "sha512-WRoDn//mXBiJ1H40rqa3vH0toePwSsGb45iInWlTySa+Uu4k3tYUSxa2v1KqAiLtvlrSzaExqS1gtk96A9zvEA==", + "dev": true, + "requires": { + "for-in": "^1.0.2", + "is-extendable": "^1.0.1" + }, + "dependencies": { + "is-extendable": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/is-extendable/-/is-extendable-1.0.1.tgz", + "integrity": "sha512-arnXMxT1hhoKo9k1LZdmlNyJdDDfy2v0fXjFlmok4+i8ul/6WlbVge9bhM74OpNPQPMGUToDtz+KXa1PneJxOA==", + "dev": true, + "requires": { + "is-plain-object": "^2.0.4" + } + } + } + }, + "mkdirp": { + "version": "0.5.1", + "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-0.5.1.tgz", + "integrity": "sha1-MAV0OOrGz3+MR2fzhkjWaX11yQM=", + "dev": true, + "requires": { + "minimist": "0.0.8" + } + }, + "move-concurrently": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/move-concurrently/-/move-concurrently-1.0.1.tgz", + "integrity": "sha1-viwAX9oy4LKa8fBdfEszIUxwH5I=", + "dev": true, + "requires": { + "aproba": "^1.1.1", + "copy-concurrently": "^1.0.0", + "fs-write-stream-atomic": "^1.0.8", + "mkdirp": "^0.5.1", + "rimraf": "^2.5.4", + "run-queue": "^1.0.3" + } + }, + "ms": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", + "integrity": 
"sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==", + "dev": true + }, + "nan": { + "version": "2.14.0", + "resolved": "https://registry.npmjs.org/nan/-/nan-2.14.0.tgz", + "integrity": "sha512-INOFj37C7k3AfaNTtX8RhsTw7qRy7eLET14cROi9+5HAVbbHuIWUHEauBv5qT4Av2tWasiTY1Jw6puUNqRJXQg==", + "dev": true, + "optional": true + }, + "nanomatch": { + "version": "1.2.13", + "resolved": "https://registry.npmjs.org/nanomatch/-/nanomatch-1.2.13.tgz", + "integrity": "sha512-fpoe2T0RbHwBTBUOftAfBPaDEi06ufaUai0mE6Yn1kacc3SnTErfb/h+X94VXzI64rKFHYImXSvdwGGCmwOqCA==", + "dev": true, + "requires": { + "arr-diff": "^4.0.0", + "array-unique": "^0.3.2", + "define-property": "^2.0.2", + "extend-shallow": "^3.0.2", + "fragment-cache": "^0.2.1", + "is-windows": "^1.0.2", + "kind-of": "^6.0.2", + "object.pick": "^1.3.0", + "regex-not": "^1.0.0", + "snapdragon": "^0.8.1", + "to-regex": "^3.0.1" + } + }, + "neo-async": { + "version": "2.6.1", + "resolved": "https://registry.npmjs.org/neo-async/-/neo-async-2.6.1.tgz", + "integrity": "sha512-iyam8fBuCUpWeKPGpaNMetEocMt364qkCsfL9JuhjXX6dRnguRVOfk2GZaDpPjcOKiiXCPINZC1GczQ7iTq3Zw==", + "dev": true + }, + "nice-try": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/nice-try/-/nice-try-1.0.5.tgz", + "integrity": "sha512-1nh45deeb5olNY7eX82BkPO7SSxR5SSYJiPTrTdFUVYwAl8CKMA5N9PjTYkHiRjisVcxcQ1HXdLhx2qxxJzLNQ==", + "dev": true + }, + "node-libs-browser": { + "version": "2.2.1", + "resolved": "https://registry.npmjs.org/node-libs-browser/-/node-libs-browser-2.2.1.tgz", + "integrity": "sha512-h/zcD8H9kaDZ9ALUWwlBUDo6TKF8a7qBSCSEGfjTVIYeqsioSKaAX+BN7NgiMGp6iSIXZ3PxgCu8KS3b71YK5Q==", + "dev": true, + "requires": { + "assert": "^1.1.1", + "browserify-zlib": "^0.2.0", + "buffer": "^4.3.0", + "console-browserify": "^1.1.0", + "constants-browserify": "^1.0.0", + "crypto-browserify": "^3.11.0", + "domain-browser": "^1.1.1", + "events": "^3.0.0", + "https-browserify": "^1.0.0", + "os-browserify": 
"^0.3.0", + "path-browserify": "0.0.1", + "process": "^0.11.10", + "punycode": "^1.2.4", + "querystring-es3": "^0.2.0", + "readable-stream": "^2.3.3", + "stream-browserify": "^2.0.1", + "stream-http": "^2.7.2", + "string_decoder": "^1.0.0", + "timers-browserify": "^2.0.4", + "tty-browserify": "0.0.0", + "url": "^0.11.0", + "util": "^0.11.0", + "vm-browserify": "^1.0.1" + }, + "dependencies": { + "punycode": { + "version": "1.4.1", + "resolved": "https://registry.npmjs.org/punycode/-/punycode-1.4.1.tgz", + "integrity": "sha1-wNWmOycYgArY4esPpSachN1BhF4=", + "dev": true + } + } + }, + "node-releases": { + "version": "1.1.50", + "resolved": "https://registry.npmjs.org/node-releases/-/node-releases-1.1.50.tgz", + "integrity": "sha512-lgAmPv9eYZ0bGwUYAKlr8MG6K4CvWliWqnkcT2P8mMAgVrH3lqfBPorFlxiG1pHQnqmavJZ9vbMXUTNyMLbrgQ==", + "dev": true, + "requires": { + "semver": "^6.3.0" + }, + "dependencies": { + "semver": { + "version": "6.3.0", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.0.tgz", + "integrity": "sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw==", + "dev": true + } + } + }, + "normalize-path": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/normalize-path/-/normalize-path-3.0.0.tgz", + "integrity": "sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA==", + "dev": true + }, + "npm-run-path": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/npm-run-path/-/npm-run-path-2.0.2.tgz", + "integrity": "sha1-NakjLfo11wZ7TLLd8jV7GHFTbF8=", + "dev": true, + "requires": { + "path-key": "^2.0.0" + } + }, + "object-assign": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/object-assign/-/object-assign-4.1.1.tgz", + "integrity": "sha1-IQmtx5ZYh8/AXLvUQsrIv7s2CGM=", + "dev": true + }, + "object-copy": { + "version": "0.1.0", + "resolved": "https://registry.npmjs.org/object-copy/-/object-copy-0.1.0.tgz", + "integrity": 
"sha1-fn2Fi3gb18mRpBupde04EnVOmYw=", + "dev": true, + "requires": { + "copy-descriptor": "^0.1.0", + "define-property": "^0.2.5", + "kind-of": "^3.0.3" + }, + "dependencies": { + "define-property": { + "version": "0.2.5", + "resolved": "https://registry.npmjs.org/define-property/-/define-property-0.2.5.tgz", + "integrity": "sha1-w1se+RjsPJkPmlvFe+BKrOxcgRY=", + "dev": true, + "requires": { + "is-descriptor": "^0.1.0" + } + }, + "kind-of": { + "version": "3.2.2", + "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-3.2.2.tgz", + "integrity": "sha1-MeohpzS6ubuw8yRm2JOupR5KPGQ=", + "dev": true, + "requires": { + "is-buffer": "^1.1.5" + } + } + } + }, + "object-keys": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/object-keys/-/object-keys-1.1.1.tgz", + "integrity": "sha512-NuAESUOUMrlIXOfHKzD6bpPu3tYt3xvjNdRIQ+FeT0lNb4K8WR70CaDxhuNguS2XG+GjkyMwOzsN5ZktImfhLA==", + "dev": true + }, + "object-visit": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/object-visit/-/object-visit-1.0.1.tgz", + "integrity": "sha1-95xEk68MU3e1n+OdOV5BBC3QRbs=", + "dev": true, + "requires": { + "isobject": "^3.0.0" + } + }, + "object.assign": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/object.assign/-/object.assign-4.1.0.tgz", + "integrity": "sha512-exHJeq6kBKj58mqGyTQ9DFvrZC/eR6OwxzoM9YRoGBqrXYonaFyGiFMuc9VZrXf7DarreEwMpurG3dd+CNyW5w==", + "dev": true, + "requires": { + "define-properties": "^1.1.2", + "function-bind": "^1.1.1", + "has-symbols": "^1.0.0", + "object-keys": "^1.0.11" + } + }, + "object.pick": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/object.pick/-/object.pick-1.3.0.tgz", + "integrity": "sha1-h6EKxMFpS9Lhy/U1kaZhQftd10c=", + "dev": true, + "requires": { + "isobject": "^3.0.1" + } + }, + "once": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz", + "integrity": "sha1-WDsap3WWHUsROsF9nFC6753Xa9E=", + "dev": true, + "requires": { + "wrappy": "1" + } + }, + 
"os-browserify": { + "version": "0.3.0", + "resolved": "https://registry.npmjs.org/os-browserify/-/os-browserify-0.3.0.tgz", + "integrity": "sha1-hUNzx/XCMVkU/Jv8a9gjj92h7Cc=", + "dev": true + }, + "os-locale": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/os-locale/-/os-locale-3.1.0.tgz", + "integrity": "sha512-Z8l3R4wYWM40/52Z+S265okfFj8Kt2cC2MKY+xNi3kFs+XGI7WXu/I309QQQYbRW4ijiZ+yxs9pqEhJh0DqW3Q==", + "dev": true, + "requires": { + "execa": "^1.0.0", + "lcid": "^2.0.0", + "mem": "^4.0.0" + } + }, + "p-defer": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/p-defer/-/p-defer-1.0.0.tgz", + "integrity": "sha1-n26xgvbJqozXQwBKfU+WsZaw+ww=", + "dev": true + }, + "p-finally": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/p-finally/-/p-finally-1.0.0.tgz", + "integrity": "sha1-P7z7FbiZpEEjs0ttzBi3JDNqLK4=", + "dev": true + }, + "p-is-promise": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/p-is-promise/-/p-is-promise-2.1.0.tgz", + "integrity": "sha512-Y3W0wlRPK8ZMRbNq97l4M5otioeA5lm1z7bkNkxCka8HSPjR0xRWmpCmc9utiaLP9Jb1eD8BgeIxTW4AIF45Pg==", + "dev": true + }, + "pako": { + "version": "1.0.11", + "resolved": "https://registry.npmjs.org/pako/-/pako-1.0.11.tgz", + "integrity": "sha512-4hLB8Py4zZce5s4yd9XzopqwVv/yGNhV1Bl8NTmCq1763HeK2+EwVTv+leGeL13Dnh2wfbqowVPXCIO0z4taYw==", + "dev": true + }, + "parallel-transform": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/parallel-transform/-/parallel-transform-1.2.0.tgz", + "integrity": "sha512-P2vSmIu38uIlvdcU7fDkyrxj33gTUy/ABO5ZUbGowxNCopBq/OoD42bP4UmMrJoPyk4Uqf0mu3mtWBhHCZD8yg==", + "dev": true, + "requires": { + "cyclist": "^1.0.1", + "inherits": "^2.0.3", + "readable-stream": "^2.1.5" + } + }, + "parse-asn1": { + "version": "5.1.5", + "resolved": "https://registry.npmjs.org/parse-asn1/-/parse-asn1-5.1.5.tgz", + "integrity": "sha512-jkMYn1dcJqF6d5CpU689bq7w/b5ALS9ROVSpQDPrZsqqesUJii9qutvoT5ltGedNXMO2e16YUWIghG9KxaViTQ==", + "dev": 
true, + "requires": { + "asn1.js": "^4.0.0", + "browserify-aes": "^1.0.0", + "create-hash": "^1.1.0", + "evp_bytestokey": "^1.0.0", + "pbkdf2": "^3.0.3", + "safe-buffer": "^5.1.1" + } + }, + "parse-passwd": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/parse-passwd/-/parse-passwd-1.0.0.tgz", + "integrity": "sha1-bVuTSkVpk7I9N/QKOC1vFmao5cY=", + "dev": true + }, + "pascalcase": { + "version": "0.1.1", + "resolved": "https://registry.npmjs.org/pascalcase/-/pascalcase-0.1.1.tgz", + "integrity": "sha1-s2PlXoAGym/iF4TS2yK9FdeRfxQ=", + "dev": true + }, + "path-browserify": { + "version": "0.0.1", + "resolved": "https://registry.npmjs.org/path-browserify/-/path-browserify-0.0.1.tgz", + "integrity": "sha512-BapA40NHICOS+USX9SN4tyhq+A2RrN/Ws5F0Z5aMHDp98Fl86lX8Oti8B7uN93L4Ifv4fHOEA+pQw87gmMO/lQ==", + "dev": true + }, + "path-dirname": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/path-dirname/-/path-dirname-1.0.2.tgz", + "integrity": "sha1-zDPSTVJeCZpTiMAzbG4yuRYGCeA=", + "dev": true + }, + "path-exists": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-3.0.0.tgz", + "integrity": "sha1-zg6+ql94yxiSXqfYENe1mwEP1RU=", + "dev": true + }, + "path-is-absolute": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/path-is-absolute/-/path-is-absolute-1.0.1.tgz", + "integrity": "sha1-F0uSaHNVNP+8es5r9TpanhtcX18=", + "dev": true + }, + "path-key": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/path-key/-/path-key-2.0.1.tgz", + "integrity": "sha1-QRyttXTFoUDTpLGRDUDYDMn0C0A=", + "dev": true + }, + "path-parse": { + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/path-parse/-/path-parse-1.0.6.tgz", + "integrity": "sha512-GSmOT2EbHrINBf9SR7CDELwlJ8AENk3Qn7OikK4nFYAu3Ote2+JYNVvkpAEQm3/TLNEJFD/xZJjzyxg3KBWOzw==", + "dev": true + }, + "pbkdf2": { + "version": "3.0.17", + "resolved": "https://registry.npmjs.org/pbkdf2/-/pbkdf2-3.0.17.tgz", + "integrity": 
"sha512-U/il5MsrZp7mGg3mSQfn742na2T+1/vHDCG5/iTI3X9MKUuYUZVLQhyRsg06mCgDBTd57TxzgZt7P+fYfjRLtA==", + "dev": true, + "requires": { + "create-hash": "^1.1.2", + "create-hmac": "^1.1.4", + "ripemd160": "^2.0.1", + "safe-buffer": "^5.0.1", + "sha.js": "^2.4.8" + } + }, + "pify": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/pify/-/pify-4.0.1.tgz", + "integrity": "sha512-uB80kBFb/tfd68bVleG9T5GGsGPjJrLAUpR5PZIrhBnIaRTQRjqdJSsIKkOP6OAIFbj7GOrcudc5pNjZ+geV2g==", + "dev": true + }, + "posix-character-classes": { + "version": "0.1.1", + "resolved": "https://registry.npmjs.org/posix-character-classes/-/posix-character-classes-0.1.1.tgz", + "integrity": "sha1-AerA/jta9xoqbAL+q7jB/vfgDqs=", + "dev": true + }, + "private": { + "version": "0.1.8", + "resolved": "https://registry.npmjs.org/private/-/private-0.1.8.tgz", + "integrity": "sha512-VvivMrbvd2nKkiG38qjULzlc+4Vx4wm/whI9pQD35YrARNnhxeiRktSOhSukRLFNlzg6Br/cJPet5J/u19r/mg==", + "dev": true + }, + "process": { + "version": "0.11.10", + "resolved": "https://registry.npmjs.org/process/-/process-0.11.10.tgz", + "integrity": "sha1-czIwDoQBYb2j5podHZGn1LwW8YI=", + "dev": true + }, + "process-nextick-args": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/process-nextick-args/-/process-nextick-args-2.0.1.tgz", + "integrity": "sha512-3ouUOpQhtgrbOa17J7+uxOTpITYWaGP7/AhoR3+A+/1e9skrzelGi/dXzEYyvbxubEF6Wn2ypscTKiKJFFn1ag==", + "dev": true + }, + "promise-inflight": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/promise-inflight/-/promise-inflight-1.0.1.tgz", + "integrity": "sha1-mEcocL8igTL8vdhoEputEsPAKeM=", + "dev": true + }, + "prr": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/prr/-/prr-1.0.1.tgz", + "integrity": "sha1-0/wRS6BplaRexok/SEzrHXj19HY=", + "dev": true + }, + "public-encrypt": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/public-encrypt/-/public-encrypt-4.0.3.tgz", + "integrity": 
"sha512-zVpa8oKZSz5bTMTFClc1fQOnyyEzpl5ozpi1B5YcvBrdohMjH2rfsBtyXcuNuwjsDIXmBYlF2N5FlJYhR29t8Q==", + "dev": true, + "requires": { + "bn.js": "^4.1.0", + "browserify-rsa": "^4.0.0", + "create-hash": "^1.1.0", + "parse-asn1": "^5.0.0", + "randombytes": "^2.0.1", + "safe-buffer": "^5.1.2" + } + }, + "pump": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/pump/-/pump-3.0.0.tgz", + "integrity": "sha512-LwZy+p3SFs1Pytd/jYct4wpv49HiYCqd9Rlc5ZVdk0V+8Yzv6jR5Blk3TRmPL1ft69TxP0IMZGJ+WPFU2BFhww==", + "dev": true, + "requires": { + "end-of-stream": "^1.1.0", + "once": "^1.3.1" + } + }, + "pumpify": { + "version": "1.5.1", + "resolved": "https://registry.npmjs.org/pumpify/-/pumpify-1.5.1.tgz", + "integrity": "sha512-oClZI37HvuUJJxSKKrC17bZ9Cu0ZYhEAGPsPUy9KlMUmv9dKX2o77RUmq7f3XjIxbwyGwYzbzQ1L2Ks8sIradQ==", + "dev": true, + "requires": { + "duplexify": "^3.6.0", + "inherits": "^2.0.3", + "pump": "^2.0.0" + }, + "dependencies": { + "pump": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/pump/-/pump-2.0.1.tgz", + "integrity": "sha512-ruPMNRkN3MHP1cWJc9OWr+T/xDP0jhXYCLfJcBuX54hhfIBnaQmAUMfDcG4DM5UMWByBbJY69QSphm3jtDKIkA==", + "dev": true, + "requires": { + "end-of-stream": "^1.1.0", + "once": "^1.3.1" + } + } + } + }, + "punycode": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/punycode/-/punycode-2.1.1.tgz", + "integrity": "sha512-XRsRjdf+j5ml+y/6GKHPZbrF/8p2Yga0JPtdqTIY2Xe5ohJPD9saDJJLPvp9+NSBprVvevdXZybnj2cv8OEd0A==", + "dev": true + }, + "querystring": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/querystring/-/querystring-0.2.0.tgz", + "integrity": "sha1-sgmEkgO7Jd+CDadW50cAWHhSFiA=", + "dev": true + }, + "querystring-es3": { + "version": "0.2.1", + "resolved": "https://registry.npmjs.org/querystring-es3/-/querystring-es3-0.2.1.tgz", + "integrity": "sha1-nsYfeQSYdXB9aUFFlv2Qek1xHnM=", + "dev": true + }, + "randombytes": { + "version": "2.1.0", + "resolved": 
"https://registry.npmjs.org/randombytes/-/randombytes-2.1.0.tgz", + "integrity": "sha512-vYl3iOX+4CKUWuxGi9Ukhie6fsqXqS9FE2Zaic4tNFD2N2QQaXOMFbuKK4QmDHC0JO6B1Zp41J0LpT0oR68amQ==", + "dev": true, + "requires": { + "safe-buffer": "^5.1.0" + } + }, + "randomfill": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/randomfill/-/randomfill-1.0.4.tgz", + "integrity": "sha512-87lcbR8+MhcWcUiQ+9e+Rwx8MyR2P7qnt15ynUlbm3TU/fjbgz4GsvfSUDTemtCCtVCqb4ZcEFlyPNTh9bBTLw==", + "dev": true, + "requires": { + "randombytes": "^2.0.5", + "safe-buffer": "^5.1.0" + } + }, + "readable-stream": { + "version": "2.3.7", + "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-2.3.7.tgz", + "integrity": "sha512-Ebho8K4jIbHAxnuxi7o42OrZgF/ZTNcsZj6nRKyUmkhLFq8CHItp/fy6hQZuZmP/n3yZ9VBUbp4zz/mX8hmYPw==", + "dev": true, + "requires": { + "core-util-is": "~1.0.0", + "inherits": "~2.0.3", + "isarray": "~1.0.0", + "process-nextick-args": "~2.0.0", + "safe-buffer": "~5.1.1", + "string_decoder": "~1.1.1", + "util-deprecate": "~1.0.1" + } + }, + "readdirp": { + "version": "2.2.1", + "resolved": "https://registry.npmjs.org/readdirp/-/readdirp-2.2.1.tgz", + "integrity": "sha512-1JU/8q+VgFZyxwrJ+SVIOsh+KywWGpds3NTqikiKpDMZWScmAYyKIgqkO+ARvNWJfXeXR1zxz7aHF4u4CyH6vQ==", + "dev": true, + "requires": { + "graceful-fs": "^4.1.11", + "micromatch": "^3.1.10", + "readable-stream": "^2.0.2" + } + }, + "regenerate": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/regenerate/-/regenerate-1.4.0.tgz", + "integrity": "sha512-1G6jJVDWrt0rK99kBjvEtziZNCICAuvIPkSiUFIQxVP06RCVpq3dmDo2oi6ABpYaDYaTRr67BEhL8r1wgEZZKg==", + "dev": true + }, + "regenerate-unicode-properties": { + "version": "8.1.0", + "resolved": "https://registry.npmjs.org/regenerate-unicode-properties/-/regenerate-unicode-properties-8.1.0.tgz", + "integrity": "sha512-LGZzkgtLY79GeXLm8Dp0BVLdQlWICzBnJz/ipWUgo59qBaZ+BHtq51P2q1uVZlppMuUAT37SDk39qUbjTWB7bA==", + "dev": true, + "requires": { + "regenerate": 
"^1.4.0" + } + }, + "regenerator-transform": { + "version": "0.14.1", + "resolved": "https://registry.npmjs.org/regenerator-transform/-/regenerator-transform-0.14.1.tgz", + "integrity": "sha512-flVuee02C3FKRISbxhXl9mGzdbWUVHubl1SMaknjxkFB1/iqpJhArQUvRxOOPEc/9tAiX0BaQ28FJH10E4isSQ==", + "dev": true, + "requires": { + "private": "^0.1.6" + } + }, + "regex-not": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/regex-not/-/regex-not-1.0.2.tgz", + "integrity": "sha512-J6SDjUgDxQj5NusnOtdFxDwN/+HWykR8GELwctJ7mdqhcyy1xEc4SRFHUXvxTp661YaVKAjfRLZ9cCqS6tn32A==", + "dev": true, + "requires": { + "extend-shallow": "^3.0.2", + "safe-regex": "^1.1.0" + } + }, + "regexpu-core": { + "version": "4.6.0", + "resolved": "https://registry.npmjs.org/regexpu-core/-/regexpu-core-4.6.0.tgz", + "integrity": "sha512-YlVaefl8P5BnFYOITTNzDvan1ulLOiXJzCNZxduTIosN17b87h3bvG9yHMoHaRuo88H4mQ06Aodj5VtYGGGiTg==", + "dev": true, + "requires": { + "regenerate": "^1.4.0", + "regenerate-unicode-properties": "^8.1.0", + "regjsgen": "^0.5.0", + "regjsparser": "^0.6.0", + "unicode-match-property-ecmascript": "^1.0.4", + "unicode-match-property-value-ecmascript": "^1.1.0" + } + }, + "regjsgen": { + "version": "0.5.1", + "resolved": "https://registry.npmjs.org/regjsgen/-/regjsgen-0.5.1.tgz", + "integrity": "sha512-5qxzGZjDs9w4tzT3TPhCJqWdCc3RLYwy9J2NB0nm5Lz+S273lvWcpjaTGHsT1dc6Hhfq41uSEOw8wBmxrKOuyg==", + "dev": true + }, + "regjsparser": { + "version": "0.6.3", + "resolved": "https://registry.npmjs.org/regjsparser/-/regjsparser-0.6.3.tgz", + "integrity": "sha512-8uZvYbnfAtEm9Ab8NTb3hdLwL4g/LQzEYP7Xs27T96abJCCE2d6r3cPZPQEsLKy0vRSGVNG+/zVGtLr86HQduA==", + "dev": true, + "requires": { + "jsesc": "~0.5.0" + }, + "dependencies": { + "jsesc": { + "version": "0.5.0", + "resolved": "https://registry.npmjs.org/jsesc/-/jsesc-0.5.0.tgz", + "integrity": "sha1-597mbjXW/Bb3EP6R1c9p9w8IkR0=", + "dev": true + } + } + }, + "remove-trailing-separator": { + "version": "1.1.0", + "resolved": 
"https://registry.npmjs.org/remove-trailing-separator/-/remove-trailing-separator-1.1.0.tgz", + "integrity": "sha1-wkvOKig62tW8P1jg1IJJuSN52O8=", + "dev": true + }, + "repeat-element": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/repeat-element/-/repeat-element-1.1.3.tgz", + "integrity": "sha512-ahGq0ZnV5m5XtZLMb+vP76kcAM5nkLqk0lpqAuojSKGgQtn4eRi4ZZGm2olo2zKFH+sMsWaqOCW1dqAnOru72g==", + "dev": true + }, + "repeat-string": { + "version": "1.6.1", + "resolved": "https://registry.npmjs.org/repeat-string/-/repeat-string-1.6.1.tgz", + "integrity": "sha1-jcrkcOHIirwtYA//Sndihtp15jc=", + "dev": true + }, + "require-directory": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/require-directory/-/require-directory-2.1.1.tgz", + "integrity": "sha1-jGStX9MNqxyXbiNE/+f3kqam30I=", + "dev": true + }, + "require-main-filename": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/require-main-filename/-/require-main-filename-2.0.0.tgz", + "integrity": "sha512-NKN5kMDylKuldxYLSUfrbo5Tuzh4hd+2E8NPPX02mZtn1VuREQToYe/ZdlJy+J3uCpfaiGF05e7B8W0iXbQHmg==", + "dev": true + }, + "resolve": { + "version": "1.15.1", + "resolved": "https://registry.npmjs.org/resolve/-/resolve-1.15.1.tgz", + "integrity": "sha512-84oo6ZTtoTUpjgNEr5SJyzQhzL72gaRodsSfyxC/AXRvwu0Yse9H8eF9IpGo7b8YetZhlI6v7ZQ6bKBFV/6S7w==", + "dev": true, + "requires": { + "path-parse": "^1.0.6" + } + }, + "resolve-cwd": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/resolve-cwd/-/resolve-cwd-2.0.0.tgz", + "integrity": "sha1-AKn3OHVW4nA46uIyyqNypqWbZlo=", + "dev": true, + "requires": { + "resolve-from": "^3.0.0" + }, + "dependencies": { + "resolve-from": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-3.0.0.tgz", + "integrity": "sha1-six699nWiBvItuZTM17rywoYh0g=", + "dev": true + } + } + }, + "resolve-dir": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/resolve-dir/-/resolve-dir-1.0.1.tgz", + 
"integrity": "sha1-eaQGRMNivoLybv/nOcm7U4IEb0M=", + "dev": true, + "requires": { + "expand-tilde": "^2.0.0", + "global-modules": "^1.0.0" + }, + "dependencies": { + "global-modules": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/global-modules/-/global-modules-1.0.0.tgz", + "integrity": "sha512-sKzpEkf11GpOFuw0Zzjzmt4B4UZwjOcG757PPvrfhxcLFbq0wpsgpOqxpxtxFiCG4DtG93M6XRVbF2oGdev7bg==", + "dev": true, + "requires": { + "global-prefix": "^1.0.1", + "is-windows": "^1.0.1", + "resolve-dir": "^1.0.0" + } + } + } + }, + "resolve-url": { + "version": "0.2.1", + "resolved": "https://registry.npmjs.org/resolve-url/-/resolve-url-0.2.1.tgz", + "integrity": "sha1-LGN/53yJOv0qZj/iGqkIAGjiBSo=", + "dev": true + }, + "ret": { + "version": "0.1.15", + "resolved": "https://registry.npmjs.org/ret/-/ret-0.1.15.tgz", + "integrity": "sha512-TTlYpa+OL+vMMNG24xSlQGEJ3B/RzEfUlLct7b5G/ytav+wPrplCpVMFuwzXbkecJrb6IYo1iFb0S9v37754mg==", + "dev": true + }, + "rimraf": { + "version": "2.6.3", + "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-2.6.3.tgz", + "integrity": "sha512-mwqeW5XsA2qAejG46gYdENaxXjx9onRNCfn7L0duuP4hCuTIi/QO7PDK07KJfp1d+izWPrzEJDcSqBa0OZQriA==", + "dev": true, + "requires": { + "glob": "^7.1.3" + } + }, + "ripemd160": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/ripemd160/-/ripemd160-2.0.2.tgz", + "integrity": "sha512-ii4iagi25WusVoiC4B4lq7pbXfAp3D9v5CwfkY33vffw2+pkDjY1D8GaN7spsxvCSx8dkPqOZCEZyfxcmJG2IA==", + "dev": true, + "requires": { + "hash-base": "^3.0.0", + "inherits": "^2.0.1" + } + }, + "run-queue": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/run-queue/-/run-queue-1.0.3.tgz", + "integrity": "sha1-6Eg5bwV9Ij8kOGkkYY4laUFh7Ec=", + "dev": true, + "requires": { + "aproba": "^1.1.1" + } + }, + "safe-buffer": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz", + "integrity": 
"sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==", + "dev": true + }, + "safe-regex": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/safe-regex/-/safe-regex-1.1.0.tgz", + "integrity": "sha1-QKNmnzsHfR6UPURinhV91IAjvy4=", + "dev": true, + "requires": { + "ret": "~0.1.10" + } + }, + "schema-utils": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-1.0.0.tgz", + "integrity": "sha512-i27Mic4KovM/lnGsy8whRCHhc7VicJajAjTrYg11K9zfZXnYIt4k5F+kZkwjnrhKzLic/HLU4j11mjsz2G/75g==", + "dev": true, + "requires": { + "ajv": "^6.1.0", + "ajv-errors": "^1.0.0", + "ajv-keywords": "^3.1.0" + } + }, + "semver": { + "version": "5.7.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-5.7.1.tgz", + "integrity": "sha512-sauaDf/PZdVgrLTNYHRtpXa1iRiKcaebiKQ1BJdpQlWH2lCvexQdX55snPFyK7QzpudqbCI0qXFfOasHdyNDGQ==", + "dev": true + }, + "serialize-javascript": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/serialize-javascript/-/serialize-javascript-2.1.2.tgz", + "integrity": "sha512-rs9OggEUF0V4jUSecXazOYsLfu7OGK2qIn3c7IPBiffz32XniEp/TX9Xmc9LQfK2nQ2QKHvZ2oygKUGU0lG4jQ==", + "dev": true + }, + "set-blocking": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/set-blocking/-/set-blocking-2.0.0.tgz", + "integrity": "sha1-BF+XgtARrppoA93TgrJDkrPYkPc=", + "dev": true + }, + "set-value": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/set-value/-/set-value-2.0.1.tgz", + "integrity": "sha512-JxHc1weCN68wRY0fhCoXpyK55m/XPHafOmK4UWD7m2CI14GMcFypt4w/0+NV5f/ZMby2F6S2wwA7fgynh9gWSw==", + "dev": true, + "requires": { + "extend-shallow": "^2.0.1", + "is-extendable": "^0.1.1", + "is-plain-object": "^2.0.3", + "split-string": "^3.0.1" + }, + "dependencies": { + "extend-shallow": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-2.0.1.tgz", + "integrity": "sha1-Ua99YUrZqfYQ6huvu5idaxxWiQ8=", + 
"dev": true, + "requires": { + "is-extendable": "^0.1.0" + } + } + } + }, + "setimmediate": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/setimmediate/-/setimmediate-1.0.5.tgz", + "integrity": "sha1-KQy7Iy4waULX1+qbg3Mqt4VvgoU=", + "dev": true + }, + "sha.js": { + "version": "2.4.11", + "resolved": "https://registry.npmjs.org/sha.js/-/sha.js-2.4.11.tgz", + "integrity": "sha512-QMEp5B7cftE7APOjk5Y6xgrbWu+WkLVQwk8JNjZ8nKRciZaByEW6MubieAiToS7+dwvrjGhH8jRXz3MVd0AYqQ==", + "dev": true, + "requires": { + "inherits": "^2.0.1", + "safe-buffer": "^5.0.1" + } + }, + "shebang-command": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-1.2.0.tgz", + "integrity": "sha1-RKrGW2lbAzmJaMOfNj/uXer98eo=", + "dev": true, + "requires": { + "shebang-regex": "^1.0.0" + } + }, + "shebang-regex": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-1.0.0.tgz", + "integrity": "sha1-2kL0l0DAtC2yypcoVxyxkMmO/qM=", + "dev": true + }, + "signal-exit": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-3.0.2.tgz", + "integrity": "sha1-tf3AjxKH6hF4Yo5BXiUTK3NkbG0=", + "dev": true + }, + "snapdragon": { + "version": "0.8.2", + "resolved": "https://registry.npmjs.org/snapdragon/-/snapdragon-0.8.2.tgz", + "integrity": "sha512-FtyOnWN/wCHTVXOMwvSv26d+ko5vWlIDD6zoUJ7LW8vh+ZBC8QdljveRP+crNrtBwioEUWy/4dMtbBjA4ioNlg==", + "dev": true, + "requires": { + "base": "^0.11.1", + "debug": "^2.2.0", + "define-property": "^0.2.5", + "extend-shallow": "^2.0.1", + "map-cache": "^0.2.2", + "source-map": "^0.5.6", + "source-map-resolve": "^0.5.0", + "use": "^3.1.0" + }, + "dependencies": { + "debug": { + "version": "2.6.9", + "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", + "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", + "dev": true, + "requires": { + "ms": "2.0.0" + } + }, + 
"define-property": { + "version": "0.2.5", + "resolved": "https://registry.npmjs.org/define-property/-/define-property-0.2.5.tgz", + "integrity": "sha1-w1se+RjsPJkPmlvFe+BKrOxcgRY=", + "dev": true, + "requires": { + "is-descriptor": "^0.1.0" + } + }, + "extend-shallow": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-2.0.1.tgz", + "integrity": "sha1-Ua99YUrZqfYQ6huvu5idaxxWiQ8=", + "dev": true, + "requires": { + "is-extendable": "^0.1.0" + } + }, + "ms": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", + "integrity": "sha1-VgiurfwAvmwpAd9fmGF4jeDVl8g=", + "dev": true + } + } + }, + "snapdragon-node": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/snapdragon-node/-/snapdragon-node-2.1.1.tgz", + "integrity": "sha512-O27l4xaMYt/RSQ5TR3vpWCAB5Kb/czIcqUFOM/C4fYcLnbZUc1PkjTAMjof2pBWaSTwOUd6qUHcFGVGj7aIwnw==", + "dev": true, + "requires": { + "define-property": "^1.0.0", + "isobject": "^3.0.0", + "snapdragon-util": "^3.0.1" + }, + "dependencies": { + "define-property": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/define-property/-/define-property-1.0.0.tgz", + "integrity": "sha1-dp66rz9KY6rTr56NMEybvnm/sOY=", + "dev": true, + "requires": { + "is-descriptor": "^1.0.0" + } + }, + "is-accessor-descriptor": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/is-accessor-descriptor/-/is-accessor-descriptor-1.0.0.tgz", + "integrity": "sha512-m5hnHTkcVsPfqx3AKlyttIPb7J+XykHvJP2B9bZDjlhLIoEq4XoK64Vg7boZlVWYK6LUY94dYPEE7Lh0ZkZKcQ==", + "dev": true, + "requires": { + "kind-of": "^6.0.0" + } + }, + "is-data-descriptor": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/is-data-descriptor/-/is-data-descriptor-1.0.0.tgz", + "integrity": "sha512-jbRXy1FmtAoCjQkVmIVYwuuqDFUbaOeDjmed1tOGPrsMhtJA4rD9tkgA0F1qJ3gRFRXcHYVkdeaP50Q5rE/jLQ==", + "dev": true, + "requires": { + "kind-of": "^6.0.0" + } + }, + "is-descriptor": { + "version": 
"1.0.2", + "resolved": "https://registry.npmjs.org/is-descriptor/-/is-descriptor-1.0.2.tgz", + "integrity": "sha512-2eis5WqQGV7peooDyLmNEPUrps9+SXX5c9pL3xEB+4e9HnGuDa7mB7kHxHw4CbqS9k1T2hOH3miL8n8WtiYVtg==", + "dev": true, + "requires": { + "is-accessor-descriptor": "^1.0.0", + "is-data-descriptor": "^1.0.0", + "kind-of": "^6.0.2" + } + } + } + }, + "snapdragon-util": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/snapdragon-util/-/snapdragon-util-3.0.1.tgz", + "integrity": "sha512-mbKkMdQKsjX4BAL4bRYTj21edOf8cN7XHdYUJEe+Zn99hVEYcMvKPct1IqNe7+AZPirn8BCDOQBHQZknqmKlZQ==", + "dev": true, + "requires": { + "kind-of": "^3.2.0" + }, + "dependencies": { + "kind-of": { + "version": "3.2.2", + "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-3.2.2.tgz", + "integrity": "sha1-MeohpzS6ubuw8yRm2JOupR5KPGQ=", + "dev": true, + "requires": { + "is-buffer": "^1.1.5" + } + } + } + }, + "source-list-map": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/source-list-map/-/source-list-map-2.0.1.tgz", + "integrity": "sha512-qnQ7gVMxGNxsiL4lEuJwe/To8UnK7fAnmbGEEH8RpLouuKbeEm0lhbQVFIrNSuB+G7tVrAlVsZgETT5nljf+Iw==", + "dev": true + }, + "source-map": { + "version": "0.5.7", + "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.5.7.tgz", + "integrity": "sha1-igOdLRAh0i0eoUyA2OpGi6LvP8w=", + "dev": true + }, + "source-map-resolve": { + "version": "0.5.3", + "resolved": "https://registry.npmjs.org/source-map-resolve/-/source-map-resolve-0.5.3.tgz", + "integrity": "sha512-Htz+RnsXWk5+P2slx5Jh3Q66vhQj1Cllm0zvnaY98+NFx+Dv2CF/f5O/t8x+KaNdrdIAsruNzoh/KpialbqAnw==", + "dev": true, + "requires": { + "atob": "^2.1.2", + "decode-uri-component": "^0.2.0", + "resolve-url": "^0.2.1", + "source-map-url": "^0.4.0", + "urix": "^0.1.0" + } + }, + "source-map-support": { + "version": "0.5.16", + "resolved": "https://registry.npmjs.org/source-map-support/-/source-map-support-0.5.16.tgz", + "integrity": 
"sha512-efyLRJDr68D9hBBNIPWFjhpFzURh+KJykQwvMyW5UiZzYwoF6l4YMMDIJJEyFWxWCqfyxLzz6tSfUFR+kXXsVQ==", + "dev": true, + "requires": { + "buffer-from": "^1.0.0", + "source-map": "^0.6.0" + }, + "dependencies": { + "source-map": { + "version": "0.6.1", + "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", + "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", + "dev": true + } + } + }, + "source-map-url": { + "version": "0.4.0", + "resolved": "https://registry.npmjs.org/source-map-url/-/source-map-url-0.4.0.tgz", + "integrity": "sha1-PpNdfd1zYxuXZZlW1VEo6HtQhKM=", + "dev": true + }, + "split-string": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/split-string/-/split-string-3.1.0.tgz", + "integrity": "sha512-NzNVhJDYpwceVVii8/Hu6DKfD2G+NrQHlS/V/qgv763EYudVwEcMQNxd2lh+0VrUByXN/oJkl5grOhYWvQUYiw==", + "dev": true, + "requires": { + "extend-shallow": "^3.0.0" + } + }, + "ssri": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/ssri/-/ssri-6.0.1.tgz", + "integrity": "sha512-3Wge10hNcT1Kur4PDFwEieXSCMCJs/7WvSACcrMYrNp+b8kDL1/0wJch5Ni2WrtwEa2IO8OsVfeKIciKCDx/QA==", + "dev": true, + "requires": { + "figgy-pudding": "^3.5.1" + } + }, + "static-extend": { + "version": "0.1.2", + "resolved": "https://registry.npmjs.org/static-extend/-/static-extend-0.1.2.tgz", + "integrity": "sha1-YICcOcv/VTNyJv1eC1IPNB8ftcY=", + "dev": true, + "requires": { + "define-property": "^0.2.5", + "object-copy": "^0.1.0" + }, + "dependencies": { + "define-property": { + "version": "0.2.5", + "resolved": "https://registry.npmjs.org/define-property/-/define-property-0.2.5.tgz", + "integrity": "sha1-w1se+RjsPJkPmlvFe+BKrOxcgRY=", + "dev": true, + "requires": { + "is-descriptor": "^0.1.0" + } + } + } + }, + "stream-browserify": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/stream-browserify/-/stream-browserify-2.0.2.tgz", + "integrity": 
"sha512-nX6hmklHs/gr2FuxYDltq8fJA1GDlxKQCz8O/IM4atRqBH8OORmBNgfvW5gG10GT/qQ9u0CzIvr2X5Pkt6ntqg==", + "dev": true, + "requires": { + "inherits": "~2.0.1", + "readable-stream": "^2.0.2" + } + }, + "stream-each": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/stream-each/-/stream-each-1.2.3.tgz", + "integrity": "sha512-vlMC2f8I2u/bZGqkdfLQW/13Zihpej/7PmSiMQsbYddxuTsJp8vRe2x2FvVExZg7FaOds43ROAuFJwPR4MTZLw==", + "dev": true, + "requires": { + "end-of-stream": "^1.1.0", + "stream-shift": "^1.0.0" + } + }, + "stream-http": { + "version": "2.8.3", + "resolved": "https://registry.npmjs.org/stream-http/-/stream-http-2.8.3.tgz", + "integrity": "sha512-+TSkfINHDo4J+ZobQLWiMouQYB+UVYFttRA94FpEzzJ7ZdqcL4uUUQ7WkdkI4DSozGmgBUE/a47L+38PenXhUw==", + "dev": true, + "requires": { + "builtin-status-codes": "^3.0.0", + "inherits": "^2.0.1", + "readable-stream": "^2.3.6", + "to-arraybuffer": "^1.0.0", + "xtend": "^4.0.0" + } + }, + "stream-shift": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/stream-shift/-/stream-shift-1.0.1.tgz", + "integrity": "sha512-AiisoFqQ0vbGcZgQPY1cdP2I76glaVA/RauYR4G4thNFgkTqr90yXTo4LYX60Jl+sIlPNHHdGSwo01AvbKUSVQ==", + "dev": true + }, + "string_decoder": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.1.1.tgz", + "integrity": "sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg==", + "dev": true, + "requires": { + "safe-buffer": "~5.1.0" + } + }, + "strip-ansi": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-5.2.0.tgz", + "integrity": "sha512-DuRs1gKbBqsMKIZlrffwlug8MHkcnpjs5VPmL1PAh+mA30U0DTotfDZ0d2UUsXpPmPmMMJ6W773MaA3J+lbiWA==", + "dev": true, + "requires": { + "ansi-regex": "^4.1.0" + }, + "dependencies": { + "ansi-regex": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-4.1.0.tgz", + "integrity": 
"sha512-1apePfXM1UOSqw0o9IiFAovVz9M5S1Dg+4TrDwfMewQ6p/rmMueb7tWZjQ1rx4Loy1ArBggoqGpfqqdI4rondg==", + "dev": true + } + } + }, + "strip-eof": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/strip-eof/-/strip-eof-1.0.0.tgz", + "integrity": "sha1-u0P/VZim6wXYm1n80SnJgzE2Br8=", + "dev": true + }, + "supports-color": { + "version": "5.5.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-5.5.0.tgz", + "integrity": "sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow==", + "dev": true, + "requires": { + "has-flag": "^3.0.0" + } + }, + "tapable": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/tapable/-/tapable-1.1.3.tgz", + "integrity": "sha512-4WK/bYZmj8xLr+HUCODHGF1ZFzsYffasLUgEiMBY4fgtltdO6B4WJtlSbPaDTLpYTcGVwM2qLnFTICEcNxs3kA==", + "dev": true + }, + "terser": { + "version": "4.6.3", + "resolved": "https://registry.npmjs.org/terser/-/terser-4.6.3.tgz", + "integrity": "sha512-Lw+ieAXmY69d09IIc/yqeBqXpEQIpDGZqT34ui1QWXIUpR2RjbqEkT8X7Lgex19hslSqcWM5iMN2kM11eMsESQ==", + "dev": true, + "requires": { + "commander": "^2.20.0", + "source-map": "~0.6.1", + "source-map-support": "~0.5.12" + }, + "dependencies": { + "source-map": { + "version": "0.6.1", + "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", + "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", + "dev": true + } + } + }, + "terser-webpack-plugin": { + "version": "1.4.3", + "resolved": "https://registry.npmjs.org/terser-webpack-plugin/-/terser-webpack-plugin-1.4.3.tgz", + "integrity": "sha512-QMxecFz/gHQwteWwSo5nTc6UaICqN1bMedC5sMtUc7y3Ha3Q8y6ZO0iCR8pq4RJC8Hjf0FEPEHZqcMB/+DFCrA==", + "dev": true, + "requires": { + "cacache": "^12.0.2", + "find-cache-dir": "^2.1.0", + "is-wsl": "^1.1.0", + "schema-utils": "^1.0.0", + "serialize-javascript": "^2.1.2", + "source-map": "^0.6.1", + "terser": "^4.1.2", + "webpack-sources": "^1.4.0", + 
"worker-farm": "^1.7.0" + }, + "dependencies": { + "source-map": { + "version": "0.6.1", + "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", + "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", + "dev": true + } + } + }, + "through2": { + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/through2/-/through2-2.0.5.tgz", + "integrity": "sha512-/mrRod8xqpA+IHSLyGCQ2s8SPHiCDEeQJSep1jqLYeEUClOFG2Qsh+4FU6G9VeqpZnGW/Su8LQGc4YKni5rYSQ==", + "dev": true, + "requires": { + "readable-stream": "~2.3.6", + "xtend": "~4.0.1" + } + }, + "timers-browserify": { + "version": "2.0.11", + "resolved": "https://registry.npmjs.org/timers-browserify/-/timers-browserify-2.0.11.tgz", + "integrity": "sha512-60aV6sgJ5YEbzUdn9c8kYGIqOubPoUdqQCul3SBAsRCZ40s6Y5cMcrW4dt3/k/EsbLVJNl9n6Vz3fTc+k2GeKQ==", + "dev": true, + "requires": { + "setimmediate": "^1.0.4" + } + }, + "to-arraybuffer": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/to-arraybuffer/-/to-arraybuffer-1.0.1.tgz", + "integrity": "sha1-fSKbH8xjfkZsoIEYCDanqr/4P0M=", + "dev": true + }, + "to-fast-properties": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/to-fast-properties/-/to-fast-properties-2.0.0.tgz", + "integrity": "sha1-3F5pjL0HkmW8c+A3doGk5Og/YW4=", + "dev": true + }, + "to-object-path": { + "version": "0.3.0", + "resolved": "https://registry.npmjs.org/to-object-path/-/to-object-path-0.3.0.tgz", + "integrity": "sha1-KXWIt7Dn4KwI4E5nL4XB9JmeF68=", + "dev": true, + "requires": { + "kind-of": "^3.0.2" + }, + "dependencies": { + "kind-of": { + "version": "3.2.2", + "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-3.2.2.tgz", + "integrity": "sha1-MeohpzS6ubuw8yRm2JOupR5KPGQ=", + "dev": true, + "requires": { + "is-buffer": "^1.1.5" + } + } + } + }, + "to-regex": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/to-regex/-/to-regex-3.0.2.tgz", + "integrity": 
"sha512-FWtleNAtZ/Ki2qtqej2CXTOayOH9bHDQF+Q48VpWyDXjbYxA4Yz8iDB31zXOBUlOHHKidDbqGVrTUvQMPmBGBw==", + "dev": true, + "requires": { + "define-property": "^2.0.2", + "extend-shallow": "^3.0.2", + "regex-not": "^1.0.2", + "safe-regex": "^1.1.0" + } + }, + "to-regex-range": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-2.1.1.tgz", + "integrity": "sha1-fIDBe53+vlmeJzZ+DU3VWQFB2zg=", + "dev": true, + "requires": { + "is-number": "^3.0.0", + "repeat-string": "^1.6.1" + } + }, + "tslib": { + "version": "1.10.0", + "resolved": "https://registry.npmjs.org/tslib/-/tslib-1.10.0.tgz", + "integrity": "sha512-qOebF53frne81cf0S9B41ByenJ3/IuH8yJKngAX35CmiZySA0khhkovshKK+jGCaMnVomla7gVlIcc3EvKPbTQ==", + "dev": true + }, + "tty-browserify": { + "version": "0.0.0", + "resolved": "https://registry.npmjs.org/tty-browserify/-/tty-browserify-0.0.0.tgz", + "integrity": "sha1-oVe6QC2iTpv5V/mqadUk7tQpAaY=", + "dev": true + }, + "typedarray": { + "version": "0.0.6", + "resolved": "https://registry.npmjs.org/typedarray/-/typedarray-0.0.6.tgz", + "integrity": "sha1-hnrHTjhkGHsdPUfZlqeOxciDB3c=", + "dev": true + }, + "unicode-canonical-property-names-ecmascript": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/unicode-canonical-property-names-ecmascript/-/unicode-canonical-property-names-ecmascript-1.0.4.tgz", + "integrity": "sha512-jDrNnXWHd4oHiTZnx/ZG7gtUTVp+gCcTTKr8L0HjlwphROEW3+Him+IpvC+xcJEFegapiMZyZe02CyuOnRmbnQ==", + "dev": true + }, + "unicode-match-property-ecmascript": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/unicode-match-property-ecmascript/-/unicode-match-property-ecmascript-1.0.4.tgz", + "integrity": "sha512-L4Qoh15vTfntsn4P1zqnHulG0LdXgjSO035fEpdtp6YxXhMT51Q6vgM5lYdG/5X3MjS+k/Y9Xw4SFCY9IkR0rg==", + "dev": true, + "requires": { + "unicode-canonical-property-names-ecmascript": "^1.0.4", + "unicode-property-aliases-ecmascript": "^1.0.4" + } + }, + "unicode-match-property-value-ecmascript": { 
+ "version": "1.1.0", + "resolved": "https://registry.npmjs.org/unicode-match-property-value-ecmascript/-/unicode-match-property-value-ecmascript-1.1.0.tgz", + "integrity": "sha512-hDTHvaBk3RmFzvSl0UVrUmC3PuW9wKVnpoUDYH0JDkSIovzw+J5viQmeYHxVSBptubnr7PbH2e0fnpDRQnQl5g==", + "dev": true + }, + "unicode-property-aliases-ecmascript": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/unicode-property-aliases-ecmascript/-/unicode-property-aliases-ecmascript-1.0.5.tgz", + "integrity": "sha512-L5RAqCfXqAwR3RriF8pM0lU0w4Ryf/GgzONwi6KnL1taJQa7x1TCxdJnILX59WIGOwR57IVxn7Nej0fz1Ny6fw==", + "dev": true + }, + "union-value": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/union-value/-/union-value-1.0.1.tgz", + "integrity": "sha512-tJfXmxMeWYnczCVs7XAEvIV7ieppALdyepWMkHkwciRpZraG/xwT+s2JN8+pr1+8jCRf80FFzvr+MpQeeoF4Xg==", + "dev": true, + "requires": { + "arr-union": "^3.1.0", + "get-value": "^2.0.6", + "is-extendable": "^0.1.1", + "set-value": "^2.0.1" + } + }, + "unique-filename": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/unique-filename/-/unique-filename-1.1.1.tgz", + "integrity": "sha512-Vmp0jIp2ln35UTXuryvjzkjGdRyf9b2lTXuSYUiPmzRcl3FDtYqAwOnTJkAngD9SWhnoJzDbTKwaOrZ+STtxNQ==", + "dev": true, + "requires": { + "unique-slug": "^2.0.0" + } + }, + "unique-slug": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/unique-slug/-/unique-slug-2.0.2.tgz", + "integrity": "sha512-zoWr9ObaxALD3DOPfjPSqxt4fnZiWblxHIgeWqW8x7UqDzEtHEQLzji2cuJYQFCU6KmoJikOYAZlrTHHebjx2w==", + "dev": true, + "requires": { + "imurmurhash": "^0.1.4" + } + }, + "unset-value": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/unset-value/-/unset-value-1.0.0.tgz", + "integrity": "sha1-g3aHP30jNRef+x5vw6jtDfyKtVk=", + "dev": true, + "requires": { + "has-value": "^0.3.1", + "isobject": "^3.0.0" + }, + "dependencies": { + "has-value": { + "version": "0.3.1", + "resolved": 
"https://registry.npmjs.org/has-value/-/has-value-0.3.1.tgz", + "integrity": "sha1-ex9YutpiyoJ+wKIHgCVlSEWZXh8=", + "dev": true, + "requires": { + "get-value": "^2.0.3", + "has-values": "^0.1.4", + "isobject": "^2.0.0" + }, + "dependencies": { + "isobject": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/isobject/-/isobject-2.1.0.tgz", + "integrity": "sha1-8GVWEJaj8dou9GJy+BXIQNh+DIk=", + "dev": true, + "requires": { + "isarray": "1.0.0" + } + } + } + }, + "has-values": { + "version": "0.1.4", + "resolved": "https://registry.npmjs.org/has-values/-/has-values-0.1.4.tgz", + "integrity": "sha1-bWHeldkd/Km5oCCJrThL/49it3E=", + "dev": true + } + } + }, + "upath": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/upath/-/upath-1.2.0.tgz", + "integrity": "sha512-aZwGpamFO61g3OlfT7OQCHqhGnW43ieH9WZeP7QxN/G/jS4jfqUkZxoryvJgVPEcrl5NL/ggHsSmLMHuH64Lhg==", + "dev": true + }, + "uri-js": { + "version": "4.2.2", + "resolved": "https://registry.npmjs.org/uri-js/-/uri-js-4.2.2.tgz", + "integrity": "sha512-KY9Frmirql91X2Qgjry0Wd4Y+YTdrdZheS8TFwvkbLWf/G5KNJDCh6pKL5OZctEW4+0Baa5idK2ZQuELRwPznQ==", + "dev": true, + "requires": { + "punycode": "^2.1.0" + } + }, + "urix": { + "version": "0.1.0", + "resolved": "https://registry.npmjs.org/urix/-/urix-0.1.0.tgz", + "integrity": "sha1-2pN/emLiH+wf0Y1Js1wpNQZ6bHI=", + "dev": true + }, + "url": { + "version": "0.11.0", + "resolved": "https://registry.npmjs.org/url/-/url-0.11.0.tgz", + "integrity": "sha1-ODjpfPxgUh63PFJajlW/3Z4uKPE=", + "dev": true, + "requires": { + "punycode": "1.3.2", + "querystring": "0.2.0" + }, + "dependencies": { + "punycode": { + "version": "1.3.2", + "resolved": "https://registry.npmjs.org/punycode/-/punycode-1.3.2.tgz", + "integrity": "sha1-llOgNvt8HuQjQvIyXM7v6jkmxI0=", + "dev": true + } + } + }, + "use": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/use/-/use-3.1.1.tgz", + "integrity": 
"sha512-cwESVXlO3url9YWlFW/TA9cshCEhtu7IKJ/p5soJ/gGpj7vbvFrAY/eIioQ6Dw23KjZhYgiIo8HOs1nQ2vr/oQ==", + "dev": true + }, + "util": { + "version": "0.11.1", + "resolved": "https://registry.npmjs.org/util/-/util-0.11.1.tgz", + "integrity": "sha512-HShAsny+zS2TZfaXxD9tYj4HQGlBezXZMZuM/S5PKLLoZkShZiGk9o5CzukI1LVHZvjdvZ2Sj1aW/Ndn2NB/HQ==", + "dev": true, + "requires": { + "inherits": "2.0.3" + }, + "dependencies": { + "inherits": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.3.tgz", + "integrity": "sha1-Yzwsg+PaQqUC9SRmAiSA9CCCYd4=", + "dev": true + } + } + }, + "util-deprecate": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz", + "integrity": "sha1-RQ1Nyfpw3nMnYvvS1KKJgUGaDM8=", + "dev": true + }, + "vm-browserify": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/vm-browserify/-/vm-browserify-1.1.2.tgz", + "integrity": "sha512-2ham8XPWTONajOR0ohOKOHXkm3+gaBmGut3SRuu75xLd/RRaY6vqgh8NBYYk7+RW3u5AtzPQZG8F10LHkl0lAQ==", + "dev": true + }, + "watchpack": { + "version": "1.6.0", + "resolved": "https://registry.npmjs.org/watchpack/-/watchpack-1.6.0.tgz", + "integrity": "sha512-i6dHe3EyLjMmDlU1/bGQpEw25XSjkJULPuAVKCbNRefQVq48yXKUpwg538F7AZTf9kyr57zj++pQFltUa5H7yA==", + "dev": true, + "requires": { + "chokidar": "^2.0.2", + "graceful-fs": "^4.1.2", + "neo-async": "^2.5.0" + } + }, + "webpack": { + "version": "4.41.5", + "resolved": "https://registry.npmjs.org/webpack/-/webpack-4.41.5.tgz", + "integrity": "sha512-wp0Co4vpyumnp3KlkmpM5LWuzvZYayDwM2n17EHFr4qxBBbRokC7DJawPJC7TfSFZ9HZ6GsdH40EBj4UV0nmpw==", + "dev": true, + "requires": { + "@webassemblyjs/ast": "1.8.5", + "@webassemblyjs/helper-module-context": "1.8.5", + "@webassemblyjs/wasm-edit": "1.8.5", + "@webassemblyjs/wasm-parser": "1.8.5", + "acorn": "^6.2.1", + "ajv": "^6.10.2", + "ajv-keywords": "^3.4.1", + "chrome-trace-event": "^1.0.2", + "enhanced-resolve": "^4.1.0", + "eslint-scope": "^4.0.3", + 
"json-parse-better-errors": "^1.0.2", + "loader-runner": "^2.4.0", + "loader-utils": "^1.2.3", + "memory-fs": "^0.4.1", + "micromatch": "^3.1.10", + "mkdirp": "^0.5.1", + "neo-async": "^2.6.1", + "node-libs-browser": "^2.2.1", + "schema-utils": "^1.0.0", + "tapable": "^1.1.3", + "terser-webpack-plugin": "^1.4.3", + "watchpack": "^1.6.0", + "webpack-sources": "^1.4.1" + }, + "dependencies": { + "acorn": { + "version": "6.4.0", + "resolved": "https://registry.npmjs.org/acorn/-/acorn-6.4.0.tgz", + "integrity": "sha512-gac8OEcQ2Li1dxIEWGZzsp2BitJxwkwcOm0zHAJLcPJaVvm58FRnk6RkuLRpU1EujipU2ZFODv2P9DLMfnV8mw==", + "dev": true + }, + "eslint-scope": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/eslint-scope/-/eslint-scope-4.0.3.tgz", + "integrity": "sha512-p7VutNr1O/QrxysMo3E45FjYDTeXBy0iTltPFNSqKAIfjDSXC+4dj+qfyuD8bfAXrW/y6lW3O76VaYNPKfpKrg==", + "dev": true, + "requires": { + "esrecurse": "^4.1.0", + "estraverse": "^4.1.1" + } + } + } + }, + "webpack-cli": { + "version": "3.3.10", + "resolved": "https://registry.npmjs.org/webpack-cli/-/webpack-cli-3.3.10.tgz", + "integrity": "sha512-u1dgND9+MXaEt74sJR4PR7qkPxXUSQ0RXYq8x1L6Jg1MYVEmGPrH6Ah6C4arD4r0J1P5HKjRqpab36k0eIzPqg==", + "dev": true, + "requires": { + "chalk": "2.4.2", + "cross-spawn": "6.0.5", + "enhanced-resolve": "4.1.0", + "findup-sync": "3.0.0", + "global-modules": "2.0.0", + "import-local": "2.0.0", + "interpret": "1.2.0", + "loader-utils": "1.2.3", + "supports-color": "6.1.0", + "v8-compile-cache": "2.0.3", + "yargs": "13.2.4" + }, + "dependencies": { + "enhanced-resolve": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/enhanced-resolve/-/enhanced-resolve-4.1.0.tgz", + "integrity": "sha512-F/7vkyTtyc/llOIn8oWclcB25KdRaiPBpZYDgJHgh/UHtpgT2p2eldQgtQnLtUvfMKPKxbRaQM/hHkvLHt1Vng==", + "dev": true, + "requires": { + "graceful-fs": "^4.1.2", + "memory-fs": "^0.4.0", + "tapable": "^1.0.0" + } + }, + "supports-color": { + "version": "6.1.0", + "resolved": 
"https://registry.npmjs.org/supports-color/-/supports-color-6.1.0.tgz", + "integrity": "sha512-qe1jfm1Mg7Nq/NSh6XE24gPXROEVsWHxC1LIx//XNlD9iw7YZQGjZNjYN7xGaEG6iKdA8EtNFW6R0gjnVXp+wQ==", + "dev": true, + "requires": { + "has-flag": "^3.0.0" + } + }, + "v8-compile-cache": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/v8-compile-cache/-/v8-compile-cache-2.0.3.tgz", + "integrity": "sha512-CNmdbwQMBjwr9Gsmohvm0pbL954tJrNzf6gWL3K+QMQf00PF7ERGrEiLgjuU3mKreLC2MeGhUsNV9ybTbLgd3w==", + "dev": true + } + } + }, + "webpack-sources": { + "version": "1.4.3", + "resolved": "https://registry.npmjs.org/webpack-sources/-/webpack-sources-1.4.3.tgz", + "integrity": "sha512-lgTS3Xhv1lCOKo7SA5TjKXMjpSM4sBjNV5+q2bqesbSPs5FjGmU6jjtBSkX9b4qW87vDIsCIlUPOEhbZrMdjeQ==", + "dev": true, + "requires": { + "source-list-map": "^2.0.0", + "source-map": "~0.6.1" + }, + "dependencies": { + "source-map": { + "version": "0.6.1", + "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", + "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", + "dev": true + } + } + }, + "which": { + "version": "1.3.1", + "resolved": "https://registry.npmjs.org/which/-/which-1.3.1.tgz", + "integrity": "sha512-HxJdYWq1MTIQbJ3nw0cqssHoTNU267KlrDuGZ1WYlxDStUtKUhOaJmh112/TZmHxxUfuJqPXSOm7tDyas0OSIQ==", + "dev": true, + "requires": { + "isexe": "^2.0.0" + } + }, + "which-module": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/which-module/-/which-module-2.0.0.tgz", + "integrity": "sha1-2e8H3Od7mQK4o6j6SzHD4/fm6Ho=", + "dev": true + }, + "worker-farm": { + "version": "1.7.0", + "resolved": "https://registry.npmjs.org/worker-farm/-/worker-farm-1.7.0.tgz", + "integrity": "sha512-rvw3QTZc8lAxyVrqcSGVm5yP/IJ2UcB3U0graE3LCFoZ0Yn2x4EoVSqJKdB/T5M+FLcRPjz4TDacRf3OCfNUzw==", + "dev": true, + "requires": { + "errno": "~0.1.7" + } + }, + "wrap-ansi": { + "version": "5.1.0", + "resolved": 
"https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-5.1.0.tgz", + "integrity": "sha512-QC1/iN/2/RPVJ5jYK8BGttj5z83LmSKmvbvrXPNCLZSEb32KKVDJDl/MOt2N01qU2H/FkzEa9PKto1BqDjtd7Q==", + "dev": true, + "requires": { + "ansi-styles": "^3.2.0", + "string-width": "^3.0.0", + "strip-ansi": "^5.0.0" + }, + "dependencies": { + "string-width": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-3.1.0.tgz", + "integrity": "sha512-vafcv6KjVZKSgz06oM/H6GDBrAtz8vdhQakGjFIvNrHA6y3HCF1CInLy+QLq8dTJPQ1b+KDUqDFctkdRW44e1w==", + "dev": true, + "requires": { + "emoji-regex": "^7.0.1", + "is-fullwidth-code-point": "^2.0.0", + "strip-ansi": "^5.1.0" + } + } + } + }, + "wrappy": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz", + "integrity": "sha1-tSQ9jz7BqjXxNkYFvA0QNuMKtp8=", + "dev": true + }, + "xtend": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/xtend/-/xtend-4.0.2.tgz", + "integrity": "sha512-LKYU1iAXJXUgAXn9URjiu+MWhyUXHsvfp7mcuYm9dSUKK0/CjtrUwFAxD82/mCWbtLsGjFIad0wIsod4zrTAEQ==", + "dev": true + }, + "y18n": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/y18n/-/y18n-4.0.0.tgz", + "integrity": "sha512-r9S/ZyXu/Xu9q1tYlpsLIsa3EeLXXk0VwlxqTcFRfg9EhMW+17kbt9G0NrgCmhGb5vT2hyhJZLfDGx+7+5Uj/w==", + "dev": true + }, + "yallist": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/yallist/-/yallist-3.1.1.tgz", + "integrity": "sha512-a4UGQaWPH59mOXUYnAG2ewncQS4i4F43Tv3JoAM+s2VDAmS9NsK8GpDMLrCHPksFT7h3K6TOoUNn2pb7RoXx4g==", + "dev": true + }, + "yargs": { + "version": "13.2.4", + "resolved": "https://registry.npmjs.org/yargs/-/yargs-13.2.4.tgz", + "integrity": "sha512-HG/DWAJa1PAnHT9JAhNa8AbAv3FPaiLzioSjCcmuXXhP8MlpHO5vwls4g4j6n30Z74GVQj8Xa62dWVx1QCGklg==", + "dev": true, + "requires": { + "cliui": "^5.0.0", + "find-up": "^3.0.0", + "get-caller-file": "^2.0.1", + "os-locale": "^3.1.0", + "require-directory": "^2.1.1", + "require-main-filename": "^2.0.0", + 
"set-blocking": "^2.0.0", + "string-width": "^3.0.0", + "which-module": "^2.0.0", + "y18n": "^4.0.0", + "yargs-parser": "^13.1.0" + }, + "dependencies": { + "find-up": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/find-up/-/find-up-3.0.0.tgz", + "integrity": "sha512-1yD6RmLI1XBfxugvORwlck6f75tYL+iR0jqwsOrOxMZyGYqUuDhJ0l4AXdO1iX/FTs9cBAMEk1gWSEx1kSbylg==", + "dev": true, + "requires": { + "locate-path": "^3.0.0" + } + }, + "locate-path": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-3.0.0.tgz", + "integrity": "sha512-7AO748wWnIhNqAuaty2ZWHkQHRSNfPVIsPIfwEOWO22AmaoVrWavlOcMR5nzTLNYvp36X220/maaRsrec1G65A==", + "dev": true, + "requires": { + "p-locate": "^3.0.0", + "path-exists": "^3.0.0" + } + }, + "p-limit": { + "version": "2.2.2", + "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-2.2.2.tgz", + "integrity": "sha512-WGR+xHecKTr7EbUEhyLSh5Dube9JtdiG78ufaeLxTgpudf/20KqyMioIUZJAezlTIi6evxuoUs9YXc11cU+yzQ==", + "dev": true, + "requires": { + "p-try": "^2.0.0" + } + }, + "p-locate": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-3.0.0.tgz", + "integrity": "sha512-x+12w/To+4GFfgJhBEpiDcLozRJGegY+Ei7/z0tSLkMmxGZNybVMSfWj9aJn8Z5Fc7dBUNJOOVgPv2H7IwulSQ==", + "dev": true, + "requires": { + "p-limit": "^2.0.0" + } + }, + "p-try": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/p-try/-/p-try-2.2.0.tgz", + "integrity": "sha512-R4nPAVTAU0B9D35/Gk3uJf/7XYbQcyohSKdvAxIRSNghFl4e71hVoGnBNQz9cWaXxO2I10KTC+3jMdvvoKw6dQ==", + "dev": true + }, + "string-width": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-3.1.0.tgz", + "integrity": "sha512-vafcv6KjVZKSgz06oM/H6GDBrAtz8vdhQakGjFIvNrHA6y3HCF1CInLy+QLq8dTJPQ1b+KDUqDFctkdRW44e1w==", + "dev": true, + "requires": { + "emoji-regex": "^7.0.1", + "is-fullwidth-code-point": "^2.0.0", + "strip-ansi": "^5.1.0" + } + } + } + }, + "yargs-parser": { + "version": "13.1.1", + 
"resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-13.1.1.tgz", + "integrity": "sha512-oVAVsHz6uFrg3XQheFII8ESO2ssAf9luWuAd6Wexsu4F3OtIW0o8IribPXYrD4WC24LWtPrJlGy87y5udK+dxQ==", + "dev": true, + "requires": { + "camelcase": "^5.0.0", + "decamelize": "^1.2.0" + } + } + } +} diff --git a/runtime/JavaScript/src/antlr4/package.json b/runtime/JavaScript/package.json similarity index 50% rename from runtime/JavaScript/src/antlr4/package.json rename to runtime/JavaScript/package.json index afb4618da..aafa83d55 100644 --- a/runtime/JavaScript/src/antlr4/package.json +++ b/runtime/JavaScript/package.json @@ -1,6 +1,6 @@ { "name": "antlr4", - "version": "4.7.2", + "version": "4.8.0", "description": "JavaScript runtime for ANTLR4", "main": "src/antlr4/index.js", "repository": "antlr/antlr4.git", @@ -15,5 +15,18 @@ "bugs": { "url": "https://github.com/antlr/antlr4/issues" }, - "homepage": "https://github.com/antlr/antlr4" + "homepage": "https://github.com/antlr/antlr4", + "devDependencies": { + "@babel/core": "^7.8.4", + "@babel/preset-env": "^7.8.4", + "babel-loader": "^8.0.6", + "webpack": "^4.41.5", + "webpack-cli": "^3.3.10" + }, + "scripts": { + "build": "webpack" + }, + "engines": { + "node": ">=14" + } } diff --git a/runtime/JavaScript/src/antlr4/BufferedTokenStream.js b/runtime/JavaScript/src/antlr4/BufferedTokenStream.js index 40f270f81..4d5a13148 100644 --- a/runtime/JavaScript/src/antlr4/BufferedTokenStream.js +++ b/runtime/JavaScript/src/antlr4/BufferedTokenStream.js @@ -1,375 +1,386 @@ -// /* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. * Use of this file is governed by the BSD 3-clause license that * can be found in the LICENSE.txt file in the project root. */ -// This implementation of {@link TokenStream} loads tokens from a -// {@link TokenSource} on-demand, and places the tokens in a buffer to provide -// access to any previous token by index. -// -//

    -// This token stream ignores the value of {@link Token//getChannel}. If your -// parser requires the token stream filter tokens to only those on a particular -// channel, such as {@link Token//DEFAULT_CHANNEL} or -// {@link Token//HIDDEN_CHANNEL}, use a filtering token stream such a -// {@link CommonTokenStream}.

    - -var Token = require('./Token').Token; -var Lexer = require('./Lexer').Lexer; -var Interval = require('./IntervalSet').Interval; +const {Token} = require('./Token'); +const Lexer = require('./Lexer'); +const {Interval} = require('./IntervalSet'); // this is just to keep meaningful parameter types to Parser -function TokenStream() { - return this; -} +class TokenStream {} -function BufferedTokenStream(tokenSource) { +/** + * This implementation of {@link TokenStream} loads tokens from a + * {@link TokenSource} on-demand, and places the tokens in a buffer to provide + * access to any previous token by index. + * + *

    + * This token stream ignores the value of {@link Token//getChannel}. If your + * parser requires the token stream filter tokens to only those on a particular + * channel, such as {@link Token//DEFAULT_CHANNEL} or + * {@link Token//HIDDEN_CHANNEL}, use a filtering token stream such a + * {@link CommonTokenStream}.

    + */ +class BufferedTokenStream extends TokenStream { + constructor(tokenSource) { - TokenStream.call(this); - // The {@link TokenSource} from which tokens for this stream are fetched. - this.tokenSource = tokenSource; + super(); + // The {@link TokenSource} from which tokens for this stream are fetched. + this.tokenSource = tokenSource; + /** + * A collection of all tokens fetched from the token source. The list is + * considered a complete view of the input once {@link //fetchedEOF} is set + * to {@code true}. + */ + this.tokens = []; - // A collection of all tokens fetched from the token source. The list is - // considered a complete view of the input once {@link //fetchedEOF} is set - // to {@code true}. - this.tokens = []; + /** + * The index into {@link //tokens} of the current token (next token to + * {@link //consume}). {@link //tokens}{@code [}{@link //p}{@code ]} should + * be + * {@link //LT LT(1)}. + * + *

    This field is set to -1 when the stream is first constructed or when + * {@link //setTokenSource} is called, indicating that the first token has + * not yet been fetched from the token source. For additional information, + * see the documentation of {@link IntStream} for a description of + * Initializing Methods.

    + */ + this.index = -1; - // The index into {@link //tokens} of the current token (next token to - // {@link //consume}). {@link //tokens}{@code [}{@link //p}{@code ]} should - // be - // {@link //LT LT(1)}. - // - //

    This field is set to -1 when the stream is first constructed or when - // {@link //setTokenSource} is called, indicating that the first token has - // not yet been fetched from the token source. For additional information, - // see the documentation of {@link IntStream} for a description of - // Initializing Methods.

    - this.index = -1; - - // Indicates whether the {@link Token//EOF} token has been fetched from - // {@link //tokenSource} and added to {@link //tokens}. This field improves - // performance for the following cases: - // - //
      - //
    • {@link //consume}: The lookahead check in {@link //consume} to - // prevent - // consuming the EOF symbol is optimized by checking the values of - // {@link //fetchedEOF} and {@link //p} instead of calling {@link - // //LA}.
    • - //
    • {@link //fetch}: The check to prevent adding multiple EOF symbols - // into - // {@link //tokens} is trivial with this field.
    • - //
        - this.fetchedEOF = false; - return this; -} - -BufferedTokenStream.prototype = Object.create(TokenStream.prototype); -BufferedTokenStream.prototype.constructor = BufferedTokenStream; - -BufferedTokenStream.prototype.mark = function() { - return 0; -}; - -BufferedTokenStream.prototype.release = function(marker) { - // no resources to release -}; - -BufferedTokenStream.prototype.reset = function() { - this.seek(0); -}; - -BufferedTokenStream.prototype.seek = function(index) { - this.lazyInit(); - this.index = this.adjustSeekIndex(index); -}; - -BufferedTokenStream.prototype.get = function(index) { - this.lazyInit(); - return this.tokens[index]; -}; - -BufferedTokenStream.prototype.consume = function() { - var skipEofCheck = false; - if (this.index >= 0) { - if (this.fetchedEOF) { - // the last token in tokens is EOF. skip check if p indexes any - // fetched token except the last. - skipEofCheck = this.index < this.tokens.length - 1; - } else { - // no EOF token in tokens. skip check if p indexes a fetched token. - skipEofCheck = this.index < this.tokens.length; - } - } else { - // not yet initialized - skipEofCheck = false; + /** + * Indicates whether the {@link Token//EOF} token has been fetched from + * {@link //tokenSource} and added to {@link //tokens}. This field improves + * performance for the following cases: + * + *
          + *
        • {@link //consume}: The lookahead check in {@link //consume} to + * prevent + * consuming the EOF symbol is optimized by checking the values of + * {@link //fetchedEOF} and {@link //p} instead of calling {@link + * //LA}.
        • + *
        • {@link //fetch}: The check to prevent adding multiple EOF symbols + * into + * {@link //tokens} is trivial with this field.
        • + *
            + */ + this.fetchedEOF = false; } - if (!skipEofCheck && this.LA(1) === Token.EOF) { - throw "cannot consume EOF"; - } - if (this.sync(this.index + 1)) { - this.index = this.adjustSeekIndex(this.index + 1); - } -}; -// Make sure index {@code i} in tokens has a token. -// -// @return {@code true} if a token is located at index {@code i}, otherwise -// {@code false}. -// @see //get(int i) -// / -BufferedTokenStream.prototype.sync = function(i) { - var n = i - this.tokens.length + 1; // how many more elements we need? - if (n > 0) { - var fetched = this.fetch(n); - return fetched >= n; - } - return true; -}; - -// Add {@code n} elements to buffer. -// -// @return The actual number of elements added to the buffer. -// / -BufferedTokenStream.prototype.fetch = function(n) { - if (this.fetchedEOF) { + mark() { return 0; } - for (var i = 0; i < n; i++) { - var t = this.tokenSource.nextToken(); - t.tokenIndex = this.tokens.length; - this.tokens.push(t); - if (t.type === Token.EOF) { - this.fetchedEOF = true; - return i + 1; + + release(marker) { + // no resources to release + } + + reset() { + this.seek(0); + } + + seek(index) { + this.lazyInit(); + this.index = this.adjustSeekIndex(index); + } + + get(index) { + this.lazyInit(); + return this.tokens[index]; + } + + consume() { + let skipEofCheck = false; + if (this.index >= 0) { + if (this.fetchedEOF) { + // the last token in tokens is EOF. skip check if p indexes any + // fetched token except the last. + skipEofCheck = this.index < this.tokens.length - 1; + } else { + // no EOF token in tokens. skip check if p indexes a fetched token. + skipEofCheck = this.index < this.tokens.length; + } + } else { + // not yet initialized + skipEofCheck = false; + } + if (!skipEofCheck && this.LA(1) === Token.EOF) { + throw "cannot consume EOF"; + } + if (this.sync(this.index + 1)) { + this.index = this.adjustSeekIndex(this.index + 1); } } - return n; -}; + + /** + * Make sure index {@code i} in tokens has a token. 
+ * + * @return {Boolean} {@code true} if a token is located at index {@code i}, otherwise + * {@code false}. + * @see //get(int i) + */ + sync(i) { + const n = i - this.tokens.length + 1; // how many more elements we need? + if (n > 0) { + const fetched = this.fetch(n); + return fetched >= n; + } + return true; + } + + /** + * Add {@code n} elements to buffer. + * + * @return {Number} The actual number of elements added to the buffer. + */ + fetch(n) { + if (this.fetchedEOF) { + return 0; + } + for (let i = 0; i < n; i++) { + const t = this.tokenSource.nextToken(); + t.tokenIndex = this.tokens.length; + this.tokens.push(t); + if (t.type === Token.EOF) { + this.fetchedEOF = true; + return i + 1; + } + } + return n; + } // Get all tokens from start..stop inclusively/// -BufferedTokenStream.prototype.getTokens = function(start, stop, types) { - if (types === undefined) { - types = null; - } - if (start < 0 || stop < 0) { - return null; - } - this.lazyInit(); - var subset = []; - if (stop >= this.tokens.length) { - stop = this.tokens.length - 1; - } - for (var i = start; i < stop; i++) { - var t = this.tokens[i]; - if (t.type === Token.EOF) { - break; + getTokens(start, stop, types) { + if (types === undefined) { + types = null; } - if (types === null || types.contains(t.type)) { - subset.push(t); + if (start < 0 || stop < 0) { + return null; + } + this.lazyInit(); + const subset = []; + if (stop >= this.tokens.length) { + stop = this.tokens.length - 1; + } + for (let i = start; i < stop; i++) { + const t = this.tokens[i]; + if (t.type === Token.EOF) { + break; + } + if (types === null || types.contains(t.type)) { + subset.push(t); + } + } + return subset; + } + + LA(i) { + return this.LT(i).type; + } + + LB(k) { + if (this.index - k < 0) { + return null; + } + return this.tokens[this.index - k]; + } + + LT(k) { + this.lazyInit(); + if (k === 0) { + return null; + } + if (k < 0) { + return this.LB(-k); + } + const i = this.index + k - 1; + this.sync(i); + if (i >= 
this.tokens.length) { // return EOF token + // EOF must be last token + return this.tokens[this.tokens.length - 1]; + } + return this.tokens[i]; + } + + /** + * Allowed derived classes to modify the behavior of operations which change + * the current stream position by adjusting the target token index of a seek + * operation. The default implementation simply returns {@code i}. If an + * exception is thrown in this method, the current stream index should not be + * changed. + * + *

            For example, {@link CommonTokenStream} overrides this method to ensure + * that + * the seek target is always an on-channel token.

            + * + * @param {Number} i The target token index. + * @return {Number} The adjusted target token index. + */ + adjustSeekIndex(i) { + return i; + } + + lazyInit() { + if (this.index === -1) { + this.setup(); } } - return subset; -}; -BufferedTokenStream.prototype.LA = function(i) { - return this.LT(i).type; -}; - -BufferedTokenStream.prototype.LB = function(k) { - if (this.index - k < 0) { - return null; + setup() { + this.sync(0); + this.index = this.adjustSeekIndex(0); } - return this.tokens[this.index - k]; -}; - -BufferedTokenStream.prototype.LT = function(k) { - this.lazyInit(); - if (k === 0) { - return null; - } - if (k < 0) { - return this.LB(-k); - } - var i = this.index + k - 1; - this.sync(i); - if (i >= this.tokens.length) { // return EOF token - // EOF must be last token - return this.tokens[this.tokens.length - 1]; - } - return this.tokens[i]; -}; - -// Allowed derived classes to modify the behavior of operations which change -// the current stream position by adjusting the target token index of a seek -// operation. The default implementation simply returns {@code i}. If an -// exception is thrown in this method, the current stream index should not be -// changed. -// -//

            For example, {@link CommonTokenStream} overrides this method to ensure -// that -// the seek target is always an on-channel token.

            -// -// @param i The target token index. -// @return The adjusted target token index. - -BufferedTokenStream.prototype.adjustSeekIndex = function(i) { - return i; -}; - -BufferedTokenStream.prototype.lazyInit = function() { - if (this.index === -1) { - this.setup(); - } -}; - -BufferedTokenStream.prototype.setup = function() { - this.sync(0); - this.index = this.adjustSeekIndex(0); -}; // Reset this token stream by setting its token source./// -BufferedTokenStream.prototype.setTokenSource = function(tokenSource) { - this.tokenSource = tokenSource; - this.tokens = []; - this.index = -1; - this.fetchedEOF = false; -}; - - -// Given a starting index, return the index of the next token on channel. -// Return i if tokens[i] is on channel. Return -1 if there are no tokens -// on channel between i and EOF. -// / -BufferedTokenStream.prototype.nextTokenOnChannel = function(i, channel) { - this.sync(i); - if (i >= this.tokens.length) { - return -1; + setTokenSource(tokenSource) { + this.tokenSource = tokenSource; + this.tokens = []; + this.index = -1; + this.fetchedEOF = false; } - var token = this.tokens[i]; - while (token.channel !== this.channel) { - if (token.type === Token.EOF) { + + /** + * Given a starting index, return the index of the next token on channel. + * Return i if tokens[i] is on channel. Return -1 if there are no tokens + * on channel between i and EOF. + */ + nextTokenOnChannel(i, channel) { + this.sync(i); + if (i >= this.tokens.length) { return -1; } - i += 1; - this.sync(i); - token = this.tokens[i]; + let token = this.tokens[i]; + while (token.channel !== this.channel) { + if (token.type === Token.EOF) { + return -1; + } + i += 1; + this.sync(i); + token = this.tokens[i]; + } + return i; } - return i; -}; -// Given a starting index, return the index of the previous token on channel. -// Return i if tokens[i] is on channel. Return -1 if there are no tokens -// on channel between i and 0. 
-BufferedTokenStream.prototype.previousTokenOnChannel = function(i, channel) { - while (i >= 0 && this.tokens[i].channel !== channel) { - i -= 1; + /** + * Given a starting index, return the index of the previous token on channel. + * Return i if tokens[i] is on channel. Return -1 if there are no tokens + * on channel between i and 0. + */ + previousTokenOnChannel(i, channel) { + while (i >= 0 && this.tokens[i].channel !== channel) { + i -= 1; + } + return i; } - return i; -}; -// Collect all tokens on specified channel to the right of -// the current token up until we see a token on DEFAULT_TOKEN_CHANNEL or -// EOF. If channel is -1, find any non default channel token. -BufferedTokenStream.prototype.getHiddenTokensToRight = function(tokenIndex, - channel) { - if (channel === undefined) { - channel = -1; + /** + * Collect all tokens on specified channel to the right of + * the current token up until we see a token on DEFAULT_TOKEN_CHANNEL or + * EOF. If channel is -1, find any non default channel token. + */ + getHiddenTokensToRight(tokenIndex, + channel) { + if (channel === undefined) { + channel = -1; + } + this.lazyInit(); + if (tokenIndex < 0 || tokenIndex >= this.tokens.length) { + throw "" + tokenIndex + " not in 0.." + this.tokens.length - 1; + } + const nextOnChannel = this.nextTokenOnChannel(tokenIndex + 1, Lexer.DEFAULT_TOKEN_CHANNEL); + const from_ = tokenIndex + 1; + // if none onchannel to right, nextOnChannel=-1 so set to = last token + const to = nextOnChannel === -1 ? this.tokens.length - 1 : nextOnChannel; + return this.filterForChannel(from_, to, channel); } - this.lazyInit(); - if (tokenIndex < 0 || tokenIndex >= this.tokens.length) { - throw "" + tokenIndex + " not in 0.." + this.tokens.length - 1; - } - var nextOnChannel = this.nextTokenOnChannel(tokenIndex + 1, Lexer.DEFAULT_TOKEN_CHANNEL); - var from_ = tokenIndex + 1; - // if none onchannel to right, nextOnChannel=-1 so set to = last token - var to = nextOnChannel === -1 ? 
this.tokens.length - 1 : nextOnChannel; - return this.filterForChannel(from_, to, channel); -}; -// Collect all tokens on specified channel to the left of -// the current token up until we see a token on DEFAULT_TOKEN_CHANNEL. -// If channel is -1, find any non default channel token. -BufferedTokenStream.prototype.getHiddenTokensToLeft = function(tokenIndex, - channel) { - if (channel === undefined) { - channel = -1; + /** + * Collect all tokens on specified channel to the left of + * the current token up until we see a token on DEFAULT_TOKEN_CHANNEL. + * If channel is -1, find any non default channel token. + */ + getHiddenTokensToLeft(tokenIndex, + channel) { + if (channel === undefined) { + channel = -1; + } + this.lazyInit(); + if (tokenIndex < 0 || tokenIndex >= this.tokens.length) { + throw "" + tokenIndex + " not in 0.." + this.tokens.length - 1; + } + const prevOnChannel = this.previousTokenOnChannel(tokenIndex - 1, Lexer.DEFAULT_TOKEN_CHANNEL); + if (prevOnChannel === tokenIndex - 1) { + return null; + } + // if none on channel to left, prevOnChannel=-1 then from=0 + const from_ = prevOnChannel + 1; + const to = tokenIndex - 1; + return this.filterForChannel(from_, to, channel); } - this.lazyInit(); - if (tokenIndex < 0 || tokenIndex >= this.tokens.length) { - throw "" + tokenIndex + " not in 0.." 
+ this.tokens.length - 1; - } - var prevOnChannel = this.previousTokenOnChannel(tokenIndex - 1, Lexer.DEFAULT_TOKEN_CHANNEL); - if (prevOnChannel === tokenIndex - 1) { - return null; - } - // if none on channel to left, prevOnChannel=-1 then from=0 - var from_ = prevOnChannel + 1; - var to = tokenIndex - 1; - return this.filterForChannel(from_, to, channel); -}; -BufferedTokenStream.prototype.filterForChannel = function(left, right, channel) { - var hidden = []; - for (var i = left; i < right + 1; i++) { - var t = this.tokens[i]; - if (channel === -1) { - if (t.channel !== Lexer.DEFAULT_TOKEN_CHANNEL) { + filterForChannel(left, right, channel) { + const hidden = []; + for (let i = left; i < right + 1; i++) { + const t = this.tokens[i]; + if (channel === -1) { + if (t.channel !== Lexer.DEFAULT_TOKEN_CHANNEL) { + hidden.push(t); + } + } else if (t.channel === channel) { hidden.push(t); } - } else if (t.channel === channel) { - hidden.push(t); } + if (hidden.length === 0) { + return null; + } + return hidden; } - if (hidden.length === 0) { - return null; - } - return hidden; -}; -BufferedTokenStream.prototype.getSourceName = function() { - return this.tokenSource.getSourceName(); -}; + getSourceName() { + return this.tokenSource.getSourceName(); + } // Get the text of all tokens in this buffer./// -BufferedTokenStream.prototype.getText = function(interval) { - this.lazyInit(); - this.fill(); - if (interval === undefined || interval === null) { - interval = new Interval(0, this.tokens.length - 1); - } - var start = interval.start; - if (start instanceof Token) { - start = start.tokenIndex; - } - var stop = interval.stop; - if (stop instanceof Token) { - stop = stop.tokenIndex; - } - if (start === null || stop === null || start < 0 || stop < 0) { - return ""; - } - if (stop >= this.tokens.length) { - stop = this.tokens.length - 1; - } - var s = ""; - for (var i = start; i < stop + 1; i++) { - var t = this.tokens[i]; - if (t.type === Token.EOF) { - break; + 
getText(interval) { + this.lazyInit(); + this.fill(); + if (interval === undefined || interval === null) { + interval = new Interval(0, this.tokens.length - 1); } - s = s + t.text; + let start = interval.start; + if (start instanceof Token) { + start = start.tokenIndex; + } + let stop = interval.stop; + if (stop instanceof Token) { + stop = stop.tokenIndex; + } + if (start === null || stop === null || start < 0 || stop < 0) { + return ""; + } + if (stop >= this.tokens.length) { + stop = this.tokens.length - 1; + } + let s = ""; + for (let i = start; i < stop + 1; i++) { + const t = this.tokens[i]; + if (t.type === Token.EOF) { + break; + } + s = s + t.text; + } + return s; } - return s; -}; // Get all tokens from lexer until EOF/// -BufferedTokenStream.prototype.fill = function() { - this.lazyInit(); - while (this.fetch(1000) === 1000) { - continue; + fill() { + this.lazyInit(); + while (this.fetch(1000) === 1000) { + continue; + } } -}; +} -exports.BufferedTokenStream = BufferedTokenStream; + +module.exports = BufferedTokenStream; diff --git a/runtime/JavaScript/src/antlr4/CharStreams.js b/runtime/JavaScript/src/antlr4/CharStreams.js index 71c507616..7c6e275ef 100644 --- a/runtime/JavaScript/src/antlr4/CharStreams.js +++ b/runtime/JavaScript/src/antlr4/CharStreams.js @@ -1,57 +1,60 @@ -// /* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. * Use of this file is governed by the BSD 3-clause license that * can be found in the LICENSE.txt file in the project root. */ -// -var InputStream = require('./InputStream').InputStream; +const {InputStream} = require('./InputStream'); +const fs = require("fs"); -var isNodeJs = typeof window === 'undefined' && typeof importScripts === 'undefined'; -var fs = isNodeJs ? require("fs") : null; - -// Utility functions to create InputStreams from various sources. 
-// -// All returned InputStreams support the full range of Unicode -// up to U+10FFFF (the default behavior of InputStream only supports -// code points up to U+FFFF). -var CharStreams = { +/** + * Utility functions to create InputStreams from various sources. + * + * All returned InputStreams support the full range of Unicode + * up to U+10FFFF (the default behavior of InputStream only supports + * code points up to U+FFFF). + */ +const CharStreams = { // Creates an InputStream from a string. fromString: function(str) { return new InputStream(str, true); }, - // Asynchronously creates an InputStream from a blob given the - // encoding of the bytes in that blob (defaults to 'utf8' if - // encoding is null). - // - // Invokes onLoad(result) on success, onError(error) on - // failure. + /** + * Asynchronously creates an InputStream from a blob given the + * encoding of the bytes in that blob (defaults to 'utf8' if + * encoding is null). + * + * Invokes onLoad(result) on success, onError(error) on + * failure. + */ fromBlob: function(blob, encoding, onLoad, onError) { - var reader = FileReader(); + const reader = new window.FileReader(); reader.onload = function(e) { - var is = new InputStream(e.target.result, true); + const is = new InputStream(e.target.result, true); onLoad(is); }; reader.onerror = onError; reader.readAsText(blob, encoding); }, - // Creates an InputStream from a Buffer given the - // encoding of the bytes in that buffer (defaults to 'utf8' if - // encoding is null). + /** + * Creates an InputStream from a Buffer given the + * encoding of the bytes in that buffer (defaults to 'utf8' if + * encoding is null). + */ fromBuffer: function(buffer, encoding) { return new InputStream(buffer.toString(encoding), true); }, - // Asynchronously creates an InputStream from a file on disk given - // the encoding of the bytes in that file (defaults to 'utf8' if - // encoding is null). - // - // Invokes callback(error, result) on completion. 
+ /** Asynchronously creates an InputStream from a file on disk given + * the encoding of the bytes in that file (defaults to 'utf8' if + * encoding is null). + * + * Invokes callback(error, result) on completion. + */ fromPath: function(path, encoding, callback) { fs.readFile(path, encoding, function(err, data) { - var is = null; + let is = null; if (data !== null) { is = new InputStream(data, true); } @@ -59,13 +62,15 @@ var CharStreams = { }); }, - // Synchronously creates an InputStream given a path to a file - // on disk and the encoding of the bytes in that file (defaults to - // 'utf8' if encoding is null). + /** + * Synchronously creates an InputStream given a path to a file + * on disk and the encoding of the bytes in that file (defaults to + * 'utf8' if encoding is null). + */ fromPathSync: function(path, encoding) { - var data = fs.readFileSync(path, encoding); + const data = fs.readFileSync(path, encoding); return new InputStream(data, true); } }; -exports.CharStreams = CharStreams; +module.exports = CharStreams diff --git a/runtime/JavaScript/src/antlr4/CommonTokenFactory.js b/runtime/JavaScript/src/antlr4/CommonTokenFactory.js index 985764890..4b7a8dbd1 100644 --- a/runtime/JavaScript/src/antlr4/CommonTokenFactory.js +++ b/runtime/JavaScript/src/antlr4/CommonTokenFactory.js @@ -1,69 +1,63 @@ -// /* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. * Use of this file is governed by the BSD 3-clause license that * can be found in the LICENSE.txt file in the project root. */ -// -// -// This default implementation of {@link TokenFactory} creates -// {@link CommonToken} objects. -// +const CommonToken = require('./Token').CommonToken; -var CommonToken = require('./Token').CommonToken; +class TokenFactory {} -function TokenFactory() { - return this; +/** + * This default implementation of {@link TokenFactory} creates + * {@link CommonToken} objects. 
+ */ +class CommonTokenFactory extends TokenFactory { + constructor(copyText) { + super(); + /** + * Indicates whether {@link CommonToken//setText} should be called after + * constructing tokens to explicitly set the text. This is useful for cases + * where the input stream might not be able to provide arbitrary substrings + * of text from the input after the lexer creates a token (e.g. the + * implementation of {@link CharStream//getText} in + * {@link UnbufferedCharStream} throws an + * {@link UnsupportedOperationException}). Explicitly setting the token text + * allows {@link Token//getText} to be called at any time regardless of the + * input stream implementation. + * + *

            + * The default value is {@code false} to avoid the performance and memory + * overhead of copying text for every token unless explicitly requested.

            + */ + this.copyText = copyText===undefined ? false : copyText; + } + + create(source, type, text, channel, start, stop, line, column) { + const t = new CommonToken(source, type, channel, start, stop); + t.line = line; + t.column = column; + if (text !==null) { + t.text = text; + } else if (this.copyText && source[1] !==null) { + t.text = source[1].getText(start,stop); + } + return t; + } + + createThin(type, text) { + const t = new CommonToken(null, type); + t.text = text; + return t; + } } -function CommonTokenFactory(copyText) { - TokenFactory.call(this); - // Indicates whether {@link CommonToken//setText} should be called after - // constructing tokens to explicitly set the text. This is useful for cases - // where the input stream might not be able to provide arbitrary substrings - // of text from the input after the lexer creates a token (e.g. the - // implementation of {@link CharStream//getText} in - // {@link UnbufferedCharStream} throws an - // {@link UnsupportedOperationException}). Explicitly setting the token text - // allows {@link Token//getText} to be called at any time regardless of the - // input stream implementation. - // - //

            - // The default value is {@code false} to avoid the performance and memory - // overhead of copying text for every token unless explicitly requested.

            - // - this.copyText = copyText===undefined ? false : copyText; - return this; -} - -CommonTokenFactory.prototype = Object.create(TokenFactory.prototype); -CommonTokenFactory.prototype.constructor = CommonTokenFactory; - -// -// The default {@link CommonTokenFactory} instance. -// -//

            -// This token factory does not explicitly copy token text when constructing -// tokens.

            -// +/** + * The default {@link CommonTokenFactory} instance. + * + *

            + * This token factory does not explicitly copy token text when constructing + * tokens.

            + */ CommonTokenFactory.DEFAULT = new CommonTokenFactory(); -CommonTokenFactory.prototype.create = function(source, type, text, channel, start, stop, line, column) { - var t = new CommonToken(source, type, channel, start, stop); - t.line = line; - t.column = column; - if (text !==null) { - t.text = text; - } else if (this.copyText && source[1] !==null) { - t.text = source[1].getText(start,stop); - } - return t; -}; - -CommonTokenFactory.prototype.createThin = function(type, text) { - var t = new CommonToken(null, type); - t.text = text; - return t; -}; - -exports.CommonTokenFactory = CommonTokenFactory; +module.exports = CommonTokenFactory; diff --git a/runtime/JavaScript/src/antlr4/CommonTokenStream.js b/runtime/JavaScript/src/antlr4/CommonTokenStream.js index ca4ea3e99..76c4ce3a0 100644 --- a/runtime/JavaScript/src/antlr4/CommonTokenStream.js +++ b/runtime/JavaScript/src/antlr4/CommonTokenStream.js @@ -1,104 +1,100 @@ -// /* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. * Use of this file is governed by the BSD 3-clause license that * can be found in the LICENSE.txt file in the project root. */ -/// -// -// This class extends {@link BufferedTokenStream} with functionality to filter -// token streams to tokens on a particular channel (tokens where -// {@link Token//getChannel} returns a particular value). -// -//

            -// This token stream provides access to all tokens by index or when calling -// methods like {@link //getText}. The channel filtering is only used for code -// accessing tokens via the lookahead methods {@link //LA}, {@link //LT}, and -// {@link //LB}.

            -// -//

            -// By default, tokens are placed on the default channel -// ({@link Token//DEFAULT_CHANNEL}), but may be reassigned by using the -// {@code ->channel(HIDDEN)} lexer command, or by using an embedded action to -// call {@link Lexer//setChannel}. -//

            -// -//

            -// Note: lexer rules which use the {@code ->skip} lexer command or call -// {@link Lexer//skip} do not produce tokens at all, so input text matched by -// such a rule will not be available as part of the token stream, regardless of -// channel.

            -/// -var Token = require('./Token').Token; -var BufferedTokenStream = require('./BufferedTokenStream').BufferedTokenStream; +const Token = require('./Token').Token; +const BufferedTokenStream = require('./BufferedTokenStream'); -function CommonTokenStream(lexer, channel) { - BufferedTokenStream.call(this, lexer); - this.channel = channel===undefined ? Token.DEFAULT_CHANNEL : channel; - return this; -} - -CommonTokenStream.prototype = Object.create(BufferedTokenStream.prototype); -CommonTokenStream.prototype.constructor = CommonTokenStream; - -CommonTokenStream.prototype.adjustSeekIndex = function(i) { - return this.nextTokenOnChannel(i, this.channel); -}; - -CommonTokenStream.prototype.LB = function(k) { - if (k===0 || this.index-k<0) { - return null; +/** + * This class extends {@link BufferedTokenStream} with functionality to filter + * token streams to tokens on a particular channel (tokens where + * {@link Token//getChannel} returns a particular value). + * + *

            + * This token stream provides access to all tokens by index or when calling + * methods like {@link //getText}. The channel filtering is only used for code + * accessing tokens via the lookahead methods {@link //LA}, {@link //LT}, and + * {@link //LB}.

            + * + *

            + * By default, tokens are placed on the default channel + * ({@link Token//DEFAULT_CHANNEL}), but may be reassigned by using the + * {@code ->channel(HIDDEN)} lexer command, or by using an embedded action to + * call {@link Lexer//setChannel}. + *

            + * + *

            + * Note: lexer rules which use the {@code ->skip} lexer command or call + * {@link Lexer//skip} do not produce tokens at all, so input text matched by + * such a rule will not be available as part of the token stream, regardless of + * channel.

            + */ +class CommonTokenStream extends BufferedTokenStream { + constructor(lexer, channel) { + super(lexer); + this.channel = channel===undefined ? Token.DEFAULT_CHANNEL : channel; } - var i = this.index; - var n = 1; - // find k good tokens looking backwards - while (n <= k) { - // skip off-channel tokens - i = this.previousTokenOnChannel(i - 1, this.channel); - n += 1; - } - if (i < 0) { - return null; - } - return this.tokens[i]; -}; -CommonTokenStream.prototype.LT = function(k) { - this.lazyInit(); - if (k === 0) { - return null; + adjustSeekIndex(i) { + return this.nextTokenOnChannel(i, this.channel); } - if (k < 0) { - return this.LB(-k); - } - var i = this.index; - var n = 1; // we know tokens[pos] is a good one - // find k good tokens - while (n < k) { - // skip off-channel tokens, but make sure to not look past EOF - if (this.sync(i + 1)) { - i = this.nextTokenOnChannel(i + 1, this.channel); + + LB(k) { + if (k===0 || this.index-k<0) { + return null; } - n += 1; - } - return this.tokens[i]; -}; - -// Count EOF just once./// -CommonTokenStream.prototype.getNumberOfOnChannelTokens = function() { - var n = 0; - this.fill(); - for (var i =0; i< this.tokens.length;i++) { - var t = this.tokens[i]; - if( t.channel===this.channel) { + let i = this.index; + let n = 1; + // find k good tokens looking backwards + while (n <= k) { + // skip off-channel tokens + i = this.previousTokenOnChannel(i - 1, this.channel); n += 1; } - if( t.type===Token.EOF) { - break; + if (i < 0) { + return null; } + return this.tokens[i]; } - return n; -}; -exports.CommonTokenStream = CommonTokenStream; \ No newline at end of file + LT(k) { + this.lazyInit(); + if (k === 0) { + return null; + } + if (k < 0) { + return this.LB(-k); + } + let i = this.index; + let n = 1; // we know tokens[pos] is a good one + // find k good tokens + while (n < k) { + // skip off-channel tokens, but make sure to not look past EOF + if (this.sync(i + 1)) { + i = this.nextTokenOnChannel(i + 1, 
this.channel); + } + n += 1; + } + return this.tokens[i]; + } + + // Count EOF just once. + getNumberOfOnChannelTokens() { + let n = 0; + this.fill(); + for (let i =0; i< this.tokens.length;i++) { + const t = this.tokens[i]; + if( t.channel===this.channel) { + n += 1; + } + if( t.type===Token.EOF) { + break; + } + } + return n; + } +} + +module.exports = CommonTokenStream; diff --git a/runtime/JavaScript/src/antlr4/FileStream.js b/runtime/JavaScript/src/antlr4/FileStream.js index baf18520a..8632ec64c 100644 --- a/runtime/JavaScript/src/antlr4/FileStream.js +++ b/runtime/JavaScript/src/antlr4/FileStream.js @@ -1,26 +1,21 @@ -// /* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. * Use of this file is governed by the BSD 3-clause license that * can be found in the LICENSE.txt file in the project root. */ -// -// -// This is an InputStream that is loaded from a file all at once -// when you construct the object. -// -var InputStream = require('./InputStream').InputStream; -var isNodeJs = typeof window === 'undefined' && typeof importScripts === 'undefined'; -var fs = isNodeJs ? require("fs") : null; +const InputStream = require('./InputStream'); +const fs = require("fs"); -function FileStream(fileName, decodeToUnicodeCodePoints) { - var data = fs.readFileSync(fileName, "utf8"); - InputStream.call(this, data, decodeToUnicodeCodePoints); - this.fileName = fileName; - return this; +/** + * This is an InputStream that is loaded from a file all at once + * when you construct the object. 
+ */ +class FileStream extends InputStream { + constructor(fileName, decodeToUnicodeCodePoints) { + const data = fs.readFileSync(fileName, "utf8"); + super(data, decodeToUnicodeCodePoints); + this.fileName = fileName; + } } -FileStream.prototype = Object.create(InputStream.prototype); -FileStream.prototype.constructor = FileStream; - -exports.FileStream = FileStream; +module.exports = FileStream diff --git a/runtime/JavaScript/src/antlr4/InputStream.js b/runtime/JavaScript/src/antlr4/InputStream.js index 77db1533d..40e6ce859 100644 --- a/runtime/JavaScript/src/antlr4/InputStream.js +++ b/runtime/JavaScript/src/antlr4/InputStream.js @@ -1,135 +1,130 @@ -// /* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. * Use of this file is governed by the BSD 3-clause license that * can be found in the LICENSE.txt file in the project root. */ -// -var Token = require('./Token').Token; +const {Token} = require('./Token'); require('./polyfills/codepointat'); require('./polyfills/fromcodepoint'); -// Vacuum all input from a string and then treat it like a buffer. - -function _loadString(stream) { - stream._index = 0; - stream.data = []; - if (stream.decodeToUnicodeCodePoints) { - for (var i = 0; i < stream.strdata.length; ) { - var codePoint = stream.strdata.codePointAt(i); - stream.data.push(codePoint); - i += codePoint <= 0xFFFF ? 1 : 2; +/** + * If decodeToUnicodeCodePoints is true, the input is treated + * as a series of Unicode code points. + * + * Otherwise, the input is treated as a series of 16-bit UTF-16 code + * units. + */ +class InputStream { + constructor(data, decodeToUnicodeCodePoints) { + this.name = ""; + this.strdata = data; + this.decodeToUnicodeCodePoints = decodeToUnicodeCodePoints || false; + // _loadString - Vacuum all input from a string and then treat it like a buffer. 
+ this._index = 0; + this.data = []; + if (this.decodeToUnicodeCodePoints) { + for (let i = 0; i < this.strdata.length; ) { + const codePoint = this.strdata.codePointAt(i); + this.data.push(codePoint); + i += codePoint <= 0xFFFF ? 1 : 2; + } + } else { + for (let i = 0; i < this.strdata.length; i++) { + const codeUnit = this.strdata.charCodeAt(i); + this.data.push(codeUnit); + } } - } else { - for (var i = 0; i < stream.strdata.length; i++) { - var codeUnit = stream.strdata.charCodeAt(i); - stream.data.push(codeUnit); + this._size = this.data.length; + } + + /** + * Reset the stream so that it's in the same state it was + * when the object was created *except* the data array is not + * touched. + */ + reset() { + this._index = 0; + } + + consume() { + if (this._index >= this._size) { + // assert this.LA(1) == Token.EOF + throw ("cannot consume EOF"); } + this._index += 1; } - stream._size = stream.data.length; -} -// If decodeToUnicodeCodePoints is true, the input is treated -// as a series of Unicode code points. -// -// Otherwise, the input is treated as a series of 16-bit UTF-16 code -// units. -function InputStream(data, decodeToUnicodeCodePoints) { - this.name = ""; - this.strdata = data; - this.decodeToUnicodeCodePoints = decodeToUnicodeCodePoints || false; - _loadString(this); - return this; -} - -Object.defineProperty(InputStream.prototype, "index", { - get : function() { - return this._index; + LA(offset) { + if (offset === 0) { + return 0; // undefined + } + if (offset < 0) { + offset += 1; // e.g., translate LA(-1) to use offset=0 + } + const pos = this._index + offset - 1; + if (pos < 0 || pos >= this._size) { // invalid + return Token.EOF; + } + return this.data[pos]; } -}); -Object.defineProperty(InputStream.prototype, "size", { - get : function() { - return this._size; + LT(offset) { + return this.LA(offset); } -}); - -// Reset the stream so that it's in the same state it was -// when the object was created *except* the data array is not -// touched. 
-// -InputStream.prototype.reset = function() { - this._index = 0; -}; - -InputStream.prototype.consume = function() { - if (this._index >= this._size) { - // assert this.LA(1) == Token.EOF - throw ("cannot consume EOF"); - } - this._index += 1; -}; - -InputStream.prototype.LA = function(offset) { - if (offset === 0) { - return 0; // undefined - } - if (offset < 0) { - offset += 1; // e.g., translate LA(-1) to use offset=0 - } - var pos = this._index + offset - 1; - if (pos < 0 || pos >= this._size) { // invalid - return Token.EOF; - } - return this.data[pos]; -}; - -InputStream.prototype.LT = function(offset) { - return this.LA(offset); -}; // mark/release do nothing; we have entire buffer -InputStream.prototype.mark = function() { - return -1; -}; - -InputStream.prototype.release = function(marker) { -}; - -// consume() ahead until p==_index; can't just set p=_index as we must -// update line and column. If we seek backwards, just set p -// -InputStream.prototype.seek = function(_index) { - if (_index <= this._index) { - this._index = _index; // just jump; don't update stream state (line, - // ...) - return; + mark() { + return -1; } - // seek forward - this._index = Math.min(_index, this._size); -}; -InputStream.prototype.getText = function(start, stop) { - if (stop >= this._size) { - stop = this._size - 1; + release(marker) { } - if (start >= this._size) { - return ""; - } else { - if (this.decodeToUnicodeCodePoints) { - var result = ""; - for (var i = start; i <= stop; i++) { - result += String.fromCodePoint(this.data[i]); - } - return result; + + /** + * consume() ahead until p==_index; can't just set p=_index as we must + * update line and column. If we seek backwards, just set p + */ + seek(_index) { + if (_index <= this._index) { + this._index = _index; // just jump; don't update stream state (line, + // ...) 
+ return; + } + // seek forward + this._index = Math.min(_index, this._size); + } + + getText(start, stop) { + if (stop >= this._size) { + stop = this._size - 1; + } + if (start >= this._size) { + return ""; } else { - return this.strdata.slice(start, stop + 1); + if (this.decodeToUnicodeCodePoints) { + let result = ""; + for (let i = start; i <= stop; i++) { + result += String.fromCodePoint(this.data[i]); + } + return result; + } else { + return this.strdata.slice(start, stop + 1); + } } } -}; -InputStream.prototype.toString = function() { - return this.strdata; -}; + toString() { + return this.strdata; + } -exports.InputStream = InputStream; + get index(){ + return this._index; + } + + get size(){ + return this._size; + } +} + + +module.exports = InputStream; diff --git a/runtime/JavaScript/src/antlr4/IntervalSet.js b/runtime/JavaScript/src/antlr4/IntervalSet.js index 6ea21d58a..e301fa8f0 100644 --- a/runtime/JavaScript/src/antlr4/IntervalSet.js +++ b/runtime/JavaScript/src/antlr4/IntervalSet.js @@ -3,296 +3,293 @@ * can be found in the LICENSE.txt file in the project root. */ -/*jslint smarttabs:true */ - -var Token = require('./Token').Token; +const {Token} = require('./Token'); /* stop is not included! */ -function Interval(start, stop) { - this.start = start; - this.stop = stop; - return this; -} - -Interval.prototype.contains = function(item) { - return item >= this.start && item < this.stop; -}; - -Interval.prototype.toString = function() { - if(this.start===this.stop-1) { - return this.start.toString(); - } else { - return this.start.toString() + ".." + (this.stop-1).toString(); +class Interval { + constructor(start, stop) { + this.start = start; + this.stop = stop; } -}; + contains(item) { + return item >= this.start && item < this.stop; + } -Object.defineProperty(Interval.prototype, "length", { - get : function() { + toString() { + if(this.start===this.stop-1) { + return this.start.toString(); + } else { + return this.start.toString() + ".." 
+ (this.stop-1).toString(); + } + } + + get length(){ return this.stop - this.start; } -}); - -function IntervalSet() { - this.intervals = null; - this.readOnly = false; } -IntervalSet.prototype.first = function(v) { - if (this.intervals === null || this.intervals.length===0) { - return Token.INVALID_TYPE; - } else { - return this.intervals[0].start; + +class IntervalSet { + constructor() { + this.intervals = null; + this.readOnly = false; } -}; -IntervalSet.prototype.addOne = function(v) { - this.addInterval(new Interval(v, v + 1)); -}; + first(v) { + if (this.intervals === null || this.intervals.length===0) { + return Token.INVALID_TYPE; + } else { + return this.intervals[0].start; + } + } -IntervalSet.prototype.addRange = function(l, h) { - this.addInterval(new Interval(l, h + 1)); -}; + addOne(v) { + this.addInterval(new Interval(v, v + 1)); + } -IntervalSet.prototype.addInterval = function(v) { - if (this.intervals === null) { - this.intervals = []; - this.intervals.push(v); - } else { - // find insert pos - for (var k = 0; k < this.intervals.length; k++) { - var i = this.intervals[k]; - // distinct range -> insert - if (v.stop < i.start) { - this.intervals.splice(k, 0, v); - return; + addRange(l, h) { + this.addInterval(new Interval(l, h + 1)); + } + + addInterval(v) { + if (this.intervals === null) { + this.intervals = []; + this.intervals.push(v); + } else { + // find insert pos + for (let k = 0; k < this.intervals.length; k++) { + const i = this.intervals[k]; + // distinct range -> insert + if (v.stop < i.start) { + this.intervals.splice(k, 0, v); + return; + } + // contiguous range -> adjust + else if (v.stop === i.start) { + this.intervals[k].start = v.start; + return; + } + // overlapping range -> adjust and reduce + else if (v.start <= i.stop) { + this.intervals[k] = new Interval(Math.min(i.start, v.start), Math.max(i.stop, v.stop)); + this.reduce(k); + return; + } } - // contiguous range -> adjust - else if (v.stop === i.start) { - 
this.intervals[k].start = v.start; - return; + // greater than any existing + this.intervals.push(v); + } + } + + addSet(other) { + if (other.intervals !== null) { + for (let k = 0; k < other.intervals.length; k++) { + const i = other.intervals[k]; + this.addInterval(new Interval(i.start, i.stop)); } - // overlapping range -> adjust and reduce - else if (v.start <= i.stop) { - this.intervals[k] = new Interval(Math.min(i.start, v.start), Math.max(i.stop, v.stop)); + } + return this; + } + + reduce(k) { + // only need to reduce if k is not the last + if (k < this.intervalslength - 1) { + const l = this.intervals[k]; + const r = this.intervals[k + 1]; + // if r contained in l + if (l.stop >= r.stop) { + this.intervals.pop(k + 1); this.reduce(k); - return; + } else if (l.stop >= r.start) { + this.intervals[k] = new Interval(l.start, r.stop); + this.intervals.pop(k + 1); } } - // greater than any existing - this.intervals.push(v); } -}; -IntervalSet.prototype.addSet = function(other) { - if (other.intervals !== null) { - for (var k = 0; k < other.intervals.length; k++) { - var i = other.intervals[k]; - this.addInterval(new Interval(i.start, i.stop)); + complement(start, stop) { + const result = new IntervalSet(); + result.addInterval(new Interval(start,stop+1)); + for(let i=0; i= r.stop) { - this.intervals.pop(k + 1); - this.reduce(k); - } else if (l.stop >= r.start) { - this.intervals[k] = new Interval(l.start, r.stop); - this.intervals.pop(k + 1); - } - } -}; - -IntervalSet.prototype.complement = function(start, stop) { - var result = new IntervalSet(); - result.addInterval(new Interval(start,stop+1)); - for(var i=0; ii.start && v.stop=i.stop) { + this.intervals.splice(k, 1); + k = k - 1; // need another pass + } + // check for lower boundary + else if(v.start"); + } else { + names.push("'" + String.fromCharCode(v.start) + "'"); + } + } else { + names.push("'" + String.fromCharCode(v.start) + "'..'" + String.fromCharCode(v.stop-1) + "'"); + } + } + if (names.length > 
1) { + return "{" + names.join(", ") + "}"; + } else { + return names[0]; + } + } + + toIndexString() { + const names = []; + for (let i = 0; i < this.intervals.length; i++) { + const v = this.intervals[i]; + if(v.stop===v.start+1) { + if ( v.start===Token.EOF ) { + names.push(""); + } else { + names.push(v.start.toString()); + } + } else { + names.push(v.start.toString() + ".." + (v.stop-1).toString()); + } + } + if (names.length > 1) { + return "{" + names.join(", ") + "}"; + } else { + return names[0]; + } + } + + toTokenString(literalNames, symbolicNames) { + const names = []; + for (let i = 0; i < this.intervals.length; i++) { + const v = this.intervals[i]; + for (let j = v.start; j < v.stop; j++) { + names.push(this.elementName(literalNames, symbolicNames, j)); + } + } + if (names.length > 1) { + return "{" + names.join(", ") + "}"; + } else { + return names[0]; + } + } + + elementName(literalNames, symbolicNames, a) { + if (a === Token.EOF) { + return ""; + } else if (a === Token.EPSILON) { + return ""; + } else { + return literalNames[a] || symbolicNames[a]; + } + } + + get length(){ + let len = 0; this.intervals.map(function(i) {len += i.length;}); return len; } -}); +} -IntervalSet.prototype.removeRange = function(v) { - if(v.start===v.stop-1) { - this.removeOne(v.start); - } else if (this.intervals!==null) { - var k = 0; - for(var n=0; ni.start && v.stop=i.stop) { - this.intervals.splice(k, 1); - k = k - 1; // need another pass - } - // check for lower boundary - else if(v.start"); - } else { - names.push("'" + String.fromCharCode(v.start) + "'"); - } - } else { - names.push("'" + String.fromCharCode(v.start) + "'..'" + String.fromCharCode(v.stop-1) + "'"); - } - } - if (names.length > 1) { - return "{" + names.join(", ") + "}"; - } else { - return names[0]; - } -}; - - -IntervalSet.prototype.toIndexString = function() { - var names = []; - for (var i = 0; i < this.intervals.length; i++) { - var v = this.intervals[i]; - if(v.stop===v.start+1) { - if ( 
v.start===Token.EOF ) { - names.push(""); - } else { - names.push(v.start.toString()); - } - } else { - names.push(v.start.toString() + ".." + (v.stop-1).toString()); - } - } - if (names.length > 1) { - return "{" + names.join(", ") + "}"; - } else { - return names[0]; - } -}; - - -IntervalSet.prototype.toTokenString = function(literalNames, symbolicNames) { - var names = []; - for (var i = 0; i < this.intervals.length; i++) { - var v = this.intervals[i]; - for (var j = v.start; j < v.stop; j++) { - names.push(this.elementName(literalNames, symbolicNames, j)); - } - } - if (names.length > 1) { - return "{" + names.join(", ") + "}"; - } else { - return names[0]; - } -}; - -IntervalSet.prototype.elementName = function(literalNames, symbolicNames, a) { - if (a === Token.EOF) { - return ""; - } else if (a === Token.EPSILON) { - return ""; - } else { - return literalNames[a] || symbolicNames[a]; - } -}; - -exports.Interval = Interval; -exports.IntervalSet = IntervalSet; diff --git a/runtime/JavaScript/src/antlr4/LL1Analyzer.js b/runtime/JavaScript/src/antlr4/LL1Analyzer.js index c7c43b46e..e76238dd7 100644 --- a/runtime/JavaScript/src/antlr4/LL1Analyzer.js +++ b/runtime/JavaScript/src/antlr4/LL1Analyzer.js @@ -1,199 +1,190 @@ -// /* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. * Use of this file is governed by the BSD 3-clause license that * can be found in the LICENSE.txt file in the project root. 
*/ -/// -var Set = require('./Utils').Set; -var BitSet = require('./Utils').BitSet; -var Token = require('./Token').Token; -var ATNConfig = require('./atn/ATNConfig').ATNConfig; -var Interval = require('./IntervalSet').Interval; -var IntervalSet = require('./IntervalSet').IntervalSet; -var RuleStopState = require('./atn/ATNState').RuleStopState; -var RuleTransition = require('./atn/Transition').RuleTransition; -var NotSetTransition = require('./atn/Transition').NotSetTransition; -var WildcardTransition = require('./atn/Transition').WildcardTransition; -var AbstractPredicateTransition = require('./atn/Transition').AbstractPredicateTransition; +const {Set, BitSet} = require('./Utils'); +const {Token} = require('./Token'); +const {ATNConfig} = require('./atn/ATNConfig'); +const {IntervalSet} = require('./IntervalSet'); +const {RuleStopState} = require('./atn/ATNState'); +const {RuleTransition, NotSetTransition, WildcardTransition, AbstractPredicateTransition} = require('./atn/Transition'); +const {predictionContextFromRuleContext, PredictionContext, SingletonPredictionContext} = require('./PredictionContext'); -var pc = require('./PredictionContext'); -var predictionContextFromRuleContext = pc.predictionContextFromRuleContext; -var PredictionContext = pc.PredictionContext; -var SingletonPredictionContext = pc.SingletonPredictionContext; - -function LL1Analyzer (atn) { - this.atn = atn; -} - -//* Special value added to the lookahead sets to indicate that we hit -// a predicate during analysis if {@code seeThruPreds==false}. -/// -LL1Analyzer.HIT_PRED = Token.INVALID_TYPE; - - -//* -// Calculates the SLL(1) expected lookahead set for each outgoing transition -// of an {@link ATNState}. The returned array has one element for each -// outgoing transition in {@code s}. If the closure from transition -// i leads to a semantic predicate before matching a symbol, the -// element at index i of the result will be {@code null}. 
-// -// @param s the ATN state -// @return the expected symbols for each outgoing transition of {@code s}. -/// -LL1Analyzer.prototype.getDecisionLookahead = function(s) { - if (s === null) { - return null; +class LL1Analyzer { + constructor(atn) { + this.atn = atn; } - var count = s.transitions.length; - var look = []; - for(var alt=0; alt< count; alt++) { - look[alt] = new IntervalSet(); - var lookBusy = new Set(); - var seeThruPreds = false; // fail to get lookahead upon pred - this._LOOK(s.transition(alt).target, null, PredictionContext.EMPTY, - look[alt], lookBusy, new BitSet(), seeThruPreds, false); - // Wipe out lookahead for this alternative if we found nothing - // or we had a predicate when we !seeThruPreds - if (look[alt].length===0 || look[alt].contains(LL1Analyzer.HIT_PRED)) { - look[alt] = null; + + /** + * Calculates the SLL(1) expected lookahead set for each outgoing transition + * of an {@link ATNState}. The returned array has one element for each + * outgoing transition in {@code s}. If the closure from transition + * i leads to a semantic predicate before matching a symbol, the + * element at index i of the result will be {@code null}. + * + * @param s the ATN state + * @return the expected symbols for each outgoing transition of {@code s}. 
+ */ + getDecisionLookahead(s) { + if (s === null) { + return null; } + const count = s.transitions.length; + const look = []; + for(let alt=0; alt< count; alt++) { + look[alt] = new IntervalSet(); + const lookBusy = new Set(); + const seeThruPreds = false; // fail to get lookahead upon pred + this._LOOK(s.transition(alt).target, null, PredictionContext.EMPTY, + look[alt], lookBusy, new BitSet(), seeThruPreds, false); + // Wipe out lookahead for this alternative if we found nothing + // or we had a predicate when we !seeThruPreds + if (look[alt].length===0 || look[alt].contains(LL1Analyzer.HIT_PRED)) { + look[alt] = null; + } + } + return look; } - return look; -}; -//* -// Compute set of tokens that can follow {@code s} in the ATN in the -// specified {@code ctx}. -// -//

            If {@code ctx} is {@code null} and the end of the rule containing -// {@code s} is reached, {@link Token//EPSILON} is added to the result set. -// If {@code ctx} is not {@code null} and the end of the outermost rule is -// reached, {@link Token//EOF} is added to the result set.

            -// -// @param s the ATN state -// @param stopState the ATN state to stop at. This can be a -// {@link BlockEndState} to detect epsilon paths through a closure. -// @param ctx the complete parser context, or {@code null} if the context -// should be ignored -// -// @return The set of tokens that can follow {@code s} in the ATN in the -// specified {@code ctx}. -/// -LL1Analyzer.prototype.LOOK = function(s, stopState, ctx) { - var r = new IntervalSet(); - var seeThruPreds = true; // ignore preds; get all lookahead - ctx = ctx || null; - var lookContext = ctx!==null ? predictionContextFromRuleContext(s.atn, ctx) : null; - this._LOOK(s, stopState, lookContext, r, new Set(), new BitSet(), seeThruPreds, true); - return r; -}; - -//* -// Compute set of tokens that can follow {@code s} in the ATN in the -// specified {@code ctx}. -// -//

            If {@code ctx} is {@code null} and {@code stopState} or the end of the -// rule containing {@code s} is reached, {@link Token//EPSILON} is added to -// the result set. If {@code ctx} is not {@code null} and {@code addEOF} is -// {@code true} and {@code stopState} or the end of the outermost rule is -// reached, {@link Token//EOF} is added to the result set.

            -// -// @param s the ATN state. -// @param stopState the ATN state to stop at. This can be a -// {@link BlockEndState} to detect epsilon paths through a closure. -// @param ctx The outer context, or {@code null} if the outer context should -// not be used. -// @param look The result lookahead set. -// @param lookBusy A set used for preventing epsilon closures in the ATN -// from causing a stack overflow. Outside code should pass -// {@code new Set} for this argument. -// @param calledRuleStack A set used for preventing left recursion in the -// ATN from causing a stack overflow. Outside code should pass -// {@code new BitSet()} for this argument. -// @param seeThruPreds {@code true} to true semantic predicates as -// implicitly {@code true} and "see through them", otherwise {@code false} -// to treat semantic predicates as opaque and add {@link //HIT_PRED} to the -// result if one is encountered. -// @param addEOF Add {@link Token//EOF} to the result if the end of the -// outermost context is reached. This parameter has no effect if {@code ctx} -// is {@code null}. -/// -LL1Analyzer.prototype._LOOK = function(s, stopState , ctx, look, lookBusy, calledRuleStack, seeThruPreds, addEOF) { - var c = new ATNConfig({state:s, alt:0, context: ctx}, null); - if (lookBusy.contains(c)) { - return; + /** + * Compute set of tokens that can follow {@code s} in the ATN in the + * specified {@code ctx}. + * + *

            If {@code ctx} is {@code null} and the end of the rule containing + * {@code s} is reached, {@link Token//EPSILON} is added to the result set. + * If {@code ctx} is not {@code null} and the end of the outermost rule is + * reached, {@link Token//EOF} is added to the result set.

            + * + * @param s the ATN state + * @param stopState the ATN state to stop at. This can be a + * {@link BlockEndState} to detect epsilon paths through a closure. + * @param ctx the complete parser context, or {@code null} if the context + * should be ignored + * + * @return The set of tokens that can follow {@code s} in the ATN in the + * specified {@code ctx}. + */ + LOOK(s, stopState, ctx) { + const r = new IntervalSet(); + const seeThruPreds = true; // ignore preds; get all lookahead + ctx = ctx || null; + const lookContext = ctx!==null ? predictionContextFromRuleContext(s.atn, ctx) : null; + this._LOOK(s, stopState, lookContext, r, new Set(), new BitSet(), seeThruPreds, true); + return r; } - lookBusy.add(c); - if (s === stopState) { - if (ctx ===null) { - look.addOne(Token.EPSILON); - return; - } else if (ctx.isEmpty() && addEOF) { - look.addOne(Token.EOF); + + /** + * Compute set of tokens that can follow {@code s} in the ATN in the + * specified {@code ctx}. + * + *

            If {@code ctx} is {@code null} and {@code stopState} or the end of the + * rule containing {@code s} is reached, {@link Token//EPSILON} is added to + * the result set. If {@code ctx} is not {@code null} and {@code addEOF} is + * {@code true} and {@code stopState} or the end of the outermost rule is + * reached, {@link Token//EOF} is added to the result set.

            + * + * @param s the ATN state. + * @param stopState the ATN state to stop at. This can be a + * {@link BlockEndState} to detect epsilon paths through a closure. + * @param ctx The outer context, or {@code null} if the outer context should + * not be used. + * @param look The result lookahead set. + * @param lookBusy A set used for preventing epsilon closures in the ATN + * from causing a stack overflow. Outside code should pass + * {@code new Set} for this argument. + * @param calledRuleStack A set used for preventing left recursion in the + * ATN from causing a stack overflow. Outside code should pass + * {@code new BitSet()} for this argument. + * @param seeThruPreds {@code true} to true semantic predicates as + * implicitly {@code true} and "see through them", otherwise {@code false} + * to treat semantic predicates as opaque and add {@link //HIT_PRED} to the + * result if one is encountered. + * @param addEOF Add {@link Token//EOF} to the result if the end of the + * outermost context is reached. This parameter has no effect if {@code ctx} + * is {@code null}. 
+ */ + _LOOK(s, stopState , ctx, look, lookBusy, calledRuleStack, seeThruPreds, addEOF) { + const c = new ATNConfig({state:s, alt:0, context: ctx}, null); + if (lookBusy.contains(c)) { return; } - } - if (s instanceof RuleStopState ) { - if (ctx ===null) { - look.addOne(Token.EPSILON); - return; - } else if (ctx.isEmpty() && addEOF) { - look.addOne(Token.EOF); - return; + lookBusy.add(c); + if (s === stopState) { + if (ctx ===null) { + look.addOne(Token.EPSILON); + return; + } else if (ctx.isEmpty() && addEOF) { + look.addOne(Token.EOF); + return; + } } - if (ctx !== PredictionContext.EMPTY) { - // run thru all possible stack tops in ctx - for(var i=0; i"; + } else if (c === '\n') { + return "\\n"; + } else if (c === '\t') { + return "\\t"; + } else if (c === '\r') { + return "\\r"; + } else { + return c; + } + } + + getCharErrorDisplay(c) { + return "'" + this.getErrorDisplayForChar(c) + "'"; + } + + /** + * Lexers can normally match any char in it's vocabulary after matching + * a token, so do the easy thing and just kill a character and hope + * it all works out. You can instead use the rule invocation stack + * to do sophisticated error recovery if you are in a fragment rule. + */ + recover(re) { + if (this._input.LA(1) !== Token.EOF) { + if (re instanceof LexerNoViableAltException) { + // skip a char and try again + this._interp.consume(this._input); + } else { + // TODO: Do we lose character or line position information? 
+ this._input.consume(); + } + } + } + + get inputStream(){ + return this._input; + } + + set inputStream(input) { + this._input = null; + this._tokenFactorySourcePair = [ this, this._input ]; + this.reset(); + this._input = input; + this._tokenFactorySourcePair = [ this, this._input ]; + } + + get sourceName(){ + return this._input.sourceName; + } + + get type(){ + return this.type; + } + + set type(type) { + this._type = type; + } + + get line(){ + return this._interp.line; + } + + set line(line) { + this._interp.line = line; + } + + get column(){ + return this._interp.column; + } + + set column(column) { + this._interp.column = column; + } + + get text(){ + if (this._text !== null) { + return this._text; + } else { + return this._interp.getText(this._input); + } + } + + set text(text) { + this._text = text; + } } -function Lexer(input) { - Recognizer.call(this); - this._input = input; - this._factory = CommonTokenFactory.DEFAULT; - this._tokenFactorySourcePair = [ this, input ]; - this._interp = null; // child classes must populate this - // The goal of all lexer rules/methods is to create a token object. - // this is an instance variable as multiple rules may collaborate to - // create a single token. nextToken will return this object after - // matching lexer rule(s). If you subclass to allow multiple token - // emissions, then set this to the last token to be matched or - // something nonnull so that the auto token emit mechanism will not - // emit another token. - this._token = null; - - // What character index in the stream did the current token start at? - // Needed, for example, to get the text for current token. Set at - // the start of nextToken. - this._tokenStartCharIndex = -1; - - // The line on which the first character of the token resides/// - this._tokenStartLine = -1; - - // The character position of first character within the line/// - this._tokenStartColumn = -1; - - // Once we see EOF on char stream, next token will be EOF. 
- // If you have DONE : EOF ; then you see DONE EOF. - this._hitEOF = false; - - // The channel number for the current token/// - this._channel = Token.DEFAULT_CHANNEL; - - // The token type for the current token/// - this._type = Token.INVALID_TYPE; - - this._modeStack = []; - this._mode = Lexer.DEFAULT_MODE; - - // You can set the text for the current token to override what is in - // the input char buffer. Use setText() or can set this instance var. - // / - this._text = null; - - return this; -} - -Lexer.prototype = Object.create(Recognizer.prototype); -Lexer.prototype.constructor = Lexer; Lexer.DEFAULT_MODE = 0; Lexer.MORE = -2; @@ -79,293 +368,7 @@ Lexer.HIDDEN = Token.HIDDEN_CHANNEL; Lexer.MIN_CHAR_VALUE = 0x0000; Lexer.MAX_CHAR_VALUE = 0x10FFFF; -Lexer.prototype.reset = function() { - // wack Lexer state variables - if (this._input !== null) { - this._input.seek(0); // rewind the input - } - this._token = null; - this._type = Token.INVALID_TYPE; - this._channel = Token.DEFAULT_CHANNEL; - this._tokenStartCharIndex = -1; - this._tokenStartColumn = -1; - this._tokenStartLine = -1; - this._text = null; - - this._hitEOF = false; - this._mode = Lexer.DEFAULT_MODE; - this._modeStack = []; - - this._interp.reset(); -}; - -// Return a token from this source; i.e., match a token on the char stream. 
-Lexer.prototype.nextToken = function() { - if (this._input === null) { - throw "nextToken requires a non-null input stream."; - } - - // Mark start location in char stream so unbuffered streams are - // guaranteed at least have text of current token - var tokenStartMarker = this._input.mark(); - try { - while (true) { - if (this._hitEOF) { - this.emitEOF(); - return this._token; - } - this._token = null; - this._channel = Token.DEFAULT_CHANNEL; - this._tokenStartCharIndex = this._input.index; - this._tokenStartColumn = this._interp.column; - this._tokenStartLine = this._interp.line; - this._text = null; - var continueOuter = false; - while (true) { - this._type = Token.INVALID_TYPE; - var ttype = Lexer.SKIP; - try { - ttype = this._interp.match(this._input, this._mode); - } catch (e) { - if(e instanceof RecognitionException) { - this.notifyListeners(e); // report error - this.recover(e); - } else { - console.log(e.stack); - throw e; - } - } - if (this._input.LA(1) === Token.EOF) { - this._hitEOF = true; - } - if (this._type === Token.INVALID_TYPE) { - this._type = ttype; - } - if (this._type === Lexer.SKIP) { - continueOuter = true; - break; - } - if (this._type !== Lexer.MORE) { - break; - } - } - if (continueOuter) { - continue; - } - if (this._token === null) { - this.emit(); - } - return this._token; - } - } finally { - // make sure we release marker after match or - // unbuffered char stream will keep buffering - this._input.release(tokenStartMarker); - } -}; - -// Instruct the lexer to skip creating a token for current lexer rule -// and look for another token. nextToken() knows to keep looking when -// a lexer rule finishes with token set to SKIP_TOKEN. Recall that -// if token==null at end of any token rule, it creates one for you -// and emits it. 
-// / -Lexer.prototype.skip = function() { - this._type = Lexer.SKIP; -}; - -Lexer.prototype.more = function() { - this._type = Lexer.MORE; -}; - -Lexer.prototype.mode = function(m) { - this._mode = m; -}; - -Lexer.prototype.pushMode = function(m) { - if (this._interp.debug) { - console.log("pushMode " + m); - } - this._modeStack.push(this._mode); - this.mode(m); -}; - -Lexer.prototype.popMode = function() { - if (this._modeStack.length === 0) { - throw "Empty Stack"; - } - if (this._interp.debug) { - console.log("popMode back to " + this._modeStack.slice(0, -1)); - } - this.mode(this._modeStack.pop()); - return this._mode; -}; - // Set the char stream and reset the lexer -Object.defineProperty(Lexer.prototype, "inputStream", { - get : function() { - return this._input; - }, - set : function(input) { - this._input = null; - this._tokenFactorySourcePair = [ this, this._input ]; - this.reset(); - this._input = input; - this._tokenFactorySourcePair = [ this, this._input ]; - } -}); - -Object.defineProperty(Lexer.prototype, "sourceName", { - get : function sourceName() { - return this._input.sourceName; - } -}); - -// By default does not support multiple emits per nextToken invocation -// for efficiency reasons. Subclass and override this method, nextToken, -// and getToken (to push tokens into a list and pull from that list -// rather than a single variable as this implementation does). -// / -Lexer.prototype.emitToken = function(token) { - this._token = token; -}; - -// The standard method called to automatically emit a token at the -// outermost lexical rule. The token object should point into the -// char buffer start..stop. If there is a text override in 'text', -// use that to set the token's text. Override this method to emit -// custom Token objects or provide a new factory. 
-// / -Lexer.prototype.emit = function() { - var t = this._factory.create(this._tokenFactorySourcePair, this._type, - this._text, this._channel, this._tokenStartCharIndex, this - .getCharIndex() - 1, this._tokenStartLine, - this._tokenStartColumn); - this.emitToken(t); - return t; -}; - -Lexer.prototype.emitEOF = function() { - var cpos = this.column; - var lpos = this.line; - var eof = this._factory.create(this._tokenFactorySourcePair, Token.EOF, - null, Token.DEFAULT_CHANNEL, this._input.index, - this._input.index - 1, lpos, cpos); - this.emitToken(eof); - return eof; -}; - -Object.defineProperty(Lexer.prototype, "type", { - get : function() { - return this.type; - }, - set : function(type) { - this._type = type; - } -}); - -Object.defineProperty(Lexer.prototype, "line", { - get : function() { - return this._interp.line; - }, - set : function(line) { - this._interp.line = line; - } -}); - -Object.defineProperty(Lexer.prototype, "column", { - get : function() { - return this._interp.column; - }, - set : function(column) { - this._interp.column = column; - } -}); -// What is the index of the current character of lookahead?/// -Lexer.prototype.getCharIndex = function() { - return this._input.index; -}; - -// Return the text matched so far for the current token or any text override. -//Set the complete text of this token; it wipes any previous changes to the text. -Object.defineProperty(Lexer.prototype, "text", { - get : function() { - if (this._text !== null) { - return this._text; - } else { - return this._interp.getText(this._input); - } - }, - set : function(text) { - this._text = text; - } -}); -// Return a list of all Token objects in input char stream. -// Forces load of all tokens. Does not include EOF token. 
-// / -Lexer.prototype.getAllTokens = function() { - var tokens = []; - var t = this.nextToken(); - while (t.type !== Token.EOF) { - tokens.push(t); - t = this.nextToken(); - } - return tokens; -}; - -Lexer.prototype.notifyListeners = function(e) { - var start = this._tokenStartCharIndex; - var stop = this._input.index; - var text = this._input.getText(start, stop); - var msg = "token recognition error at: '" + this.getErrorDisplay(text) + "'"; - var listener = this.getErrorListenerDispatch(); - listener.syntaxError(this, null, this._tokenStartLine, - this._tokenStartColumn, msg, e); -}; - -Lexer.prototype.getErrorDisplay = function(s) { - var d = []; - for (var i = 0; i < s.length; i++) { - d.push(s[i]); - } - return d.join(''); -}; - -Lexer.prototype.getErrorDisplayForChar = function(c) { - if (c.charCodeAt(0) === Token.EOF) { - return ""; - } else if (c === '\n') { - return "\\n"; - } else if (c === '\t') { - return "\\t"; - } else if (c === '\r') { - return "\\r"; - } else { - return c; - } -}; - -Lexer.prototype.getCharErrorDisplay = function(c) { - return "'" + this.getErrorDisplayForChar(c) + "'"; -}; - -// Lexers can normally match any char in it's vocabulary after matching -// a token, so do the easy thing and just kill a character and hope -// it all works out. You can instead use the rule invocation stack -// to do sophisticated error recovery if you are in a fragment rule. -// / -Lexer.prototype.recover = function(re) { - if (this._input.LA(1) !== Token.EOF) { - if (re instanceof LexerNoViableAltException) { - // skip a char and try again - this._interp.consume(this._input); - } else { - // TODO: Do we lose character or line position information? 
- this._input.consume(); - } - } -}; - -exports.Lexer = Lexer; +module.exports = Lexer; diff --git a/runtime/JavaScript/src/antlr4/Parser.js b/runtime/JavaScript/src/antlr4/Parser.js index e6b4c01ce..3ad684762 100644 --- a/runtime/JavaScript/src/antlr4/Parser.js +++ b/runtime/JavaScript/src/antlr4/Parser.js @@ -3,672 +3,688 @@ * can be found in the LICENSE.txt file in the project root. */ -var Token = require('./Token').Token; -var ParseTreeListener = require('./tree/Tree').ParseTreeListener; -var Recognizer = require('./Recognizer').Recognizer; -var DefaultErrorStrategy = require('./error/ErrorStrategy').DefaultErrorStrategy; -var ATNDeserializer = require('./atn/ATNDeserializer').ATNDeserializer; -var ATNDeserializationOptions = require('./atn/ATNDeserializationOptions').ATNDeserializationOptions; -var TerminalNode = require('./tree/Tree').TerminalNode; -var ErrorNode = require('./tree/Tree').ErrorNode; +const {Token} = require('./Token'); +const {ParseTreeListener, TerminalNode, ErrorNode} = require('./tree/Tree'); +const Recognizer = require('./Recognizer'); +const {DefaultErrorStrategy} = require('./error/ErrorStrategy'); +const ATNDeserializer = require('./atn/ATNDeserializer'); +const ATNDeserializationOptions = require('./atn/ATNDeserializationOptions'); +const Lexer = require('./Lexer'); -function TraceListener(parser) { - ParseTreeListener.call(this); - this.parser = parser; - return this; +class TraceListener extends ParseTreeListener { + constructor(parser) { + super(); + this.parser = parser; + } + + enterEveryRule(ctx) { + console.log("enter " + this.parser.ruleNames[ctx.ruleIndex] + ", LT(1)=" + this.parser._input.LT(1).text); + } + + visitTerminal(node) { + console.log("consume " + node.symbol + " rule " + this.parser.ruleNames[this.parser._ctx.ruleIndex]); + } + + exitEveryRule(ctx) { + console.log("exit " + this.parser.ruleNames[ctx.ruleIndex] + ", LT(1)=" + this.parser._input.LT(1).text); + } } -TraceListener.prototype = 
Object.create(ParseTreeListener.prototype); -TraceListener.prototype.constructor = TraceListener; - -TraceListener.prototype.enterEveryRule = function(ctx) { - console.log("enter " + this.parser.ruleNames[ctx.ruleIndex] + ", LT(1)=" + this.parser._input.LT(1).text); -}; - -TraceListener.prototype.visitTerminal = function( node) { - console.log("consume " + node.symbol + " rule " + this.parser.ruleNames[this.parser._ctx.ruleIndex]); -}; - -TraceListener.prototype.exitEveryRule = function(ctx) { - console.log("exit " + this.parser.ruleNames[ctx.ruleIndex] + ", LT(1)=" + this.parser._input.LT(1).text); -}; - -// this is all the parsing support code essentially; most of it is error -// recovery stuff.// -function Parser(input) { - Recognizer.call(this); - // The input stream. - this._input = null; - // The error handling strategy for the parser. The default value is a new - // instance of {@link DefaultErrorStrategy}. - this._errHandler = new DefaultErrorStrategy(); - this._precedenceStack = []; - this._precedenceStack.push(0); - // The {@link ParserRuleContext} object for the currently executing rule. - // this is always non-null during the parsing process. - this._ctx = null; - // Specifies whether or not the parser should construct a parse tree during - // the parsing process. The default value is {@code true}. - this.buildParseTrees = true; - // When {@link //setTrace}{@code (true)} is called, a reference to the - // {@link TraceListener} is stored here so it can be easily removed in a - // later call to {@link //setTrace}{@code (false)}. The listener itself is - // implemented as a parser listener so this field is not directly used by - // other parser methods. - this._tracer = null; - // The list of {@link ParseTreeListener} listeners registered to receive - // events during the parse. - this._parseListeners = null; - // The number of syntax errors reported during parsing. this value is - // incremented each time {@link //notifyErrorListeners} is called. 
- this._syntaxErrors = 0; - this.setInputStream(input); - return this; -} - -Parser.prototype = Object.create(Recognizer.prototype); -Parser.prototype.contructor = Parser; - -// this field maps from the serialized ATN string to the deserialized {@link -// ATN} with -// bypass alternatives. -// -// @see ATNDeserializationOptions//isGenerateRuleBypassTransitions() -// -Parser.bypassAltsAtnCache = {}; - -// reset the parser's state// -Parser.prototype.reset = function() { - if (this._input !== null) { - this._input.seek(0); +class Parser extends Recognizer { + /** + * this is all the parsing support code essentially; most of it is error + * recovery stuff. + */ + constructor(input) { + super(); + // The input stream. + this._input = null; + /** + * The error handling strategy for the parser. The default value is a new + * instance of {@link DefaultErrorStrategy}. + */ + this._errHandler = new DefaultErrorStrategy(); + this._precedenceStack = []; + this._precedenceStack.push(0); + /** + * The {@link ParserRuleContext} object for the currently executing rule. + * this is always non-null during the parsing process. + */ + this._ctx = null; + /** + * Specifies whether or not the parser should construct a parse tree during + * the parsing process. The default value is {@code true}. + */ + this.buildParseTrees = true; + /** + * When {@link //setTrace}{@code (true)} is called, a reference to the + * {@link TraceListener} is stored here so it can be easily removed in a + * later call to {@link //setTrace}{@code (false)}. The listener itself is + * implemented as a parser listener so this field is not directly used by + * other parser methods. + */ + this._tracer = null; + /** + * The list of {@link ParseTreeListener} listeners registered to receive + * events during the parse. + */ + this._parseListeners = null; + /** + * The number of syntax errors reported during parsing. this value is + * incremented each time {@link //notifyErrorListeners} is called. 
+ */ + this._syntaxErrors = 0; + this.setInputStream(input); } - this._errHandler.reset(this); - this._ctx = null; - this._syntaxErrors = 0; - this.setTrace(false); - this._precedenceStack = []; - this._precedenceStack.push(0); - if (this._interp !== null) { - this._interp.reset(); - } -}; -// Match current input symbol against {@code ttype}. If the symbol type -// matches, {@link ANTLRErrorStrategy//reportMatch} and {@link //consume} are -// called to complete the match process. -// -//

            If the symbol type does not match, -// {@link ANTLRErrorStrategy//recoverInline} is called on the current error -// strategy to attempt recovery. If {@link //getBuildParseTree} is -// {@code true} and the token index of the symbol returned by -// {@link ANTLRErrorStrategy//recoverInline} is -1, the symbol is added to -// the parse tree by calling {@link ParserRuleContext//addErrorNode}.

            -// -// @param ttype the token type to match -// @return the matched symbol -// @throws RecognitionException if the current input symbol did not match -// {@code ttype} and the error strategy could not recover from the -// mismatched symbol - -Parser.prototype.match = function(ttype) { - var t = this.getCurrentToken(); - if (t.type === ttype) { - this._errHandler.reportMatch(this); - this.consume(); - } else { - t = this._errHandler.recoverInline(this); - if (this.buildParseTrees && t.tokenIndex === -1) { - // we must have conjured up a new token during single token - // insertion - // if it's not the current symbol - this._ctx.addErrorNode(t); + // reset the parser's state + reset() { + if (this._input !== null) { + this._input.seek(0); + } + this._errHandler.reset(this); + this._ctx = null; + this._syntaxErrors = 0; + this.setTrace(false); + this._precedenceStack = []; + this._precedenceStack.push(0); + if (this._interp !== null) { + this._interp.reset(); } } - return t; -}; -// Match current input symbol as a wildcard. If the symbol type matches -// (i.e. has a value greater than 0), {@link ANTLRErrorStrategy//reportMatch} -// and {@link //consume} are called to complete the match process. -// -//

            If the symbol type does not match, -// {@link ANTLRErrorStrategy//recoverInline} is called on the current error -// strategy to attempt recovery. If {@link //getBuildParseTree} is -// {@code true} and the token index of the symbol returned by -// {@link ANTLRErrorStrategy//recoverInline} is -1, the symbol is added to -// the parse tree by calling {@link ParserRuleContext//addErrorNode}.

            -// -// @return the matched symbol -// @throws RecognitionException if the current input symbol did not match -// a wildcard and the error strategy could not recover from the mismatched -// symbol -Parser.prototype.matchWildcard = function() { - var t = this.getCurrentToken(); - if (t.type > 0) { - this._errHandler.reportMatch(this); - this.consume(); - } else { - t = this._errHandler.recoverInline(this); - if (this._buildParseTrees && t.tokenIndex === -1) { - // we must have conjured up a new token during single token - // insertion - // if it's not the current symbol - this._ctx.addErrorNode(t); + /** + * Match current input symbol against {@code ttype}. If the symbol type + * matches, {@link ANTLRErrorStrategy//reportMatch} and {@link //consume} are + * called to complete the match process. + * + *

            If the symbol type does not match, + * {@link ANTLRErrorStrategy//recoverInline} is called on the current error + * strategy to attempt recovery. If {@link //getBuildParseTree} is + * {@code true} and the token index of the symbol returned by + * {@link ANTLRErrorStrategy//recoverInline} is -1, the symbol is added to + * the parse tree by calling {@link ParserRuleContext//addErrorNode}.

            + * + * @param ttype the token type to match + * @return the matched symbol + * @throws RecognitionException if the current input symbol did not match + * {@code ttype} and the error strategy could not recover from the + * mismatched symbol + */ + match(ttype) { + let t = this.getCurrentToken(); + if (t.type === ttype) { + this._errHandler.reportMatch(this); + this.consume(); + } else { + t = this._errHandler.recoverInline(this); + if (this.buildParseTrees && t.tokenIndex === -1) { + // we must have conjured up a new token during single token + // insertion + // if it's not the current symbol + this._ctx.addErrorNode(t); + } } + return t; } - return t; -}; -Parser.prototype.getParseListeners = function() { - return this._parseListeners || []; -}; - -// Registers {@code listener} to receive events during the parsing process. -// -//

            To support output-preserving grammar transformations (including but not -// limited to left-recursion removal, automated left-factoring, and -// optimized code generation), calls to listener methods during the parse -// may differ substantially from calls made by -// {@link ParseTreeWalker//DEFAULT} used after the parse is complete. In -// particular, rule entry and exit events may occur in a different order -// during the parse than after the parser. In addition, calls to certain -// rule entry methods may be omitted.

            -// -//

            With the following specific exceptions, calls to listener events are -// deterministic, i.e. for identical input the calls to listener -// methods will be the same.

            -// -//
              -//
            • Alterations to the grammar used to generate code may change the -// behavior of the listener calls.
            • -//
            • Alterations to the command line options passed to ANTLR 4 when -// generating the parser may change the behavior of the listener calls.
            • -//
            • Changing the version of the ANTLR Tool used to generate the parser -// may change the behavior of the listener calls.
            • -//
            -// -// @param listener the listener to add -// -// @throws NullPointerException if {@code} listener is {@code null} -// -Parser.prototype.addParseListener = function(listener) { - if (listener === null) { - throw "listener"; - } - if (this._parseListeners === null) { - this._parseListeners = []; - } - this._parseListeners.push(listener); -}; - -// -// Remove {@code listener} from the list of parse listeners. -// -//

            If {@code listener} is {@code null} or has not been added as a parse -// listener, this method does nothing.

            -// @param listener the listener to remove -// -Parser.prototype.removeParseListener = function(listener) { - if (this._parseListeners !== null) { - var idx = this._parseListeners.indexOf(listener); - if (idx >= 0) { - this._parseListeners.splice(idx, 1); + /** + * Match current input symbol as a wildcard. If the symbol type matches + * (i.e. has a value greater than 0), {@link ANTLRErrorStrategy//reportMatch} + * and {@link //consume} are called to complete the match process. + * + *

            If the symbol type does not match, + * {@link ANTLRErrorStrategy//recoverInline} is called on the current error + * strategy to attempt recovery. If {@link //getBuildParseTree} is + * {@code true} and the token index of the symbol returned by + * {@link ANTLRErrorStrategy//recoverInline} is -1, the symbol is added to + * the parse tree by calling {@link ParserRuleContext//addErrorNode}.

            + * + * @return the matched symbol + * @throws RecognitionException if the current input symbol did not match + * a wildcard and the error strategy could not recover from the mismatched + * symbol + */ + matchWildcard() { + let t = this.getCurrentToken(); + if (t.type > 0) { + this._errHandler.reportMatch(this); + this.consume(); + } else { + t = this._errHandler.recoverInline(this); + if (this._buildParseTrees && t.tokenIndex === -1) { + // we must have conjured up a new token during single token + // insertion + // if it's not the current symbol + this._ctx.addErrorNode(t); + } } - if (this._parseListeners.length === 0) { - this._parseListeners = null; + return t; + } + + getParseListeners() { + return this._parseListeners || []; + } + + /** + * Registers {@code listener} to receive events during the parsing process. + * + *

            To support output-preserving grammar transformations (including but not + * limited to left-recursion removal, automated left-factoring, and + * optimized code generation), calls to listener methods during the parse + * may differ substantially from calls made by + * {@link ParseTreeWalker//DEFAULT} used after the parse is complete. In + * particular, rule entry and exit events may occur in a different order + * during the parse than after the parser. In addition, calls to certain + * rule entry methods may be omitted.

            + * + *

            With the following specific exceptions, calls to listener events are + * deterministic, i.e. for identical input the calls to listener + * methods will be the same.

            + * + *
              + *
            • Alterations to the grammar used to generate code may change the + * behavior of the listener calls.
            • + *
            • Alterations to the command line options passed to ANTLR 4 when + * generating the parser may change the behavior of the listener calls.
            • + *
            • Changing the version of the ANTLR Tool used to generate the parser + * may change the behavior of the listener calls.
            • + *
            + * + * @param listener the listener to add + * + * @throws NullPointerException if {@code} listener is {@code null} + */ + addParseListener(listener) { + if (listener === null) { + throw "listener"; } + if (this._parseListeners === null) { + this._parseListeners = []; + } + this._parseListeners.push(listener); } -}; -// Remove all parse listeners. -Parser.prototype.removeParseListeners = function() { - this._parseListeners = null; -}; - -// Notify any parse listeners of an enter rule event. -Parser.prototype.triggerEnterRuleEvent = function() { - if (this._parseListeners !== null) { - var ctx = this._ctx; - this._parseListeners.map(function(listener) { - listener.enterEveryRule(ctx); - ctx.enterRule(listener); - }); - } -}; - -// -// Notify any parse listeners of an exit rule event. -// -// @see //addParseListener -// -Parser.prototype.triggerExitRuleEvent = function() { - if (this._parseListeners !== null) { - // reverse order walk of listeners - var ctx = this._ctx; - this._parseListeners.slice(0).reverse().map(function(listener) { - ctx.exitRule(listener); - listener.exitEveryRule(ctx); - }); - } -}; - -Parser.prototype.getTokenFactory = function() { - return this._input.tokenSource._factory; -}; - -// Tell our token source and error strategy about a new way to create tokens.// -Parser.prototype.setTokenFactory = function(factory) { - this._input.tokenSource._factory = factory; -}; - -// The ATN with bypass alternatives is expensive to create so we create it -// lazily. -// -// @throws UnsupportedOperationException if the current parser does not -// implement the {@link //getSerializedATN()} method. 
-// -Parser.prototype.getATNWithBypassAlts = function() { - var serializedAtn = this.getSerializedATN(); - if (serializedAtn === null) { - throw "The current parser does not support an ATN with bypass alternatives."; - } - var result = this.bypassAltsAtnCache[serializedAtn]; - if (result === null) { - var deserializationOptions = new ATNDeserializationOptions(); - deserializationOptions.generateRuleBypassTransitions = true; - result = new ATNDeserializer(deserializationOptions) - .deserialize(serializedAtn); - this.bypassAltsAtnCache[serializedAtn] = result; - } - return result; -}; - -// The preferred method of getting a tree pattern. For example, here's a -// sample use: -// -//
            -// ParseTree t = parser.expr();
            -// ParseTreePattern p = parser.compileParseTreePattern("<ID>+0",
            -// MyParser.RULE_expr);
            -// ParseTreeMatch m = p.match(t);
            -// String id = m.get("ID");
            -// 
            - -var Lexer = require('./Lexer').Lexer; - -Parser.prototype.compileParseTreePattern = function(pattern, patternRuleIndex, lexer) { - lexer = lexer || null; - if (lexer === null) { - if (this.getTokenStream() !== null) { - var tokenSource = this.getTokenStream().tokenSource; - if (tokenSource instanceof Lexer) { - lexer = tokenSource; + /** + * Remove {@code listener} from the list of parse listeners. + * + *

            If {@code listener} is {@code null} or has not been added as a parse + * listener, this method does nothing.

            + * @param listener the listener to remove + */ + removeParseListener(listener) { + if (this._parseListeners !== null) { + const idx = this._parseListeners.indexOf(listener); + if (idx >= 0) { + this._parseListeners.splice(idx, 1); + } + if (this._parseListeners.length === 0) { + this._parseListeners = null; } } } - if (lexer === null) { - throw "Parser can't discover a lexer to use"; + +// Remove all parse listeners. + removeParseListeners() { + this._parseListeners = null; } - var m = new ParseTreePatternMatcher(lexer, this); - return m.compile(pattern, patternRuleIndex); -}; -Parser.prototype.getInputStream = function() { - return this.getTokenStream(); -}; - -Parser.prototype.setInputStream = function(input) { - this.setTokenStream(input); -}; - -Parser.prototype.getTokenStream = function() { - return this._input; -}; - -// Set the token stream and reset the parser.// -Parser.prototype.setTokenStream = function(input) { - this._input = null; - this.reset(); - this._input = input; -}; - -// Match needs to return the current input symbol, which gets put -// into the label for the associated token ref; e.g., x=ID. -// -Parser.prototype.getCurrentToken = function() { - return this._input.LT(1); -}; - -Parser.prototype.notifyErrorListeners = function(msg, offendingToken, err) { - offendingToken = offendingToken || null; - err = err || null; - if (offendingToken === null) { - offendingToken = this.getCurrentToken(); - } - this._syntaxErrors += 1; - var line = offendingToken.line; - var column = offendingToken.column; - var listener = this.getErrorListenerDispatch(); - listener.syntaxError(this, offendingToken, line, column, msg, err); -}; - -// -// Consume and return the {@linkplain //getCurrentToken current symbol}. -// -//

            E.g., given the following input with {@code A} being the current -// lookahead symbol, this function moves the cursor to {@code B} and returns -// {@code A}.

            -// -//
            -// A B
            -// ^
            -// 
            -// -// If the parser is not in error recovery mode, the consumed symbol is added -// to the parse tree using {@link ParserRuleContext//addChild(Token)}, and -// {@link ParseTreeListener//visitTerminal} is called on any parse listeners. -// If the parser is in error recovery mode, the consumed symbol is -// added to the parse tree using -// {@link ParserRuleContext//addErrorNode(Token)}, and -// {@link ParseTreeListener//visitErrorNode} is called on any parse -// listeners. -// -Parser.prototype.consume = function() { - var o = this.getCurrentToken(); - if (o.type !== Token.EOF) { - this.getInputStream().consume(); - } - var hasListener = this._parseListeners !== null && this._parseListeners.length > 0; - if (this.buildParseTrees || hasListener) { - var node; - if (this._errHandler.inErrorRecoveryMode(this)) { - node = this._ctx.addErrorNode(o); - } else { - node = this._ctx.addTokenNode(o); - } - node.invokingState = this.state; - if (hasListener) { +// Notify any parse listeners of an enter rule event. + triggerEnterRuleEvent() { + if (this._parseListeners !== null) { + const ctx = this._ctx; this._parseListeners.map(function(listener) { - if (node instanceof ErrorNode || (node.isErrorNode !== undefined && node.isErrorNode())) { - listener.visitErrorNode(node); - } else if (node instanceof TerminalNode) { - listener.visitTerminal(node); - } + listener.enterEveryRule(ctx); + ctx.enterRule(listener); }); } } - return o; -}; -Parser.prototype.addContextToParseTree = function() { - // add current context to parent if we have a parent - if (this._ctx.parentCtx !== null) { - this._ctx.parentCtx.addChild(this._ctx); + /** + * Notify any parse listeners of an exit rule event. 
+ * @see //addParseListener + */ + triggerExitRuleEvent() { + if (this._parseListeners !== null) { + // reverse order walk of listeners + const ctx = this._ctx; + this._parseListeners.slice(0).reverse().map(function(listener) { + ctx.exitRule(listener); + listener.exitEveryRule(ctx); + }); + } } -}; -// Always called by generated parsers upon entry to a rule. Access field -// {@link //_ctx} get the current context. - -Parser.prototype.enterRule = function(localctx, state, ruleIndex) { - this.state = state; - this._ctx = localctx; - this._ctx.start = this._input.LT(1); - if (this.buildParseTrees) { - this.addContextToParseTree(); + getTokenFactory() { + return this._input.tokenSource._factory; } - if (this._parseListeners !== null) { - this.triggerEnterRuleEvent(); - } -}; -Parser.prototype.exitRule = function() { - this._ctx.stop = this._input.LT(-1); - // trigger event on _ctx, before it reverts to parent - if (this._parseListeners !== null) { - this.triggerExitRuleEvent(); + // Tell our token source and error strategy about a new way to create tokens. + setTokenFactory(factory) { + this._input.tokenSource._factory = factory; } - this.state = this._ctx.invokingState; - this._ctx = this._ctx.parentCtx; -}; -Parser.prototype.enterOuterAlt = function(localctx, altNum) { - localctx.setAltNumber(altNum); - // if we have new localctx, make sure we replace existing ctx - // that is previous child of parse tree - if (this.buildParseTrees && this._ctx !== localctx) { + /** + * The ATN with bypass alternatives is expensive to create so we create it + * lazily. + * + * @throws UnsupportedOperationException if the current parser does not + * implement the {@link //getSerializedATN()} method. 
+ */ + getATNWithBypassAlts() { + const serializedAtn = this.getSerializedATN(); + if (serializedAtn === null) { + throw "The current parser does not support an ATN with bypass alternatives."; + } + let result = this.bypassAltsAtnCache[serializedAtn]; + if (result === null) { + const deserializationOptions = new ATNDeserializationOptions(); + deserializationOptions.generateRuleBypassTransitions = true; + result = new ATNDeserializer(deserializationOptions) + .deserialize(serializedAtn); + this.bypassAltsAtnCache[serializedAtn] = result; + } + return result; + } + + /** + * The preferred method of getting a tree pattern. For example, here's a + * sample use: + * + *
            +	 * ParseTree t = parser.expr();
            +	 * ParseTreePattern p = parser.compileParseTreePattern("<ID>+0",
            +	 * MyParser.RULE_expr);
            +	 * ParseTreeMatch m = p.match(t);
            +	 * String id = m.get("ID");
            +	 * 
            + */ + compileParseTreePattern(pattern, patternRuleIndex, lexer) { + lexer = lexer || null; + if (lexer === null) { + if (this.getTokenStream() !== null) { + const tokenSource = this.getTokenStream().tokenSource; + if (tokenSource instanceof Lexer) { + lexer = tokenSource; + } + } + } + if (lexer === null) { + throw "Parser can't discover a lexer to use"; + } + const m = new ParseTreePatternMatcher(lexer, this); + return m.compile(pattern, patternRuleIndex); + } + + getInputStream() { + return this.getTokenStream(); + } + + setInputStream(input) { + this.setTokenStream(input); + } + + getTokenStream() { + return this._input; + } + + // Set the token stream and reset the parser. + setTokenStream(input) { + this._input = null; + this.reset(); + this._input = input; + } + + /** + * Match needs to return the current input symbol, which gets put + * into the label for the associated token ref; e.g., x=ID. + */ + getCurrentToken() { + return this._input.LT(1); + } + + notifyErrorListeners(msg, offendingToken, err) { + offendingToken = offendingToken || null; + err = err || null; + if (offendingToken === null) { + offendingToken = this.getCurrentToken(); + } + this._syntaxErrors += 1; + const line = offendingToken.line; + const column = offendingToken.column; + const listener = this.getErrorListenerDispatch(); + listener.syntaxError(this, offendingToken, line, column, msg, err); + } + + /** + * Consume and return the {@linkplain //getCurrentToken current symbol}. + * + *

            E.g., given the following input with {@code A} being the current + * lookahead symbol, this function moves the cursor to {@code B} and returns + * {@code A}.

            + * + *
            +	 * A B
            +	 * ^
            +	 * 
            + * + * If the parser is not in error recovery mode, the consumed symbol is added + * to the parse tree using {@link ParserRuleContext//addChild(Token)}, and + * {@link ParseTreeListener//visitTerminal} is called on any parse listeners. + * If the parser is in error recovery mode, the consumed symbol is + * added to the parse tree using + * {@link ParserRuleContext//addErrorNode(Token)}, and + * {@link ParseTreeListener//visitErrorNode} is called on any parse + * listeners. + */ + consume() { + const o = this.getCurrentToken(); + if (o.type !== Token.EOF) { + this.getInputStream().consume(); + } + const hasListener = this._parseListeners !== null && this._parseListeners.length > 0; + if (this.buildParseTrees || hasListener) { + let node; + if (this._errHandler.inErrorRecoveryMode(this)) { + node = this._ctx.addErrorNode(o); + } else { + node = this._ctx.addTokenNode(o); + } + node.invokingState = this.state; + if (hasListener) { + this._parseListeners.map(function(listener) { + if (node instanceof ErrorNode || (node.isErrorNode !== undefined && node.isErrorNode())) { + listener.visitErrorNode(node); + } else if (node instanceof TerminalNode) { + listener.visitTerminal(node); + } + }); + } + } + return o; + } + + addContextToParseTree() { + // add current context to parent if we have a parent if (this._ctx.parentCtx !== null) { - this._ctx.parentCtx.removeLastChild(); - this._ctx.parentCtx.addChild(localctx); + this._ctx.parentCtx.addChild(this._ctx); } } - this._ctx = localctx; -}; -// Get the precedence level for the top-most precedence rule. -// -// @return The precedence level for the top-most precedence rule, or -1 if -// the parser context is not nested within a precedence rule. - -Parser.prototype.getPrecedence = function() { - if (this._precedenceStack.length === 0) { - return -1; - } else { - return this._precedenceStack[this._precedenceStack.length-1]; + /** + * Always called by generated parsers upon entry to a rule. 
Access field + * {@link //_ctx} get the current context. + */ + enterRule(localctx, state, ruleIndex) { + this.state = state; + this._ctx = localctx; + this._ctx.start = this._input.LT(1); + if (this.buildParseTrees) { + this.addContextToParseTree(); + } + if (this._parseListeners !== null) { + this.triggerEnterRuleEvent(); + } } -}; -Parser.prototype.enterRecursionRule = function(localctx, state, ruleIndex, - precedence) { - this.state = state; - this._precedenceStack.push(precedence); - this._ctx = localctx; - this._ctx.start = this._input.LT(1); - if (this._parseListeners !== null) { - this.triggerEnterRuleEvent(); // simulates rule entry for - // left-recursive rules - } -}; - -// -// Like {@link //enterRule} but for recursive rules. - -Parser.prototype.pushNewRecursionContext = function(localctx, state, ruleIndex) { - var previous = this._ctx; - previous.parentCtx = localctx; - previous.invokingState = state; - previous.stop = this._input.LT(-1); - - this._ctx = localctx; - this._ctx.start = previous.start; - if (this.buildParseTrees) { - this._ctx.addChild(previous); - } - if (this._parseListeners !== null) { - this.triggerEnterRuleEvent(); // simulates rule entry for - // left-recursive rules - } -}; - -Parser.prototype.unrollRecursionContexts = function(parentCtx) { - this._precedenceStack.pop(); - this._ctx.stop = this._input.LT(-1); - var retCtx = this._ctx; // save current ctx (return value) - // unroll so _ctx is as it was before call to recursive method - if (this._parseListeners !== null) { - while (this._ctx !== parentCtx) { + exitRule() { + this._ctx.stop = this._input.LT(-1); + // trigger event on _ctx, before it reverts to parent + if (this._parseListeners !== null) { this.triggerExitRuleEvent(); - this._ctx = this._ctx.parentCtx; } - } else { - this._ctx = parentCtx; + this.state = this._ctx.invokingState; + this._ctx = this._ctx.parentCtx; } - // hook into tree - retCtx.parentCtx = parentCtx; - if (this.buildParseTrees && parentCtx !== null) { - 
// add return ctx into invoking rule's tree - parentCtx.addChild(retCtx); - } -}; -Parser.prototype.getInvokingContext = function(ruleIndex) { - var ctx = this._ctx; - while (ctx !== null) { - if (ctx.ruleIndex === ruleIndex) { - return ctx; + enterOuterAlt(localctx, altNum) { + localctx.setAltNumber(altNum); + // if we have new localctx, make sure we replace existing ctx + // that is previous child of parse tree + if (this.buildParseTrees && this._ctx !== localctx) { + if (this._ctx.parentCtx !== null) { + this._ctx.parentCtx.removeLastChild(); + this._ctx.parentCtx.addChild(localctx); + } } - ctx = ctx.parentCtx; + this._ctx = localctx; } - return null; -}; -Parser.prototype.precpred = function(localctx, precedence) { - return precedence >= this._precedenceStack[this._precedenceStack.length-1]; -}; - -Parser.prototype.inContext = function(context) { - // TODO: useful in parser? - return false; -}; - -// -// Checks whether or not {@code symbol} can follow the current state in the -// ATN. The behavior of this method is equivalent to the following, but is -// implemented such that the complete context-sensitive follow set does not -// need to be explicitly constructed. -// -//
            -// return getExpectedTokens().contains(symbol);
            -// 
            -// -// @param symbol the symbol type to check -// @return {@code true} if {@code symbol} can follow the current state in -// the ATN, otherwise {@code false}. - -Parser.prototype.isExpectedToken = function(symbol) { - var atn = this._interp.atn; - var ctx = this._ctx; - var s = atn.states[this.state]; - var following = atn.nextTokens(s); - if (following.contains(symbol)) { - return true; + /** + * Get the precedence level for the top-most precedence rule. + * + * @return The precedence level for the top-most precedence rule, or -1 if + * the parser context is not nested within a precedence rule. + */ + getPrecedence() { + if (this._precedenceStack.length === 0) { + return -1; + } else { + return this._precedenceStack[this._precedenceStack.length-1]; + } } - if (!following.contains(Token.EPSILON)) { + + enterRecursionRule(localctx, state, ruleIndex, precedence) { + this.state = state; + this._precedenceStack.push(precedence); + this._ctx = localctx; + this._ctx.start = this._input.LT(1); + if (this._parseListeners !== null) { + this.triggerEnterRuleEvent(); // simulates rule entry for + // left-recursive rules + } + } + + // Like {@link //enterRule} but for recursive rules. 
+ pushNewRecursionContext(localctx, state, ruleIndex) { + const previous = this._ctx; + previous.parentCtx = localctx; + previous.invokingState = state; + previous.stop = this._input.LT(-1); + + this._ctx = localctx; + this._ctx.start = previous.start; + if (this.buildParseTrees) { + this._ctx.addChild(previous); + } + if (this._parseListeners !== null) { + this.triggerEnterRuleEvent(); // simulates rule entry for + // left-recursive rules + } + } + + unrollRecursionContexts(parentCtx) { + this._precedenceStack.pop(); + this._ctx.stop = this._input.LT(-1); + const retCtx = this._ctx; // save current ctx (return value) + // unroll so _ctx is as it was before call to recursive method + if (this._parseListeners !== null) { + while (this._ctx !== parentCtx) { + this.triggerExitRuleEvent(); + this._ctx = this._ctx.parentCtx; + } + } else { + this._ctx = parentCtx; + } + // hook into tree + retCtx.parentCtx = parentCtx; + if (this.buildParseTrees && parentCtx !== null) { + // add return ctx into invoking rule's tree + parentCtx.addChild(retCtx); + } + } + + getInvokingContext(ruleIndex) { + let ctx = this._ctx; + while (ctx !== null) { + if (ctx.ruleIndex === ruleIndex) { + return ctx; + } + ctx = ctx.parentCtx; + } + return null; + } + + precpred(localctx, precedence) { + return precedence >= this._precedenceStack[this._precedenceStack.length-1]; + } + + inContext(context) { + // TODO: useful in parser? return false; } - while (ctx !== null && ctx.invokingState >= 0 && following.contains(Token.EPSILON)) { - var invokingState = atn.states[ctx.invokingState]; - var rt = invokingState.transitions[0]; - following = atn.nextTokens(rt.followState); + + /** + * Checks whether or not {@code symbol} can follow the current state in the + * ATN. The behavior of this method is equivalent to the following, but is + * implemented such that the complete context-sensitive follow set does not + * need to be explicitly constructed. + * + *
            +	 * return getExpectedTokens().contains(symbol);
            +	 * 
            + * + * @param symbol the symbol type to check + * @return {@code true} if {@code symbol} can follow the current state in + * the ATN, otherwise {@code false}. + */ + isExpectedToken(symbol) { + const atn = this._interp.atn; + let ctx = this._ctx; + const s = atn.states[this.state]; + let following = atn.nextTokens(s); if (following.contains(symbol)) { return true; } - ctx = ctx.parentCtx; - } - if (following.contains(Token.EPSILON) && symbol === Token.EOF) { - return true; - } else { - return false; - } -}; - -// Computes the set of input symbols which could follow the current parser -// state and context, as given by {@link //getState} and {@link //getContext}, -// respectively. -// -// @see ATN//getExpectedTokens(int, RuleContext) -// -Parser.prototype.getExpectedTokens = function() { - return this._interp.atn.getExpectedTokens(this.state, this._ctx); -}; - -Parser.prototype.getExpectedTokensWithinCurrentRule = function() { - var atn = this._interp.atn; - var s = atn.states[this.state]; - return atn.nextTokens(s); -}; - -// Get a rule's index (i.e., {@code RULE_ruleName} field) or -1 if not found.// -Parser.prototype.getRuleIndex = function(ruleName) { - var ruleIndex = this.getRuleIndexMap()[ruleName]; - if (ruleIndex !== null) { - return ruleIndex; - } else { - return -1; - } -}; - -// Return List<String> of the rule names in your parser instance -// leading up to a call to the current rule. You could override if -// you want more details such as the file/line info of where -// in the ATN a rule is invoked. -// -// this is very useful for error messages. 
-// -Parser.prototype.getRuleInvocationStack = function(p) { - p = p || null; - if (p === null) { - p = this._ctx; - } - var stack = []; - while (p !== null) { - // compute what follows who invoked us - var ruleIndex = p.ruleIndex; - if (ruleIndex < 0) { - stack.push("n/a"); - } else { - stack.push(this.ruleNames[ruleIndex]); + if (!following.contains(Token.EPSILON)) { + return false; } - p = p.parentCtx; - } - return stack; -}; - -// For debugging and other purposes.// -Parser.prototype.getDFAStrings = function() { - return this._interp.decisionToDFA.toString(); -}; -// For debugging and other purposes.// -Parser.prototype.dumpDFA = function() { - var seenOne = false; - for (var i = 0; i < this._interp.decisionToDFA.length; i++) { - var dfa = this._interp.decisionToDFA[i]; - if (dfa.states.length > 0) { - if (seenOne) { - console.log(); + while (ctx !== null && ctx.invokingState >= 0 && following.contains(Token.EPSILON)) { + const invokingState = atn.states[ctx.invokingState]; + const rt = invokingState.transitions[0]; + following = atn.nextTokens(rt.followState); + if (following.contains(symbol)) { + return true; } - this.printer.println("Decision " + dfa.decision + ":"); - this.printer.print(dfa.toString(this.literalNames, this.symbolicNames)); - seenOne = true; + ctx = ctx.parentCtx; + } + if (following.contains(Token.EPSILON) && symbol === Token.EOF) { + return true; + } else { + return false; } } -}; -/* -" printer = function() {\r\n" + -" this.println = function(s) { document.getElementById('output') += s + '\\n'; }\r\n" + -" this.print = function(s) { document.getElementById('output') += s; }\r\n" + -" };\r\n" + -*/ + /** + * Computes the set of input symbols which could follow the current parser + * state and context, as given by {@link //getState} and {@link //getContext}, + * respectively. 
+ * + * @see ATN//getExpectedTokens(int, RuleContext) + */ + getExpectedTokens() { + return this._interp.atn.getExpectedTokens(this.state, this._ctx); + } -Parser.prototype.getSourceName = function() { - return this._input.sourceName; -}; + getExpectedTokensWithinCurrentRule() { + const atn = this._interp.atn; + const s = atn.states[this.state]; + return atn.nextTokens(s); + } -// During a parse is sometimes useful to listen in on the rule entry and exit -// events as well as token matches. this is for quick and dirty debugging. -// -Parser.prototype.setTrace = function(trace) { - if (!trace) { - this.removeParseListener(this._tracer); - this._tracer = null; - } else { - if (this._tracer !== null) { + // Get a rule's index (i.e., {@code RULE_ruleName} field) or -1 if not found. + getRuleIndex(ruleName) { + const ruleIndex = this.getRuleIndexMap()[ruleName]; + if (ruleIndex !== null) { + return ruleIndex; + } else { + return -1; + } + } + + /** + * Return List<String> of the rule names in your parser instance + * leading up to a call to the current rule. You could override if + * you want more details such as the file/line info of where + * in the ATN a rule is invoked. + * + * this is very useful for error messages. + */ + getRuleInvocationStack(p) { + p = p || null; + if (p === null) { + p = this._ctx; + } + const stack = []; + while (p !== null) { + // compute what follows who invoked us + const ruleIndex = p.ruleIndex; + if (ruleIndex < 0) { + stack.push("n/a"); + } else { + stack.push(this.ruleNames[ruleIndex]); + } + p = p.parentCtx; + } + return stack; + } + + // For debugging and other purposes. + getDFAStrings() { + return this._interp.decisionToDFA.toString(); + } + + // For debugging and other purposes. 
+ dumpDFA() { + let seenOne = false; + for (let i = 0; i < this._interp.decisionToDFA.length; i++) { + const dfa = this._interp.decisionToDFA[i]; + if (dfa.states.length > 0) { + if (seenOne) { + console.log(); + } + this.printer.println("Decision " + dfa.decision + ":"); + this.printer.print(dfa.toString(this.literalNames, this.symbolicNames)); + seenOne = true; + } + } + } + + /* + " printer = function() {\r\n" + + " this.println = function(s) { document.getElementById('output') += s + '\\n'; }\r\n" + + " this.print = function(s) { document.getElementById('output') += s; }\r\n" + + " };\r\n" + + */ + getSourceName() { + return this._input.sourceName; + } + + /** + * During a parse is sometimes useful to listen in on the rule entry and exit + * events as well as token matches. this is for quick and dirty debugging. + */ + setTrace(trace) { + if (!trace) { this.removeParseListener(this._tracer); + this._tracer = null; + } else { + if (this._tracer !== null) { + this.removeParseListener(this._tracer); + } + this._tracer = new TraceListener(this); + this.addParseListener(this._tracer); } - this._tracer = new TraceListener(this); - this.addParseListener(this._tracer); } -}; +} -exports.Parser = Parser; \ No newline at end of file +/** + * this field maps from the serialized ATN string to the deserialized {@link + * ATN} with + * bypass alternatives. + * + * @see ATNDeserializationOptions//isGenerateRuleBypassTransitions() + */ +Parser.bypassAltsAtnCache = {}; + +module.exports = Parser; diff --git a/runtime/JavaScript/src/antlr4/ParserRuleContext.js b/runtime/JavaScript/src/antlr4/ParserRuleContext.js index 97e31c74a..d0fce90b7 100644 --- a/runtime/JavaScript/src/antlr4/ParserRuleContext.js +++ b/runtime/JavaScript/src/antlr4/ParserRuleContext.js @@ -3,223 +3,223 @@ * can be found in the LICENSE.txt file in the project root. */ -//* A rule invocation record for parsing. -// -// Contains all of the information about the current rule not stored in the -// RuleContext. 
It handles parse tree children list, Any ATN state -// tracing, and the default values available for rule indications: -// start, stop, rule index, current alt number, current -// ATN state. -// -// Subclasses made for each rule and grammar track the parameters, -// return values, locals, and labels specific to that rule. These -// are the objects that are returned from rules. -// -// Note text is not an actual field of a rule return value; it is computed -// from start and stop using the input stream's toString() method. I -// could add a ctor to this so that we can pass in and store the input -// stream, but I'm not sure we want to do that. It would seem to be undefined -// to get the .text property anyway if the rule matches tokens from multiple -// input streams. -// -// I do not use getters for fields of objects that are used simply to -// group values such as this aggregate. The getters/setters are there to -// satisfy the superclass interface. +const RuleContext = require('./RuleContext'); +const Tree = require('./tree/Tree'); +const INVALID_INTERVAL = Tree.INVALID_INTERVAL; +const TerminalNode = Tree.TerminalNode; +const TerminalNodeImpl = Tree.TerminalNodeImpl; +const ErrorNodeImpl = Tree.ErrorNodeImpl; +const Interval = require("./IntervalSet").Interval; -var RuleContext = require('./RuleContext').RuleContext; -var Tree = require('./tree/Tree'); -var INVALID_INTERVAL = Tree.INVALID_INTERVAL; -var TerminalNode = Tree.TerminalNode; -var TerminalNodeImpl = Tree.TerminalNodeImpl; -var ErrorNodeImpl = Tree.ErrorNodeImpl; -var Interval = require("./IntervalSet").Interval; - -function ParserRuleContext(parent, invokingStateNumber) { - parent = parent || null; - invokingStateNumber = invokingStateNumber || null; - RuleContext.call(this, parent, invokingStateNumber); - this.ruleIndex = -1; - // * If we are debugging or building a parse tree for a visitor, - // we need to track all of the tokens and rule invocations associated - // with this rule's context. 
This is empty for parsing w/o tree constr. - // operation because we don't the need to track the details about - // how we parse this rule. - // / - this.children = null; - this.start = null; - this.stop = null; - // The exception that forced this rule to return. If the rule successfully - // completed, this is {@code null}. - this.exception = null; -} - -ParserRuleContext.prototype = Object.create(RuleContext.prototype); -ParserRuleContext.prototype.constructor = ParserRuleContext; - -// * COPY a ctx (I'm deliberately not using copy constructor)/// -ParserRuleContext.prototype.copyFrom = function(ctx) { - // from RuleContext - this.parentCtx = ctx.parentCtx; - this.invokingState = ctx.invokingState; - this.children = null; - this.start = ctx.start; - this.stop = ctx.stop; - // copy any error nodes to alt label node - if(ctx.children) { - this.children = []; - // reset parent pointer for any error nodes - ctx.children.map(function(child) { - if (child instanceof ErrorNodeImpl) { - this.children.push(child); - child.parentCtx = this; - } - }, this); +/** + * A rule invocation record for parsing. + * + * Contains all of the information about the current rule not stored in the + * RuleContext. It handles parse tree children list, Any ATN state + * tracing, and the default values available for rule indications: + * start, stop, rule index, current alt number, current + * ATN state. + * + * Subclasses made for each rule and grammar track the parameters, + * return values, locals, and labels specific to that rule. These + * are the objects that are returned from rules. + * + * Note text is not an actual field of a rule return value; it is computed + * from start and stop using the input stream's toString() method. I + * could add a ctor to this so that we can pass in and store the input + * stream, but I'm not sure we want to do that. It would seem to be undefined + * to get the .text property anyway if the rule matches tokens from multiple + * input streams. 
+ * + * I do not use getters for fields of objects that are used simply to + * group values such as this aggregate. The getters/setters are there to + * satisfy the superclass interface. + */ +class ParserRuleContext extends RuleContext { + constructor(parent, invokingStateNumber) { + parent = parent || null; + invokingStateNumber = invokingStateNumber || null; + super(parent, invokingStateNumber); + this.ruleIndex = -1; + /** + * If we are debugging or building a parse tree for a visitor, + * we need to track all of the tokens and rule invocations associated + * with this rule's context. This is empty for parsing w/o tree constr. + * operation because we don't the need to track the details about + * how we parse this rule. + */ + this.children = null; + this.start = null; + this.stop = null; + /** + * The exception that forced this rule to return. If the rule successfully + * completed, this is {@code null}. + */ + this.exception = null; } -}; -// Double dispatch methods for listeners -ParserRuleContext.prototype.enterRule = function(listener) { -}; - -ParserRuleContext.prototype.exitRule = function(listener) { -}; - -// * Does not set parent link; other add methods do that/// -ParserRuleContext.prototype.addChild = function(child) { - if (this.children === null) { - this.children = []; - } - this.children.push(child); - return child; -}; - -// * Used by enterOuterAlt to toss out a RuleContext previously added as -// we entered a rule. If we have // label, we will need to remove -// generic ruleContext object. 
-// / -ParserRuleContext.prototype.removeLastChild = function() { - if (this.children !== null) { - this.children.pop(); - } -}; - -ParserRuleContext.prototype.addTokenNode = function(token) { - var node = new TerminalNodeImpl(token); - this.addChild(node); - node.parentCtx = this; - return node; -}; - -ParserRuleContext.prototype.addErrorNode = function(badToken) { - var node = new ErrorNodeImpl(badToken); - this.addChild(node); - node.parentCtx = this; - return node; -}; - -ParserRuleContext.prototype.getChild = function(i, type) { - type = type || null; - if (this.children === null || i < 0 || i >= this.children.length) { - return null; - } - if (type === null) { - return this.children[i]; - } else { - for(var j=0; j= this.children.length) { - return null; } - for(var j=0; j= this.children.length) { + return null; + } + if (type === null) { + return this.children[i]; + } else { + for(let j=0; j= this.children.length) { + return null; + } + for(let j=0; j + * private int referenceHashCode() { + * int hash = {@link MurmurHash//initialize MurmurHash.initialize}({@link + * //INITIAL_HASH}); + * + * for (int i = 0; i < {@link //size()}; i++) { + * hash = {@link MurmurHash//update MurmurHash.update}(hash, {@link //getParent + * getParent}(i)); + * } + * + * for (int i = 0; i < {@link //size()}; i++) { + * hash = {@link MurmurHash//update MurmurHash.update}(hash, {@link + * //getReturnState getReturnState}(i)); + * } + * + * hash = {@link MurmurHash//finish MurmurHash.finish}(hash, 2// {@link + * //size()}); + * return hash; + * } + * + * This means only the {@link //EMPTY} context is in set. + */ + isEmpty() { + return this === PredictionContext.EMPTY; + } + + hasEmptyPath() { + return this.getReturnState(this.length - 1) === PredictionContext.EMPTY_RETURN_STATE; + } + + hashCode() { + return this.cachedHashCode; + } + + updateHashCode(hash) { + hash.update(this.cachedHashCode); + } } -// Represents {@code $} in local context prediction, which means wildcard. 
-// {@code//+x =//}. -// / +/** + * Represents {@code $} in local context prediction, which means wildcard. + * {@code//+x =//}. + */ PredictionContext.EMPTY = null; -// Represents {@code $} in an array in full context mode, when {@code $} -// doesn't mean wildcard: {@code $ + x = [$,x]}. Here, -// {@code $} = {@link //EMPTY_RETURN_STATE}. -// / +/** + * Represents {@code $} in an array in full context mode, when {@code $} + * doesn't mean wildcard: {@code $ + x = [$,x]}. Here, + * {@code $} = {@link //EMPTY_RETURN_STATE}. + */ PredictionContext.EMPTY_RETURN_STATE = 0x7FFFFFFF; PredictionContext.globalNodeCount = 1; PredictionContext.id = PredictionContext.globalNodeCount; -// Stores the computed hash code of this {@link PredictionContext}. The hash -// code is computed in parts to match the following reference algorithm. -// -//
            -// private int referenceHashCode() {
            -// int hash = {@link MurmurHash//initialize MurmurHash.initialize}({@link
            -// //INITIAL_HASH});
            -//
            -// for (int i = 0; i < {@link //size()}; i++) {
            -// hash = {@link MurmurHash//update MurmurHash.update}(hash, {@link //getParent
            -// getParent}(i));
            -// }
            -//
            -// for (int i = 0; i < {@link //size()}; i++) {
            -// hash = {@link MurmurHash//update MurmurHash.update}(hash, {@link
            -// //getReturnState getReturnState}(i));
            -// }
            -//
            -// hash = {@link MurmurHash//finish MurmurHash.finish}(hash, 2// {@link
            -// //size()});
            -// return hash;
            -// }
            -// 
            -// / -// This means only the {@link //EMPTY} context is in set. -PredictionContext.prototype.isEmpty = function() { - return this === PredictionContext.EMPTY; -}; - -PredictionContext.prototype.hasEmptyPath = function() { - return this.getReturnState(this.length - 1) === PredictionContext.EMPTY_RETURN_STATE; -}; - -PredictionContext.prototype.hashCode = function() { - return this.cachedHashCode; -}; - - -PredictionContext.prototype.updateHashCode = function(hash) { - hash.update(this.cachedHashCode); -}; /* function calculateHashString(parent, returnState) { return "" + parent + returnState; } */ -// Used to cache {@link PredictionContext} objects. Its used for the shared -// context cash associated with contexts in DFA states. This cache -// can be used for both lexers and parsers. +/** + * Used to cache {@link PredictionContext} objects. Its used for the shared + * context cash associated with contexts in DFA states. This cache + * can be used for both lexers and parsers. + */ +class PredictionContextCache { -function PredictionContextCache() { - this.cache = new Map(); - return this; -} - -// Add a context to the cache and return it. If the context already exists, -// return that one instead and do not add a new context to the cache. -// Protect shared cache from unsafe thread access. -// -PredictionContextCache.prototype.add = function(ctx) { - if (ctx === PredictionContext.EMPTY) { - return PredictionContext.EMPTY; + constructor() { + this.cache = new Map(); } - var existing = this.cache.get(ctx) || null; - if (existing !== null) { - return existing; + + /** + * Add a context to the cache and return it. If the context already exists, + * return that one instead and do not add a new context to the cache. + * Protect shared cache from unsafe thread access. 
+ */ + add(ctx) { + if (ctx === PredictionContext.EMPTY) { + return PredictionContext.EMPTY; + } + const existing = this.cache.get(ctx) || null; + if (existing !== null) { + return existing; + } + this.cache.put(ctx, ctx); + return ctx; } - this.cache.put(ctx, ctx); - return ctx; -}; -PredictionContextCache.prototype.get = function(ctx) { - return this.cache.get(ctx) || null; -}; + get(ctx) { + return this.cache.get(ctx) || null; + } -Object.defineProperty(PredictionContextCache.prototype, "length", { - get : function() { + get length(){ return this.cache.length; } -}); - -function SingletonPredictionContext(parent, returnState) { - var hashCode = 0; - var hash = new Hash(); - if(parent !== null) { - hash.update(parent, returnState); - } else { - hash.update(1); - } - hashCode = hash.finish(); - PredictionContext.call(this, hashCode); - this.parentCtx = parent; - this.returnState = returnState; } -SingletonPredictionContext.prototype = Object.create(PredictionContext.prototype); -SingletonPredictionContext.prototype.contructor = SingletonPredictionContext; -SingletonPredictionContext.create = function(parent, returnState) { - if (returnState === PredictionContext.EMPTY_RETURN_STATE && parent === null) { - // someone can pass in the bits of an array ctx that mean $ - return PredictionContext.EMPTY; - } else { - return new SingletonPredictionContext(parent, returnState); +class SingletonPredictionContext extends PredictionContext { + + constructor(parent, returnState) { + let hashCode = 0; + const hash = new Hash(); + if(parent !== null) { + hash.update(parent, returnState); + } else { + hash.update(1); + } + hashCode = hash.finish(); + super(hashCode); + this.parentCtx = parent; + this.returnState = returnState; } -}; -Object.defineProperty(SingletonPredictionContext.prototype, "length", { - get : function() { + getParent(index) { + return this.parentCtx; + } + + getReturnState(index) { + return this.returnState; + } + + equals(other) { + if (this === other) { + 
return true; + } else if (!(other instanceof SingletonPredictionContext)) { + return false; + } else if (this.hashCode() !== other.hashCode()) { + return false; // can't be same if hash is different + } else { + if(this.returnState !== other.returnState) + return false; + else if(this.parentCtx==null) + return other.parentCtx==null + else + return this.parentCtx.equals(other.parentCtx); + } + } + + toString() { + const up = this.parentCtx === null ? "" : this.parentCtx.toString(); + if (up.length === 0) { + if (this.returnState === PredictionContext.EMPTY_RETURN_STATE) { + return "$"; + } else { + return "" + this.returnState; + } + } else { + return "" + this.returnState + " " + up; + } + } + + get length(){ return 1; } -}); -SingletonPredictionContext.prototype.getParent = function(index) { - return this.parentCtx; -}; - -SingletonPredictionContext.prototype.getReturnState = function(index) { - return this.returnState; -}; - -SingletonPredictionContext.prototype.equals = function(other) { - if (this === other) { - return true; - } else if (!(other instanceof SingletonPredictionContext)) { - return false; - } else if (this.hashCode() !== other.hashCode()) { - return false; // can't be same if hash is different - } else { - if(this.returnState !== other.returnState) - return false; - else if(this.parentCtx==null) - return other.parentCtx==null - else - return this.parentCtx.equals(other.parentCtx); - } -}; - -SingletonPredictionContext.prototype.toString = function() { - var up = this.parentCtx === null ? 
"" : this.parentCtx.toString(); - if (up.length === 0) { - if (this.returnState === PredictionContext.EMPTY_RETURN_STATE) { - return "$"; + static create(parent, returnState) { + if (returnState === PredictionContext.EMPTY_RETURN_STATE && parent === null) { + // someone can pass in the bits of an array ctx that mean $ + return PredictionContext.EMPTY; } else { - return "" + this.returnState; + return new SingletonPredictionContext(parent, returnState); } - } else { - return "" + this.returnState + " " + up; } -}; - -function EmptyPredictionContext() { - SingletonPredictionContext.call(this, null, PredictionContext.EMPTY_RETURN_STATE); - return this; } -EmptyPredictionContext.prototype = Object.create(SingletonPredictionContext.prototype); -EmptyPredictionContext.prototype.constructor = EmptyPredictionContext; +class EmptyPredictionContext extends SingletonPredictionContext { -EmptyPredictionContext.prototype.isEmpty = function() { - return true; -}; + constructor() { + super(null, PredictionContext.EMPTY_RETURN_STATE); + } -EmptyPredictionContext.prototype.getParent = function(index) { - return null; -}; + isEmpty() { + return true; + } -EmptyPredictionContext.prototype.getReturnState = function(index) { - return this.returnState; -}; + getParent(index) { + return null; + } -EmptyPredictionContext.prototype.equals = function(other) { - return this === other; -}; + getReturnState(index) { + return this.returnState; + } + + equals(other) { + return this === other; + } + + toString() { + return "$"; + } +} -EmptyPredictionContext.prototype.toString = function() { - return "$"; -}; PredictionContext.EMPTY = new EmptyPredictionContext(); -function ArrayPredictionContext(parents, returnStates) { - // Parent can be null only if full ctx mode and we make an array - // from {@link //EMPTY} and non-empty. We merge {@link //EMPTY} by using - // null parent and - // returnState == {@link //EMPTY_RETURN_STATE}. 
- var h = new Hash(); - h.update(parents, returnStates); - var hashCode = h.finish(); - PredictionContext.call(this, hashCode); - this.parents = parents; - this.returnStates = returnStates; - return this; -} +class ArrayPredictionContext extends PredictionContext { -ArrayPredictionContext.prototype = Object.create(PredictionContext.prototype); -ArrayPredictionContext.prototype.constructor = ArrayPredictionContext; + constructor(parents, returnStates) { + /** + * Parent can be null only if full ctx mode and we make an array + * from {@link //EMPTY} and non-empty. We merge {@link //EMPTY} by using + * null parent and + * returnState == {@link //EMPTY_RETURN_STATE}. + */ + const h = new Hash(); + h.update(parents, returnStates); + const hashCode = h.finish(); + super(hashCode); + this.parents = parents; + this.returnStates = returnStates; + return this; + } -ArrayPredictionContext.prototype.isEmpty = function() { - // since EMPTY_RETURN_STATE can only appear in the last position, we - // don't need to verify that size==1 - return this.returnStates[0] === PredictionContext.EMPTY_RETURN_STATE; -}; + isEmpty() { + // since EMPTY_RETURN_STATE can only appear in the last position, we + // don't need to verify that size==1 + return this.returnStates[0] === PredictionContext.EMPTY_RETURN_STATE; + } -Object.defineProperty(ArrayPredictionContext.prototype, "length", { - get : function() { + getParent(index) { + return this.parents[index]; + } + + getReturnState(index) { + return this.returnStates[index]; + } + + equals(other) { + if (this === other) { + return true; + } else if (!(other instanceof ArrayPredictionContext)) { + return false; + } else if (this.hashCode() !== other.hashCode()) { + return false; // can't be same if hash is different + } else { + return equalArrays(this.returnStates, other.returnStates) && + equalArrays(this.parents, other.parents); + } + } + + toString() { + if (this.isEmpty()) { + return "[]"; + } else { + let s = "["; + for (let i = 0; i < 
this.returnStates.length; i++) { + if (i > 0) { + s = s + ", "; + } + if (this.returnStates[i] === PredictionContext.EMPTY_RETURN_STATE) { + s = s + "$"; + continue; + } + s = s + this.returnStates[i]; + if (this.parents[i] !== null) { + s = s + " " + this.parents[i]; + } else { + s = s + "null"; + } + } + return s + "]"; + } + } + + get length(){ return this.returnStates.length; } -}); +} -ArrayPredictionContext.prototype.getParent = function(index) { - return this.parents[index]; -}; -ArrayPredictionContext.prototype.getReturnState = function(index) { - return this.returnStates[index]; -}; - -ArrayPredictionContext.prototype.equals = function(other) { - if (this === other) { - return true; - } else if (!(other instanceof ArrayPredictionContext)) { - return false; - } else if (this.hashCode() !== other.hashCode()) { - return false; // can't be same if hash is different - } else { - return this.returnStates === other.returnStates && - this.parents === other.parents; - } -}; - -ArrayPredictionContext.prototype.toString = function() { - if (this.isEmpty()) { - return "[]"; - } else { - var s = "["; - for (var i = 0; i < this.returnStates.length; i++) { - if (i > 0) { - s = s + ", "; - } - if (this.returnStates[i] === PredictionContext.EMPTY_RETURN_STATE) { - s = s + "$"; - continue; - } - s = s + this.returnStates[i]; - if (this.parents[i] !== null) { - s = s + " " + this.parents[i]; - } else { - s = s + "null"; - } - } - return s + "]"; - } -}; - -// Convert a {@link RuleContext} tree to a {@link PredictionContext} graph. -// Return {@link //EMPTY} if {@code outerContext} is empty or null. -// / +/** + * Convert a {@link RuleContext} tree to a {@link PredictionContext} graph. + * Return {@link //EMPTY} if {@code outerContext} is empty or null. 
+ */ function predictionContextFromRuleContext(atn, outerContext) { if (outerContext === undefined || outerContext === null) { outerContext = RuleContext.EMPTY; @@ -297,14 +303,14 @@ function predictionContextFromRuleContext(atn, outerContext) { return PredictionContext.EMPTY; } // If we have a parent, convert it to a PredictionContext graph - var parent = predictionContextFromRuleContext(atn, outerContext.parentCtx); - var state = atn.states[outerContext.invokingState]; - var transition = state.transitions[0]; + const parent = predictionContextFromRuleContext(atn, outerContext.parentCtx); + const state = atn.states[outerContext.invokingState]; + const transition = state.transitions[0]; return SingletonPredictionContext.create(parent, transition.followState.stateNumber); } /* function calculateListsHashString(parents, returnStates) { - var s = ""; + const s = ""; parents.map(function(p) { s = s + p; }); @@ -342,40 +348,40 @@ function merge(a, b, rootIsWildcard, mergeCache) { return mergeArrays(a, b, rootIsWildcard, mergeCache); } -// -// Merge two {@link SingletonPredictionContext} instances. -// -//

            Stack tops equal, parents merge is same; return left graph.
            -//

            -// -//

            Same stack top, parents differ; merge parents giving array node, then -// remainders of those graphs. A new root node is created to point to the -// merged parents.
            -//

            -// -//

            Different stack tops pointing to same parent. Make array node for the -// root where both element in the root point to the same (original) -// parent.
            -//

            -// -//

            Different stack tops pointing to different parents. Make array node for -// the root where each element points to the corresponding original -// parent.
            -//

            -// -// @param a the first {@link SingletonPredictionContext} -// @param b the second {@link SingletonPredictionContext} -// @param rootIsWildcard {@code true} if this is a local-context merge, -// otherwise false to indicate a full-context merge -// @param mergeCache -// / +/** + * Merge two {@link SingletonPredictionContext} instances. + * + *

            Stack tops equal, parents merge is same; return left graph.
            + *

            + * + *

            Same stack top, parents differ; merge parents giving array node, then + * remainders of those graphs. A new root node is created to point to the + * merged parents.
            + *

            + * + *

            Different stack tops pointing to same parent. Make array node for the + * root where both element in the root point to the same (original) + * parent.
            + *

            + * + *

            Different stack tops pointing to different parents. Make array node for + * the root where each element points to the corresponding original + * parent.
            + *

            + * + * @param a the first {@link SingletonPredictionContext} + * @param b the second {@link SingletonPredictionContext} + * @param rootIsWildcard {@code true} if this is a local-context merge, + * otherwise false to indicate a full-context merge + * @param mergeCache + */ function mergeSingletons(a, b, rootIsWildcard, mergeCache) { if (mergeCache !== null) { - var previous = mergeCache.get(a, b); + let previous = mergeCache.get(a, b); if (previous !== null) { return previous; } @@ -385,7 +391,7 @@ function mergeSingletons(a, b, rootIsWildcard, mergeCache) { } } - var rootMerge = mergeRoot(a, b, rootIsWildcard); + const rootMerge = mergeRoot(a, b, rootIsWildcard); if (rootMerge !== null) { if (mergeCache !== null) { mergeCache.set(a, b, rootMerge); @@ -393,7 +399,7 @@ function mergeSingletons(a, b, rootIsWildcard, mergeCache) { return rootMerge; } if (a.returnState === b.returnState) { - var parent = merge(a.parentCtx, b.parentCtx, rootIsWildcard, mergeCache); + const parent = merge(a.parentCtx, b.parentCtx, rootIsWildcard, mergeCache); // if parent is same as existing a or b parent or reduced to a parent, // return it if (parent === a.parentCtx) { @@ -406,14 +412,14 @@ function mergeSingletons(a, b, rootIsWildcard, mergeCache) { // merge parents x and y, giving array node with x,y then remainders // of those graphs. 
dup a, a' points at merged array // new joined parent so create new singleton pointing to it, a' - var spc = SingletonPredictionContext.create(parent, a.returnState); + const spc = SingletonPredictionContext.create(parent, a.returnState); if (mergeCache !== null) { mergeCache.set(a, b, spc); } return spc; } else { // a != b payloads differ // see if we can collapse parents due to $+x parents if local ctx - var singleParent = null; + let singleParent = null; if (a === b || (a.parentCtx !== null && a.parentCtx === b.parentCtx)) { // ax + // bx = // [a,b]x @@ -421,13 +427,13 @@ function mergeSingletons(a, b, rootIsWildcard, mergeCache) { } if (singleParent !== null) { // parents are same // sort payloads and use same parent - var payloads = [ a.returnState, b.returnState ]; + const payloads = [ a.returnState, b.returnState ]; if (a.returnState > b.returnState) { payloads[0] = b.returnState; payloads[1] = a.returnState; } - var parents = [ singleParent, singleParent ]; - var apc = new ArrayPredictionContext(parents, payloads); + const parents = [ singleParent, singleParent ]; + const apc = new ArrayPredictionContext(parents, payloads); if (mergeCache !== null) { mergeCache.set(a, b, apc); } @@ -436,14 +442,14 @@ function mergeSingletons(a, b, rootIsWildcard, mergeCache) { // parents differ and can't merge them. Just pack together // into array; can't merge. 
// ax + by = [ax,by] - var payloads = [ a.returnState, b.returnState ]; - var parents = [ a.parentCtx, b.parentCtx ]; + const payloads = [ a.returnState, b.returnState ]; + let parents = [ a.parentCtx, b.parentCtx ]; if (a.returnState > b.returnState) { // sort by payload payloads[0] = b.returnState; payloads[1] = a.returnState; parents = [ b.parentCtx, a.parentCtx ]; } - var a_ = new ArrayPredictionContext(parents, payloads); + const a_ = new ArrayPredictionContext(parents, payloads); if (mergeCache !== null) { mergeCache.set(a, b, a_); } @@ -451,44 +457,44 @@ function mergeSingletons(a, b, rootIsWildcard, mergeCache) { } } -// -// Handle case where at least one of {@code a} or {@code b} is -// {@link //EMPTY}. In the following diagrams, the symbol {@code $} is used -// to represent {@link //EMPTY}. -// -//

            Local-Context Merges

            -// -//

            These local-context merge operations are used when {@code rootIsWildcard} -// is true.

            -// -//

            {@link //EMPTY} is superset of any graph; return {@link //EMPTY}.
            -//

            -// -//

            {@link //EMPTY} and anything is {@code //EMPTY}, so merged parent is -// {@code //EMPTY}; return left graph.
            -//

            -// -//

            Special case of last merge if local context.
            -//

            -// -//

            Full-Context Merges

            -// -//

            These full-context merge operations are used when {@code rootIsWildcard} -// is false.

            -// -//

            -// -//

            Must keep all contexts; {@link //EMPTY} in array is a special value (and -// null parent).
            -//

            -// -//

            -// -// @param a the first {@link SingletonPredictionContext} -// @param b the second {@link SingletonPredictionContext} -// @param rootIsWildcard {@code true} if this is a local-context merge, -// otherwise false to indicate a full-context merge -// / +/** + * Handle case where at least one of {@code a} or {@code b} is + * {@link //EMPTY}. In the following diagrams, the symbol {@code $} is used + * to represent {@link //EMPTY}. + * + *

            Local-Context Merges

            + * + *

            These local-context merge operations are used when {@code rootIsWildcard} + * is true.

            + * + *

            {@link //EMPTY} is superset of any graph; return {@link //EMPTY}.
            + *

            + * + *

            {@link //EMPTY} and anything is {@code //EMPTY}, so merged parent is + * {@code //EMPTY}; return left graph.
            + *

            + * + *

            Special case of last merge if local context.
            + *

            + * + *

            Full-Context Merges

            + * + *

            These full-context merge operations are used when {@code rootIsWildcard} + * is false.

            + * + *

            + * + *

            Must keep all contexts; {@link //EMPTY} in array is a special value (and + * null parent).
            + *

            + * + *

            + * + * @param a the first {@link SingletonPredictionContext} + * @param b the second {@link SingletonPredictionContext} + * @param rootIsWildcard {@code true} if this is a local-context merge, + * otherwise false to indicate a full-context merge + */ function mergeRoot(a, b, rootIsWildcard) { if (rootIsWildcard) { if (a === PredictionContext.EMPTY) { @@ -501,42 +507,42 @@ function mergeRoot(a, b, rootIsWildcard) { if (a === PredictionContext.EMPTY && b === PredictionContext.EMPTY) { return PredictionContext.EMPTY; // $ + $ = $ } else if (a === PredictionContext.EMPTY) { // $ + x = [$,x] - var payloads = [ b.returnState, + const payloads = [ b.returnState, PredictionContext.EMPTY_RETURN_STATE ]; - var parents = [ b.parentCtx, null ]; + const parents = [ b.parentCtx, null ]; return new ArrayPredictionContext(parents, payloads); } else if (b === PredictionContext.EMPTY) { // x + $ = [$,x] ($ is always first if present) - var payloads = [ a.returnState, PredictionContext.EMPTY_RETURN_STATE ]; - var parents = [ a.parentCtx, null ]; + const payloads = [ a.returnState, PredictionContext.EMPTY_RETURN_STATE ]; + const parents = [ a.parentCtx, null ]; return new ArrayPredictionContext(parents, payloads); } } return null; } -// -// Merge two {@link ArrayPredictionContext} instances. -// -//

            Different tops, different parents.
            -//

            -// -//

            Shared top, same parents.
            -//

            -// -//

            Shared top, different parents.
            -//

            -// -//

            Shared top, all shared parents.
            -//

            -// -//

            Equal tops, merge parents and reduce top to -// {@link SingletonPredictionContext}.
            -//

            -// / +/** + * Merge two {@link ArrayPredictionContext} instances. + * + *

            Different tops, different parents.
            + *

            + * + *

            Shared top, same parents.
            + *

            + * + *

            Shared top, different parents.
            + *

            + * + *

            Shared top, all shared parents.
            + *

            + * + *

            Equal tops, merge parents and reduce top to + * {@link SingletonPredictionContext}.
            + *

            + */ function mergeArrays(a, b, rootIsWildcard, mergeCache) { if (mergeCache !== null) { - var previous = mergeCache.get(a, b); + let previous = mergeCache.get(a, b); if (previous !== null) { return previous; } @@ -546,31 +552,30 @@ function mergeArrays(a, b, rootIsWildcard, mergeCache) { } } // merge sorted payloads a + b => M - var i = 0; // walks a - var j = 0; // walks b - var k = 0; // walks target M array + let i = 0; // walks a + let j = 0; // walks b + let k = 0; // walks target M array - var mergedReturnStates = []; - var mergedParents = []; + let mergedReturnStates = []; + let mergedParents = []; // walk and merge to yield mergedParents, mergedReturnStates while (i < a.returnStates.length && j < b.returnStates.length) { - var a_parent = a.parents[i]; - var b_parent = b.parents[j]; - if (a.returnStates[i] === b.returnStates[j]) { + const a_parent = a.parents[i]; + const b_parent = b.parents[j]; + if (equalArrays(a.returnStates[i], b.returnStates[j])) { // same payload (stack tops are equal), must yield merged singleton - var payload = a.returnStates[i]; + const payload = a.returnStates[i]; // $+$ = $ - var bothDollars = payload === PredictionContext.EMPTY_RETURN_STATE && + const bothDollars = payload === PredictionContext.EMPTY_RETURN_STATE && a_parent === null && b_parent === null; - var ax_ax = (a_parent !== null && b_parent !== null && a_parent === b_parent); // ax+ax + const ax_ax = (a_parent !== null && b_parent !== null && a_parent === b_parent); // ax+ax // -> // ax if (bothDollars || ax_ax) { mergedParents[k] = a_parent; // choose left mergedReturnStates[k] = payload; } else { // ax+ay -> a'[x,y] - var mergedParent = merge(a_parent, b_parent, rootIsWildcard, mergeCache); - mergedParents[k] = mergedParent; + mergedParents[k] = merge(a_parent, b_parent, rootIsWildcard, mergeCache); mergedReturnStates[k] = payload; } i += 1; // hop over left one as usual @@ -588,13 +593,13 @@ function mergeArrays(a, b, rootIsWildcard, mergeCache) { } // 
copy over any payloads remaining in either array if (i < a.returnStates.length) { - for (var p = i; p < a.returnStates.length; p++) { + for (let p = i; p < a.returnStates.length; p++) { mergedParents[k] = a.parents[p]; mergedReturnStates[k] = a.returnStates[p]; k += 1; } } else { - for (var p = j; p < b.returnStates.length; p++) { + for (let p = j; p < b.returnStates.length; p++) { mergedParents[k] = b.parents[p]; mergedReturnStates[k] = b.returnStates[p]; k += 1; @@ -603,7 +608,7 @@ function mergeArrays(a, b, rootIsWildcard, mergeCache) { // trim merged if we combined a few that had same stack tops if (k < mergedParents.length) { // write index < last position; trim if (k === 1) { // for just one merged element, return singleton top - var a_ = SingletonPredictionContext.create(mergedParents[0], + const a_ = SingletonPredictionContext.create(mergedParents[0], mergedReturnStates[0]); if (mergeCache !== null) { mergeCache.set(a, b, a_); @@ -614,7 +619,7 @@ function mergeArrays(a, b, rootIsWildcard, mergeCache) { mergedReturnStates = mergedReturnStates.slice(0, k); } - var M = new ArrayPredictionContext(mergedParents, mergedReturnStates); + const M = new ArrayPredictionContext(mergedParents, mergedReturnStates); // if we created same array as a or b, return that instead // TODO: track whether this is possible above during merge sort for speed @@ -638,20 +643,20 @@ function mergeArrays(a, b, rootIsWildcard, mergeCache) { return M; } -// -// Make pass over all M {@code parents}; merge any {@code equals()} -// ones. -// / +/** + * Make pass over all M {@code parents}; merge any {@code equals()} + * ones. 
+ */ function combineCommonParents(parents) { - var uniqueParents = new Map(); + const uniqueParents = new Map(); - for (var p = 0; p < parents.length; p++) { - var parent = parents[p]; + for (let p = 0; p < parents.length; p++) { + const parent = parents[p]; if (!(uniqueParents.containsKey(parent))) { uniqueParents.put(parent, parent); } } - for (var q = 0; q < parents.length; q++) { + for (let q = 0; q < parents.length; q++) { parents[q] = uniqueParents.get(parents[q]); } } @@ -660,7 +665,7 @@ function getCachedPredictionContext(context, contextCache, visited) { if (context.isEmpty()) { return context; } - var existing = visited.get(context) || null; + let existing = visited.get(context) || null; if (existing !== null) { return existing; } @@ -669,14 +674,14 @@ function getCachedPredictionContext(context, contextCache, visited) { visited.put(context, existing); return existing; } - var changed = false; - var parents = []; - for (var i = 0; i < parents.length; i++) { - var parent = getCachedPredictionContext(context.getParent(i), contextCache, visited); + let changed = false; + let parents = []; + for (let i = 0; i < parents.length; i++) { + const parent = getCachedPredictionContext(context.getParent(i), contextCache, visited); if (changed || parent !== context.getParent(i)) { if (!changed) { parents = []; - for (var j = 0; j < context.length; j++) { + for (let j = 0; j < context.length; j++) { parents[j] = context.getParent(j); } changed = true; @@ -689,7 +694,7 @@ function getCachedPredictionContext(context, contextCache, visited) { visited.put(context, context); return context; } - var updated = null; + let updated = null; if (parents.length === 0) { updated = PredictionContext.EMPTY; } else if (parents.length === 1) { @@ -719,16 +724,18 @@ function getAllContextNodes(context, nodes, visited) { } visited.put(context, context); nodes.push(context); - for (var i = 0; i < context.length; i++) { + for (let i = 0; i < context.length; i++) { 
getAllContextNodes(context.getParent(i), nodes, visited); } return nodes; } } -exports.merge = merge; -exports.PredictionContext = PredictionContext; -exports.PredictionContextCache = PredictionContextCache; -exports.SingletonPredictionContext = SingletonPredictionContext; -exports.predictionContextFromRuleContext = predictionContextFromRuleContext; -exports.getCachedPredictionContext = getCachedPredictionContext; +module.exports = { + merge, + PredictionContext, + PredictionContextCache, + SingletonPredictionContext, + predictionContextFromRuleContext, + getCachedPredictionContext +} diff --git a/runtime/JavaScript/src/antlr4/README.md b/runtime/JavaScript/src/antlr4/README.md deleted file mode 100644 index 5205fd423..000000000 --- a/runtime/JavaScript/src/antlr4/README.md +++ /dev/null @@ -1,13 +0,0 @@ -# JavaScript target for ANTLR 4 - -JavaScript runtime libraries for ANTLR 4 - -This runtime is available through npm. The package name is 'antlr4'. - -This runtime has been tested in Node.js, Safari, Firefox, Chrome and IE. - -See www.antlr.org for more information on ANTLR - -See https://github.com/antlr/antlr4/blob/master/doc/javascript-target.md for more information on using ANTLR in JavaScript - - diff --git a/runtime/JavaScript/src/antlr4/Recognizer.js b/runtime/JavaScript/src/antlr4/Recognizer.js index bc2392529..6116b87c3 100644 --- a/runtime/JavaScript/src/antlr4/Recognizer.js +++ b/runtime/JavaScript/src/antlr4/Recognizer.js @@ -1,147 +1,136 @@ -// /* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. * Use of this file is governed by the BSD 3-clause license that * can be found in the LICENSE.txt file in the project root. 
*/ -// -var Token = require('./Token').Token; -var ConsoleErrorListener = require('./error/ErrorListener').ConsoleErrorListener; -var ProxyErrorListener = require('./error/ErrorListener').ProxyErrorListener; +const {Token} = require('./Token'); +const {ConsoleErrorListener} = require('./error/ErrorListener'); +const {ProxyErrorListener} = require('./error/ErrorListener'); -function Recognizer() { - this._listeners = [ ConsoleErrorListener.INSTANCE ]; - this._interp = null; - this._stateNumber = -1; - return this; +class Recognizer { + constructor() { + this._listeners = [ ConsoleErrorListener.INSTANCE ]; + this._interp = null; + this._stateNumber = -1; + } + + checkVersion(toolVersion) { + const runtimeVersion = "4.8"; + if (runtimeVersion!==toolVersion) { + console.log("ANTLR runtime and generated code versions disagree: "+runtimeVersion+"!="+toolVersion); + } + } + + addErrorListener(listener) { + this._listeners.push(listener); + } + + removeErrorListeners() { + this._listeners = []; + } + + getTokenTypeMap() { + const tokenNames = this.getTokenNames(); + if (tokenNames===null) { + throw("The current recognizer does not provide a list of token names."); + } + let result = this.tokenTypeMapCache[tokenNames]; + if(result===undefined) { + result = tokenNames.reduce(function(o, k, i) { o[k] = i; }); + result.EOF = Token.EOF; + this.tokenTypeMapCache[tokenNames] = result; + } + return result; + } + + /** + * Get a map from rule names to rule indexes. + *

            Used for XPath and tree pattern compilation.

            + */ + getRuleIndexMap() { + const ruleNames = this.ruleNames; + if (ruleNames===null) { + throw("The current recognizer does not provide a list of rule names."); + } + let result = this.ruleIndexMapCache[ruleNames]; // todo: should it be Recognizer.ruleIndexMapCache ? + if(result===undefined) { + result = ruleNames.reduce(function(o, k, i) { o[k] = i; }); + this.ruleIndexMapCache[ruleNames] = result; + } + return result; + } + + getTokenType(tokenName) { + const ttype = this.getTokenTypeMap()[tokenName]; + if (ttype !==undefined) { + return ttype; + } else { + return Token.INVALID_TYPE; + } + } + + // What is the error header, normally line/character position information? + getErrorHeader(e) { + const line = e.getOffendingToken().line; + const column = e.getOffendingToken().column; + return "line " + line + ":" + column; + } + + /** + * How should a token be displayed in an error message? The default + * is to display just the text, but during development you might + * want to have a lot of information spit out. Override in that case + * to use t.toString() (which, for CommonToken, dumps everything about + * the token). This is better than forcing you to override a method in + * your token objects because you don't have to go modify your lexer + * so that it creates a new Java type. + * + * @deprecated This method is not called by the ANTLR 4 Runtime. Specific + * implementations of {@link ANTLRErrorStrategy} may provide a similar + * feature when necessary. 
For example, see + * {@link DefaultErrorStrategy//getTokenErrorDisplay}.*/ + getTokenErrorDisplay(t) { + if (t===null) { + return ""; + } + let s = t.text; + if (s===null) { + if (t.type===Token.EOF) { + s = ""; + } else { + s = "<" + t.type + ">"; + } + } + s = s.replace("\n","\\n").replace("\r","\\r").replace("\t","\\t"); + return "'" + s + "'"; + } + + getErrorListenerDispatch() { + return new ProxyErrorListener(this._listeners); + } + + /** + * subclass needs to override these if there are sempreds or actions + * that the ATN interp needs to execute + */ + sempred(localctx, ruleIndex, actionIndex) { + return true; + } + + precpred(localctx , precedence) { + return true; + } + + get state(){ + return this._stateNumber; + } + + set state(state) { + this._stateNumber = state; + } } Recognizer.tokenTypeMapCache = {}; Recognizer.ruleIndexMapCache = {}; - -Recognizer.prototype.checkVersion = function(toolVersion) { - var runtimeVersion = "4.7.2"; - if (runtimeVersion!==toolVersion) { - console.log("ANTLR runtime and generated code versions disagree: "+runtimeVersion+"!="+toolVersion); - } -}; - -Recognizer.prototype.addErrorListener = function(listener) { - this._listeners.push(listener); -}; - -Recognizer.prototype.removeErrorListeners = function() { - this._listeners = []; -}; - -Recognizer.prototype.getTokenTypeMap = function() { - var tokenNames = this.getTokenNames(); - if (tokenNames===null) { - throw("The current recognizer does not provide a list of token names."); - } - var result = this.tokenTypeMapCache[tokenNames]; - if(result===undefined) { - result = tokenNames.reduce(function(o, k, i) { o[k] = i; }); - result.EOF = Token.EOF; - this.tokenTypeMapCache[tokenNames] = result; - } - return result; -}; - -// Get a map from rule names to rule indexes. -// -//

            Used for XPath and tree pattern compilation.

            -// -Recognizer.prototype.getRuleIndexMap = function() { - var ruleNames = this.ruleNames; - if (ruleNames===null) { - throw("The current recognizer does not provide a list of rule names."); - } - var result = this.ruleIndexMapCache[ruleNames]; - if(result===undefined) { - result = ruleNames.reduce(function(o, k, i) { o[k] = i; }); - this.ruleIndexMapCache[ruleNames] = result; - } - return result; -}; - -Recognizer.prototype.getTokenType = function(tokenName) { - var ttype = this.getTokenTypeMap()[tokenName]; - if (ttype !==undefined) { - return ttype; - } else { - return Token.INVALID_TYPE; - } -}; - - -// What is the error header, normally line/character position information?// -Recognizer.prototype.getErrorHeader = function(e) { - var line = e.getOffendingToken().line; - var column = e.getOffendingToken().column; - return "line " + line + ":" + column; -}; - - -// How should a token be displayed in an error message? The default -// is to display just the text, but during development you might -// want to have a lot of information spit out. Override in that case -// to use t.toString() (which, for CommonToken, dumps everything about -// the token). This is better than forcing you to override a method in -// your token objects because you don't have to go modify your lexer -// so that it creates a new Java type. -// -// @deprecated This method is not called by the ANTLR 4 Runtime. Specific -// implementations of {@link ANTLRErrorStrategy} may provide a similar -// feature when necessary. For example, see -// {@link DefaultErrorStrategy//getTokenErrorDisplay}. 
-// -Recognizer.prototype.getTokenErrorDisplay = function(t) { - if (t===null) { - return ""; - } - var s = t.text; - if (s===null) { - if (t.type===Token.EOF) { - s = ""; - } else { - s = "<" + t.type + ">"; - } - } - s = s.replace("\n","\\n").replace("\r","\\r").replace("\t","\\t"); - return "'" + s + "'"; -}; - -Recognizer.prototype.getErrorListenerDispatch = function() { - return new ProxyErrorListener(this._listeners); -}; - -// subclass needs to override these if there are sempreds or actions -// that the ATN interp needs to execute -Recognizer.prototype.sempred = function(localctx, ruleIndex, actionIndex) { - return true; -}; - -Recognizer.prototype.precpred = function(localctx , precedence) { - return true; -}; - -//Indicate that the recognizer has changed internal state that is -//consistent with the ATN state passed in. This way we always know -//where we are in the ATN as the parser goes along. The rule -//context objects form a stack that lets us see the stack of -//invoking rules. Combine this and we have complete ATN -//configuration information. - -Object.defineProperty(Recognizer.prototype, "state", { - get : function() { - return this._stateNumber; - }, - set : function(state) { - this._stateNumber = state; - } -}); - - -exports.Recognizer = Recognizer; +module.exports = Recognizer; diff --git a/runtime/JavaScript/src/antlr4/RuleContext.js b/runtime/JavaScript/src/antlr4/RuleContext.js index 02188f8d3..f18355e04 100644 --- a/runtime/JavaScript/src/antlr4/RuleContext.js +++ b/runtime/JavaScript/src/antlr4/RuleContext.js @@ -2,156 +2,159 @@ * Use of this file is governed by the BSD 3-clause license that * can be found in the LICENSE.txt file in the project root. */ -/// -// A rule context is a record of a single rule invocation. It knows -// which context invoked it, if any. If there is no parent context, then -// naturally the invoking state is not valid. 
The parent link -// provides a chain upwards from the current rule invocation to the root -// of the invocation tree, forming a stack. We actually carry no -// information about the rule associated with this context (except -// when parsing). We keep only the state number of the invoking state from -// the ATN submachine that invoked this. Contrast this with the s -// pointer inside ParserRuleContext that tracks the current state -// being "executed" for the current rule. -// -// The parent contexts are useful for computing lookahead sets and -// getting error information. -// -// These objects are used during parsing and prediction. -// For the special case of parsers, we use the subclass -// ParserRuleContext. -// -// @see ParserRuleContext -/// +const {RuleNode} = require('./tree/Tree'); +const {INVALID_INTERVAL} = require('./tree/Tree'); +const Trees = require('./tree/Trees'); -var RuleNode = require('./tree/Tree').RuleNode; -var INVALID_INTERVAL = require('./tree/Tree').INVALID_INTERVAL; -var INVALID_ALT_NUMBER = require('./atn/ATN').INVALID_ALT_NUMBER; - -function RuleContext(parent, invokingState) { - RuleNode.call(this); - // What context invoked this rule? - this.parentCtx = parent || null; - // What state invoked the rule associated with this context? - // The "return address" is the followState of invokingState - // If parent is null, this should be -1. - this.invokingState = invokingState || -1; - return this; -} - -RuleContext.prototype = Object.create(RuleNode.prototype); -RuleContext.prototype.constructor = RuleContext; - -RuleContext.prototype.depth = function() { - var n = 0; - var p = this; - while (p !== null) { - p = p.parentCtx; - n += 1; +class RuleContext extends RuleNode { + /** A rule context is a record of a single rule invocation. It knows + * which context invoked it, if any. If there is no parent context, then + * naturally the invoking state is not valid. 
The parent link + * provides a chain upwards from the current rule invocation to the root + * of the invocation tree, forming a stack. We actually carry no + * information about the rule associated with this context (except + * when parsing). We keep only the state number of the invoking state from + * the ATN submachine that invoked this. Contrast this with the s + * pointer inside ParserRuleContext that tracks the current state + * being "executed" for the current rule. + * + * The parent contexts are useful for computing lookahead sets and + * getting error information. + * + * These objects are used during parsing and prediction. + * For the special case of parsers, we use the subclass + * ParserRuleContext. + * + * @see ParserRuleContext + */ + constructor(parent, invokingState) { + // What context invoked this rule? + super(); + this.parentCtx = parent || null; + /** + * What state invoked the rule associated with this context? + * The "return address" is the followState of invokingState + * If parent is null, this should be -1. + */ + this.invokingState = invokingState || -1; } - return n; -}; -// A context is empty if there is no invoking state; meaning nobody call -// current context. -RuleContext.prototype.isEmpty = function() { - return this.invokingState === -1; -}; + depth() { + let n = 0; + let p = this; + while (p !== null) { + p = p.parentCtx; + n += 1; + } + return n; + } + + /** + * A context is empty if there is no invoking state; meaning nobody call + * current context. + */ + isEmpty() { + return this.invokingState === -1; + } // satisfy the ParseTree / SyntaxTree interface - -RuleContext.prototype.getSourceInterval = function() { - return INVALID_INTERVAL; -}; - -RuleContext.prototype.getRuleContext = function() { - return this; -}; - -RuleContext.prototype.getPayload = function() { - return this; -}; - -// Return the combined text of all child nodes. This method only considers -// tokens which have been added to the parse tree. -//

            -// Since tokens on hidden channels (e.g. whitespace or comments) are not -// added to the parse trees, they will not appear in the output of this -// method. -// / -RuleContext.prototype.getText = function() { - if (this.getChildCount() === 0) { - return ""; - } else { - return this.children.map(function(child) { - return child.getText(); - }).join(""); + getSourceInterval() { + return INVALID_INTERVAL; } -}; -// For rule associated with this parse tree internal node, return -// the outer alternative number used to match the input. Default -// implementation does not compute nor store this alt num. Create -// a subclass of ParserRuleContext with backing field and set -// option contextSuperClass. -// to set it. -RuleContext.prototype.getAltNumber = function() { return INVALID_ALT_NUMBER; } + getRuleContext() { + return this; + } -// Set the outer alternative number for this context node. Default -// implementation does nothing to avoid backing field overhead for -// trees that don't need it. Create -// a subclass of ParserRuleContext with backing field and set -// option contextSuperClass. -RuleContext.prototype.setAltNumber = function(altNumber) { } + getPayload() { + return this; + } -RuleContext.prototype.getChild = function(i) { - return null; -}; - -RuleContext.prototype.getChildCount = function() { - return 0; -}; - -RuleContext.prototype.accept = function(visitor) { - return visitor.visitChildren(this); -}; - -//need to manage circular dependencies, so export now -exports.RuleContext = RuleContext; -var Trees = require('./tree/Trees').Trees; - - -// Print out a whole tree, not just a node, in LISP format -// (root child1 .. childN). Print just a node if this is a leaf. 
-// - -RuleContext.prototype.toStringTree = function(ruleNames, recog) { - return Trees.toStringTree(this, ruleNames, recog); -}; - -RuleContext.prototype.toString = function(ruleNames, stop) { - ruleNames = ruleNames || null; - stop = stop || null; - var p = this; - var s = "["; - while (p !== null && p !== stop) { - if (ruleNames === null) { - if (!p.isEmpty()) { - s += p.invokingState; - } + /** + * Return the combined text of all child nodes. This method only considers + * tokens which have been added to the parse tree. + *

            + * Since tokens on hidden channels (e.g. whitespace or comments) are not + * added to the parse trees, they will not appear in the output of this + * method. + */ + getText() { + if (this.getChildCount() === 0) { + return ""; } else { - var ri = p.ruleIndex; - var ruleName = (ri >= 0 && ri < ruleNames.length) ? ruleNames[ri] - : "" + ri; - s += ruleName; + return this.children.map(function(child) { + return child.getText(); + }).join(""); } - if (p.parentCtx !== null && (ruleNames !== null || !p.parentCtx.isEmpty())) { - s += " "; - } - p = p.parentCtx; } - s += "]"; - return s; -}; + /** + * For rule associated with this parse tree internal node, return + * the outer alternative number used to match the input. Default + * implementation does not compute nor store this alt num. Create + * a subclass of ParserRuleContext with backing field and set + * option contextSuperClass. + * to set it. + */ + getAltNumber() { + // use constant value of ATN.INVALID_ALT_NUMBER to avoid circular dependency + return 0; + } + + /** + * Set the outer alternative number for this context node. Default + * implementation does nothing to avoid backing field overhead for + * trees that don't need it. Create + * a subclass of ParserRuleContext with backing field and set + * option contextSuperClass. + */ + setAltNumber(altNumber) { } + + getChild(i) { + return null; + } + + getChildCount() { + return 0; + } + + accept(visitor) { + return visitor.visitChildren(this); + } + + /** + * Print out a whole tree, not just a node, in LISP format + * (root child1 .. childN). Print just a node if this is a leaf. 
+ */ + toStringTree(ruleNames, recog) { + return Trees.toStringTree(this, ruleNames, recog); + } + + toString(ruleNames, stop) { + ruleNames = ruleNames || null; + stop = stop || null; + let p = this; + let s = "["; + while (p !== null && p !== stop) { + if (ruleNames === null) { + if (!p.isEmpty()) { + s += p.invokingState; + } + } else { + const ri = p.ruleIndex; + const ruleName = (ri >= 0 && ri < ruleNames.length) ? ruleNames[ri] + : "" + ri; + s += ruleName; + } + if (p.parentCtx !== null && (ruleNames !== null || !p.parentCtx.isEmpty())) { + s += " "; + } + p = p.parentCtx; + } + s += "]"; + return s; + } +} + +module.exports = RuleContext; diff --git a/runtime/JavaScript/src/antlr4/Token.js b/runtime/JavaScript/src/antlr4/Token.js index 16952209f..e52c22526 100644 --- a/runtime/JavaScript/src/antlr4/Token.js +++ b/runtime/JavaScript/src/antlr4/Token.js @@ -2,150 +2,148 @@ * Use of this file is governed by the BSD 3-clause license that * can be found in the LICENSE.txt file in the project root. */ -// -// A token has properties: text, type, line, character position in the line -// (so we can ignore tabs), token channel, index, and source from which -// we obtained this token. +/** + * A token has properties: text, type, line, character position in the line + * (so we can ignore tabs), token channel, index, and source from which + * we obtained this token. + */ +class Token { + constructor() { + this.source = null; + this.type = null; // token type of the token + this.channel = null; // The parser ignores everything not on DEFAULT_CHANNEL + this.start = null; // optional; return -1 if not implemented. + this.stop = null; // optional; return -1 if not implemented. + this.tokenIndex = null; // from 0..n-1 of the token object in the input stream + this.line = null; // line=1..n of the 1st character + this.column = null; // beginning of the line at which it occurs, 0..n-1 + this._text = null; // text of the token. 
+ } -function Token() { - this.source = null; - this.type = null; // token type of the token - this.channel = null; // The parser ignores everything not on DEFAULT_CHANNEL - this.start = null; // optional; return -1 if not implemented. - this.stop = null; // optional; return -1 if not implemented. - this.tokenIndex = null; // from 0..n-1 of the token object in the input stream - this.line = null; // line=1..n of the 1st character - this.column = null; // beginning of the line at which it occurs, 0..n-1 - this._text = null; // text of the token. - return this; + getTokenSource() { + return this.source[0]; + } + + getInputStream() { + return this.source[1]; + } + + get text(){ + return this._text; + } + + set text(text) { + this._text = text; + } } Token.INVALID_TYPE = 0; -// During lookahead operations, this "token" signifies we hit rule end ATN state -// and did not follow it despite needing to. +/** + * During lookahead operations, this "token" signifies we hit rule end ATN state + * and did not follow it despite needing to. + */ Token.EPSILON = -2; Token.MIN_USER_TOKEN_TYPE = 1; Token.EOF = -1; -// All tokens go to the parser (unless skip() is called in that rule) -// on a particular "channel". The parser tunes to a particular channel -// so that whitespace etc... can go to the parser on a "hidden" channel. - +/** + * All tokens go to the parser (unless skip() is called in that rule) + * on a particular "channel". The parser tunes to a particular channel + * so that whitespace etc... can go to the parser on a "hidden" channel. + */ Token.DEFAULT_CHANNEL = 0; -// Anything on different channel than DEFAULT_CHANNEL is not parsed -// by parser. - +/** + * Anything on different channel than DEFAULT_CHANNEL is not parsed + * by parser. + */ Token.HIDDEN_CHANNEL = 1; -// Explicitly set the text for this token. If {code text} is not -// {@code null}, then {@link //getText} will return this value rather than -// extracting the text from the input. 
-// -// @param text The explicit text of the token, or {@code null} if the text -// should be obtained from the input along with the start and stop indexes -// of the token. -Object.defineProperty(Token.prototype, "text", { - get : function() { - return this._text; - }, - set : function(text) { - this._text = text; +class CommonToken extends Token { + constructor(source, type, channel, start, stop) { + super(); + this.source = source !== undefined ? source : CommonToken.EMPTY_SOURCE; + this.type = type !== undefined ? type : null; + this.channel = channel !== undefined ? channel : Token.DEFAULT_CHANNEL; + this.start = start !== undefined ? start : -1; + this.stop = stop !== undefined ? stop : -1; + this.tokenIndex = -1; + if (this.source[0] !== null) { + this.line = source[0].line; + this.column = source[0].column; + } else { + this.column = -1; + } } -}); -Token.prototype.getTokenSource = function() { - return this.source[0]; -}; - -Token.prototype.getInputStream = function() { - return this.source[1]; -}; - -function CommonToken(source, type, channel, start, stop) { - Token.call(this); - this.source = source !== undefined ? source : CommonToken.EMPTY_SOURCE; - this.type = type !== undefined ? type : null; - this.channel = channel !== undefined ? channel : Token.DEFAULT_CHANNEL; - this.start = start !== undefined ? start : -1; - this.stop = stop !== undefined ? stop : -1; - this.tokenIndex = -1; - if (this.source[0] !== null) { - this.line = source[0].line; - this.column = source[0].column; - } else { - this.column = -1; + /** + * Constructs a new {@link CommonToken} as a copy of another {@link Token}. + * + *

            + * If {@code oldToken} is also a {@link CommonToken} instance, the newly + * constructed token will share a reference to the {@link //text} field and + * the {@link Pair} stored in {@link //source}. Otherwise, {@link //text} will + * be assigned the result of calling {@link //getText}, and {@link //source} + * will be constructed from the result of {@link Token//getTokenSource} and + * {@link Token//getInputStream}.

            + * + * @param oldToken The token to copy. + */ + clone() { + const t = new CommonToken(this.source, this.type, this.channel, this.start, this.stop); + t.tokenIndex = this.tokenIndex; + t.line = this.line; + t.column = this.column; + t.text = this.text; + return t; } - return this; -} -CommonToken.prototype = Object.create(Token.prototype); -CommonToken.prototype.constructor = CommonToken; + toString() { + let txt = this.text; + if (txt !== null) { + txt = txt.replace(/\n/g, "\\n").replace(/\r/g, "\\r").replace(/\t/g, "\\t"); + } else { + txt = ""; + } + return "[@" + this.tokenIndex + "," + this.start + ":" + this.stop + "='" + + txt + "',<" + this.type + ">" + + (this.channel > 0 ? ",channel=" + this.channel : "") + "," + + this.line + ":" + this.column + "]"; + } -// An empty {@link Pair} which is used as the default value of -// {@link //source} for tokens that do not have a source. -CommonToken.EMPTY_SOURCE = [ null, null ]; - -// Constructs a new {@link CommonToken} as a copy of another {@link Token}. -// -//

            -// If {@code oldToken} is also a {@link CommonToken} instance, the newly -// constructed token will share a reference to the {@link //text} field and -// the {@link Pair} stored in {@link //source}. Otherwise, {@link //text} will -// be assigned the result of calling {@link //getText}, and {@link //source} -// will be constructed from the result of {@link Token//getTokenSource} and -// {@link Token//getInputStream}.

            -// -// @param oldToken The token to copy. -// -CommonToken.prototype.clone = function() { - var t = new CommonToken(this.source, this.type, this.channel, this.start, - this.stop); - t.tokenIndex = this.tokenIndex; - t.line = this.line; - t.column = this.column; - t.text = this.text; - return t; -}; - -Object.defineProperty(CommonToken.prototype, "text", { - get : function() { + get text(){ if (this._text !== null) { return this._text; } - var input = this.getInputStream(); + const input = this.getInputStream(); if (input === null) { return null; } - var n = input.size; + const n = input.size; if (this.start < n && this.stop < n) { return input.getText(this.start, this.stop); } else { return ""; } - }, - set : function(text) { + } + + set text(text) { this._text = text; } -}); +} -CommonToken.prototype.toString = function() { - var txt = this.text; - if (txt !== null) { - txt = txt.replace(/\n/g, "\\n").replace(/\r/g, "\\r").replace(/\t/g, "\\t"); - } else { - txt = ""; - } - return "[@" + this.tokenIndex + "," + this.start + ":" + this.stop + "='" + - txt + "',<" + this.type + ">" + - (this.channel > 0 ? ",channel=" + this.channel : "") + "," + - this.line + ":" + this.column + "]"; -}; +/** + * An empty {@link Pair} which is used as the default value of + * {@link //source} for tokens that do not have a source. 
+ */ +CommonToken.EMPTY_SOURCE = [ null, null ]; -exports.Token = Token; -exports.CommonToken = CommonToken; +module.exports = { + Token, + CommonToken +} diff --git a/runtime/JavaScript/src/antlr4/Utils.js b/runtime/JavaScript/src/antlr4/Utils.js index e89772244..99595260f 100644 --- a/runtime/JavaScript/src/antlr4/Utils.js +++ b/runtime/JavaScript/src/antlr4/Utils.js @@ -10,15 +10,15 @@ function arrayToString(a) { String.prototype.seed = String.prototype.seed || Math.round(Math.random() * Math.pow(2, 32)); String.prototype.hashCode = function () { - var remainder, bytes, h1, h1b, c1, c1b, c2, c2b, k1, i, - key = this.toString(); + const key = this.toString(); + let h1b, k1; - remainder = key.length & 3; // key.length % 4 - bytes = key.length - remainder; - h1 = String.prototype.seed; - c1 = 0xcc9e2d51; - c2 = 0x1b873593; - i = 0; + const remainder = key.length & 3; // key.length % 4 + const bytes = key.length - remainder; + let h1 = String.prototype.seed; + const c1 = 0xcc9e2d51; + const c2 = 0x1b873593; + let i = 0; while (i < bytes) { k1 = @@ -66,344 +66,343 @@ String.prototype.hashCode = function () { }; function standardEqualsFunction(a, b) { - return a.equals(b); + return a ? a.equals(b) : a==b; } function standardHashCodeFunction(a) { - return a.hashCode(); + return a ? 
a.hashCode() : -1; } -function Set(hashFunction, equalsFunction) { - this.data = {}; - this.hashFunction = hashFunction || standardHashCodeFunction; - this.equalsFunction = equalsFunction || standardEqualsFunction; - return this; -} +class Set { + constructor(hashFunction, equalsFunction) { + this.data = {}; + this.hashFunction = hashFunction || standardHashCodeFunction; + this.equalsFunction = equalsFunction || standardEqualsFunction; + } -Object.defineProperty(Set.prototype, "length", { - get: function () { - var l = 0; - for (var key in this.data) { + add(value) { + const hash = this.hashFunction(value); + const key = "hash_" + hash; + if (key in this.data) { + const values = this.data[key]; + for (let i = 0; i < values.length; i++) { + if (this.equalsFunction(value, values[i])) { + return values[i]; + } + } + values.push(value); + return value; + } else { + this.data[key] = [value]; + return value; + } + } + + contains(value) { + return this.get(value) != null; + } + + get(value) { + const hash = this.hashFunction(value); + const key = "hash_" + hash; + if (key in this.data) { + const values = this.data[key]; + for (let i = 0; i < values.length; i++) { + if (this.equalsFunction(value, values[i])) { + return values[i]; + } + } + } + return null; + } + + values() { + let l = []; + for (const key in this.data) { + if (key.indexOf("hash_") === 0) { + l = l.concat(this.data[key]); + } + } + return l; + } + + toString() { + return arrayToString(this.values()); + } + + get length(){ + let l = 0; + for (const key in this.data) { if (key.indexOf("hash_") === 0) { l = l + this.data[key].length; } } return l; } -}); - -Set.prototype.add = function (value) { - var hash = this.hashFunction(value); - var key = "hash_" + hash; - if (key in this.data) { - var values = this.data[key]; - for (var i = 0; i < values.length; i++) { - if (this.equalsFunction(value, values[i])) { - return values[i]; - } - } - values.push(value); - return value; - } else { - this.data[key] = [value]; 
- return value; - } -}; - -Set.prototype.contains = function (value) { - return this.get(value) != null; -}; - -Set.prototype.get = function (value) { - var hash = this.hashFunction(value); - var key = "hash_" + hash; - if (key in this.data) { - var values = this.data[key]; - for (var i = 0; i < values.length; i++) { - if (this.equalsFunction(value, values[i])) { - return values[i]; - } - } - } - return null; -}; - -Set.prototype.values = function () { - var l = []; - for (var key in this.data) { - if (key.indexOf("hash_") === 0) { - l = l.concat(this.data[key]); - } - } - return l; -}; - -Set.prototype.toString = function () { - return arrayToString(this.values()); -}; - -function BitSet() { - this.data = []; - return this; } -BitSet.prototype.add = function (value) { - this.data[value] = true; -}; -BitSet.prototype.or = function (set) { - var bits = this; - Object.keys(set.data).map(function (alt) { - bits.add(alt); - }); -}; - -BitSet.prototype.remove = function (value) { - delete this.data[value]; -}; - -BitSet.prototype.contains = function (value) { - return this.data[value] === true; -}; - -BitSet.prototype.values = function () { - return Object.keys(this.data); -}; - -BitSet.prototype.minValue = function () { - return Math.min.apply(null, this.values()); -}; - -BitSet.prototype.hashCode = function () { - var hash = new Hash(); - hash.update(this.values()); - return hash.finish(); -}; - -BitSet.prototype.equals = function (other) { - if (!(other instanceof BitSet)) { - return false; +class BitSet { + constructor() { + this.data = []; } - return this.hashCode() === other.hashCode(); -}; -Object.defineProperty(BitSet.prototype, "length", { - get: function () { + add(value) { + this.data[value] = true; + } + + or(set) { + const bits = this; + Object.keys(set.data).map(function (alt) { + bits.add(alt); + }); + } + + remove(value) { + delete this.data[value]; + } + + contains(value) { + return this.data[value] === true; + } + + values() { + return 
Object.keys(this.data); + } + + minValue() { + return Math.min.apply(null, this.values()); + } + + hashCode() { + const hash = new Hash(); + hash.update(this.values()); + return hash.finish(); + } + + equals(other) { + if (!(other instanceof BitSet)) { + return false; + } + return this.hashCode() === other.hashCode(); + } + + toString() { + return "{" + this.values().join(", ") + "}"; + } + + get length(){ return this.values().length; } -}); - -BitSet.prototype.toString = function () { - return "{" + this.values().join(", ") + "}"; -}; - -function Map(hashFunction, equalsFunction) { - this.data = {}; - this.hashFunction = hashFunction || standardHashCodeFunction; - this.equalsFunction = equalsFunction || standardEqualsFunction; - return this; } -Object.defineProperty(Map.prototype, "length", { - get: function () { - var l = 0; - for (var hashKey in this.data) { + +class Map { + constructor(hashFunction, equalsFunction) { + this.data = {}; + this.hashFunction = hashFunction || standardHashCodeFunction; + this.equalsFunction = equalsFunction || standardEqualsFunction; + } + + put(key, value) { + const hashKey = "hash_" + this.hashFunction(key); + if (hashKey in this.data) { + const entries = this.data[hashKey]; + for (let i = 0; i < entries.length; i++) { + const entry = entries[i]; + if (this.equalsFunction(key, entry.key)) { + const oldValue = entry.value; + entry.value = value; + return oldValue; + } + } + entries.push({key:key, value:value}); + return value; + } else { + this.data[hashKey] = [{key:key, value:value}]; + return value; + } + } + + containsKey(key) { + const hashKey = "hash_" + this.hashFunction(key); + if(hashKey in this.data) { + const entries = this.data[hashKey]; + for (let i = 0; i < entries.length; i++) { + const entry = entries[i]; + if (this.equalsFunction(key, entry.key)) + return true; + } + } + return false; + } + + get(key) { + const hashKey = "hash_" + this.hashFunction(key); + if(hashKey in this.data) { + const entries = 
this.data[hashKey]; + for (let i = 0; i < entries.length; i++) { + const entry = entries[i]; + if (this.equalsFunction(key, entry.key)) + return entry.value; + } + } + return null; + } + + entries() { + let l = []; + for (const key in this.data) { + if (key.indexOf("hash_") === 0) { + l = l.concat(this.data[key]); + } + } + return l; + } + + getKeys() { + return this.entries().map(function(e) { + return e.key; + }); + } + + getValues() { + return this.entries().map(function(e) { + return e.value; + }); + } + + toString() { + const ss = this.entries().map(function(entry) { + return '{' + entry.key + ':' + entry.value + '}'; + }); + return '[' + ss.join(", ") + ']'; + } + + get length(){ + let l = 0; + for (const hashKey in this.data) { if (hashKey.indexOf("hash_") === 0) { l = l + this.data[hashKey].length; } } return l; } -}); +} -Map.prototype.put = function (key, value) { - var hashKey = "hash_" + this.hashFunction(key); - if (hashKey in this.data) { - var entries = this.data[hashKey]; - for (var i = 0; i < entries.length; i++) { - var entry = entries[i]; - if (this.equalsFunction(key, entry.key)) { - var oldValue = entry.value; - entry.value = value; - return oldValue; + +class AltDict { + constructor() { + this.data = {}; + } + + get(key) { + key = "k-" + key; + if (key in this.data) { + return this.data[key]; + } else { + return null; + } + } + + put(key, value) { + key = "k-" + key; + this.data[key] = value; + } + + values() { + const data = this.data; + const keys = Object.keys(this.data); + return keys.map(function (key) { + return data[key]; + }); + } +} + + +class DoubleDict { + constructor(defaultMapCtor) { + this.defaultMapCtor = defaultMapCtor || Map; + this.cacheMap = new this.defaultMapCtor(); + } + + get(a, b) { + const d = this.cacheMap.get(a) || null; + return d === null ? 
null : (d.get(b) || null); + } + + set(a, b, o) { + let d = this.cacheMap.get(a) || null; + if (d === null) { + d = new this.defaultMapCtor(); + this.cacheMap.put(a, d); + } + d.put(b, o); + } +} + +class Hash { + constructor() { + this.count = 0; + this.hash = 0; + } + + update() { + for(let i=0;i>> (32 - 15)); + k = k * 0x1B873593; + this.count = this.count + 1; + let hash = this.hash ^ k; + hash = (hash << 13) | (hash >>> (32 - 13)); + hash = hash * 5 + 0xE6546B64; + this.hash = hash; } } - entries.push({key:key, value:value}); - return value; - } else { - this.data[hashKey] = [{key:key, value:value}]; - return value; } -}; -Map.prototype.containsKey = function (key) { - var hashKey = "hash_" + this.hashFunction(key); - if(hashKey in this.data) { - var entries = this.data[hashKey]; - for (var i = 0; i < entries.length; i++) { - var entry = entries[i]; - if (this.equalsFunction(key, entry.key)) - return true; - } + finish() { + let hash = this.hash ^ (this.count * 4); + hash = hash ^ (hash >>> 16); + hash = hash * 0x85EBCA6B; + hash = hash ^ (hash >>> 13); + hash = hash * 0xC2B2AE35; + hash = hash ^ (hash >>> 16); + return hash; } - return false; -}; - -Map.prototype.get = function (key) { - var hashKey = "hash_" + this.hashFunction(key); - if(hashKey in this.data) { - var entries = this.data[hashKey]; - for (var i = 0; i < entries.length; i++) { - var entry = entries[i]; - if (this.equalsFunction(key, entry.key)) - return entry.value; - } - } - return null; -}; - -Map.prototype.entries = function () { - var l = []; - for (var key in this.data) { - if (key.indexOf("hash_") === 0) { - l = l.concat(this.data[key]); - } - } - return l; -}; - - -Map.prototype.getKeys = function () { - return this.entries().map(function(e) { - return e.key; - }); -}; - - -Map.prototype.getValues = function () { - return this.entries().map(function(e) { - return e.value; - }); -}; - - -Map.prototype.toString = function () { - var ss = this.entries().map(function(entry) { - return '{' + 
entry.key + ':' + entry.value + '}'; - }); - return '[' + ss.join(", ") + ']'; -}; - - -function AltDict() { - this.data = {}; - return this; } - -AltDict.prototype.get = function (key) { - key = "k-" + key; - if (key in this.data) { - return this.data[key]; - } else { - return null; - } -}; - -AltDict.prototype.put = function (key, value) { - key = "k-" + key; - this.data[key] = value; -}; - -AltDict.prototype.values = function () { - var data = this.data; - var keys = Object.keys(this.data); - return keys.map(function (key) { - return data[key]; - }); -}; - -function DoubleDict(defaultMapCtor) { - this.defaultMapCtor = defaultMapCtor || Map; - this.cacheMap = new this.defaultMapCtor(); - return this; -} - -function Hash() { - this.count = 0; - this.hash = 0; - return this; -} - -Hash.prototype.update = function () { - for(var i=0;i>> (32 - 15)); - k = k * 0x1B873593; - this.count = this.count + 1; - var hash = this.hash ^ k; - hash = (hash << 13) | (hash >>> (32 - 13)); - hash = hash * 5 + 0xE6546B64; - this.hash = hash; - } - } -}; - -Hash.prototype.finish = function () { - var hash = this.hash ^ (this.count * 4); - hash = hash ^ (hash >>> 16); - hash = hash * 0x85EBCA6B; - hash = hash ^ (hash >>> 13); - hash = hash * 0xC2B2AE35; - hash = hash ^ (hash >>> 16); - return hash; -}; - function hashStuff() { - var hash = new Hash(); + const hash = new Hash(); hash.update.apply(hash, arguments); return hash.finish(); } -DoubleDict.prototype.get = function (a, b) { - var d = this.cacheMap.get(a) || null; - return d === null ? 
null : (d.get(b) || null); -}; - -DoubleDict.prototype.set = function (a, b, o) { - var d = this.cacheMap.get(a) || null; - if (d === null) { - d = new this.defaultMapCtor(); - this.cacheMap.put(a, d); - } - d.put(b, o); -}; - function escapeWhitespace(s, escapeSpaces) { s = s.replace(/\t/g, "\\t") @@ -419,33 +418,34 @@ function titleCase(str) { return str.replace(/\w\S*/g, function (txt) { return txt.charAt(0).toUpperCase() + txt.substr(1); }); -}; +} -function equalArrays(a, b) -{ +function equalArrays(a, b) { if (!Array.isArray(a) || !Array.isArray(b)) return false; if (a == b) return true; if (a.length != b.length) return false; - for (var i = 0; i < a.length; i++) { + for (let i = 0; i < a.length; i++) { if (a[i] == b[i]) continue; - if (!a[i].equals(b[i])) + if (!a[i].equals || !a[i].equals(b[i])) return false; } return true; -}; +} -exports.Hash = Hash; -exports.Set = Set; -exports.Map = Map; -exports.BitSet = BitSet; -exports.AltDict = AltDict; -exports.DoubleDict = DoubleDict; -exports.hashStuff = hashStuff; -exports.escapeWhitespace = escapeWhitespace; -exports.arrayToString = arrayToString; -exports.titleCase = titleCase; -exports.equalArrays = equalArrays; +module.exports = { + Hash, + Set, + Map, + BitSet, + AltDict, + DoubleDict, + hashStuff, + escapeWhitespace, + arrayToString, + titleCase, + equalArrays +} diff --git a/runtime/JavaScript/src/antlr4/atn/ATN.js b/runtime/JavaScript/src/antlr4/atn/ATN.js index ec200a50f..6125f4f4a 100644 --- a/runtime/JavaScript/src/antlr4/atn/ATN.js +++ b/runtime/JavaScript/src/antlr4/atn/ATN.js @@ -3,140 +3,154 @@ * can be found in the LICENSE.txt file in the project root. 
*/ -var LL1Analyzer = require('./../LL1Analyzer').LL1Analyzer; -var IntervalSet = require('./../IntervalSet').IntervalSet; +const LL1Analyzer = require('./../LL1Analyzer'); +const {IntervalSet} = require('./../IntervalSet'); +const {Token} = require('./../Token'); -function ATN(grammarType , maxTokenType) { +class ATN { - // Used for runtime deserialization of ATNs from strings/// - // The type of the ATN. - this.grammarType = grammarType; - // The maximum value for any symbol recognized by a transition in the ATN. - this.maxTokenType = maxTokenType; - this.states = []; - // Each subrule/rule is a decision point and we must track them so we - // can go back later and build DFA predictors for them. This includes - // all the rules, subrules, optional blocks, ()+, ()* etc... - this.decisionToState = []; - // Maps from rule index to starting state number. - this.ruleToStartState = []; - // Maps from rule index to stop state number. - this.ruleToStopState = null; - this.modeNameToStartState = {}; - // For lexer ATNs, this maps the rule index to the resulting token type. - // For parser ATNs, this maps the rule index to the generated bypass token - // type if the - // {@link ATNDeserializationOptions//isGenerateRuleBypassTransitions} - // deserialization option was specified; otherwise, this is {@code null}. - this.ruleToTokenType = null; - // For lexer ATNs, this is an array of {@link LexerAction} objects which may - // be referenced by action transitions in the ATN. - this.lexerActions = null; - this.modeToStartState = []; + constructor(grammarType , maxTokenType) { + /** + * Used for runtime deserialization of ATNs from strings + * The type of the ATN. + */ + this.grammarType = grammarType; + // The maximum value for any symbol recognized by a transition in the ATN. + this.maxTokenType = maxTokenType; + this.states = []; + /** + * Each subrule/rule is a decision point and we must track them so we + * can go back later and build DFA predictors for them. 
This includes + * all the rules, subrules, optional blocks, ()+, ()* etc... + */ + this.decisionToState = []; + // Maps from rule index to starting state number. + this.ruleToStartState = []; + // Maps from rule index to stop state number. + this.ruleToStopState = null; + this.modeNameToStartState = {}; + /** + * For lexer ATNs, this maps the rule index to the resulting token type. + * For parser ATNs, this maps the rule index to the generated bypass token + * type if the {@link ATNDeserializationOptions//isGenerateRuleBypassTransitions} + * deserialization option was specified; otherwise, this is {@code null} + */ + this.ruleToTokenType = null; + /** + * For lexer ATNs, this is an array of {@link LexerAction} objects which may + * be referenced by action transitions in the ATN + */ + this.lexerActions = null; + this.modeToStartState = []; + } - return this; -} + /** + * Compute the set of valid tokens that can occur starting in state {@code s}. + * If {@code ctx} is null, the set of tokens will not include what can follow + * the rule surrounding {@code s}. In other words, the set will be + * restricted to tokens reachable staying within {@code s}'s rule + */ + nextTokensInContext(s, ctx) { + const anal = new LL1Analyzer(this); + return anal.LOOK(s, null, ctx); + } -// Compute the set of valid tokens that can occur starting in state {@code s}. -// If {@code ctx} is null, the set of tokens will not include what can follow -// the rule surrounding {@code s}. In other words, the set will be -// restricted to tokens reachable staying within {@code s}'s rule. -ATN.prototype.nextTokensInContext = function(s, ctx) { - var anal = new LL1Analyzer(this); - return anal.LOOK(s, null, ctx); -}; - -// Compute the set of valid tokens that can occur starting in {@code s} and -// staying in same rule. {@link Token//EPSILON} is in set if we reach end of -// rule. 
-ATN.prototype.nextTokensNoContext = function(s) { - if (s.nextTokenWithinRule !== null ) { + /** + * Compute the set of valid tokens that can occur starting in {@code s} and + * staying in same rule. {@link Token//EPSILON} is in set if we reach end of + * rule + */ + nextTokensNoContext(s) { + if (s.nextTokenWithinRule !== null ) { + return s.nextTokenWithinRule; + } + s.nextTokenWithinRule = this.nextTokensInContext(s, null); + s.nextTokenWithinRule.readOnly = true; return s.nextTokenWithinRule; } - s.nextTokenWithinRule = this.nextTokensInContext(s, null); - s.nextTokenWithinRule.readOnly = true; - return s.nextTokenWithinRule; -}; -ATN.prototype.nextTokens = function(s, ctx) { - if ( ctx===undefined ) { - return this.nextTokensNoContext(s); - } else { - return this.nextTokensInContext(s, ctx); + nextTokens(s, ctx) { + if ( ctx===undefined ) { + return this.nextTokensNoContext(s); + } else { + return this.nextTokensInContext(s, ctx); + } } -}; -ATN.prototype.addState = function( state) { - if ( state !== null ) { - state.atn = this; - state.stateNumber = this.states.length; + addState(state) { + if ( state !== null ) { + state.atn = this; + state.stateNumber = this.states.length; + } + this.states.push(state); } - this.states.push(state); -}; -ATN.prototype.removeState = function( state) { - this.states[state.stateNumber] = null; // just free mem, don't shift states in list -}; - -ATN.prototype.defineDecisionState = function( s) { - this.decisionToState.push(s); - s.decision = this.decisionToState.length-1; - return s.decision; -}; - -ATN.prototype.getDecisionState = function( decision) { - if (this.decisionToState.length===0) { - return null; - } else { - return this.decisionToState[decision]; + removeState(state) { + this.states[state.stateNumber] = null; // just free mem, don't shift states in list } -}; -// Computes the set of input symbols which could follow ATN state number -// {@code stateNumber} in the specified full {@code context}. 
This method -// considers the complete parser context, but does not evaluate semantic -// predicates (i.e. all predicates encountered during the calculation are -// assumed true). If a path in the ATN exists from the starting state to the -// {@link RuleStopState} of the outermost context without matching any -// symbols, {@link Token//EOF} is added to the returned set. -// -//

            If {@code context} is {@code null}, it is treated as -// {@link ParserRuleContext//EMPTY}.

            -// -// @param stateNumber the ATN state number -// @param context the full parse context -// @return The set of potentially valid input symbols which could follow the -// specified state in the specified context. -// @throws IllegalArgumentException if the ATN does not contain a state with -// number {@code stateNumber} -var Token = require('./../Token').Token; + defineDecisionState(s) { + this.decisionToState.push(s); + s.decision = this.decisionToState.length-1; + return s.decision; + } -ATN.prototype.getExpectedTokens = function( stateNumber, ctx ) { - if ( stateNumber < 0 || stateNumber >= this.states.length ) { - throw("Invalid state number."); + getDecisionState(decision) { + if (this.decisionToState.length===0) { + return null; + } else { + return this.decisionToState[decision]; + } } - var s = this.states[stateNumber]; - var following = this.nextTokens(s); - if (!following.contains(Token.EPSILON)) { - return following; - } - var expected = new IntervalSet(); - expected.addSet(following); - expected.removeOne(Token.EPSILON); - while (ctx !== null && ctx.invokingState >= 0 && following.contains(Token.EPSILON)) { - var invokingState = this.states[ctx.invokingState]; - var rt = invokingState.transitions[0]; - following = this.nextTokens(rt.followState); + + /** + * Computes the set of input symbols which could follow ATN state number + * {@code stateNumber} in the specified full {@code context}. This method + * considers the complete parser context, but does not evaluate semantic + * predicates (i.e. all predicates encountered during the calculation are + * assumed true). If a path in the ATN exists from the starting state to the + * {@link RuleStopState} of the outermost context without matching any + * symbols, {@link Token//EOF} is added to the returned set. + * + *

            If {@code context} is {@code null}, it is treated as + * {@link ParserRuleContext//EMPTY}.

            + * + * @param stateNumber the ATN state number + * @param ctx the full parse context + * + * @return {IntervalSet} The set of potentially valid input symbols which could follow the + * specified state in the specified context. + * + * @throws IllegalArgumentException if the ATN does not contain a state with + * number {@code stateNumber} + */ + getExpectedTokens(stateNumber, ctx ) { + if ( stateNumber < 0 || stateNumber >= this.states.length ) { + throw("Invalid state number."); + } + const s = this.states[stateNumber]; + let following = this.nextTokens(s); + if (!following.contains(Token.EPSILON)) { + return following; + } + const expected = new IntervalSet(); expected.addSet(following); expected.removeOne(Token.EPSILON); - ctx = ctx.parentCtx; + while (ctx !== null && ctx.invokingState >= 0 && following.contains(Token.EPSILON)) { + const invokingState = this.states[ctx.invokingState]; + const rt = invokingState.transitions[0]; + following = this.nextTokens(rt.followState); + expected.addSet(following); + expected.removeOne(Token.EPSILON); + ctx = ctx.parentCtx; + } + if (following.contains(Token.EPSILON)) { + expected.addOne(Token.EOF); + } + return expected; } - if (following.contains(Token.EPSILON)) { - expected.addOne(Token.EOF); - } - return expected; -}; +} ATN.INVALID_ALT_NUMBER = 0; -exports.ATN = ATN; \ No newline at end of file +module.exports = ATN; diff --git a/runtime/JavaScript/src/antlr4/atn/ATNConfig.js b/runtime/JavaScript/src/antlr4/atn/ATNConfig.js index 3a796a031..73e4c86f6 100644 --- a/runtime/JavaScript/src/antlr4/atn/ATNConfig.js +++ b/runtime/JavaScript/src/antlr4/atn/ATNConfig.js @@ -1,32 +1,22 @@ -// /* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. * Use of this file is governed by the BSD 3-clause license that * can be found in the LICENSE.txt file in the project root. */ -/// -// A tuple: (ATN state, predicted alt, syntactic, semantic context). 
-// The syntactic context is a graph-structured stack node whose -// path(s) to the root is the rule invocation(s) -// chain used to arrive at the state. The semantic context is -// the tree of semantic predicates encountered before reaching -// an ATN state. -/// - -var DecisionState = require('./ATNState').DecisionState; -var SemanticContext = require('./SemanticContext').SemanticContext; -var Hash = require("../Utils").Hash; +const {DecisionState} = require('./ATNState'); +const {SemanticContext} = require('./SemanticContext'); +const {Hash} = require("../Utils"); function checkParams(params, isCfg) { if(params===null) { - var result = { state:null, alt:null, context:null, semanticContext:null }; + const result = { state:null, alt:null, context:null, semanticContext:null }; if(isCfg) { result.reachesIntoOuterContext = 0; } return result; } else { - var props = {}; + const props = {}; props.state = params.state || null; props.alt = (params.alt === undefined) ? null : params.alt; props.context = params.context || null; @@ -39,138 +29,144 @@ function checkParams(params, isCfg) { } } -function ATNConfig(params, config) { - this.checkContext(params, config); - params = checkParams(params); - config = checkParams(config, true); - // The ATN state associated with this configuration/// - this.state = params.state!==null ? params.state : config.state; - // What alt (or lexer rule) is predicted by this configuration/// - this.alt = params.alt!==null ? params.alt : config.alt; - // The stack of invoking states leading to the rule/states associated - // with this config. We track only those contexts pushed during - // execution of the ATN simulator. - this.context = params.context!==null ? params.context : config.context; - this.semanticContext = params.semanticContext!==null ? params.semanticContext : - (config.semanticContext!==null ? 
config.semanticContext : SemanticContext.NONE); - // We cannot execute predicates dependent upon local context unless - // we know for sure we are in the correct context. Because there is - // no way to do this efficiently, we simply cannot evaluate - // dependent predicates unless we are in the rule that initially - // invokes the ATN simulator. - // - // closure() tracks the depth of how far we dip into the - // outer context: depth > 0. Note that it may not be totally - // accurate depth since I don't ever decrement. TODO: make it a boolean then - this.reachesIntoOuterContext = config.reachesIntoOuterContext; - this.precedenceFilterSuppressed = config.precedenceFilterSuppressed; - return this; +class ATNConfig { + /** + * @param {Object} params A tuple: (ATN state, predicted alt, syntactic, semantic context). + * The syntactic context is a graph-structured stack node whose + * path(s) to the root is the rule invocation(s) + * chain used to arrive at the state. The semantic context is + * the tree of semantic predicates encountered before reaching + * an ATN state + */ + constructor(params, config) { + this.checkContext(params, config); + params = checkParams(params); + config = checkParams(config, true); + // The ATN state associated with this configuration/// + this.state = params.state!==null ? params.state : config.state; + // What alt (or lexer rule) is predicted by this configuration/// + this.alt = params.alt!==null ? params.alt : config.alt; + /** + * The stack of invoking states leading to the rule/states associated + * with this config. We track only those contexts pushed during + * execution of the ATN simulator + */ + this.context = params.context!==null ? params.context : config.context; + this.semanticContext = params.semanticContext!==null ? params.semanticContext : + (config.semanticContext!==null ? 
config.semanticContext : SemanticContext.NONE); + // TODO: make it a boolean then + /** + * We cannot execute predicates dependent upon local context unless + * we know for sure we are in the correct context. Because there is + * no way to do this efficiently, we simply cannot evaluate + * dependent predicates unless we are in the rule that initially + * invokes the ATN simulator. + * closure() tracks the depth of how far we dip into the + * outer context: depth > 0. Note that it may not be totally + * accurate depth since I don't ever decrement + */ + this.reachesIntoOuterContext = config.reachesIntoOuterContext; + this.precedenceFilterSuppressed = config.precedenceFilterSuppressed; + } + + checkContext(params, config) { + if((params.context===null || params.context===undefined) && + (config===null || config.context===null || config.context===undefined)) { + this.context = null; + } + } + + hashCode() { + const hash = new Hash(); + this.updateHashCode(hash); + return hash.finish(); + } + + updateHashCode(hash) { + hash.update(this.state.stateNumber, this.alt, this.context, this.semanticContext); + } + + /** + * An ATN configuration is equal to another if both have + * the same state, they predict the same alternative, and + * syntactic/semantic contexts are the same + */ + equals(other) { + if (this === other) { + return true; + } else if (! (other instanceof ATNConfig)) { + return false; + } else { + return this.state.stateNumber===other.state.stateNumber && + this.alt===other.alt && + (this.context===null ? other.context===null : this.context.equals(other.context)) && + this.semanticContext.equals(other.semanticContext) && + this.precedenceFilterSuppressed===other.precedenceFilterSuppressed; + } + } + + hashCodeForConfigSet() { + const hash = new Hash(); + hash.update(this.state.stateNumber, this.alt, this.semanticContext); + return hash.finish(); + } + + equalsForConfigSet(other) { + if (this === other) { + return true; + } else if (! 
(other instanceof ATNConfig)) { + return false; + } else { + return this.state.stateNumber===other.state.stateNumber && + this.alt===other.alt && + this.semanticContext.equals(other.semanticContext); + } + } + + toString() { + return "(" + this.state + "," + this.alt + + (this.context!==null ? ",[" + this.context.toString() + "]" : "") + + (this.semanticContext !== SemanticContext.NONE ? + ("," + this.semanticContext.toString()) + : "") + + (this.reachesIntoOuterContext>0 ? + (",up=" + this.reachesIntoOuterContext) + : "") + ")"; + } } -ATNConfig.prototype.checkContext = function(params, config) { - if((params.context===null || params.context===undefined) && - (config===null || config.context===null || config.context===undefined)) { - this.context = null; - } -}; +class LexerATNConfig extends ATNConfig { + constructor(params, config) { + super(params, config); -ATNConfig.prototype.hashCode = function() { - var hash = new Hash(); - this.updateHashCode(hash); - return hash.finish(); -}; - - -ATNConfig.prototype.updateHashCode = function(hash) { - hash.update(this.state.stateNumber, this.alt, this.context, this.semanticContext); -}; - -// An ATN configuration is equal to another if both have -// the same state, they predict the same alternative, and -// syntactic/semantic contexts are the same. - -ATNConfig.prototype.equals = function(other) { - if (this === other) { - return true; - } else if (! (other instanceof ATNConfig)) { - return false; - } else { - return this.state.stateNumber===other.state.stateNumber && - this.alt===other.alt && - (this.context===null ? other.context===null : this.context.equals(other.context)) && - this.semanticContext.equals(other.semanticContext) && - this.precedenceFilterSuppressed===other.precedenceFilterSuppressed; + // This is the backing field for {@link //getLexerActionExecutor}. + const lexerActionExecutor = params.lexerActionExecutor || null; + this.lexerActionExecutor = lexerActionExecutor || (config!==null ? 
config.lexerActionExecutor : null); + this.passedThroughNonGreedyDecision = config!==null ? this.checkNonGreedyDecision(config, this.state) : false; + this.hashCodeForConfigSet = LexerATNConfig.prototype.hashCode; + this.equalsForConfigSet = LexerATNConfig.prototype.equals; + return this; } -}; - -ATNConfig.prototype.hashCodeForConfigSet = function() { - var hash = new Hash(); - hash.update(this.state.stateNumber, this.alt, this.semanticContext); - return hash.finish(); -}; - - -ATNConfig.prototype.equalsForConfigSet = function(other) { - if (this === other) { - return true; - } else if (! (other instanceof ATNConfig)) { - return false; - } else { - return this.state.stateNumber===other.state.stateNumber && - this.alt===other.alt && - this.semanticContext.equals(other.semanticContext); + updateHashCode(hash) { + hash.update(this.state.stateNumber, this.alt, this.context, this.semanticContext, this.passedThroughNonGreedyDecision, this.lexerActionExecutor); } -}; + equals(other) { + return this === other || + (other instanceof LexerATNConfig && + this.passedThroughNonGreedyDecision == other.passedThroughNonGreedyDecision && + (this.lexerActionExecutor ? this.lexerActionExecutor.equals(other.lexerActionExecutor) : !other.lexerActionExecutor) && + super.equals(other)); + } -ATNConfig.prototype.toString = function() { - return "(" + this.state + "," + this.alt + - (this.context!==null ? ",[" + this.context.toString() + "]" : "") + - (this.semanticContext !== SemanticContext.NONE ? - ("," + this.semanticContext.toString()) - : "") + - (this.reachesIntoOuterContext>0 ? - (",up=" + this.reachesIntoOuterContext) - : "") + ")"; -}; - - -function LexerATNConfig(params, config) { - ATNConfig.call(this, params, config); - - // This is the backing field for {@link //getLexerActionExecutor}. - var lexerActionExecutor = params.lexerActionExecutor || null; - this.lexerActionExecutor = lexerActionExecutor || (config!==null ? 
config.lexerActionExecutor : null); - this.passedThroughNonGreedyDecision = config!==null ? this.checkNonGreedyDecision(config, this.state) : false; - return this; + checkNonGreedyDecision(source, target) { + return source.passedThroughNonGreedyDecision || + (target instanceof DecisionState) && target.nonGreedy; + } } -LexerATNConfig.prototype = Object.create(ATNConfig.prototype); -LexerATNConfig.prototype.constructor = LexerATNConfig; -LexerATNConfig.prototype.updateHashCode = function(hash) { - hash.update(this.state.stateNumber, this.alt, this.context, this.semanticContext, this.passedThroughNonGreedyDecision, this.lexerActionExecutor); -}; - -LexerATNConfig.prototype.equals = function(other) { - return this === other || - (other instanceof LexerATNConfig && - this.passedThroughNonGreedyDecision == other.passedThroughNonGreedyDecision && - (this.lexerActionExecutor ? this.lexerActionExecutor.equals(other.lexerActionExecutor) : !other.lexerActionExecutor) && - ATNConfig.prototype.equals.call(this, other)); -}; - -LexerATNConfig.prototype.hashCodeForConfigSet = LexerATNConfig.prototype.hashCode; - -LexerATNConfig.prototype.equalsForConfigSet = LexerATNConfig.prototype.equals; - - -LexerATNConfig.prototype.checkNonGreedyDecision = function(source, target) { - return source.passedThroughNonGreedyDecision || - (target instanceof DecisionState) && target.nonGreedy; -}; - -exports.ATNConfig = ATNConfig; -exports.LexerATNConfig = LexerATNConfig; \ No newline at end of file +module.exports.ATNConfig = ATNConfig; +module.exports.LexerATNConfig = LexerATNConfig; diff --git a/runtime/JavaScript/src/antlr4/atn/ATNConfigSet.js b/runtime/JavaScript/src/antlr4/atn/ATNConfigSet.js index 5a45f797e..1b5b46e83 100644 --- a/runtime/JavaScript/src/antlr4/atn/ATNConfigSet.js +++ b/runtime/JavaScript/src/antlr4/atn/ATNConfigSet.js @@ -1,21 +1,12 @@ -// /* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. 
* Use of this file is governed by the BSD 3-clause license that * can be found in the LICENSE.txt file in the project root. */ -// -// Specialized {@link Set}{@code <}{@link ATNConfig}{@code >} that can track -// info about the set, with support for combining similar configurations using a -// graph-structured stack. -/// - -var ATN = require('./ATN').ATN; -var Utils = require('./../Utils'); -var Hash = Utils.Hash; -var Set = Utils.Set; -var SemanticContext = require('./SemanticContext').SemanticContext; -var merge = require('./../PredictionContext').merge; +const ATN = require('./ATN'); +const Utils = require('./../Utils'); +const {SemanticContext} = require('./SemanticContext'); +const {merge} = require('./../PredictionContext'); function hashATNConfig(c) { return c.hashCodeForConfigSet(); @@ -30,224 +21,233 @@ function equalATNConfigs(a, b) { return a.equalsForConfigSet(b); } +/** + * Specialized {@link Set}{@code <}{@link ATNConfig}{@code >} that can track + * info about the set, with support for combining similar configurations using a + * graph-structured stack + */ +class ATNConfigSet { + constructor(fullCtx) { + /** + * The reason that we need this is because we don't want the hash map to use + * the standard hash code and equals. We need all configurations with the + * same + * {@code (s,i,_,semctx)} to be equal. Unfortunately, this key effectively + * doubles + * the number of objects associated with ATNConfigs. The other solution is + * to + * use a hash table that lets us specify the equals/hashcode operation. + * All configs but hashed by (s, i, _, pi) not including context. Wiped out + * when we go readonly as this set becomes a DFA state + */ + this.configLookup = new Utils.Set(hashATNConfig, equalATNConfigs); + /** + * Indicates that this configuration set is part of a full context + * LL prediction. It will be used to determine how to merge $. 
With SLL + * it's a wildcard whereas it is not for LL context merge + */ + this.fullCtx = fullCtx === undefined ? true : fullCtx; + /** + * Indicates that the set of configurations is read-only. Do not + * allow any code to manipulate the set; DFA states will point at + * the sets and they must not change. This does not protect the other + * fields; in particular, conflictingAlts is set after + * we've made this readonly + */ + this.readOnly = false; + // Track the elements as they are added to the set; supports get(i)/// + this.configs = []; -function ATNConfigSet(fullCtx) { - // - // The reason that we need this is because we don't want the hash map to use - // the standard hash code and equals. We need all configurations with the - // same - // {@code (s,i,_,semctx)} to be equal. Unfortunately, this key effectively - // doubles - // the number of objects associated with ATNConfigs. The other solution is - // to - // use a hash table that lets us specify the equals/hashcode operation. - // All configs but hashed by (s, i, _, pi) not including context. Wiped out - // when we go readonly as this set becomes a DFA state. - this.configLookup = new Set(hashATNConfig, equalATNConfigs); - // Indicates that this configuration set is part of a full context - // LL prediction. It will be used to determine how to merge $. With SLL - // it's a wildcard whereas it is not for LL context merge. - this.fullCtx = fullCtx === undefined ? true : fullCtx; - // Indicates that the set of configurations is read-only. Do not - // allow any code to manipulate the set; DFA states will point at - // the sets and they must not change. This does not protect the other - // fields; in particular, conflictingAlts is set after - // we've made this readonly. 
- this.readOnly = false; - // Track the elements as they are added to the set; supports get(i)/// - this.configs = []; + // TODO: these fields make me pretty uncomfortable but nice to pack up info + // together, saves recomputation + // TODO: can we track conflicts as they are added to save scanning configs + // later? + this.uniqueAlt = 0; + this.conflictingAlts = null; - // TODO: these fields make me pretty uncomfortable but nice to pack up info - // together, saves recomputation - // TODO: can we track conflicts as they are added to save scanning configs - // later? - this.uniqueAlt = 0; - this.conflictingAlts = null; + /** + * Used in parser and lexer. In lexer, it indicates we hit a pred + * while computing a closure operation. Don't make a DFA state from this + */ + this.hasSemanticContext = false; + this.dipsIntoOuterContext = false; - // Used in parser and lexer. In lexer, it indicates we hit a pred - // while computing a closure operation. Don't make a DFA state from this. - this.hasSemanticContext = false; - this.dipsIntoOuterContext = false; - - this.cachedHashCode = -1; - - return this; -} - -// Adding a new config means merging contexts with existing configs for -// {@code (s, i, pi, _)}, where {@code s} is the -// {@link ATNConfig//state}, {@code i} is the {@link ATNConfig//alt}, and -// {@code pi} is the {@link ATNConfig//semanticContext}. We use -// {@code (s,i,pi)} as key. -// -//

            This method updates {@link //dipsIntoOuterContext} and -// {@link //hasSemanticContext} when necessary.

            -// / -ATNConfigSet.prototype.add = function(config, mergeCache) { - if (mergeCache === undefined) { - mergeCache = null; - } - if (this.readOnly) { - throw "This set is readonly"; - } - if (config.semanticContext !== SemanticContext.NONE) { - this.hasSemanticContext = true; - } - if (config.reachesIntoOuterContext > 0) { - this.dipsIntoOuterContext = true; - } - var existing = this.configLookup.add(config); - if (existing === config) { this.cachedHashCode = -1; - this.configs.push(config); // track order here + } + + /** + * Adding a new config means merging contexts with existing configs for + * {@code (s, i, pi, _)}, where {@code s} is the + * {@link ATNConfig//state}, {@code i} is the {@link ATNConfig//alt}, and + * {@code pi} is the {@link ATNConfig//semanticContext}. We use + * {@code (s,i,pi)} as key. + * + *

            This method updates {@link //dipsIntoOuterContext} and + * {@link //hasSemanticContext} when necessary.

            + */ + add(config, mergeCache) { + if (mergeCache === undefined) { + mergeCache = null; + } + if (this.readOnly) { + throw "This set is readonly"; + } + if (config.semanticContext !== SemanticContext.NONE) { + this.hasSemanticContext = true; + } + if (config.reachesIntoOuterContext > 0) { + this.dipsIntoOuterContext = true; + } + const existing = this.configLookup.add(config); + if (existing === config) { + this.cachedHashCode = -1; + this.configs.push(config); // track order here + return true; + } + // a previous (s,i,pi,_), merge with it and save result + const rootIsWildcard = !this.fullCtx; + const merged = merge(existing.context, config.context, rootIsWildcard, mergeCache); + /** + * no need to check for existing.context, config.context in cache + * since only way to create new graphs is "call rule" and here. We + * cache at both places + */ + existing.reachesIntoOuterContext = Math.max( existing.reachesIntoOuterContext, config.reachesIntoOuterContext); + // make sure to preserve the precedence filter suppression during the merge + if (config.precedenceFilterSuppressed) { + existing.precedenceFilterSuppressed = true; + } + existing.context = merged; // replace context; no need to alt mapping return true; } - // a previous (s,i,pi,_), merge with it and save result - var rootIsWildcard = !this.fullCtx; - var merged = merge(existing.context, config.context, rootIsWildcard, mergeCache); - // no need to check for existing.context, config.context in cache - // since only way to create new graphs is "call rule" and here. We - // cache at both places. 
- existing.reachesIntoOuterContext = Math.max( existing.reachesIntoOuterContext, config.reachesIntoOuterContext); - // make sure to preserve the precedence filter suppression during the merge - if (config.precedenceFilterSuppressed) { - existing.precedenceFilterSuppressed = true; - } - existing.context = merged; // replace context; no need to alt mapping - return true; -}; -ATNConfigSet.prototype.getStates = function() { - var states = new Set(); - for (var i = 0; i < this.configs.length; i++) { - states.add(this.configs[i].state); + getStates() { + const states = new Utils.Set(); + for (let i = 0; i < this.configs.length; i++) { + states.add(this.configs[i].state); + } + return states; } - return states; -}; -ATNConfigSet.prototype.getPredicates = function() { - var preds = []; - for (var i = 0; i < this.configs.length; i++) { - var c = this.configs[i].semanticContext; - if (c !== SemanticContext.NONE) { - preds.push(c.semanticContext); + getPredicates() { + const preds = []; + for (let i = 0; i < this.configs.length; i++) { + const c = this.configs[i].semanticContext; + if (c !== SemanticContext.NONE) { + preds.push(c.semanticContext); + } + } + return preds; + } + + optimizeConfigs(interpreter) { + if (this.readOnly) { + throw "This set is readonly"; + } + if (this.configLookup.length === 0) { + return; + } + for (let i = 0; i < this.configs.length; i++) { + const config = this.configs[i]; + config.context = interpreter.getCachedContext(config.context); } } - return preds; -}; -Object.defineProperty(ATNConfigSet.prototype, "items", { - get : function() { + addAll(coll) { + for (let i = 0; i < coll.length; i++) { + this.add(coll[i]); + } + return false; + } + + equals(other) { + return this === other || + (other instanceof ATNConfigSet && + Utils.equalArrays(this.configs, other.configs) && + this.fullCtx === other.fullCtx && + this.uniqueAlt === other.uniqueAlt && + this.conflictingAlts === other.conflictingAlts && + this.hasSemanticContext === 
other.hasSemanticContext && + this.dipsIntoOuterContext === other.dipsIntoOuterContext); + } + + hashCode() { + const hash = new Utils.Hash(); + hash.update(this.configs); + return hash.finish(); + } + + updateHashCode(hash) { + if (this.readOnly) { + if (this.cachedHashCode === -1) { + this.cachedHashCode = this.hashCode(); + } + hash.update(this.cachedHashCode); + } else { + hash.update(this.hashCode()); + } + } + + isEmpty() { + return this.configs.length === 0; + } + + contains(item) { + if (this.configLookup === null) { + throw "This method is not implemented for readonly sets."; + } + return this.configLookup.contains(item); + } + + containsFast(item) { + if (this.configLookup === null) { + throw "This method is not implemented for readonly sets."; + } + return this.configLookup.containsFast(item); + } + + clear() { + if (this.readOnly) { + throw "This set is readonly"; + } + this.configs = []; + this.cachedHashCode = -1; + this.configLookup = new Utils.Set(); + } + + setReadonly(readOnly) { + this.readOnly = readOnly; + if (readOnly) { + this.configLookup = null; // can't mod, no need for lookup cache + } + } + + toString() { + return Utils.arrayToString(this.configs) + + (this.hasSemanticContext ? ",hasSemanticContext=" + this.hasSemanticContext : "") + + (this.uniqueAlt !== ATN.INVALID_ALT_NUMBER ? ",uniqueAlt=" + this.uniqueAlt : "") + + (this.conflictingAlts !== null ? ",conflictingAlts=" + this.conflictingAlts : "") + + (this.dipsIntoOuterContext ? 
",dipsIntoOuterContext" : ""); + } + + get items(){ return this.configs; } -}); -ATNConfigSet.prototype.optimizeConfigs = function(interpreter) { - if (this.readOnly) { - throw "This set is readonly"; - } - if (this.configLookup.length === 0) { - return; - } - for (var i = 0; i < this.configs.length; i++) { - var config = this.configs[i]; - config.context = interpreter.getCachedContext(config.context); - } -}; - -ATNConfigSet.prototype.addAll = function(coll) { - for (var i = 0; i < coll.length; i++) { - this.add(coll[i]); - } - return false; -}; - -ATNConfigSet.prototype.equals = function(other) { - return this === other || - (other instanceof ATNConfigSet && - Utils.equalArrays(this.configs, other.configs) && - this.fullCtx === other.fullCtx && - this.uniqueAlt === other.uniqueAlt && - this.conflictingAlts === other.conflictingAlts && - this.hasSemanticContext === other.hasSemanticContext && - this.dipsIntoOuterContext === other.dipsIntoOuterContext); -}; - -ATNConfigSet.prototype.hashCode = function() { - var hash = new Hash(); - hash.update(this.configs); - return hash.finish(); -}; - - -ATNConfigSet.prototype.updateHashCode = function(hash) { - if (this.readOnly) { - if (this.cachedHashCode === -1) { - this.cachedHashCode = this.hashCode(); - } - hash.update(this.cachedHashCode); - } else { - hash.update(this.hashCode()); - } -}; - - -Object.defineProperty(ATNConfigSet.prototype, "length", { - get : function() { + get length(){ return this.configs.length; } -}); - -ATNConfigSet.prototype.isEmpty = function() { - return this.configs.length === 0; -}; - -ATNConfigSet.prototype.contains = function(item) { - if (this.configLookup === null) { - throw "This method is not implemented for readonly sets."; - } - return this.configLookup.contains(item); -}; - -ATNConfigSet.prototype.containsFast = function(item) { - if (this.configLookup === null) { - throw "This method is not implemented for readonly sets."; - } - return this.configLookup.containsFast(item); -}; - 
-ATNConfigSet.prototype.clear = function() { - if (this.readOnly) { - throw "This set is readonly"; - } - this.configs = []; - this.cachedHashCode = -1; - this.configLookup = new Set(); -}; - -ATNConfigSet.prototype.setReadonly = function(readOnly) { - this.readOnly = readOnly; - if (readOnly) { - this.configLookup = null; // can't mod, no need for lookup cache - } -}; - -ATNConfigSet.prototype.toString = function() { - return Utils.arrayToString(this.configs) + - (this.hasSemanticContext ? ",hasSemanticContext=" + this.hasSemanticContext : "") + - (this.uniqueAlt !== ATN.INVALID_ALT_NUMBER ? ",uniqueAlt=" + this.uniqueAlt : "") + - (this.conflictingAlts !== null ? ",conflictingAlts=" + this.conflictingAlts : "") + - (this.dipsIntoOuterContext ? ",dipsIntoOuterContext" : ""); -}; - -function OrderedATNConfigSet() { - ATNConfigSet.call(this); - this.configLookup = new Set(); - return this; } -OrderedATNConfigSet.prototype = Object.create(ATNConfigSet.prototype); -OrderedATNConfigSet.prototype.constructor = OrderedATNConfigSet; -exports.ATNConfigSet = ATNConfigSet; -exports.OrderedATNConfigSet = OrderedATNConfigSet; +class OrderedATNConfigSet extends ATNConfigSet { + constructor() { + super(); + this.configLookup = new Utils.Set(); + } +} + +module.exports = { + ATNConfigSet, + OrderedATNConfigSet +} diff --git a/runtime/JavaScript/src/antlr4/atn/ATNDeserializationOptions.js b/runtime/JavaScript/src/antlr4/atn/ATNDeserializationOptions.js index 5f237b610..dc82dc305 100644 --- a/runtime/JavaScript/src/antlr4/atn/ATNDeserializationOptions.js +++ b/runtime/JavaScript/src/antlr4/atn/ATNDeserializationOptions.js @@ -3,15 +3,15 @@ * can be found in the LICENSE.txt file in the project root. */ -function ATNDeserializationOptions(copyFrom) { - if(copyFrom===undefined) { - copyFrom = null; +class ATNDeserializationOptions { + constructor(copyFrom) { + if(copyFrom===undefined) { + copyFrom = null; + } + this.readOnly = false; + this.verifyATN = copyFrom===null ? 
true : copyFrom.verifyATN; + this.generateRuleBypassTransitions = copyFrom===null ? false : copyFrom.generateRuleBypassTransitions; } - this.readOnly = false; - this.verifyATN = copyFrom===null ? true : copyFrom.verifyATN; - this.generateRuleBypassTransitions = copyFrom===null ? false : copyFrom.generateRuleBypassTransitions; - - return this; } ATNDeserializationOptions.defaultOptions = new ATNDeserializationOptions(); @@ -22,4 +22,4 @@ ATNDeserializationOptions.defaultOptions.readOnly = true; // raise Exception("The object is read only.") // super(type(self), self).__setattr__(key,value) -exports.ATNDeserializationOptions = ATNDeserializationOptions; +module.exports = ATNDeserializationOptions diff --git a/runtime/JavaScript/src/antlr4/atn/ATNDeserializer.js b/runtime/JavaScript/src/antlr4/atn/ATNDeserializer.js index 295c07924..64bb575f9 100644 --- a/runtime/JavaScript/src/antlr4/atn/ATNDeserializer.js +++ b/runtime/JavaScript/src/antlr4/atn/ATNDeserializer.js @@ -3,676 +3,681 @@ * can be found in the LICENSE.txt file in the project root. 
*/ -var Token = require('./../Token').Token; -var ATN = require('./ATN').ATN; -var ATNType = require('./ATNType').ATNType; -var ATNStates = require('./ATNState'); -var ATNState = ATNStates.ATNState; -var BasicState = ATNStates.BasicState; -var DecisionState = ATNStates.DecisionState; -var BlockStartState = ATNStates.BlockStartState; -var BlockEndState = ATNStates.BlockEndState; -var LoopEndState = ATNStates.LoopEndState; -var RuleStartState = ATNStates.RuleStartState; -var RuleStopState = ATNStates.RuleStopState; -var TokensStartState = ATNStates.TokensStartState; -var PlusLoopbackState = ATNStates.PlusLoopbackState; -var StarLoopbackState = ATNStates.StarLoopbackState; -var StarLoopEntryState = ATNStates.StarLoopEntryState; -var PlusBlockStartState = ATNStates.PlusBlockStartState; -var StarBlockStartState = ATNStates.StarBlockStartState; -var BasicBlockStartState = ATNStates.BasicBlockStartState; -var Transitions = require('./Transition'); -var Transition = Transitions.Transition; -var AtomTransition = Transitions.AtomTransition; -var SetTransition = Transitions.SetTransition; -var NotSetTransition = Transitions.NotSetTransition; -var RuleTransition = Transitions.RuleTransition; -var RangeTransition = Transitions.RangeTransition; -var ActionTransition = Transitions.ActionTransition; -var EpsilonTransition = Transitions.EpsilonTransition; -var WildcardTransition = Transitions.WildcardTransition; -var PredicateTransition = Transitions.PredicateTransition; -var PrecedencePredicateTransition = Transitions.PrecedencePredicateTransition; -var IntervalSet = require('./../IntervalSet').IntervalSet; -var Interval = require('./../IntervalSet').Interval; -var ATNDeserializationOptions = require('./ATNDeserializationOptions').ATNDeserializationOptions; -var LexerActions = require('./LexerAction'); -var LexerActionType = LexerActions.LexerActionType; -var LexerSkipAction = LexerActions.LexerSkipAction; -var LexerChannelAction = LexerActions.LexerChannelAction; -var 
LexerCustomAction = LexerActions.LexerCustomAction; -var LexerMoreAction = LexerActions.LexerMoreAction; -var LexerTypeAction = LexerActions.LexerTypeAction; -var LexerPushModeAction = LexerActions.LexerPushModeAction; -var LexerPopModeAction = LexerActions.LexerPopModeAction; -var LexerModeAction = LexerActions.LexerModeAction; +const {Token} = require('./../Token'); +const ATN = require('./ATN'); +const ATNType = require('./ATNType'); + +const { + ATNState, + BasicState, + DecisionState, + BlockStartState, + BlockEndState, + LoopEndState, + RuleStartState, + RuleStopState, + TokensStartState, + PlusLoopbackState, + StarLoopbackState, + StarLoopEntryState, + PlusBlockStartState, + StarBlockStartState, + BasicBlockStartState +} = require('./ATNState'); + +const { + Transition, + AtomTransition, + SetTransition, + NotSetTransition, + RuleTransition, + RangeTransition, + ActionTransition, + EpsilonTransition, + WildcardTransition, + PredicateTransition, + PrecedencePredicateTransition +} = require('./Transition') + +const {IntervalSet} = require('./../IntervalSet'); +const ATNDeserializationOptions = require('./ATNDeserializationOptions'); + +const { + LexerActionType, + LexerSkipAction, + LexerChannelAction, + LexerCustomAction, + LexerMoreAction, + LexerTypeAction, + LexerPushModeAction, + LexerPopModeAction, + LexerModeAction, +} = require('./LexerAction'); + // This is the earliest supported serialized UUID. // stick to serialized version for now, we don't need a UUID instance -var BASE_SERIALIZED_UUID = "AADB8D7E-AEEF-4415-AD2B-8204D6CF042E"; +const BASE_SERIALIZED_UUID = "AADB8D7E-AEEF-4415-AD2B-8204D6CF042E"; // // This UUID indicates the serialized ATN contains two sets of // IntervalSets, where the second set's values are encoded as // 32-bit integers to support the full Unicode SMP range up to U+10FFFF. 
// -var ADDED_UNICODE_SMP = "59627784-3BE5-417A-B9EB-8131A7286089"; +const ADDED_UNICODE_SMP = "59627784-3BE5-417A-B9EB-8131A7286089"; // This list contains all of the currently supported UUIDs, ordered by when // the feature first appeared in this branch. -var SUPPORTED_UUIDS = [ BASE_SERIALIZED_UUID, ADDED_UNICODE_SMP ]; +const SUPPORTED_UUIDS = [ BASE_SERIALIZED_UUID, ADDED_UNICODE_SMP ]; -var SERIALIZED_VERSION = 3; +const SERIALIZED_VERSION = 3; // This is the current serialized UUID. -var SERIALIZED_UUID = ADDED_UNICODE_SMP; +const SERIALIZED_UUID = ADDED_UNICODE_SMP; function initArray( length, value) { - var tmp = []; + const tmp = []; tmp[length-1] = value; return tmp.map(function(i) {return value;}); } -function ATNDeserializer (options) { +class ATNDeserializer { + constructor(options) { - if ( options=== undefined || options === null ) { - options = ATNDeserializationOptions.defaultOptions; + if ( options=== undefined || options === null ) { + options = ATNDeserializationOptions.defaultOptions; + } + this.deserializationOptions = options; + this.stateFactories = null; + this.actionFactories = null; } - this.deserializationOptions = options; - this.stateFactories = null; - this.actionFactories = null; - return this; -} - -// Determines if a particular serialized representation of an ATN supports -// a particular feature, identified by the {@link UUID} used for serializing -// the ATN at the time the feature was first introduced. -// -// @param feature The {@link UUID} marking the first time the feature was -// supported in the serialized ATN. -// @param actualUuid The {@link UUID} of the actual serialized ATN which is -// currently being deserialized. -// @return {@code true} if the {@code actualUuid} value represents a -// serialized ATN at or after the feature identified by {@code feature} was -// introduced; otherwise, {@code false}. 
- -ATNDeserializer.prototype.isFeatureSupported = function(feature, actualUuid) { - var idx1 = SUPPORTED_UUIDS.indexOf(feature); - if (idx1<0) { - return false; + /** + * Determines if a particular serialized representation of an ATN supports + * a particular feature, identified by the {@link UUID} used for serializing + * the ATN at the time the feature was first introduced. + * + * @param feature The {@link UUID} marking the first time the feature was + * supported in the serialized ATN. + * @param actualUuid The {@link UUID} of the actual serialized ATN which is + * currently being deserialized. + * @return {@code true} if the {@code actualUuid} value represents a + * serialized ATN at or after the feature identified by {@code feature} was + * introduced; otherwise, {@code false}. + */ + isFeatureSupported(feature, actualUuid) { + const idx1 = SUPPORTED_UUIDS.indexOf(feature); + if (idx1<0) { + return false; + } + const idx2 = SUPPORTED_UUIDS.indexOf(actualUuid); + return idx2 >= idx1; } - var idx2 = SUPPORTED_UUIDS.indexOf(actualUuid); - return idx2 >= idx1; -}; -ATNDeserializer.prototype.deserialize = function(data) { - this.reset(data); - this.checkVersion(); - this.checkUUID(); - var atn = this.readATN(); - this.readStates(atn); - this.readRules(atn); - this.readModes(atn); - var sets = []; - // First, deserialize sets with 16-bit arguments <= U+FFFF. - this.readSets(atn, sets, this.readInt.bind(this)); - // Next, if the ATN was serialized with the Unicode SMP feature, - // deserialize sets with 32-bit arguments <= U+10FFFF. 
- if (this.isFeatureSupported(ADDED_UNICODE_SMP, this.uuid)) { - this.readSets(atn, sets, this.readInt32.bind(this)); - } - this.readEdges(atn, sets); - this.readDecisions(atn); - this.readLexerActions(atn); - this.markPrecedenceDecisions(atn); - this.verifyATN(atn); - if (this.deserializationOptions.generateRuleBypassTransitions && atn.grammarType === ATNType.PARSER ) { - this.generateRuleBypassTransitions(atn); - // re-verify after modification + deserialize(data) { + this.reset(data); + this.checkVersion(); + this.checkUUID(); + const atn = this.readATN(); + this.readStates(atn); + this.readRules(atn); + this.readModes(atn); + const sets = []; + // First, deserialize sets with 16-bit arguments <= U+FFFF. + this.readSets(atn, sets, this.readInt.bind(this)); + // Next, if the ATN was serialized with the Unicode SMP feature, + // deserialize sets with 32-bit arguments <= U+10FFFF. + if (this.isFeatureSupported(ADDED_UNICODE_SMP, this.uuid)) { + this.readSets(atn, sets, this.readInt32.bind(this)); + } + this.readEdges(atn, sets); + this.readDecisions(atn); + this.readLexerActions(atn); + this.markPrecedenceDecisions(atn); this.verifyATN(atn); - } - return atn; -}; - -ATNDeserializer.prototype.reset = function(data) { - var adjust = function(c) { - var v = c.charCodeAt(0); - return v>1 ? 
v-2 : v + 65534; - }; - var temp = data.split("").map(adjust); - // don't adjust the first value since that's the version number - temp[0] = data.charCodeAt(0); - this.data = temp; - this.pos = 0; -}; - -ATNDeserializer.prototype.checkVersion = function() { - var version = this.readInt(); - if ( version !== SERIALIZED_VERSION ) { - throw ("Could not deserialize ATN with version " + version + " (expected " + SERIALIZED_VERSION + ")."); - } -}; - -ATNDeserializer.prototype.checkUUID = function() { - var uuid = this.readUUID(); - if (SUPPORTED_UUIDS.indexOf(uuid)<0) { - throw ("Could not deserialize ATN with UUID: " + uuid + - " (expected " + SERIALIZED_UUID + " or a legacy UUID).", uuid, SERIALIZED_UUID); - } - this.uuid = uuid; -}; - -ATNDeserializer.prototype.readATN = function() { - var grammarType = this.readInt(); - var maxTokenType = this.readInt(); - return new ATN(grammarType, maxTokenType); -}; - -ATNDeserializer.prototype.readStates = function(atn) { - var j, pair, stateNumber; - var loopBackStateNumbers = []; - var endStateNumbers = []; - var nstates = this.readInt(); - for(var i=0; i1 ? 
v-2 : v + 65534; + }; + const temp = data.split("").map(adjust); + // don't adjust the first value since that's the version number + temp[0] = data.charCodeAt(0); + this.data = temp; + this.pos = 0; } - var numNonGreedyStates = this.readInt(); - for (j=0; j 0) { - bypassStart.addTransition(ruleToStartState.transitions[count-1]); - ruleToStartState.transitions = ruleToStartState.transitions.slice(-1); - } - // link the new states - atn.ruleToStartState[idx].addTransition(new EpsilonTransition(bypassStart)); - bypassStop.addTransition(new EpsilonTransition(endState)); - - var matchState = new BasicState(); - atn.addState(matchState); - matchState.addTransition(new AtomTransition(bypassStop, atn.ruleToTokenType[idx])); - bypassStart.addTransition(new EpsilonTransition(matchState)); -}; - -ATNDeserializer.prototype.stateIsEndStateFor = function(state, idx) { - if ( state.ruleIndex !== idx) { - return null; - } - if (!( state instanceof StarLoopEntryState)) { - return null; - } - var maybeLoopEndState = state.transitions[state.transitions.length - 1].target; - if (!( maybeLoopEndState instanceof LoopEndState)) { - return null; - } - if (maybeLoopEndState.epsilonOnlyTransitions && - (maybeLoopEndState.transitions[0].target instanceof RuleStopState)) { - return state; - } else { - return null; - } -}; - -// -// Analyze the {@link StarLoopEntryState} states in the specified ATN to set -// the {@link StarLoopEntryState//isPrecedenceDecision} field to the -// correct value. -// -// @param atn The ATN. 
-// -ATNDeserializer.prototype.markPrecedenceDecisions = function(atn) { - for(var i=0; i 0) { + bypassStart.addTransition(ruleToStartState.transitions[count-1]); + ruleToStartState.transitions = ruleToStartState.transitions.slice(-1); } - this.checkCondition(state.epsilonOnlyTransitions || state.transitions.length <= 1); - if (state instanceof PlusBlockStartState) { - this.checkCondition(state.loopBackState !== null); - } else if (state instanceof StarLoopEntryState) { - this.checkCondition(state.loopBackState !== null); - this.checkCondition(state.transitions.length === 2); - if (state.transitions[0].target instanceof StarBlockStartState) { - this.checkCondition(state.transitions[1].target instanceof LoopEndState); - this.checkCondition(!state.nonGreedy); - } else if (state.transitions[0].target instanceof LoopEndState) { - this.checkCondition(state.transitions[1].target instanceof StarBlockStartState); - this.checkCondition(state.nonGreedy); - } else { - throw("IllegalState"); - } - } else if (state instanceof StarLoopbackState) { - this.checkCondition(state.transitions.length === 1); - this.checkCondition(state.transitions[0].target instanceof StarLoopEntryState); - } else if (state instanceof LoopEndState) { - this.checkCondition(state.loopBackState !== null); - } else if (state instanceof RuleStartState) { - this.checkCondition(state.stopState !== null); - } else if (state instanceof BlockStartState) { - this.checkCondition(state.endState !== null); - } else if (state instanceof BlockEndState) { - this.checkCondition(state.startState !== null); - } else if (state instanceof DecisionState) { - this.checkCondition(state.transitions.length <= 1 || state.decision >= 0); + // link the new states + atn.ruleToStartState[idx].addTransition(new EpsilonTransition(bypassStart)); + bypassStop.addTransition(new EpsilonTransition(endState)); + + const matchState = new BasicState(); + atn.addState(matchState); + matchState.addTransition(new AtomTransition(bypassStop, 
atn.ruleToTokenType[idx])); + bypassStart.addTransition(new EpsilonTransition(matchState)); + } + + stateIsEndStateFor(state, idx) { + if ( state.ruleIndex !== idx) { + return null; + } + if (!( state instanceof StarLoopEntryState)) { + return null; + } + const maybeLoopEndState = state.transitions[state.transitions.length - 1].target; + if (!( maybeLoopEndState instanceof LoopEndState)) { + return null; + } + if (maybeLoopEndState.epsilonOnlyTransitions && + (maybeLoopEndState.transitions[0].target instanceof RuleStopState)) { + return state; } else { - this.checkCondition(state.transitions.length <= 1 || (state instanceof RuleStopState)); + return null; } - } -}; - -ATNDeserializer.prototype.checkCondition = function(condition, message) { - if (!condition) { - if (message === undefined || message===null) { - message = "IllegalState"; - } - throw (message); } -}; -ATNDeserializer.prototype.readInt = function() { - return this.data[this.pos++]; -}; + /** + * Analyze the {@link StarLoopEntryState} states in the specified ATN to set + * the {@link StarLoopEntryState//isPrecedenceDecision} field to the + * correct value. + * @param atn The ATN. 
+ */ + markPrecedenceDecisions(atn) { + for(let i=0; i= 0); + } else { + this.checkCondition(state.transitions.length <= 1 || (state instanceof RuleStopState)); + } + } + } -ATNDeserializer.prototype.readLong = function() { - var low = this.readInt32(); - var high = this.readInt32(); - return (low & 0x00000000FFFFFFFF) | (high << 32); -}; + checkCondition(condition, message) { + if (!condition) { + if (message === undefined || message===null) { + message = "IllegalState"; + } + throw (message); + } + } + + readInt() { + return this.data[this.pos++]; + } + + readInt32() { + const low = this.readInt(); + const high = this.readInt(); + return low | (high << 16); + } + + readLong() { + const low = this.readInt32(); + const high = this.readInt32(); + return (low & 0x00000000FFFFFFFF) | (high << 32); + } + + readUUID() { + const bb = []; + for(let i=7;i>=0;i--) { + const int = this.readInt(); + /* jshint bitwise: false */ + bb[(2*i)+1] = int & 0xFF; + bb[2*i] = (int >> 8) & 0xFF; + } + return byteToHex[bb[0]] + byteToHex[bb[1]] + + byteToHex[bb[2]] + byteToHex[bb[3]] + '-' + + byteToHex[bb[4]] + byteToHex[bb[5]] + '-' + + byteToHex[bb[6]] + byteToHex[bb[7]] + '-' + + byteToHex[bb[8]] + byteToHex[bb[9]] + '-' + + byteToHex[bb[10]] + byteToHex[bb[11]] + + byteToHex[bb[12]] + byteToHex[bb[13]] + + byteToHex[bb[14]] + byteToHex[bb[15]]; + } + + edgeFactory(atn, type, src, trg, arg1, arg2, arg3, sets) { + const target = atn.states[trg]; + switch(type) { + case Transition.EPSILON: + return new EpsilonTransition(target); + case Transition.RANGE: + return arg3 !== 0 ? new RangeTransition(target, Token.EOF, arg2) : new RangeTransition(target, arg1, arg2); + case Transition.RULE: + return new RuleTransition(atn.states[arg1], arg2, arg3, target); + case Transition.PREDICATE: + return new PredicateTransition(target, arg1, arg2, arg3 !== 0); + case Transition.PRECEDENCE: + return new PrecedencePredicateTransition(target, arg1); + case Transition.ATOM: + return arg3 !== 0 ? 
new AtomTransition(target, Token.EOF) : new AtomTransition(target, arg1); + case Transition.ACTION: + return new ActionTransition(target, arg1, arg2, arg3 !== 0); + case Transition.SET: + return new SetTransition(target, sets[arg1]); + case Transition.NOT_SET: + return new NotSetTransition(target, sets[arg1]); + case Transition.WILDCARD: + return new WildcardTransition(target); + default: + throw "The specified transition type: " + type + " is not valid."; + } + } + + stateFactory(type, ruleIndex) { + if (this.stateFactories === null) { + const sf = []; + sf[ATNState.INVALID_TYPE] = null; + sf[ATNState.BASIC] = () => new BasicState(); + sf[ATNState.RULE_START] = () => new RuleStartState(); + sf[ATNState.BLOCK_START] = () => new BasicBlockStartState(); + sf[ATNState.PLUS_BLOCK_START] = () => new PlusBlockStartState(); + sf[ATNState.STAR_BLOCK_START] = () => new StarBlockStartState(); + sf[ATNState.TOKEN_START] = () => new TokensStartState(); + sf[ATNState.RULE_STOP] = () => new RuleStopState(); + sf[ATNState.BLOCK_END] = () => new BlockEndState(); + sf[ATNState.STAR_LOOP_BACK] = () => new StarLoopbackState(); + sf[ATNState.STAR_LOOP_ENTRY] = () => new StarLoopEntryState(); + sf[ATNState.PLUS_LOOP_BACK] = () => new PlusLoopbackState(); + sf[ATNState.LOOP_END] = () => new LoopEndState(); + this.stateFactories = sf; + } + if (type>this.stateFactories.length || this.stateFactories[type] === null) { + throw("The specified state type " + type + " is not valid."); + } else { + const s = this.stateFactories[type](); + if (s!==null) { + s.ruleIndex = ruleIndex; + return s; + } + } + } + + lexerActionFactory(type, data1, data2) { + if (this.actionFactories === null) { + const af = []; + af[LexerActionType.CHANNEL] = (data1, data2) => new LexerChannelAction(data1); + af[LexerActionType.CUSTOM] = (data1, data2) => new LexerCustomAction(data1, data2); + af[LexerActionType.MODE] = (data1, data2) => new LexerModeAction(data1); + af[LexerActionType.MORE] = (data1, data2) => 
LexerMoreAction.INSTANCE; + af[LexerActionType.POP_MODE] = (data1, data2) => LexerPopModeAction.INSTANCE; + af[LexerActionType.PUSH_MODE] = (data1, data2) => new LexerPushModeAction(data1); + af[LexerActionType.SKIP] = (data1, data2) => LexerSkipAction.INSTANCE; + af[LexerActionType.TYPE] = (data1, data2) => new LexerTypeAction(data1); + this.actionFactories = af; + } + if (type>this.actionFactories.length || this.actionFactories[type] === null) { + throw("The specified lexer action type " + type + " is not valid."); + } else { + return this.actionFactories[type](data1, data2); + } + } +} function createByteToHex() { - var bth = []; - for (var i = 0; i < 256; i++) { + const bth = []; + for (let i = 0; i < 256; i++) { bth[i] = (i + 0x100).toString(16).substr(1).toUpperCase(); } return bth; } -var byteToHex = createByteToHex(); - -ATNDeserializer.prototype.readUUID = function() { - var bb = []; - for(var i=7;i>=0;i--) { - var int = this.readInt(); - /* jshint bitwise: false */ - bb[(2*i)+1] = int & 0xFF; - bb[2*i] = (int >> 8) & 0xFF; - } - return byteToHex[bb[0]] + byteToHex[bb[1]] + - byteToHex[bb[2]] + byteToHex[bb[3]] + '-' + - byteToHex[bb[4]] + byteToHex[bb[5]] + '-' + - byteToHex[bb[6]] + byteToHex[bb[7]] + '-' + - byteToHex[bb[8]] + byteToHex[bb[9]] + '-' + - byteToHex[bb[10]] + byteToHex[bb[11]] + - byteToHex[bb[12]] + byteToHex[bb[13]] + - byteToHex[bb[14]] + byteToHex[bb[15]]; -}; - -ATNDeserializer.prototype.edgeFactory = function(atn, type, src, trg, arg1, arg2, arg3, sets) { - var target = atn.states[trg]; - switch(type) { - case Transition.EPSILON: - return new EpsilonTransition(target); - case Transition.RANGE: - return arg3 !== 0 ? 
new RangeTransition(target, Token.EOF, arg2) : new RangeTransition(target, arg1, arg2); - case Transition.RULE: - return new RuleTransition(atn.states[arg1], arg2, arg3, target); - case Transition.PREDICATE: - return new PredicateTransition(target, arg1, arg2, arg3 !== 0); - case Transition.PRECEDENCE: - return new PrecedencePredicateTransition(target, arg1); - case Transition.ATOM: - return arg3 !== 0 ? new AtomTransition(target, Token.EOF) : new AtomTransition(target, arg1); - case Transition.ACTION: - return new ActionTransition(target, arg1, arg2, arg3 !== 0); - case Transition.SET: - return new SetTransition(target, sets[arg1]); - case Transition.NOT_SET: - return new NotSetTransition(target, sets[arg1]); - case Transition.WILDCARD: - return new WildcardTransition(target); - default: - throw "The specified transition type: " + type + " is not valid."; - } -}; - -ATNDeserializer.prototype.stateFactory = function(type, ruleIndex) { - if (this.stateFactories === null) { - var sf = []; - sf[ATNState.INVALID_TYPE] = null; - sf[ATNState.BASIC] = function() { return new BasicState(); }; - sf[ATNState.RULE_START] = function() { return new RuleStartState(); }; - sf[ATNState.BLOCK_START] = function() { return new BasicBlockStartState(); }; - sf[ATNState.PLUS_BLOCK_START] = function() { return new PlusBlockStartState(); }; - sf[ATNState.STAR_BLOCK_START] = function() { return new StarBlockStartState(); }; - sf[ATNState.TOKEN_START] = function() { return new TokensStartState(); }; - sf[ATNState.RULE_STOP] = function() { return new RuleStopState(); }; - sf[ATNState.BLOCK_END] = function() { return new BlockEndState(); }; - sf[ATNState.STAR_LOOP_BACK] = function() { return new StarLoopbackState(); }; - sf[ATNState.STAR_LOOP_ENTRY] = function() { return new StarLoopEntryState(); }; - sf[ATNState.PLUS_LOOP_BACK] = function() { return new PlusLoopbackState(); }; - sf[ATNState.LOOP_END] = function() { return new LoopEndState(); }; - this.stateFactories = sf; - } - if 
(type>this.stateFactories.length || this.stateFactories[type] === null) { - throw("The specified state type " + type + " is not valid."); - } else { - var s = this.stateFactories[type](); - if (s!==null) { - s.ruleIndex = ruleIndex; - return s; - } - } -}; - -ATNDeserializer.prototype.lexerActionFactory = function(type, data1, data2) { - if (this.actionFactories === null) { - var af = []; - af[LexerActionType.CHANNEL] = function(data1, data2) { return new LexerChannelAction(data1); }; - af[LexerActionType.CUSTOM] = function(data1, data2) { return new LexerCustomAction(data1, data2); }; - af[LexerActionType.MODE] = function(data1, data2) { return new LexerModeAction(data1); }; - af[LexerActionType.MORE] = function(data1, data2) { return LexerMoreAction.INSTANCE; }; - af[LexerActionType.POP_MODE] = function(data1, data2) { return LexerPopModeAction.INSTANCE; }; - af[LexerActionType.PUSH_MODE] = function(data1, data2) { return new LexerPushModeAction(data1); }; - af[LexerActionType.SKIP] = function(data1, data2) { return LexerSkipAction.INSTANCE; }; - af[LexerActionType.TYPE] = function(data1, data2) { return new LexerTypeAction(data1); }; - this.actionFactories = af; - } - if (type>this.actionFactories.length || this.actionFactories[type] === null) { - throw("The specified lexer action type " + type + " is not valid."); - } else { - return this.actionFactories[type](data1, data2); - } -}; +const byteToHex = createByteToHex(); -exports.ATNDeserializer = ATNDeserializer; \ No newline at end of file +module.exports = ATNDeserializer; diff --git a/runtime/JavaScript/src/antlr4/atn/ATNSimulator.js b/runtime/JavaScript/src/antlr4/atn/ATNSimulator.js index 6e52669cb..02169d1d8 100644 --- a/runtime/JavaScript/src/antlr4/atn/ATNSimulator.js +++ b/runtime/JavaScript/src/antlr4/atn/ATNSimulator.js @@ -1,52 +1,52 @@ -// /* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. 
* Use of this file is governed by the BSD 3-clause license that * can be found in the LICENSE.txt file in the project root. */ -/// -var DFAState = require('./../dfa/DFAState').DFAState; -var ATNConfigSet = require('./ATNConfigSet').ATNConfigSet; -var getCachedPredictionContext = require('./../PredictionContext').getCachedPredictionContext; -var Map = require('./../Utils').Map; +const {DFAState} = require('./../dfa/DFAState'); +const {ATNConfigSet} = require('./ATNConfigSet'); +const {getCachedPredictionContext} = require('./../PredictionContext'); +const {Map} = require('./../Utils'); -function ATNSimulator(atn, sharedContextCache) { +class ATNSimulator { + constructor(atn, sharedContextCache) { + /** + * The context cache maps all PredictionContext objects that are == + * to a single cached copy. This cache is shared across all contexts + * in all ATNConfigs in all DFA states. We rebuild each ATNConfigSet + * to use only cached nodes/graphs in addDFAState(). We don't want to + * fill this during closure() since there are lots of contexts that + * pop up but are not used ever again. It also greatly slows down closure(). + * + *

            This cache makes a huge difference in memory and a little bit in speed. + * For the Java grammar on java.*, it dropped the memory requirements + * at the end from 25M to 16M. We don't store any of the full context + * graphs in the DFA because they are limited to local context only, + * but apparently there's a lot of repetition there as well. We optimize + * the config contexts before storing the config set in the DFA states + * by literally rebuilding them with cached subgraphs only.

            + * + *

            I tried a cache for use during closure operations, that was + * whacked after each adaptivePredict(). It cost a little bit + * more time I think and doesn't save on the overall footprint + * so it's not worth the complexity.

            + */ + this.atn = atn; + this.sharedContextCache = sharedContextCache; + return this; + } - // The context cache maps all PredictionContext objects that are == - // to a single cached copy. This cache is shared across all contexts - // in all ATNConfigs in all DFA states. We rebuild each ATNConfigSet - // to use only cached nodes/graphs in addDFAState(). We don't want to - // fill this during closure() since there are lots of contexts that - // pop up but are not used ever again. It also greatly slows down closure(). - // - //

            This cache makes a huge difference in memory and a little bit in speed. - // For the Java grammar on java.*, it dropped the memory requirements - // at the end from 25M to 16M. We don't store any of the full context - // graphs in the DFA because they are limited to local context only, - // but apparently there's a lot of repetition there as well. We optimize - // the config contexts before storing the config set in the DFA states - // by literally rebuilding them with cached subgraphs only.

            - // - //

            I tried a cache for use during closure operations, that was - // whacked after each adaptivePredict(). It cost a little bit - // more time I think and doesn't save on the overall footprint - // so it's not worth the complexity.

            - /// - this.atn = atn; - this.sharedContextCache = sharedContextCache; - return this; + getCachedContext(context) { + if (this.sharedContextCache ===null) { + return context; + } + const visited = new Map(); + return getCachedPredictionContext(context, this.sharedContextCache, visited); + } } // Must distinguish between missing edge and edge we know leads nowhere/// ATNSimulator.ERROR = new DFAState(0x7FFFFFFF, new ATNConfigSet()); -ATNSimulator.prototype.getCachedContext = function(context) { - if (this.sharedContextCache ===null) { - return context; - } - var visited = new Map(); - return getCachedPredictionContext(context, this.sharedContextCache, visited); -}; - -exports.ATNSimulator = ATNSimulator; +module.exports = ATNSimulator; diff --git a/runtime/JavaScript/src/antlr4/atn/ATNState.js b/runtime/JavaScript/src/antlr4/atn/ATNState.js index 6f7df12a0..eb5baf4ce 100644 --- a/runtime/JavaScript/src/antlr4/atn/ATNState.js +++ b/runtime/JavaScript/src/antlr4/atn/ATNState.js @@ -1,84 +1,115 @@ -// /* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. * Use of this file is governed by the BSD 3-clause license that * can be found in the LICENSE.txt file in the project root. */ -// -// The following images show the relation of states and -// {@link ATNState//transitions} for various grammar constructs. -// -//
              -// -//
            • Solid edges marked with an &//0949; indicate a required -// {@link EpsilonTransition}.
            • -// -//
            • Dashed edges indicate locations where any transition derived from -// {@link Transition} might appear.
            • -// -//
            • Dashed nodes are place holders for either a sequence of linked -// {@link BasicState} states or the inclusion of a block representing a nested -// construct in one of the forms below.
            • -// -//
            • Nodes showing multiple outgoing alternatives with a {@code ...} support -// any number of alternatives (one or more). Nodes without the {@code ...} only -// support the exact number of alternatives shown in the diagram.
            • -// -//
            -// -//

            Basic Blocks

            -// -//

            Rule

            -// -// -// -//

            Block of 1 or more alternatives

            -// -// -// -//

            Greedy Loops

            -// -//

            Greedy Closure: {@code (...)*}

            -// -// -// -//

            Greedy Positive Closure: {@code (...)+}

            -// -// -// -//

            Greedy Optional: {@code (...)?}

            -// -// -// -//

            Non-Greedy Loops

            -// -//

            Non-Greedy Closure: {@code (...)*?}

            -// -// -// -//

            Non-Greedy Positive Closure: {@code (...)+?}

            -// -// -// -//

            Non-Greedy Optional: {@code (...)??}

            -// -// -// +const INITIAL_NUM_TRANSITIONS = 4; -var INITIAL_NUM_TRANSITIONS = 4; +/** + * The following images show the relation of states and + * {@link ATNState//transitions} for various grammar constructs. + * + *
              + * + *
            • Solid edges marked with an &//0949; indicate a required + * {@link EpsilonTransition}.
            • + * + *
            • Dashed edges indicate locations where any transition derived from + * {@link Transition} might appear.
            • + * + *
            • Dashed nodes are place holders for either a sequence of linked + * {@link BasicState} states or the inclusion of a block representing a nested + * construct in one of the forms below.
            • + * + *
            • Nodes showing multiple outgoing alternatives with a {@code ...} support + * any number of alternatives (one or more). Nodes without the {@code ...} only + * support the exact number of alternatives shown in the diagram.
            • + * + *
            + * + *

            Basic Blocks

            + * + *

            Rule

            + * + * + * + *

            Block of 1 or more alternatives

            + * + * + * + *

            Greedy Loops

            + * + *

            Greedy Closure: {@code (...)*}

            + * + * + * + *

            Greedy Positive Closure: {@code (...)+}

            + * + * + * + *

            Greedy Optional: {@code (...)?}

            + * + * + * + *

            Non-Greedy Loops

            + * + *

            Non-Greedy Closure: {@code (...)*?}

            + * + * + * + *

            Non-Greedy Positive Closure: {@code (...)+?}

            + * + * + * + *

            Non-Greedy Optional: {@code (...)??}

            + * + * + */ +class ATNState { + constructor() { + // Which ATN are we in? + this.atn = null; + this.stateNumber = ATNState.INVALID_STATE_NUMBER; + this.stateType = null; + this.ruleIndex = 0; // at runtime, we don't have Rule objects + this.epsilonOnlyTransitions = false; + // Track the transitions emanating from this ATN state. + this.transitions = []; + // Used to cache lookahead during parsing, not used during construction + this.nextTokenWithinRule = null; + } -function ATNState() { - // Which ATN are we in? - this.atn = null; - this.stateNumber = ATNState.INVALID_STATE_NUMBER; - this.stateType = null; - this.ruleIndex = 0; // at runtime, we don't have Rule objects - this.epsilonOnlyTransitions = false; - // Track the transitions emanating from this ATN state. - this.transitions = []; - // Used to cache lookahead during parsing, not used during construction - this.nextTokenWithinRule = null; - return this; + toString() { + return this.stateNumber; + } + + equals(other) { + if (other instanceof ATNState) { + return this.stateNumber===other.stateNumber; + } else { + return false; + } + } + + isNonGreedyExitState() { + return false; + } + + addTransition(trans, index) { + if(index===undefined) { + index = -1; + } + if (this.transitions.length===0) { + this.epsilonOnlyTransitions = trans.isEpsilon; + } else if(this.epsilonOnlyTransitions !== trans.isEpsilon) { + this.epsilonOnlyTransitions = false; + } + if (index===-1) { + this.transitions.push(trans); + } else { + this.transitions.splice(index, 1, trans); + } + } } // constants for serialization @@ -113,214 +144,172 @@ ATNState.serializationNames = [ ATNState.INVALID_STATE_NUMBER = -1; -ATNState.prototype.toString = function() { - return this.stateNumber; -}; -ATNState.prototype.equals = function(other) { - if (other instanceof ATNState) { - return this.stateNumber===other.stateNumber; - } else { - return false; +class BasicState extends ATNState { + constructor() { + super(); + this.stateType = 
ATNState.BASIC; } -}; +} -ATNState.prototype.isNonGreedyExitState = function() { - return false; -}; - - -ATNState.prototype.addTransition = function(trans, index) { - if(index===undefined) { - index = -1; - } - if (this.transitions.length===0) { - this.epsilonOnlyTransitions = trans.isEpsilon; - } else if(this.epsilonOnlyTransitions !== trans.isEpsilon) { - this.epsilonOnlyTransitions = false; +class DecisionState extends ATNState { + constructor() { + super(); + this.decision = -1; + this.nonGreedy = false; + return this; } - if (index===-1) { - this.transitions.push(trans); - } else { - this.transitions.splice(index, 1, trans); +} + +/** + * The start of a regular {@code (...)} block + */ +class BlockStartState extends DecisionState { + constructor() { + super(); + this.endState = null; + return this; } -}; - -function BasicState() { - ATNState.call(this); - this.stateType = ATNState.BASIC; - return this; } -BasicState.prototype = Object.create(ATNState.prototype); -BasicState.prototype.constructor = BasicState; - - -function DecisionState() { - ATNState.call(this); - this.decision = -1; - this.nonGreedy = false; - return this; +class BasicBlockStartState extends BlockStartState { + constructor() { + super(); + this.stateType = ATNState.BLOCK_START; + return this; + } } -DecisionState.prototype = Object.create(ATNState.prototype); -DecisionState.prototype.constructor = DecisionState; - - -// The start of a regular {@code (...)} block. 
-function BlockStartState() { - DecisionState.call(this); - this.endState = null; - return this; +/** + * Terminal node of a simple {@code (a|b|c)} block + */ +class BlockEndState extends ATNState { + constructor() { + super(); + this.stateType = ATNState.BLOCK_END; + this.startState = null; + return this; + } } -BlockStartState.prototype = Object.create(DecisionState.prototype); -BlockStartState.prototype.constructor = BlockStartState; - - -function BasicBlockStartState() { - BlockStartState.call(this); - this.stateType = ATNState.BLOCK_START; - return this; +/** + * The last node in the ATN for a rule, unless that rule is the start symbol. + * In that case, there is one transition to EOF. Later, we might encode + * references to all calls to this rule to compute FOLLOW sets for + * error handling + */ +class RuleStopState extends ATNState { + constructor() { + super(); + this.stateType = ATNState.RULE_STOP; + return this; + } } -BasicBlockStartState.prototype = Object.create(BlockStartState.prototype); -BasicBlockStartState.prototype.constructor = BasicBlockStartState; - - -// Terminal node of a simple {@code (a|b|c)} block. -function BlockEndState() { - ATNState.call(this); - this.stateType = ATNState.BLOCK_END; - this.startState = null; - return this; +class RuleStartState extends ATNState { + constructor() { + super(); + this.stateType = ATNState.RULE_START; + this.stopState = null; + this.isPrecedenceRule = false; + return this; + } } -BlockEndState.prototype = Object.create(ATNState.prototype); -BlockEndState.prototype.constructor = BlockEndState; - - -// The last node in the ATN for a rule, unless that rule is the start symbol. -// In that case, there is one transition to EOF. Later, we might encode -// references to all calls to this rule to compute FOLLOW sets for -// error handling. -// -function RuleStopState() { - ATNState.call(this); - this.stateType = ATNState.RULE_STOP; - return this; +/** + * Decision state for {@code A+} and {@code (A|B)+}. 
It has two transitions: + * one to the loop back to start of the block and one to exit. + */ +class PlusLoopbackState extends DecisionState { + constructor() { + super(); + this.stateType = ATNState.PLUS_LOOP_BACK; + return this; + } } -RuleStopState.prototype = Object.create(ATNState.prototype); -RuleStopState.prototype.constructor = RuleStopState; - -function RuleStartState() { - ATNState.call(this); - this.stateType = ATNState.RULE_START; - this.stopState = null; - this.isPrecedenceRule = false; - return this; +/** + * Start of {@code (A|B|...)+} loop. Technically a decision state, but + * we don't use for code generation; somebody might need it, so I'm defining + * it for completeness. In reality, the {@link PlusLoopbackState} node is the + * real decision-making note for {@code A+} + */ +class PlusBlockStartState extends BlockStartState { + constructor() { + super(); + this.stateType = ATNState.PLUS_BLOCK_START; + this.loopBackState = null; + return this; + } } -RuleStartState.prototype = Object.create(ATNState.prototype); -RuleStartState.prototype.constructor = RuleStartState; - -// Decision state for {@code A+} and {@code (A|B)+}. It has two transitions: -// one to the loop back to start of the block and one to exit. -// -function PlusLoopbackState() { - DecisionState.call(this); - this.stateType = ATNState.PLUS_LOOP_BACK; - return this; +/** + * The block that begins a closure loop + */ +class StarBlockStartState extends BlockStartState { + constructor() { + super(); + this.stateType = ATNState.STAR_BLOCK_START; + return this; + } } -PlusLoopbackState.prototype = Object.create(DecisionState.prototype); -PlusLoopbackState.prototype.constructor = PlusLoopbackState; - - -// Start of {@code (A|B|...)+} loop. Technically a decision state, but -// we don't use for code generation; somebody might need it, so I'm defining -// it for completeness. In reality, the {@link PlusLoopbackState} node is the -// real decision-making note for {@code A+}. 
-// -function PlusBlockStartState() { - BlockStartState.call(this); - this.stateType = ATNState.PLUS_BLOCK_START; - this.loopBackState = null; - return this; +class StarLoopbackState extends ATNState { + constructor() { + super(); + this.stateType = ATNState.STAR_LOOP_BACK; + return this; + } } -PlusBlockStartState.prototype = Object.create(BlockStartState.prototype); -PlusBlockStartState.prototype.constructor = PlusBlockStartState; - -// The block that begins a closure loop. -function StarBlockStartState() { - BlockStartState.call(this); - this.stateType = ATNState.STAR_BLOCK_START; - return this; +class StarLoopEntryState extends DecisionState { + constructor() { + super(); + this.stateType = ATNState.STAR_LOOP_ENTRY; + this.loopBackState = null; + // Indicates whether this state can benefit from a precedence DFA during SLL decision making. + this.isPrecedenceDecision = null; + return this; + } } -StarBlockStartState.prototype = Object.create(BlockStartState.prototype); -StarBlockStartState.prototype.constructor = StarBlockStartState; - - -function StarLoopbackState() { - ATNState.call(this); - this.stateType = ATNState.STAR_LOOP_BACK; - return this; +/** + * Mark the end of a * or + loop + */ +class LoopEndState extends ATNState { + constructor() { + super(); + this.stateType = ATNState.LOOP_END; + this.loopBackState = null; + return this; + } } -StarLoopbackState.prototype = Object.create(ATNState.prototype); -StarLoopbackState.prototype.constructor = StarLoopbackState; - - -function StarLoopEntryState() { - DecisionState.call(this); - this.stateType = ATNState.STAR_LOOP_ENTRY; - this.loopBackState = null; - // Indicates whether this state can benefit from a precedence DFA during SLL decision making. 
- this.isPrecedenceDecision = null; - return this; +/** + * The Tokens rule start state linking to each lexer rule start state + */ +class TokensStartState extends DecisionState { + constructor() { + super(); + this.stateType = ATNState.TOKEN_START; + return this; + } } -StarLoopEntryState.prototype = Object.create(DecisionState.prototype); -StarLoopEntryState.prototype.constructor = StarLoopEntryState; - - -// Mark the end of a * or + loop. -function LoopEndState() { - ATNState.call(this); - this.stateType = ATNState.LOOP_END; - this.loopBackState = null; - return this; +module.exports = { + ATNState, + BasicState, + DecisionState, + BlockStartState, + BlockEndState, + LoopEndState, + RuleStartState, + RuleStopState, + TokensStartState, + PlusLoopbackState, + StarLoopbackState, + StarLoopEntryState, + PlusBlockStartState, + StarBlockStartState, + BasicBlockStartState } - -LoopEndState.prototype = Object.create(ATNState.prototype); -LoopEndState.prototype.constructor = LoopEndState; - - -// The Tokens rule start state linking to each lexer rule start state */ -function TokensStartState() { - DecisionState.call(this); - this.stateType = ATNState.TOKEN_START; - return this; -} - -TokensStartState.prototype = Object.create(DecisionState.prototype); -TokensStartState.prototype.constructor = TokensStartState; - -exports.ATNState = ATNState; -exports.BasicState = BasicState; -exports.DecisionState = DecisionState; -exports.BlockStartState = BlockStartState; -exports.BlockEndState = BlockEndState; -exports.LoopEndState = LoopEndState; -exports.RuleStartState = RuleStartState; -exports.RuleStopState = RuleStopState; -exports.TokensStartState = TokensStartState; -exports.PlusLoopbackState = PlusLoopbackState; -exports.StarLoopbackState = StarLoopbackState; -exports.StarLoopEntryState = StarLoopEntryState; -exports.PlusBlockStartState = PlusBlockStartState; -exports.StarBlockStartState = StarBlockStartState; -exports.BasicBlockStartState = BasicBlockStartState; diff --git 
a/runtime/JavaScript/src/antlr4/atn/ATNType.js b/runtime/JavaScript/src/antlr4/atn/ATNType.js index 788195c14..7c222a48b 100644 --- a/runtime/JavaScript/src/antlr4/atn/ATNType.js +++ b/runtime/JavaScript/src/antlr4/atn/ATNType.js @@ -2,16 +2,12 @@ * Use of this file is governed by the BSD 3-clause license that * can be found in the LICENSE.txt file in the project root. */ -/// -// Represents the type of recognizer an ATN applies to. - -function ATNType() { - -} - -ATNType.LEXER = 0; -ATNType.PARSER = 1; - -exports.ATNType = ATNType; +/** + * Represents the type of recognizer an ATN applies to + */ +module.exports = { + LEXER: 0, + PARSER: 1 +}; diff --git a/runtime/JavaScript/src/antlr4/atn/LexerATNSimulator.js b/runtime/JavaScript/src/antlr4/atn/LexerATNSimulator.js index 4b6e11b1d..c863f67b2 100644 --- a/runtime/JavaScript/src/antlr4/atn/LexerATNSimulator.js +++ b/runtime/JavaScript/src/antlr4/atn/LexerATNSimulator.js @@ -1,40 +1,21 @@ -// /* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. * Use of this file is governed by the BSD 3-clause license that * can be found in the LICENSE.txt file in the project root. */ -/// -// When we hit an accept state in either the DFA or the ATN, we -// have to notify the character stream to start buffering characters -// via {@link IntStream//mark} and record the current state. The current sim state -// includes the current index into the input, the current line, -// and current character position in that line. Note that the Lexer is -// tracking the starting line and characterization of the token. These -// variables track the "state" of the simulator when it hits an accept state. -// -//

            We track these variables separately for the DFA and ATN simulation -// because the DFA simulation often has to fail over to the ATN -// simulation. If the ATN simulation fails, we need the DFA to fall -// back to its previously accepted state, if any. If the ATN succeeds, -// then the ATN does the accept and the DFA simulator that invoked it -// can simply return the predicted token type.

            -/// - -var Token = require('./../Token').Token; -var Lexer = require('./../Lexer').Lexer; -var ATN = require('./ATN').ATN; -var ATNSimulator = require('./ATNSimulator').ATNSimulator; -var DFAState = require('./../dfa/DFAState').DFAState; -var ATNConfigSet = require('./ATNConfigSet').ATNConfigSet; -var OrderedATNConfigSet = require('./ATNConfigSet').OrderedATNConfigSet; -var PredictionContext = require('./../PredictionContext').PredictionContext; -var SingletonPredictionContext = require('./../PredictionContext').SingletonPredictionContext; -var RuleStopState = require('./ATNState').RuleStopState; -var LexerATNConfig = require('./ATNConfig').LexerATNConfig; -var Transition = require('./Transition').Transition; -var LexerActionExecutor = require('./LexerActionExecutor').LexerActionExecutor; -var LexerNoViableAltException = require('./../error/Errors').LexerNoViableAltException; +const {Token} = require('./../Token'); +const Lexer = require('./../Lexer'); +const ATN = require('./ATN'); +const ATNSimulator = require('./ATNSimulator'); +const {DFAState} = require('./../dfa/DFAState'); +const {OrderedATNConfigSet} = require('./ATNConfigSet'); +const {PredictionContext} = require('./../PredictionContext'); +const {SingletonPredictionContext} = require('./../PredictionContext'); +const {RuleStopState} = require('./ATNState'); +const {LexerATNConfig} = require('./ATNConfig'); +const {Transition} = require('./Transition'); +const LexerActionExecutor = require('./LexerActionExecutor'); +const {LexerNoViableAltException} = require('./../error/Errors'); function resetSimState(sim) { sim.index = -1; @@ -43,40 +24,621 @@ function resetSimState(sim) { sim.dfaState = null; } -function SimState() { - resetSimState(this); - return this; +class SimState { + constructor() { + resetSimState(this); + } + + reset() { + resetSimState(this); + } } -SimState.prototype.reset = function() { - resetSimState(this); -}; +class LexerATNSimulator extends ATNSimulator { + /** + * When 
we hit an accept state in either the DFA or the ATN, we + * have to notify the character stream to start buffering characters + * via {@link IntStream//mark} and record the current state. The current sim state + * includes the current index into the input, the current line, + * and current character position in that line. Note that the Lexer is + * tracking the starting line and characterization of the token. These + * variables track the "state" of the simulator when it hits an accept state. + * + *

            We track these variables separately for the DFA and ATN simulation + * because the DFA simulation often has to fail over to the ATN + * simulation. If the ATN simulation fails, we need the DFA to fall + * back to its previously accepted state, if any. If the ATN succeeds, + * then the ATN does the accept and the DFA simulator that invoked it + * can simply return the predicted token type.

            + */ + constructor(recog, atn, decisionToDFA, sharedContextCache) { + super(atn, sharedContextCache); + this.decisionToDFA = decisionToDFA; + this.recog = recog; + /** + * The current token's starting index into the character stream. + * Shared across DFA to ATN simulation in case the ATN fails and the + * DFA did not have a previous accept state. In this case, we use the + * ATN-generated exception object + */ + this.startIndex = -1; + // line number 1..n within the input/// + this.line = 1; + /** + * The index of the character relative to the beginning of the line + * 0..n-1 + */ + this.column = 0; + this.mode = Lexer.DEFAULT_MODE; + /** + * Used during DFA/ATN exec to record the most recent accept configuration + * info + */ + this.prevAccept = new SimState(); + } -function LexerATNSimulator(recog, atn, decisionToDFA, sharedContextCache) { - ATNSimulator.call(this, atn, sharedContextCache); - this.decisionToDFA = decisionToDFA; - this.recog = recog; - // The current token's starting index into the character stream. - // Shared across DFA to ATN simulation in case the ATN fails and the - // DFA did not have a previous accept state. In this case, we use the - // ATN-generated exception object. 
- this.startIndex = -1; - // line number 1..n within the input/// - this.line = 1; - // The index of the character relative to the beginning of the line - // 0..n-1/// - this.column = 0; - this.mode = Lexer.DEFAULT_MODE; - // Used during DFA/ATN exec to record the most recent accept configuration - // info - this.prevAccept = new SimState(); - // done - return this; + copyState(simulator) { + this.column = simulator.column; + this.line = simulator.line; + this.mode = simulator.mode; + this.startIndex = simulator.startIndex; + } + + match(input, mode) { + this.match_calls += 1; + this.mode = mode; + const mark = input.mark(); + try { + this.startIndex = input.index; + this.prevAccept.reset(); + const dfa = this.decisionToDFA[mode]; + if (dfa.s0 === null) { + return this.matchATN(input); + } else { + return this.execATN(input, dfa.s0); + } + } finally { + input.release(mark); + } + } + + reset() { + this.prevAccept.reset(); + this.startIndex = -1; + this.line = 1; + this.column = 0; + this.mode = Lexer.DEFAULT_MODE; + } + + matchATN(input) { + const startState = this.atn.modeToStartState[this.mode]; + + if (LexerATNSimulator.debug) { + console.log("matchATN mode " + this.mode + " start: " + startState); + } + const old_mode = this.mode; + const s0_closure = this.computeStartState(input, startState); + const suppressEdge = s0_closure.hasSemanticContext; + s0_closure.hasSemanticContext = false; + + const next = this.addDFAState(s0_closure); + if (!suppressEdge) { + this.decisionToDFA[this.mode].s0 = next; + } + + const predict = this.execATN(input, next); + + if (LexerATNSimulator.debug) { + console.log("DFA after matchATN: " + this.decisionToDFA[old_mode].toLexerString()); + } + return predict; + } + + execATN(input, ds0) { + if (LexerATNSimulator.debug) { + console.log("start state closure=" + ds0.configs); + } + if (ds0.isAcceptState) { + // allow zero-length tokens + this.captureSimState(this.prevAccept, input, ds0); + } + let t = input.LA(1); + let s = ds0; // s 
is current/from DFA state + + while (true) { // while more work + if (LexerATNSimulator.debug) { + console.log("execATN loop starting closure: " + s.configs); + } + + /** + * As we move src->trg, src->trg, we keep track of the previous trg to + * avoid looking up the DFA state again, which is expensive. + * If the previous target was already part of the DFA, we might + * be able to avoid doing a reach operation upon t. If s!=null, + * it means that semantic predicates didn't prevent us from + * creating a DFA state. Once we know s!=null, we check to see if + * the DFA state has an edge already for t. If so, we can just reuse + * it's configuration set; there's no point in re-computing it. + * This is kind of like doing DFA simulation within the ATN + * simulation because DFA simulation is really just a way to avoid + * computing reach/closure sets. Technically, once we know that + * we have a previously added DFA state, we could jump over to + * the DFA simulator. But, that would mean popping back and forth + * a lot and making things more complicated algorithmically. + * This optimization makes a lot of sense for loops within DFA. + * A character will take us back to an existing DFA state + * that already has lots of edges out of it. e.g., .* in comments. + * print("Target for:" + str(s) + " and:" + str(t)) + */ + let target = this.getExistingTargetState(s, t); + // print("Existing:" + str(target)) + if (target === null) { + target = this.computeTargetState(input, s, t); + // print("Computed:" + str(target)) + } + if (target === ATNSimulator.ERROR) { + break; + } + // If this is a consumable input element, make sure to consume before + // capturing the accept state so the input index, line, and char + // position accurately reflect the state of the interpreter at the + // end of the token. 
+ if (t !== Token.EOF) { + this.consume(input); + } + if (target.isAcceptState) { + this.captureSimState(this.prevAccept, input, target); + if (t === Token.EOF) { + break; + } + } + t = input.LA(1); + s = target; // flip; current DFA target becomes new src/from state + } + return this.failOrAccept(this.prevAccept, input, s.configs, t); + } + + /** + * Get an existing target state for an edge in the DFA. If the target state + * for the edge has not yet been computed or is otherwise not available, + * this method returns {@code null}. + * + * @param s The current DFA state + * @param t The next input symbol + * @return The existing target DFA state for the given input symbol + * {@code t}, or {@code null} if the target state for this edge is not + * already cached + */ + getExistingTargetState(s, t) { + if (s.edges === null || t < LexerATNSimulator.MIN_DFA_EDGE || t > LexerATNSimulator.MAX_DFA_EDGE) { + return null; + } + + let target = s.edges[t - LexerATNSimulator.MIN_DFA_EDGE]; + if(target===undefined) { + target = null; + } + if (LexerATNSimulator.debug && target !== null) { + console.log("reuse state " + s.stateNumber + " edge to " + target.stateNumber); + } + return target; + } + + /** + * Compute a target state for an edge in the DFA, and attempt to add the + * computed state and corresponding edge to the DFA. + * + * @param input The input stream + * @param s The current DFA state + * @param t The next input symbol + * + * @return The computed target DFA state for the given input symbol + * {@code t}. If {@code t} does not lead to a valid DFA state, this method + * returns {@link //ERROR}. 
+ */ + computeTargetState(input, s, t) { + const reach = new OrderedATNConfigSet(); + // if we don't find an existing DFA state + // Fill reach starting from closure, following t transitions + this.getReachableConfigSet(input, s.configs, reach, t); + + if (reach.items.length === 0) { // we got nowhere on t from s + if (!reach.hasSemanticContext) { + // we got nowhere on t, don't throw out this knowledge; it'd + // cause a failover from DFA later. + this.addDFAEdge(s, t, ATNSimulator.ERROR); + } + // stop when we can't match any more char + return ATNSimulator.ERROR; + } + // Add an edge from s to target DFA found/created for reach + return this.addDFAEdge(s, t, null, reach); + } + + failOrAccept(prevAccept, input, reach, t) { + if (this.prevAccept.dfaState !== null) { + const lexerActionExecutor = prevAccept.dfaState.lexerActionExecutor; + this.accept(input, lexerActionExecutor, this.startIndex, + prevAccept.index, prevAccept.line, prevAccept.column); + return prevAccept.dfaState.prediction; + } else { + // if no accept and EOF is first char, return EOF + if (t === Token.EOF && input.index === this.startIndex) { + return Token.EOF; + } + throw new LexerNoViableAltException(this.recog, input, this.startIndex, reach); + } + } + + /** + * Given a starting configuration set, figure out all ATN configurations + * we can reach upon input {@code t}. Parameter {@code reach} is a return + * parameter. 
+ */ + getReachableConfigSet(input, closure, + reach, t) { + // this is used to skip processing for configs which have a lower priority + // than a config that already reached an accept state for the same rule + let skipAlt = ATN.INVALID_ALT_NUMBER; + for (let i = 0; i < closure.items.length; i++) { + const cfg = closure.items[i]; + const currentAltReachedAcceptState = (cfg.alt === skipAlt); + if (currentAltReachedAcceptState && cfg.passedThroughNonGreedyDecision) { + continue; + } + if (LexerATNSimulator.debug) { + console.log("testing %s at %s\n", this.getTokenName(t), cfg + .toString(this.recog, true)); + } + for (let j = 0; j < cfg.state.transitions.length; j++) { + const trans = cfg.state.transitions[j]; // for each transition + const target = this.getReachableTarget(trans, t); + if (target !== null) { + let lexerActionExecutor = cfg.lexerActionExecutor; + if (lexerActionExecutor !== null) { + lexerActionExecutor = lexerActionExecutor.fixOffsetBeforeMatch(input.index - this.startIndex); + } + const treatEofAsEpsilon = (t === Token.EOF); + const config = new LexerATNConfig({state:target, lexerActionExecutor:lexerActionExecutor}, cfg); + if (this.closure(input, config, reach, + currentAltReachedAcceptState, true, treatEofAsEpsilon)) { + // any remaining configs for this alt have a lower priority + // than the one that just reached an accept state. 
+ skipAlt = cfg.alt; + } + } + } + } + } + + accept(input, lexerActionExecutor, + startIndex, index, line, charPos) { + if (LexerATNSimulator.debug) { + console.log("ACTION %s\n", lexerActionExecutor); + } + // seek to after last char in token + input.seek(index); + this.line = line; + this.column = charPos; + if (lexerActionExecutor !== null && this.recog !== null) { + lexerActionExecutor.execute(this.recog, input, startIndex); + } + } + + getReachableTarget(trans, t) { + if (trans.matches(t, 0, Lexer.MAX_CHAR_VALUE)) { + return trans.target; + } else { + return null; + } + } + + computeStartState(input, p) { + const initialContext = PredictionContext.EMPTY; + const configs = new OrderedATNConfigSet(); + for (let i = 0; i < p.transitions.length; i++) { + const target = p.transitions[i].target; + const cfg = new LexerATNConfig({state:target, alt:i+1, context:initialContext}, null); + this.closure(input, cfg, configs, false, false, false); + } + return configs; + } + + /** + * Since the alternatives within any lexer decision are ordered by + * preference, this method stops pursuing the closure as soon as an accept + * state is reached. After the first accept state is reached by depth-first + * search from {@code config}, all other (potentially reachable) states for + * this rule would have a lower priority. + * + * @return {Boolean} {@code true} if an accept state is reached, otherwise + * {@code false}. 
+ */ + closure(input, config, configs, + currentAltReachedAcceptState, speculative, treatEofAsEpsilon) { + let cfg = null; + if (LexerATNSimulator.debug) { + console.log("closure(" + config.toString(this.recog, true) + ")"); + } + if (config.state instanceof RuleStopState) { + if (LexerATNSimulator.debug) { + if (this.recog !== null) { + console.log("closure at %s rule stop %s\n", this.recog.ruleNames[config.state.ruleIndex], config); + } else { + console.log("closure at rule stop %s\n", config); + } + } + if (config.context === null || config.context.hasEmptyPath()) { + if (config.context === null || config.context.isEmpty()) { + configs.add(config); + return true; + } else { + configs.add(new LexerATNConfig({ state:config.state, context:PredictionContext.EMPTY}, config)); + currentAltReachedAcceptState = true; + } + } + if (config.context !== null && !config.context.isEmpty()) { + for (let i = 0; i < config.context.length; i++) { + if (config.context.getReturnState(i) !== PredictionContext.EMPTY_RETURN_STATE) { + const newContext = config.context.getParent(i); // "pop" return state + const returnState = this.atn.states[config.context.getReturnState(i)]; + cfg = new LexerATNConfig({ state:returnState, context:newContext }, config); + currentAltReachedAcceptState = this.closure(input, cfg, + configs, currentAltReachedAcceptState, speculative, + treatEofAsEpsilon); + } + } + } + return currentAltReachedAcceptState; + } + // optimization + if (!config.state.epsilonOnlyTransitions) { + if (!currentAltReachedAcceptState || !config.passedThroughNonGreedyDecision) { + configs.add(config); + } + } + for (let j = 0; j < config.state.transitions.length; j++) { + const trans = config.state.transitions[j]; + cfg = this.getEpsilonTarget(input, config, trans, configs, speculative, treatEofAsEpsilon); + if (cfg !== null) { + currentAltReachedAcceptState = this.closure(input, cfg, configs, + currentAltReachedAcceptState, speculative, treatEofAsEpsilon); + } + } + return 
currentAltReachedAcceptState; + } + + // side-effect: can alter configs.hasSemanticContext + getEpsilonTarget(input, config, trans, + configs, speculative, treatEofAsEpsilon) { + let cfg = null; + if (trans.serializationType === Transition.RULE) { + const newContext = SingletonPredictionContext.create(config.context, trans.followState.stateNumber); + cfg = new LexerATNConfig( { state:trans.target, context:newContext}, config); + } else if (trans.serializationType === Transition.PRECEDENCE) { + throw "Precedence predicates are not supported in lexers."; + } else if (trans.serializationType === Transition.PREDICATE) { + // Track traversing semantic predicates. If we traverse, + // we cannot add a DFA state for this "reach" computation + // because the DFA would not test the predicate again in the + // future. Rather than creating collections of semantic predicates + // like v3 and testing them on prediction, v4 will test them on the + // fly all the time using the ATN not the DFA. This is slower but + // semantically it's not used that often. One of the key elements to + // this predicate mechanism is not adding DFA states that see + // predicates immediately afterwards in the ATN. For example, + + // a : ID {p1}? | ID {p2}? ; + + // should create the start state for rule 'a' (to save start state + // competition), but should not create target of ID state. The + // collection of ATN states the following ID references includes + // states reached by traversing predicates. Since this is when we + // test them, we cannot cash the DFA state target of ID. 
+ + if (LexerATNSimulator.debug) { + console.log("EVAL rule " + trans.ruleIndex + ":" + trans.predIndex); + } + configs.hasSemanticContext = true; + if (this.evaluatePredicate(input, trans.ruleIndex, trans.predIndex, speculative)) { + cfg = new LexerATNConfig({ state:trans.target}, config); + } + } else if (trans.serializationType === Transition.ACTION) { + if (config.context === null || config.context.hasEmptyPath()) { + // execute actions anywhere in the start rule for a token. + // + // TODO: if the entry rule is invoked recursively, some + // actions may be executed during the recursive call. The + // problem can appear when hasEmptyPath() is true but + // isEmpty() is false. In this case, the config needs to be + // split into two contexts - one with just the empty path + // and another with everything but the empty path. + // Unfortunately, the current algorithm does not allow + // getEpsilonTarget to return two configurations, so + // additional modifications are needed before we can support + // the split operation. + const lexerActionExecutor = LexerActionExecutor.append(config.lexerActionExecutor, + this.atn.lexerActions[trans.actionIndex]); + cfg = new LexerATNConfig({ state:trans.target, lexerActionExecutor:lexerActionExecutor }, config); + } else { + // ignore actions in referenced rules + cfg = new LexerATNConfig( { state:trans.target}, config); + } + } else if (trans.serializationType === Transition.EPSILON) { + cfg = new LexerATNConfig({ state:trans.target}, config); + } else if (trans.serializationType === Transition.ATOM || + trans.serializationType === Transition.RANGE || + trans.serializationType === Transition.SET) { + if (treatEofAsEpsilon) { + if (trans.matches(Token.EOF, 0, Lexer.MAX_CHAR_VALUE)) { + cfg = new LexerATNConfig( { state:trans.target }, config); + } + } + } + return cfg; + } + + /** + * Evaluate a predicate specified in the lexer. + * + *

            If {@code speculative} is {@code true}, this method was called before + * {@link //consume} for the matched character. This method should call + * {@link //consume} before evaluating the predicate to ensure position + * sensitive values, including {@link Lexer//getText}, {@link Lexer//getLine}, + * and {@link Lexer//getcolumn}, properly reflect the current + * lexer state. This method should restore {@code input} and the simulator + * to the original state before returning (i.e. undo the actions made by the + * call to {@link //consume}.

            + * + * @param input The input stream. + * @param ruleIndex The rule containing the predicate. + * @param predIndex The index of the predicate within the rule. + * @param speculative {@code true} if the current index in {@code input} is + * one character before the predicate's location. + * + * @return {@code true} if the specified predicate evaluates to + * {@code true}. + */ + evaluatePredicate(input, ruleIndex, + predIndex, speculative) { + // assume true if no recognizer was provided + if (this.recog === null) { + return true; + } + if (!speculative) { + return this.recog.sempred(null, ruleIndex, predIndex); + } + const savedcolumn = this.column; + const savedLine = this.line; + const index = input.index; + const marker = input.mark(); + try { + this.consume(input); + return this.recog.sempred(null, ruleIndex, predIndex); + } finally { + this.column = savedcolumn; + this.line = savedLine; + input.seek(index); + input.release(marker); + } + } + + captureSimState(settings, input, dfaState) { + settings.index = input.index; + settings.line = this.line; + settings.column = this.column; + settings.dfaState = dfaState; + } + + addDFAEdge(from_, tk, to, cfgs) { + if (to === undefined) { + to = null; + } + if (cfgs === undefined) { + cfgs = null; + } + if (to === null && cfgs !== null) { + // leading to this call, ATNConfigSet.hasSemanticContext is used as a + // marker indicating dynamic predicate evaluation makes this edge + // dependent on the specific input sequence, so the static edge in the + // DFA should be omitted. The target DFAState is still created since + // execATN has the ability to resynchronize with the DFA state cache + // following the predicate evaluation step. + // + // TJP notes: next time through the DFA, we see a pred again and eval. + // If that gets us to a previously created (but dangling) DFA + // state, we can continue in pure DFA mode from there. 
+ // / + const suppressEdge = cfgs.hasSemanticContext; + cfgs.hasSemanticContext = false; + + to = this.addDFAState(cfgs); + + if (suppressEdge) { + return to; + } + } + // add the edge + if (tk < LexerATNSimulator.MIN_DFA_EDGE || tk > LexerATNSimulator.MAX_DFA_EDGE) { + // Only track edges within the DFA bounds + return to; + } + if (LexerATNSimulator.debug) { + console.log("EDGE " + from_ + " -> " + to + " upon " + tk); + } + if (from_.edges === null) { + // make room for tokens 1..n and -1 masquerading as index 0 + from_.edges = []; + } + from_.edges[tk - LexerATNSimulator.MIN_DFA_EDGE] = to; // connect + + return to; + } + + /** + * Add a new DFA state if there isn't one with this set of + * configurations already. This method also detects the first + * configuration containing an ATN rule stop state. Later, when + * traversing the DFA, we will know which rule to accept. + */ + addDFAState(configs) { + const proposed = new DFAState(null, configs); + let firstConfigWithRuleStopState = null; + for (let i = 0; i < configs.items.length; i++) { + const cfg = configs.items[i]; + if (cfg.state instanceof RuleStopState) { + firstConfigWithRuleStopState = cfg; + break; + } + } + if (firstConfigWithRuleStopState !== null) { + proposed.isAcceptState = true; + proposed.lexerActionExecutor = firstConfigWithRuleStopState.lexerActionExecutor; + proposed.prediction = this.atn.ruleToTokenType[firstConfigWithRuleStopState.state.ruleIndex]; + } + const dfa = this.decisionToDFA[this.mode]; + const existing = dfa.states.get(proposed); + if (existing!==null) { + return existing; + } + const newState = proposed; + newState.stateNumber = dfa.states.length; + configs.setReadonly(true); + newState.configs = configs; + dfa.states.add(newState); + return newState; + } + + getDFA(mode) { + return this.decisionToDFA[mode]; + } + +// Get the text matched so far for the current token. + getText(input) { + // index is first lookahead char, don't include. 
+ return input.getText(this.startIndex, input.index - 1); + } + + consume(input) { + const curChar = input.LA(1); + if (curChar === "\n".charCodeAt(0)) { + this.line += 1; + this.column = 0; + } else { + this.column += 1; + } + input.consume(); + } + + getTokenName(tt) { + if (tt === -1) { + return "EOF"; + } else { + return "'" + String.fromCharCode(tt) + "'"; + } + } } -LexerATNSimulator.prototype = Object.create(ATNSimulator.prototype); -LexerATNSimulator.prototype.constructor = LexerATNSimulator; - LexerATNSimulator.debug = false; LexerATNSimulator.dfa_debug = false; @@ -85,552 +647,4 @@ LexerATNSimulator.MAX_DFA_EDGE = 127; // forces unicode to stay in ATN LexerATNSimulator.match_calls = 0; -LexerATNSimulator.prototype.copyState = function(simulator) { - this.column = simulator.column; - this.line = simulator.line; - this.mode = simulator.mode; - this.startIndex = simulator.startIndex; -}; - -LexerATNSimulator.prototype.match = function(input, mode) { - this.match_calls += 1; - this.mode = mode; - var mark = input.mark(); - try { - this.startIndex = input.index; - this.prevAccept.reset(); - var dfa = this.decisionToDFA[mode]; - if (dfa.s0 === null) { - return this.matchATN(input); - } else { - return this.execATN(input, dfa.s0); - } - } finally { - input.release(mark); - } -}; - -LexerATNSimulator.prototype.reset = function() { - this.prevAccept.reset(); - this.startIndex = -1; - this.line = 1; - this.column = 0; - this.mode = Lexer.DEFAULT_MODE; -}; - -LexerATNSimulator.prototype.matchATN = function(input) { - var startState = this.atn.modeToStartState[this.mode]; - - if (LexerATNSimulator.debug) { - console.log("matchATN mode " + this.mode + " start: " + startState); - } - var old_mode = this.mode; - var s0_closure = this.computeStartState(input, startState); - var suppressEdge = s0_closure.hasSemanticContext; - s0_closure.hasSemanticContext = false; - - var next = this.addDFAState(s0_closure); - if (!suppressEdge) { - this.decisionToDFA[this.mode].s0 = 
next; - } - - var predict = this.execATN(input, next); - - if (LexerATNSimulator.debug) { - console.log("DFA after matchATN: " + this.decisionToDFA[old_mode].toLexerString()); - } - return predict; -}; - -LexerATNSimulator.prototype.execATN = function(input, ds0) { - if (LexerATNSimulator.debug) { - console.log("start state closure=" + ds0.configs); - } - if (ds0.isAcceptState) { - // allow zero-length tokens - this.captureSimState(this.prevAccept, input, ds0); - } - var t = input.LA(1); - var s = ds0; // s is current/from DFA state - - while (true) { // while more work - if (LexerATNSimulator.debug) { - console.log("execATN loop starting closure: " + s.configs); - } - - // As we move src->trg, src->trg, we keep track of the previous trg to - // avoid looking up the DFA state again, which is expensive. - // If the previous target was already part of the DFA, we might - // be able to avoid doing a reach operation upon t. If s!=null, - // it means that semantic predicates didn't prevent us from - // creating a DFA state. Once we know s!=null, we check to see if - // the DFA state has an edge already for t. If so, we can just reuse - // it's configuration set; there's no point in re-computing it. - // This is kind of like doing DFA simulation within the ATN - // simulation because DFA simulation is really just a way to avoid - // computing reach/closure sets. Technically, once we know that - // we have a previously added DFA state, we could jump over to - // the DFA simulator. But, that would mean popping back and forth - // a lot and making things more complicated algorithmically. - // This optimization makes a lot of sense for loops within DFA. - // A character will take us back to an existing DFA state - // that already has lots of edges out of it. e.g., .* in comments. 
- // print("Target for:" + str(s) + " and:" + str(t)) - var target = this.getExistingTargetState(s, t); - // print("Existing:" + str(target)) - if (target === null) { - target = this.computeTargetState(input, s, t); - // print("Computed:" + str(target)) - } - if (target === ATNSimulator.ERROR) { - break; - } - // If this is a consumable input element, make sure to consume before - // capturing the accept state so the input index, line, and char - // position accurately reflect the state of the interpreter at the - // end of the token. - if (t !== Token.EOF) { - this.consume(input); - } - if (target.isAcceptState) { - this.captureSimState(this.prevAccept, input, target); - if (t === Token.EOF) { - break; - } - } - t = input.LA(1); - s = target; // flip; current DFA target becomes new src/from state - } - return this.failOrAccept(this.prevAccept, input, s.configs, t); -}; - -// Get an existing target state for an edge in the DFA. If the target state -// for the edge has not yet been computed or is otherwise not available, -// this method returns {@code null}. -// -// @param s The current DFA state -// @param t The next input symbol -// @return The existing target DFA state for the given input symbol -// {@code t}, or {@code null} if the target state for this edge is not -// already cached -LexerATNSimulator.prototype.getExistingTargetState = function(s, t) { - if (s.edges === null || t < LexerATNSimulator.MIN_DFA_EDGE || t > LexerATNSimulator.MAX_DFA_EDGE) { - return null; - } - - var target = s.edges[t - LexerATNSimulator.MIN_DFA_EDGE]; - if(target===undefined) { - target = null; - } - if (LexerATNSimulator.debug && target !== null) { - console.log("reuse state " + s.stateNumber + " edge to " + target.stateNumber); - } - return target; -}; - -// Compute a target state for an edge in the DFA, and attempt to add the -// computed state and corresponding edge to the DFA. 
-// -// @param input The input stream -// @param s The current DFA state -// @param t The next input symbol -// -// @return The computed target DFA state for the given input symbol -// {@code t}. If {@code t} does not lead to a valid DFA state, this method -// returns {@link //ERROR}. -LexerATNSimulator.prototype.computeTargetState = function(input, s, t) { - var reach = new OrderedATNConfigSet(); - // if we don't find an existing DFA state - // Fill reach starting from closure, following t transitions - this.getReachableConfigSet(input, s.configs, reach, t); - - if (reach.items.length === 0) { // we got nowhere on t from s - if (!reach.hasSemanticContext) { - // we got nowhere on t, don't throw out this knowledge; it'd - // cause a failover from DFA later. - this.addDFAEdge(s, t, ATNSimulator.ERROR); - } - // stop when we can't match any more char - return ATNSimulator.ERROR; - } - // Add an edge from s to target DFA found/created for reach - return this.addDFAEdge(s, t, null, reach); -}; - -LexerATNSimulator.prototype.failOrAccept = function(prevAccept, input, reach, t) { - if (this.prevAccept.dfaState !== null) { - var lexerActionExecutor = prevAccept.dfaState.lexerActionExecutor; - this.accept(input, lexerActionExecutor, this.startIndex, - prevAccept.index, prevAccept.line, prevAccept.column); - return prevAccept.dfaState.prediction; - } else { - // if no accept and EOF is first char, return EOF - if (t === Token.EOF && input.index === this.startIndex) { - return Token.EOF; - } - throw new LexerNoViableAltException(this.recog, input, this.startIndex, reach); - } -}; - -// Given a starting configuration set, figure out all ATN configurations -// we can reach upon input {@code t}. Parameter {@code reach} is a return -// parameter. 
-LexerATNSimulator.prototype.getReachableConfigSet = function(input, closure, - reach, t) { - // this is used to skip processing for configs which have a lower priority - // than a config that already reached an accept state for the same rule - var skipAlt = ATN.INVALID_ALT_NUMBER; - for (var i = 0; i < closure.items.length; i++) { - var cfg = closure.items[i]; - var currentAltReachedAcceptState = (cfg.alt === skipAlt); - if (currentAltReachedAcceptState && cfg.passedThroughNonGreedyDecision) { - continue; - } - if (LexerATNSimulator.debug) { - console.log("testing %s at %s\n", this.getTokenName(t), cfg - .toString(this.recog, true)); - } - for (var j = 0; j < cfg.state.transitions.length; j++) { - var trans = cfg.state.transitions[j]; // for each transition - var target = this.getReachableTarget(trans, t); - if (target !== null) { - var lexerActionExecutor = cfg.lexerActionExecutor; - if (lexerActionExecutor !== null) { - lexerActionExecutor = lexerActionExecutor.fixOffsetBeforeMatch(input.index - this.startIndex); - } - var treatEofAsEpsilon = (t === Token.EOF); - var config = new LexerATNConfig({state:target, lexerActionExecutor:lexerActionExecutor}, cfg); - if (this.closure(input, config, reach, - currentAltReachedAcceptState, true, treatEofAsEpsilon)) { - // any remaining configs for this alt have a lower priority - // than the one that just reached an accept state. 
- skipAlt = cfg.alt; - } - } - } - } -}; - -LexerATNSimulator.prototype.accept = function(input, lexerActionExecutor, - startIndex, index, line, charPos) { - if (LexerATNSimulator.debug) { - console.log("ACTION %s\n", lexerActionExecutor); - } - // seek to after last char in token - input.seek(index); - this.line = line; - this.column = charPos; - if (lexerActionExecutor !== null && this.recog !== null) { - lexerActionExecutor.execute(this.recog, input, startIndex); - } -}; - -LexerATNSimulator.prototype.getReachableTarget = function(trans, t) { - if (trans.matches(t, 0, Lexer.MAX_CHAR_VALUE)) { - return trans.target; - } else { - return null; - } -}; - -LexerATNSimulator.prototype.computeStartState = function(input, p) { - var initialContext = PredictionContext.EMPTY; - var configs = new OrderedATNConfigSet(); - for (var i = 0; i < p.transitions.length; i++) { - var target = p.transitions[i].target; - var cfg = new LexerATNConfig({state:target, alt:i+1, context:initialContext}, null); - this.closure(input, cfg, configs, false, false, false); - } - return configs; -}; - -// Since the alternatives within any lexer decision are ordered by -// preference, this method stops pursuing the closure as soon as an accept -// state is reached. After the first accept state is reached by depth-first -// search from {@code config}, all other (potentially reachable) states for -// this rule would have a lower priority. -// -// @return {@code true} if an accept state is reached, otherwise -// {@code false}. 
-LexerATNSimulator.prototype.closure = function(input, config, configs, - currentAltReachedAcceptState, speculative, treatEofAsEpsilon) { - var cfg = null; - if (LexerATNSimulator.debug) { - console.log("closure(" + config.toString(this.recog, true) + ")"); - } - if (config.state instanceof RuleStopState) { - if (LexerATNSimulator.debug) { - if (this.recog !== null) { - console.log("closure at %s rule stop %s\n", this.recog.ruleNames[config.state.ruleIndex], config); - } else { - console.log("closure at rule stop %s\n", config); - } - } - if (config.context === null || config.context.hasEmptyPath()) { - if (config.context === null || config.context.isEmpty()) { - configs.add(config); - return true; - } else { - configs.add(new LexerATNConfig({ state:config.state, context:PredictionContext.EMPTY}, config)); - currentAltReachedAcceptState = true; - } - } - if (config.context !== null && !config.context.isEmpty()) { - for (var i = 0; i < config.context.length; i++) { - if (config.context.getReturnState(i) !== PredictionContext.EMPTY_RETURN_STATE) { - var newContext = config.context.getParent(i); // "pop" return state - var returnState = this.atn.states[config.context.getReturnState(i)]; - cfg = new LexerATNConfig({ state:returnState, context:newContext }, config); - currentAltReachedAcceptState = this.closure(input, cfg, - configs, currentAltReachedAcceptState, speculative, - treatEofAsEpsilon); - } - } - } - return currentAltReachedAcceptState; - } - // optimization - if (!config.state.epsilonOnlyTransitions) { - if (!currentAltReachedAcceptState || !config.passedThroughNonGreedyDecision) { - configs.add(config); - } - } - for (var j = 0; j < config.state.transitions.length; j++) { - var trans = config.state.transitions[j]; - cfg = this.getEpsilonTarget(input, config, trans, configs, speculative, treatEofAsEpsilon); - if (cfg !== null) { - currentAltReachedAcceptState = this.closure(input, cfg, configs, - currentAltReachedAcceptState, speculative, treatEofAsEpsilon); 
- } - } - return currentAltReachedAcceptState; -}; - -// side-effect: can alter configs.hasSemanticContext -LexerATNSimulator.prototype.getEpsilonTarget = function(input, config, trans, - configs, speculative, treatEofAsEpsilon) { - var cfg = null; - if (trans.serializationType === Transition.RULE) { - var newContext = SingletonPredictionContext.create(config.context, trans.followState.stateNumber); - cfg = new LexerATNConfig( { state:trans.target, context:newContext}, config); - } else if (trans.serializationType === Transition.PRECEDENCE) { - throw "Precedence predicates are not supported in lexers."; - } else if (trans.serializationType === Transition.PREDICATE) { - // Track traversing semantic predicates. If we traverse, - // we cannot add a DFA state for this "reach" computation - // because the DFA would not test the predicate again in the - // future. Rather than creating collections of semantic predicates - // like v3 and testing them on prediction, v4 will test them on the - // fly all the time using the ATN not the DFA. This is slower but - // semantically it's not used that often. One of the key elements to - // this predicate mechanism is not adding DFA states that see - // predicates immediately afterwards in the ATN. For example, - - // a : ID {p1}? | ID {p2}? ; - - // should create the start state for rule 'a' (to save start state - // competition), but should not create target of ID state. The - // collection of ATN states the following ID references includes - // states reached by traversing predicates. Since this is when we - // test them, we cannot cash the DFA state target of ID. 
- - if (LexerATNSimulator.debug) { - console.log("EVAL rule " + trans.ruleIndex + ":" + trans.predIndex); - } - configs.hasSemanticContext = true; - if (this.evaluatePredicate(input, trans.ruleIndex, trans.predIndex, speculative)) { - cfg = new LexerATNConfig({ state:trans.target}, config); - } - } else if (trans.serializationType === Transition.ACTION) { - if (config.context === null || config.context.hasEmptyPath()) { - // execute actions anywhere in the start rule for a token. - // - // TODO: if the entry rule is invoked recursively, some - // actions may be executed during the recursive call. The - // problem can appear when hasEmptyPath() is true but - // isEmpty() is false. In this case, the config needs to be - // split into two contexts - one with just the empty path - // and another with everything but the empty path. - // Unfortunately, the current algorithm does not allow - // getEpsilonTarget to return two configurations, so - // additional modifications are needed before we can support - // the split operation. - var lexerActionExecutor = LexerActionExecutor.append(config.lexerActionExecutor, - this.atn.lexerActions[trans.actionIndex]); - cfg = new LexerATNConfig({ state:trans.target, lexerActionExecutor:lexerActionExecutor }, config); - } else { - // ignore actions in referenced rules - cfg = new LexerATNConfig( { state:trans.target}, config); - } - } else if (trans.serializationType === Transition.EPSILON) { - cfg = new LexerATNConfig({ state:trans.target}, config); - } else if (trans.serializationType === Transition.ATOM || - trans.serializationType === Transition.RANGE || - trans.serializationType === Transition.SET) { - if (treatEofAsEpsilon) { - if (trans.matches(Token.EOF, 0, Lexer.MAX_CHAR_VALUE)) { - cfg = new LexerATNConfig( { state:trans.target }, config); - } - } - } - return cfg; -}; - -// Evaluate a predicate specified in the lexer. -// -//

            If {@code speculative} is {@code true}, this method was called before -// {@link //consume} for the matched character. This method should call -// {@link //consume} before evaluating the predicate to ensure position -// sensitive values, including {@link Lexer//getText}, {@link Lexer//getLine}, -// and {@link Lexer//getcolumn}, properly reflect the current -// lexer state. This method should restore {@code input} and the simulator -// to the original state before returning (i.e. undo the actions made by the -// call to {@link //consume}.

            -// -// @param input The input stream. -// @param ruleIndex The rule containing the predicate. -// @param predIndex The index of the predicate within the rule. -// @param speculative {@code true} if the current index in {@code input} is -// one character before the predicate's location. -// -// @return {@code true} if the specified predicate evaluates to -// {@code true}. -// / -LexerATNSimulator.prototype.evaluatePredicate = function(input, ruleIndex, - predIndex, speculative) { - // assume true if no recognizer was provided - if (this.recog === null) { - return true; - } - if (!speculative) { - return this.recog.sempred(null, ruleIndex, predIndex); - } - var savedcolumn = this.column; - var savedLine = this.line; - var index = input.index; - var marker = input.mark(); - try { - this.consume(input); - return this.recog.sempred(null, ruleIndex, predIndex); - } finally { - this.column = savedcolumn; - this.line = savedLine; - input.seek(index); - input.release(marker); - } -}; - -LexerATNSimulator.prototype.captureSimState = function(settings, input, dfaState) { - settings.index = input.index; - settings.line = this.line; - settings.column = this.column; - settings.dfaState = dfaState; -}; - -LexerATNSimulator.prototype.addDFAEdge = function(from_, tk, to, cfgs) { - if (to === undefined) { - to = null; - } - if (cfgs === undefined) { - cfgs = null; - } - if (to === null && cfgs !== null) { - // leading to this call, ATNConfigSet.hasSemanticContext is used as a - // marker indicating dynamic predicate evaluation makes this edge - // dependent on the specific input sequence, so the static edge in the - // DFA should be omitted. The target DFAState is still created since - // execATN has the ability to resynchronize with the DFA state cache - // following the predicate evaluation step. - // - // TJP notes: next time through the DFA, we see a pred again and eval. 
- // If that gets us to a previously created (but dangling) DFA - // state, we can continue in pure DFA mode from there. - // / - var suppressEdge = cfgs.hasSemanticContext; - cfgs.hasSemanticContext = false; - - to = this.addDFAState(cfgs); - - if (suppressEdge) { - return to; - } - } - // add the edge - if (tk < LexerATNSimulator.MIN_DFA_EDGE || tk > LexerATNSimulator.MAX_DFA_EDGE) { - // Only track edges within the DFA bounds - return to; - } - if (LexerATNSimulator.debug) { - console.log("EDGE " + from_ + " -> " + to + " upon " + tk); - } - if (from_.edges === null) { - // make room for tokens 1..n and -1 masquerading as index 0 - from_.edges = []; - } - from_.edges[tk - LexerATNSimulator.MIN_DFA_EDGE] = to; // connect - - return to; -}; - -// Add a new DFA state if there isn't one with this set of -// configurations already. This method also detects the first -// configuration containing an ATN rule stop state. Later, when -// traversing the DFA, we will know which rule to accept. -LexerATNSimulator.prototype.addDFAState = function(configs) { - var proposed = new DFAState(null, configs); - var firstConfigWithRuleStopState = null; - for (var i = 0; i < configs.items.length; i++) { - var cfg = configs.items[i]; - if (cfg.state instanceof RuleStopState) { - firstConfigWithRuleStopState = cfg; - break; - } - } - if (firstConfigWithRuleStopState !== null) { - proposed.isAcceptState = true; - proposed.lexerActionExecutor = firstConfigWithRuleStopState.lexerActionExecutor; - proposed.prediction = this.atn.ruleToTokenType[firstConfigWithRuleStopState.state.ruleIndex]; - } - var dfa = this.decisionToDFA[this.mode]; - var existing = dfa.states.get(proposed); - if (existing!==null) { - return existing; - } - var newState = proposed; - newState.stateNumber = dfa.states.length; - configs.setReadonly(true); - newState.configs = configs; - dfa.states.add(newState); - return newState; -}; - -LexerATNSimulator.prototype.getDFA = function(mode) { - return 
this.decisionToDFA[mode]; -}; - -// Get the text matched so far for the current token. -LexerATNSimulator.prototype.getText = function(input) { - // index is first lookahead char, don't include. - return input.getText(this.startIndex, input.index - 1); -}; - -LexerATNSimulator.prototype.consume = function(input) { - var curChar = input.LA(1); - if (curChar === "\n".charCodeAt(0)) { - this.line += 1; - this.column = 0; - } else { - this.column += 1; - } - input.consume(); -}; - -LexerATNSimulator.prototype.getTokenName = function(tt) { - if (tt === -1) { - return "EOF"; - } else { - return "'" + String.fromCharCode(tt) + "'"; - } -}; - -exports.LexerATNSimulator = LexerATNSimulator; +module.exports = LexerATNSimulator; diff --git a/runtime/JavaScript/src/antlr4/atn/LexerAction.js b/runtime/JavaScript/src/antlr4/atn/LexerAction.js index ad5be437f..faf2ed92b 100644 --- a/runtime/JavaScript/src/antlr4/atn/LexerAction.js +++ b/runtime/JavaScript/src/antlr4/atn/LexerAction.js @@ -1,366 +1,384 @@ -// /* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. * Use of this file is governed by the BSD 3-clause license that * can be found in the LICENSE.txt file in the project root. */ - // -function LexerActionType() { +const LexerActionType = { + // The type of a {@link LexerChannelAction} action. + CHANNEL: 0, + // The type of a {@link LexerCustomAction} action + CUSTOM: 1, + // The type of a {@link LexerModeAction} action. + MODE: 2, + //The type of a {@link LexerMoreAction} action. + MORE: 3, + //The type of a {@link LexerPopModeAction} action. + POP_MODE: 4, + //The type of a {@link LexerPushModeAction} action. + PUSH_MODE: 5, + //The type of a {@link LexerSkipAction} action. + SKIP: 6, + //The type of a {@link LexerTypeAction} action. + TYPE: 7 } -LexerActionType.CHANNEL = 0; //The type of a {@link LexerChannelAction} action. -LexerActionType.CUSTOM = 1; //The type of a {@link LexerCustomAction} action. 
-LexerActionType.MODE = 2; //The type of a {@link LexerModeAction} action. -LexerActionType.MORE = 3; //The type of a {@link LexerMoreAction} action. -LexerActionType.POP_MODE = 4; //The type of a {@link LexerPopModeAction} action. -LexerActionType.PUSH_MODE = 5; //The type of a {@link LexerPushModeAction} action. -LexerActionType.SKIP = 6; //The type of a {@link LexerSkipAction} action. -LexerActionType.TYPE = 7; //The type of a {@link LexerTypeAction} action. +class LexerAction { + constructor(action) { + this.actionType = action; + this.isPositionDependent = false; + } -function LexerAction(action) { - this.actionType = action; - this.isPositionDependent = false; - return this; + hashCode() { + const hash = new Hash(); + this.updateHashCode(hash); + return hash.finish() + } + + updateHashCode(hash) { + hash.update(this.actionType); + } + + equals(other) { + return this === other; + } } -LexerAction.prototype.hashCode = function() { - var hash = new Hash(); - this.updateHashCode(hash); - return hash.finish() -}; -LexerAction.prototype.updateHashCode = function(hash) { - hash.update(this.actionType); -}; +/** + * Implements the {@code skip} lexer action by calling {@link Lexer//skip}. + * + *

            The {@code skip} command does not have any parameters, so this action is + * implemented as a singleton instance exposed by {@link //INSTANCE}.

            + */ +class LexerSkipAction extends LexerAction { + constructor() { + super(LexerActionType.SKIP); + } -LexerAction.prototype.equals = function(other) { - return this === other; -}; + execute(lexer) { + lexer.skip(); + } - - -// -// Implements the {@code skip} lexer action by calling {@link Lexer//skip}. -// -//

            The {@code skip} command does not have any parameters, so this action is -// implemented as a singleton instance exposed by {@link //INSTANCE}.

            -function LexerSkipAction() { - LexerAction.call(this, LexerActionType.SKIP); - return this; + toString() { + return "skip"; + } } -LexerSkipAction.prototype = Object.create(LexerAction.prototype); -LexerSkipAction.prototype.constructor = LexerSkipAction; - // Provides a singleton instance of this parameterless lexer action. LexerSkipAction.INSTANCE = new LexerSkipAction(); -LexerSkipAction.prototype.execute = function(lexer) { - lexer.skip(); -}; - -LexerSkipAction.prototype.toString = function() { - return "skip"; -}; - -// Implements the {@code type} lexer action by calling {@link Lexer//setType} -// with the assigned type. -function LexerTypeAction(type) { - LexerAction.call(this, LexerActionType.TYPE); - this.type = type; - return this; -} - -LexerTypeAction.prototype = Object.create(LexerAction.prototype); -LexerTypeAction.prototype.constructor = LexerTypeAction; - -LexerTypeAction.prototype.execute = function(lexer) { - lexer.type = this.type; -}; - -LexerTypeAction.prototype.updateHashCode = function(hash) { - hash.update(this.actionType, this.type); -}; - - -LexerTypeAction.prototype.equals = function(other) { - if(this === other) { - return true; - } else if (! (other instanceof LexerTypeAction)) { - return false; - } else { - return this.type === other.type; +/** + * Implements the {@code type} lexer action by calling {@link Lexer//setType} + * with the assigned type + */ +class LexerTypeAction extends LexerAction { + constructor(type) { + super(LexerActionType.TYPE); + this.type = type; } -}; -LexerTypeAction.prototype.toString = function() { - return "type(" + this.type + ")"; -}; - -// Implements the {@code pushMode} lexer action by calling -// {@link Lexer//pushMode} with the assigned mode. 
-function LexerPushModeAction(mode) { - LexerAction.call(this, LexerActionType.PUSH_MODE); - this.mode = mode; - return this; -} - -LexerPushModeAction.prototype = Object.create(LexerAction.prototype); -LexerPushModeAction.prototype.constructor = LexerPushModeAction; - -//

            This action is implemented by calling {@link Lexer//pushMode} with the -// value provided by {@link //getMode}.

            -LexerPushModeAction.prototype.execute = function(lexer) { - lexer.pushMode(this.mode); -}; - -LexerPushModeAction.prototype.updateHashCode = function(hash) { - hash.update(this.actionType, this.mode); -}; - -LexerPushModeAction.prototype.equals = function(other) { - if (this === other) { - return true; - } else if (! (other instanceof LexerPushModeAction)) { - return false; - } else { - return this.mode === other.mode; + execute(lexer) { + lexer.type = this.type; } -}; -LexerPushModeAction.prototype.toString = function() { - return "pushMode(" + this.mode + ")"; -}; + updateHashCode(hash) { + hash.update(this.actionType, this.type); + } + equals(other) { + if(this === other) { + return true; + } else if (! (other instanceof LexerTypeAction)) { + return false; + } else { + return this.type === other.type; + } + } -// Implements the {@code popMode} lexer action by calling {@link Lexer//popMode}. -// -//

            The {@code popMode} command does not have any parameters, so this action is -// implemented as a singleton instance exposed by {@link //INSTANCE}.

            -function LexerPopModeAction() { - LexerAction.call(this,LexerActionType.POP_MODE); - return this; + toString() { + return "type(" + this.type + ")"; + } } -LexerPopModeAction.prototype = Object.create(LexerAction.prototype); -LexerPopModeAction.prototype.constructor = LexerPopModeAction; + +/** + * Implements the {@code pushMode} lexer action by calling + * {@link Lexer//pushMode} with the assigned mode + */ +class LexerPushModeAction extends LexerAction { + constructor(mode) { + super(LexerActionType.PUSH_MODE); + this.mode = mode; + } + + /** + *

            This action is implemented by calling {@link Lexer//pushMode} with the + * value provided by {@link //getMode}.

            + */ + execute(lexer) { + lexer.pushMode(this.mode); + } + + updateHashCode(hash) { + hash.update(this.actionType, this.mode); + } + + equals(other) { + if (this === other) { + return true; + } else if (! (other instanceof LexerPushModeAction)) { + return false; + } else { + return this.mode === other.mode; + } + } + + toString() { + return "pushMode(" + this.mode + ")"; + } +} + +/** + * Implements the {@code popMode} lexer action by calling {@link Lexer//popMode}. + * + *

            The {@code popMode} command does not have any parameters, so this action is + * implemented as a singleton instance exposed by {@link //INSTANCE}.

            + */ +class LexerPopModeAction extends LexerAction { + constructor() { + super(LexerActionType.POP_MODE); + } + + /** + *

            This action is implemented by calling {@link Lexer//popMode}.

            + */ + execute(lexer) { + lexer.popMode(); + } + + toString() { + return "popMode"; + } +} LexerPopModeAction.INSTANCE = new LexerPopModeAction(); -//

            This action is implemented by calling {@link Lexer//popMode}.

            -LexerPopModeAction.prototype.execute = function(lexer) { - lexer.popMode(); -}; +/** + * Implements the {@code more} lexer action by calling {@link Lexer//more}. + * + *

            The {@code more} command does not have any parameters, so this action is + * implemented as a singleton instance exposed by {@link //INSTANCE}.

            + */ +class LexerMoreAction extends LexerAction { + constructor() { + super(LexerActionType.MORE); + } -LexerPopModeAction.prototype.toString = function() { - return "popMode"; -}; + /** + *

            This action is implemented by calling {@link Lexer//popMode}.

            + */ + execute(lexer) { + lexer.more(); + } -// Implements the {@code more} lexer action by calling {@link Lexer//more}. -// -//

            The {@code more} command does not have any parameters, so this action is -// implemented as a singleton instance exposed by {@link //INSTANCE}.

            -function LexerMoreAction() { - LexerAction.call(this, LexerActionType.MORE); - return this; + toString() { + return "more"; + } } -LexerMoreAction.prototype = Object.create(LexerAction.prototype); -LexerMoreAction.prototype.constructor = LexerMoreAction; - LexerMoreAction.INSTANCE = new LexerMoreAction(); -//

            This action is implemented by calling {@link Lexer//popMode}.

            -LexerMoreAction.prototype.execute = function(lexer) { - lexer.more(); -}; -LexerMoreAction.prototype.toString = function() { - return "more"; -}; +/** + * Implements the {@code mode} lexer action by calling {@link Lexer//mode} with + * the assigned mode + */ +class LexerModeAction extends LexerAction { + constructor(mode) { + super(LexerActionType.MODE); + this.mode = mode; + } + /** + *

            This action is implemented by calling {@link Lexer//mode} with the + * value provided by {@link //getMode}.

            + */ + execute(lexer) { + lexer.mode(this.mode); + } -// Implements the {@code mode} lexer action by calling {@link Lexer//mode} with -// the assigned mode. -function LexerModeAction(mode) { - LexerAction.call(this, LexerActionType.MODE); - this.mode = mode; - return this; + updateHashCode(hash) { + hash.update(this.actionType, this.mode); + } + + equals(other) { + if (this === other) { + return true; + } else if (! (other instanceof LexerModeAction)) { + return false; + } else { + return this.mode === other.mode; + } + } + + toString() { + return "mode(" + this.mode + ")"; + } } -LexerModeAction.prototype = Object.create(LexerAction.prototype); -LexerModeAction.prototype.constructor = LexerModeAction; - -//

            This action is implemented by calling {@link Lexer//mode} with the -// value provided by {@link //getMode}.

            -LexerModeAction.prototype.execute = function(lexer) { - lexer.mode(this.mode); -}; - -LexerModeAction.prototype.updateHashCode = function(hash) { - hash.update(this.actionType, this.mode); -}; - -LexerModeAction.prototype.equals = function(other) { - if (this === other) { - return true; - } else if (! (other instanceof LexerModeAction)) { - return false; - } else { - return this.mode === other.mode; +/** + * Executes a custom lexer action by calling {@link Recognizer//action} with the + * rule and action indexes assigned to the custom action. The implementation of + * a custom action is added to the generated code for the lexer in an override + * of {@link Recognizer//action} when the grammar is compiled. + * + *

            This class may represent embedded actions created with the {...} + * syntax in ANTLR 4, as well as actions created for lexer commands where the + * command argument could not be evaluated when the grammar was compiled.

            + */ +class LexerCustomAction extends LexerAction { + /** + * Constructs a custom lexer action with the specified rule and action + * indexes. + * + * @param ruleIndex The rule index to use for calls to + * {@link Recognizer//action}. + * @param actionIndex The action index to use for calls to + * {@link Recognizer//action}. + */ + constructor(ruleIndex, actionIndex) { + super(LexerActionType.CUSTOM); + this.ruleIndex = ruleIndex; + this.actionIndex = actionIndex; + this.isPositionDependent = true; } -}; -LexerModeAction.prototype.toString = function() { - return "mode(" + this.mode + ")"; -}; + /** + *

            Custom actions are implemented by calling {@link Lexer//action} with the + * appropriate rule and action indexes.

            + */ + execute(lexer) { + lexer.action(null, this.ruleIndex, this.actionIndex); + } -// Executes a custom lexer action by calling {@link Recognizer//action} with the -// rule and action indexes assigned to the custom action. The implementation of -// a custom action is added to the generated code for the lexer in an override -// of {@link Recognizer//action} when the grammar is compiled. -// -//

            This class may represent embedded actions created with the {...} -// syntax in ANTLR 4, as well as actions created for lexer commands where the -// command argument could not be evaluated when the grammar was compiled.

            + updateHashCode(hash) { + hash.update(this.actionType, this.ruleIndex, this.actionIndex); + } - - // Constructs a custom lexer action with the specified rule and action - // indexes. - // - // @param ruleIndex The rule index to use for calls to - // {@link Recognizer//action}. - // @param actionIndex The action index to use for calls to - // {@link Recognizer//action}. - -function LexerCustomAction(ruleIndex, actionIndex) { - LexerAction.call(this, LexerActionType.CUSTOM); - this.ruleIndex = ruleIndex; - this.actionIndex = actionIndex; - this.isPositionDependent = true; - return this; + equals(other) { + if (this === other) { + return true; + } else if (! (other instanceof LexerCustomAction)) { + return false; + } else { + return this.ruleIndex === other.ruleIndex && this.actionIndex === other.actionIndex; + } + } } -LexerCustomAction.prototype = Object.create(LexerAction.prototype); -LexerCustomAction.prototype.constructor = LexerCustomAction; - -//

            Custom actions are implemented by calling {@link Lexer//action} with the -// appropriate rule and action indexes.

            -LexerCustomAction.prototype.execute = function(lexer) { - lexer.action(null, this.ruleIndex, this.actionIndex); -}; - -LexerCustomAction.prototype.updateHashCode = function(hash) { - hash.update(this.actionType, this.ruleIndex, this.actionIndex); -}; - -LexerCustomAction.prototype.equals = function(other) { - if (this === other) { - return true; - } else if (! (other instanceof LexerCustomAction)) { - return false; - } else { - return this.ruleIndex === other.ruleIndex && this.actionIndex === other.actionIndex; +/** + * Implements the {@code channel} lexer action by calling + * {@link Lexer//setChannel} with the assigned channel. + * Constructs a new {@code channel} action with the specified channel value. + * @param channel The channel value to pass to {@link Lexer//setChannel} + */ +class LexerChannelAction extends LexerAction { + constructor(channel) { + super(LexerActionType.CHANNEL); + this.channel = channel; } -}; -// Implements the {@code channel} lexer action by calling -// {@link Lexer//setChannel} with the assigned channel. -// Constructs a new {@code channel} action with the specified channel value. -// @param channel The channel value to pass to {@link Lexer//setChannel}. -function LexerChannelAction(channel) { - LexerAction.call(this, LexerActionType.CHANNEL); - this.channel = channel; - return this; + /** + *

            This action is implemented by calling {@link Lexer//setChannel} with the + * value provided by {@link //getChannel}.

            + */ + execute(lexer) { + lexer._channel = this.channel; + } + + updateHashCode(hash) { + hash.update(this.actionType, this.channel); + } + + equals(other) { + if (this === other) { + return true; + } else if (! (other instanceof LexerChannelAction)) { + return false; + } else { + return this.channel === other.channel; + } + } + + toString() { + return "channel(" + this.channel + ")"; + } } -LexerChannelAction.prototype = Object.create(LexerAction.prototype); -LexerChannelAction.prototype.constructor = LexerChannelAction; -//

            This action is implemented by calling {@link Lexer//setChannel} with the -// value provided by {@link //getChannel}.

            -LexerChannelAction.prototype.execute = function(lexer) { - lexer._channel = this.channel; -}; - -LexerChannelAction.prototype.updateHashCode = function(hash) { - hash.update(this.actionType, this.channel); -}; - -LexerChannelAction.prototype.equals = function(other) { - if (this === other) { - return true; - } else if (! (other instanceof LexerChannelAction)) { - return false; - } else { - return this.channel === other.channel; +/** + * This implementation of {@link LexerAction} is used for tracking input offsets + * for position-dependent actions within a {@link LexerActionExecutor}. + * + *

            This action is not serialized as part of the ATN, and is only required for + * position-dependent lexer actions which appear at a location other than the + * end of a rule. For more information about DFA optimizations employed for + * lexer actions, see {@link LexerActionExecutor//append} and + * {@link LexerActionExecutor//fixOffsetBeforeMatch}.

            + * + * Constructs a new indexed custom action by associating a character offset + * with a {@link LexerAction}. + * + *

            Note: This class is only required for lexer actions for which + * {@link LexerAction//isPositionDependent} returns {@code true}.

            + * + * @param offset The offset into the input {@link CharStream}, relative to + * the token start index, at which the specified lexer action should be + * executed. + * @param action The lexer action to execute at a particular offset in the + * input {@link CharStream}. + */ +class LexerIndexedCustomAction extends LexerAction { + constructor(offset, action) { + super(action.actionType); + this.offset = offset; + this.action = action; + this.isPositionDependent = true; } -}; -LexerChannelAction.prototype.toString = function() { - return "channel(" + this.channel + ")"; -}; + /** + *

            This method calls {@link //execute} on the result of {@link //getAction} + * using the provided {@code lexer}.

            + */ + execute(lexer) { + // assume the input stream position was properly set by the calling code + this.action.execute(lexer); + } -// This implementation of {@link LexerAction} is used for tracking input offsets -// for position-dependent actions within a {@link LexerActionExecutor}. -// -//

            This action is not serialized as part of the ATN, and is only required for -// position-dependent lexer actions which appear at a location other than the -// end of a rule. For more information about DFA optimizations employed for -// lexer actions, see {@link LexerActionExecutor//append} and -// {@link LexerActionExecutor//fixOffsetBeforeMatch}.

            + updateHashCode(hash) { + hash.update(this.actionType, this.offset, this.action); + } -// Constructs a new indexed custom action by associating a character offset -// with a {@link LexerAction}. -// -//

            Note: This class is only required for lexer actions for which -// {@link LexerAction//isPositionDependent} returns {@code true}.

            -// -// @param offset The offset into the input {@link CharStream}, relative to -// the token start index, at which the specified lexer action should be -// executed. -// @param action The lexer action to execute at a particular offset in the -// input {@link CharStream}. -function LexerIndexedCustomAction(offset, action) { - LexerAction.call(this, action.actionType); - this.offset = offset; - this.action = action; - this.isPositionDependent = true; - return this; + equals(other) { + if (this === other) { + return true; + } else if (! (other instanceof LexerIndexedCustomAction)) { + return false; + } else { + return this.offset === other.offset && this.action === other.action; + } + } } -LexerIndexedCustomAction.prototype = Object.create(LexerAction.prototype); -LexerIndexedCustomAction.prototype.constructor = LexerIndexedCustomAction; - -//

            This method calls {@link //execute} on the result of {@link //getAction} -// using the provided {@code lexer}.

            -LexerIndexedCustomAction.prototype.execute = function(lexer) { - // assume the input stream position was properly set by the calling code - this.action.execute(lexer); -}; - -LexerIndexedCustomAction.prototype.updateHashCode = function(hash) { - hash.update(this.actionType, this.offset, this.action); -}; - -LexerIndexedCustomAction.prototype.equals = function(other) { - if (this === other) { - return true; - } else if (! (other instanceof LexerIndexedCustomAction)) { - return false; - } else { - return this.offset === other.offset && this.action === other.action; - } -}; - - -exports.LexerActionType = LexerActionType; -exports.LexerSkipAction = LexerSkipAction; -exports.LexerChannelAction = LexerChannelAction; -exports.LexerCustomAction = LexerCustomAction; -exports.LexerIndexedCustomAction = LexerIndexedCustomAction; -exports.LexerMoreAction = LexerMoreAction; -exports.LexerTypeAction = LexerTypeAction; -exports.LexerPushModeAction = LexerPushModeAction; -exports.LexerPopModeAction = LexerPopModeAction; -exports.LexerModeAction = LexerModeAction; \ No newline at end of file +module.exports = { + LexerActionType, + LexerSkipAction, + LexerChannelAction, + LexerCustomAction, + LexerIndexedCustomAction, + LexerMoreAction, + LexerTypeAction, + LexerPushModeAction, + LexerPopModeAction, + LexerModeAction +} diff --git a/runtime/JavaScript/src/antlr4/atn/LexerActionExecutor.js b/runtime/JavaScript/src/antlr4/atn/LexerActionExecutor.js index d037c85e5..95567d48b 100644 --- a/runtime/JavaScript/src/antlr4/atn/LexerActionExecutor.js +++ b/runtime/JavaScript/src/antlr4/atn/LexerActionExecutor.js @@ -1,166 +1,173 @@ -// /* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. * Use of this file is governed by the BSD 3-clause license that * can be found in the LICENSE.txt file in the project root. */ -/// -// Represents an executor for a sequence of lexer actions which traversed during -// the matching operation of a lexer rule (token). -// -//

            The executor tracks position information for position-dependent lexer actions -// efficiently, ensuring that actions appearing only at the end of the rule do -// not cause bloating of the {@link DFA} created for the lexer.

            +const {hashStuff} = require("../Utils"); +const {LexerIndexedCustomAction} = require('./LexerAction'); -var hashStuff = require("../Utils").hashStuff; -var LexerIndexedCustomAction = require('./LexerAction').LexerIndexedCustomAction; +class LexerActionExecutor { + /** + * Represents an executor for a sequence of lexer actions which traversed during + * the matching operation of a lexer rule (token). + * + *

            The executor tracks position information for position-dependent lexer actions + * efficiently, ensuring that actions appearing only at the end of the rule do + * not cause bloating of the {@link DFA} created for the lexer.

            + */ + constructor(lexerActions) { + this.lexerActions = lexerActions === null ? [] : lexerActions; + /** + * Caches the result of {@link //hashCode} since the hash code is an element + * of the performance-critical {@link LexerATNConfig//hashCode} operation + */ + this.cachedHashCode = hashStuff(lexerActions); // "".join([str(la) for la in + // lexerActions])) + return this; + } -function LexerActionExecutor(lexerActions) { - this.lexerActions = lexerActions === null ? [] : lexerActions; - // Caches the result of {@link //hashCode} since the hash code is an element - // of the performance-critical {@link LexerATNConfig//hashCode} operation. - this.cachedHashCode = hashStuff(lexerActions); // "".join([str(la) for la in - // lexerActions])) - return this; + /** + * Creates a {@link LexerActionExecutor} which encodes the current offset + * for position-dependent lexer actions. + * + *

            Normally, when the executor encounters lexer actions where + * {@link LexerAction//isPositionDependent} returns {@code true}, it calls + * {@link IntStream//seek} on the input {@link CharStream} to set the input + * position to the end of the current token. This behavior provides + * for efficient DFA representation of lexer actions which appear at the end + * of a lexer rule, even when the lexer rule matches a variable number of + * characters.

            + * + *

            Prior to traversing a match transition in the ATN, the current offset + * from the token start index is assigned to all position-dependent lexer + * actions which have not already been assigned a fixed offset. By storing + * the offsets relative to the token start index, the DFA representation of + * lexer actions which appear in the middle of tokens remains efficient due + * to sharing among tokens of the same length, regardless of their absolute + * position in the input stream.

            + * + *

            If the current executor already has offsets assigned to all + * position-dependent lexer actions, the method returns {@code this}.

            + * + * @param offset The current offset to assign to all position-dependent + * lexer actions which do not already have offsets assigned. + * + * @return {LexerActionExecutor} A {@link LexerActionExecutor} which stores input stream offsets + * for all position-dependent lexer actions. + */ + fixOffsetBeforeMatch(offset) { + let updatedLexerActions = null; + for (let i = 0; i < this.lexerActions.length; i++) { + if (this.lexerActions[i].isPositionDependent && + !(this.lexerActions[i] instanceof LexerIndexedCustomAction)) { + if (updatedLexerActions === null) { + updatedLexerActions = this.lexerActions.concat([]); + } + updatedLexerActions[i] = new LexerIndexedCustomAction(offset, + this.lexerActions[i]); + } + } + if (updatedLexerActions === null) { + return this; + } else { + return new LexerActionExecutor(updatedLexerActions); + } + } + + /** + * Execute the actions encapsulated by this executor within the context of a + * particular {@link Lexer}. + * + *

            This method calls {@link IntStream//seek} to set the position of the + * {@code input} {@link CharStream} prior to calling + * {@link LexerAction//execute} on a position-dependent action. Before the + * method returns, the input position will be restored to the same position + * it was in when the method was invoked.

            + * + * @param lexer The lexer instance. + * @param input The input stream which is the source for the current token. + * When this method is called, the current {@link IntStream//index} for + * {@code input} should be the start of the following token, i.e. 1 + * character past the end of the current token. + * @param startIndex The token start index. This value may be passed to + * {@link IntStream//seek} to set the {@code input} position to the beginning + * of the token. + */ + execute(lexer, input, startIndex) { + let requiresSeek = false; + const stopIndex = input.index; + try { + for (let i = 0; i < this.lexerActions.length; i++) { + let lexerAction = this.lexerActions[i]; + if (lexerAction instanceof LexerIndexedCustomAction) { + const offset = lexerAction.offset; + input.seek(startIndex + offset); + lexerAction = lexerAction.action; + requiresSeek = (startIndex + offset) !== stopIndex; + } else if (lexerAction.isPositionDependent) { + input.seek(stopIndex); + requiresSeek = false; + } + lexerAction.execute(lexer); + } + } finally { + if (requiresSeek) { + input.seek(stopIndex); + } + } + } + + hashCode() { + return this.cachedHashCode; + } + + updateHashCode(hash) { + hash.update(this.cachedHashCode); + } + + equals(other) { + if (this === other) { + return true; + } else if (!(other instanceof LexerActionExecutor)) { + return false; + } else if (this.cachedHashCode != other.cachedHashCode) { + return false; + } else if (this.lexerActions.length != other.lexerActions.length) { + return false; + } else { + const numActions = this.lexerActions.length + for (let idx = 0; idx < numActions; ++idx) { + if (!this.lexerActions[idx].equals(other.lexerActions[idx])) { + return false; + } + } + return true; + } + } + + /** + * Creates a {@link LexerActionExecutor} which executes the actions for + * the input {@code lexerActionExecutor} followed by a specified + * {@code lexerAction}. 
+ * + * @param lexerActionExecutor The executor for actions already traversed by + * the lexer while matching a token within a particular + * {@link LexerATNConfig}. If this is {@code null}, the method behaves as + * though it were an empty executor. + * @param lexerAction The lexer action to execute after the actions + * specified in {@code lexerActionExecutor}. + * + * @return {LexerActionExecutor} A {@link LexerActionExecutor} for executing the combine actions + * of {@code lexerActionExecutor} and {@code lexerAction}. + */ + static append(lexerActionExecutor, lexerAction) { + if (lexerActionExecutor === null) { + return new LexerActionExecutor([ lexerAction ]); + } + const lexerActions = lexerActionExecutor.lexerActions.concat([ lexerAction ]); + return new LexerActionExecutor(lexerActions); + } } -// Creates a {@link LexerActionExecutor} which executes the actions for -// the input {@code lexerActionExecutor} followed by a specified -// {@code lexerAction}. -// -// @param lexerActionExecutor The executor for actions already traversed by -// the lexer while matching a token within a particular -// {@link LexerATNConfig}. If this is {@code null}, the method behaves as -// though it were an empty executor. -// @param lexerAction The lexer action to execute after the actions -// specified in {@code lexerActionExecutor}. -// -// @return A {@link LexerActionExecutor} for executing the combine actions -// of {@code lexerActionExecutor} and {@code lexerAction}. -LexerActionExecutor.append = function(lexerActionExecutor, lexerAction) { - if (lexerActionExecutor === null) { - return new LexerActionExecutor([ lexerAction ]); - } - var lexerActions = lexerActionExecutor.lexerActions.concat([ lexerAction ]); - return new LexerActionExecutor(lexerActions); -}; -// Creates a {@link LexerActionExecutor} which encodes the current offset -// for position-dependent lexer actions. -// -//

            Normally, when the executor encounters lexer actions where -// {@link LexerAction//isPositionDependent} returns {@code true}, it calls -// {@link IntStream//seek} on the input {@link CharStream} to set the input -// position to the end of the current token. This behavior provides -// for efficient DFA representation of lexer actions which appear at the end -// of a lexer rule, even when the lexer rule matches a variable number of -// characters.

            -// -//

            Prior to traversing a match transition in the ATN, the current offset -// from the token start index is assigned to all position-dependent lexer -// actions which have not already been assigned a fixed offset. By storing -// the offsets relative to the token start index, the DFA representation of -// lexer actions which appear in the middle of tokens remains efficient due -// to sharing among tokens of the same length, regardless of their absolute -// position in the input stream.

            -// -//

            If the current executor already has offsets assigned to all -// position-dependent lexer actions, the method returns {@code this}.

            -// -// @param offset The current offset to assign to all position-dependent -// lexer actions which do not already have offsets assigned. -// -// @return A {@link LexerActionExecutor} which stores input stream offsets -// for all position-dependent lexer actions. -// / -LexerActionExecutor.prototype.fixOffsetBeforeMatch = function(offset) { - var updatedLexerActions = null; - for (var i = 0; i < this.lexerActions.length; i++) { - if (this.lexerActions[i].isPositionDependent && - !(this.lexerActions[i] instanceof LexerIndexedCustomAction)) { - if (updatedLexerActions === null) { - updatedLexerActions = this.lexerActions.concat([]); - } - updatedLexerActions[i] = new LexerIndexedCustomAction(offset, - this.lexerActions[i]); - } - } - if (updatedLexerActions === null) { - return this; - } else { - return new LexerActionExecutor(updatedLexerActions); - } -}; - -// Execute the actions encapsulated by this executor within the context of a -// particular {@link Lexer}. -// -//

            This method calls {@link IntStream//seek} to set the position of the -// {@code input} {@link CharStream} prior to calling -// {@link LexerAction//execute} on a position-dependent action. Before the -// method returns, the input position will be restored to the same position -// it was in when the method was invoked.

            -// -// @param lexer The lexer instance. -// @param input The input stream which is the source for the current token. -// When this method is called, the current {@link IntStream//index} for -// {@code input} should be the start of the following token, i.e. 1 -// character past the end of the current token. -// @param startIndex The token start index. This value may be passed to -// {@link IntStream//seek} to set the {@code input} position to the beginning -// of the token. -// / -LexerActionExecutor.prototype.execute = function(lexer, input, startIndex) { - var requiresSeek = false; - var stopIndex = input.index; - try { - for (var i = 0; i < this.lexerActions.length; i++) { - var lexerAction = this.lexerActions[i]; - if (lexerAction instanceof LexerIndexedCustomAction) { - var offset = lexerAction.offset; - input.seek(startIndex + offset); - lexerAction = lexerAction.action; - requiresSeek = (startIndex + offset) !== stopIndex; - } else if (lexerAction.isPositionDependent) { - input.seek(stopIndex); - requiresSeek = false; - } - lexerAction.execute(lexer); - } - } finally { - if (requiresSeek) { - input.seek(stopIndex); - } - } -}; - -LexerActionExecutor.prototype.hashCode = function() { - return this.cachedHashCode; -}; - -LexerActionExecutor.prototype.updateHashCode = function(hash) { - hash.update(this.cachedHashCode); -}; - - -LexerActionExecutor.prototype.equals = function(other) { - if (this === other) { - return true; - } else if (!(other instanceof LexerActionExecutor)) { - return false; - } else if (this.cachedHashCode != other.cachedHashCode) { - return false; - } else if (this.lexerActions.length != other.lexerActions.length) { - return false; - } else { - var numActions = this.lexerActions.length - for (var idx = 0; idx < numActions; ++idx) { - if (!this.lexerActions[idx].equals(other.lexerActions[idx])) { - return false; - } - } - return true; - } -}; - -exports.LexerActionExecutor = LexerActionExecutor; +module.exports = 
LexerActionExecutor; diff --git a/runtime/JavaScript/src/antlr4/atn/ParserATNSimulator.js b/runtime/JavaScript/src/antlr4/atn/ParserATNSimulator.js index e910ce045..6eb24a171 100644 --- a/runtime/JavaScript/src/antlr4/atn/ParserATNSimulator.js +++ b/runtime/JavaScript/src/antlr4/atn/ParserATNSimulator.js @@ -1,1192 +1,1183 @@ -// /* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. * Use of this file is governed by the BSD 3-clause license that * can be found in the LICENSE.txt file in the project root. */ -// -// -// The embodiment of the adaptive LL(*), ALL(*), parsing strategy. -// -//

            -// The basic complexity of the adaptive strategy makes it harder to understand. -// We begin with ATN simulation to build paths in a DFA. Subsequent prediction -// requests go through the DFA first. If they reach a state without an edge for -// the current symbol, the algorithm fails over to the ATN simulation to -// complete the DFA path for the current input (until it finds a conflict state -// or uniquely predicting state).

            -// -//

            -// All of that is done without using the outer context because we want to create -// a DFA that is not dependent upon the rule invocation stack when we do a -// prediction. One DFA works in all contexts. We avoid using context not -// necessarily because it's slower, although it can be, but because of the DFA -// caching problem. The closure routine only considers the rule invocation stack -// created during prediction beginning in the decision rule. For example, if -// prediction occurs without invoking another rule's ATN, there are no context -// stacks in the configurations. When lack of context leads to a conflict, we -// don't know if it's an ambiguity or a weakness in the strong LL(*) parsing -// strategy (versus full LL(*)).

            -// -//

            -// When SLL yields a configuration set with conflict, we rewind the input and -// retry the ATN simulation, this time using full outer context without adding -// to the DFA. Configuration context stacks will be the full invocation stacks -// from the start rule. If we get a conflict using full context, then we can -// definitively say we have a true ambiguity for that input sequence. If we -// don't get a conflict, it implies that the decision is sensitive to the outer -// context. (It is not context-sensitive in the sense of context-sensitive -// grammars.)

            -// -//

            -// The next time we reach this DFA state with an SLL conflict, through DFA -// simulation, we will again retry the ATN simulation using full context mode. -// This is slow because we can't save the results and have to "interpret" the -// ATN each time we get that input.

            -// -//

            -// CACHING FULL CONTEXT PREDICTIONS

            -// -//

            -// We could cache results from full context to predicted alternative easily and -// that saves a lot of time but doesn't work in presence of predicates. The set -// of visible predicates from the ATN start state changes depending on the -// context, because closure can fall off the end of a rule. I tried to cache -// tuples (stack context, semantic context, predicted alt) but it was slower -// than interpreting and much more complicated. Also required a huge amount of -// memory. The goal is not to create the world's fastest parser anyway. I'd like -// to keep this algorithm simple. By launching multiple threads, we can improve -// the speed of parsing across a large number of files.

            -// -//

            -// There is no strict ordering between the amount of input used by SLL vs LL, -// which makes it really hard to build a cache for full context. Let's say that -// we have input A B C that leads to an SLL conflict with full context X. That -// implies that using X we might only use A B but we could also use A B C D to -// resolve conflict. Input A B C D could predict alternative 1 in one position -// in the input and A B C E could predict alternative 2 in another position in -// input. The conflicting SLL configurations could still be non-unique in the -// full context prediction, which would lead us to requiring more input than the -// original A B C. To make a prediction cache work, we have to track the exact -// input used during the previous prediction. That amounts to a cache that maps -// X to a specific DFA for that context.

            -// -//

            -// Something should be done for left-recursive expression predictions. They are -// likely LL(1) + pred eval. Easier to do the whole SLL unless error and retry -// with full LL thing Sam does.

            -// -//

            -// AVOIDING FULL CONTEXT PREDICTION

            -// -//

            -// We avoid doing full context retry when the outer context is empty, we did not -// dip into the outer context by falling off the end of the decision state rule, -// or when we force SLL mode.

            -// -//

            -// As an example of the not dip into outer context case, consider as super -// constructor calls versus function calls. One grammar might look like -// this:

            -// -//
            -// ctorBody
            -//   : '{' superCall? stat* '}'
            -//   ;
            -// 
            -// -//

            -// Or, you might see something like

            -// -//
            -// stat
            -//   : superCall ';'
            -//   | expression ';'
            -//   | ...
            -//   ;
            -// 
            -// -//

            -// In both cases I believe that no closure operations will dip into the outer -// context. In the first case ctorBody in the worst case will stop at the '}'. -// In the 2nd case it should stop at the ';'. Both cases should stay within the -// entry rule and not dip into the outer context.

            -// -//

            -// PREDICATES

            -// -//

            -// Predicates are always evaluated if present in either SLL or LL both. SLL and -// LL simulation deals with predicates differently. SLL collects predicates as -// it performs closure operations like ANTLR v3 did. It delays predicate -// evaluation until it reaches and accept state. This allows us to cache the SLL -// ATN simulation whereas, if we had evaluated predicates on-the-fly during -// closure, the DFA state configuration sets would be different and we couldn't -// build up a suitable DFA.

            -// -//

            -// When building a DFA accept state during ATN simulation, we evaluate any -// predicates and return the sole semantically valid alternative. If there is -// more than 1 alternative, we report an ambiguity. If there are 0 alternatives, -// we throw an exception. Alternatives without predicates act like they have -// true predicates. The simple way to think about it is to strip away all -// alternatives with false predicates and choose the minimum alternative that -// remains.

            -// -//

            -// When we start in the DFA and reach an accept state that's predicated, we test -// those and return the minimum semantically viable alternative. If no -// alternatives are viable, we throw an exception.

            -// -//

            -// During full LL ATN simulation, closure always evaluates predicates and -// on-the-fly. This is crucial to reducing the configuration set size during -// closure. It hits a landmine when parsing with the Java grammar, for example, -// without this on-the-fly evaluation.

            -// -//

            -// SHARING DFA

            -// -//

            -// All instances of the same parser share the same decision DFAs through a -// static field. Each instance gets its own ATN simulator but they share the -// same {@link //decisionToDFA} field. They also share a -// {@link PredictionContextCache} object that makes sure that all -// {@link PredictionContext} objects are shared among the DFA states. This makes -// a big size difference.

            -// -//

            -// THREAD SAFETY

            -// -//

            -// The {@link ParserATNSimulator} locks on the {@link //decisionToDFA} field when -// it adds a new DFA object to that array. {@link //addDFAEdge} -// locks on the DFA for the current decision when setting the -// {@link DFAState//edges} field. {@link //addDFAState} locks on -// the DFA for the current decision when looking up a DFA state to see if it -// already exists. We must make sure that all requests to add DFA states that -// are equivalent result in the same shared DFA object. This is because lots of -// threads will be trying to update the DFA at once. The -// {@link //addDFAState} method also locks inside the DFA lock -// but this time on the shared context cache when it rebuilds the -// configurations' {@link PredictionContext} objects using cached -// subgraphs/nodes. No other locking occurs, even during DFA simulation. This is -// safe as long as we can guarantee that all threads referencing -// {@code s.edge[t]} get the same physical target {@link DFAState}, or -// {@code null}. Once into the DFA, the DFA simulation does not reference the -// {@link DFA//states} map. It follows the {@link DFAState//edges} field to new -// targets. The DFA simulator will either find {@link DFAState//edges} to be -// {@code null}, to be non-{@code null} and {@code dfa.edges[t]} null, or -// {@code dfa.edges[t]} to be non-null. The -// {@link //addDFAEdge} method could be racing to set the field -// but in either case the DFA simulator works; if {@code null}, and requests ATN -// simulation. It could also race trying to get {@code dfa.edges[t]}, but either -// way it will work because it's not doing a test and set operation.

            -// -//

            -// Starting with SLL then failing to combined SLL/LL (Two-Stage -// Parsing)

            -// -//

            -// Sam pointed out that if SLL does not give a syntax error, then there is no -// point in doing full LL, which is slower. We only have to try LL if we get a -// syntax error. For maximum speed, Sam starts the parser set to pure SLL -// mode with the {@link BailErrorStrategy}:

            -// -//
            -// parser.{@link Parser//getInterpreter() getInterpreter()}.{@link //setPredictionMode setPredictionMode}{@code (}{@link PredictionMode//SLL}{@code )};
            -// parser.{@link Parser//setErrorHandler setErrorHandler}(new {@link BailErrorStrategy}());
            -// 
            -// -//

            -// If it does not get a syntax error, then we're done. If it does get a syntax -// error, we need to retry with the combined SLL/LL strategy.

            -// -//

            -// The reason this works is as follows. If there are no SLL conflicts, then the -// grammar is SLL (at least for that input set). If there is an SLL conflict, -// the full LL analysis must yield a set of viable alternatives which is a -// subset of the alternatives reported by SLL. If the LL set is a singleton, -// then the grammar is LL but not SLL. If the LL set is the same size as the SLL -// set, the decision is SLL. If the LL set has size > 1, then that decision -// is truly ambiguous on the current input. If the LL set is smaller, then the -// SLL conflict resolution might choose an alternative that the full LL would -// rule out as a possibility based upon better context information. If that's -// the case, then the SLL parse will definitely get an error because the full LL -// analysis says it's not viable. If SLL conflict resolution chooses an -// alternative within the LL set, them both SLL and LL would choose the same -// alternative because they both choose the minimum of multiple conflicting -// alternatives.

            -// -//

            -// Let's say we have a set of SLL conflicting alternatives {@code {1, 2, 3}} and -// a smaller LL set called s. If s is {@code {2, 3}}, then SLL -// parsing will get an error because SLL will pursue alternative 1. If -// s is {@code {1, 2}} or {@code {1, 3}} then both SLL and LL will -// choose the same alternative because alternative one is the minimum of either -// set. If s is {@code {2}} or {@code {3}} then SLL will get a syntax -// error. If s is {@code {1}} then SLL will succeed.

            -// -//

            -// Of course, if the input is invalid, then we will get an error for sure in -// both SLL and LL parsing. Erroneous input will therefore require 2 passes over -// the input.

            -// +const Utils = require('./../Utils'); +const {Set, BitSet, DoubleDict} = Utils; -var Utils = require('./../Utils'); -var Set = Utils.Set; -var BitSet = Utils.BitSet; -var DoubleDict = Utils.DoubleDict; -var ATN = require('./ATN').ATN; -var ATNState = require('./ATNState').ATNState; -var ATNConfig = require('./ATNConfig').ATNConfig; -var ATNConfigSet = require('./ATNConfigSet').ATNConfigSet; -var Token = require('./../Token').Token; -var DFAState = require('./../dfa/DFAState').DFAState; -var PredPrediction = require('./../dfa/DFAState').PredPrediction; -var ATNSimulator = require('./ATNSimulator').ATNSimulator; -var PredictionMode = require('./PredictionMode').PredictionMode; -var RuleContext = require('./../RuleContext').RuleContext; -var ParserRuleContext = require('./../ParserRuleContext').ParserRuleContext; -var SemanticContext = require('./SemanticContext').SemanticContext; -var StarLoopEntryState = require('./ATNState').StarLoopEntryState; -var RuleStopState = require('./ATNState').RuleStopState; -var PredictionContext = require('./../PredictionContext').PredictionContext; -var Interval = require('./../IntervalSet').Interval; -var Transitions = require('./Transition'); -var Transition = Transitions.Transition; -var SetTransition = Transitions.SetTransition; -var NotSetTransition = Transitions.NotSetTransition; -var RuleTransition = Transitions.RuleTransition; -var ActionTransition = Transitions.ActionTransition; -var NoViableAltException = require('./../error/Errors').NoViableAltException; +const ATN = require('./ATN'); +const {ATNState, RuleStopState} = require('./ATNState'); -var SingletonPredictionContext = require('./../PredictionContext').SingletonPredictionContext; -var predictionContextFromRuleContext = require('./../PredictionContext').predictionContextFromRuleContext; - -function ParserATNSimulator(parser, atn, decisionToDFA, sharedContextCache) { - ATNSimulator.call(this, atn, sharedContextCache); - this.parser = parser; - 
this.decisionToDFA = decisionToDFA; - // SLL, LL, or LL + exact ambig detection?// - this.predictionMode = PredictionMode.LL; - // LAME globals to avoid parameters!!!!! I need these down deep in predTransition - this._input = null; - this._startIndex = 0; - this._outerContext = null; - this._dfa = null; - // Each prediction operation uses a cache for merge of prediction contexts. - // Don't keep around as it wastes huge amounts of memory. DoubleKeyMap - // isn't synchronized but we're ok since two threads shouldn't reuse same - // parser/atnsim object because it can only handle one input at a time. - // This maps graphs a and b to merged result c. (a,b)→c. We can avoid - // the merge if we ever see a and b again. Note that (b,a)→c should - // also be examined during cache lookup. - // - this.mergeCache = null; - return this; -} - -ParserATNSimulator.prototype = Object.create(ATNSimulator.prototype); -ParserATNSimulator.prototype.constructor = ParserATNSimulator; - -ParserATNSimulator.prototype.debug = false; -ParserATNSimulator.prototype.debug_closure = false; -ParserATNSimulator.prototype.debug_add = false; -ParserATNSimulator.prototype.debug_list_atn_decisions = false; -ParserATNSimulator.prototype.dfa_debug = false; -ParserATNSimulator.prototype.retry_debug = false; +const {ATNConfig} = require('./ATNConfig'); +const {ATNConfigSet} = require('./ATNConfigSet'); +const {Token} = require('./../Token'); +const {DFAState, PredPrediction} = require('./../dfa/DFAState'); +const ATNSimulator = require('./ATNSimulator'); +const PredictionMode = require('./PredictionMode'); +const RuleContext = require('./../RuleContext'); +const ParserRuleContext = require('./../ParserRuleContext'); +const {SemanticContext} = require('./SemanticContext'); +const {PredictionContext} = require('./../PredictionContext'); +const {Interval} = require('./../IntervalSet'); +const {Transition, SetTransition, NotSetTransition, RuleTransition, ActionTransition} = require('./Transition'); +const 
{NoViableAltException} = require('./../error/Errors'); +const {SingletonPredictionContext, predictionContextFromRuleContext} = require('./../PredictionContext'); -ParserATNSimulator.prototype.reset = function() { -}; - -ParserATNSimulator.prototype.adaptivePredict = function(input, decision, outerContext) { - if (this.debug || this.debug_list_atn_decisions) { - console.log("adaptivePredict decision " + decision + - " exec LA(1)==" + this.getLookaheadName(input) + - " line " + input.LT(1).line + ":" + - input.LT(1).column); - } - this._input = input; - this._startIndex = input.index; - this._outerContext = outerContext; - - var dfa = this.decisionToDFA[decision]; - this._dfa = dfa; - var m = input.mark(); - var index = input.index; - - // Now we are certain to have a specific decision's DFA - // But, do we still need an initial state? - try { - var s0; - if (dfa.precedenceDfa) { - // the start state for a precedence DFA depends on the current - // parser precedence, and is provided by a DFA method. - s0 = dfa.getPrecedenceStartState(this.parser.getPrecedence()); - } else { - // the start state for a "regular" DFA is just s0 - s0 = dfa.s0; - } - if (s0===null) { - if (outerContext===null) { - outerContext = RuleContext.EMPTY; - } - if (this.debug || this.debug_list_atn_decisions) { - console.log("predictATN decision " + dfa.decision + - " exec LA(1)==" + this.getLookaheadName(input) + - ", outerContext=" + outerContext.toString(this.parser.ruleNames)); - } - - var fullCtx = false; - var s0_closure = this.computeStartState(dfa.atnStartState, RuleContext.EMPTY, fullCtx); - - if( dfa.precedenceDfa) { - // If this is a precedence DFA, we use applyPrecedenceFilter - // to convert the computed start state to a precedence start - // state. We then use DFA.setPrecedenceStartState to set the - // appropriate start state for the precedence level rather - // than simply setting DFA.s0. 
- // - dfa.s0.configs = s0_closure; // not used for prediction but useful to know start configs anyway - s0_closure = this.applyPrecedenceFilter(s0_closure); - s0 = this.addDFAState(dfa, new DFAState(null, s0_closure)); - dfa.setPrecedenceStartState(this.parser.getPrecedence(), s0); - } else { - s0 = this.addDFAState(dfa, new DFAState(null, s0_closure)); - dfa.s0 = s0; - } - } - var alt = this.execATN(dfa, s0, input, index, outerContext); - if (this.debug) { - console.log("DFA after predictATN: " + dfa.toString(this.parser.literalNames)); - } - return alt; - } finally { +/** + * The embodiment of the adaptive LL(*), ALL(*), parsing strategy. + * + *

            + * The basic complexity of the adaptive strategy makes it harder to understand. + * We begin with ATN simulation to build paths in a DFA. Subsequent prediction + * requests go through the DFA first. If they reach a state without an edge for + * the current symbol, the algorithm fails over to the ATN simulation to + * complete the DFA path for the current input (until it finds a conflict state + * or uniquely predicting state).

            + * + *

            + * All of that is done without using the outer context because we want to create + * a DFA that is not dependent upon the rule invocation stack when we do a + * prediction. One DFA works in all contexts. We avoid using context not + * necessarily because it's slower, although it can be, but because of the DFA + * caching problem. The closure routine only considers the rule invocation stack + * created during prediction beginning in the decision rule. For example, if + * prediction occurs without invoking another rule's ATN, there are no context + * stacks in the configurations. When lack of context leads to a conflict, we + * don't know if it's an ambiguity or a weakness in the strong LL(*) parsing + * strategy (versus full LL(*)).

            + * + *

            + * When SLL yields a configuration set with conflict, we rewind the input and + * retry the ATN simulation, this time using full outer context without adding + * to the DFA. Configuration context stacks will be the full invocation stacks + * from the start rule. If we get a conflict using full context, then we can + * definitively say we have a true ambiguity for that input sequence. If we + * don't get a conflict, it implies that the decision is sensitive to the outer + * context. (It is not context-sensitive in the sense of context-sensitive + * grammars.)

            + * + *

            + * The next time we reach this DFA state with an SLL conflict, through DFA + * simulation, we will again retry the ATN simulation using full context mode. + * This is slow because we can't save the results and have to "interpret" the + * ATN each time we get that input.

            + * + *

            + * CACHING FULL CONTEXT PREDICTIONS

            + * + *

            + * We could cache results from full context to predicted alternative easily and + * that saves a lot of time but doesn't work in presence of predicates. The set + * of visible predicates from the ATN start state changes depending on the + * context, because closure can fall off the end of a rule. I tried to cache + * tuples (stack context, semantic context, predicted alt) but it was slower + * than interpreting and much more complicated. Also required a huge amount of + * memory. The goal is not to create the world's fastest parser anyway. I'd like + * to keep this algorithm simple. By launching multiple threads, we can improve + * the speed of parsing across a large number of files.

            + * + *

            + * There is no strict ordering between the amount of input used by SLL vs LL, + * which makes it really hard to build a cache for full context. Let's say that + * we have input A B C that leads to an SLL conflict with full context X. That + * implies that using X we might only use A B but we could also use A B C D to + * resolve conflict. Input A B C D could predict alternative 1 in one position + * in the input and A B C E could predict alternative 2 in another position in + * input. The conflicting SLL configurations could still be non-unique in the + * full context prediction, which would lead us to requiring more input than the + * original A B C. To make a prediction cache work, we have to track the exact + * input used during the previous prediction. That amounts to a cache that maps + * X to a specific DFA for that context.

            + * + *

            + * Something should be done for left-recursive expression predictions. They are + * likely LL(1) + pred eval. Easier to do the whole SLL unless error and retry + * with full LL thing Sam does.

            + * + *

            + * AVOIDING FULL CONTEXT PREDICTION

            + * + *

            + * We avoid doing full context retry when the outer context is empty, we did not + * dip into the outer context by falling off the end of the decision state rule, + * or when we force SLL mode.

            + * + *

            + * As an example of the not dip into outer context case, consider as super + * constructor calls versus function calls. One grammar might look like + * this:

            + * + *
            + * ctorBody
            + *   : '{' superCall? stat* '}'
            + *   ;
            + * 
            + * + *

            + * Or, you might see something like

            + * + *
            + * stat
            + *   : superCall ';'
            + *   | expression ';'
            + *   | ...
            + *   ;
            + * 
            + * + *

            + * In both cases I believe that no closure operations will dip into the outer + * context. In the first case ctorBody in the worst case will stop at the '}'. + * In the 2nd case it should stop at the ';'. Both cases should stay within the + * entry rule and not dip into the outer context.

            + * + *

            + * PREDICATES

            + * + *

            + * Predicates are always evaluated if present in either SLL or LL both. SLL and + * LL simulation deals with predicates differently. SLL collects predicates as + * it performs closure operations like ANTLR v3 did. It delays predicate + * evaluation until it reaches and accept state. This allows us to cache the SLL + * ATN simulation whereas, if we had evaluated predicates on-the-fly during + * closure, the DFA state configuration sets would be different and we couldn't + * build up a suitable DFA.

            + * + *

            + * When building a DFA accept state during ATN simulation, we evaluate any + * predicates and return the sole semantically valid alternative. If there is + * more than 1 alternative, we report an ambiguity. If there are 0 alternatives, + * we throw an exception. Alternatives without predicates act like they have + * true predicates. The simple way to think about it is to strip away all + * alternatives with false predicates and choose the minimum alternative that + * remains.

            + * + *

            + * When we start in the DFA and reach an accept state that's predicated, we test + * those and return the minimum semantically viable alternative. If no + * alternatives are viable, we throw an exception.

            + * + *

            + * During full LL ATN simulation, closure always evaluates predicates and + * on-the-fly. This is crucial to reducing the configuration set size during + * closure. It hits a landmine when parsing with the Java grammar, for example, + * without this on-the-fly evaluation.

            + * + *

            + * SHARING DFA

            + * + *

            + * All instances of the same parser share the same decision DFAs through a + * static field. Each instance gets its own ATN simulator but they share the + * same {@link //decisionToDFA} field. They also share a + * {@link PredictionContextCache} object that makes sure that all + * {@link PredictionContext} objects are shared among the DFA states. This makes + * a big size difference.

            + * + *

            + * THREAD SAFETY

            + * + *

            + * The {@link ParserATNSimulator} locks on the {@link //decisionToDFA} field when + * it adds a new DFA object to that array. {@link //addDFAEdge} + * locks on the DFA for the current decision when setting the + * {@link DFAState//edges} field. {@link //addDFAState} locks on + * the DFA for the current decision when looking up a DFA state to see if it + * already exists. We must make sure that all requests to add DFA states that + * are equivalent result in the same shared DFA object. This is because lots of + * threads will be trying to update the DFA at once. The + * {@link //addDFAState} method also locks inside the DFA lock + * but this time on the shared context cache when it rebuilds the + * configurations' {@link PredictionContext} objects using cached + * subgraphs/nodes. No other locking occurs, even during DFA simulation. This is + * safe as long as we can guarantee that all threads referencing + * {@code s.edge[t]} get the same physical target {@link DFAState}, or + * {@code null}. Once into the DFA, the DFA simulation does not reference the + * {@link DFA//states} map. It follows the {@link DFAState//edges} field to new + * targets. The DFA simulator will either find {@link DFAState//edges} to be + * {@code null}, to be non-{@code null} and {@code dfa.edges[t]} null, or + * {@code dfa.edges[t]} to be non-null. The + * {@link //addDFAEdge} method could be racing to set the field + * but in either case the DFA simulator works; if {@code null}, and requests ATN + * simulation. It could also race trying to get {@code dfa.edges[t]}, but either + * way it will work because it's not doing a test and set operation.

            + * + *

            + * Starting with SLL then failing to combined SLL/LL (Two-Stage + * Parsing)

            + * + *

            + * Sam pointed out that if SLL does not give a syntax error, then there is no + * point in doing full LL, which is slower. We only have to try LL if we get a + * syntax error. For maximum speed, Sam starts the parser set to pure SLL + * mode with the {@link BailErrorStrategy}:

            + * + *
            + * parser.{@link Parser//getInterpreter() getInterpreter()}.{@link //setPredictionMode setPredictionMode}{@code (}{@link PredictionMode//SLL}{@code )};
            + * parser.{@link Parser//setErrorHandler setErrorHandler}(new {@link BailErrorStrategy}());
            + * 
            + * + *

            + * If it does not get a syntax error, then we're done. If it does get a syntax + * error, we need to retry with the combined SLL/LL strategy.

            + * + *

            + * The reason this works is as follows. If there are no SLL conflicts, then the + * grammar is SLL (at least for that input set). If there is an SLL conflict, + * the full LL analysis must yield a set of viable alternatives which is a + * subset of the alternatives reported by SLL. If the LL set is a singleton, + * then the grammar is LL but not SLL. If the LL set is the same size as the SLL + * set, the decision is SLL. If the LL set has size > 1, then that decision + * is truly ambiguous on the current input. If the LL set is smaller, then the + * SLL conflict resolution might choose an alternative that the full LL would + * rule out as a possibility based upon better context information. If that's + * the case, then the SLL parse will definitely get an error because the full LL + * analysis says it's not viable. If SLL conflict resolution chooses an + * alternative within the LL set, them both SLL and LL would choose the same + * alternative because they both choose the minimum of multiple conflicting + * alternatives.

            + * + *

            + * Let's say we have a set of SLL conflicting alternatives {@code {1, 2, 3}} and + * a smaller LL set called s. If s is {@code {2, 3}}, then SLL + * parsing will get an error because SLL will pursue alternative 1. If + * s is {@code {1, 2}} or {@code {1, 3}} then both SLL and LL will + * choose the same alternative because alternative one is the minimum of either + * set. If s is {@code {2}} or {@code {3}} then SLL will get a syntax + * error. If s is {@code {1}} then SLL will succeed.

            + * + *

            + * Of course, if the input is invalid, then we will get an error for sure in + * both SLL and LL parsing. Erroneous input will therefore require 2 passes over + * the input.

            + */ +class ParserATNSimulator extends ATNSimulator { + constructor(parser, atn, decisionToDFA, sharedContextCache) { + super(atn, sharedContextCache); + this.parser = parser; + this.decisionToDFA = decisionToDFA; + // SLL, LL, or LL + exact ambig detection?// + this.predictionMode = PredictionMode.LL; + // LAME globals to avoid parameters!!!!! I need these down deep in predTransition + this._input = null; + this._startIndex = 0; + this._outerContext = null; this._dfa = null; - this.mergeCache = null; // wack cache after each prediction - input.seek(index); - input.release(m); + /** + * Each prediction operation uses a cache for merge of prediction contexts. + * Don't keep around as it wastes huge amounts of memory. DoubleKeyMap + * isn't synchronized but we're ok since two threads shouldn't reuse same + * parser/atnsim object because it can only handle one input at a time. + * This maps graphs a and b to merged result c. (a,b)→c. We can avoid + * the merge if we ever see a and b again. Note that (b,a)→c should + * also be examined during cache lookup. + */ + this.mergeCache = null; + this.debug = false; + this.debug_closure = false; + this.debug_add = false; + this.debug_list_atn_decisions = false; + this.dfa_debug = false; + this.retry_debug = false; } -}; -// Performs ATN simulation to compute a predicted alternative based -// upon the remaining input, but also updates the DFA cache to avoid -// having to traverse the ATN again for the same input sequence. -// There are some key conditions we're looking for after computing a new -// set of ATN configs (proposed DFA state): - // if the set is empty, there is no viable alternative for current symbol - // does the state uniquely predict an alternative? - // does the state have a conflict that would prevent us from - // putting it on the work list? 
+ reset() {} -// We also have some key operations to do: - // add an edge from previous DFA state to potentially new DFA state, D, - // upon current symbol but only if adding to work list, which means in all - // cases except no viable alternative (and possibly non-greedy decisions?) - // collecting predicates and adding semantic context to DFA accept states - // adding rule context to context-sensitive DFA accept states - // consuming an input symbol - // reporting a conflict - // reporting an ambiguity - // reporting a context sensitivity - // reporting insufficient predicates - -// cover these cases: -// dead end -// single alt -// single alt + preds -// conflict -// conflict + preds -// -ParserATNSimulator.prototype.execATN = function(dfa, s0, input, startIndex, outerContext ) { - if (this.debug || this.debug_list_atn_decisions) { - console.log("execATN decision " + dfa.decision + - " exec LA(1)==" + this.getLookaheadName(input) + - " line " + input.LT(1).line + ":" + input.LT(1).column); - } - var alt; - var previousD = s0; - - if (this.debug) { - console.log("s0 = " + s0); - } - var t = input.LA(1); - while(true) { // while more work - var D = this.getExistingTargetState(previousD, t); - if(D===null) { - D = this.computeTargetState(dfa, previousD, t); + adaptivePredict(input, decision, outerContext) { + if (this.debug || this.debug_list_atn_decisions) { + console.log("adaptivePredict decision " + decision + + " exec LA(1)==" + this.getLookaheadName(input) + + " line " + input.LT(1).line + ":" + + input.LT(1).column); } - if(D===ATNSimulator.ERROR) { - // if any configs in previous dipped into outer context, that - // means that input up to t actually finished entry rule - // at least for SLL decision. Full LL doesn't dip into outer - // so don't need special case. - // We will get an error no matter what so delay until after - // decision; better error message. Also, no reachable target - // ATN states in SLL implies LL will also get nowhere. 
- // If conflict in states that dip out, choose min since we - // will get error no matter what. - var e = this.noViableAlt(input, outerContext, previousD.configs, startIndex); - input.seek(startIndex); - alt = this.getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule(previousD.configs, outerContext); - if(alt!==ATN.INVALID_ALT_NUMBER) { - return alt; + this._input = input; + this._startIndex = input.index; + this._outerContext = outerContext; + + const dfa = this.decisionToDFA[decision]; + this._dfa = dfa; + const m = input.mark(); + const index = input.index; + + // Now we are certain to have a specific decision's DFA + // But, do we still need an initial state? + try { + let s0; + if (dfa.precedenceDfa) { + // the start state for a precedence DFA depends on the current + // parser precedence, and is provided by a DFA method. + s0 = dfa.getPrecedenceStartState(this.parser.getPrecedence()); } else { - throw e; + // the start state for a "regular" DFA is just s0 + s0 = dfa.s0; } - } - if(D.requiresFullContext && this.predictionMode !== PredictionMode.SLL) { - // IF PREDS, MIGHT RESOLVE TO SINGLE ALT => SLL (or syntax error) - var conflictingAlts = null; - if (D.predicates!==null) { - if (this.debug) { - console.log("DFA state has preds in DFA sim LL failover"); + if (s0===null) { + if (outerContext===null) { + outerContext = RuleContext.EMPTY; } - var conflictIndex = input.index; - if(conflictIndex !== startIndex) { - input.seek(startIndex); + if (this.debug || this.debug_list_atn_decisions) { + console.log("predictATN decision " + dfa.decision + + " exec LA(1)==" + this.getLookaheadName(input) + + ", outerContext=" + outerContext.toString(this.parser.ruleNames)); } - conflictingAlts = this.evalSemanticContext(D.predicates, outerContext, true); - if (conflictingAlts.length===1) { - if(this.debug) { - console.log("Full LL avoided"); - } - return conflictingAlts.minValue(); - } - if (conflictIndex !== startIndex) { - // restore the index so reporting the fallback 
to full - // context occurs with the index at the correct spot - input.seek(conflictIndex); + + const fullCtx = false; + let s0_closure = this.computeStartState(dfa.atnStartState, RuleContext.EMPTY, fullCtx); + + if( dfa.precedenceDfa) { + // If this is a precedence DFA, we use applyPrecedenceFilter + // to convert the computed start state to a precedence start + // state. We then use DFA.setPrecedenceStartState to set the + // appropriate start state for the precedence level rather + // than simply setting DFA.s0. + // + dfa.s0.configs = s0_closure; // not used for prediction but useful to know start configs anyway + s0_closure = this.applyPrecedenceFilter(s0_closure); + s0 = this.addDFAState(dfa, new DFAState(null, s0_closure)); + dfa.setPrecedenceStartState(this.parser.getPrecedence(), s0); + } else { + s0 = this.addDFAState(dfa, new DFAState(null, s0_closure)); + dfa.s0 = s0; } } - if (this.dfa_debug) { - console.log("ctx sensitive state " + outerContext +" in " + D); + const alt = this.execATN(dfa, s0, input, index, outerContext); + if (this.debug) { + console.log("DFA after predictATN: " + dfa.toString(this.parser.literalNames)); } - var fullCtx = true; - var s0_closure = this.computeStartState(dfa.atnStartState, outerContext, fullCtx); - this.reportAttemptingFullContext(dfa, conflictingAlts, D.configs, startIndex, input.index); - alt = this.execATNWithFullContext(dfa, D, s0_closure, input, startIndex, outerContext); return alt; + } finally { + this._dfa = null; + this.mergeCache = null; // wack cache after each prediction + input.seek(index); + input.release(m); } - if (D.isAcceptState) { - if (D.predicates===null) { - return D.prediction; + } + + /** + * Performs ATN simulation to compute a predicted alternative based + * upon the remaining input, but also updates the DFA cache to avoid + * having to traverse the ATN again for the same input sequence. 
+ * + * There are some key conditions we're looking for after computing a new + * set of ATN configs (proposed DFA state): + * if the set is empty, there is no viable alternative for current symbol + * does the state uniquely predict an alternative? + * does the state have a conflict that would prevent us from + * putting it on the work list? + * + * We also have some key operations to do: + * add an edge from previous DFA state to potentially new DFA state, D, + * upon current symbol but only if adding to work list, which means in all + * cases except no viable alternative (and possibly non-greedy decisions?) + * collecting predicates and adding semantic context to DFA accept states + * adding rule context to context-sensitive DFA accept states + * consuming an input symbol + * reporting a conflict + * reporting an ambiguity + * reporting a context sensitivity + * reporting insufficient predicates + * + * cover these cases: + * dead end + * single alt + * single alt + preds + * conflict + * conflict + preds + * + */ + execATN(dfa, s0, input, startIndex, outerContext ) { + if (this.debug || this.debug_list_atn_decisions) { + console.log("execATN decision " + dfa.decision + + " exec LA(1)==" + this.getLookaheadName(input) + + " line " + input.LT(1).line + ":" + input.LT(1).column); + } + let alt; + let previousD = s0; + + if (this.debug) { + console.log("s0 = " + s0); + } + let t = input.LA(1); + while(true) { // while more work + let D = this.getExistingTargetState(previousD, t); + if(D===null) { + D = this.computeTargetState(dfa, previousD, t); } - var stopIndex = input.index; - input.seek(startIndex); - var alts = this.evalSemanticContext(D.predicates, outerContext, true); - if (alts.length===0) { - throw this.noViableAlt(input, outerContext, D.configs, startIndex); - } else if (alts.length===1) { - return alts.minValue(); - } else { - // report ambiguity after predicate evaluation to make sure the correct set of ambig alts is reported. 
- this.reportAmbiguity(dfa, D, startIndex, stopIndex, false, alts, D.configs); - return alts.minValue(); + if(D===ATNSimulator.ERROR) { + // if any configs in previous dipped into outer context, that + // means that input up to t actually finished entry rule + // at least for SLL decision. Full LL doesn't dip into outer + // so don't need special case. + // We will get an error no matter what so delay until after + // decision; better error message. Also, no reachable target + // ATN states in SLL implies LL will also get nowhere. + // If conflict in states that dip out, choose min since we + // will get error no matter what. + const e = this.noViableAlt(input, outerContext, previousD.configs, startIndex); + input.seek(startIndex); + alt = this.getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule(previousD.configs, outerContext); + if(alt!==ATN.INVALID_ALT_NUMBER) { + return alt; + } else { + throw e; + } + } + if(D.requiresFullContext && this.predictionMode !== PredictionMode.SLL) { + // IF PREDS, MIGHT RESOLVE TO SINGLE ALT => SLL (or syntax error) + let conflictingAlts = null; + if (D.predicates!==null) { + if (this.debug) { + console.log("DFA state has preds in DFA sim LL failover"); + } + const conflictIndex = input.index; + if(conflictIndex !== startIndex) { + input.seek(startIndex); + } + conflictingAlts = this.evalSemanticContext(D.predicates, outerContext, true); + if (conflictingAlts.length===1) { + if(this.debug) { + console.log("Full LL avoided"); + } + return conflictingAlts.minValue(); + } + if (conflictIndex !== startIndex) { + // restore the index so reporting the fallback to full + // context occurs with the index at the correct spot + input.seek(conflictIndex); + } + } + if (this.dfa_debug) { + console.log("ctx sensitive state " + outerContext +" in " + D); + } + const fullCtx = true; + const s0_closure = this.computeStartState(dfa.atnStartState, outerContext, fullCtx); + this.reportAttemptingFullContext(dfa, conflictingAlts, D.configs, 
startIndex, input.index); + alt = this.execATNWithFullContext(dfa, D, s0_closure, input, startIndex, outerContext); + return alt; + } + if (D.isAcceptState) { + if (D.predicates===null) { + return D.prediction; + } + const stopIndex = input.index; + input.seek(startIndex); + const alts = this.evalSemanticContext(D.predicates, outerContext, true); + if (alts.length===0) { + throw this.noViableAlt(input, outerContext, D.configs, startIndex); + } else if (alts.length===1) { + return alts.minValue(); + } else { + // report ambiguity after predicate evaluation to make sure the correct set of ambig alts is reported. + this.reportAmbiguity(dfa, D, startIndex, stopIndex, false, alts, D.configs); + return alts.minValue(); + } + } + previousD = D; + + if (t !== Token.EOF) { + input.consume(); + t = input.LA(1); } } - previousD = D; + } - if (t !== Token.EOF) { - input.consume(); - t = input.LA(1); + /** + * Get an existing target state for an edge in the DFA. If the target state + * for the edge has not yet been computed or is otherwise not available, + * this method returns {@code null}. + * + * @param previousD The current DFA state + * @param t The next input symbol + * @return The existing target DFA state for the given input symbol + * {@code t}, or {@code null} if the target state for this edge is not + * already cached + */ + getExistingTargetState(previousD, t) { + const edges = previousD.edges; + if (edges===null) { + return null; + } else { + return edges[t + 1] || null; } } -}; -// -// Get an existing target state for an edge in the DFA. If the target state -// for the edge has not yet been computed or is otherwise not available, -// this method returns {@code null}. 
-// -// @param previousD The current DFA state -// @param t The next input symbol -// @return The existing target DFA state for the given input symbol -// {@code t}, or {@code null} if the target state for this edge is not -// already cached -// -ParserATNSimulator.prototype.getExistingTargetState = function(previousD, t) { - var edges = previousD.edges; - if (edges===null) { - return null; - } else { - return edges[t + 1] || null; - } -}; -// -// Compute a target state for an edge in the DFA, and attempt to add the -// computed state and corresponding edge to the DFA. -// -// @param dfa The DFA -// @param previousD The current DFA state -// @param t The next input symbol -// -// @return The computed target DFA state for the given input symbol -// {@code t}. If {@code t} does not lead to a valid DFA state, this method -// returns {@link //ERROR}. -// -ParserATNSimulator.prototype.computeTargetState = function(dfa, previousD, t) { - var reach = this.computeReachSet(previousD.configs, t, false); - if(reach===null) { - this.addDFAEdge(dfa, previousD, t, ATNSimulator.ERROR); - return ATNSimulator.ERROR; - } - // create new target state; we'll add to DFA after it's complete - var D = new DFAState(null, reach); - var predictedAlt = this.getUniqueAlt(reach); + /** + * Compute a target state for an edge in the DFA, and attempt to add the + * computed state and corresponding edge to the DFA. + * + * @param dfa The DFA + * @param previousD The current DFA state + * @param t The next input symbol + * + * @return The computed target DFA state for the given input symbol + * {@code t}. 
If {@code t} does not lead to a valid DFA state, this method + * returns {@link //ERROR + */ + computeTargetState(dfa, previousD, t) { + const reach = this.computeReachSet(previousD.configs, t, false); + if(reach===null) { + this.addDFAEdge(dfa, previousD, t, ATNSimulator.ERROR); + return ATNSimulator.ERROR; + } + // create new target state; we'll add to DFA after it's complete + let D = new DFAState(null, reach); - if (this.debug) { - var altSubSets = PredictionMode.getConflictingAltSubsets(reach); - console.log("SLL altSubSets=" + Utils.arrayToString(altSubSets) + - ", previous=" + previousD.configs + - ", configs=" + reach + - ", predict=" + predictedAlt + - ", allSubsetsConflict=" + - PredictionMode.allSubsetsConflict(altSubSets) + ", conflictingAlts=" + - this.getConflictingAlts(reach)); + const predictedAlt = this.getUniqueAlt(reach); + + if (this.debug) { + const altSubSets = PredictionMode.getConflictingAltSubsets(reach); + console.log("SLL altSubSets=" + Utils.arrayToString(altSubSets) + + ", previous=" + previousD.configs + + ", configs=" + reach + + ", predict=" + predictedAlt + + ", allSubsetsConflict=" + + PredictionMode.allSubsetsConflict(altSubSets) + ", conflictingAlts=" + + this.getConflictingAlts(reach)); + } + if (predictedAlt!==ATN.INVALID_ALT_NUMBER) { + // NO CONFLICT, UNIQUELY PREDICTED ALT + D.isAcceptState = true; + D.configs.uniqueAlt = predictedAlt; + D.prediction = predictedAlt; + } else if (PredictionMode.hasSLLConflictTerminatingPrediction(this.predictionMode, reach)) { + // MORE THAN ONE VIABLE ALTERNATIVE + D.configs.conflictingAlts = this.getConflictingAlts(reach); + D.requiresFullContext = true; + // in SLL-only mode, we will stop at this state and return the minimum alt + D.isAcceptState = true; + D.prediction = D.configs.conflictingAlts.minValue(); + } + if (D.isAcceptState && D.configs.hasSemanticContext) { + this.predicateDFAState(D, this.atn.getDecisionState(dfa.decision)); + if( D.predicates!==null) { + D.prediction = 
ATN.INVALID_ALT_NUMBER; + } + } + // all adds to dfa are done after we've created full D state + D = this.addDFAEdge(dfa, previousD, t, D); + return D; } - if (predictedAlt!==ATN.INVALID_ALT_NUMBER) { - // NO CONFLICT, UNIQUELY PREDICTED ALT - D.isAcceptState = true; - D.configs.uniqueAlt = predictedAlt; - D.prediction = predictedAlt; - } else if (PredictionMode.hasSLLConflictTerminatingPrediction(this.predictionMode, reach)) { - // MORE THAN ONE VIABLE ALTERNATIVE - D.configs.conflictingAlts = this.getConflictingAlts(reach); - D.requiresFullContext = true; - // in SLL-only mode, we will stop at this state and return the minimum alt - D.isAcceptState = true; - D.prediction = D.configs.conflictingAlts.minValue(); - } - if (D.isAcceptState && D.configs.hasSemanticContext) { - this.predicateDFAState(D, this.atn.getDecisionState(dfa.decision)); - if( D.predicates!==null) { - D.prediction = ATN.INVALID_ALT_NUMBER; + + predicateDFAState(dfaState, decisionState) { + // We need to test all predicates, even in DFA states that + // uniquely predict alternative. + const nalts = decisionState.transitions.length; + // Update DFA so reach becomes accept state with (predicate,alt) + // pairs if preds found for conflicting alts + const altsToCollectPredsFrom = this.getConflictingAltsOrUniqueAlt(dfaState.configs); + const altToPred = this.getPredsForAmbigAlts(altsToCollectPredsFrom, dfaState.configs, nalts); + if (altToPred!==null) { + dfaState.predicates = this.getPredicatePredictions(altsToCollectPredsFrom, altToPred); + dfaState.prediction = ATN.INVALID_ALT_NUMBER; // make sure we use preds + } else { + // There are preds in configs but they might go away + // when OR'd together like {p}? || NONE == NONE. 
If neither + // alt has preds, resolve to min alt + dfaState.prediction = altsToCollectPredsFrom.minValue(); } } - // all adds to dfa are done after we've created full D state - D = this.addDFAEdge(dfa, previousD, t, D); - return D; -}; - -ParserATNSimulator.prototype.predicateDFAState = function(dfaState, decisionState) { - // We need to test all predicates, even in DFA states that - // uniquely predict alternative. - var nalts = decisionState.transitions.length; - // Update DFA so reach becomes accept state with (predicate,alt) - // pairs if preds found for conflicting alts - var altsToCollectPredsFrom = this.getConflictingAltsOrUniqueAlt(dfaState.configs); - var altToPred = this.getPredsForAmbigAlts(altsToCollectPredsFrom, dfaState.configs, nalts); - if (altToPred!==null) { - dfaState.predicates = this.getPredicatePredictions(altsToCollectPredsFrom, altToPred); - dfaState.prediction = ATN.INVALID_ALT_NUMBER; // make sure we use preds - } else { - // There are preds in configs but they might go away - // when OR'd together like {p}? || NONE == NONE. If neither - // alt has preds, resolve to min alt - dfaState.prediction = altsToCollectPredsFrom.minValue(); - } -}; // comes back with reach.uniqueAlt set to a valid alt -ParserATNSimulator.prototype.execATNWithFullContext = function(dfa, D, // how far we got before failing over - s0, - input, - startIndex, - outerContext) { - if (this.debug || this.debug_list_atn_decisions) { - console.log("execATNWithFullContext "+s0); - } - var fullCtx = true; - var foundExactAmbig = false; - var reach = null; - var previous = s0; - input.seek(startIndex); - var t = input.LA(1); - var predictedAlt = -1; - while (true) { // while more work - reach = this.computeReachSet(previous, t, fullCtx); - if (reach===null) { - // if any configs in previous dipped into outer context, that - // means that input up to t actually finished entry rule - // at least for LL decision. Full LL doesn't dip into outer - // so don't need special case. 
- // We will get an error no matter what so delay until after - // decision; better error message. Also, no reachable target - // ATN states in SLL implies LL will also get nowhere. - // If conflict in states that dip out, choose min since we - // will get error no matter what. - var e = this.noViableAlt(input, outerContext, previous, startIndex); - input.seek(startIndex); - var alt = this.getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule(previous, outerContext); - if(alt!==ATN.INVALID_ALT_NUMBER) { - return alt; + execATNWithFullContext(dfa, D, // how far we got before failing over + s0, + input, + startIndex, + outerContext) { + if (this.debug || this.debug_list_atn_decisions) { + console.log("execATNWithFullContext "+s0); + } + const fullCtx = true; + let foundExactAmbig = false; + let reach = null; + let previous = s0; + input.seek(startIndex); + let t = input.LA(1); + let predictedAlt = -1; + while (true) { // while more work + reach = this.computeReachSet(previous, t, fullCtx); + if (reach===null) { + // if any configs in previous dipped into outer context, that + // means that input up to t actually finished entry rule + // at least for LL decision. Full LL doesn't dip into outer + // so don't need special case. + // We will get an error no matter what so delay until after + // decision; better error message. Also, no reachable target + // ATN states in SLL implies LL will also get nowhere. + // If conflict in states that dip out, choose min since we + // will get error no matter what. 
+ const e = this.noViableAlt(input, outerContext, previous, startIndex); + input.seek(startIndex); + const alt = this.getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule(previous, outerContext); + if(alt!==ATN.INVALID_ALT_NUMBER) { + return alt; + } else { + throw e; + } + } + const altSubSets = PredictionMode.getConflictingAltSubsets(reach); + if(this.debug) { + console.log("LL altSubSets=" + altSubSets + ", predict=" + + PredictionMode.getUniqueAlt(altSubSets) + ", resolvesToJustOneViableAlt=" + + PredictionMode.resolvesToJustOneViableAlt(altSubSets)); + } + reach.uniqueAlt = this.getUniqueAlt(reach); + // unique prediction? + if(reach.uniqueAlt!==ATN.INVALID_ALT_NUMBER) { + predictedAlt = reach.uniqueAlt; + break; + } else if (this.predictionMode !== PredictionMode.LL_EXACT_AMBIG_DETECTION) { + predictedAlt = PredictionMode.resolvesToJustOneViableAlt(altSubSets); + if(predictedAlt !== ATN.INVALID_ALT_NUMBER) { + break; + } } else { - throw e; + // In exact ambiguity mode, we never try to terminate early. + // Just keeps scarfing until we know what the conflict is + if (PredictionMode.allSubsetsConflict(altSubSets) && PredictionMode.allSubsetsEqual(altSubSets)) { + foundExactAmbig = true; + predictedAlt = PredictionMode.getSingleViableAlt(altSubSets); + break; + } + // else there are multiple non-conflicting subsets or + // we're not sure what the ambiguity is yet. + // So, keep going. + } + previous = reach; + if( t !== Token.EOF) { + input.consume(); + t = input.LA(1); } } - var altSubSets = PredictionMode.getConflictingAltSubsets(reach); - if(this.debug) { - console.log("LL altSubSets=" + altSubSets + ", predict=" + - PredictionMode.getUniqueAlt(altSubSets) + ", resolvesToJustOneViableAlt=" + - PredictionMode.resolvesToJustOneViableAlt(altSubSets)); + // If the configuration set uniquely predicts an alternative, + // without conflict, then we know that it's a full LL decision + // not SLL. 
+ if (reach.uniqueAlt !== ATN.INVALID_ALT_NUMBER ) { + this.reportContextSensitivity(dfa, predictedAlt, reach, startIndex, input.index); + return predictedAlt; } - reach.uniqueAlt = this.getUniqueAlt(reach); - // unique prediction? - if(reach.uniqueAlt!==ATN.INVALID_ALT_NUMBER) { - predictedAlt = reach.uniqueAlt; - break; - } else if (this.predictionMode !== PredictionMode.LL_EXACT_AMBIG_DETECTION) { - predictedAlt = PredictionMode.resolvesToJustOneViableAlt(altSubSets); - if(predictedAlt !== ATN.INVALID_ALT_NUMBER) { - break; - } - } else { - // In exact ambiguity mode, we never try to terminate early. - // Just keeps scarfing until we know what the conflict is - if (PredictionMode.allSubsetsConflict(altSubSets) && PredictionMode.allSubsetsEqual(altSubSets)) { - foundExactAmbig = true; - predictedAlt = PredictionMode.getSingleViableAlt(altSubSets); - break; - } - // else there are multiple non-conflicting subsets or - // we're not sure what the ambiguity is yet. - // So, keep going. - } - previous = reach; - if( t !== Token.EOF) { - input.consume(); - t = input.LA(1); - } - } - // If the configuration set uniquely predicts an alternative, - // without conflict, then we know that it's a full LL decision - // not SLL. - if (reach.uniqueAlt !== ATN.INVALID_ALT_NUMBER ) { - this.reportContextSensitivity(dfa, predictedAlt, reach, startIndex, input.index); + // We do not check predicates here because we have checked them + // on-the-fly when doing full context prediction. + + // + // In non-exact ambiguity detection mode, we might actually be able to + // detect an exact ambiguity, but I'm not going to spend the cycles + // needed to check. We only emit ambiguity warnings in exact ambiguity + // mode. + // + // For example, we might know that we have conflicting configurations. + // But, that does not mean that there is no way forward without a + // conflict. 
It's possible to have nonconflicting alt subsets as in: + + // altSubSets=[{1, 2}, {1, 2}, {1}, {1, 2}] + + // from + // + // [(17,1,[5 $]), (13,1,[5 10 $]), (21,1,[5 10 $]), (11,1,[$]), + // (13,2,[5 10 $]), (21,2,[5 10 $]), (11,2,[$])] + // + // In this case, (17,1,[5 $]) indicates there is some next sequence that + // would resolve this without conflict to alternative 1. Any other viable + // next sequence, however, is associated with a conflict. We stop + // looking for input because no amount of further lookahead will alter + // the fact that we should predict alternative 1. We just can't say for + // sure that there is an ambiguity without looking further. + + this.reportAmbiguity(dfa, D, startIndex, input.index, foundExactAmbig, null, reach); + return predictedAlt; } - // We do not check predicates here because we have checked them - // on-the-fly when doing full context prediction. - // - // In non-exact ambiguity detection mode, we might actually be able to - // detect an exact ambiguity, but I'm not going to spend the cycles - // needed to check. We only emit ambiguity warnings in exact ambiguity - // mode. - // - // For example, we might know that we have conflicting configurations. - // But, that does not mean that there is no way forward without a - // conflict. It's possible to have nonconflicting alt subsets as in: - - // altSubSets=[{1, 2}, {1, 2}, {1}, {1, 2}] - - // from - // - // [(17,1,[5 $]), (13,1,[5 10 $]), (21,1,[5 10 $]), (11,1,[$]), - // (13,2,[5 10 $]), (21,2,[5 10 $]), (11,2,[$])] - // - // In this case, (17,1,[5 $]) indicates there is some next sequence that - // would resolve this without conflict to alternative 1. Any other viable - // next sequence, however, is associated with a conflict. We stop - // looking for input because no amount of further lookahead will alter - // the fact that we should predict alternative 1. We just can't say for - // sure that there is an ambiguity without looking further. 
- - this.reportAmbiguity(dfa, D, startIndex, input.index, foundExactAmbig, null, reach); - - return predictedAlt; -}; - -ParserATNSimulator.prototype.computeReachSet = function(closure, t, fullCtx) { - if (this.debug) { - console.log("in computeReachSet, starting closure: " + closure); - } - if( this.mergeCache===null) { - this.mergeCache = new DoubleDict(); - } - var intermediate = new ATNConfigSet(fullCtx); - - // Configurations already in a rule stop state indicate reaching the end - // of the decision rule (local context) or end of the start rule (full - // context). Once reached, these configurations are never updated by a - // closure operation, so they are handled separately for the performance - // advantage of having a smaller intermediate set when calling closure. - // - // For full-context reach operations, separate handling is required to - // ensure that the alternative matching the longest overall sequence is - // chosen when multiple such configurations can match the input. - - var skippedStopStates = null; - - // First figure out where we can reach on input t - for (var i=0; iWhen {@code lookToEndOfRule} is true, this method uses -// {@link ATN//nextTokens} for each configuration in {@code configs} which is -// not already in a rule stop state to see if a rule stop state is reachable -// from the configuration via epsilon-only transitions.

            -// -// @param configs the configuration set to update -// @param lookToEndOfRule when true, this method checks for rule stop states -// reachable by epsilon-only transitions from each configuration in -// {@code configs}. -// -// @return {@code configs} if all configurations in {@code configs} are in a -// rule stop state, otherwise return a new configuration set containing only -// the configurations from {@code configs} which are in a rule stop state -// -ParserATNSimulator.prototype.removeAllConfigsNotInRuleStopState = function(configs, lookToEndOfRule) { - if (PredictionMode.allConfigsInRuleStopStates(configs)) { - return configs; - } - var result = new ATNConfigSet(configs.fullCtx); - for(var i=0; i -//
          • Evaluate the precedence predicates for each configuration using -// {@link SemanticContext//evalPrecedence}.
          • -//
          • Remove all configurations which predict an alternative greater than -// 1, for which another configuration that predicts alternative 1 is in the -// same ATN state with the same prediction context. This transformation is -// valid for the following reasons: -//
              -//
            • The closure block cannot contain any epsilon transitions which bypass -// the body of the closure, so all states reachable via alternative 1 are -// part of the precedence alternatives of the transformed left-recursive -// rule.
            • -//
            • The "primary" portion of a left recursive rule cannot contain an -// epsilon transition, so the only way an alternative other than 1 can exist -// in a state that is also reachable via alternative 1 is by nesting calls -// to the left-recursive rule, with the outer calls not being at the -// preferred precedence level.
            • -//
            -//
          • -// -// -//

            -// The prediction context must be considered by this filter to address -// situations like the following. -//

            -// -//
            -// grammar TA;
            -// prog: statement* EOF;
            -// statement: letterA | statement letterA 'b' ;
            -// letterA: 'a';
            -// 
            -//
            -//

            -// If the above grammar, the ATN state immediately before the token -// reference {@code 'a'} in {@code letterA} is reachable from the left edge -// of both the primary and closure blocks of the left-recursive rule -// {@code statement}. The prediction context associated with each of these -// configurations distinguishes between them, and prevents the alternative -// which stepped out to {@code prog} (and then back in to {@code statement} -// from being eliminated by the filter. -//

            -// -// @param configs The configuration set computed by -// {@link //computeStartState} as the start state for the DFA. -// @return The transformed configuration set representing the start state -// for a precedence DFA at a particular precedence level (determined by -// calling {@link Parser//getPrecedence}). -// -ParserATNSimulator.prototype.applyPrecedenceFilter = function(configs) { - var config; - var statesFromAlt1 = []; - var configSet = new ATNConfigSet(configs.fullCtx); - for(var i=0; i1 - // (basically a graph subtraction algorithm). - if (!config.precedenceFilterSuppressed) { - var context = statesFromAlt1[config.state.stateNumber] || null; - if (context!==null && context.equals(config.context)) { - // eliminated + if (c.state instanceof RuleStopState) { + if (fullCtx || t === Token.EOF) { + if (skippedStopStates===null) { + skippedStopStates = []; + } + skippedStopStates.push(c); + if(this.debug_add) { + console.log("added " + c + " to skippedStopStates"); + } + } continue; } - } - configSet.add(config, this.mergeCache); - } - return configSet; -}; + for(let j=0;jWhen {@code lookToEndOfRule} is true, this method uses + * {@link ATN//nextTokens} for each configuration in {@code configs} which is + * not already in a rule stop state to see if a rule stop state is reachable + * from the configuration via epsilon-only transitions.

            + * + * @param configs the configuration set to update + * @param lookToEndOfRule when true, this method checks for rule stop states + * reachable by epsilon-only transitions from each configuration in + * {@code configs}. + * + * @return {@code configs} if all configurations in {@code configs} are in a + * rule stop state, otherwise return a new configuration set containing only + * the configurations from {@code configs} which are in a rule stop state + */ + removeAllConfigsNotInRuleStopState(configs, lookToEndOfRule) { + if (PredictionMode.allConfigsInRuleStopStates(configs)) { + return configs; } - if (pred !== SemanticContext.NONE) { - containsPredicate = true; + const result = new ATNConfigSet(configs.fullCtx); + for(let i=0; i -// The default implementation of this method uses the following -// algorithm to identify an ATN configuration which successfully parsed the -// decision entry rule. Choosing such an alternative ensures that the -// {@link ParserRuleContext} returned by the calling rule will be complete -// and valid, and the syntax error will be reported later at a more -// localized location.

            -// -//
              -//
            • If a syntactically valid path or paths reach the end of the decision rule and -// they are semantically valid if predicated, return the min associated alt.
            • -//
            • Else, if a semantically invalid but syntactically valid path exist -// or paths exist, return the minimum associated alt. -//
            • -//
            • Otherwise, return {@link ATN//INVALID_ALT_NUMBER}.
            • -//
            -// -//

            -// In some scenarios, the algorithm described above could predict an -// alternative which will result in a {@link FailedPredicateException} in -// the parser. Specifically, this could occur if the only configuration -// capable of successfully parsing to the end of the decision rule is -// blocked by a semantic predicate. By choosing this alternative within -// {@link //adaptivePredict} instead of throwing a -// {@link NoViableAltException}, the resulting -// {@link FailedPredicateException} in the parser will identify the specific -// predicate which is preventing the parser from successfully parsing the -// decision rule, which helps developers identify and correct logic errors -// in semantic predicates. -//

            -// -// @param configs The ATN configurations which were valid immediately before -// the {@link //ERROR} state was reached -// @param outerContext The is the \gamma_0 initial parser context from the paper -// or the parser stack at the instant before prediction commences. -// -// @return The value to return from {@link //adaptivePredict}, or -// {@link ATN//INVALID_ALT_NUMBER} if a suitable alternative was not -// identified and {@link //adaptivePredict} should report an error instead. -// -ParserATNSimulator.prototype.getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule = function(configs, outerContext) { - var cfgs = this.splitAccordingToSemanticValidity(configs, outerContext); - var semValidConfigs = cfgs[0]; - var semInvalidConfigs = cfgs[1]; - var alt = this.getAltThatFinishedDecisionEntryRule(semValidConfigs); - if (alt!==ATN.INVALID_ALT_NUMBER) { // semantically/syntactically viable path exists - return alt; + computeStartState(p, ctx, fullCtx) { + // always at least the implicit call to start rule + const initialContext = predictionContextFromRuleContext(this.atn, ctx); + const configs = new ATNConfigSet(fullCtx); + for(let i=0;i0) { - alt = this.getAltThatFinishedDecisionEntryRule(semInvalidConfigs); - if (alt!==ATN.INVALID_ALT_NUMBER) { // syntactically viable path exists + + /** + * This method transforms the start state computed by + * {@link //computeStartState} to the special start state used by a + * precedence DFA for a particular precedence value. The transformation + * process applies the following changes to the start state's configuration + * set. + * + *
              + *
            1. Evaluate the precedence predicates for each configuration using + * {@link SemanticContext//evalPrecedence}.
            2. + *
            3. Remove all configurations which predict an alternative greater than + * 1, for which another configuration that predicts alternative 1 is in the + * same ATN state with the same prediction context. This transformation is + * valid for the following reasons: + *
                + *
              • The closure block cannot contain any epsilon transitions which bypass + * the body of the closure, so all states reachable via alternative 1 are + * part of the precedence alternatives of the transformed left-recursive + * rule.
              • + *
              • The "primary" portion of a left recursive rule cannot contain an + * epsilon transition, so the only way an alternative other than 1 can exist + * in a state that is also reachable via alternative 1 is by nesting calls + * to the left-recursive rule, with the outer calls not being at the + * preferred precedence level.
              • + *
              + *
            4. + *
            + * + *

            + * The prediction context must be considered by this filter to address + * situations like the following. + *

            + * + *
            +     * grammar TA;
            +     * prog: statement* EOF;
            +     * statement: letterA | statement letterA 'b' ;
            +     * letterA: 'a';
            +     * 
            + *
            + *

            + * If the above grammar, the ATN state immediately before the token + * reference {@code 'a'} in {@code letterA} is reachable from the left edge + * of both the primary and closure blocks of the left-recursive rule + * {@code statement}. The prediction context associated with each of these + * configurations distinguishes between them, and prevents the alternative + * which stepped out to {@code prog} (and then back in to {@code statement} + * from being eliminated by the filter. + *

            + * + * @param configs The configuration set computed by + * {@link //computeStartState} as the start state for the DFA. + * @return The transformed configuration set representing the start state + * for a precedence DFA at a particular precedence level (determined by + * calling {@link Parser//getPrecedence}) + */ + applyPrecedenceFilter(configs) { + let config; + const statesFromAlt1 = []; + const configSet = new ATNConfigSet(configs.fullCtx); + for(let i=0; i1 + // (basically a graph subtraction algorithm). + if (!config.precedenceFilterSuppressed) { + const context = statesFromAlt1[config.state.stateNumber] || null; + if (context!==null && context.equals(config.context)) { + // eliminated + continue; + } + } + configSet.add(config, this.mergeCache); + } + return configSet; + } + + getReachableTarget(trans, ttype) { + if (trans.matches(ttype, 0, this.atn.maxTokenType)) { + return trans.target; + } else { + return null; + } + } + + getPredsForAmbigAlts(ambigAlts, configs, nalts) { + // REACH=[1|1|[]|0:0, 1|2|[]|0:1] + // altToPred starts as an array of all null contexts. The entry at index i + // corresponds to alternative i. altToPred[i] may have one of three values: + // 1. null: no ATNConfig c is found such that c.alt==i + // 2. SemanticContext.NONE: At least one ATNConfig c exists such that + // c.alt==i and c.semanticContext==SemanticContext.NONE. In other words, + // alt i has at least one unpredicated config. + // 3. Non-NONE Semantic Context: There exists at least one, and for all + // ATNConfig c such that c.alt==i, c.semanticContext!=SemanticContext.NONE. + // + // From this, it is clear that NONE||anything==NONE. + // + let altToPred = []; + for(let i=0;i + * The default implementation of this method uses the following + * algorithm to identify an ATN configuration which successfully parsed the + * decision entry rule. 
Choosing such an alternative ensures that the + * {@link ParserRuleContext} returned by the calling rule will be complete + * and valid, and the syntax error will be reported later at a more + * localized location.

            + * + *
              + *
            • If a syntactically valid path or paths reach the end of the decision rule and + * they are semantically valid if predicated, return the min associated alt.
            • + *
            • Else, if a semantically invalid but syntactically valid path exist + * or paths exist, return the minimum associated alt. + *
            • + *
            • Otherwise, return {@link ATN//INVALID_ALT_NUMBER}.
            • + *
            + * + *

            + * In some scenarios, the algorithm described above could predict an + * alternative which will result in a {@link FailedPredicateException} in + * the parser. Specifically, this could occur if the only configuration + * capable of successfully parsing to the end of the decision rule is + * blocked by a semantic predicate. By choosing this alternative within + * {@link //adaptivePredict} instead of throwing a + * {@link NoViableAltException}, the resulting + * {@link FailedPredicateException} in the parser will identify the specific + * predicate which is preventing the parser from successfully parsing the + * decision rule, which helps developers identify and correct logic errors + * in semantic predicates. + *

            + * + * @param configs The ATN configurations which were valid immediately before + * the {@link //ERROR} state was reached + * @param outerContext The is the \gamma_0 initial parser context from the paper + * or the parser stack at the instant before prediction commences. + * + * @return The value to return from {@link //adaptivePredict}, or + * {@link ATN//INVALID_ALT_NUMBER} if a suitable alternative was not + * identified and {@link //adaptivePredict} should report an error instead + */ + getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule(configs, outerContext) { + const cfgs = this.splitAccordingToSemanticValidity(configs, outerContext); + const semValidConfigs = cfgs[0]; + const semInvalidConfigs = cfgs[1]; + let alt = this.getAltThatFinishedDecisionEntryRule(semValidConfigs); + if (alt!==ATN.INVALID_ALT_NUMBER) { // semantically/syntactically viable path exists return alt; } - } - return ATN.INVALID_ALT_NUMBER; -}; - -ParserATNSimulator.prototype.getAltThatFinishedDecisionEntryRule = function(configs) { - var alts = []; - for(var i=0;i0 || ((c.state instanceof RuleStopState) && c.context.hasEmptyPath())) { - if(alts.indexOf(c.alt)<0) { - alts.push(c.alt); + // Is there a syntactically valid path with a failed pred? + if (semInvalidConfigs.items.length>0) { + alt = this.getAltThatFinishedDecisionEntryRule(semInvalidConfigs); + if (alt!==ATN.INVALID_ALT_NUMBER) { // syntactically viable path exists + return alt; } } - } - if (alts.length===0) { return ATN.INVALID_ALT_NUMBER; - } else { - return Math.min.apply(null, alts); } -}; -// Walk the list of configurations and split them according to -// those that have preds evaluating to true/false. If no pred, assume -// true pred and include in succeeded set. Returns Pair of sets. -// -// Create a new set so as not to alter the incoming parameter. -// -// Assumption: the input stream has been restored to the starting point -// prediction, which is where predicates need to evaluate. 
-// -ParserATNSimulator.prototype.splitAccordingToSemanticValidity = function( configs, outerContext) { - var succeeded = new ATNConfigSet(configs.fullCtx); - var failed = new ATNConfigSet(configs.fullCtx); - for(var i=0;i0 || ((c.state instanceof RuleStopState) && c.context.hasEmptyPath())) { + if(alts.indexOf(c.alt)<0) { + alts.push(c.alt); + } } - continue; } - var predicateEvaluationResult = pair.pred.evaluate(this.parser, outerContext); - if (this.debug || this.dfa_debug) { - console.log("eval pred " + pair + "=" + predicateEvaluationResult); - } - if (predicateEvaluationResult) { - if (this.debug || this.dfa_debug) { - console.log("PREDICT " + pair.alt); - } - predictions.add(pair.alt); - if (! complete) { - break; - } + if (alts.length===0) { + return ATN.INVALID_ALT_NUMBER; + } else { + return Math.min.apply(null, alts); } } - return predictions; -}; + + /** + * Walk the list of configurations and split them according to + * those that have preds evaluating to true/false. If no pred, assume + * true pred and include in succeeded set. Returns Pair of sets. + * + * Create a new set so as not to alter the incoming parameter. + * + * Assumption: the input stream has been restored to the starting point + * prediction, which is where predicates need to evaluate.*/ + splitAccordingToSemanticValidity( configs, outerContext) { + const succeeded = new ATNConfigSet(configs.fullCtx); + const failed = new ATNConfigSet(configs.fullCtx); + for(let i=0;i50) { - throw "problem"; - } + closure(config, configs, closureBusy, collectPredicates, fullCtx, treatEofAsEpsilon) { + const initialDepth = 0; + this.closureCheckingStopState(config, configs, closureBusy, collectPredicates, + fullCtx, initialDepth, treatEofAsEpsilon); } - if (config.state instanceof RuleStopState) { - // We hit rule end. If we have context info, use it - // run thru all possible stack tops in ctx - if (! 
config.context.isEmpty()) { - for ( var i =0; i50) { + throw "problem"; + } + } + if (config.state instanceof RuleStopState) { + // We hit rule end. If we have context info, use it + // run thru all possible stack tops in ctx + if (! config.context.isEmpty()) { + for (let i =0; i 0. + if (this._dfa !== null && this._dfa.precedenceDfa) { + if (t.outermostPrecedenceReturn === this._dfa.atnStartState.ruleIndex) { + c.precedenceFilterSuppressed = true; + } + } + + c.reachesIntoOuterContext += 1; + if (closureBusy.add(c)!==c) { + // avoid infinite recursion for right-recursive rules + continue; + } + configs.dipsIntoOuterContext = true; // TODO: can remove? only care when we add to set per middle of this method + newDepth -= 1; + if (this.debug) { + console.log("dips into outer ctx: " + c); + } + } else { + if (!t.isEpsilon && closureBusy.add(c)!==c){ + // avoid infinite recursion for EOF* and EOF+ + continue; + } + if (t instanceof RuleTransition) { + // latch when newDepth goes negative - once we step out of the entry context we can't return + if (newDepth >= 0) { + newDepth += 1; + } + } + } + this.closureCheckingStopState(c, configs, closureBusy, continueCollecting, fullCtx, newDepth, treatEofAsEpsilon); } } } - this.closure_(config, configs, closureBusy, collectPredicates, fullCtx, depth, treatEofAsEpsilon); -}; + canDropLoopEntryEdgeInLeftRecursiveRule(config) { + // return False + const p = config.state; + // First check to see if we are in StarLoopEntryState generated during + // left-recursion elimination. For efficiency, also check if + // the context has an empty stack case. If so, it would mean + // global FOLLOW so we can't perform optimization + // Are we the special loop entry/exit state? 
or SLL wildcard + if(p.stateType != ATNState.STAR_LOOP_ENTRY) + return false; + if(p.stateType != ATNState.STAR_LOOP_ENTRY || !p.isPrecedenceDecision || + config.context.isEmpty() || config.context.hasEmptyPath()) + return false; -// Do the actual work of walking epsilon edges// -ParserATNSimulator.prototype.closure_ = function(config, configs, closureBusy, collectPredicates, fullCtx, depth, treatEofAsEpsilon) { - var p = config.state; - // optimization - if (! p.epsilonOnlyTransitions) { - configs.add(config, this.mergeCache); - // make sure to not return here, because EOF transitions can act as - // both epsilon transitions and non-epsilon transitions. + // Require all return states to return back to the same rule that p is in. + const numCtxs = config.context.length; + for(let i=0; i 0. - if (this._dfa !== null && this._dfa.precedenceDfa) { - if (t.outermostPrecedenceReturn === this._dfa.atnStartState.ruleIndex) { - c.precedenceFilterSuppressed = true; - } - } + getRuleName(index) { + if (this.parser!==null && index>=0) { + return this.parser.ruleNames[index]; + } else { + return ""; + } + } - c.reachesIntoOuterContext += 1; - if (closureBusy.add(c)!==c) { - // avoid infinite recursion for right-recursive rules - continue; + getEpsilonTarget(config, t, collectPredicates, inContext, fullCtx, treatEofAsEpsilon) { + switch(t.serializationType) { + case Transition.RULE: + return this.ruleTransition(config, t); + case Transition.PRECEDENCE: + return this.precedenceTransition(config, t, collectPredicates, inContext, fullCtx); + case Transition.PREDICATE: + return this.predTransition(config, t, collectPredicates, inContext, fullCtx); + case Transition.ACTION: + return this.actionTransition(config, t); + case Transition.EPSILON: + return new ATNConfig({state:t.target}, config); + case Transition.ATOM: + case Transition.RANGE: + case Transition.SET: + // EOF transitions act like epsilon transitions after the first EOF + // transition is traversed + if (treatEofAsEpsilon) 
{ + if (t.matches(Token.EOF, 0, 1)) { + return new ATNConfig({state: t.target}, config); } - configs.dipsIntoOuterContext = true; // TODO: can remove? only care when we add to set per middle of this method - newDepth -= 1; - if (this.debug) { - console.log("dips into outer ctx: " + c); + } + return null; + default: + return null; + } + } + + actionTransition(config, t) { + if (this.debug) { + const index = t.actionIndex==-1 ? 65535 : t.actionIndex; + console.log("ACTION edge " + t.ruleIndex + ":" + index); + } + return new ATNConfig({state:t.target}, config); + } + + precedenceTransition(config, pt, collectPredicates, inContext, fullCtx) { + if (this.debug) { + console.log("PRED (collectPredicates=" + collectPredicates + ") " + + pt.precedence + ">=_p, ctx dependent=true"); + if (this.parser!==null) { + console.log("context surrounding pred is " + Utils.arrayToString(this.parser.getRuleInvocationStack())); + } + } + let c = null; + if (collectPredicates && inContext) { + if (fullCtx) { + // In full context mode, we can evaluate predicates on-the-fly + // during closure, which dramatically reduces the size of + // the config sets. It also obviates the need to test predicates + // later during conflict resolution. 
+ const currentPosition = this._input.index; + this._input.seek(this._startIndex); + const predSucceeds = pt.getPredicate().evaluate(this.parser, this._outerContext); + this._input.seek(currentPosition); + if (predSucceeds) { + c = new ATNConfig({state:pt.target}, config); // no pred context } } else { - if (!t.isEpsilon && closureBusy.add(c)!==c){ - // avoid infinite recursion for EOF* and EOF+ - continue; + const newSemCtx = SemanticContext.andContext(config.semanticContext, pt.getPredicate()); + c = new ATNConfig({state:pt.target, semanticContext:newSemCtx}, config); + } + } else { + c = new ATNConfig({state:pt.target}, config); + } + if (this.debug) { + console.log("config from pred transition=" + c); + } + return c; + } + + predTransition(config, pt, collectPredicates, inContext, fullCtx) { + if (this.debug) { + console.log("PRED (collectPredicates=" + collectPredicates + ") " + pt.ruleIndex + + ":" + pt.predIndex + ", ctx dependent=" + pt.isCtxDependent); + if (this.parser!==null) { + console.log("context surrounding pred is " + Utils.arrayToString(this.parser.getRuleInvocationStack())); + } + } + let c = null; + if (collectPredicates && ((pt.isCtxDependent && inContext) || ! pt.isCtxDependent)) { + if (fullCtx) { + // In full context mode, we can evaluate predicates on-the-fly + // during closure, which dramatically reduces the size of + // the config sets. It also obviates the need to test predicates + // later during conflict resolution. 
+ const currentPosition = this._input.index; + this._input.seek(this._startIndex); + const predSucceeds = pt.getPredicate().evaluate(this.parser, this._outerContext); + this._input.seek(currentPosition); + if (predSucceeds) { + c = new ATNConfig({state:pt.target}, config); // no pred context } - if (t instanceof RuleTransition) { - // latch when newDepth goes negative - once we step out of the entry context we can't return - if (newDepth >= 0) { - newDepth += 1; - } + } else { + const newSemCtx = SemanticContext.andContext(config.semanticContext, pt.getPredicate()); + c = new ATNConfig({state:pt.target, semanticContext:newSemCtx}, config); + } + } else { + c = new ATNConfig({state:pt.target}, config); + } + if (this.debug) { + console.log("config from pred transition=" + c); + } + return c; + } + + ruleTransition(config, t) { + if (this.debug) { + console.log("CALL rule " + this.getRuleName(t.target.ruleIndex) + ", ctx=" + config.context); + } + const returnState = t.followState; + const newContext = SingletonPredictionContext.create(config.context, returnState.stateNumber); + return new ATNConfig({state:t.target, context:newContext}, config ); + } + + getConflictingAlts(configs) { + const altsets = PredictionMode.getConflictingAltSubsets(configs); + return PredictionMode.getAlts(altsets); + } + + /** + * Sam pointed out a problem with the previous definition, v3, of + * ambiguous states. If we have another state associated with conflicting + * alternatives, we should keep going. For example, the following grammar + * + * s : (ID | ID ID?) ';' ; + * + * When the ATN simulation reaches the state before ';', it has a DFA + * state that looks like: [12|1|[], 6|2|[], 12|2|[]]. Naturally + * 12|1|[] and 12|2|[] conflict, but we cannot stop processing this node + * because alternative to has another way to continue, via [6|2|[]]. 
+ * The key is that we have a single state that has config's only associated + * with a single alternative, 2, and crucially the state transitions + * among the configurations are all non-epsilon transitions. That means + * we don't consider any conflicts that include alternative 2. So, we + * ignore the conflict between alts 1 and 2. We ignore a set of + * conflicting alts when there is an intersection with an alternative + * associated with a single alt state in the state→config-list map. + * + * It's also the case that we might have two conflicting configurations but + * also a 3rd nonconflicting configuration for a different alternative: + * [1|1|[], 1|2|[], 8|3|[]]. This can come about from grammar: + * + * a : A | A | A B ; + * + * After matching input A, we reach the stop state for rule A, state 1. + * State 8 is the state right before B. Clearly alternatives 1 and 2 + * conflict and no amount of further lookahead will separate the two. + * However, alternative 3 will be able to continue and so we do not + * stop working on this state. In the previous example, we're concerned + * with states associated with the conflicting alternatives. Here alt + * 3 is not associated with the conflicting configs, but since we can continue + * looking for input reasonably, I don't declare the state done. 
We + * ignore a set of conflicting alts when we have an alternative + * that we still need to pursue + */ + getConflictingAltsOrUniqueAlt(configs) { + let conflictingAlts = null; + if (configs.uniqueAlt!== ATN.INVALID_ALT_NUMBER) { + conflictingAlts = new BitSet(); + conflictingAlts.add(configs.uniqueAlt); + } else { + conflictingAlts = configs.conflictingAlts; + } + return conflictingAlts; + } + + getTokenName(t) { + if (t===Token.EOF) { + return "EOF"; + } + if( this.parser!==null && this.parser.literalNames!==null) { + if (t >= this.parser.literalNames.length && t >= this.parser.symbolicNames.length) { + console.log("" + t + " ttype out of range: " + this.parser.literalNames); + console.log("" + this.parser.getInputStream().getTokens()); + } else { + const name = this.parser.literalNames[t] || this.parser.symbolicNames[t]; + return name + "<" + t + ">"; + } + } + return "" + t; + } + + getLookaheadName(input) { + return this.getTokenName(input.LA(1)); + } + + /** + * Used for debugging in adaptivePredict around execATN but I cut + * it out for clarity now that alg. works well. We can leave this + * "dead" code for a bit + */ + dumpDeadEndConfigs(nvae) { + console.log("dead end configs: "); + const decs = nvae.getDeadEndConfigs(); + for(let i=0; i0) { + const t = c.state.transitions[0]; + if (t instanceof AtomTransition) { + trans = "Atom "+ this.getTokenName(t.label); + } else if (t instanceof SetTransition) { + const neg = (t instanceof NotSetTransition); + trans = (neg ? "~" : "") + "Set " + t.set; } } - this.closureCheckingStopState(c, configs, closureBusy, continueCollecting, fullCtx, newDepth, treatEofAsEpsilon); + console.error(c.toString(this.parser, true) + ":" + trans); } } -}; - -ParserATNSimulator.prototype.canDropLoopEntryEdgeInLeftRecursiveRule = function(config) { - // return False - var p = config.state; - // First check to see if we are in StarLoopEntryState generated during - // left-recursion elimination. 
For efficiency, also check if - // the context has an empty stack case. If so, it would mean - // global FOLLOW so we can't perform optimization - // Are we the special loop entry/exit state? or SLL wildcard - if(p.stateType != ATNState.STAR_LOOP_ENTRY) - return false; - if(p.stateType != ATNState.STAR_LOOP_ENTRY || !p.isPrecedenceDecision || - config.context.isEmpty() || config.context.hasEmptyPath()) - return false; - - // Require all return states to return back to the same rule that p is in. - var numCtxs = config.context.length; - for(var i=0; i=0) { - return this.parser.ruleNames[index]; - } else { - return ""; - } -}; - -ParserATNSimulator.prototype.getEpsilonTarget = function(config, t, collectPredicates, inContext, fullCtx, treatEofAsEpsilon) { - switch(t.serializationType) { - case Transition.RULE: - return this.ruleTransition(config, t); - case Transition.PRECEDENCE: - return this.precedenceTransition(config, t, collectPredicates, inContext, fullCtx); - case Transition.PREDICATE: - return this.predTransition(config, t, collectPredicates, inContext, fullCtx); - case Transition.ACTION: - return this.actionTransition(config, t); - case Transition.EPSILON: - return new ATNConfig({state:t.target}, config); - case Transition.ATOM: - case Transition.RANGE: - case Transition.SET: - // EOF transitions act like epsilon transitions after the first EOF - // transition is traversed - if (treatEofAsEpsilon) { - if (t.matches(Token.EOF, 0, 1)) { - return new ATNConfig({state: t.target}, config); + getUniqueAlt(configs) { + let alt = ATN.INVALID_ALT_NUMBER; + for(let i=0;i=_p, ctx dependent=true"); - if (this.parser!==null) { - console.log("context surrounding pred is " + Utils.arrayToString(this.parser.getRuleInvocationStack())); + /** + * Add an edge to the DFA, if possible. This method calls + * {@link //addDFAState} to ensure the {@code to} state is present in the + * DFA. 
If {@code from} is {@code null}, or if {@code t} is outside the + * range of edges that can be represented in the DFA tables, this method + * returns without adding the edge to the DFA. + * + *

            If {@code to} is {@code null}, this method returns {@code null}. + * Otherwise, this method returns the {@link DFAState} returned by calling + * {@link //addDFAState} for the {@code to} state.

            + * + * @param dfa The DFA + * @param from_ The source state for the edge + * @param t The input symbol + * @param to The target state for the edge + * + * @return If {@code to} is {@code null}, this method returns {@code null}; + * otherwise this method returns the result of calling {@link //addDFAState} + * on {@code to} + */ + addDFAEdge(dfa, from_, t, to) { + if( this.debug) { + console.log("EDGE " + from_ + " -> " + to + " upon " + this.getTokenName(t)); } - } - var c = null; - if (collectPredicates && inContext) { - if (fullCtx) { - // In full context mode, we can evaluate predicates on-the-fly - // during closure, which dramatically reduces the size of - // the config sets. It also obviates the need to test predicates - // later during conflict resolution. - var currentPosition = this._input.index; - this._input.seek(this._startIndex); - var predSucceeds = pt.getPredicate().evaluate(this.parser, this._outerContext); - this._input.seek(currentPosition); - if (predSucceeds) { - c = new ATNConfig({state:pt.target}, config); // no pred context - } - } else { - var newSemCtx = SemanticContext.andContext(config.semanticContext, pt.getPredicate()); - c = new ATNConfig({state:pt.target, semanticContext:newSemCtx}, config); + if (to===null) { + return null; } - } else { - c = new ATNConfig({state:pt.target}, config); - } - if (this.debug) { - console.log("config from pred transition=" + c); - } - return c; -}; - -ParserATNSimulator.prototype.predTransition = function(config, pt, collectPredicates, inContext, fullCtx) { - if (this.debug) { - console.log("PRED (collectPredicates=" + collectPredicates + ") " + pt.ruleIndex + - ":" + pt.predIndex + ", ctx dependent=" + pt.isCtxDependent); - if (this.parser!==null) { - console.log("context surrounding pred is " + Utils.arrayToString(this.parser.getRuleInvocationStack())); + to = this.addDFAState(dfa, to); // used existing if possible not incoming + if (from_===null || t < -1 || t > this.atn.maxTokenType) { + 
return to; } - } - var c = null; - if (collectPredicates && ((pt.isCtxDependent && inContext) || ! pt.isCtxDependent)) { - if (fullCtx) { - // In full context mode, we can evaluate predicates on-the-fly - // during closure, which dramatically reduces the size of - // the config sets. It also obviates the need to test predicates - // later during conflict resolution. - var currentPosition = this._input.index; - this._input.seek(this._startIndex); - var predSucceeds = pt.getPredicate().evaluate(this.parser, this._outerContext); - this._input.seek(currentPosition); - if (predSucceeds) { - c = new ATNConfig({state:pt.target}, config); // no pred context - } - } else { - var newSemCtx = SemanticContext.andContext(config.semanticContext, pt.getPredicate()); - c = new ATNConfig({state:pt.target, semanticContext:newSemCtx}, config); + if (from_.edges===null) { + from_.edges = []; } - } else { - c = new ATNConfig({state:pt.target}, config); - } - if (this.debug) { - console.log("config from pred transition=" + c); - } - return c; -}; + from_.edges[t+1] = to; // connect -ParserATNSimulator.prototype.ruleTransition = function(config, t) { - if (this.debug) { - console.log("CALL rule " + this.getRuleName(t.target.ruleIndex) + ", ctx=" + config.context); - } - var returnState = t.followState; - var newContext = SingletonPredictionContext.create(config.context, returnState.stateNumber); - return new ATNConfig({state:t.target, context:newContext}, config ); -}; - -ParserATNSimulator.prototype.getConflictingAlts = function(configs) { - var altsets = PredictionMode.getConflictingAltSubsets(configs); - return PredictionMode.getAlts(altsets); -}; - - // Sam pointed out a problem with the previous definition, v3, of - // ambiguous states. If we have another state associated with conflicting - // alternatives, we should keep going. For example, the following grammar - // - // s : (ID | ID ID?) 
';' ; - // - // When the ATN simulation reaches the state before ';', it has a DFA - // state that looks like: [12|1|[], 6|2|[], 12|2|[]]. Naturally - // 12|1|[] and 12|2|[] conflict, but we cannot stop processing this node - // because alternative to has another way to continue, via [6|2|[]]. - // The key is that we have a single state that has config's only associated - // with a single alternative, 2, and crucially the state transitions - // among the configurations are all non-epsilon transitions. That means - // we don't consider any conflicts that include alternative 2. So, we - // ignore the conflict between alts 1 and 2. We ignore a set of - // conflicting alts when there is an intersection with an alternative - // associated with a single alt state in the state→config-list map. - // - // It's also the case that we might have two conflicting configurations but - // also a 3rd nonconflicting configuration for a different alternative: - // [1|1|[], 1|2|[], 8|3|[]]. This can come about from grammar: - // - // a : A | A | A B ; - // - // After matching input A, we reach the stop state for rule A, state 1. - // State 8 is the state right before B. Clearly alternatives 1 and 2 - // conflict and no amount of further lookahead will separate the two. - // However, alternative 3 will be able to continue and so we do not - // stop working on this state. In the previous example, we're concerned - // with states associated with the conflicting alternatives. Here alt - // 3 is not associated with the conflicting configs, but since we can continue - // looking for input reasonably, I don't declare the state done. We - // ignore a set of conflicting alts when we have an alternative - // that we still need to pursue. 
-// - -ParserATNSimulator.prototype.getConflictingAltsOrUniqueAlt = function(configs) { - var conflictingAlts = null; - if (configs.uniqueAlt!== ATN.INVALID_ALT_NUMBER) { - conflictingAlts = new BitSet(); - conflictingAlts.add(configs.uniqueAlt); - } else { - conflictingAlts = configs.conflictingAlts; - } - return conflictingAlts; -}; - -ParserATNSimulator.prototype.getTokenName = function( t) { - if (t===Token.EOF) { - return "EOF"; - } - if( this.parser!==null && this.parser.literalNames!==null) { - if (t >= this.parser.literalNames.length && t >= this.parser.symbolicNames.length) { - console.log("" + t + " ttype out of range: " + this.parser.literalNames); - console.log("" + this.parser.getInputStream().getTokens()); - } else { - var name = this.parser.literalNames[t] || this.parser.symbolicNames[t]; - return name + "<" + t + ">"; + if (this.debug) { + const literalNames = this.parser===null ? null : this.parser.literalNames; + const symbolicNames = this.parser===null ? null : this.parser.symbolicNames; + console.log("DFA=\n" + dfa.toString(literalNames, symbolicNames)); } - } - return "" + t; -}; - -ParserATNSimulator.prototype.getLookaheadName = function(input) { - return this.getTokenName(input.LA(1)); -}; - -// Used for debugging in adaptivePredict around execATN but I cut -// it out for clarity now that alg. works well. We can leave this -// "dead" code for a bit. -// -ParserATNSimulator.prototype.dumpDeadEndConfigs = function(nvae) { - console.log("dead end configs: "); - var decs = nvae.getDeadEndConfigs(); - for(var i=0; i0) { - var t = c.state.transitions[0]; - if (t instanceof AtomTransition) { - trans = "Atom "+ this.getTokenName(t.label); - } else if (t instanceof SetTransition) { - var neg = (t instanceof NotSetTransition); - trans = (neg ? 
"~" : "") + "Set " + t.set; - } - } - console.error(c.toString(this.parser, true) + ":" + trans); - } -}; - -ParserATNSimulator.prototype.noViableAlt = function(input, outerContext, configs, startIndex) { - return new NoViableAltException(this.parser, input, input.get(startIndex), input.LT(1), configs, outerContext); -}; - -ParserATNSimulator.prototype.getUniqueAlt = function(configs) { - var alt = ATN.INVALID_ALT_NUMBER; - for(var i=0;iIf {@code to} is {@code null}, this method returns {@code null}. -// Otherwise, this method returns the {@link DFAState} returned by calling -// {@link //addDFAState} for the {@code to} state.

            -// -// @param dfa The DFA -// @param from The source state for the edge -// @param t The input symbol -// @param to The target state for the edge -// -// @return If {@code to} is {@code null}, this method returns {@code null}; -// otherwise this method returns the result of calling {@link //addDFAState} -// on {@code to} -// -ParserATNSimulator.prototype.addDFAEdge = function(dfa, from_, t, to) { - if( this.debug) { - console.log("EDGE " + from_ + " -> " + to + " upon " + this.getTokenName(t)); - } - if (to===null) { - return null; - } - to = this.addDFAState(dfa, to); // used existing if possible not incoming - if (from_===null || t < -1 || t > this.atn.maxTokenType) { return to; } - if (from_.edges===null) { - from_.edges = []; - } - from_.edges[t+1] = to; // connect - if (this.debug) { - var literalNames = this.parser===null ? null : this.parser.literalNames; - var symbolicNames = this.parser===null ? null : this.parser.symbolicNames; - console.log("DFA=\n" + dfa.toString(literalNames, symbolicNames)); - } - return to; -}; -// -// Add state {@code D} to the DFA if it is not already present, and return -// the actual instance stored in the DFA. If a state equivalent to {@code D} -// is already in the DFA, the existing state is returned. Otherwise this -// method returns {@code D} after adding it to the DFA. -// -//

            If {@code D} is {@link //ERROR}, this method returns {@link //ERROR} and -// does not change the DFA.

            -// -// @param dfa The dfa -// @param D The DFA state to add -// @return The state stored in the DFA. This will be either the existing -// state if {@code D} is already in the DFA, or {@code D} itself if the -// state was not already present. -// -ParserATNSimulator.prototype.addDFAState = function(dfa, D) { - if (D == ATNSimulator.ERROR) { + /** + * Add state {@code D} to the DFA if it is not already present, and return + * the actual instance stored in the DFA. If a state equivalent to {@code D} + * is already in the DFA, the existing state is returned. Otherwise this + * method returns {@code D} after adding it to the DFA. + * + *

            If {@code D} is {@link //ERROR}, this method returns {@link //ERROR} and + * does not change the DFA.

            + * + * @param dfa The dfa + * @param D The DFA state to add + * @return The state stored in the DFA. This will be either the existing + * state if {@code D} is already in the DFA, or {@code D} itself if the + * state was not already present + */ + addDFAState(dfa, D) { + if (D == ATNSimulator.ERROR) { + return D; + } + const existing = dfa.states.get(D); + if(existing!==null) { + return existing; + } + D.stateNumber = dfa.states.length; + if (! D.configs.readOnly) { + D.configs.optimizeConfigs(this); + D.configs.setReadonly(true); + } + dfa.states.add(D); + if (this.debug) { + console.log("adding new DFA state: " + D); + } return D; } - var existing = dfa.states.get(D); - if(existing!==null) { - return existing; - } - D.stateNumber = dfa.states.length; - if (! D.configs.readOnly) { - D.configs.optimizeConfigs(this); - D.configs.setReadonly(true); - } - dfa.states.add(D); - if (this.debug) { - console.log("adding new DFA state: " + D); - } - return D; -}; -ParserATNSimulator.prototype.reportAttemptingFullContext = function(dfa, conflictingAlts, configs, startIndex, stopIndex) { - if (this.debug || this.retry_debug) { - var interval = new Interval(startIndex, stopIndex + 1); - console.log("reportAttemptingFullContext decision=" + dfa.decision + ":" + configs + - ", input=" + this.parser.getTokenStream().getText(interval)); + reportAttemptingFullContext(dfa, conflictingAlts, configs, startIndex, stopIndex) { + if (this.debug || this.retry_debug) { + const interval = new Interval(startIndex, stopIndex + 1); + console.log("reportAttemptingFullContext decision=" + dfa.decision + ":" + configs + + ", input=" + this.parser.getTokenStream().getText(interval)); + } + if (this.parser!==null) { + this.parser.getErrorListenerDispatch().reportAttemptingFullContext(this.parser, dfa, startIndex, stopIndex, conflictingAlts, configs); + } } - if (this.parser!==null) { - this.parser.getErrorListenerDispatch().reportAttemptingFullContext(this.parser, dfa, startIndex, 
stopIndex, conflictingAlts, configs); - } -}; -ParserATNSimulator.prototype.reportContextSensitivity = function(dfa, prediction, configs, startIndex, stopIndex) { - if (this.debug || this.retry_debug) { - var interval = new Interval(startIndex, stopIndex + 1); - console.log("reportContextSensitivity decision=" + dfa.decision + ":" + configs + - ", input=" + this.parser.getTokenStream().getText(interval)); + reportContextSensitivity(dfa, prediction, configs, startIndex, stopIndex) { + if (this.debug || this.retry_debug) { + const interval = new Interval(startIndex, stopIndex + 1); + console.log("reportContextSensitivity decision=" + dfa.decision + ":" + configs + + ", input=" + this.parser.getTokenStream().getText(interval)); + } + if (this.parser!==null) { + this.parser.getErrorListenerDispatch().reportContextSensitivity(this.parser, dfa, startIndex, stopIndex, prediction, configs); + } } - if (this.parser!==null) { - this.parser.getErrorListenerDispatch().reportContextSensitivity(this.parser, dfa, startIndex, stopIndex, prediction, configs); - } -}; -// If context sensitive parsing, we know it's ambiguity not conflict// -ParserATNSimulator.prototype.reportAmbiguity = function(dfa, D, startIndex, stopIndex, - exact, ambigAlts, configs ) { - if (this.debug || this.retry_debug) { - var interval = new Interval(startIndex, stopIndex + 1); - console.log("reportAmbiguity " + ambigAlts + ":" + configs + - ", input=" + this.parser.getTokenStream().getText(interval)); + // If context sensitive parsing, we know it's ambiguity not conflict// + reportAmbiguity(dfa, D, startIndex, stopIndex, + exact, ambigAlts, configs ) { + if (this.debug || this.retry_debug) { + const interval = new Interval(startIndex, stopIndex + 1); + console.log("reportAmbiguity " + ambigAlts + ":" + configs + + ", input=" + this.parser.getTokenStream().getText(interval)); + } + if (this.parser!==null) { + this.parser.getErrorListenerDispatch().reportAmbiguity(this.parser, dfa, startIndex, stopIndex, 
exact, ambigAlts, configs); + } } - if (this.parser!==null) { - this.parser.getErrorListenerDispatch().reportAmbiguity(this.parser, dfa, startIndex, stopIndex, exact, ambigAlts, configs); - } -}; +} -exports.ParserATNSimulator = ParserATNSimulator; \ No newline at end of file +module.exports = ParserATNSimulator; diff --git a/runtime/JavaScript/src/antlr4/atn/PredictionMode.js b/runtime/JavaScript/src/antlr4/atn/PredictionMode.js index 217a533d5..e88cdbbb0 100644 --- a/runtime/JavaScript/src/antlr4/atn/PredictionMode.js +++ b/runtime/JavaScript/src/antlr4/atn/PredictionMode.js @@ -1,559 +1,562 @@ -// /* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. * Use of this file is governed by the BSD 3-clause license that * can be found in the LICENSE.txt file in the project root. */ -// -// -// This enumeration defines the prediction modes available in ANTLR 4 along with -// utility methods for analyzing configuration sets for conflicts and/or -// ambiguities. -var Set = require('./../Utils').Set; -var Map = require('./../Utils').Map; -var BitSet = require('./../Utils').BitSet; -var AltDict = require('./../Utils').AltDict; -var ATN = require('./ATN').ATN; -var RuleStopState = require('./ATNState').RuleStopState; -var ATNConfigSet = require('./ATNConfigSet').ATNConfigSet; -var ATNConfig = require('./ATNConfig').ATNConfig; -var SemanticContext = require('./SemanticContext').SemanticContext; -var Hash = require("../Utils").Hash; -var hashStuff = require('./../Utils').hashStuff; -var equalArrays = require('./../Utils').equalArrays; +const {Map, BitSet, AltDict, hashStuff} = require('./../Utils'); +const ATN = require('./ATN'); +const {RuleStopState} = require('./ATNState'); +const {ATNConfigSet} = require('./ATNConfigSet'); +const {ATNConfig} = require('./ATNConfig'); +const {SemanticContext} = require('./SemanticContext'); -function PredictionMode() { - return this; -} +/** + * This enumeration defines the prediction modes available in ANTLR 4 along with + * 
utility methods for analyzing configuration sets for conflicts and/or + * ambiguities. + */ +const PredictionMode = { + /** + * The SLL(*) prediction mode. This prediction mode ignores the current + * parser context when making predictions. This is the fastest prediction + * mode, and provides correct results for many grammars. This prediction + * mode is more powerful than the prediction mode provided by ANTLR 3, but + * may result in syntax errors for grammar and input combinations which are + * not SLL. + * + *

            + * When using this prediction mode, the parser will either return a correct + * parse tree (i.e. the same parse tree that would be returned with the + * {@link //LL} prediction mode), or it will report a syntax error. If a + * syntax error is encountered when using the {@link //SLL} prediction mode, + * it may be due to either an actual syntax error in the input or indicate + * that the particular combination of grammar and input requires the more + * powerful {@link //LL} prediction abilities to complete successfully.

            + * + *

            + * This prediction mode does not provide any guarantees for prediction + * behavior for syntactically-incorrect inputs.

            + */ + SLL: 0, -// -// The SLL(*) prediction mode. This prediction mode ignores the current -// parser context when making predictions. This is the fastest prediction -// mode, and provides correct results for many grammars. This prediction -// mode is more powerful than the prediction mode provided by ANTLR 3, but -// may result in syntax errors for grammar and input combinations which are -// not SLL. -// -//

            -// When using this prediction mode, the parser will either return a correct -// parse tree (i.e. the same parse tree that would be returned with the -// {@link //LL} prediction mode), or it will report a syntax error. If a -// syntax error is encountered when using the {@link //SLL} prediction mode, -// it may be due to either an actual syntax error in the input or indicate -// that the particular combination of grammar and input requires the more -// powerful {@link //LL} prediction abilities to complete successfully.

            -// -//

            -// This prediction mode does not provide any guarantees for prediction -// behavior for syntactically-incorrect inputs.

            -// -PredictionMode.SLL = 0; -// -// The LL(*) prediction mode. This prediction mode allows the current parser -// context to be used for resolving SLL conflicts that occur during -// prediction. This is the fastest prediction mode that guarantees correct -// parse results for all combinations of grammars with syntactically correct -// inputs. -// -//

            -// When using this prediction mode, the parser will make correct decisions -// for all syntactically-correct grammar and input combinations. However, in -// cases where the grammar is truly ambiguous this prediction mode might not -// report a precise answer for exactly which alternatives are -// ambiguous.

            -// -//

            -// This prediction mode does not provide any guarantees for prediction -// behavior for syntactically-incorrect inputs.

            -// -PredictionMode.LL = 1; -// -// The LL(*) prediction mode with exact ambiguity detection. In addition to -// the correctness guarantees provided by the {@link //LL} prediction mode, -// this prediction mode instructs the prediction algorithm to determine the -// complete and exact set of ambiguous alternatives for every ambiguous -// decision encountered while parsing. -// -//

            -// This prediction mode may be used for diagnosing ambiguities during -// grammar development. Due to the performance overhead of calculating sets -// of ambiguous alternatives, this prediction mode should be avoided when -// the exact results are not necessary.

            -// -//

            -// This prediction mode does not provide any guarantees for prediction -// behavior for syntactically-incorrect inputs.

            -// -PredictionMode.LL_EXACT_AMBIG_DETECTION = 2; + /** + * The LL(*) prediction mode. This prediction mode allows the current parser + * context to be used for resolving SLL conflicts that occur during + * prediction. This is the fastest prediction mode that guarantees correct + * parse results for all combinations of grammars with syntactically correct + * inputs. + * + *

            + * When using this prediction mode, the parser will make correct decisions + * for all syntactically-correct grammar and input combinations. However, in + * cases where the grammar is truly ambiguous this prediction mode might not + * report a precise answer for exactly which alternatives are + * ambiguous.

            + * + *

            + * This prediction mode does not provide any guarantees for prediction + * behavior for syntactically-incorrect inputs.

            + */ + LL: 1, + /** + * + * The LL(*) prediction mode with exact ambiguity detection. In addition to + * the correctness guarantees provided by the {@link //LL} prediction mode, + * this prediction mode instructs the prediction algorithm to determine the + * complete and exact set of ambiguous alternatives for every ambiguous + * decision encountered while parsing. + * + *

            + * This prediction mode may be used for diagnosing ambiguities during + * grammar development. Due to the performance overhead of calculating sets + * of ambiguous alternatives, this prediction mode should be avoided when + * the exact results are not necessary.

            + * + *

            + * This prediction mode does not provide any guarantees for prediction + * behavior for syntactically-incorrect inputs.

            + */ + LL_EXACT_AMBIG_DETECTION: 2, -// -// Computes the SLL prediction termination condition. -// -//

            -// This method computes the SLL prediction termination condition for both of -// the following cases.

            -// -//
              -//
            • The usual SLL+LL fallback upon SLL conflict
            • -//
            • Pure SLL without LL fallback
            • -//
            -// -//

            COMBINED SLL+LL PARSING

            -// -//

            When LL-fallback is enabled upon SLL conflict, correct predictions are -// ensured regardless of how the termination condition is computed by this -// method. Due to the substantially higher cost of LL prediction, the -// prediction should only fall back to LL when the additional lookahead -// cannot lead to a unique SLL prediction.

            -// -//

            Assuming combined SLL+LL parsing, an SLL configuration set with only -// conflicting subsets should fall back to full LL, even if the -// configuration sets don't resolve to the same alternative (e.g. -// {@code {1,2}} and {@code {3,4}}. If there is at least one non-conflicting -// configuration, SLL could continue with the hopes that more lookahead will -// resolve via one of those non-conflicting configurations.

            -// -//

            Here's the prediction termination rule them: SLL (for SLL+LL parsing) -// stops when it sees only conflicting configuration subsets. In contrast, -// full LL keeps going when there is uncertainty.

            -// -//

            HEURISTIC

            -// -//

            As a heuristic, we stop prediction when we see any conflicting subset -// unless we see a state that only has one alternative associated with it. -// The single-alt-state thing lets prediction continue upon rules like -// (otherwise, it would admit defeat too soon):

            -// -//

            {@code [12|1|[], 6|2|[], 12|2|[]]. s : (ID | ID ID?) ';' ;}

            -// -//

            When the ATN simulation reaches the state before {@code ';'}, it has a -// DFA state that looks like: {@code [12|1|[], 6|2|[], 12|2|[]]}. Naturally -// {@code 12|1|[]} and {@code 12|2|[]} conflict, but we cannot stop -// processing this node because alternative to has another way to continue, -// via {@code [6|2|[]]}.

            -// -//

            It also let's us continue for this rule:

            -// -//

            {@code [1|1|[], 1|2|[], 8|3|[]] a : A | A | A B ;}

            -// -//

            After matching input A, we reach the stop state for rule A, state 1. -// State 8 is the state right before B. Clearly alternatives 1 and 2 -// conflict and no amount of further lookahead will separate the two. -// However, alternative 3 will be able to continue and so we do not stop -// working on this state. In the previous example, we're concerned with -// states associated with the conflicting alternatives. Here alt 3 is not -// associated with the conflicting configs, but since we can continue -// looking for input reasonably, don't declare the state done.

            -// -//

            PURE SLL PARSING

            -// -//

            To handle pure SLL parsing, all we have to do is make sure that we -// combine stack contexts for configurations that differ only by semantic -// predicate. From there, we can do the usual SLL termination heuristic.

            -// -//

            PREDICATES IN SLL+LL PARSING

            -// -//

            SLL decisions don't evaluate predicates until after they reach DFA stop -// states because they need to create the DFA cache that works in all -// semantic situations. In contrast, full LL evaluates predicates collected -// during start state computation so it can ignore predicates thereafter. -// This means that SLL termination detection can totally ignore semantic -// predicates.

            -// -//

            Implementation-wise, {@link ATNConfigSet} combines stack contexts but not -// semantic predicate contexts so we might see two configurations like the -// following.

            -// -//

            {@code (s, 1, x, {}), (s, 1, x', {p})}

            -// -//

            Before testing these configurations against others, we have to merge -// {@code x} and {@code x'} (without modifying the existing configurations). -// For example, we test {@code (x+x')==x''} when looking for conflicts in -// the following configurations.

            -// -//

            {@code (s, 1, x, {}), (s, 1, x', {p}), (s, 2, x'', {})}

            -// -//

            If the configuration set has predicates (as indicated by -// {@link ATNConfigSet//hasSemanticContext}), this algorithm makes a copy of -// the configurations to strip out all of the predicates so that a standard -// {@link ATNConfigSet} will merge everything ignoring predicates.

            -// -PredictionMode.hasSLLConflictTerminatingPrediction = function( mode, configs) { - // Configs in rule stop states indicate reaching the end of the decision - // rule (local context) or end of start rule (full context). If all - // configs meet this condition, then none of the configurations is able - // to match additional input so we terminate prediction. - // - if (PredictionMode.allConfigsInRuleStopStates(configs)) { - return true; - } - // pure SLL mode parsing - if (mode === PredictionMode.SLL) { - // Don't bother with combining configs from different semantic - // contexts if we can fail over to full LL; costs more time - // since we'll often fail over anyway. - if (configs.hasSemanticContext) { - // dup configs, tossing out semantic predicates - var dup = new ATNConfigSet(); - for(var i=0;i + * This method computes the SLL prediction termination condition for both of + * the following cases.

            + * + *
              + *
            • The usual SLL+LL fallback upon SLL conflict
            • + *
            • Pure SLL without LL fallback
            • + *
            + * + *

            COMBINED SLL+LL PARSING

            + * + *

            When LL-fallback is enabled upon SLL conflict, correct predictions are + * ensured regardless of how the termination condition is computed by this + * method. Due to the substantially higher cost of LL prediction, the + * prediction should only fall back to LL when the additional lookahead + * cannot lead to a unique SLL prediction.

            + * + *

            Assuming combined SLL+LL parsing, an SLL configuration set with only + * conflicting subsets should fall back to full LL, even if the + * configuration sets don't resolve to the same alternative (e.g. + * {@code {1,2}} and {@code {3,4}}. If there is at least one non-conflicting + * configuration, SLL could continue with the hopes that more lookahead will + * resolve via one of those non-conflicting configurations.

            + * + *

            Here's the prediction termination rule them: SLL (for SLL+LL parsing) + * stops when it sees only conflicting configuration subsets. In contrast, + * full LL keeps going when there is uncertainty.

            + * + *

            HEURISTIC

            + * + *

            As a heuristic, we stop prediction when we see any conflicting subset + * unless we see a state that only has one alternative associated with it. + * The single-alt-state thing lets prediction continue upon rules like + * (otherwise, it would admit defeat too soon):

            + * + *

            {@code [12|1|[], 6|2|[], 12|2|[]]. s : (ID | ID ID?) ';' ;}

            + * + *

            When the ATN simulation reaches the state before {@code ';'}, it has a + * DFA state that looks like: {@code [12|1|[], 6|2|[], 12|2|[]]}. Naturally + * {@code 12|1|[]} and {@code 12|2|[]} conflict, but we cannot stop + * processing this node because alternative to has another way to continue, + * via {@code [6|2|[]]}.

            + * + *

            It also let's us continue for this rule:

            + * + *

            {@code [1|1|[], 1|2|[], 8|3|[]] a : A | A | A B ;}

            + * + *

            After matching input A, we reach the stop state for rule A, state 1. + * State 8 is the state right before B. Clearly alternatives 1 and 2 + * conflict and no amount of further lookahead will separate the two. + * However, alternative 3 will be able to continue and so we do not stop + * working on this state. In the previous example, we're concerned with + * states associated with the conflicting alternatives. Here alt 3 is not + * associated with the conflicting configs, but since we can continue + * looking for input reasonably, don't declare the state done.

            + * + *

            PURE SLL PARSING

            + * + *

            To handle pure SLL parsing, all we have to do is make sure that we + * combine stack contexts for configurations that differ only by semantic + * predicate. From there, we can do the usual SLL termination heuristic.

            + * + *

            PREDICATES IN SLL+LL PARSING

            + * + *

            SLL decisions don't evaluate predicates until after they reach DFA stop + * states because they need to create the DFA cache that works in all + * semantic situations. In contrast, full LL evaluates predicates collected + * during start state computation so it can ignore predicates thereafter. + * This means that SLL termination detection can totally ignore semantic + * predicates.

            + * + *

            Implementation-wise, {@link ATNConfigSet} combines stack contexts but not + * semantic predicate contexts so we might see two configurations like the + * following.

            + * + *

            {@code (s, 1, x, {}), (s, 1, x', {p})}

            + * + *

            Before testing these configurations against others, we have to merge + * {@code x} and {@code x'} (without modifying the existing configurations). + * For example, we test {@code (x+x')==x''} when looking for conflicts in + * the following configurations.

            + * + *

            {@code (s, 1, x, {}), (s, 1, x', {p}), (s, 2, x'', {})}

            + * + *

            If the configuration set has predicates (as indicated by + * {@link ATNConfigSet//hasSemanticContext}), this algorithm makes a copy of + * the configurations to strip out all of the predicates so that a standard + * {@link ATNConfigSet} will merge everything ignoring predicates.

            + */ + hasSLLConflictTerminatingPrediction: function( mode, configs) { + // Configs in rule stop states indicate reaching the end of the decision + // rule (local context) or end of start rule (full context). If all + // configs meet this condition, then none of the configurations is able + // to match additional input so we terminate prediction. + // + if (PredictionMode.allConfigsInRuleStopStates(configs)) { + return true; + } + // pure SLL mode parsing + if (mode === PredictionMode.SLL) { + // Don't bother with combining configs from different semantic + // contexts if we can fail over to full LL; costs more time + // since we'll often fail over anyway. + if (configs.hasSemanticContext) { + // dup configs, tossing out semantic predicates + const dup = new ATNConfigSet(); + for(let i=0;iCan we stop looking ahead during ATN simulation or is there some -// uncertainty as to which alternative we will ultimately pick, after -// consuming more input? Even if there are partial conflicts, we might know -// that everything is going to resolve to the same minimum alternative. That -// means we can stop since no more lookahead will change that fact. On the -// other hand, there might be multiple conflicts that resolve to different -// minimums. That means we need more look ahead to decide which of those -// alternatives we should predict.

            -// -//

            The basic idea is to split the set of configurations {@code C}, into -// conflicting subsets {@code (s, _, ctx, _)} and singleton subsets with -// non-conflicting configurations. Two configurations conflict if they have -// identical {@link ATNConfig//state} and {@link ATNConfig//context} values -// but different {@link ATNConfig//alt} value, e.g. {@code (s, i, ctx, _)} -// and {@code (s, j, ctx, _)} for {@code i!=j}.

            -// -//

            Reduce these configuration subsets to the set of possible alternatives. -// You can compute the alternative subsets in one pass as follows:

            -// -//

            {@code A_s,ctx = {i | (s, i, ctx, _)}} for each configuration in -// {@code C} holding {@code s} and {@code ctx} fixed.

            -// -//

            Or in pseudo-code, for each configuration {@code c} in {@code C}:

            -// -//
            -// map[c] U= c.{@link ATNConfig//alt alt} // map hash/equals uses s and x, not
            -// alt and not pred
            -// 
            -// -//

            The values in {@code map} are the set of {@code A_s,ctx} sets.

            -// -//

            If {@code |A_s,ctx|=1} then there is no conflict associated with -// {@code s} and {@code ctx}.

            -// -//

            Reduce the subsets to singletons by choosing a minimum of each subset. If -// the union of these alternative subsets is a singleton, then no amount of -// more lookahead will help us. We will always pick that alternative. If, -// however, there is more than one alternative, then we are uncertain which -// alternative to predict and must continue looking for resolution. We may -// or may not discover an ambiguity in the future, even if there are no -// conflicting subsets this round.

            -// -//

            The biggest sin is to terminate early because it means we've made a -// decision but were uncertain as to the eventual outcome. We haven't used -// enough lookahead. On the other hand, announcing a conflict too late is no -// big deal; you will still have the conflict. It's just inefficient. It -// might even look until the end of file.

            -// -//

            No special consideration for semantic predicates is required because -// predicates are evaluated on-the-fly for full LL prediction, ensuring that -// no configuration contains a semantic context during the termination -// check.

            -// -//

            CONFLICTING CONFIGS

            -// -//

            Two configurations {@code (s, i, x)} and {@code (s, j, x')}, conflict -// when {@code i!=j} but {@code x=x'}. Because we merge all -// {@code (s, i, _)} configurations together, that means that there are at -// most {@code n} configurations associated with state {@code s} for -// {@code n} possible alternatives in the decision. The merged stacks -// complicate the comparison of configuration contexts {@code x} and -// {@code x'}. Sam checks to see if one is a subset of the other by calling -// merge and checking to see if the merged result is either {@code x} or -// {@code x'}. If the {@code x} associated with lowest alternative {@code i} -// is the superset, then {@code i} is the only possible prediction since the -// others resolve to {@code min(i)} as well. However, if {@code x} is -// associated with {@code j>i} then at least one stack configuration for -// {@code j} is not in conflict with alternative {@code i}. The algorithm -// should keep going, looking for more lookahead due to the uncertainty.

            -// -//

            For simplicity, I'm doing a equality check between {@code x} and -// {@code x'} that lets the algorithm continue to consume lookahead longer -// than necessary. The reason I like the equality is of course the -// simplicity but also because that is the test you need to detect the -// alternatives that are actually in conflict.

            -// -//

            CONTINUE/STOP RULE

            -// -//

            Continue if union of resolved alternative sets from non-conflicting and -// conflicting alternative subsets has more than one alternative. We are -// uncertain about which alternative to predict.

            -// -//

            The complete set of alternatives, {@code [i for (_,i,_)]}, tells us which -// alternatives are still in the running for the amount of input we've -// consumed at this point. The conflicting sets let us to strip away -// configurations that won't lead to more states because we resolve -// conflicts to the configuration with a minimum alternate for the -// conflicting set.

            -// -//

            CASES

            -// -//
              -// -//
            • no conflicts and more than 1 alternative in set => continue
            • -// -//
            • {@code (s, 1, x)}, {@code (s, 2, x)}, {@code (s, 3, z)}, -// {@code (s', 1, y)}, {@code (s', 2, y)} yields non-conflicting set -// {@code {3}} U conflicting sets {@code min({1,2})} U {@code min({1,2})} = -// {@code {1,3}} => continue -//
            • -// -//
            • {@code (s, 1, x)}, {@code (s, 2, x)}, {@code (s', 1, y)}, -// {@code (s', 2, y)}, {@code (s'', 1, z)} yields non-conflicting set -// {@code {1}} U conflicting sets {@code min({1,2})} U {@code min({1,2})} = -// {@code {1}} => stop and predict 1
            • -// -//
            • {@code (s, 1, x)}, {@code (s, 2, x)}, {@code (s', 1, y)}, -// {@code (s', 2, y)} yields conflicting, reduced sets {@code {1}} U -// {@code {1}} = {@code {1}} => stop and predict 1, can announce -// ambiguity {@code {1,2}}
            • -// -//
            • {@code (s, 1, x)}, {@code (s, 2, x)}, {@code (s', 2, y)}, -// {@code (s', 3, y)} yields conflicting, reduced sets {@code {1}} U -// {@code {2}} = {@code {1,2}} => continue
            • -// -//
            • {@code (s, 1, x)}, {@code (s, 2, x)}, {@code (s', 3, y)}, -// {@code (s', 4, y)} yields conflicting, reduced sets {@code {1}} U -// {@code {3}} = {@code {1,3}} => continue
            • -// -//
            -// -//

            EXACT AMBIGUITY DETECTION

            -// -//

            If all states report the same conflicting set of alternatives, then we -// know we have the exact ambiguity set.

            -// -//

            |A_i|>1 and -// A_i = A_j for all i, j.

            -// -//

            In other words, we continue examining lookahead until all {@code A_i} -// have more than one alternative and all {@code A_i} are the same. If -// {@code A={{1,2}, {1,3}}}, then regular LL prediction would terminate -// because the resolved set is {@code {1}}. To determine what the real -// ambiguity is, we have to know whether the ambiguity is between one and -// two or one and three so we keep going. We can only stop prediction when -// we need exact ambiguity detection when the sets look like -// {@code A={{1,2}}} or {@code {{1,2},{1,2}}}, etc...

            -// -PredictionMode.resolvesToJustOneViableAlt = function(altsets) { - return PredictionMode.getSingleViableAlt(altsets); -}; + /** + * + * Full LL prediction termination. + * + *

            Can we stop looking ahead during ATN simulation or is there some + * uncertainty as to which alternative we will ultimately pick, after + * consuming more input? Even if there are partial conflicts, we might know + * that everything is going to resolve to the same minimum alternative. That + * means we can stop since no more lookahead will change that fact. On the + * other hand, there might be multiple conflicts that resolve to different + * minimums. That means we need more look ahead to decide which of those + * alternatives we should predict.

            + * + *

            The basic idea is to split the set of configurations {@code C}, into + * conflicting subsets {@code (s, _, ctx, _)} and singleton subsets with + * non-conflicting configurations. Two configurations conflict if they have + * identical {@link ATNConfig//state} and {@link ATNConfig//context} values + * but different {@link ATNConfig//alt} value, e.g. {@code (s, i, ctx, _)} + * and {@code (s, j, ctx, _)} for {@code i!=j}.

            + * + *

            Reduce these configuration subsets to the set of possible alternatives. + * You can compute the alternative subsets in one pass as follows:

            + * + *

            {@code A_s,ctx = {i | (s, i, ctx, _)}} for each configuration in + * {@code C} holding {@code s} and {@code ctx} fixed.

            + * + *

            Or in pseudo-code, for each configuration {@code c} in {@code C}:

            + * + *
            +     * map[c] U= c.{@link ATNConfig//alt alt} // map hash/equals uses s and x, not
            +     * alt and not pred
            +     * 
            + * + *

            The values in {@code map} are the set of {@code A_s,ctx} sets.

            + * + *

            If {@code |A_s,ctx|=1} then there is no conflict associated with + * {@code s} and {@code ctx}.

            + * + *

            Reduce the subsets to singletons by choosing a minimum of each subset. If + * the union of these alternative subsets is a singleton, then no amount of + * more lookahead will help us. We will always pick that alternative. If, + * however, there is more than one alternative, then we are uncertain which + * alternative to predict and must continue looking for resolution. We may + * or may not discover an ambiguity in the future, even if there are no + * conflicting subsets this round.

            + * + *

            The biggest sin is to terminate early because it means we've made a + * decision but were uncertain as to the eventual outcome. We haven't used + * enough lookahead. On the other hand, announcing a conflict too late is no + * big deal; you will still have the conflict. It's just inefficient. It + * might even look until the end of file.

            + * + *

            No special consideration for semantic predicates is required because + * predicates are evaluated on-the-fly for full LL prediction, ensuring that + * no configuration contains a semantic context during the termination + * check.

            + * + *

            CONFLICTING CONFIGS

            + * + *

            Two configurations {@code (s, i, x)} and {@code (s, j, x')}, conflict + * when {@code i!=j} but {@code x=x'}. Because we merge all + * {@code (s, i, _)} configurations together, that means that there are at + * most {@code n} configurations associated with state {@code s} for + * {@code n} possible alternatives in the decision. The merged stacks + * complicate the comparison of configuration contexts {@code x} and + * {@code x'}. Sam checks to see if one is a subset of the other by calling + * merge and checking to see if the merged result is either {@code x} or + * {@code x'}. If the {@code x} associated with lowest alternative {@code i} + * is the superset, then {@code i} is the only possible prediction since the + * others resolve to {@code min(i)} as well. However, if {@code x} is + * associated with {@code j>i} then at least one stack configuration for + * {@code j} is not in conflict with alternative {@code i}. The algorithm + * should keep going, looking for more lookahead due to the uncertainty.

            + * + *

            For simplicity, I'm doing a equality check between {@code x} and + * {@code x'} that lets the algorithm continue to consume lookahead longer + * than necessary. The reason I like the equality is of course the + * simplicity but also because that is the test you need to detect the + * alternatives that are actually in conflict.

            + * + *

            CONTINUE/STOP RULE

            + * + *

            Continue if union of resolved alternative sets from non-conflicting and + * conflicting alternative subsets has more than one alternative. We are + * uncertain about which alternative to predict.

            + * + *

            The complete set of alternatives, {@code [i for (_,i,_)]}, tells us which + * alternatives are still in the running for the amount of input we've + * consumed at this point. The conflicting sets let us to strip away + * configurations that won't lead to more states because we resolve + * conflicts to the configuration with a minimum alternate for the + * conflicting set.

            + * + *

            CASES

            + * + *
              + * + *
            • no conflicts and more than 1 alternative in set => continue
            • + * + *
            • {@code (s, 1, x)}, {@code (s, 2, x)}, {@code (s, 3, z)}, + * {@code (s', 1, y)}, {@code (s', 2, y)} yields non-conflicting set + * {@code {3}} U conflicting sets {@code min({1,2})} U {@code min({1,2})} = + * {@code {1,3}} => continue + *
            • + * + *
            • {@code (s, 1, x)}, {@code (s, 2, x)}, {@code (s', 1, y)}, + * {@code (s', 2, y)}, {@code (s'', 1, z)} yields non-conflicting set + * {@code {1}} U conflicting sets {@code min({1,2})} U {@code min({1,2})} = + * {@code {1}} => stop and predict 1
            • + * + *
            • {@code (s, 1, x)}, {@code (s, 2, x)}, {@code (s', 1, y)}, + * {@code (s', 2, y)} yields conflicting, reduced sets {@code {1}} U + * {@code {1}} = {@code {1}} => stop and predict 1, can announce + * ambiguity {@code {1,2}}
            • + * + *
            • {@code (s, 1, x)}, {@code (s, 2, x)}, {@code (s', 2, y)}, + * {@code (s', 3, y)} yields conflicting, reduced sets {@code {1}} U + * {@code {2}} = {@code {1,2}} => continue
            • + * + *
            • {@code (s, 1, x)}, {@code (s, 2, x)}, {@code (s', 3, y)}, + * {@code (s', 4, y)} yields conflicting, reduced sets {@code {1}} U + * {@code {3}} = {@code {1,3}} => continue
            • + * + *
            + * + *

            EXACT AMBIGUITY DETECTION

            + * + *

            If all states report the same conflicting set of alternatives, then we + * know we have the exact ambiguity set.

            + * + *

            |A_i|>1 and + * A_i = A_j for all i, j.

            + * + *

            In other words, we continue examining lookahead until all {@code A_i} + * have more than one alternative and all {@code A_i} are the same. If + * {@code A={{1,2}, {1,3}}}, then regular LL prediction would terminate + * because the resolved set is {@code {1}}. To determine what the real + * ambiguity is, we have to know whether the ambiguity is between one and + * two or one and three so we keep going. We can only stop prediction when + * we need exact ambiguity detection when the sets look like + * {@code A={{1,2}}} or {@code {{1,2},{1,2}}}, etc...

            + */ + resolvesToJustOneViableAlt: function(altsets) { + return PredictionMode.getSingleViableAlt(altsets); + }, -// -// Determines if every alternative subset in {@code altsets} contains more -// than one alternative. -// -// @param altsets a collection of alternative subsets -// @return {@code true} if every {@link BitSet} in {@code altsets} has -// {@link BitSet//cardinality cardinality} > 1, otherwise {@code false} -// -PredictionMode.allSubsetsConflict = function(altsets) { - return ! PredictionMode.hasNonConflictingAltSet(altsets); -}; -// -// Determines if any single alternative subset in {@code altsets} contains -// exactly one alternative. -// -// @param altsets a collection of alternative subsets -// @return {@code true} if {@code altsets} contains a {@link BitSet} with -// {@link BitSet//cardinality cardinality} 1, otherwise {@code false} -// -PredictionMode.hasNonConflictingAltSet = function(altsets) { - for(var i=0;i1) { - return true; + + /** + * Determines if any single alternative subset in {@code altsets} contains + * more than one alternative. + * + * @param altsets a collection of alternative subsets + * @return {@code true} if {@code altsets} contains a {@link BitSet} with + * {@link BitSet//cardinality cardinality} > 1, otherwise {@code false} + */ + hasConflictingAltSet: function(altsets) { + for(let i=0;i1) { + return true; + } } - } - return false; -}; + return false; + }, -// -// Determines if every alternative subset in {@code altsets} is equivalent. 
-// -// @param altsets a collection of alternative subsets -// @return {@code true} if every member of {@code altsets} is equal to the -// others, otherwise {@code false} -// -PredictionMode.allSubsetsEqual = function(altsets) { - var first = null; - for(var i=0;i -// map[c] U= c.{@link ATNConfig//alt alt} // map hash/equals uses s and x, not -// alt and not pred -// - -PredictionMode.getConflictingAltSubsets = function(configs) { - var configToAlts = new Map(); - configToAlts.hashFunction = function(cfg) { hashStuff(cfg.state.stateNumber, cfg.context); }; - configToAlts.equalsFunction = function(c1, c2) { return c1.state.stateNumber==c2.state.stateNumber && c1.context.equals(c2.context);} - configs.items.map(function(cfg) { - var alts = configToAlts.get(cfg); - if (alts === null) { - alts = new BitSet(); - configToAlts.put(cfg, alts); - } - alts.add(cfg.alt); - }); - return configToAlts.getValues(); -}; - -// -// Get a map from state to alt subset from a configuration set. For each -// configuration {@code c} in {@code configs}: -// -//
            -// map[c.{@link ATNConfig//state state}] U= c.{@link ATNConfig//alt alt}
            -// 
            -// -PredictionMode.getStateToAltMap = function(configs) { - var m = new AltDict(); - configs.items.map(function(c) { - var alts = m.get(c.state); - if (alts === null) { - alts = new BitSet(); - m.put(c.state, alts); - } - alts.add(c.alt); - }); - return m; -}; - -PredictionMode.hasStateAssociatedWithOneAlt = function(configs) { - var values = PredictionMode.getStateToAltMap(configs).values(); - for(var i=0;i + * map[c] U= c.{@link ATNConfig//alt alt} // map hash/equals uses s and x, not + * alt and not pred + * + */ + getConflictingAltSubsets: function(configs) { + const configToAlts = new Map(); + configToAlts.hashFunction = function(cfg) { hashStuff(cfg.state.stateNumber, cfg.context); }; + configToAlts.equalsFunction = function(c1, c2) { return c1.state.stateNumber==c2.state.stateNumber && c1.context.equals(c2.context);} + configs.items.map(function(cfg) { + let alts = configToAlts.get(cfg); + if (alts === null) { + alts = new BitSet(); + configToAlts.put(cfg, alts); + } + alts.add(cfg.alt); + }); + return configToAlts.getValues(); + }, + + /** + * Get a map from state to alt subset from a configuration set. For each + * configuration {@code c} in {@code configs}: + * + *
            +     * map[c.{@link ATNConfig//state state}] U= c.{@link ATNConfig//alt alt}
            +     * 
            + */ + getStateToAltMap: function(configs) { + const m = new AltDict(); + configs.items.map(function(c) { + let alts = m.get(c.state); + if (alts === null) { + alts = new BitSet(); + m.put(c.state, alts); + } + alts.add(c.alt); + }); + return m; + }, + + hasStateAssociatedWithOneAlt: function(configs) { + const values = PredictionMode.getStateToAltMap(configs).values(); + for(let i=0;iI have scoped the {@link AND}, {@link OR}, and {@link Predicate} subclasses of -// {@link SemanticContext} within the scope of this outer class.

            -// +const {Set, Hash} = require('./../Utils'); -var Set = require('./../Utils').Set; -var Hash = require('./../Utils').Hash; +/** + * A tree structure used to record the semantic context in which + * an ATN configuration is valid. It's either a single predicate, + * a conjunction {@code p1&&p2}, or a sum of products {@code p1||p2}. + * + *

            I have scoped the {@link AND}, {@link OR}, and {@link Predicate} subclasses of + * {@link SemanticContext} within the scope of this outer class.

            + */ +class SemanticContext { + hashCode() { + const hash = new Hash(); + this.updateHashCode(hash); + return hash.finish(); + } -function SemanticContext() { - return this; + /** + * For context independent predicates, we evaluate them without a local + * context (i.e., null context). That way, we can evaluate them without + * having to create proper rule-specific context during prediction (as + * opposed to the parser, which creates them naturally). In a practical + * sense, this avoids a cast exception from RuleContext to myruleContext. + * + *

            For context dependent predicates, we must pass in a local context so that + * references such as $arg evaluate properly as _localctx.arg. We only + * capture context dependent predicates in the context in which we begin + * prediction, so we passed in the outer context here in case of context + * dependent predicate evaluation.

            + */ + evaluate(parser, outerContext) {} + + /** + * Evaluate the precedence predicates for the context and reduce the result. + * + * @param parser The parser instance. + * @param outerContext The current parser context object. + * @return The simplified semantic context after precedence predicates are + * evaluated, which will be one of the following values. + *
              + *
            • {@link //NONE}: if the predicate simplifies to {@code true} after + * precedence predicates are evaluated.
            • + *
            • {@code null}: if the predicate simplifies to {@code false} after + * precedence predicates are evaluated.
            • + *
            • {@code this}: if the semantic context is not changed as a result of + * precedence predicate evaluation.
            • + *
            • A non-{@code null} {@link SemanticContext}: the new simplified + * semantic context after precedence predicates are evaluated.
            • + *
            + */ + evalPrecedence(parser, outerContext) { + return this; + } + + static andContext(a, b) { + if (a === null || a === SemanticContext.NONE) { + return b; + } + if (b === null || b === SemanticContext.NONE) { + return a; + } + const result = new AND(a, b); + if (result.opnds.length === 1) { + return result.opnds[0]; + } else { + return result; + } + } + + static orContext(a, b) { + if (a === null) { + return b; + } + if (b === null) { + return a; + } + if (a === SemanticContext.NONE || b === SemanticContext.NONE) { + return SemanticContext.NONE; + } + const result = new OR(a, b); + if (result.opnds.length === 1) { + return result.opnds[0]; + } else { + return result; + } + } } -SemanticContext.prototype.hashCode = function() { - var hash = new Hash(); - this.updateHashCode(hash); - return hash.finish(); -}; -// For context independent predicates, we evaluate them without a local -// context (i.e., null context). That way, we can evaluate them without -// having to create proper rule-specific context during prediction (as -// opposed to the parser, which creates them naturally). In a practical -// sense, this avoids a cast exception from RuleContext to myruleContext. -// -//

            For context dependent predicates, we must pass in a local context so that -// references such as $arg evaluate properly as _localctx.arg. We only -// capture context dependent predicates in the context in which we begin -// prediction, so we passed in the outer context here in case of context -// dependent predicate evaluation.

            -// -SemanticContext.prototype.evaluate = function(parser, outerContext) { -}; +class Predicate extends SemanticContext { + constructor(ruleIndex, predIndex, isCtxDependent) { + super(); + this.ruleIndex = ruleIndex === undefined ? -1 : ruleIndex; + this.predIndex = predIndex === undefined ? -1 : predIndex; + this.isCtxDependent = isCtxDependent === undefined ? false : isCtxDependent; // e.g., $i ref in pred + } -// -// Evaluate the precedence predicates for the context and reduce the result. -// -// @param parser The parser instance. -// @param outerContext The current parser context object. -// @return The simplified semantic context after precedence predicates are -// evaluated, which will be one of the following values. -//
              -//
            • {@link //NONE}: if the predicate simplifies to {@code true} after -// precedence predicates are evaluated.
            • -//
            • {@code null}: if the predicate simplifies to {@code false} after -// precedence predicates are evaluated.
            • -//
            • {@code this}: if the semantic context is not changed as a result of -// precedence predicate evaluation.
            • -//
            • A non-{@code null} {@link SemanticContext}: the new simplified -// semantic context after precedence predicates are evaluated.
            • -//
            -// -SemanticContext.prototype.evalPrecedence = function(parser, outerContext) { - return this; -}; + evaluate(parser, outerContext) { + const localctx = this.isCtxDependent ? outerContext : null; + return parser.sempred(localctx, this.ruleIndex, this.predIndex); + } -SemanticContext.andContext = function(a, b) { - if (a === null || a === SemanticContext.NONE) { - return b; + updateHashCode(hash) { + hash.update(this.ruleIndex, this.predIndex, this.isCtxDependent); } - if (b === null || b === SemanticContext.NONE) { - return a; - } - var result = new AND(a, b); - if (result.opnds.length === 1) { - return result.opnds[0]; - } else { - return result; - } -}; -SemanticContext.orContext = function(a, b) { - if (a === null) { - return b; + equals(other) { + if (this === other) { + return true; + } else if (!(other instanceof Predicate)) { + return false; + } else { + return this.ruleIndex === other.ruleIndex && + this.predIndex === other.predIndex && + this.isCtxDependent === other.isCtxDependent; + } } - if (b === null) { - return a; - } - if (a === SemanticContext.NONE || b === SemanticContext.NONE) { - return SemanticContext.NONE; - } - var result = new OR(a, b); - if (result.opnds.length === 1) { - return result.opnds[0]; - } else { - return result; - } -}; -function Predicate(ruleIndex, predIndex, isCtxDependent) { - SemanticContext.call(this); - this.ruleIndex = ruleIndex === undefined ? -1 : ruleIndex; - this.predIndex = predIndex === undefined ? -1 : predIndex; - this.isCtxDependent = isCtxDependent === undefined ? false : isCtxDependent; // e.g., $i ref in pred - return this; + toString() { + return "{" + this.ruleIndex + ":" + this.predIndex + "}?"; + } } -Predicate.prototype = Object.create(SemanticContext.prototype); -Predicate.prototype.constructor = Predicate; - -//The default {@link SemanticContext}, which is semantically equivalent to -//a predicate of the form {@code {true}?}. 
-// +/** + * The default {@link SemanticContext}, which is semantically equivalent to + * a predicate of the form {@code {true}?} + */ SemanticContext.NONE = new Predicate(); -Predicate.prototype.evaluate = function(parser, outerContext) { - var localctx = this.isCtxDependent ? outerContext : null; - return parser.sempred(localctx, this.ruleIndex, this.predIndex); -}; - -Predicate.prototype.updateHashCode = function(hash) { - hash.update(this.ruleIndex, this.predIndex, this.isCtxDependent); -}; - -Predicate.prototype.equals = function(other) { - if (this === other) { - return true; - } else if (!(other instanceof Predicate)) { - return false; - } else { - return this.ruleIndex === other.ruleIndex && - this.predIndex === other.predIndex && - this.isCtxDependent === other.isCtxDependent; +class PrecedencePredicate extends SemanticContext { + constructor(precedence) { + super(); + this.precedence = precedence === undefined ? 0 : precedence; } -}; -Predicate.prototype.toString = function() { - return "{" + this.ruleIndex + ":" + this.predIndex + "}?"; -}; - -function PrecedencePredicate(precedence) { - SemanticContext.call(this); - this.precedence = precedence === undefined ? 
0 : precedence; -} - -PrecedencePredicate.prototype = Object.create(SemanticContext.prototype); -PrecedencePredicate.prototype.constructor = PrecedencePredicate; - -PrecedencePredicate.prototype.evaluate = function(parser, outerContext) { - return parser.precpred(outerContext, this.precedence); -}; - -PrecedencePredicate.prototype.evalPrecedence = function(parser, outerContext) { - if (parser.precpred(outerContext, this.precedence)) { - return SemanticContext.NONE; - } else { - return null; + evaluate(parser, outerContext) { + return parser.precpred(outerContext, this.precedence); } -}; -PrecedencePredicate.prototype.compareTo = function(other) { - return this.precedence - other.precedence; -}; - -PrecedencePredicate.prototype.updateHashCode = function(hash) { - hash.update(31); -}; - -PrecedencePredicate.prototype.equals = function(other) { - if (this === other) { - return true; - } else if (!(other instanceof PrecedencePredicate)) { - return false; - } else { - return this.precedence === other.precedence; - } -}; - -PrecedencePredicate.prototype.toString = function() { - return "{"+this.precedence+">=prec}?"; -}; - - - -PrecedencePredicate.filterPrecedencePredicates = function(set) { - var result = []; - set.values().map( function(context) { - if (context instanceof PrecedencePredicate) { - result.push(context); + evalPrecedence(parser, outerContext) { + if (parser.precpred(outerContext, this.precedence)) { + return SemanticContext.NONE; + } else { + return null; } - }); - return result; -}; - - -// A semantic context which is true whenever none of the contained contexts -// is false. 
-// -function AND(a, b) { - SemanticContext.call(this); - var operands = new Set(); - if (a instanceof AND) { - a.opnds.map(function(o) { - operands.add(o); - }); - } else { - operands.add(a); } - if (b instanceof AND) { - b.opnds.map(function(o) { - operands.add(o); - }); - } else { - operands.add(b); + + compareTo(other) { + return this.precedence - other.precedence; } - var precedencePredicates = PrecedencePredicate.filterPrecedencePredicates(operands); - if (precedencePredicates.length > 0) { - // interested in the transition with the lowest precedence - var reduced = null; - precedencePredicates.map( function(p) { - if(reduced===null || p.precedence=prec}?"; + } + + static filterPrecedencePredicates(set) { + const result = []; + set.values().map( function(context) { + if (context instanceof PrecedencePredicate) { + result.push(context); } }); - operands.add(reduced); + return result; } - this.opnds = operands.values(); - return this; } -AND.prototype = Object.create(SemanticContext.prototype); -AND.prototype.constructor = AND; - -AND.prototype.equals = function(other) { - if (this === other) { - return true; - } else if (!(other instanceof AND)) { - return false; - } else { - return this.opnds === other.opnds; - } -}; - -AND.prototype.updateHashCode = function(hash) { - hash.update(this.opnds, "AND"); -}; -// -// {@inheritDoc} -// -//

            -// The evaluation of predicates by this context is short-circuiting, but -// unordered.

            -// -AND.prototype.evaluate = function(parser, outerContext) { - for (var i = 0; i < this.opnds.length; i++) { - if (!this.opnds[i].evaluate(parser, outerContext)) { - return false; +class AND extends SemanticContext { + /** + * A semantic context which is true whenever none of the contained contexts + * is false + */ + constructor(a, b) { + super(); + const operands = new Set(); + if (a instanceof AND) { + a.opnds.map(function(o) { + operands.add(o); + }); + } else { + operands.add(a); } - } - return true; -}; - -AND.prototype.evalPrecedence = function(parser, outerContext) { - var differs = false; - var operands = []; - for (var i = 0; i < this.opnds.length; i++) { - var context = this.opnds[i]; - var evaluated = context.evalPrecedence(parser, outerContext); - differs |= (evaluated !== context); - if (evaluated === null) { - // The AND context is false if any element is false - return null; - } else if (evaluated !== SemanticContext.NONE) { - // Reduce the result by skipping true elements - operands.push(evaluated); + if (b instanceof AND) { + b.opnds.map(function(o) { + operands.add(o); + }); + } else { + operands.add(b); } - } - if (!differs) { - return this; - } - if (operands.length === 0) { - // all elements were true, so the AND context is true - return SemanticContext.NONE; - } - var result = null; - operands.map(function(o) { - result = result === null ? o : SemanticContext.andContext(result, o); - }); - return result; -}; - -AND.prototype.toString = function() { - var s = ""; - this.opnds.map(function(o) { - s += "&& " + o.toString(); - }); - return s.length > 3 ? s.slice(3) : s; -}; - -// -// A semantic context which is true whenever at least one of the contained -// contexts is true. 
-// -function OR(a, b) { - SemanticContext.call(this); - var operands = new Set(); - if (a instanceof OR) { - a.opnds.map(function(o) { - operands.add(o); - }); - } else { - operands.add(a); - } - if (b instanceof OR) { - b.opnds.map(function(o) { - operands.add(o); - }); - } else { - operands.add(b); + const precedencePredicates = PrecedencePredicate.filterPrecedencePredicates(operands); + if (precedencePredicates.length > 0) { + // interested in the transition with the lowest precedence + let reduced = null; + precedencePredicates.map( function(p) { + if(reduced===null || p.precedence 0) { - // interested in the transition with the highest precedence - var s = precedencePredicates.sort(function(a, b) { - return a.compareTo(b); - }); - var reduced = s[s.length-1]; - operands.add(reduced); - } - this.opnds = operands.values(); - return this; -} - -OR.prototype = Object.create(SemanticContext.prototype); -OR.prototype.constructor = OR; - -OR.prototype.constructor = function(other) { - if (this === other) { - return true; - } else if (!(other instanceof OR)) { - return false; - } else { - return this.opnds === other.opnds; - } -}; - -OR.prototype.updateHashCode = function(hash) { - hash.update(this.opnds, "OR"); -}; - -//

            -// The evaluation of predicates by this context is short-circuiting, but -// unordered.

            -// -OR.prototype.evaluate = function(parser, outerContext) { - for (var i = 0; i < this.opnds.length; i++) { - if (this.opnds[i].evaluate(parser, outerContext)) { + equals(other) { + if (this === other) { return true; + } else if (!(other instanceof AND)) { + return false; + } else { + return this.opnds === other.opnds; } } - return false; -}; -OR.prototype.evalPrecedence = function(parser, outerContext) { - var differs = false; - var operands = []; - for (var i = 0; i < this.opnds.length; i++) { - var context = this.opnds[i]; - var evaluated = context.evalPrecedence(parser, outerContext); - differs |= (evaluated !== context); - if (evaluated === SemanticContext.NONE) { - // The OR context is true if any element is true + updateHashCode(hash) { + hash.update(this.opnds, "AND"); + } + + /** + * {@inheritDoc} + * + *

            + * The evaluation of predicates by this context is short-circuiting, but + * unordered.

            + */ + evaluate(parser, outerContext) { + for (let i = 0; i < this.opnds.length; i++) { + if (!this.opnds[i].evaluate(parser, outerContext)) { + return false; + } + } + return true; + } + + evalPrecedence(parser, outerContext) { + let differs = false; + const operands = []; + for (let i = 0; i < this.opnds.length; i++) { + const context = this.opnds[i]; + const evaluated = context.evalPrecedence(parser, outerContext); + differs |= (evaluated !== context); + if (evaluated === null) { + // The AND context is false if any element is false + return null; + } else if (evaluated !== SemanticContext.NONE) { + // Reduce the result by skipping true elements + operands.push(evaluated); + } + } + if (!differs) { + return this; + } + if (operands.length === 0) { + // all elements were true, so the AND context is true return SemanticContext.NONE; - } else if (evaluated !== null) { - // Reduce the result by skipping false elements - operands.push(evaluated); + } + let result = null; + operands.map(function(o) { + result = result === null ? o : SemanticContext.andContext(result, o); + }); + return result; + } + + toString() { + let s = ""; + this.opnds.map(function(o) { + s += "&& " + o.toString(); + }); + return s.length > 3 ? 
s.slice(3) : s; + } +} + + +class OR extends SemanticContext { + /** + * A semantic context which is true whenever at least one of the contained + * contexts is true + */ + constructor(a, b) { + super(); + const operands = new Set(); + if (a instanceof OR) { + a.opnds.map(function(o) { + operands.add(o); + }); + } else { + operands.add(a); + } + if (b instanceof OR) { + b.opnds.map(function(o) { + operands.add(o); + }); + } else { + operands.add(b); + } + + const precedencePredicates = PrecedencePredicate.filterPrecedencePredicates(operands); + if (precedencePredicates.length > 0) { + // interested in the transition with the highest precedence + const s = precedencePredicates.sort(function(a, b) { + return a.compareTo(b); + }); + const reduced = s[s.length-1]; + operands.add(reduced); + } + this.opnds = operands.values(); + } + + equals(other) { + if (this === other) { + return true; + } else if (!(other instanceof OR)) { + return false; + } else { + return this.opnds === other.opnds; } } - if (!differs) { - return this; - } - if (operands.length === 0) { - // all elements were false, so the OR context is false - return null; - } - var result = null; - operands.map(function(o) { - return result === null ? o : SemanticContext.orContext(result, o); - }); - return result; -}; -OR.prototype.toString = function() { - var s = ""; - this.opnds.map(function(o) { - s += "|| " + o.toString(); - }); - return s.length > 3 ? s.slice(3) : s; -}; + updateHashCode(hash) { + hash.update(this.opnds, "OR"); + } -exports.SemanticContext = SemanticContext; -exports.PrecedencePredicate = PrecedencePredicate; -exports.Predicate = Predicate; + /** + *

            + * The evaluation of predicates by this context is short-circuiting, but + * unordered.

            + */ + evaluate(parser, outerContext) { + for (let i = 0; i < this.opnds.length; i++) { + if (this.opnds[i].evaluate(parser, outerContext)) { + return true; + } + } + return false; + } + + evalPrecedence(parser, outerContext) { + let differs = false; + const operands = []; + for (let i = 0; i < this.opnds.length; i++) { + const context = this.opnds[i]; + const evaluated = context.evalPrecedence(parser, outerContext); + differs |= (evaluated !== context); + if (evaluated === SemanticContext.NONE) { + // The OR context is true if any element is true + return SemanticContext.NONE; + } else if (evaluated !== null) { + // Reduce the result by skipping false elements + operands.push(evaluated); + } + } + if (!differs) { + return this; + } + if (operands.length === 0) { + // all elements were false, so the OR context is false + return null; + } + const result = null; + operands.map(function(o) { + return result === null ? o : SemanticContext.orContext(result, o); + }); + return result; + } + + toString() { + let s = ""; + this.opnds.map(function(o) { + s += "|| " + o.toString(); + }); + return s.length > 3 ? s.slice(3) : s; + } +} + +module.exports = { + SemanticContext, + PrecedencePredicate, + Predicate +} diff --git a/runtime/JavaScript/src/antlr4/atn/Transition.js b/runtime/JavaScript/src/antlr4/atn/Transition.js index fcd068db9..a900acf3b 100644 --- a/runtime/JavaScript/src/antlr4/atn/Transition.js +++ b/runtime/JavaScript/src/antlr4/atn/Transition.js @@ -2,45 +2,48 @@ * Use of this file is governed by the BSD 3-clause license that * can be found in the LICENSE.txt file in the project root. */ -// -// An ATN transition between any two ATN states. Subclasses define -// atom, set, epsilon, action, predicate, rule transitions. -// -//

            This is a one way link. It emanates from a state (usually via a list of -// transitions) and has a target state.

            -// -//

            Since we never have to change the ATN transitions once we construct it, -// we can fix these transitions as specific classes. The DFA transitions -// on the other hand need to update the labels as it adds transitions to -// the states. We'll use the term Edge for the DFA to distinguish them from -// ATN transitions.

            +const {Token} = require('./../Token'); +const {IntervalSet} = require('./../IntervalSet'); +const {Predicate, PrecedencePredicate} = require('./SemanticContext'); -var Token = require('./../Token').Token; -var Interval = require('./../IntervalSet').Interval; -var IntervalSet = require('./../IntervalSet').IntervalSet; -var Predicate = require('./SemanticContext').Predicate; -var PrecedencePredicate = require('./SemanticContext').PrecedencePredicate; - -function Transition (target) { - // The target of this transition. - if (target===undefined || target===null) { - throw "target cannot be null."; +/** + * An ATN transition between any two ATN states. Subclasses define + * atom, set, epsilon, action, predicate, rule transitions. + * + *

            This is a one way link. It emanates from a state (usually via a list of + * transitions) and has a target state.

            + * + *

            Since we never have to change the ATN transitions once we construct it, + * we can fix these transitions as specific classes. The DFA transitions + * on the other hand need to update the labels as it adds transitions to + * the states. We'll use the term Edge for the DFA to distinguish them from + * ATN transitions.

            + */ +class Transition { + constructor(target) { + // The target of this transition. + if (target===undefined || target===null) { + throw "target cannot be null."; + } + this.target = target; + // Are we epsilon, action, sempred? + this.isEpsilon = false; + this.label = null; } - this.target = target; - // Are we epsilon, action, sempred? - this.isEpsilon = false; - this.label = null; - return this; } - // constants for serialization + +// constants for serialization + Transition.EPSILON = 1; Transition.RANGE = 2; Transition.RULE = 3; -Transition.PREDICATE = 4; // e.g., {isType(input.LT(1))}? +// e.g., {isType(input.LT(1))}? +Transition.PREDICATE = 4; Transition.ATOM = 5; Transition.ACTION = 6; -Transition.SET = 7; // ~(A|B) or ~atom, wildcard, which convert to next 2 +// ~(A|B) or ~atom, wildcard, which convert to next 2 +Transition.SET = 7; Transition.NOT_SET = 8; Transition.WILDCARD = 9; Transition.PRECEDENCE = 10; @@ -74,243 +77,227 @@ Transition.serializationTypes = { // TODO: make all transitions sets? no, should remove set edges -function AtomTransition(target, label) { - Transition.call(this, target); - this.label_ = label; // The token type or character value; or, signifies special label. - this.label = this.makeLabel(); - this.serializationType = Transition.ATOM; - return this; + +class AtomTransition extends Transition { + constructor(target, label) { + super(target); + // The token type or character value; or, signifies special label. 
+ this.label_ = label; + this.label = this.makeLabel(); + this.serializationType = Transition.ATOM; + } + + makeLabel() { + const s = new IntervalSet(); + s.addOne(this.label_); + return s; + } + + matches(symbol, minVocabSymbol, maxVocabSymbol) { + return this.label_ === symbol; + } + + toString() { + return this.label_; + } } -AtomTransition.prototype = Object.create(Transition.prototype); -AtomTransition.prototype.constructor = AtomTransition; -AtomTransition.prototype.makeLabel = function() { - var s = new IntervalSet(); - s.addOne(this.label_); - return s; -}; +class RuleTransition extends Transition { + constructor(ruleStart, ruleIndex, precedence, followState) { + super(ruleStart); + // ptr to the rule definition object for this rule ref + this.ruleIndex = ruleIndex; + this.precedence = precedence; + // what node to begin computations following ref to rule + this.followState = followState; + this.serializationType = Transition.RULE; + this.isEpsilon = true; + } -AtomTransition.prototype.matches = function( symbol, minVocabSymbol, maxVocabSymbol) { - return this.label_ === symbol; -}; - -AtomTransition.prototype.toString = function() { - return this.label_; -}; - -function RuleTransition(ruleStart, ruleIndex, precedence, followState) { - Transition.call(this, ruleStart); - this.ruleIndex = ruleIndex; // ptr to the rule definition object for this rule ref - this.precedence = precedence; - this.followState = followState; // what node to begin computations following ref to rule - this.serializationType = Transition.RULE; - this.isEpsilon = true; - return this; + matches(symbol, minVocabSymbol, maxVocabSymbol) { + return false; + } } -RuleTransition.prototype = Object.create(Transition.prototype); -RuleTransition.prototype.constructor = RuleTransition; +class EpsilonTransition extends Transition { + constructor(target, outermostPrecedenceReturn) { + super(target); + this.serializationType = Transition.EPSILON; + this.isEpsilon = true; + 
this.outermostPrecedenceReturn = outermostPrecedenceReturn; + } -RuleTransition.prototype.matches = function(symbol, minVocabSymbol, maxVocabSymbol) { - return false; -}; + matches(symbol, minVocabSymbol, maxVocabSymbol) { + return false; + } - -function EpsilonTransition(target, outermostPrecedenceReturn) { - Transition.call(this, target); - this.serializationType = Transition.EPSILON; - this.isEpsilon = true; - this.outermostPrecedenceReturn = outermostPrecedenceReturn; - return this; + toString() { + return "epsilon"; + } } -EpsilonTransition.prototype = Object.create(Transition.prototype); -EpsilonTransition.prototype.constructor = EpsilonTransition; -EpsilonTransition.prototype.matches = function( symbol, minVocabSymbol, maxVocabSymbol) { - return false; -}; +class RangeTransition extends Transition { + constructor(target, start, stop) { + super(target); + this.serializationType = Transition.RANGE; + this.start = start; + this.stop = stop; + this.label = this.makeLabel(); + } -EpsilonTransition.prototype.toString = function() { - return "epsilon"; -}; + makeLabel() { + const s = new IntervalSet(); + s.addRange(this.start, this.stop); + return s; + } -function RangeTransition(target, start, stop) { - Transition.call(this, target); - this.serializationType = Transition.RANGE; - this.start = start; - this.stop = stop; - this.label = this.makeLabel(); - return this; + matches(symbol, minVocabSymbol, maxVocabSymbol) { + return symbol >= this.start && symbol <= this.stop; + } + + toString() { + return "'" + String.fromCharCode(this.start) + "'..'" + String.fromCharCode(this.stop) + "'"; + } } -RangeTransition.prototype = Object.create(Transition.prototype); -RangeTransition.prototype.constructor = RangeTransition; -RangeTransition.prototype.makeLabel = function() { - var s = new IntervalSet(); - s.addRange(this.start, this.stop); - return s; -}; - -RangeTransition.prototype.matches = function(symbol, minVocabSymbol, maxVocabSymbol) { - return symbol >= this.start && 
symbol <= this.stop; -}; - -RangeTransition.prototype.toString = function() { - return "'" + String.fromCharCode(this.start) + "'..'" + String.fromCharCode(this.stop) + "'"; -}; - -function AbstractPredicateTransition(target) { - Transition.call(this, target); - return this; +class AbstractPredicateTransition extends Transition { + constructor(target) { + super(target); + } } -AbstractPredicateTransition.prototype = Object.create(Transition.prototype); -AbstractPredicateTransition.prototype.constructor = AbstractPredicateTransition; +class PredicateTransition extends AbstractPredicateTransition { + constructor(target, ruleIndex, predIndex, isCtxDependent) { + super(target); + this.serializationType = Transition.PREDICATE; + this.ruleIndex = ruleIndex; + this.predIndex = predIndex; + this.isCtxDependent = isCtxDependent; // e.g., $i ref in pred + this.isEpsilon = true; + } -function PredicateTransition(target, ruleIndex, predIndex, isCtxDependent) { - AbstractPredicateTransition.call(this, target); - this.serializationType = Transition.PREDICATE; - this.ruleIndex = ruleIndex; - this.predIndex = predIndex; - this.isCtxDependent = isCtxDependent; // e.g., $i ref in pred - this.isEpsilon = true; - return this; + matches(symbol, minVocabSymbol, maxVocabSymbol) { + return false; + } + + getPredicate() { + return new Predicate(this.ruleIndex, this.predIndex, this.isCtxDependent); + } + + toString() { + return "pred_" + this.ruleIndex + ":" + this.predIndex; + } } -PredicateTransition.prototype = Object.create(AbstractPredicateTransition.prototype); -PredicateTransition.prototype.constructor = PredicateTransition; -PredicateTransition.prototype.matches = function(symbol, minVocabSymbol, maxVocabSymbol) { - return false; -}; +class ActionTransition extends Transition { + constructor(target, ruleIndex, actionIndex, isCtxDependent) { + super(target); + this.serializationType = Transition.ACTION; + this.ruleIndex = ruleIndex; + this.actionIndex = actionIndex===undefined ? 
-1 : actionIndex; + this.isCtxDependent = isCtxDependent===undefined ? false : isCtxDependent; // e.g., $i ref in pred + this.isEpsilon = true; + } -PredicateTransition.prototype.getPredicate = function() { - return new Predicate(this.ruleIndex, this.predIndex, this.isCtxDependent); -}; + matches(symbol, minVocabSymbol, maxVocabSymbol) { + return false; + } -PredicateTransition.prototype.toString = function() { - return "pred_" + this.ruleIndex + ":" + this.predIndex; -}; - -function ActionTransition(target, ruleIndex, actionIndex, isCtxDependent) { - Transition.call(this, target); - this.serializationType = Transition.ACTION; - this.ruleIndex = ruleIndex; - this.actionIndex = actionIndex===undefined ? -1 : actionIndex; - this.isCtxDependent = isCtxDependent===undefined ? false : isCtxDependent; // e.g., $i ref in pred - this.isEpsilon = true; - return this; + toString() { + return "action_" + this.ruleIndex + ":" + this.actionIndex; + } } -ActionTransition.prototype = Object.create(Transition.prototype); -ActionTransition.prototype.constructor = ActionTransition; - - -ActionTransition.prototype.matches = function(symbol, minVocabSymbol, maxVocabSymbol) { - return false; -}; - -ActionTransition.prototype.toString = function() { - return "action_" + this.ruleIndex + ":" + this.actionIndex; -}; - // A transition containing a set of values. 
-function SetTransition(target, set) { - Transition.call(this, target); - this.serializationType = Transition.SET; - if (set !==undefined && set !==null) { - this.label = set; - } else { - this.label = new IntervalSet(); - this.label.addOne(Token.INVALID_TYPE); +class SetTransition extends Transition { + constructor(target, set) { + super(target); + this.serializationType = Transition.SET; + if (set !==undefined && set !==null) { + this.label = set; + } else { + this.label = new IntervalSet(); + this.label.addOne(Token.INVALID_TYPE); + } + } + + matches(symbol, minVocabSymbol, maxVocabSymbol) { + return this.label.contains(symbol); + } + + toString() { + return this.label.toString(); } - return this; } -SetTransition.prototype = Object.create(Transition.prototype); -SetTransition.prototype.constructor = SetTransition; +class NotSetTransition extends SetTransition { + constructor(target, set) { + super(target, set); + this.serializationType = Transition.NOT_SET; + } -SetTransition.prototype.matches = function(symbol, minVocabSymbol, maxVocabSymbol) { - return this.label.contains(symbol); -}; + matches(symbol, minVocabSymbol, maxVocabSymbol) { + return symbol >= minVocabSymbol && symbol <= maxVocabSymbol && + !super.matches(symbol, minVocabSymbol, maxVocabSymbol); + } - -SetTransition.prototype.toString = function() { - return this.label.toString(); -}; - -function NotSetTransition(target, set) { - SetTransition.call(this, target, set); - this.serializationType = Transition.NOT_SET; - return this; + toString() { + return '~' + super.toString(); + } } -NotSetTransition.prototype = Object.create(SetTransition.prototype); -NotSetTransition.prototype.constructor = NotSetTransition; +class WildcardTransition extends Transition { + constructor(target) { + super(target); + this.serializationType = Transition.WILDCARD; + } -NotSetTransition.prototype.matches = function(symbol, minVocabSymbol, maxVocabSymbol) { - return symbol >= minVocabSymbol && symbol <= maxVocabSymbol && 
- !SetTransition.prototype.matches.call(this, symbol, minVocabSymbol, maxVocabSymbol); -}; + matches(symbol, minVocabSymbol, maxVocabSymbol) { + return symbol >= minVocabSymbol && symbol <= maxVocabSymbol; + } -NotSetTransition.prototype.toString = function() { - return '~' + SetTransition.prototype.toString.call(this); -}; - -function WildcardTransition(target) { - Transition.call(this, target); - this.serializationType = Transition.WILDCARD; - return this; + toString() { + return "."; + } } -WildcardTransition.prototype = Object.create(Transition.prototype); -WildcardTransition.prototype.constructor = WildcardTransition; +class PrecedencePredicateTransition extends AbstractPredicateTransition { + constructor(target, precedence) { + super(target); + this.serializationType = Transition.PRECEDENCE; + this.precedence = precedence; + this.isEpsilon = true; + } + matches(symbol, minVocabSymbol, maxVocabSymbol) { + return false; + } -WildcardTransition.prototype.matches = function(symbol, minVocabSymbol, maxVocabSymbol) { - return symbol >= minVocabSymbol && symbol <= maxVocabSymbol; -}; + getPredicate() { + return new PrecedencePredicate(this.precedence); + } -WildcardTransition.prototype.toString = function() { - return "."; -}; - -function PrecedencePredicateTransition(target, precedence) { - AbstractPredicateTransition.call(this, target); - this.serializationType = Transition.PRECEDENCE; - this.precedence = precedence; - this.isEpsilon = true; - return this; + toString() { + return this.precedence + " >= _p"; + } } -PrecedencePredicateTransition.prototype = Object.create(AbstractPredicateTransition.prototype); -PrecedencePredicateTransition.prototype.constructor = PrecedencePredicateTransition; - -PrecedencePredicateTransition.prototype.matches = function(symbol, minVocabSymbol, maxVocabSymbol) { - return false; -}; - -PrecedencePredicateTransition.prototype.getPredicate = function() { - return new PrecedencePredicate(this.precedence); -}; - 
-PrecedencePredicateTransition.prototype.toString = function() { - return this.precedence + " >= _p"; -}; - -exports.Transition = Transition; -exports.AtomTransition = AtomTransition; -exports.SetTransition = SetTransition; -exports.NotSetTransition = NotSetTransition; -exports.RuleTransition = RuleTransition; -exports.ActionTransition = ActionTransition; -exports.EpsilonTransition = EpsilonTransition; -exports.RangeTransition = RangeTransition; -exports.WildcardTransition = WildcardTransition; -exports.PredicateTransition = PredicateTransition; -exports.PrecedencePredicateTransition = PrecedencePredicateTransition; -exports.AbstractPredicateTransition = AbstractPredicateTransition; \ No newline at end of file +module.exports = { + Transition, + AtomTransition, + SetTransition, + NotSetTransition, + RuleTransition, + ActionTransition, + EpsilonTransition, + RangeTransition, + WildcardTransition, + PredicateTransition, + PrecedencePredicateTransition, + AbstractPredicateTransition +} diff --git a/runtime/JavaScript/src/antlr4/atn/index.js b/runtime/JavaScript/src/antlr4/atn/index.js index e0eabe2ea..5f6dcdce5 100644 --- a/runtime/JavaScript/src/antlr4/atn/index.js +++ b/runtime/JavaScript/src/antlr4/atn/index.js @@ -3,8 +3,8 @@ * can be found in the LICENSE.txt file in the project root. 
*/ -exports.ATN = require('./ATN').ATN; -exports.ATNDeserializer = require('./ATNDeserializer').ATNDeserializer; -exports.LexerATNSimulator = require('./LexerATNSimulator').LexerATNSimulator; -exports.ParserATNSimulator = require('./ParserATNSimulator').ParserATNSimulator; -exports.PredictionMode = require('./PredictionMode').PredictionMode; +exports.ATN = require('./ATN'); +exports.ATNDeserializer = require('./ATNDeserializer'); +exports.LexerATNSimulator = require('./LexerATNSimulator'); +exports.ParserATNSimulator = require('./ParserATNSimulator'); +exports.PredictionMode = require('./PredictionMode'); diff --git a/runtime/JavaScript/src/antlr4/dfa/DFA.js b/runtime/JavaScript/src/antlr4/dfa/DFA.js index 57879d6ac..770e929e9 100644 --- a/runtime/JavaScript/src/antlr4/dfa/DFA.js +++ b/runtime/JavaScript/src/antlr4/dfa/DFA.js @@ -1,153 +1,162 @@ -// /* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. * Use of this file is governed by the BSD 3-clause license that * can be found in the LICENSE.txt file in the project root. */ -var Set = require("../Utils").Set; -var DFAState = require('./DFAState').DFAState; -var StarLoopEntryState = require('../atn/ATNState').StarLoopEntryState; -var ATNConfigSet = require('./../atn/ATNConfigSet').ATNConfigSet; -var DFASerializer = require('./DFASerializer').DFASerializer; -var LexerDFASerializer = require('./DFASerializer').LexerDFASerializer; +const {Set} = require("../Utils"); +const {DFAState} = require('./DFAState'); +const {StarLoopEntryState} = require('../atn/ATNState'); +const {ATNConfigSet} = require('./../atn/ATNConfigSet'); +const {DFASerializer} = require('./DFASerializer'); +const {LexerDFASerializer} = require('./DFASerializer'); - - -function DFA(atnStartState, decision) { - if (decision === undefined) { - decision = 0; - } - // From which ATN state did we create this DFA? - this.atnStartState = atnStartState; - this.decision = decision; - // A set of all DFA states. 
Use {@link Map} so we can get old state back - // ({@link Set} only allows you to see if it's there). - this._states = new Set(); - this.s0 = null; - // {@code true} if this DFA is for a precedence decision; otherwise, - // {@code false}. This is the backing field for {@link //isPrecedenceDfa}, - // {@link //setPrecedenceDfa}. - this.precedenceDfa = false; - if (atnStartState instanceof StarLoopEntryState) - { - if (atnStartState.isPrecedenceDecision) { - this.precedenceDfa = true; - var precedenceState = new DFAState(null, new ATNConfigSet()); - precedenceState.edges = []; - precedenceState.isAcceptState = false; - precedenceState.requiresFullContext = false; - this.s0 = precedenceState; - } - } - return this; -} - -// Get the start state for a specific precedence value. -// -// @param precedence The current precedence. -// @return The start state corresponding to the specified precedence, or -// {@code null} if no start state exists for the specified precedence. -// -// @throws IllegalStateException if this is not a precedence DFA. -// @see //isPrecedenceDfa() - -DFA.prototype.getPrecedenceStartState = function(precedence) { - if (!(this.precedenceDfa)) { - throw ("Only precedence DFAs may contain a precedence start state."); - } - // s0.edges is never null for a precedence DFA - if (precedence < 0 || precedence >= this.s0.edges.length) { - return null; - } - return this.s0.edges[precedence] || null; -}; - -// Set the start state for a specific precedence value. -// -// @param precedence The current precedence. -// @param startState The start state corresponding to the specified -// precedence. -// -// @throws IllegalStateException if this is not a precedence DFA. -// @see //isPrecedenceDfa() -// -DFA.prototype.setPrecedenceStartState = function(precedence, startState) { - if (!(this.precedenceDfa)) { - throw ("Only precedence DFAs may contain a precedence start state."); - } - if (precedence < 0) { - return; - } - - // synchronization on s0 here is ok. 
when the DFA is turned into a - // precedence DFA, s0 will be initialized once and not updated again - // s0.edges is never null for a precedence DFA - this.s0.edges[precedence] = startState; -}; - -// -// Sets whether this is a precedence DFA. If the specified value differs -// from the current DFA configuration, the following actions are taken; -// otherwise no changes are made to the current DFA. -// -//
              -//
            • The {@link //states} map is cleared
            • -//
            • If {@code precedenceDfa} is {@code false}, the initial state -// {@link //s0} is set to {@code null}; otherwise, it is initialized to a new -// {@link DFAState} with an empty outgoing {@link DFAState//edges} array to -// store the start states for individual precedence values.
            • -//
            • The {@link //precedenceDfa} field is updated
            • -//
            -// -// @param precedenceDfa {@code true} if this is a precedence DFA; otherwise, -// {@code false} - -DFA.prototype.setPrecedenceDfa = function(precedenceDfa) { - if (this.precedenceDfa!==precedenceDfa) { - this._states = new DFAStatesSet(); - if (precedenceDfa) { - var precedenceState = new DFAState(null, new ATNConfigSet()); - precedenceState.edges = []; - precedenceState.isAcceptState = false; - precedenceState.requiresFullContext = false; - this.s0 = precedenceState; - } else { - this.s0 = null; +class DFA { + constructor(atnStartState, decision) { + if (decision === undefined) { + decision = 0; + } + /** + * From which ATN state did we create this DFA? + */ + this.atnStartState = atnStartState; + this.decision = decision; + /** + * A set of all DFA states. Use {@link Map} so we can get old state back + * ({@link Set} only allows you to see if it's there). + */ + this._states = new Set(); + this.s0 = null; + /** + * {@code true} if this DFA is for a precedence decision; otherwise, + * {@code false}. This is the backing field for {@link //isPrecedenceDfa}, + * {@link //setPrecedenceDfa} + */ + this.precedenceDfa = false; + if (atnStartState instanceof StarLoopEntryState) + { + if (atnStartState.isPrecedenceDecision) { + this.precedenceDfa = true; + const precedenceState = new DFAState(null, new ATNConfigSet()); + precedenceState.edges = []; + precedenceState.isAcceptState = false; + precedenceState.requiresFullContext = false; + this.s0 = precedenceState; + } } - this.precedenceDfa = precedenceDfa; } -}; -Object.defineProperty(DFA.prototype, "states", { - get : function() { + /** + * Get the start state for a specific precedence value. + * + * @param precedence The current precedence. + * @return The start state corresponding to the specified precedence, or + * {@code null} if no start state exists for the specified precedence. + * + * @throws IllegalStateException if this is not a precedence DFA. 
+ * @see //isPrecedenceDfa() + */ + getPrecedenceStartState(precedence) { + if (!(this.precedenceDfa)) { + throw ("Only precedence DFAs may contain a precedence start state."); + } + // s0.edges is never null for a precedence DFA + if (precedence < 0 || precedence >= this.s0.edges.length) { + return null; + } + return this.s0.edges[precedence] || null; + } + + /** + * Set the start state for a specific precedence value. + * + * @param precedence The current precedence. + * @param startState The start state corresponding to the specified + * precedence. + * + * @throws IllegalStateException if this is not a precedence DFA. + * @see //isPrecedenceDfa() + */ + setPrecedenceStartState(precedence, startState) { + if (!(this.precedenceDfa)) { + throw ("Only precedence DFAs may contain a precedence start state."); + } + if (precedence < 0) { + return; + } + + /** + * synchronization on s0 here is ok. when the DFA is turned into a + * precedence DFA, s0 will be initialized once and not updated again + * s0.edges is never null for a precedence DFA + */ + this.s0.edges[precedence] = startState; + } + + /** + * Sets whether this is a precedence DFA. If the specified value differs + * from the current DFA configuration, the following actions are taken; + * otherwise no changes are made to the current DFA. + * + *
              + *
            • The {@link //states} map is cleared
            • + *
            • If {@code precedenceDfa} is {@code false}, the initial state + * {@link //s0} is set to {@code null}; otherwise, it is initialized to a new + * {@link DFAState} with an empty outgoing {@link DFAState//edges} array to + * store the start states for individual precedence values.
            • + *
            • The {@link //precedenceDfa} field is updated
            • + *
            + * + * @param precedenceDfa {@code true} if this is a precedence DFA; otherwise, + * {@code false} + */ + setPrecedenceDfa(precedenceDfa) { + if (this.precedenceDfa!==precedenceDfa) { + this._states = new DFAStatesSet(); + if (precedenceDfa) { + const precedenceState = new DFAState(null, new ATNConfigSet()); + precedenceState.edges = []; + precedenceState.isAcceptState = false; + precedenceState.requiresFullContext = false; + this.s0 = precedenceState; + } else { + this.s0 = null; + } + this.precedenceDfa = precedenceDfa; + } + } + + /** + * Return a list of all states in this DFA, ordered by state number. + */ + sortedStates() { + const list = this._states.values(); + return list.sort(function(a, b) { + return a.stateNumber - b.stateNumber; + }); + } + + toString(literalNames, symbolicNames) { + literalNames = literalNames || null; + symbolicNames = symbolicNames || null; + if (this.s0 === null) { + return ""; + } + const serializer = new DFASerializer(this, literalNames, symbolicNames); + return serializer.toString(); + } + + toLexerString() { + if (this.s0 === null) { + return ""; + } + const serializer = new LexerDFASerializer(this); + return serializer.toString(); + } + + get states(){ return this._states; } -}); +} -// Return a list of all states in this DFA, ordered by state number. 
-DFA.prototype.sortedStates = function() { - var list = this._states.values(); - return list.sort(function(a, b) { - return a.stateNumber - b.stateNumber; - }); -}; -DFA.prototype.toString = function(literalNames, symbolicNames) { - literalNames = literalNames || null; - symbolicNames = symbolicNames || null; - if (this.s0 === null) { - return ""; - } - var serializer = new DFASerializer(this, literalNames, symbolicNames); - return serializer.toString(); -}; - -DFA.prototype.toLexerString = function() { - if (this.s0 === null) { - return ""; - } - var serializer = new LexerDFASerializer(this); - return serializer.toString(); -}; - -exports.DFA = DFA; +module.exports = DFA; diff --git a/runtime/JavaScript/src/antlr4/dfa/DFASerializer.js b/runtime/JavaScript/src/antlr4/dfa/DFASerializer.js index d1eb33f68..a9d7d1bb1 100644 --- a/runtime/JavaScript/src/antlr4/dfa/DFASerializer.js +++ b/runtime/JavaScript/src/antlr4/dfa/DFASerializer.js @@ -3,77 +3,75 @@ * can be found in the LICENSE.txt file in the project root. */ -// A DFA walker that knows how to dump them to serialized strings.#/ +/** + * A DFA walker that knows how to dump them to serialized strings. + */ +class DFASerializer { + constructor(dfa, literalNames, symbolicNames) { + this.dfa = dfa; + this.literalNames = literalNames || []; + this.symbolicNames = symbolicNames || []; + } - -function DFASerializer(dfa, literalNames, symbolicNames) { - this.dfa = dfa; - this.literalNames = literalNames || []; - this.symbolicNames = symbolicNames || []; - return this; -} - -DFASerializer.prototype.toString = function() { - if(this.dfa.s0 === null) { - return null; - } - var buf = ""; - var states = this.dfa.sortedStates(); - for(var i=0;i"); - buf = buf.concat(this.getStateString(t)); - buf = buf.concat('\n'); - } - } + toString() { + if(this.dfa.s0 === null) { + return null; } - } - return buf.length===0 ? 
null : buf; -}; - -DFASerializer.prototype.getEdgeLabel = function(i) { - if (i===0) { - return "EOF"; - } else if(this.literalNames !==null || this.symbolicNames!==null) { - return this.literalNames[i-1] || this.symbolicNames[i-1]; - } else { - return String.fromCharCode(i-1); + let buf = ""; + const states = this.dfa.sortedStates(); + for(let i=0; i"); + buf = buf.concat(this.getStateString(t)); + buf = buf.concat('\n'); + } + } + } + } + return buf.length===0 ? null : buf; } -}; -DFASerializer.prototype.getStateString = function(s) { - var baseStateStr = ( s.isAcceptState ? ":" : "") + "s" + s.stateNumber + ( s.requiresFullContext ? "^" : ""); - if(s.isAcceptState) { - if (s.predicates !== null) { - return baseStateStr + "=>" + s.predicates.toString(); + getEdgeLabel(i) { + if (i===0) { + return "EOF"; + } else if(this.literalNames !==null || this.symbolicNames!==null) { + return this.literalNames[i-1] || this.symbolicNames[i-1]; } else { - return baseStateStr + "=>" + s.prediction.toString(); + return String.fromCharCode(i-1); } - } else { - return baseStateStr; } -}; -function LexerDFASerializer(dfa) { - DFASerializer.call(this, dfa, null); - return this; + getStateString(s) { + const baseStateStr = ( s.isAcceptState ? ":" : "") + "s" + s.stateNumber + ( s.requiresFullContext ? 
"^" : ""); + if(s.isAcceptState) { + if (s.predicates !== null) { + return baseStateStr + "=>" + s.predicates.toString(); + } else { + return baseStateStr + "=>" + s.prediction.toString(); + } + } else { + return baseStateStr; + } + } } -LexerDFASerializer.prototype = Object.create(DFASerializer.prototype); -LexerDFASerializer.prototype.constructor = LexerDFASerializer; +class LexerDFASerializer extends DFASerializer { + constructor(dfa) { + super(dfa, null); + } -LexerDFASerializer.prototype.getEdgeLabel = function(i) { - return "'" + String.fromCharCode(i) + "'"; -}; + getEdgeLabel(i) { + return "'" + String.fromCharCode(i) + "'"; + } +} -exports.DFASerializer = DFASerializer; -exports.LexerDFASerializer = LexerDFASerializer; +module.exports = { DFASerializer , LexerDFASerializer }; diff --git a/runtime/JavaScript/src/antlr4/dfa/DFAState.js b/runtime/JavaScript/src/antlr4/dfa/DFAState.js index b80df0365..49a1ee3d4 100644 --- a/runtime/JavaScript/src/antlr4/dfa/DFAState.js +++ b/runtime/JavaScript/src/antlr4/dfa/DFAState.js @@ -1,146 +1,156 @@ -// /* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. * Use of this file is governed by the BSD 3-clause license that * can be found in the LICENSE.txt file in the project root. */ -/// -var ATNConfigSet = require('./../atn/ATNConfigSet').ATNConfigSet; -var Utils = require('./../Utils'); -var Hash = Utils.Hash; -var Set = Utils.Set; +const {ATNConfigSet} = require('./../atn/ATNConfigSet'); +const {Hash, Set} = require('./../Utils'); -// Map a predicate to a predicted alternative./// +/** + * Map a predicate to a predicted alternative. 
+ */ +class PredPrediction { + constructor(pred, alt) { + this.alt = alt; + this.pred = pred; + } -function PredPrediction(pred, alt) { - this.alt = alt; - this.pred = pred; - return this; + toString() { + return "(" + this.pred + ", " + this.alt + ")"; + } } -PredPrediction.prototype.toString = function() { - return "(" + this.pred + ", " + this.alt + ")"; -}; - -// A DFA state represents a set of possible ATN configurations. -// As Aho, Sethi, Ullman p. 117 says "The DFA uses its state -// to keep track of all possible states the ATN can be in after -// reading each input symbol. That is to say, after reading -// input a1a2..an, the DFA is in a state that represents the -// subset T of the states of the ATN that are reachable from the -// ATN's start state along some path labeled a1a2..an." -// In conventional NFA→DFA conversion, therefore, the subset T -// would be a bitset representing the set of states the -// ATN could be in. We need to track the alt predicted by each -// state as well, however. More importantly, we need to maintain -// a stack of states, tracking the closure operations as they -// jump from rule to rule, emulating rule invocations (method calls). -// I have to add a stack to simulate the proper lookahead sequences for -// the underlying LL grammar from which the ATN was derived. -// -//

            I use a set of ATNConfig objects not simple states. An ATNConfig -// is both a state (ala normal conversion) and a RuleContext describing -// the chain of rules (if any) followed to arrive at that state.

            -// -//

            A DFA state may have multiple references to a particular state, -// but with different ATN contexts (with same or different alts) -// meaning that state was reached via a different set of rule invocations.

            -// / - -function DFAState(stateNumber, configs) { - if (stateNumber === null) { - stateNumber = -1; +/** + * A DFA state represents a set of possible ATN configurations. + * As Aho, Sethi, Ullman p. 117 says "The DFA uses its state + * to keep track of all possible states the ATN can be in after + * reading each input symbol. That is to say, after reading + * input a1a2..an, the DFA is in a state that represents the + * subset T of the states of the ATN that are reachable from the + * ATN's start state along some path labeled a1a2..an." + * In conventional NFA→DFA conversion, therefore, the subset T + * would be a bitset representing the set of states the + * ATN could be in. We need to track the alt predicted by each + * state as well, however. More importantly, we need to maintain + * a stack of states, tracking the closure operations as they + * jump from rule to rule, emulating rule invocations (method calls). + * I have to add a stack to simulate the proper lookahead sequences for + * the underlying LL grammar from which the ATN was derived. + * + *

            I use a set of ATNConfig objects not simple states. An ATNConfig + * is both a state (ala normal conversion) and a RuleContext describing + * the chain of rules (if any) followed to arrive at that state.

            + * + *

            A DFA state may have multiple references to a particular state, + * but with different ATN contexts (with same or different alts) + * meaning that state was reached via a different set of rule invocations.

            + */ +class DFAState { + constructor(stateNumber, configs) { + if (stateNumber === null) { + stateNumber = -1; + } + if (configs === null) { + configs = new ATNConfigSet(); + } + this.stateNumber = stateNumber; + this.configs = configs; + /** + * {@code edges[symbol]} points to target of symbol. Shift up by 1 so (-1) + * {@link Token//EOF} maps to {@code edges[0]}. + */ + this.edges = null; + this.isAcceptState = false; + /** + * if accept state, what ttype do we match or alt do we predict? + * This is set to {@link ATN//INVALID_ALT_NUMBER} when {@link//predicates} + * {@code !=null} or {@link //requiresFullContext}. + */ + this.prediction = 0; + this.lexerActionExecutor = null; + /** + * Indicates that this state was created during SLL prediction that + * discovered a conflict between the configurations in the state. Future + * {@link ParserATNSimulator//execATN} invocations immediately jumped doing + * full context prediction if this field is true. + */ + this.requiresFullContext = false; + /** + * During SLL parsing, this is a list of predicates associated with the + * ATN configurations of the DFA state. When we have predicates, + * {@link //requiresFullContext} is {@code false} since full context + * prediction evaluates predicates + * on-the-fly. If this is not null, then {@link //prediction} is + * {@link ATN//INVALID_ALT_NUMBER}. + * + *

            We only use these for non-{@link //requiresFullContext} but + * conflicting states. That + * means we know from the context (it's $ or we don't dip into outer + * context) that it's an ambiguity not a conflict.

            + * + *

            This list is computed by {@link + * ParserATNSimulator//predicateDFAState}.

            + */ + this.predicates = null; + return this; } - if (configs === null) { - configs = new ATNConfigSet(); - } - this.stateNumber = stateNumber; - this.configs = configs; - // {@code edges[symbol]} points to target of symbol. Shift up by 1 so (-1) - // {@link Token//EOF} maps to {@code edges[0]}. - this.edges = null; - this.isAcceptState = false; - // if accept state, what ttype do we match or alt do we predict? - // This is set to {@link ATN//INVALID_ALT_NUMBER} when {@link - // //predicates}{@code !=null} or - // {@link //requiresFullContext}. - this.prediction = 0; - this.lexerActionExecutor = null; - // Indicates that this state was created during SLL prediction that - // discovered a conflict between the configurations in the state. Future - // {@link ParserATNSimulator//execATN} invocations immediately jumped doing - // full context prediction if this field is true. - this.requiresFullContext = false; - // During SLL parsing, this is a list of predicates associated with the - // ATN configurations of the DFA state. When we have predicates, - // {@link //requiresFullContext} is {@code false} since full context - // prediction evaluates predicates - // on-the-fly. If this is not null, then {@link //prediction} is - // {@link ATN//INVALID_ALT_NUMBER}. - // - //

            We only use these for non-{@link //requiresFullContext} but - // conflicting states. That - // means we know from the context (it's $ or we don't dip into outer - // context) that it's an ambiguity not a conflict.

            - // - //

            This list is computed by {@link - // ParserATNSimulator//predicateDFAState}.

            - this.predicates = null; - return this; -} -// Get the set of all alts mentioned by all ATN configurations in this -// DFA state. -DFAState.prototype.getAltSet = function() { - var alts = new Set(); - if (this.configs !== null) { - for (var i = 0; i < this.configs.length; i++) { - var c = this.configs[i]; - alts.add(c.alt); + /** + * Get the set of all alts mentioned by all ATN configurations in this + * DFA state. + */ + getAltSet() { + const alts = new Set(); + if (this.configs !== null) { + for (let i = 0; i < this.configs.length; i++) { + const c = this.configs[i]; + alts.add(c.alt); + } + } + if (alts.length === 0) { + return null; + } else { + return alts; } } - if (alts.length === 0) { - return null; - } else { - return alts; + + /** + * Two {@link DFAState} instances are equal if their ATN configuration sets + * are the same. This method is used to see if a state already exists. + * + *

            Because the number of alternatives and number of ATN configurations are + * finite, there is a finite number of DFA states that can be processed. + * This is necessary to show that the algorithm terminates.

            + * + *

            Cannot test the DFA state numbers here because in + * {@link ParserATNSimulator//addDFAState} we need to know if any other state + * exists that has this exact set of ATN configurations. The + * {@link //stateNumber} is irrelevant.

            + */ + equals(other) { + // compare set of ATN configurations in this set with other + return this === other || + (other instanceof DFAState && + this.configs.equals(other.configs)); } -}; -// Two {@link DFAState} instances are equal if their ATN configuration sets -// are the same. This method is used to see if a state already exists. -// -//

            Because the number of alternatives and number of ATN configurations are -// finite, there is a finite number of DFA states that can be processed. -// This is necessary to show that the algorithm terminates.

            -// -//

            Cannot test the DFA state numbers here because in -// {@link ParserATNSimulator//addDFAState} we need to know if any other state -// exists that has this exact set of ATN configurations. The -// {@link //stateNumber} is irrelevant.

            -DFAState.prototype.equals = function(other) { - // compare set of ATN configurations in this set with other - return this === other || - (other instanceof DFAState && - this.configs.equals(other.configs)); -}; + toString() { + let s = "" + this.stateNumber + ":" + this.configs; + if(this.isAcceptState) { + s = s + "=>"; + if (this.predicates !== null) + s = s + this.predicates; + else + s = s + this.prediction; + } + return s; + } -DFAState.prototype.toString = function() { - var s = "" + this.stateNumber + ":" + this.configs; - if(this.isAcceptState) { - s = s + "=>"; - if (this.predicates !== null) - s = s + this.predicates; - else - s = s + this.prediction; - } - return s; -}; + hashCode() { + const hash = new Hash(); + hash.update(this.configs); + return hash.finish(); + } +} -DFAState.prototype.hashCode = function() { - var hash = new Hash(); - hash.update(this.configs); - return hash.finish(); -}; - -exports.DFAState = DFAState; -exports.PredPrediction = PredPrediction; +module.exports = { DFAState, PredPrediction }; diff --git a/runtime/JavaScript/src/antlr4/dfa/index.js b/runtime/JavaScript/src/antlr4/dfa/index.js index fbe38cef1..9a98bd076 100644 --- a/runtime/JavaScript/src/antlr4/dfa/index.js +++ b/runtime/JavaScript/src/antlr4/dfa/index.js @@ -3,7 +3,7 @@ * can be found in the LICENSE.txt file in the project root. 
*/ -exports.DFA = require('./DFA').DFA; +exports.DFA = require('./DFA'); exports.DFASerializer = require('./DFASerializer').DFASerializer; exports.LexerDFASerializer = require('./DFASerializer').LexerDFASerializer; exports.PredPrediction = require('./DFAState').PredPrediction; diff --git a/runtime/JavaScript/src/antlr4/error/DiagnosticErrorListener.js b/runtime/JavaScript/src/antlr4/error/DiagnosticErrorListener.js index 55a948ac9..4536fa484 100644 --- a/runtime/JavaScript/src/antlr4/error/DiagnosticErrorListener.js +++ b/runtime/JavaScript/src/antlr4/error/DiagnosticErrorListener.js @@ -1,111 +1,105 @@ -// /* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. * Use of this file is governed by the BSD 3-clause license that * can be found in the LICENSE.txt file in the project root. */ -// -// -// This implementation of {@link ANTLRErrorListener} can be used to identify -// certain potential correctness and performance problems in grammars. "Reports" -// are made by calling {@link Parser//notifyErrorListeners} with the appropriate -// message. -// -//
              -//
            • Ambiguities: These are cases where more than one path through the -// grammar can match the input.
            • -//
            • Weak context sensitivity: These are cases where full-context -// prediction resolved an SLL conflict to a unique alternative which equaled the -// minimum alternative of the SLL conflict.
            • -//
            • Strong (forced) context sensitivity: These are cases where the -// full-context prediction resolved an SLL conflict to a unique alternative, -// and the minimum alternative of the SLL conflict was found to not be -// a truly viable alternative. Two-stage parsing cannot be used for inputs where -// this situation occurs.
            • -//
            +const {BitSet} = require('./../Utils'); +const {ErrorListener} = require('./ErrorListener') +const {Interval} = require('./../IntervalSet') -var BitSet = require('./../Utils').BitSet; -var ErrorListener = require('./ErrorListener').ErrorListener; -var Interval = require('./../IntervalSet').Interval; -function DiagnosticErrorListener(exactOnly) { - ErrorListener.call(this); - exactOnly = exactOnly || true; - // whether all ambiguities or only exact ambiguities are reported. - this.exactOnly = exactOnly; - return this; -} - -DiagnosticErrorListener.prototype = Object.create(ErrorListener.prototype); -DiagnosticErrorListener.prototype.constructor = DiagnosticErrorListener; - -DiagnosticErrorListener.prototype.reportAmbiguity = function(recognizer, dfa, - startIndex, stopIndex, exact, ambigAlts, configs) { - if (this.exactOnly && !exact) { - return; +/** + * This implementation of {@link ANTLRErrorListener} can be used to identify + * certain potential correctness and performance problems in grammars. "Reports" + * are made by calling {@link Parser//notifyErrorListeners} with the appropriate + * message. + * + *
              + *
            • Ambiguities: These are cases where more than one path through the + * grammar can match the input.
            • + *
            • Weak context sensitivity: These are cases where full-context + * prediction resolved an SLL conflict to a unique alternative which equaled the + * minimum alternative of the SLL conflict.
            • + *
            • Strong (forced) context sensitivity: These are cases where the + * full-context prediction resolved an SLL conflict to a unique alternative, + * and the minimum alternative of the SLL conflict was found to not be + * a truly viable alternative. Two-stage parsing cannot be used for inputs where + * this situation occurs.
            • + *
            + */ +class DiagnosticErrorListener extends ErrorListener { + constructor(exactOnly) { + super(); + exactOnly = exactOnly || true; + // whether all ambiguities or only exact ambiguities are reported. + this.exactOnly = exactOnly; } - var msg = "reportAmbiguity d=" + + + reportAmbiguity(recognizer, dfa, startIndex, stopIndex, exact, ambigAlts, configs) { + if (this.exactOnly && !exact) { + return; + } + const msg = "reportAmbiguity d=" + this.getDecisionDescription(recognizer, dfa) + ": ambigAlts=" + this.getConflictingAlts(ambigAlts, configs) + ", input='" + - recognizer.getTokenStream().getText(new Interval(startIndex, stopIndex)) + "'"; - recognizer.notifyErrorListeners(msg); -}; + recognizer.getTokenStream().getText(new Interval(startIndex, stopIndex)) + "'" + recognizer.notifyErrorListeners(msg); + } -DiagnosticErrorListener.prototype.reportAttemptingFullContext = function( - recognizer, dfa, startIndex, stopIndex, conflictingAlts, configs) { - var msg = "reportAttemptingFullContext d=" + + reportAttemptingFullContext(recognizer, dfa, startIndex, stopIndex, conflictingAlts, configs) { + const msg = "reportAttemptingFullContext d=" + this.getDecisionDescription(recognizer, dfa) + ", input='" + - recognizer.getTokenStream().getText(new Interval(startIndex, stopIndex)) + "'"; - recognizer.notifyErrorListeners(msg); -}; + recognizer.getTokenStream().getText(new Interval(startIndex, stopIndex)) + "'" + recognizer.notifyErrorListeners(msg); + } -DiagnosticErrorListener.prototype.reportContextSensitivity = function( - recognizer, dfa, startIndex, stopIndex, prediction, configs) { - var msg = "reportContextSensitivity d=" + + reportContextSensitivity(recognizer, dfa, startIndex, stopIndex, prediction, configs) { + const msg = "reportContextSensitivity d=" + this.getDecisionDescription(recognizer, dfa) + ", input='" + - recognizer.getTokenStream().getText(new Interval(startIndex, stopIndex)) + "'"; - recognizer.notifyErrorListeners(msg); -}; - 
-DiagnosticErrorListener.prototype.getDecisionDescription = function(recognizer, dfa) { - var decision = dfa.decision; - var ruleIndex = dfa.atnStartState.ruleIndex; - - var ruleNames = recognizer.ruleNames; - if (ruleIndex < 0 || ruleIndex >= ruleNames.length) { - return "" + decision; + recognizer.getTokenStream().getText(new Interval(startIndex, stopIndex)) + "'" + recognizer.notifyErrorListeners(msg); } - var ruleName = ruleNames[ruleIndex] || null; - if (ruleName === null || ruleName.length === 0) { - return "" + decision; - } - return "" + decision + " (" + ruleName + ")"; -}; -// -// Computes the set of conflicting or ambiguous alternatives from a -// configuration set, if that information was not already provided by the -// parser. -// -// @param reportedAlts The set of conflicting or ambiguous alternatives, as -// reported by the parser. -// @param configs The conflicting or ambiguous configuration set. -// @return Returns {@code reportedAlts} if it is not {@code null}, otherwise -// returns the set of alternatives represented in {@code configs}. 
-// -DiagnosticErrorListener.prototype.getConflictingAlts = function(reportedAlts, configs) { - if (reportedAlts !== null) { - return reportedAlts; - } - var result = new BitSet(); - for (var i = 0; i < configs.items.length; i++) { - result.add(configs.items[i].alt); - } - return "{" + result.values().join(", ") + "}"; -}; + getDecisionDescription(recognizer, dfa) { + const decision = dfa.decision + const ruleIndex = dfa.atnStartState.ruleIndex -exports.DiagnosticErrorListener = DiagnosticErrorListener; \ No newline at end of file + const ruleNames = recognizer.ruleNames + if (ruleIndex < 0 || ruleIndex >= ruleNames.length) { + return "" + decision; + } + const ruleName = ruleNames[ruleIndex] || null + if (ruleName === null || ruleName.length === 0) { + return "" + decision; + } + return `${decision} (${ruleName})`; + } + + /** + * Computes the set of conflicting or ambiguous alternatives from a + * configuration set, if that information was not already provided by the + * parser. + * + * @param reportedAlts The set of conflicting or ambiguous alternatives, as + * reported by the parser. + * @param configs The conflicting or ambiguous configuration set. + * @return Returns {@code reportedAlts} if it is not {@code null}, otherwise + * returns the set of alternatives represented in {@code configs}. + */ + getConflictingAlts(reportedAlts, configs) { + if (reportedAlts !== null) { + return reportedAlts; + } + const result = new BitSet() + for (let i = 0; i < configs.items.length; i++) { + result.add(configs.items[i].alt); + } + return `{${result.values().join(", ")}}`; + } +} + +module.exports = DiagnosticErrorListener diff --git a/runtime/JavaScript/src/antlr4/error/ErrorListener.js b/runtime/JavaScript/src/antlr4/error/ErrorListener.js index f0e0d56a6..41da189f1 100644 --- a/runtime/JavaScript/src/antlr4/error/ErrorListener.js +++ b/runtime/JavaScript/src/antlr4/error/ErrorListener.js @@ -1,87 +1,82 @@ -// /* Copyright (c) 2012-2017 The ANTLR Project. 
All rights reserved. * Use of this file is governed by the BSD 3-clause license that * can be found in the LICENSE.txt file in the project root. */ -// Provides an empty default implementation of {@link ANTLRErrorListener}. The -// default implementation of each method does nothing, but can be overridden as -// necessary. +/** + * Provides an empty default implementation of {@link ANTLRErrorListener}. The + * default implementation of each method does nothing, but can be overridden as + * necessary. + */ +class ErrorListener { + syntaxError(recognizer, offendingSymbol, line, column, msg, e) { + } -function ErrorListener() { - return this; + reportAmbiguity(recognizer, dfa, startIndex, stopIndex, exact, ambigAlts, configs) { + } + + reportAttemptingFullContext(recognizer, dfa, startIndex, stopIndex, conflictingAlts, configs) { + } + + reportContextSensitivity(recognizer, dfa, startIndex, stopIndex, prediction, configs) { + } } -ErrorListener.prototype.syntaxError = function(recognizer, offendingSymbol, line, column, msg, e) { -}; +/** + * {@inheritDoc} + * + *

            + * This implementation prints messages to {@link System//err} containing the + * values of {@code line}, {@code charPositionInLine}, and {@code msg} using + * the following format.

            + * + *
            + * line line:charPositionInLine msg
            + * 
            + * + */ +class ConsoleErrorListener extends ErrorListener { + constructor() { + super(); + } -ErrorListener.prototype.reportAmbiguity = function(recognizer, dfa, startIndex, stopIndex, exact, ambigAlts, configs) { -}; - -ErrorListener.prototype.reportAttemptingFullContext = function(recognizer, dfa, startIndex, stopIndex, conflictingAlts, configs) { -}; - -ErrorListener.prototype.reportContextSensitivity = function(recognizer, dfa, startIndex, stopIndex, prediction, configs) { -}; - -function ConsoleErrorListener() { - ErrorListener.call(this); - return this; + syntaxError(recognizer, offendingSymbol, line, column, msg, e) { + console.error("line " + line + ":" + column + " " + msg); + } } -ConsoleErrorListener.prototype = Object.create(ErrorListener.prototype); -ConsoleErrorListener.prototype.constructor = ConsoleErrorListener; -// -// Provides a default instance of {@link ConsoleErrorListener}. -// +/** + * Provides a default instance of {@link ConsoleErrorListener}. + */ ConsoleErrorListener.INSTANCE = new ConsoleErrorListener(); -// -// {@inheritDoc} -// -//

            -// This implementation prints messages to {@link System//err} containing the -// values of {@code line}, {@code charPositionInLine}, and {@code msg} using -// the following format.

            -// -//
            -// line line:charPositionInLine msg
            -// 
            -// -ConsoleErrorListener.prototype.syntaxError = function(recognizer, offendingSymbol, line, column, msg, e) { - console.error("line " + line + ":" + column + " " + msg); -}; - -function ProxyErrorListener(delegates) { - ErrorListener.call(this); - if (delegates===null) { - throw "delegates"; +class ProxyErrorListener extends ErrorListener { + constructor(delegates) { + super(); + if (delegates===null) { + throw "delegates"; + } + this.delegates = delegates; + return this; + } + + syntaxError(recognizer, offendingSymbol, line, column, msg, e) { + this.delegates.map(d => d.syntaxError(recognizer, offendingSymbol, line, column, msg, e)); + } + + reportAmbiguity(recognizer, dfa, startIndex, stopIndex, exact, ambigAlts, configs) { + this.delegates.map(d => d.reportAmbiguity(recognizer, dfa, startIndex, stopIndex, exact, ambigAlts, configs)); + } + + reportAttemptingFullContext(recognizer, dfa, startIndex, stopIndex, conflictingAlts, configs) { + this.delegates.map(d => d.reportAttemptingFullContext(recognizer, dfa, startIndex, stopIndex, conflictingAlts, configs)); + } + + reportContextSensitivity(recognizer, dfa, startIndex, stopIndex, prediction, configs) { + this.delegates.map(d => d.reportContextSensitivity(recognizer, dfa, startIndex, stopIndex, prediction, configs)); } - this.delegates = delegates; - return this; } -ProxyErrorListener.prototype = Object.create(ErrorListener.prototype); -ProxyErrorListener.prototype.constructor = ProxyErrorListener; - -ProxyErrorListener.prototype.syntaxError = function(recognizer, offendingSymbol, line, column, msg, e) { - this.delegates.map(function(d) { d.syntaxError(recognizer, offendingSymbol, line, column, msg, e); }); -}; - -ProxyErrorListener.prototype.reportAmbiguity = function(recognizer, dfa, startIndex, stopIndex, exact, ambigAlts, configs) { - this.delegates.map(function(d) { d.reportAmbiguity(recognizer, dfa, startIndex, stopIndex, exact, ambigAlts, configs); }); -}; - 
-ProxyErrorListener.prototype.reportAttemptingFullContext = function(recognizer, dfa, startIndex, stopIndex, conflictingAlts, configs) { - this.delegates.map(function(d) { d.reportAttemptingFullContext(recognizer, dfa, startIndex, stopIndex, conflictingAlts, configs); }); -}; - -ProxyErrorListener.prototype.reportContextSensitivity = function(recognizer, dfa, startIndex, stopIndex, prediction, configs) { - this.delegates.map(function(d) { d.reportContextSensitivity(recognizer, dfa, startIndex, stopIndex, prediction, configs); }); -}; - -exports.ErrorListener = ErrorListener; -exports.ConsoleErrorListener = ConsoleErrorListener; -exports.ProxyErrorListener = ProxyErrorListener; +module.exports = {ErrorListener, ConsoleErrorListener, ProxyErrorListener} diff --git a/runtime/JavaScript/src/antlr4/error/ErrorStrategy.js b/runtime/JavaScript/src/antlr4/error/ErrorStrategy.js index 94038e13a..70bbb5971 100644 --- a/runtime/JavaScript/src/antlr4/error/ErrorStrategy.js +++ b/runtime/JavaScript/src/antlr4/error/ErrorStrategy.js @@ -1,756 +1,764 @@ -// /* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. * Use of this file is governed by the BSD 3-clause license that * can be found in the LICENSE.txt file in the project root. 
*/ -// -var Token = require('./../Token').Token; -var Errors = require('./Errors'); -var NoViableAltException = Errors.NoViableAltException; -var InputMismatchException = Errors.InputMismatchException; -var FailedPredicateException = Errors.FailedPredicateException; -var ParseCancellationException = Errors.ParseCancellationException; -var ATNState = require('./../atn/ATNState').ATNState; -var Interval = require('./../IntervalSet').Interval; -var IntervalSet = require('./../IntervalSet').IntervalSet; +const {Token} = require('./../Token') +const {NoViableAltException, InputMismatchException, FailedPredicateException, ParseCancellationException} = require('./Errors') +const {ATNState} = require('./../atn/ATNState') +const {Interval, IntervalSet} = require('./../IntervalSet') -function ErrorStrategy() { +class ErrorStrategy { + reset(recognizer) { + } + + recoverInline(recognizer) { + } + + recover(recognizer, e) { + } + + sync(recognizer) { + } + + inErrorRecoveryMode(recognizer) { + } + + reportError(recognizer) { + } } -ErrorStrategy.prototype.reset = function(recognizer){ -}; -ErrorStrategy.prototype.recoverInline = function(recognizer){ -}; +/** + * This is the default implementation of {@link ANTLRErrorStrategy} used for + * error reporting and recovery in ANTLR parsers. +*/ +class DefaultErrorStrategy extends ErrorStrategy { + constructor() { + super(); + /** + * Indicates whether the error strategy is currently "recovering from an + * error". This is used to suppress reporting multiple error messages while + * attempting to recover from a detected syntax error. 
+ * + * @see //inErrorRecoveryMode + */ + this.errorRecoveryMode = false; -ErrorStrategy.prototype.recover = function(recognizer, e){ -}; - -ErrorStrategy.prototype.sync = function(recognizer){ -}; - -ErrorStrategy.prototype.inErrorRecoveryMode = function(recognizer){ -}; - -ErrorStrategy.prototype.reportError = function(recognizer){ -}; - - - -// This is the default implementation of {@link ANTLRErrorStrategy} used for -// error reporting and recovery in ANTLR parsers. -// -function DefaultErrorStrategy() { - ErrorStrategy.call(this); - // Indicates whether the error strategy is currently "recovering from an - // error". This is used to suppress reporting multiple error messages while - // attempting to recover from a detected syntax error. - // - // @see //inErrorRecoveryMode - // - this.errorRecoveryMode = false; - - // The index into the input stream where the last error occurred. - // This is used to prevent infinite loops where an error is found - // but no token is consumed during recovery...another error is found, - // ad nauseum. This is a failsafe mechanism to guarantee that at least - // one token/tree node is consumed for two errors. - // - this.lastErrorIndex = -1; - this.lastErrorStates = null; - return this; -} - -DefaultErrorStrategy.prototype = Object.create(ErrorStrategy.prototype); -DefaultErrorStrategy.prototype.constructor = DefaultErrorStrategy; - -//

            The default implementation simply calls {@link //endErrorCondition} to -// ensure that the handler is not in error recovery mode.

            -DefaultErrorStrategy.prototype.reset = function(recognizer) { - this.endErrorCondition(recognizer); -}; - -// -// This method is called to enter error recovery mode when a recognition -// exception is reported. -// -// @param recognizer the parser instance -// -DefaultErrorStrategy.prototype.beginErrorCondition = function(recognizer) { - this.errorRecoveryMode = true; -}; - -DefaultErrorStrategy.prototype.inErrorRecoveryMode = function(recognizer) { - return this.errorRecoveryMode; -}; - -// -// This method is called to leave error recovery mode after recovering from -// a recognition exception. -// -// @param recognizer -// -DefaultErrorStrategy.prototype.endErrorCondition = function(recognizer) { - this.errorRecoveryMode = false; - this.lastErrorStates = null; - this.lastErrorIndex = -1; -}; - -// -// {@inheritDoc} -// -//

            The default implementation simply calls {@link //endErrorCondition}.

            -// -DefaultErrorStrategy.prototype.reportMatch = function(recognizer) { - this.endErrorCondition(recognizer); -}; - -// -// {@inheritDoc} -// -//

            The default implementation returns immediately if the handler is already -// in error recovery mode. Otherwise, it calls {@link //beginErrorCondition} -// and dispatches the reporting task based on the runtime type of {@code e} -// according to the following table.

            -// -//
              -//
            • {@link NoViableAltException}: Dispatches the call to -// {@link //reportNoViableAlternative}
            • -//
            • {@link InputMismatchException}: Dispatches the call to -// {@link //reportInputMismatch}
            • -//
            • {@link FailedPredicateException}: Dispatches the call to -// {@link //reportFailedPredicate}
            • -//
            • All other types: calls {@link Parser//notifyErrorListeners} to report -// the exception
            • -//
            -// -DefaultErrorStrategy.prototype.reportError = function(recognizer, e) { - // if we've already reported an error and have not matched a token - // yet successfully, don't report any errors. - if(this.inErrorRecoveryMode(recognizer)) { - return; // don't report spurious errors + /** + * The index into the input stream where the last error occurred. + * This is used to prevent infinite loops where an error is found + * but no token is consumed during recovery...another error is found, + * ad nauseum. This is a failsafe mechanism to guarantee that at least + * one token/tree node is consumed for two errors. + */ + this.lastErrorIndex = -1; + this.lastErrorStates = null; } - this.beginErrorCondition(recognizer); - if ( e instanceof NoViableAltException ) { - this.reportNoViableAlternative(recognizer, e); - } else if ( e instanceof InputMismatchException ) { - this.reportInputMismatch(recognizer, e); - } else if ( e instanceof FailedPredicateException ) { - this.reportFailedPredicate(recognizer, e); - } else { - console.log("unknown recognition error type: " + e.constructor.name); - console.log(e.stack); - recognizer.notifyErrorListeners(e.getOffendingToken(), e.getMessage(), e); - } -}; -// -// {@inheritDoc} -// -//

            The default implementation resynchronizes the parser by consuming tokens -// until we find one in the resynchronization set--loosely the set of tokens -// that can follow the current rule.

            -// -DefaultErrorStrategy.prototype.recover = function(recognizer, e) { - if (this.lastErrorIndex===recognizer.getInputStream().index && - this.lastErrorStates !== null && this.lastErrorStates.indexOf(recognizer.state)>=0) { - // uh oh, another error at same token index and previously-visited - // state in ATN; must be a case where LT(1) is in the recovery - // token set so nothing got consumed. Consume a single token - // at least to prevent an infinite loop; this is a failsafe. - recognizer.consume(); - } - this.lastErrorIndex = recognizer._input.index; - if (this.lastErrorStates === null) { - this.lastErrorStates = []; - } - this.lastErrorStates.push(recognizer.state); - var followSet = this.getErrorRecoverySet(recognizer); - this.consumeUntil(recognizer, followSet); -}; -// The default implementation of {@link ANTLRErrorStrategy//sync} makes sure -// that the current lookahead symbol is consistent with what were expecting -// at this point in the ATN. You can call this anytime but ANTLR only -// generates code to check before subrules/loops and each iteration. -// -//

            Implements Jim Idle's magic sync mechanism in closures and optional -// subrules. E.g.,

            -// -//
            -// a : sync ( stuff sync )* ;
            -// sync : {consume to what can follow sync} ;
            -// 
            -// -// At the start of a sub rule upon error, {@link //sync} performs single -// token deletion, if possible. If it can't do that, it bails on the current -// rule and uses the default error recovery, which consumes until the -// resynchronization set of the current rule. -// -//

            If the sub rule is optional ({@code (...)?}, {@code (...)*}, or block -// with an empty alternative), then the expected set includes what follows -// the subrule.

            -// -//

            During loop iteration, it consumes until it sees a token that can start a -// sub rule or what follows loop. Yes, that is pretty aggressive. We opt to -// stay in the loop as long as possible.

            -// -//

            ORIGINS

            -// -//

            Previous versions of ANTLR did a poor job of their recovery within loops. -// A single mismatch token or missing token would force the parser to bail -// out of the entire rules surrounding the loop. So, for rule

            -// -//
            -// classDef : 'class' ID '{' member* '}'
            -// 
            -// -// input with an extra token between members would force the parser to -// consume until it found the next class definition rather than the next -// member definition of the current class. -// -//

            This functionality cost a little bit of effort because the parser has to -// compare token set at the start of the loop and at each iteration. If for -// some reason speed is suffering for you, you can turn off this -// functionality by simply overriding this method as a blank { }.

            -// -DefaultErrorStrategy.prototype.sync = function(recognizer) { - // If already recovering, don't try to sync - if (this.inErrorRecoveryMode(recognizer)) { - return; + /** + *

            The default implementation simply calls {@link //endErrorCondition} to + * ensure that the handler is not in error recovery mode.

            + */ + reset(recognizer) { + this.endErrorCondition(recognizer); } - var s = recognizer._interp.atn.states[recognizer.state]; - var la = recognizer.getTokenStream().LA(1); - // try cheaper subset first; might get lucky. seems to shave a wee bit off - var nextTokens = recognizer.atn.nextTokens(s); - if (nextTokens.contains(Token.EPSILON) || nextTokens.contains(la)) { - return; + + /** + * This method is called to enter error recovery mode when a recognition + * exception is reported. + * + * @param recognizer the parser instance + */ + beginErrorCondition(recognizer) { + this.errorRecoveryMode = true; } - switch (s.stateType) { - case ATNState.BLOCK_START: - case ATNState.STAR_BLOCK_START: - case ATNState.PLUS_BLOCK_START: - case ATNState.STAR_LOOP_ENTRY: - // report error and recover if possible - if( this.singleTokenDeletion(recognizer) !== null) { + + inErrorRecoveryMode(recognizer) { + return this.errorRecoveryMode; + } + + /** + * This method is called to leave error recovery mode after recovering from + * a recognition exception. + * @param recognizer + */ + endErrorCondition(recognizer) { + this.errorRecoveryMode = false; + this.lastErrorStates = null; + this.lastErrorIndex = -1; + } + + /** + * {@inheritDoc} + *

            The default implementation simply calls {@link //endErrorCondition}.

            + */ + reportMatch(recognizer) { + this.endErrorCondition(recognizer); + } + + /** + * {@inheritDoc} + * + *

            The default implementation returns immediately if the handler is already + * in error recovery mode. Otherwise, it calls {@link //beginErrorCondition} + * and dispatches the reporting task based on the runtime type of {@code e} + * according to the following table.

            + * + *
              + *
            • {@link NoViableAltException}: Dispatches the call to + * {@link //reportNoViableAlternative}
            • + *
            • {@link InputMismatchException}: Dispatches the call to + * {@link //reportInputMismatch}
            • + *
            • {@link FailedPredicateException}: Dispatches the call to + * {@link //reportFailedPredicate}
            • + *
            • All other types: calls {@link Parser//notifyErrorListeners} to report + * the exception
            • + *
            + */ + reportError(recognizer, e) { + // if we've already reported an error and have not matched a token + // yet successfully, don't report any errors. + if(this.inErrorRecoveryMode(recognizer)) { + return; // don't report spurious errors + } + this.beginErrorCondition(recognizer); + if ( e instanceof NoViableAltException ) { + this.reportNoViableAlternative(recognizer, e); + } else if ( e instanceof InputMismatchException ) { + this.reportInputMismatch(recognizer, e); + } else if ( e instanceof FailedPredicateException ) { + this.reportFailedPredicate(recognizer, e); + } else { + console.log("unknown recognition error type: " + e.constructor.name); + console.log(e.stack); + recognizer.notifyErrorListeners(e.getOffendingToken(), e.getMessage(), e); + } + } + + /** + * + * {@inheritDoc} + * + *

            The default implementation resynchronizes the parser by consuming tokens + * until we find one in the resynchronization set--loosely the set of tokens + * that can follow the current rule.

            + * + */ + recover(recognizer, e) { + if (this.lastErrorIndex===recognizer.getInputStream().index && + this.lastErrorStates !== null && this.lastErrorStates.indexOf(recognizer.state)>=0) { + // uh oh, another error at same token index and previously-visited + // state in ATN; must be a case where LT(1) is in the recovery + // token set so nothing got consumed. Consume a single token + // at least to prevent an infinite loop; this is a failsafe. + recognizer.consume(); + } + this.lastErrorIndex = recognizer._input.index; + if (this.lastErrorStates === null) { + this.lastErrorStates = []; + } + this.lastErrorStates.push(recognizer.state); + const followSet = this.getErrorRecoverySet(recognizer) + this.consumeUntil(recognizer, followSet); + } + + /** + * The default implementation of {@link ANTLRErrorStrategy//sync} makes sure + * that the current lookahead symbol is consistent with what were expecting + * at this point in the ATN. You can call this anytime but ANTLR only + * generates code to check before subrules/loops and each iteration. + * + *

            Implements Jim Idle's magic sync mechanism in closures and optional + * subrules. E.g.,

            + * + *
            +     * a : sync ( stuff sync )* ;
            +     * sync : {consume to what can follow sync} ;
            +     * 
            + * + * At the start of a sub rule upon error, {@link //sync} performs single + * token deletion, if possible. If it can't do that, it bails on the current + * rule and uses the default error recovery, which consumes until the + * resynchronization set of the current rule. + * + *

            If the sub rule is optional ({@code (...)?}, {@code (...)*}, or block + * with an empty alternative), then the expected set includes what follows + * the subrule.

            + * + *

            During loop iteration, it consumes until it sees a token that can start a + * sub rule or what follows loop. Yes, that is pretty aggressive. We opt to + * stay in the loop as long as possible.

            + * + *

            ORIGINS

            + * + *

            Previous versions of ANTLR did a poor job of their recovery within loops. + * A single mismatch token or missing token would force the parser to bail + * out of the entire rules surrounding the loop. So, for rule

            + * + *
            +     * classDef : 'class' ID '{' member* '}'
            +     * 
            + * + * input with an extra token between members would force the parser to + * consume until it found the next class definition rather than the next + * member definition of the current class. + * + *

            This functionality cost a little bit of effort because the parser has to + * compare token set at the start of the loop and at each iteration. If for + * some reason speed is suffering for you, you can turn off this + * functionality by simply overriding this method as a blank { }.

            + * + */ + sync(recognizer) { + // If already recovering, don't try to sync + if (this.inErrorRecoveryMode(recognizer)) { return; - } else { - throw new InputMismatchException(recognizer); } - break; - case ATNState.PLUS_LOOP_BACK: - case ATNState.STAR_LOOP_BACK: - this.reportUnwantedToken(recognizer); - var expecting = new IntervalSet(); - expecting.addSet(recognizer.getExpectedTokens()); - var whatFollowsLoopIterationOrRule = expecting.addSet(this.getErrorRecoverySet(recognizer)); - this.consumeUntil(recognizer, whatFollowsLoopIterationOrRule); - break; - default: - // do nothing if we can't identify the exact kind of ATN state - } -}; - -// This is called by {@link //reportError} when the exception is a -// {@link NoViableAltException}. -// -// @see //reportError -// -// @param recognizer the parser instance -// @param e the recognition exception -// -DefaultErrorStrategy.prototype.reportNoViableAlternative = function(recognizer, e) { - var tokens = recognizer.getTokenStream(); - var input; - if(tokens !== null) { - if (e.startToken.type===Token.EOF) { - input = ""; - } else { - input = tokens.getText(new Interval(e.startToken.tokenIndex, e.offendingToken.tokenIndex)); + const s = recognizer._interp.atn.states[recognizer.state] + const la = recognizer.getTokenStream().LA(1) + // try cheaper subset first; might get lucky. seems to shave a wee bit off + const nextTokens = recognizer.atn.nextTokens(s) + if (nextTokens.contains(Token.EPSILON) || nextTokens.contains(la)) { + return; } - } else { - input = ""; - } - var msg = "no viable alternative at input " + this.escapeWSAndQuote(input); - recognizer.notifyErrorListeners(msg, e.offendingToken, e); -}; - -// -// This is called by {@link //reportError} when the exception is an -// {@link InputMismatchException}. 
-// -// @see //reportError -// -// @param recognizer the parser instance -// @param e the recognition exception -// -DefaultErrorStrategy.prototype.reportInputMismatch = function(recognizer, e) { - var msg = "mismatched input " + this.getTokenErrorDisplay(e.offendingToken) + - " expecting " + e.getExpectedTokens().toString(recognizer.literalNames, recognizer.symbolicNames); - recognizer.notifyErrorListeners(msg, e.offendingToken, e); -}; - -// -// This is called by {@link //reportError} when the exception is a -// {@link FailedPredicateException}. -// -// @see //reportError -// -// @param recognizer the parser instance -// @param e the recognition exception -// -DefaultErrorStrategy.prototype.reportFailedPredicate = function(recognizer, e) { - var ruleName = recognizer.ruleNames[recognizer._ctx.ruleIndex]; - var msg = "rule " + ruleName + " " + e.message; - recognizer.notifyErrorListeners(msg, e.offendingToken, e); -}; - -// This method is called to report a syntax error which requires the removal -// of a token from the input stream. At the time this method is called, the -// erroneous symbol is current {@code LT(1)} symbol and has not yet been -// removed from the input stream. When this method returns, -// {@code recognizer} is in error recovery mode. -// -//

            This method is called when {@link //singleTokenDeletion} identifies -// single-token deletion as a viable recovery strategy for a mismatched -// input error.

            -// -//

            The default implementation simply returns if the handler is already in -// error recovery mode. Otherwise, it calls {@link //beginErrorCondition} to -// enter error recovery mode, followed by calling -// {@link Parser//notifyErrorListeners}.

            -// -// @param recognizer the parser instance -// -DefaultErrorStrategy.prototype.reportUnwantedToken = function(recognizer) { - if (this.inErrorRecoveryMode(recognizer)) { - return; - } - this.beginErrorCondition(recognizer); - var t = recognizer.getCurrentToken(); - var tokenName = this.getTokenErrorDisplay(t); - var expecting = this.getExpectedTokens(recognizer); - var msg = "extraneous input " + tokenName + " expecting " + - expecting.toString(recognizer.literalNames, recognizer.symbolicNames); - recognizer.notifyErrorListeners(msg, t, null); -}; -// This method is called to report a syntax error which requires the -// insertion of a missing token into the input stream. At the time this -// method is called, the missing token has not yet been inserted. When this -// method returns, {@code recognizer} is in error recovery mode. -// -//

            This method is called when {@link //singleTokenInsertion} identifies -// single-token insertion as a viable recovery strategy for a mismatched -// input error.

            -// -//

            The default implementation simply returns if the handler is already in -// error recovery mode. Otherwise, it calls {@link //beginErrorCondition} to -// enter error recovery mode, followed by calling -// {@link Parser//notifyErrorListeners}.

            -// -// @param recognizer the parser instance -// -DefaultErrorStrategy.prototype.reportMissingToken = function(recognizer) { - if ( this.inErrorRecoveryMode(recognizer)) { - return; - } - this.beginErrorCondition(recognizer); - var t = recognizer.getCurrentToken(); - var expecting = this.getExpectedTokens(recognizer); - var msg = "missing " + expecting.toString(recognizer.literalNames, recognizer.symbolicNames) + - " at " + this.getTokenErrorDisplay(t); - recognizer.notifyErrorListeners(msg, t, null); -}; - -//

            The default implementation attempts to recover from the mismatched input -// by using single token insertion and deletion as described below. If the -// recovery attempt fails, this method throws an -// {@link InputMismatchException}.

            -// -//

            EXTRA TOKEN (single token deletion)

            -// -//

            {@code LA(1)} is not what we are looking for. If {@code LA(2)} has the -// right token, however, then assume {@code LA(1)} is some extra spurious -// token and delete it. Then consume and return the next token (which was -// the {@code LA(2)} token) as the successful result of the match operation.

            -// -//

            This recovery strategy is implemented by {@link -// //singleTokenDeletion}.

            -// -//

            MISSING TOKEN (single token insertion)

            -// -//

            If current token (at {@code LA(1)}) is consistent with what could come -// after the expected {@code LA(1)} token, then assume the token is missing -// and use the parser's {@link TokenFactory} to create it on the fly. The -// "insertion" is performed by returning the created token as the successful -// result of the match operation.

            -// -//

            This recovery strategy is implemented by {@link -// //singleTokenInsertion}.

            -// -//

            EXAMPLE

            -// -//

            For example, Input {@code i=(3;} is clearly missing the {@code ')'}. When -// the parser returns from the nested call to {@code expr}, it will have -// call chain:

            -// -//
            -// stat → expr → atom
            -// 
            -// -// and it will be trying to match the {@code ')'} at this point in the -// derivation: -// -//
            -// => ID '=' '(' INT ')' ('+' atom)* ';'
            -// ^
            -// 
            -// -// The attempt to match {@code ')'} will fail when it sees {@code ';'} and -// call {@link //recoverInline}. To recover, it sees that {@code LA(1)==';'} -// is in the set of tokens that can follow the {@code ')'} token reference -// in rule {@code atom}. It can assume that you forgot the {@code ')'}. -// -DefaultErrorStrategy.prototype.recoverInline = function(recognizer) { - // SINGLE TOKEN DELETION - var matchedSymbol = this.singleTokenDeletion(recognizer); - if (matchedSymbol !== null) { - // we have deleted the extra token. - // now, move past ttype token as if all were ok - recognizer.consume(); - return matchedSymbol; - } - // SINGLE TOKEN INSERTION - if (this.singleTokenInsertion(recognizer)) { - return this.getMissingSymbol(recognizer); - } - // even that didn't work; must throw the exception - throw new InputMismatchException(recognizer); -}; - -// -// This method implements the single-token insertion inline error recovery -// strategy. It is called by {@link //recoverInline} if the single-token -// deletion strategy fails to recover from the mismatched input. If this -// method returns {@code true}, {@code recognizer} will be in error recovery -// mode. -// -//

            This method determines whether or not single-token insertion is viable by -// checking if the {@code LA(1)} input symbol could be successfully matched -// if it were instead the {@code LA(2)} symbol. If this method returns -// {@code true}, the caller is responsible for creating and inserting a -// token with the correct type to produce this behavior.

            -// -// @param recognizer the parser instance -// @return {@code true} if single-token insertion is a viable recovery -// strategy for the current mismatched input, otherwise {@code false} -// -DefaultErrorStrategy.prototype.singleTokenInsertion = function(recognizer) { - var currentSymbolType = recognizer.getTokenStream().LA(1); - // if current token is consistent with what could come after current - // ATN state, then we know we're missing a token; error recovery - // is free to conjure up and insert the missing token - var atn = recognizer._interp.atn; - var currentState = atn.states[recognizer.state]; - var next = currentState.transitions[0].target; - var expectingAtLL2 = atn.nextTokens(next, recognizer._ctx); - if (expectingAtLL2.contains(currentSymbolType) ){ - this.reportMissingToken(recognizer); - return true; - } else { - return false; - } -}; - -// This method implements the single-token deletion inline error recovery -// strategy. It is called by {@link //recoverInline} to attempt to recover -// from mismatched input. If this method returns null, the parser and error -// handler state will not have changed. If this method returns non-null, -// {@code recognizer} will not be in error recovery mode since the -// returned token was a successful match. -// -//

            If the single-token deletion is successful, this method calls -// {@link //reportUnwantedToken} to report the error, followed by -// {@link Parser//consume} to actually "delete" the extraneous token. Then, -// before returning {@link //reportMatch} is called to signal a successful -// match.

            -// -// @param recognizer the parser instance -// @return the successfully matched {@link Token} instance if single-token -// deletion successfully recovers from the mismatched input, otherwise -// {@code null} -// -DefaultErrorStrategy.prototype.singleTokenDeletion = function(recognizer) { - var nextTokenType = recognizer.getTokenStream().LA(2); - var expecting = this.getExpectedTokens(recognizer); - if (expecting.contains(nextTokenType)) { - this.reportUnwantedToken(recognizer); - // print("recoverFromMismatchedToken deleting " \ - // + str(recognizer.getTokenStream().LT(1)) \ - // + " since " + str(recognizer.getTokenStream().LT(2)) \ - // + " is what we want", file=sys.stderr) - recognizer.consume(); // simply delete extra token - // we want to return the token we're actually matching - var matchedSymbol = recognizer.getCurrentToken(); - this.reportMatch(recognizer); // we know current token is correct - return matchedSymbol; - } else { - return null; - } -}; - -// Conjure up a missing token during error recovery. -// -// The recognizer attempts to recover from single missing -// symbols. But, actions might refer to that missing symbol. -// For example, x=ID {f($x);}. The action clearly assumes -// that there has been an identifier matched previously and that -// $x points at that token. If that token is missing, but -// the next token in the stream is what we want we assume that -// this token is missing and we keep going. Because we -// have to return some token to replace the missing token, -// we have to conjure one up. This method gives the user control -// over the tokens returned for missing tokens. Mostly, -// you will want to create something special for identifier -// tokens. For literals such as '{' and ',', the default -// action in the parser or tree parser works. It simply creates -// a CommonToken of the appropriate type. The text will be the token. 
-// If you change what tokens must be created by the lexer, -// override this method to create the appropriate tokens. -// -DefaultErrorStrategy.prototype.getMissingSymbol = function(recognizer) { - var currentSymbol = recognizer.getCurrentToken(); - var expecting = this.getExpectedTokens(recognizer); - var expectedTokenType = expecting.first(); // get any element - var tokenText; - if (expectedTokenType===Token.EOF) { - tokenText = ""; - } else { - tokenText = ""; - } - var current = currentSymbol; - var lookback = recognizer.getTokenStream().LT(-1); - if (current.type===Token.EOF && lookback !== null) { - current = lookback; - } - return recognizer.getTokenFactory().create(current.source, - expectedTokenType, tokenText, Token.DEFAULT_CHANNEL, - -1, -1, current.line, current.column); -}; - -DefaultErrorStrategy.prototype.getExpectedTokens = function(recognizer) { - return recognizer.getExpectedTokens(); -}; - -// How should a token be displayed in an error message? The default -// is to display just the text, but during development you might -// want to have a lot of information spit out. Override in that case -// to use t.toString() (which, for CommonToken, dumps everything about -// the token). This is better than forcing you to override a method in -// your token objects because you don't have to go modify your lexer -// so that it creates a new Java type. 
-// -DefaultErrorStrategy.prototype.getTokenErrorDisplay = function(t) { - if (t === null) { - return ""; - } - var s = t.text; - if (s === null) { - if (t.type===Token.EOF) { - s = ""; - } else { - s = "<" + t.type + ">"; + switch (s.stateType) { + case ATNState.BLOCK_START: + case ATNState.STAR_BLOCK_START: + case ATNState.PLUS_BLOCK_START: + case ATNState.STAR_LOOP_ENTRY: + // report error and recover if possible + if( this.singleTokenDeletion(recognizer) !== null) { + return; + } else { + throw new InputMismatchException(recognizer); + } + case ATNState.PLUS_LOOP_BACK: + case ATNState.STAR_LOOP_BACK: + this.reportUnwantedToken(recognizer); + const expecting = new IntervalSet() + expecting.addSet(recognizer.getExpectedTokens()); + const whatFollowsLoopIterationOrRule = expecting.addSet(this.getErrorRecoverySet(recognizer)) + this.consumeUntil(recognizer, whatFollowsLoopIterationOrRule); + break; + default: + // do nothing if we can't identify the exact kind of ATN state } } - return this.escapeWSAndQuote(s); -}; -DefaultErrorStrategy.prototype.escapeWSAndQuote = function(s) { - s = s.replace(/\n/g,"\\n"); - s = s.replace(/\r/g,"\\r"); - s = s.replace(/\t/g,"\\t"); - return "'" + s + "'"; -}; - -// Compute the error recovery set for the current rule. During -// rule invocation, the parser pushes the set of tokens that can -// follow that rule reference on the stack; this amounts to -// computing FIRST of what follows the rule reference in the -// enclosing rule. See LinearApproximator.FIRST(). -// This local follow set only includes tokens -// from within the rule; i.e., the FIRST computation done by -// ANTLR stops at the end of a rule. -// -// EXAMPLE -// -// When you find a "no viable alt exception", the input is not -// consistent with any of the alternatives for rule r. The best -// thing to do is to consume tokens until you see something that -// can legally follow a call to r//or* any rule that called r. 
-// You don't want the exact set of viable next tokens because the -// input might just be missing a token--you might consume the -// rest of the input looking for one of the missing tokens. -// -// Consider grammar: -// -// a : '[' b ']' -// | '(' b ')' -// ; -// b : c '^' INT ; -// c : ID -// | INT -// ; -// -// At each rule invocation, the set of tokens that could follow -// that rule is pushed on a stack. Here are the various -// context-sensitive follow sets: -// -// FOLLOW(b1_in_a) = FIRST(']') = ']' -// FOLLOW(b2_in_a) = FIRST(')') = ')' -// FOLLOW(c_in_b) = FIRST('^') = '^' -// -// Upon erroneous input "[]", the call chain is -// -// a -> b -> c -// -// and, hence, the follow context stack is: -// -// depth follow set start of rule execution -// 0 a (from main()) -// 1 ']' b -// 2 '^' c -// -// Notice that ')' is not included, because b would have to have -// been called from a different context in rule a for ')' to be -// included. -// -// For error recovery, we cannot consider FOLLOW(c) -// (context-sensitive or otherwise). We need the combined set of -// all context-sensitive FOLLOW sets--the set of all tokens that -// could follow any reference in the call chain. We need to -// resync to one of those tokens. Note that FOLLOW(c)='^' and if -// we resync'd to that token, we'd consume until EOF. We need to -// sync to context-sensitive FOLLOWs for a, b, and c: {']','^'}. -// In this case, for input "[]", LA(1) is ']' and in the set, so we would -// not consume anything. After printing an error, rule c would -// return normally. Rule b would not find the required '^' though. -// At this point, it gets a mismatched token error and throws an -// exception (since LA(1) is not in the viable following token -// set). The rule exception handler tries to recover, but finds -// the same recovery set and doesn't consume anything. Rule b -// exits normally returning to rule a. Now it finds the ']' (and -// with the successful match exits errorRecovery mode). 
-// -// So, you can see that the parser walks up the call chain looking -// for the token that was a member of the recovery set. -// -// Errors are not generated in errorRecovery mode. -// -// ANTLR's error recovery mechanism is based upon original ideas: -// -// "Algorithms + Data Structures = Programs" by Niklaus Wirth -// -// and -// -// "A note on error recovery in recursive descent parsers": -// http://portal.acm.org/citation.cfm?id=947902.947905 -// -// Later, Josef Grosch had some good ideas: -// -// "Efficient and Comfortable Error Recovery in Recursive Descent -// Parsers": -// ftp://www.cocolab.com/products/cocktail/doca4.ps/ell.ps.zip -// -// Like Grosch I implement context-sensitive FOLLOW sets that are combined -// at run-time upon error to avoid overhead during parsing. -// -DefaultErrorStrategy.prototype.getErrorRecoverySet = function(recognizer) { - var atn = recognizer._interp.atn; - var ctx = recognizer._ctx; - var recoverSet = new IntervalSet(); - while (ctx !== null && ctx.invokingState>=0) { - // compute what follows who invoked us - var invokingState = atn.states[ctx.invokingState]; - var rt = invokingState.transitions[0]; - var follow = atn.nextTokens(rt.followState); - recoverSet.addSet(follow); - ctx = ctx.parentCtx; + /** + * This is called by {@link //reportError} when the exception is a + * {@link NoViableAltException}. 
+ * + * @see //reportError + * + * @param recognizer the parser instance + * @param e the recognition exception + */ + reportNoViableAlternative(recognizer, e) { + const tokens = recognizer.getTokenStream() + let input + if(tokens !== null) { + if (e.startToken.type===Token.EOF) { + input = ""; + } else { + input = tokens.getText(new Interval(e.startToken.tokenIndex, e.offendingToken.tokenIndex)); + } + } else { + input = ""; + } + const msg = "no viable alternative at input " + this.escapeWSAndQuote(input) + recognizer.notifyErrorListeners(msg, e.offendingToken, e); + } + + /** + * This is called by {@link //reportError} when the exception is an + * {@link InputMismatchException}. + * + * @see //reportError + * + * @param recognizer the parser instance + * @param e the recognition exception + */ + reportInputMismatch(recognizer, e) { + const msg = "mismatched input " + this.getTokenErrorDisplay(e.offendingToken) + + " expecting " + e.getExpectedTokens().toString(recognizer.literalNames, recognizer.symbolicNames) + recognizer.notifyErrorListeners(msg, e.offendingToken, e); + } + + /** + * This is called by {@link //reportError} when the exception is a + * {@link FailedPredicateException}. + * + * @see //reportError + * + * @param recognizer the parser instance + * @param e the recognition exception + */ + reportFailedPredicate(recognizer, e) { + const ruleName = recognizer.ruleNames[recognizer._ctx.ruleIndex] + const msg = "rule " + ruleName + " " + e.message + recognizer.notifyErrorListeners(msg, e.offendingToken, e); + } + + /** + * This method is called to report a syntax error which requires the removal + * of a token from the input stream. At the time this method is called, the + * erroneous symbol is current {@code LT(1)} symbol and has not yet been + * removed from the input stream. When this method returns, + * {@code recognizer} is in error recovery mode. + * + *

            This method is called when {@link //singleTokenDeletion} identifies + * single-token deletion as a viable recovery strategy for a mismatched + * input error.

            + * + *

            The default implementation simply returns if the handler is already in + * error recovery mode. Otherwise, it calls {@link //beginErrorCondition} to + * enter error recovery mode, followed by calling + * {@link Parser//notifyErrorListeners}.

            + * + * @param recognizer the parser instance + * + */ + reportUnwantedToken(recognizer) { + if (this.inErrorRecoveryMode(recognizer)) { + return; + } + this.beginErrorCondition(recognizer); + const t = recognizer.getCurrentToken() + const tokenName = this.getTokenErrorDisplay(t) + const expecting = this.getExpectedTokens(recognizer) + const msg = "extraneous input " + tokenName + " expecting " + + expecting.toString(recognizer.literalNames, recognizer.symbolicNames) + recognizer.notifyErrorListeners(msg, t, null); + } + + /** + * This method is called to report a syntax error which requires the + * insertion of a missing token into the input stream. At the time this + * method is called, the missing token has not yet been inserted. When this + * method returns, {@code recognizer} is in error recovery mode. + * + *

            This method is called when {@link //singleTokenInsertion} identifies + * single-token insertion as a viable recovery strategy for a mismatched + * input error.

            + * + *

            The default implementation simply returns if the handler is already in + * error recovery mode. Otherwise, it calls {@link //beginErrorCondition} to + * enter error recovery mode, followed by calling + * {@link Parser//notifyErrorListeners}.

            + * + * @param recognizer the parser instance + */ + reportMissingToken(recognizer) { + if ( this.inErrorRecoveryMode(recognizer)) { + return; + } + this.beginErrorCondition(recognizer); + const t = recognizer.getCurrentToken() + const expecting = this.getExpectedTokens(recognizer) + const msg = "missing " + expecting.toString(recognizer.literalNames, recognizer.symbolicNames) + + " at " + this.getTokenErrorDisplay(t) + recognizer.notifyErrorListeners(msg, t, null); + } + + /** + *

            The default implementation attempts to recover from the mismatched input + * by using single token insertion and deletion as described below. If the + * recovery attempt fails, this method throws an + * {@link InputMismatchException}.

            + * + *

            EXTRA TOKEN (single token deletion)

            + * + *

            {@code LA(1)} is not what we are looking for. If {@code LA(2)} has the + * right token, however, then assume {@code LA(1)} is some extra spurious + * token and delete it. Then consume and return the next token (which was + * the {@code LA(2)} token) as the successful result of the match operation.

            + * + *

            This recovery strategy is implemented by {@link + * //singleTokenDeletion}.

            + * + *

            MISSING TOKEN (single token insertion)

            + * + *

            If current token (at {@code LA(1)}) is consistent with what could come + * after the expected {@code LA(1)} token, then assume the token is missing + * and use the parser's {@link TokenFactory} to create it on the fly. The + * "insertion" is performed by returning the created token as the successful + * result of the match operation.

            + * + *

            This recovery strategy is implemented by {@link + * //singleTokenInsertion}.

            + * + *

            EXAMPLE

            + * + *

            For example, Input {@code i=(3;} is clearly missing the {@code ')'}. When + * the parser returns from the nested call to {@code expr}, it will have + * call chain:

            + * + *
            +     * stat → expr → atom
            +     * 
            + * + * and it will be trying to match the {@code ')'} at this point in the + * derivation: + * + *
            +     * => ID '=' '(' INT ')' ('+' atom)* ';'
            +     * ^
            +     * 
            + * + * The attempt to match {@code ')'} will fail when it sees {@code ';'} and + * call {@link //recoverInline}. To recover, it sees that {@code LA(1)==';'} + * is in the set of tokens that can follow the {@code ')'} token reference + * in rule {@code atom}. It can assume that you forgot the {@code ')'}. + */ + recoverInline(recognizer) { + // SINGLE TOKEN DELETION + const matchedSymbol = this.singleTokenDeletion(recognizer) + if (matchedSymbol !== null) { + // we have deleted the extra token. + // now, move past ttype token as if all were ok + recognizer.consume(); + return matchedSymbol; + } + // SINGLE TOKEN INSERTION + if (this.singleTokenInsertion(recognizer)) { + return this.getMissingSymbol(recognizer); + } + // even that didn't work; must throw the exception + throw new InputMismatchException(recognizer); + } + + /** + * This method implements the single-token insertion inline error recovery + * strategy. It is called by {@link //recoverInline} if the single-token + * deletion strategy fails to recover from the mismatched input. If this + * method returns {@code true}, {@code recognizer} will be in error recovery + * mode. + * + *

            This method determines whether or not single-token insertion is viable by + * checking if the {@code LA(1)} input symbol could be successfully matched + * if it were instead the {@code LA(2)} symbol. If this method returns + * {@code true}, the caller is responsible for creating and inserting a + * token with the correct type to produce this behavior.

            + * + * @param recognizer the parser instance + * @return {@code true} if single-token insertion is a viable recovery + * strategy for the current mismatched input, otherwise {@code false} + */ + singleTokenInsertion(recognizer) { + const currentSymbolType = recognizer.getTokenStream().LA(1) + // if current token is consistent with what could come after current + // ATN state, then we know we're missing a token; error recovery + // is free to conjure up and insert the missing token + const atn = recognizer._interp.atn + const currentState = atn.states[recognizer.state] + const next = currentState.transitions[0].target + const expectingAtLL2 = atn.nextTokens(next, recognizer._ctx) + if (expectingAtLL2.contains(currentSymbolType) ){ + this.reportMissingToken(recognizer); + return true; + } else { + return false; + } + } + + /** + * This method implements the single-token deletion inline error recovery + * strategy. It is called by {@link //recoverInline} to attempt to recover + * from mismatched input. If this method returns null, the parser and error + * handler state will not have changed. If this method returns non-null, + * {@code recognizer} will not be in error recovery mode since the + * returned token was a successful match. + * + *

            If the single-token deletion is successful, this method calls + * {@link //reportUnwantedToken} to report the error, followed by + * {@link Parser//consume} to actually "delete" the extraneous token. Then, + * before returning {@link //reportMatch} is called to signal a successful + * match.

            + * + * @param recognizer the parser instance + * @return the successfully matched {@link Token} instance if single-token + * deletion successfully recovers from the mismatched input, otherwise + * {@code null} + */ + singleTokenDeletion(recognizer) { + const nextTokenType = recognizer.getTokenStream().LA(2) + const expecting = this.getExpectedTokens(recognizer) + if (expecting.contains(nextTokenType)) { + this.reportUnwantedToken(recognizer); + // print("recoverFromMismatchedToken deleting " \ + // + str(recognizer.getTokenStream().LT(1)) \ + // + " since " + str(recognizer.getTokenStream().LT(2)) \ + // + " is what we want", file=sys.stderr) + recognizer.consume(); // simply delete extra token + // we want to return the token we're actually matching + const matchedSymbol = recognizer.getCurrentToken() + this.reportMatch(recognizer); // we know current token is correct + return matchedSymbol; + } else { + return null; + } + } + + /** + * Conjure up a missing token during error recovery. + * + * The recognizer attempts to recover from single missing + * symbols. But, actions might refer to that missing symbol. + * For example, x=ID {f($x);}. The action clearly assumes + * that there has been an identifier matched previously and that + * $x points at that token. If that token is missing, but + * the next token in the stream is what we want we assume that + * this token is missing and we keep going. Because we + * have to return some token to replace the missing token, + * we have to conjure one up. This method gives the user control + * over the tokens returned for missing tokens. Mostly, + * you will want to create something special for identifier + * tokens. For literals such as '{' and ',', the default + * action in the parser or tree parser works. It simply creates + * a CommonToken of the appropriate type. The text will be the token. 
+ * If you change what tokens must be created by the lexer, + * override this method to create the appropriate tokens. + * + */ + getMissingSymbol(recognizer) { + const currentSymbol = recognizer.getCurrentToken() + const expecting = this.getExpectedTokens(recognizer) + const expectedTokenType = expecting.first() // get any element + let tokenText + if (expectedTokenType===Token.EOF) { + tokenText = ""; + } else { + tokenText = ""; + } + let current = currentSymbol + const lookback = recognizer.getTokenStream().LT(-1) + if (current.type===Token.EOF && lookback !== null) { + current = lookback; + } + return recognizer.getTokenFactory().create(current.source, + expectedTokenType, tokenText, Token.DEFAULT_CHANNEL, + -1, -1, current.line, current.column); + } + + getExpectedTokens(recognizer) { + return recognizer.getExpectedTokens(); + } + + /** + * How should a token be displayed in an error message? The default + * is to display just the text, but during development you might + * want to have a lot of information spit out. Override in that case + * to use t.toString() (which, for CommonToken, dumps everything about + * the token). This is better than forcing you to override a method in + * your token objects because you don't have to go modify your lexer + * so that it creates a new Java type. + */ + getTokenErrorDisplay(t) { + if (t === null) { + return ""; + } + let s = t.text + if (s === null) { + if (t.type===Token.EOF) { + s = ""; + } else { + s = "<" + t.type + ">"; + } + } + return this.escapeWSAndQuote(s); + } + + escapeWSAndQuote(s) { + s = s.replace(/\n/g,"\\n"); + s = s.replace(/\r/g,"\\r"); + s = s.replace(/\t/g,"\\t"); + return "'" + s + "'"; + } + + /** + * Compute the error recovery set for the current rule. During + * rule invocation, the parser pushes the set of tokens that can + * follow that rule reference on the stack; this amounts to + * computing FIRST of what follows the rule reference in the + * enclosing rule. See LinearApproximator.FIRST(). 
+ * This local follow set only includes tokens + * from within the rule; i.e., the FIRST computation done by + * ANTLR stops at the end of a rule. + * + * EXAMPLE + * + * When you find a "no viable alt exception", the input is not + * consistent with any of the alternatives for rule r. The best + * thing to do is to consume tokens until you see something that + * can legally follow a call to r//or* any rule that called r. + * You don't want the exact set of viable next tokens because the + * input might just be missing a token--you might consume the + * rest of the input looking for one of the missing tokens. + * + * Consider grammar: + * + * a : '[' b ']' + * | '(' b ')' + * ; + * b : c '^' INT ; + * c : ID + * | INT + * ; + * + * At each rule invocation, the set of tokens that could follow + * that rule is pushed on a stack. Here are the various + * context-sensitive follow sets: + * + * FOLLOW(b1_in_a) = FIRST(']') = ']' + * FOLLOW(b2_in_a) = FIRST(')') = ')' + * FOLLOW(c_in_b) = FIRST('^') = '^' + * + * Upon erroneous input "[]", the call chain is + * + * a -> b -> c + * + * and, hence, the follow context stack is: + * + * depth follow set start of rule execution + * 0 a (from main()) + * 1 ']' b + * 2 '^' c + * + * Notice that ')' is not included, because b would have to have + * been called from a different context in rule a for ')' to be + * included. + * + * For error recovery, we cannot consider FOLLOW(c) + * (context-sensitive or otherwise). We need the combined set of + * all context-sensitive FOLLOW sets--the set of all tokens that + * could follow any reference in the call chain. We need to + * resync to one of those tokens. Note that FOLLOW(c)='^' and if + * we resync'd to that token, we'd consume until EOF. We need to + * sync to context-sensitive FOLLOWs for a, b, and c: {']','^'}. + * In this case, for input "[]", LA(1) is ']' and in the set, so we would + * not consume anything. After printing an error, rule c would + * return normally. 
Rule b would not find the required '^' though. + * At this point, it gets a mismatched token error and throws an + * exception (since LA(1) is not in the viable following token + * set). The rule exception handler tries to recover, but finds + * the same recovery set and doesn't consume anything. Rule b + * exits normally returning to rule a. Now it finds the ']' (and + * with the successful match exits errorRecovery mode). + * + * So, you can see that the parser walks up the call chain looking + * for the token that was a member of the recovery set. + * + * Errors are not generated in errorRecovery mode. + * + * ANTLR's error recovery mechanism is based upon original ideas: + * + * "Algorithms + Data Structures = Programs" by Niklaus Wirth + * + * and + * + * "A note on error recovery in recursive descent parsers": + * http://portal.acm.org/citation.cfm?id=947902.947905 + * + * Later, Josef Grosch had some good ideas: + * + * "Efficient and Comfortable Error Recovery in Recursive Descent + * Parsers": + * ftp://www.cocolab.com/products/cocktail/doca4.ps/ell.ps.zip + * + * Like Grosch I implement context-sensitive FOLLOW sets that are combined + * at run-time upon error to avoid overhead during parsing. 
+ */ + getErrorRecoverySet(recognizer) { + const atn = recognizer._interp.atn + let ctx = recognizer._ctx + const recoverSet = new IntervalSet() + while (ctx !== null && ctx.invokingState>=0) { + // compute what follows who invoked us + const invokingState = atn.states[ctx.invokingState] + const rt = invokingState.transitions[0] + const follow = atn.nextTokens(rt.followState) + recoverSet.addSet(follow); + ctx = ctx.parentCtx; + } + recoverSet.removeOne(Token.EPSILON); + return recoverSet; } - recoverSet.removeOne(Token.EPSILON); - return recoverSet; -}; // Consume tokens until one matches the given token set.// -DefaultErrorStrategy.prototype.consumeUntil = function(recognizer, set) { - var ttype = recognizer.getTokenStream().LA(1); - while( ttype !== Token.EOF && !set.contains(ttype)) { - recognizer.consume(); - ttype = recognizer.getTokenStream().LA(1); + consumeUntil(recognizer, set) { + let ttype = recognizer.getTokenStream().LA(1) + while( ttype !== Token.EOF && !set.contains(ttype)) { + recognizer.consume(); + ttype = recognizer.getTokenStream().LA(1); + } } -}; - -// -// This implementation of {@link ANTLRErrorStrategy} responds to syntax errors -// by immediately canceling the parse operation with a -// {@link ParseCancellationException}. The implementation ensures that the -// {@link ParserRuleContext//exception} field is set for all parse tree nodes -// that were not completed prior to encountering the error. -// -//

            -// This error strategy is useful in the following scenarios.

            -// -//
              -//
            • Two-stage parsing: This error strategy allows the first -// stage of two-stage parsing to immediately terminate if an error is -// encountered, and immediately fall back to the second stage. In addition to -// avoiding wasted work by attempting to recover from errors here, the empty -// implementation of {@link BailErrorStrategy//sync} improves the performance of -// the first stage.
            • -//
            • Silent validation: When syntax errors are not being -// reported or logged, and the parse result is simply ignored if errors occur, -// the {@link BailErrorStrategy} avoids wasting work on recovering from errors -// when the result will be ignored either way.
            • -//
            -// -//

            -// {@code myparser.setErrorHandler(new BailErrorStrategy());}

            -// -// @see Parser//setErrorHandler(ANTLRErrorStrategy) -// -function BailErrorStrategy() { - DefaultErrorStrategy.call(this); - return this; } -BailErrorStrategy.prototype = Object.create(DefaultErrorStrategy.prototype); -BailErrorStrategy.prototype.constructor = BailErrorStrategy; -// Instead of recovering from exception {@code e}, re-throw it wrapped -// in a {@link ParseCancellationException} so it is not caught by the -// rule function catches. Use {@link Exception//getCause()} to get the -// original {@link RecognitionException}. -// -BailErrorStrategy.prototype.recover = function(recognizer, e) { - var context = recognizer._ctx; - while (context !== null) { - context.exception = e; - context = context.parentCtx; +/** + * This implementation of {@link ANTLRErrorStrategy} responds to syntax errors + * by immediately canceling the parse operation with a + * {@link ParseCancellationException}. The implementation ensures that the + * {@link ParserRuleContext//exception} field is set for all parse tree nodes + * that were not completed prior to encountering the error. + * + *

            + * This error strategy is useful in the following scenarios.

            + * + *
              + *
            • Two-stage parsing: This error strategy allows the first + * stage of two-stage parsing to immediately terminate if an error is + * encountered, and immediately fall back to the second stage. In addition to + * avoiding wasted work by attempting to recover from errors here, the empty + * implementation of {@link BailErrorStrategy//sync} improves the performance of + * the first stage.
            • + *
            • Silent validation: When syntax errors are not being + * reported or logged, and the parse result is simply ignored if errors occur, + * the {@link BailErrorStrategy} avoids wasting work on recovering from errors + * when the result will be ignored either way.
            • + *
            + * + *

            + * {@code myparser.setErrorHandler(new BailErrorStrategy());}

            + * + * @see Parser//setErrorHandler(ANTLRErrorStrategy) + * */ +class BailErrorStrategy extends DefaultErrorStrategy { + constructor() { + super(); } - throw new ParseCancellationException(e); -}; -// Make sure we don't attempt to recover inline; if the parser -// successfully recovers, it won't throw an exception. -// -BailErrorStrategy.prototype.recoverInline = function(recognizer) { - this.recover(recognizer, new InputMismatchException(recognizer)); -}; + /** + * Instead of recovering from exception {@code e}, re-throw it wrapped + * in a {@link ParseCancellationException} so it is not caught by the + * rule function catches. Use {@link Exception//getCause()} to get the + * original {@link RecognitionException}. + */ + recover(recognizer, e) { + let context = recognizer._ctx + while (context !== null) { + context.exception = e; + context = context.parentCtx; + } + throw new ParseCancellationException(e); + } + + /** + * Make sure we don't attempt to recover inline; if the parser + * successfully recovers, it won't throw an exception. + */ + recoverInline(recognizer) { + this.recover(recognizer, new InputMismatchException(recognizer)); + } // Make sure we don't attempt to recover from problems in subrules.// -BailErrorStrategy.prototype.sync = function(recognizer) { - // pass -}; + sync(recognizer) { + // pass + } +} -exports.BailErrorStrategy = BailErrorStrategy; -exports.DefaultErrorStrategy = DefaultErrorStrategy; + +module.exports = {BailErrorStrategy, DefaultErrorStrategy}; diff --git a/runtime/JavaScript/src/antlr4/error/Errors.js b/runtime/JavaScript/src/antlr4/error/Errors.js index 95de0b012..1308e9e5e 100644 --- a/runtime/JavaScript/src/antlr4/error/Errors.js +++ b/runtime/JavaScript/src/antlr4/error/Errors.js @@ -3,167 +3,171 @@ * can be found in the LICENSE.txt file in the project root. */ -// The root of the ANTLR exception hierarchy. 
In general, ANTLR tracks just -// 3 kinds of errors: prediction errors, failed predicate errors, and -// mismatched input errors. In each case, the parser knows where it is -// in the input, where it is in the ATN, the rule invocation stack, -// and what kind of problem occurred. +/** + * The root of the ANTLR exception hierarchy. In general, ANTLR tracks just + * 3 kinds of errors: prediction errors, failed predicate errors, and + * mismatched input errors. In each case, the parser knows where it is + * in the input, where it is in the ATN, the rule invocation stack, + * and what kind of problem occurred. + */ -var PredicateTransition = require('./../atn/Transition').PredicateTransition; +const {PredicateTransition} = require('./../atn/Transition') -function RecognitionException(params) { - Error.call(this); - if (!!Error.captureStackTrace) { - Error.captureStackTrace(this, RecognitionException); - } else { - var stack = new Error().stack; - } - this.message = params.message; - this.recognizer = params.recognizer; - this.input = params.input; - this.ctx = params.ctx; - // The current {@link Token} when an error occurred. Since not all streams - // support accessing symbols by index, we have to track the {@link Token} - // instance itself. - this.offendingToken = null; - // Get the ATN state number the parser was in at the time the error - // occurred. For {@link NoViableAltException} and - // {@link LexerNoViableAltException} exceptions, this is the - // {@link DecisionState} number. For others, it is the state whose outgoing - // edge we couldn't match. 
- this.offendingState = -1; - if (this.recognizer!==null) { - this.offendingState = this.recognizer.state; +class RecognitionException extends Error { + constructor(params) { + super(params.message); + if (!!Error.captureStackTrace) { + Error.captureStackTrace(this, RecognitionException); + } else { + var stack = new Error().stack; + } + this.message = params.message; + this.recognizer = params.recognizer; + this.input = params.input; + this.ctx = params.ctx; + /** + * The current {@link Token} when an error occurred. Since not all streams + * support accessing symbols by index, we have to track the {@link Token} + * instance itself + */ + this.offendingToken = null; + /** + * Get the ATN state number the parser was in at the time the error + * occurred. For {@link NoViableAltException} and + * {@link LexerNoViableAltException} exceptions, this is the + * {@link DecisionState} number. For others, it is the state whose outgoing + * edge we couldn't match. + */ + this.offendingState = -1; + if (this.recognizer!==null) { + this.offendingState = this.recognizer.state; + } } - return this; -} -RecognitionException.prototype = Object.create(Error.prototype); -RecognitionException.prototype.constructor = RecognitionException; - -//

            If the state number is not known, this method returns -1.

            - -// -// Gets the set of input symbols which could potentially follow the -// previously matched symbol at the time this exception was thrown. -// -//

            If the set of expected tokens is not known and could not be computed, -// this method returns {@code null}.

            -// -// @return The set of token types that could potentially follow the current -// state in the ATN, or {@code null} if the information is not available. -// / -RecognitionException.prototype.getExpectedTokens = function() { - if (this.recognizer!==null) { - return this.recognizer.atn.getExpectedTokens(this.offendingState, this.ctx); - } else { - return null; + /** + * Gets the set of input symbols which could potentially follow the + * previously matched symbol at the time this exception was thrown. + * + *

            If the set of expected tokens is not known and could not be computed, + * this method returns {@code null}.

            + * + * @return The set of token types that could potentially follow the current + * state in the ATN, or {@code null} if the information is not available. + */ + getExpectedTokens() { + if (this.recognizer!==null) { + return this.recognizer.atn.getExpectedTokens(this.offendingState, this.ctx); + } else { + return null; + } } -}; -RecognitionException.prototype.toString = function() { - return this.message; -}; - -function LexerNoViableAltException(lexer, input, startIndex, deadEndConfigs) { - RecognitionException.call(this, {message:"", recognizer:lexer, input:input, ctx:null}); - this.startIndex = startIndex; - this.deadEndConfigs = deadEndConfigs; - return this; -} - -LexerNoViableAltException.prototype = Object.create(RecognitionException.prototype); -LexerNoViableAltException.prototype.constructor = LexerNoViableAltException; - -LexerNoViableAltException.prototype.toString = function() { - var symbol = ""; - if (this.startIndex >= 0 && this.startIndex < this.input.size) { - symbol = this.input.getText((this.startIndex,this.startIndex)); + //

            If the state number is not known, this method returns -1.

            + toString() { + return this.message; } - return "LexerNoViableAltException" + symbol; -}; - -// Indicates that the parser could not decide which of two or more paths -// to take based upon the remaining input. It tracks the starting token -// of the offending input and also knows where the parser was -// in the various paths when the error. Reported by reportNoViableAlternative() -// -function NoViableAltException(recognizer, input, startToken, offendingToken, deadEndConfigs, ctx) { - ctx = ctx || recognizer._ctx; - offendingToken = offendingToken || recognizer.getCurrentToken(); - startToken = startToken || recognizer.getCurrentToken(); - input = input || recognizer.getInputStream(); - RecognitionException.call(this, {message:"", recognizer:recognizer, input:input, ctx:ctx}); - // Which configurations did we try at input.index() that couldn't match - // input.LT(1)?// - this.deadEndConfigs = deadEndConfigs; - // The token object at the start index; the input stream might - // not be buffering tokens so get a reference to it. (At the - // time the error occurred, of course the stream needs to keep a - // buffer all of the tokens but later we might not have access to those.) - this.startToken = startToken; - this.offendingToken = offendingToken; } -NoViableAltException.prototype = Object.create(RecognitionException.prototype); -NoViableAltException.prototype.constructor = NoViableAltException; - -// This signifies any kind of mismatched input exceptions such as -// when the current input does not match the expected token. 
-// -function InputMismatchException(recognizer) { - RecognitionException.call(this, {message:"", recognizer:recognizer, input:recognizer.getInputStream(), ctx:recognizer._ctx}); - this.offendingToken = recognizer.getCurrentToken(); -} - -InputMismatchException.prototype = Object.create(RecognitionException.prototype); -InputMismatchException.prototype.constructor = InputMismatchException; - -// A semantic predicate failed during validation. Validation of predicates -// occurs when normally parsing the alternative just like matching a token. -// Disambiguating predicate evaluation occurs when we test a predicate during -// prediction. - -function FailedPredicateException(recognizer, predicate, message) { - RecognitionException.call(this, {message:this.formatMessage(predicate,message || null), recognizer:recognizer, - input:recognizer.getInputStream(), ctx:recognizer._ctx}); - var s = recognizer._interp.atn.states[recognizer.state]; - var trans = s.transitions[0]; - if (trans instanceof PredicateTransition) { - this.ruleIndex = trans.ruleIndex; - this.predicateIndex = trans.predIndex; - } else { - this.ruleIndex = 0; - this.predicateIndex = 0; +class LexerNoViableAltException extends RecognitionException { + constructor(lexer, input, startIndex, deadEndConfigs) { + super({message: "", recognizer: lexer, input: input, ctx: null}); + this.startIndex = startIndex; + this.deadEndConfigs = deadEndConfigs; + } + + toString() { + let symbol = "" + if (this.startIndex >= 0 && this.startIndex < this.input.size) { + symbol = this.input.getText((this.startIndex,this.startIndex)); + } + return "LexerNoViableAltException" + symbol; } - this.predicate = predicate; - this.offendingToken = recognizer.getCurrentToken(); - return this; } -FailedPredicateException.prototype = Object.create(RecognitionException.prototype); -FailedPredicateException.prototype.constructor = FailedPredicateException; -FailedPredicateException.prototype.formatMessage = function(predicate, message) { +/** + 
* Indicates that the parser could not decide which of two or more paths + * to take based upon the remaining input. It tracks the starting token + * of the offending input and also knows where the parser was + * in the various paths when the error. Reported by reportNoViableAlternative() + */ +class NoViableAltException extends RecognitionException { + constructor(recognizer, input, startToken, offendingToken, deadEndConfigs, ctx) { + ctx = ctx || recognizer._ctx; + offendingToken = offendingToken || recognizer.getCurrentToken(); + startToken = startToken || recognizer.getCurrentToken(); + input = input || recognizer.getInputStream(); + super({message: "", recognizer: recognizer, input: input, ctx: ctx}); + // Which configurations did we try at input.index() that couldn't match + // input.LT(1)?// + this.deadEndConfigs = deadEndConfigs; + // The token object at the start index; the input stream might + // not be buffering tokens so get a reference to it. (At the + // time the error occurred, of course the stream needs to keep a + // buffer all of the tokens but later we might not have access to those.) + this.startToken = startToken; + this.offendingToken = offendingToken; + } +} + +/** + * This signifies any kind of mismatched input exceptions such as + * when the current input does not match the expected token. 
+*/ +class InputMismatchException extends RecognitionException { + constructor(recognizer) { + super({message: "", recognizer: recognizer, input: recognizer.getInputStream(), ctx: recognizer._ctx}); + this.offendingToken = recognizer.getCurrentToken(); + } +} + +function formatMessage(predicate, message) { if (message !==null) { return message; } else { return "failed predicate: {" + predicate + "}?"; } -}; - -function ParseCancellationException() { - Error.call(this); - Error.captureStackTrace(this, ParseCancellationException); - return this; } -ParseCancellationException.prototype = Object.create(Error.prototype); -ParseCancellationException.prototype.constructor = ParseCancellationException; +/** + * A semantic predicate failed during validation. Validation of predicates + * occurs when normally parsing the alternative just like matching a token. + * Disambiguating predicate evaluation occurs when we test a predicate during + * prediction. +*/ +class FailedPredicateException extends RecognitionException { + constructor(recognizer, predicate, message) { + super({ + message: formatMessage(predicate, message || null), recognizer: recognizer, + input: recognizer.getInputStream(), ctx: recognizer._ctx + }); + const s = recognizer._interp.atn.states[recognizer.state] + const trans = s.transitions[0] + if (trans instanceof PredicateTransition) { + this.ruleIndex = trans.ruleIndex; + this.predicateIndex = trans.predIndex; + } else { + this.ruleIndex = 0; + this.predicateIndex = 0; + } + this.predicate = predicate; + this.offendingToken = recognizer.getCurrentToken(); + } +} -exports.RecognitionException = RecognitionException; -exports.NoViableAltException = NoViableAltException; -exports.LexerNoViableAltException = LexerNoViableAltException; -exports.InputMismatchException = InputMismatchException; -exports.FailedPredicateException = FailedPredicateException; -exports.ParseCancellationException = ParseCancellationException; + +class ParseCancellationException extends 
Error{ + constructor() { + super() + Error.captureStackTrace(this, ParseCancellationException); + } +} + +module.exports = { + RecognitionException, + NoViableAltException, + LexerNoViableAltException, + InputMismatchException, + FailedPredicateException, + ParseCancellationException +}; diff --git a/runtime/JavaScript/src/antlr4/error/index.js b/runtime/JavaScript/src/antlr4/error/index.js index 5569260a8..482b47edb 100644 --- a/runtime/JavaScript/src/antlr4/error/index.js +++ b/runtime/JavaScript/src/antlr4/error/index.js @@ -3,11 +3,12 @@ * can be found in the LICENSE.txt file in the project root. */ -exports.RecognitionException = require('./Errors').RecognitionException; -exports.NoViableAltException = require('./Errors').NoViableAltException; -exports.LexerNoViableAltException = require('./Errors').LexerNoViableAltException; -exports.InputMismatchException = require('./Errors').InputMismatchException; -exports.FailedPredicateException = require('./Errors').FailedPredicateException; -exports.DiagnosticErrorListener = require('./DiagnosticErrorListener').DiagnosticErrorListener; -exports.BailErrorStrategy = require('./ErrorStrategy').BailErrorStrategy; -exports.ErrorListener = require('./ErrorListener').ErrorListener; +module.exports.RecognitionException = require('./Errors').RecognitionException; +module.exports.NoViableAltException = require('./Errors').NoViableAltException; +module.exports.LexerNoViableAltException = require('./Errors').LexerNoViableAltException; +module.exports.InputMismatchException = require('./Errors').InputMismatchException; +module.exports.FailedPredicateException = require('./Errors').FailedPredicateException; +module.exports.DiagnosticErrorListener = require('./DiagnosticErrorListener'); +module.exports.BailErrorStrategy = require('./ErrorStrategy').BailErrorStrategy; +module.exports.DefaultErrorStrategy = require('./ErrorStrategy').DefaultErrorStrategy; +module.exports.ErrorListener = require('./ErrorListener').ErrorListener; diff 
--git a/runtime/JavaScript/src/antlr4/index.js b/runtime/JavaScript/src/antlr4/index.js index 340e96bfc..a8392d6d7 100644 --- a/runtime/JavaScript/src/antlr4/index.js +++ b/runtime/JavaScript/src/antlr4/index.js @@ -9,15 +9,17 @@ exports.fromcodepoint = require('./polyfills/fromcodepoint'); exports.tree = require('./tree/index'); exports.error = require('./error/index'); exports.Token = require('./Token').Token; -exports.CharStreams = require('./CharStreams').CharStreams; +exports.CharStreams = require('./CharStreams'); exports.CommonToken = require('./Token').CommonToken; -exports.InputStream = require('./InputStream').InputStream; -exports.FileStream = require('./FileStream').FileStream; -exports.CommonTokenStream = require('./CommonTokenStream').CommonTokenStream; -exports.Lexer = require('./Lexer').Lexer; -exports.Parser = require('./Parser').Parser; +exports.InputStream = require('./InputStream'); +exports.FileStream = require('./FileStream'); +exports.CommonTokenStream = require('./CommonTokenStream'); +exports.Lexer = require('./Lexer'); +exports.Parser = require('./Parser'); var pc = require('./PredictionContext'); exports.PredictionContextCache = pc.PredictionContextCache; -exports.ParserRuleContext = require('./ParserRuleContext').ParserRuleContext; +exports.ParserRuleContext = require('./ParserRuleContext'); exports.Interval = require('./IntervalSet').Interval; +exports.IntervalSet = require('./IntervalSet').IntervalSet; exports.Utils = require('./Utils'); +exports.LL1Analyzer = require('./LL1Analyzer').LL1Analyzer; diff --git a/runtime/JavaScript/src/antlr4/tree/Tree.js b/runtime/JavaScript/src/antlr4/tree/Tree.js index 515b122b5..cd87ef8b0 100644 --- a/runtime/JavaScript/src/antlr4/tree/Tree.js +++ b/runtime/JavaScript/src/antlr4/tree/Tree.js @@ -2,229 +2,227 @@ * Use of this file is governed by the BSD 3-clause license that * can be found in the LICENSE.txt file in the project root. 
*/ -/// -// The basic notion of a tree has a parent, a payload, and a list of children. -// It is the most abstract interface for all the trees used by ANTLR. -/// +const {Token} = require('./../Token'); +const {Interval} = require('./../IntervalSet'); +const INVALID_INTERVAL = new Interval(-1, -2); -var Token = require('./../Token').Token; -var Interval = require('./../IntervalSet').Interval; -var INVALID_INTERVAL = new Interval(-1, -2); -var Utils = require('../Utils.js'); +/** + * The basic notion of a tree has a parent, a payload, and a list of children. + * It is the most abstract interface for all the trees used by ANTLR. + */ +class Tree {} - -function Tree() { - return this; -} - -function SyntaxTree() { - Tree.call(this); - return this; -} - -SyntaxTree.prototype = Object.create(Tree.prototype); -SyntaxTree.prototype.constructor = SyntaxTree; - -function ParseTree() { - SyntaxTree.call(this); - return this; -} - -ParseTree.prototype = Object.create(SyntaxTree.prototype); -ParseTree.prototype.constructor = ParseTree; - -function RuleNode() { - ParseTree.call(this); - return this; -} - -RuleNode.prototype = Object.create(ParseTree.prototype); -RuleNode.prototype.constructor = RuleNode; - -function TerminalNode() { - ParseTree.call(this); - return this; -} - -TerminalNode.prototype = Object.create(ParseTree.prototype); -TerminalNode.prototype.constructor = TerminalNode; - -function ErrorNode() { - TerminalNode.call(this); - return this; -} - -ErrorNode.prototype = Object.create(TerminalNode.prototype); -ErrorNode.prototype.constructor = ErrorNode; - -function ParseTreeVisitor() { - return this; -} - -ParseTreeVisitor.prototype.visit = function(ctx) { - if (Array.isArray(ctx)) { - return ctx.map(function(child) { - return child.accept(this); - }, this); - } else { - return ctx.accept(this); +class SyntaxTree extends Tree { + constructor() { + super(); } -}; +} -ParseTreeVisitor.prototype.visitChildren = function(ctx) { - if (ctx.children) { - return 
this.visit(ctx.children); - } else { +class ParseTree extends SyntaxTree { + constructor() { + super(); + } +} + +class RuleNode extends ParseTree { + constructor() { + super(); + } + + getRuleContext(){ + throw new Error("missing interface implementation") + } +} + +class TerminalNode extends ParseTree { + constructor() { + super(); + } +} + +class ErrorNode extends TerminalNode { + constructor() { + super(); + } +} + +class ParseTreeVisitor { + visit(ctx) { + if (Array.isArray(ctx)) { + return ctx.map(function(child) { + return child.accept(this); + }, this); + } else { + return ctx.accept(this); + } + } + + visitChildren(ctx) { + if (ctx.children) { + return this.visit(ctx.children); + } else { + return null; + } + } + + visitTerminal(node) { + } + + visitErrorNode(node) { + } +} + +class ParseTreeListener { + visitTerminal(node) { + } + + visitErrorNode(node) { + } + + enterEveryRule(node) { + } + + exitEveryRule(node) { + } +} + +class TerminalNodeImpl extends TerminalNode { + constructor(symbol) { + super(); + this.parentCtx = null; + this.symbol = symbol; + } + + getChild(i) { return null; } -} -ParseTreeVisitor.prototype.visitTerminal = function(node) { -}; - -ParseTreeVisitor.prototype.visitErrorNode = function(node) { -}; - - -function ParseTreeListener() { - return this; -} - -ParseTreeListener.prototype.visitTerminal = function(node) { -}; - -ParseTreeListener.prototype.visitErrorNode = function(node) { -}; - -ParseTreeListener.prototype.enterEveryRule = function(node) { -}; - -ParseTreeListener.prototype.exitEveryRule = function(node) { -}; - -function TerminalNodeImpl(symbol) { - TerminalNode.call(this); - this.parentCtx = null; - this.symbol = symbol; - return this; -} - -TerminalNodeImpl.prototype = Object.create(TerminalNode.prototype); -TerminalNodeImpl.prototype.constructor = TerminalNodeImpl; - -TerminalNodeImpl.prototype.getChild = function(i) { - return null; -}; - -TerminalNodeImpl.prototype.getSymbol = function() { - return this.symbol; -}; 
- -TerminalNodeImpl.prototype.getParent = function() { - return this.parentCtx; -}; - -TerminalNodeImpl.prototype.getPayload = function() { - return this.symbol; -}; - -TerminalNodeImpl.prototype.getSourceInterval = function() { - if (this.symbol === null) { - return INVALID_INTERVAL; + getSymbol() { + return this.symbol; } - var tokenIndex = this.symbol.tokenIndex; - return new Interval(tokenIndex, tokenIndex); -}; -TerminalNodeImpl.prototype.getChildCount = function() { - return 0; -}; + getParent() { + return this.parentCtx; + } -TerminalNodeImpl.prototype.accept = function(visitor) { - return visitor.visitTerminal(this); -}; + getPayload() { + return this.symbol; + } -TerminalNodeImpl.prototype.getText = function() { - return this.symbol.text; -}; + getSourceInterval() { + if (this.symbol === null) { + return INVALID_INTERVAL; + } + const tokenIndex = this.symbol.tokenIndex; + return new Interval(tokenIndex, tokenIndex); + } -TerminalNodeImpl.prototype.toString = function() { - if (this.symbol.type === Token.EOF) { - return ""; - } else { + getChildCount() { + return 0; + } + + accept(visitor) { + return visitor.visitTerminal(this); + } + + getText() { return this.symbol.text; } -}; -// Represents a token that was consumed during resynchronization -// rather than during a valid match operation. For example, -// we will create this kind of a node during single token insertion -// and deletion as well as during "consume until error recovery set" -// upon no viable alternative exceptions. 
- -function ErrorNodeImpl(token) { - TerminalNodeImpl.call(this, token); - return this; -} - -ErrorNodeImpl.prototype = Object.create(TerminalNodeImpl.prototype); -ErrorNodeImpl.prototype.constructor = ErrorNodeImpl; - -ErrorNodeImpl.prototype.isErrorNode = function() { - return true; -}; - -ErrorNodeImpl.prototype.accept = function(visitor) { - return visitor.visitErrorNode(this); -}; - -function ParseTreeWalker() { - return this; -} - -ParseTreeWalker.prototype.walk = function(listener, t) { - var errorNode = t instanceof ErrorNode || - (t.isErrorNode !== undefined && t.isErrorNode()); - if (errorNode) { - listener.visitErrorNode(t); - } else if (t instanceof TerminalNode) { - listener.visitTerminal(t); - } else { - this.enterRule(listener, t); - for (var i = 0; i < t.getChildCount(); i++) { - var child = t.getChild(i); - this.walk(listener, child); + toString() { + if (this.symbol.type === Token.EOF) { + return ""; + } else { + return this.symbol.text; } - this.exitRule(listener, t); } -}; -// -// The discovery of a rule node, involves sending two events: the generic -// {@link ParseTreeListener//enterEveryRule} and a -// {@link RuleContext}-specific event. First we trigger the generic and then -// the rule specific. We to them in reverse order upon finishing the node. -// -ParseTreeWalker.prototype.enterRule = function(listener, r) { - var ctx = r.getRuleContext(); - listener.enterEveryRule(ctx); - ctx.enterRule(listener); -}; +} -ParseTreeWalker.prototype.exitRule = function(listener, r) { - var ctx = r.getRuleContext(); - ctx.exitRule(listener); - listener.exitEveryRule(ctx); -}; + +/** + * Represents a token that was consumed during resynchronization + * rather than during a valid match operation. For example, + * we will create this kind of a node during single token insertion + * and deletion as well as during "consume until error recovery set" + * upon no viable alternative exceptions. 
+ */ +class ErrorNodeImpl extends TerminalNodeImpl { + constructor(token) { + super(token); + } + + isErrorNode() { + return true; + } + + accept(visitor) { + return visitor.visitErrorNode(this); + } +} + +class ParseTreeWalker { + + /** + * Performs a walk on the given parse tree starting at the root and going down recursively + * with depth-first search. On each node, {@link ParseTreeWalker//enterRule} is called before + * recursively walking down into child nodes, then + * {@link ParseTreeWalker//exitRule} is called after the recursive call to wind up. + * @param listener The listener used by the walker to process grammar rules + * @param t The parse tree to be walked on + */ + walk(listener, t) { + const errorNode = t instanceof ErrorNode || + (t.isErrorNode !== undefined && t.isErrorNode()); + if (errorNode) { + listener.visitErrorNode(t); + } else if (t instanceof TerminalNode) { + listener.visitTerminal(t); + } else { + this.enterRule(listener, t); + for (let i = 0; i < t.getChildCount(); i++) { + const child = t.getChild(i); + this.walk(listener, child); + } + this.exitRule(listener, t); + } + } + + /** + * Enters a grammar rule by first triggering the generic event {@link ParseTreeListener//enterEveryRule} + * then by triggering the event specific to the given parse tree node + * @param listener The listener responding to the trigger events + * @param r The grammar rule containing the rule context + */ + enterRule(listener, r) { + const ctx = r.getRuleContext(); + listener.enterEveryRule(ctx); + ctx.enterRule(listener); + } + + /** + * Exits a grammar rule by first triggering the event specific to the given parse tree node + * then by triggering the generic event {@link ParseTreeListener//exitEveryRule} + * @param listener The listener responding to the trigger events + * @param r The grammar rule containing the rule context + */ + exitRule(listener, r) { + const ctx = r.getRuleContext(); + ctx.exitRule(listener); + listener.exitEveryRule(ctx); + } +} 
ParseTreeWalker.DEFAULT = new ParseTreeWalker(); -exports.RuleNode = RuleNode; -exports.ErrorNode = ErrorNode; -exports.TerminalNode = TerminalNode; -exports.ErrorNodeImpl = ErrorNodeImpl; -exports.TerminalNodeImpl = TerminalNodeImpl; -exports.ParseTreeListener = ParseTreeListener; -exports.ParseTreeVisitor = ParseTreeVisitor; -exports.ParseTreeWalker = ParseTreeWalker; -exports.INVALID_INTERVAL = INVALID_INTERVAL; +module.exports = { + RuleNode, + ErrorNode, + TerminalNode, + ErrorNodeImpl, + TerminalNodeImpl, + ParseTreeListener, + ParseTreeVisitor, + ParseTreeWalker, + INVALID_INTERVAL +} diff --git a/runtime/JavaScript/src/antlr4/tree/Trees.js b/runtime/JavaScript/src/antlr4/tree/Trees.js index 884272e20..95343abb9 100644 --- a/runtime/JavaScript/src/antlr4/tree/Trees.js +++ b/runtime/JavaScript/src/antlr4/tree/Trees.js @@ -3,138 +3,136 @@ * can be found in the LICENSE.txt file in the project root. */ -var Utils = require('./../Utils'); -var Token = require('./../Token').Token; -var RuleNode = require('./Tree').RuleNode; -var ErrorNode = require('./Tree').ErrorNode; -var TerminalNode = require('./Tree').TerminalNode; -var ParserRuleContext = require('./../ParserRuleContext').ParserRuleContext; -var RuleContext = require('./../RuleContext').RuleContext; -var INVALID_ALT_NUMBER = require('./../atn/ATN').INVALID_ALT_NUMBER; - +const Utils = require('./../Utils'); +const {Token} = require('./../Token'); +const {ErrorNode, TerminalNode, RuleNode} = require('./Tree'); /** A set of utility routines useful for all kinds of ANTLR trees. */ -function Trees() { +const Trees = { + /** + * Print out a whole tree in LISP form. {@link //getNodeText} is used on the + * node payloads to get the text for the nodes. Detect + * parse trees and extract data appropriately. 
+ */ + toStringTree: function(tree, ruleNames, recog) { + ruleNames = ruleNames || null; + recog = recog || null; + if(recog!==null) { + ruleNames = recog.ruleNames; + } + let s = Trees.getNodeText(tree, ruleNames); + s = Utils.escapeWhitespace(s, false); + const c = tree.getChildCount(); + if(c===0) { + return s; + } + let res = "(" + s + ' '; + if(c>0) { + s = Trees.toStringTree(tree.getChild(0), ruleNames); + res = res.concat(s); + } + for(let i=1;i0) { - s = Trees.toStringTree(tree.getChild(0), ruleNames); - res = res.concat(s); - } - for(var i=1;i. -// -//////////////////////////////////////////////////////////////////////////////// - -// NOTE The load parameter points to the function, which prepares the -// environment for each module and runs its code. Scroll down to the end of -// the file to see the function definition. -(function(load) { 'use strict'; - -// NOTE Mozilla still sets the wrong fileName property for errors that occur -// inside an eval call (even with sourceURL). However, the stack -// contains the correct source, so it can be used to re-threw the error -// with the correct fileName property. -// WARN Re-throwing an error object will mess up the stack trace and the -// column number. - if (typeof (new Error()).fileName == "string") { - self.addEventListener("error", function(evt) { - if (evt.error instanceof Error) { - if (pwd[0]) { - evt.preventDefault(); - throw new evt.error.constructor(evt.error.message, pwd[0].uri, evt.error.lineNumber); - } - else { - var m = evt.error.stack.match(/^[^\n@]*@([^\n]+):\d+:\d+/); - if (m === null) { - console.warn("Honey: unable to read file name from stack"); - } - else if (evt.error.fileName != m[1]) { - evt.preventDefault(); - throw new evt.error.constructor(evt.error.message, m[1], evt.error.lineNumber); - } - } - } - }, false); - } - -// INFO Current module descriptors -// pwd[0] contains the descriptor of the currently loaded module, -// pwd[1] contains the descriptor its parent module and so on. 
- - var pwd = Array(); - -// INFO Path parser -// Older browsers don't support the URL interface, therefore we use an -// anchor element as parser in that case. Thes breaks web worker support, -// but we don't care since these browsers also don't support web workers. - - var parser = (function() { - try { - return new URL(location.href); - } catch(e) { - var p = Object.create(location); - // need to set writable, because WorkerLocation is read-only - Object.defineProperty(p, "href", {writable:true}); - return p; - } - })(); - -// INFO Module cache -// Contains getter functions for the exports objects of all the loaded -// modules. The getter for the module 'mymod' is name '$name' to prevent -// collisions with predefined object properties (see note below). -// As long as a module has not been loaded the getter is either undefined -// or contains the module code as a function (in case the module has been -// pre-loaded in a bundle). -// WARN IE8 supports defineProperty only for DOM objects, therfore we use a -// HTMLDivElement as cache in that case. This breaks web worker support, -// but we don't care since IE8 has no web workers at all. - - try { - var cache = new Object(); - Object.defineProperty(cache, "foo", {'value':"bar",'configurable':true}); - delete cache.foo; - } - catch (e) { - console.warn("Honey: falling back to DOM workaround for cache object ("+e+")"); - cache = document.createElement('DIV'); - } - -// INFO Send lock -// Sending the request causes the event loop to continue. Therefore -// pending AJAX load events for the same url might be executed before -// the synchronous onLoad is called. This should be no problem, but in -// Chrome the responseText of the sneaked in load events will be empty. -// Therefore we have to lock the loading while executing send(). - - var lock = new Object(); - -// INFO Honey options -// The values can be set by defining a object called Honey. 
The -// Honey object has to be defined before this script here is loaded -// and changing the values in the Honey object will have no effect -// afterwards! - - var requirePath = self.Honey&&self.Honey.requirePath!==undefined ? self.Honey.requirePath.slice(0) : ['./']; - var requireCompiler = self.Honey&&self.Honey.requireCompiler!==undefined ? self.Honey.requireCompiler : null; - -// NOTE Parse module root paths - var base = [location.origin, location.href.substr(0, location.href.lastIndexOf("/")+1)]; - for (var i=0; i0) diff --git a/runtime/Python3/src/antlr4/InputStream.py b/runtime/Python3/src/antlr4/InputStream.py index 87e66af72..ca63d083a 100644 --- a/runtime/Python3/src/antlr4/InputStream.py +++ b/runtime/Python3/src/antlr4/InputStream.py @@ -3,7 +3,6 @@ # Use of this file is governed by the BSD 3-clause license that # can be found in the LICENSE.txt file in the project root. # -import unittest # @@ -85,20 +84,3 @@ class InputStream (object): def __str__(self): return self.strdata - - -class TestInputStream(unittest.TestCase): - - def testStream(self): - stream = InputStream("abcde") - self.assertEqual(0, stream.index) - self.assertEqual(5, stream.size) - self.assertEqual(ord("a"), stream.LA(1)) - stream.consume() - self.assertEqual(1, stream.index) - stream.seek(5) - self.assertEqual(Token.EOF, stream.LA(1)) - self.assertEqual("bcd", stream.getText(1, 3)) - stream.reset() - self.assertEqual(0, stream.index) - diff --git a/runtime/Python3/src/antlr4/IntervalSet.py b/runtime/Python3/src/antlr4/IntervalSet.py index 543209943..9742426c9 100644 --- a/runtime/Python3/src/antlr4/IntervalSet.py +++ b/runtime/Python3/src/antlr4/IntervalSet.py @@ -5,7 +5,6 @@ # from io import StringIO -import unittest from antlr4.Token import Token # need forward declarations @@ -178,105 +177,3 @@ class IntervalSet(object): if a" - - -class TestIntervalSet(unittest.TestCase): - - def testEmpty(self): - s = IntervalSet() - self.assertIsNone(s.intervals) - self.assertFalse(30 in s) - 
- def testOne(self): - s = IntervalSet() - s.addOne(30) - self.assertTrue(30 in s) - self.assertFalse(29 in s) - self.assertFalse(31 in s) - - def testTwo(self): - s = IntervalSet() - s.addOne(30) - s.addOne(40) - self.assertTrue(30 in s) - self.assertTrue(40 in s) - self.assertFalse(35 in s) - - def testRange(self): - s = IntervalSet() - s.addRange(range(30,41)) - self.assertTrue(30 in s) - self.assertTrue(40 in s) - self.assertTrue(35 in s) - - def testDistinct1(self): - s = IntervalSet() - s.addRange(range(30,32)) - s.addRange(range(40,42)) - self.assertEquals(2,len(s.intervals)) - self.assertTrue(30 in s) - self.assertTrue(40 in s) - self.assertFalse(35 in s) - - def testDistinct2(self): - s = IntervalSet() - s.addRange(range(40,42)) - s.addRange(range(30,32)) - self.assertEquals(2,len(s.intervals)) - self.assertTrue(30 in s) - self.assertTrue(40 in s) - self.assertFalse(35 in s) - - def testContiguous1(self): - s = IntervalSet() - s.addRange(range(30,36)) - s.addRange(range(36,41)) - self.assertEquals(1,len(s.intervals)) - self.assertTrue(30 in s) - self.assertTrue(40 in s) - self.assertTrue(35 in s) - - def testContiguous2(self): - s = IntervalSet() - s.addRange(range(36,41)) - s.addRange(range(30,36)) - self.assertEquals(1,len(s.intervals)) - self.assertTrue(30 in s) - self.assertTrue(40 in s) - - def testOverlapping1(self): - s = IntervalSet() - s.addRange(range(30,40)) - s.addRange(range(35,45)) - self.assertEquals(1,len(s.intervals)) - self.assertTrue(30 in s) - self.assertTrue(44 in s) - - def testOverlapping2(self): - s = IntervalSet() - s.addRange(range(35,45)) - s.addRange(range(30,40)) - self.assertEquals(1,len(s.intervals)) - self.assertTrue(30 in s) - self.assertTrue(44 in s) - - def testOverlapping3(self): - s = IntervalSet() - s.addRange(range(30,32)) - s.addRange(range(40,42)) - s.addRange(range(50,52)) - s.addRange(range(20,61)) - self.assertEquals(1,len(s.intervals)) - self.assertTrue(20 in s) - self.assertTrue(60 in s) - - def 
testComplement(self): - s = IntervalSet() - s.addRange(range(10,21)) - c = s.complement(1,100) - self.assertTrue(1 in c) - self.assertTrue(100 in c) - self.assertTrue(10 not in c) - self.assertTrue(20 not in c) - - diff --git a/runtime/Python3/src/antlr4/Lexer.py b/runtime/Python3/src/antlr4/Lexer.py index a60b5bcc9..0a96b70af 100644 --- a/runtime/Python3/src/antlr4/Lexer.py +++ b/runtime/Python3/src/antlr4/Lexer.py @@ -9,8 +9,12 @@ # of speed. #/ from io import StringIO -from typing.io import TextIO + import sys +if sys.version_info[1] > 5: + from typing import TextIO +else: + from typing.io import TextIO from antlr4.CommonTokenFactory import CommonTokenFactory from antlr4.atn.LexerATNSimulator import LexerATNSimulator from antlr4.InputStream import InputStream diff --git a/runtime/Python3/src/antlr4/Parser.py b/runtime/Python3/src/antlr4/Parser.py index c461bbdc0..11bf41796 100644 --- a/runtime/Python3/src/antlr4/Parser.py +++ b/runtime/Python3/src/antlr4/Parser.py @@ -3,7 +3,10 @@ # Use of this file is governed by the BSD 3-clause license that # can be found in the LICENSE.txt file in the project root. 
import sys -from typing.io import TextIO +if sys.version_info[1] > 5: + from typing import TextIO +else: + from typing.io import TextIO from antlr4.BufferedTokenStream import TokenStream from antlr4.CommonTokenFactory import TokenFactory from antlr4.error.ErrorStrategy import DefaultErrorStrategy diff --git a/runtime/Python3/src/antlr4/PredictionContext.py b/runtime/Python3/src/antlr4/PredictionContext.py index e3736ad2e..7cbb7e49e 100644 --- a/runtime/Python3/src/antlr4/PredictionContext.py +++ b/runtime/Python3/src/antlr4/PredictionContext.py @@ -158,7 +158,7 @@ class SingletonPredictionContext(PredictionContext): class EmptyPredictionContext(SingletonPredictionContext): def __init__(self): - super().__init__(None, self.EMPTY_RETURN_STATE) + super().__init__(None, PredictionContext.EMPTY_RETURN_STATE) def isEmpty(self): return True diff --git a/runtime/Python3/src/antlr4/Recognizer.py b/runtime/Python3/src/antlr4/Recognizer.py index 01017351f..a41d32fa8 100644 --- a/runtime/Python3/src/antlr4/Recognizer.py +++ b/runtime/Python3/src/antlr4/Recognizer.py @@ -33,7 +33,7 @@ class Recognizer(object): return major, minor def checkVersion(self, toolVersion): - runtimeVersion = "4.7.2" + runtimeVersion = "4.8" rvmajor, rvminor = self.extractVersion(runtimeVersion) tvmajor, tvminor = self.extractVersion(toolVersion) if rvmajor!=tvmajor or rvminor!=tvminor: @@ -144,17 +144,3 @@ class Recognizer(object): self._stateNumber = atnState del RecognitionException - -import unittest -class Test(unittest.TestCase): - - def testVersion(self): - major, minor = Recognizer().extractVersion("1.2") - self.assertEqual("1", major) - self.assertEqual("2", minor) - major, minor = Recognizer().extractVersion("1.2.3") - self.assertEqual("1", major) - self.assertEqual("2", minor) - major, minor = Recognizer().extractVersion("1.2-snapshot") - self.assertEqual("1", major) - self.assertEqual("2", minor) diff --git a/runtime/Python3/src/antlr4/tree/Tree.py b/runtime/Python3/src/antlr4/tree/Tree.py 
index 2b9db2d1c..68660b48b 100644 --- a/runtime/Python3/src/antlr4/tree/Tree.py +++ b/runtime/Python3/src/antlr4/tree/Tree.py @@ -140,6 +140,14 @@ class ParseTreeWalker(object): DEFAULT = None def walk(self, listener:ParseTreeListener, t:ParseTree): + """ + Performs a walk on the given parse tree starting at the root and going down recursively + with depth-first search. On each node, {@link ParseTreeWalker#enterRule} is called before + recursively walking down into child nodes, then + {@link ParseTreeWalker#exitRule} is called after the recursive call to wind up. + @param listener The listener used by the walker to process grammar rules + @param t The parse tree to be walked on + """ if isinstance(t, ErrorNode): listener.visitErrorNode(t) return @@ -158,11 +166,23 @@ class ParseTreeWalker(object): # the rule specific. We to them in reverse order upon finishing the node. # def enterRule(self, listener:ParseTreeListener, r:RuleNode): + """ + Enters a grammar rule by first triggering the generic event {@link ParseTreeListener#enterEveryRule} + then by triggering the event specific to the given parse tree node + @param listener The listener responding to the trigger events + @param r The grammar rule containing the rule context + """ ctx = r.getRuleContext() listener.enterEveryRule(ctx) ctx.enterRule(listener) def exitRule(self, listener:ParseTreeListener, r:RuleNode): + """ + Exits a grammar rule by first triggering the event specific to the given parse tree node + then by triggering the generic event {@link ParseTreeListener#exitEveryRule} + @param listener The listener responding to the trigger events + @param r The grammar rule containing the rule context + """ ctx = r.getRuleContext() ctx.exitRule(listener) listener.exitEveryRule(ctx) diff --git a/runtime/Python3/src/antlr4/xpath/XPath.py b/runtime/Python3/src/antlr4/xpath/XPath.py index 58b05c466..3ac7d0c82 100644 --- a/runtime/Python3/src/antlr4/xpath/XPath.py +++ b/runtime/Python3/src/antlr4/xpath/XPath.py @@ 
-119,7 +119,7 @@ class XPathLexer(Lexer): def __init__(self, input=None): super().__init__(input) - self.checkVersion("4.7.2") + self.checkVersion("4.8") self._interp = LexerATNSimulator(self, self.atn, self.decisionsToDFA, PredictionContextCache()) self._actions = None self._predicates = None diff --git a/runtime/Python3/test/TestFileStream.py b/runtime/Python3/test/TestFileStream.py new file mode 100644 index 000000000..a8ba9da06 --- /dev/null +++ b/runtime/Python3/test/TestFileStream.py @@ -0,0 +1,8 @@ +import unittest +from antlr4.FileStream import FileStream + + +class TestFileStream(unittest.TestCase): + def testStream(self): + stream = FileStream(__file__) + self.assertTrue(stream.size > 0) diff --git a/runtime/Python3/test/TestInputStream.py b/runtime/Python3/test/TestInputStream.py new file mode 100644 index 000000000..6c5938041 --- /dev/null +++ b/runtime/Python3/test/TestInputStream.py @@ -0,0 +1,19 @@ +import unittest +from antlr4.Token import Token +from antlr4.InputStream import InputStream + + +class TestInputStream(unittest.TestCase): + + def testStream(self): + stream = InputStream("abcde") + self.assertEqual(0, stream.index) + self.assertEqual(5, stream.size) + self.assertEqual(ord("a"), stream.LA(1)) + stream.consume() + self.assertEqual(1, stream.index) + stream.seek(5) + self.assertEqual(Token.EOF, stream.LA(1)) + self.assertEqual("bcd", stream.getText(1, 3)) + stream.reset() + self.assertEqual(0, stream.index) diff --git a/runtime/Python3/test/TestIntervalSet.py b/runtime/Python3/test/TestIntervalSet.py new file mode 100644 index 000000000..6428c0d47 --- /dev/null +++ b/runtime/Python3/test/TestIntervalSet.py @@ -0,0 +1,101 @@ +import unittest +from antlr4.IntervalSet import IntervalSet + + +class TestIntervalSet(unittest.TestCase): + def testEmpty(self): + s = IntervalSet() + self.assertIsNone(s.intervals) + self.assertFalse(30 in s) + + def testOne(self): + s = IntervalSet() + s.addOne(30) + self.assertTrue(30 in s) + self.assertFalse(29 in 
s) + self.assertFalse(31 in s) + + def testTwo(self): + s = IntervalSet() + s.addOne(30) + s.addOne(40) + self.assertTrue(30 in s) + self.assertTrue(40 in s) + self.assertFalse(35 in s) + + def testRange(self): + s = IntervalSet() + s.addRange(range(30,41)) + self.assertTrue(30 in s) + self.assertTrue(40 in s) + self.assertTrue(35 in s) + + def testDistinct1(self): + s = IntervalSet() + s.addRange(range(30,32)) + s.addRange(range(40,42)) + self.assertEquals(2,len(s.intervals)) + self.assertTrue(30 in s) + self.assertTrue(40 in s) + self.assertFalse(35 in s) + + def testDistinct2(self): + s = IntervalSet() + s.addRange(range(40,42)) + s.addRange(range(30,32)) + self.assertEquals(2,len(s.intervals)) + self.assertTrue(30 in s) + self.assertTrue(40 in s) + self.assertFalse(35 in s) + + def testContiguous1(self): + s = IntervalSet() + s.addRange(range(30,36)) + s.addRange(range(36,41)) + self.assertEquals(1,len(s.intervals)) + self.assertTrue(30 in s) + self.assertTrue(40 in s) + self.assertTrue(35 in s) + + def testContiguous2(self): + s = IntervalSet() + s.addRange(range(36,41)) + s.addRange(range(30,36)) + self.assertEquals(1,len(s.intervals)) + self.assertTrue(30 in s) + self.assertTrue(40 in s) + + def testOverlapping1(self): + s = IntervalSet() + s.addRange(range(30,40)) + s.addRange(range(35,45)) + self.assertEquals(1,len(s.intervals)) + self.assertTrue(30 in s) + self.assertTrue(44 in s) + + def testOverlapping2(self): + s = IntervalSet() + s.addRange(range(35,45)) + s.addRange(range(30,40)) + self.assertEquals(1,len(s.intervals)) + self.assertTrue(30 in s) + self.assertTrue(44 in s) + + def testOverlapping3(self): + s = IntervalSet() + s.addRange(range(30,32)) + s.addRange(range(40,42)) + s.addRange(range(50,52)) + s.addRange(range(20,61)) + self.assertEquals(1,len(s.intervals)) + self.assertTrue(20 in s) + self.assertTrue(60 in s) + + def testComplement(self): + s = IntervalSet() + s.addRange(range(10,21)) + c = s.complement(1,100) + self.assertTrue(1 in c) + 
self.assertTrue(100 in c) + self.assertTrue(10 not in c) + self.assertTrue(20 not in c) diff --git a/runtime/Python3/test/TestRecognizer.py b/runtime/Python3/test/TestRecognizer.py new file mode 100644 index 000000000..3a5d18266 --- /dev/null +++ b/runtime/Python3/test/TestRecognizer.py @@ -0,0 +1,15 @@ +import unittest +from antlr4.Recognizer import Recognizer + + +class TestRecognizer(unittest.TestCase): + def testVersion(self): + major, minor = Recognizer().extractVersion("1.2") + self.assertEqual("1", major) + self.assertEqual("2", minor) + major, minor = Recognizer().extractVersion("1.2.3") + self.assertEqual("1", major) + self.assertEqual("2", minor) + major, minor = Recognizer().extractVersion("1.2-snapshot") + self.assertEqual("1", major) + self.assertEqual("2", minor) diff --git a/runtime/Python3/test/expr/ExprLexer.py b/runtime/Python3/test/expr/ExprLexer.py index e338b0b9e..67df69b89 100644 --- a/runtime/Python3/test/expr/ExprLexer.py +++ b/runtime/Python3/test/expr/ExprLexer.py @@ -86,7 +86,7 @@ class ExprLexer(Lexer): def __init__(self, input=None, output:TextIO = sys.stdout): super().__init__(input, output) - self.checkVersion("4.7.2") + self.checkVersion("4.8") self._interp = LexerATNSimulator(self, self.atn, self.decisionsToDFA, PredictionContextCache()) self._actions = None self._predicates = None diff --git a/runtime/Python3/test/expr/ExprParser.py b/runtime/Python3/test/expr/ExprParser.py index 598c778d1..a642f189e 100644 --- a/runtime/Python3/test/expr/ExprParser.py +++ b/runtime/Python3/test/expr/ExprParser.py @@ -86,7 +86,7 @@ class ExprParser ( Parser ): def __init__(self, input:TokenStream, output:TextIO = sys.stdout): super().__init__(input, output) - self.checkVersion("4.7.2") + self.checkVersion("4.8") self._interp = ParserATNSimulator(self, self.atn, self.decisionsToDFA, self.sharedContextCache) self._predicates = None diff --git a/runtime/Python3/test/mocks/TestLexer.py b/runtime/Python3/test/mocks/TestLexer.py index 
9c54007b2..63147241a 100644 --- a/runtime/Python3/test/mocks/TestLexer.py +++ b/runtime/Python3/test/mocks/TestLexer.py @@ -39,7 +39,7 @@ class TestLexer(Lexer): def __init__(self, input=None): super(TestLexer, self).__init__(input) - self.checkVersion("4.7.2") + self.checkVersion("4.8") self._interp = LexerATNSimulator(self, self.atn, self.decisionsToDFA, PredictionContextCache()) self._actions = None self._predicates = None @@ -95,7 +95,7 @@ class TestLexer2(Lexer): def __init__(self, input=None): super(TestLexer2, self).__init__(input) - self.checkVersion("4.7.2") + self.checkVersion("4.8") self._interp = LexerATNSimulator(self, self.atn, self.decisionsToDFA, PredictionContextCache()) self._actions = None self._predicates = None diff --git a/runtime/Python3/test/parser/clexer.py b/runtime/Python3/test/parser/clexer.py index c35320593..345572c98 100644 --- a/runtime/Python3/test/parser/clexer.py +++ b/runtime/Python3/test/parser/clexer.py @@ -792,7 +792,7 @@ class CLexer(Lexer): def __init__(self, input=None): super().__init__(input) - self.checkVersion("4.7.2") + self.checkVersion("4.8") self._interp = LexerATNSimulator(self, self.atn, self.decisionsToDFA, PredictionContextCache()) self._actions = None self._predicates = None diff --git a/runtime/Python3/test/parser/cparser.py b/runtime/Python3/test/parser/cparser.py index 1eb1d741b..17016be12 100644 --- a/runtime/Python3/test/parser/cparser.py +++ b/runtime/Python3/test/parser/cparser.py @@ -915,7 +915,7 @@ class CParser ( Parser ): def __init__(self, input:TokenStream): super().__init__(input) - self.checkVersion("4.7.2") + self.checkVersion("4.8") self._interp = ParserATNSimulator(self, self.atn, self.decisionsToDFA, self.sharedContextCache) self._predicates = None diff --git a/runtime/Python3/test/run.py b/runtime/Python3/test/run.py index 5aad7896c..5f842044f 100644 --- a/runtime/Python3/test/run.py +++ b/runtime/Python3/test/run.py @@ -4,5 +4,9 @@ src_path = 
os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__ sys.path.insert(0,src_path) from TestTokenStreamRewriter import TestTokenStreamRewriter from xpathtest import XPathTest +from TestFileStream import TestFileStream +from TestInputStream import TestInputStream +from TestIntervalSet import TestIntervalSet +from TestRecognizer import TestRecognizer import unittest -unittest.main() \ No newline at end of file +unittest.main() diff --git a/runtime/Swift/Sources/Antlr4/DiagnosticErrorListener.swift b/runtime/Swift/Sources/Antlr4/DiagnosticErrorListener.swift index 4109f672b..144d34921 100644 --- a/runtime/Swift/Sources/Antlr4/DiagnosticErrorListener.swift +++ b/runtime/Swift/Sources/Antlr4/DiagnosticErrorListener.swift @@ -101,7 +101,7 @@ public class DiagnosticErrorListener: BaseErrorListener { let decision: Int = dfa.decision let ruleIndex: Int = dfa.atnStartState.ruleIndex! - var ruleNames: [String] = recognizer.getRuleNames() + let ruleNames: [String] = recognizer.getRuleNames() if ruleIndex < 0 || ruleIndex >= ruleNames.count { return String(decision) } diff --git a/runtime/Swift/Sources/Antlr4/Parser.swift b/runtime/Swift/Sources/Antlr4/Parser.swift index 1f70f2eea..75c134ce5 100644 --- a/runtime/Swift/Sources/Antlr4/Parser.swift +++ b/runtime/Swift/Sources/Antlr4/Parser.swift @@ -941,7 +941,7 @@ open class Parser: Recognizer { public func getRuleInvocationStack(_ p: RuleContext?) -> [String] { var p = p - var ruleNames = getRuleNames() + let ruleNames = getRuleNames() var stack = [String]() while let pWrap = p { // compute what follows who invoked us diff --git a/runtime/Swift/Sources/Antlr4/RuntimeMetaData.swift b/runtime/Swift/Sources/Antlr4/RuntimeMetaData.swift index b5ca8ceda..3ca97f244 100644 --- a/runtime/Swift/Sources/Antlr4/RuntimeMetaData.swift +++ b/runtime/Swift/Sources/Antlr4/RuntimeMetaData.swift @@ -63,7 +63,7 @@ public class RuntimeMetaData { /// omitted, the `-` (hyphen-minus) appearing before it is also /// omitted. 
/// - public static let VERSION: String = "4.7.2" + public static let VERSION: String = "4.8" /// /// Gets the currently executing version of the ANTLR 4 runtime library. diff --git a/runtime/Swift/Sources/Antlr4/UnbufferedCharStream.swift b/runtime/Swift/Sources/Antlr4/UnbufferedCharStream.swift index 7ef18facb..6b2841d2a 100644 --- a/runtime/Swift/Sources/Antlr4/UnbufferedCharStream.swift +++ b/runtime/Swift/Sources/Antlr4/UnbufferedCharStream.swift @@ -343,6 +343,8 @@ fileprivate struct UInt8StreamIterator: IteratorProtocol { return nil case .opening, .open, .reading: break + @unknown default: + fatalError() } let count = stream.read(&buffer, maxLength: buffer.count) diff --git a/runtime/Swift/Sources/Antlr4/VocabularySingle.swift b/runtime/Swift/Sources/Antlr4/VocabularySingle.swift index 663ff5372..37cd3b23b 100644 --- a/runtime/Swift/Sources/Antlr4/VocabularySingle.swift +++ b/runtime/Swift/Sources/Antlr4/VocabularySingle.swift @@ -155,11 +155,9 @@ public class Vocabulary: Hashable { return String(tokenType) } - public var hashValue: Int { - return Unmanaged.passUnretained(self).toOpaque().hashValue -// return unsafeAddress(of: self).hashValue + public func hash(into hasher: inout Hasher) { + hasher.combine(ObjectIdentifier(self)) } - } public func ==(lhs: Vocabulary, rhs: Vocabulary) -> Bool { diff --git a/runtime/Swift/Sources/Antlr4/atn/ATNConfig.swift b/runtime/Swift/Sources/Antlr4/atn/ATNConfig.swift index b1069a126..cfbe8b615 100644 --- a/runtime/Swift/Sources/Antlr4/atn/ATNConfig.swift +++ b/runtime/Swift/Sources/Antlr4/atn/ATNConfig.swift @@ -126,20 +126,11 @@ public class ATNConfig: Hashable, CustomStringConvertible { } } - /// - /// An ATN configuration is equal to another if both have - /// the same state, they predict the same alternative, and - /// syntactic/semantic contexts are the same. 
- /// - - public var hashValue: Int { - var hashCode = MurmurHash.initialize(7) - hashCode = MurmurHash.update(hashCode, state.stateNumber) - hashCode = MurmurHash.update(hashCode, alt) - hashCode = MurmurHash.update(hashCode, context) - hashCode = MurmurHash.update(hashCode, semanticContext) - return MurmurHash.finish(hashCode, 4) - + public func hash(into hasher: inout Hasher) { + hasher.combine(state.stateNumber) + hasher.combine(alt) + hasher.combine(context) + hasher.combine(semanticContext) } public var description: String { @@ -166,6 +157,11 @@ public class ATNConfig: Hashable, CustomStringConvertible { } } +/// +/// An ATN configuration is equal to another if both have +/// the same state, they predict the same alternative, and +/// syntactic/semantic contexts are the same. +/// public func ==(lhs: ATNConfig, rhs: ATNConfig) -> Bool { if lhs === rhs { diff --git a/runtime/Swift/Sources/Antlr4/atn/ATNConfigSet.swift b/runtime/Swift/Sources/Antlr4/atn/ATNConfigSet.swift index d1f6a1bb3..7da94377d 100644 --- a/runtime/Swift/Sources/Antlr4/atn/ATNConfigSet.swift +++ b/runtime/Swift/Sources/Antlr4/atn/ATNConfigSet.swift @@ -203,16 +203,16 @@ public final class ATNConfigSet: Hashable, CustomStringConvertible { return false } - public var hashValue: Int { + public func hash(into hasher: inout Hasher) { if isReadonly() { if cachedHashCode == -1 { - cachedHashCode = configsHashValue//configs.hashValue ; + cachedHashCode = configsHashValue } - - return cachedHashCode + hasher.combine(cachedHashCode) + } + else { + hasher.combine(configsHashValue) } - - return configsHashValue // configs.hashValue; } private var configsHashValue: Int { diff --git a/runtime/Swift/Sources/Antlr4/atn/ATNDeserializer.swift b/runtime/Swift/Sources/Antlr4/atn/ATNDeserializer.swift index 65b02efc6..ce8349bc8 100644 --- a/runtime/Swift/Sources/Antlr4/atn/ATNDeserializer.swift +++ b/runtime/Swift/Sources/Antlr4/atn/ATNDeserializer.swift @@ -78,8 +78,8 @@ public class ATNDeserializer { /// 
internal func isFeatureSupported(_ feature: UUID, _ actualUuid: UUID) -> Bool { let supported = ATNDeserializer.SUPPORTED_UUIDS - guard let featureIndex = supported.index(of: feature), - let actualIndex = supported.index(of: actualUuid) else { + guard let featureIndex = supported.firstIndex(of: feature), + let actualIndex = supported.firstIndex(of: actualUuid) else { return false } return actualIndex >= featureIndex diff --git a/runtime/Swift/Sources/Antlr4/atn/ATNState.swift b/runtime/Swift/Sources/Antlr4/atn/ATNState.swift index 02481d381..24f2dfab3 100644 --- a/runtime/Swift/Sources/Antlr4/atn/ATNState.swift +++ b/runtime/Swift/Sources/Antlr4/atn/ATNState.swift @@ -123,11 +123,10 @@ public class ATNState: Hashable, CustomStringConvertible { public internal(set) final var nextTokenWithinRule: IntervalSet? - public var hashValue: Int { - return stateNumber + public func hash(into hasher: inout Hasher) { + hasher.combine(stateNumber) } - public func isNonGreedyExitState() -> Bool { return false } diff --git a/runtime/Swift/Sources/Antlr4/atn/LexerATNConfig.swift b/runtime/Swift/Sources/Antlr4/atn/LexerATNConfig.swift index 3cacb48cb..1c4a0bb13 100644 --- a/runtime/Swift/Sources/Antlr4/atn/LexerATNConfig.swift +++ b/runtime/Swift/Sources/Antlr4/atn/LexerATNConfig.swift @@ -72,22 +72,14 @@ public class LexerATNConfig: ATNConfig { return passedThroughNonGreedyDecision } - override - /*public func hashCode() -> Int { - - }*/ - public var hashValue: Int { - var hashCode = MurmurHash.initialize(7) - hashCode = MurmurHash.update(hashCode, state.stateNumber) - hashCode = MurmurHash.update(hashCode, alt) - hashCode = MurmurHash.update(hashCode, context) - hashCode = MurmurHash.update(hashCode, semanticContext) - hashCode = MurmurHash.update(hashCode, passedThroughNonGreedyDecision ? 
1 : 0) - hashCode = MurmurHash.update(hashCode, lexerActionExecutor) - return MurmurHash.finish(hashCode, 6) - + public override func hash(into hasher: inout Hasher) { + hasher.combine(state.stateNumber) + hasher.combine(alt) + hasher.combine(context) + hasher.combine(semanticContext) + hasher.combine(passedThroughNonGreedyDecision) + hasher.combine(lexerActionExecutor) } - } //useless diff --git a/runtime/Swift/Sources/Antlr4/atn/LexerAction.swift b/runtime/Swift/Sources/Antlr4/atn/LexerAction.swift index 99d604da5..5920e405b 100644 --- a/runtime/Swift/Sources/Antlr4/atn/LexerAction.swift +++ b/runtime/Swift/Sources/Antlr4/atn/LexerAction.swift @@ -56,7 +56,7 @@ public class LexerAction: Hashable { fatalError(#function + " must be overridden") } - public var hashValue: Int { + public func hash(into hasher: inout Hasher) { fatalError(#function + " must be overridden") } diff --git a/runtime/Swift/Sources/Antlr4/atn/LexerActionExecutor.swift b/runtime/Swift/Sources/Antlr4/atn/LexerActionExecutor.swift index e33e92e9b..365f41a58 100644 --- a/runtime/Swift/Sources/Antlr4/atn/LexerActionExecutor.swift +++ b/runtime/Swift/Sources/Antlr4/atn/LexerActionExecutor.swift @@ -176,11 +176,9 @@ public class LexerActionExecutor: Hashable { } - public var hashValue: Int { - return self.hashCode + public func hash(into hasher: inout Hasher) { + hasher.combine(hashCode) } - - } public func ==(lhs: LexerActionExecutor, rhs: LexerActionExecutor) -> Bool { diff --git a/runtime/Swift/Sources/Antlr4/atn/LexerChannelAction.swift b/runtime/Swift/Sources/Antlr4/atn/LexerChannelAction.swift index 4d099f28c..a8fd17db6 100644 --- a/runtime/Swift/Sources/Antlr4/atn/LexerChannelAction.swift +++ b/runtime/Swift/Sources/Antlr4/atn/LexerChannelAction.swift @@ -63,12 +63,9 @@ public final class LexerChannelAction: LexerAction, CustomStringConvertible { } - override - public var hashValue: Int { - var hash = MurmurHash.initialize() - hash = MurmurHash.update(hash, getActionType().rawValue) - hash = 
MurmurHash.update(hash, channel) - return MurmurHash.finish(hash, 2) + public override func hash(into hasher: inout Hasher) { + hasher.combine(getActionType()) + hasher.combine(channel) } public var description: String { diff --git a/runtime/Swift/Sources/Antlr4/atn/LexerCustomAction.swift b/runtime/Swift/Sources/Antlr4/atn/LexerCustomAction.swift index 3fe954236..fc6741ec2 100644 --- a/runtime/Swift/Sources/Antlr4/atn/LexerCustomAction.swift +++ b/runtime/Swift/Sources/Antlr4/atn/LexerCustomAction.swift @@ -92,24 +92,17 @@ public final class LexerCustomAction: LexerAction { try lexer.action(nil, ruleIndex, actionIndex) } - override - public var hashValue: Int { - var hash = MurmurHash.initialize() - hash = MurmurHash.update(hash, getActionType().rawValue) - hash = MurmurHash.update(hash, ruleIndex) - hash = MurmurHash.update(hash, actionIndex) - return MurmurHash.finish(hash, 3) + public override func hash(into hasher: inout Hasher) { + hasher.combine(ruleIndex) + hasher.combine(actionIndex) } - } public func ==(lhs: LexerCustomAction, rhs: LexerCustomAction) -> Bool { - if lhs === rhs { return true } - return lhs.ruleIndex == rhs.ruleIndex && lhs.actionIndex == rhs.actionIndex } diff --git a/runtime/Swift/Sources/Antlr4/atn/LexerIndexedCustomAction.swift b/runtime/Swift/Sources/Antlr4/atn/LexerIndexedCustomAction.swift index 501b2e637..b9db97289 100644 --- a/runtime/Swift/Sources/Antlr4/atn/LexerIndexedCustomAction.swift +++ b/runtime/Swift/Sources/Antlr4/atn/LexerIndexedCustomAction.swift @@ -96,24 +96,17 @@ public final class LexerIndexedCustomAction: LexerAction { } - public override var hashValue: Int { - var hash = MurmurHash.initialize() - hash = MurmurHash.update(hash, offset) - hash = MurmurHash.update(hash, action) - return MurmurHash.finish(hash, 2) + public override func hash(into hasher: inout Hasher) { + hasher.combine(offset) + hasher.combine(action) } - - } public func ==(lhs: LexerIndexedCustomAction, rhs: LexerIndexedCustomAction) -> Bool { - if 
lhs === rhs { return true } - return lhs.offset == rhs.offset && lhs.action == rhs.action - } diff --git a/runtime/Swift/Sources/Antlr4/atn/LexerModeAction.swift b/runtime/Swift/Sources/Antlr4/atn/LexerModeAction.swift index bdcaf6fdc..fca51619a 100644 --- a/runtime/Swift/Sources/Antlr4/atn/LexerModeAction.swift +++ b/runtime/Swift/Sources/Antlr4/atn/LexerModeAction.swift @@ -62,25 +62,20 @@ public final class LexerModeAction: LexerAction, CustomStringConvertible { public func execute(_ lexer: Lexer) { lexer.mode(mode) } - override - public var hashValue: Int { - var hash = MurmurHash.initialize() - hash = MurmurHash.update(hash, getActionType().rawValue) - hash = MurmurHash.update(hash, mode) - return MurmurHash.finish(hash, 2) + + public override func hash(into hasher: inout Hasher) { + hasher.combine(mode) } + public var description: String { return "mode(\(mode))" } } public func ==(lhs: LexerModeAction, rhs: LexerModeAction) -> Bool { - if lhs === rhs { return true } - return lhs.mode == rhs.mode - } diff --git a/runtime/Swift/Sources/Antlr4/atn/LexerMoreAction.swift b/runtime/Swift/Sources/Antlr4/atn/LexerMoreAction.swift index bb9f197f3..c84f54ffd 100644 --- a/runtime/Swift/Sources/Antlr4/atn/LexerMoreAction.swift +++ b/runtime/Swift/Sources/Antlr4/atn/LexerMoreAction.swift @@ -56,23 +56,15 @@ public final class LexerMoreAction: LexerAction, CustomStringConvertible { } - override - public var hashValue: Int { - var hash = MurmurHash.initialize() - hash = MurmurHash.update(hash, getActionType().rawValue) - return MurmurHash.finish(hash, 1) - + public override func hash(into hasher: inout Hasher) { + hasher.combine(ObjectIdentifier(self)) } - public var description: String { return "more" } } public func ==(lhs: LexerMoreAction, rhs: LexerMoreAction) -> Bool { - return lhs === rhs - - } diff --git a/runtime/Swift/Sources/Antlr4/atn/LexerPopModeAction.swift b/runtime/Swift/Sources/Antlr4/atn/LexerPopModeAction.swift index f35e78304..68fbe3fa6 100644 --- 
a/runtime/Swift/Sources/Antlr4/atn/LexerPopModeAction.swift +++ b/runtime/Swift/Sources/Antlr4/atn/LexerPopModeAction.swift @@ -57,21 +57,15 @@ public final class LexerPopModeAction: LexerAction, CustomStringConvertible { } - override - public var hashValue: Int { - var hash = MurmurHash.initialize() - hash = MurmurHash.update(hash, getActionType().rawValue) - return MurmurHash.finish(hash, 1) - + public override func hash(into hasher: inout Hasher) { + hasher.combine(ObjectIdentifier(self)) } + public var description: String { return "popMode" } } public func ==(lhs: LexerPopModeAction, rhs: LexerPopModeAction) -> Bool { - return lhs === rhs - - } diff --git a/runtime/Swift/Sources/Antlr4/atn/LexerPushModeAction.swift b/runtime/Swift/Sources/Antlr4/atn/LexerPushModeAction.swift index fb432c497..ffac047aa 100644 --- a/runtime/Swift/Sources/Antlr4/atn/LexerPushModeAction.swift +++ b/runtime/Swift/Sources/Antlr4/atn/LexerPushModeAction.swift @@ -63,15 +63,10 @@ public final class LexerPushModeAction: LexerAction, CustomStringConvertible { lexer.pushMode(mode) } - - override - public var hashValue: Int { - var hash = MurmurHash.initialize() - hash = MurmurHash.update(hash, getActionType().rawValue) - hash = MurmurHash.update(hash, mode) - return MurmurHash.finish(hash, 2) - + public override func hash(into hasher: inout Hasher) { + hasher.combine(mode) } + public var description: String { return "pushMode(\(mode))" } @@ -79,10 +74,8 @@ public final class LexerPushModeAction: LexerAction, CustomStringConvertible { public func ==(lhs: LexerPushModeAction, rhs: LexerPushModeAction) -> Bool { - if lhs === rhs { return true } - return lhs.mode == rhs.mode } diff --git a/runtime/Swift/Sources/Antlr4/atn/LexerSkipAction.swift b/runtime/Swift/Sources/Antlr4/atn/LexerSkipAction.swift index bbdd06d2f..db8b1d8e7 100644 --- a/runtime/Swift/Sources/Antlr4/atn/LexerSkipAction.swift +++ b/runtime/Swift/Sources/Antlr4/atn/LexerSkipAction.swift @@ -56,19 +56,15 @@ public final class 
LexerSkipAction: LexerAction, CustomStringConvertible { } - override - public var hashValue: Int { - var hash = MurmurHash.initialize() - hash = MurmurHash.update(hash, getActionType().rawValue) - return MurmurHash.finish(hash, 1) + public override func hash(into hasher: inout Hasher) { + hasher.combine(ObjectIdentifier(self)) } + public var description: String { return "skip" } - } public func ==(lhs: LexerSkipAction, rhs: LexerSkipAction) -> Bool { - return lhs === rhs } diff --git a/runtime/Swift/Sources/Antlr4/atn/LexerTypeAction.swift b/runtime/Swift/Sources/Antlr4/atn/LexerTypeAction.swift index c08649ea3..8c7157be2 100644 --- a/runtime/Swift/Sources/Antlr4/atn/LexerTypeAction.swift +++ b/runtime/Swift/Sources/Antlr4/atn/LexerTypeAction.swift @@ -62,24 +62,18 @@ public class LexerTypeAction: LexerAction, CustomStringConvertible { } - override - public var hashValue: Int { - var hash = MurmurHash.initialize() - hash = MurmurHash.update(hash, getActionType().rawValue) - hash = MurmurHash.update(hash, type) - return MurmurHash.finish(hash, 2) + public override func hash(into hasher: inout Hasher) { + hasher.combine(type) } + public var description: String { return "type(\(type))" } - } public func ==(lhs: LexerTypeAction, rhs: LexerTypeAction) -> Bool { - if lhs === rhs { return true } - return lhs.type == rhs.type } diff --git a/runtime/Swift/Sources/Antlr4/atn/LookupATNConfig.swift b/runtime/Swift/Sources/Antlr4/atn/LookupATNConfig.swift index aef419b1d..d3a04a1cc 100644 --- a/runtime/Swift/Sources/Antlr4/atn/LookupATNConfig.swift +++ b/runtime/Swift/Sources/Antlr4/atn/LookupATNConfig.swift @@ -20,17 +20,12 @@ public class LookupATNConfig: Hashable { // dup config = old } - public var hashValue: Int { - - var hashCode: Int = 7 - hashCode = 31 * hashCode + config.state.stateNumber - hashCode = 31 * hashCode + config.alt - hashCode = 31 * hashCode + config.semanticContext.hashValue - return hashCode + public func hash(into hasher: inout Hasher) { + 
hasher.combine(config.state.stateNumber) + hasher.combine(config.alt) + hasher.combine(config.semanticContext) } - - } public func ==(lhs: LookupATNConfig, rhs: LookupATNConfig) -> Bool { diff --git a/runtime/Swift/Sources/Antlr4/atn/ParseInfo.swift b/runtime/Swift/Sources/Antlr4/atn/ParseInfo.swift index 244226a5c..2eb404592 100644 --- a/runtime/Swift/Sources/Antlr4/atn/ParseInfo.swift +++ b/runtime/Swift/Sources/Antlr4/atn/ParseInfo.swift @@ -40,7 +40,7 @@ public class ParseInfo { /// full-context predictions during parsing. /// public func getLLDecisions() -> Array { - var decisions: [DecisionInfo] = atnSimulator.getDecisionInfo() + let decisions: [DecisionInfo] = atnSimulator.getDecisionInfo() var LL: Array = Array() let length = decisions.count for i in 0.. Int64 { - var decisions: [DecisionInfo] = atnSimulator.getDecisionInfo() + let decisions: [DecisionInfo] = atnSimulator.getDecisionInfo() var t: Int64 = 0 let length = decisions.count for i in 0.. Int64 { - var decisions: [DecisionInfo] = atnSimulator.getDecisionInfo() + let decisions: [DecisionInfo] = atnSimulator.getDecisionInfo() var k: Int64 = 0 let length = decisions.count for i in 0.. Int64 { - var decisions: [DecisionInfo] = atnSimulator.getDecisionInfo() + let decisions: [DecisionInfo] = atnSimulator.getDecisionInfo() var k: Int64 = 0 let length = decisions.count for i in 0.. Int64 { - var decisions: [DecisionInfo] = atnSimulator.getDecisionInfo() + let decisions: [DecisionInfo] = atnSimulator.getDecisionInfo() var k: Int64 = 0 let length = decisions.count for i in 0.. Int64 { - var decisions: [DecisionInfo] = atnSimulator.getDecisionInfo() + let decisions: [DecisionInfo] = atnSimulator.getDecisionInfo() var k: Int64 = 0 let length = decisions.count for i in 0.. Int64 { - var decisions: [DecisionInfo] = atnSimulator.getDecisionInfo() + let decisions: [DecisionInfo] = atnSimulator.getDecisionInfo() var k: Int64 = 0 let length = decisions.count for i in 0.. DFAState? 
{ - var edges = previousD.edges + let edges = previousD.edges if edges == nil || (t + 1) < 0 || (t + 1) >= (edges!.count) { return nil } diff --git a/runtime/Swift/Sources/Antlr4/atn/PredictionContext.swift b/runtime/Swift/Sources/Antlr4/atn/PredictionContext.swift index 376088905..0e7026df4 100644 --- a/runtime/Swift/Sources/Antlr4/atn/PredictionContext.swift +++ b/runtime/Swift/Sources/Antlr4/atn/PredictionContext.swift @@ -105,8 +105,8 @@ public class PredictionContext: Hashable, CustomStringConvertible { return getReturnState(size() - 1) == PredictionContext.EMPTY_RETURN_STATE } - public final var hashValue: Int { - return cachedHashCode + public func hash(into hasher: inout Hasher) { + hasher.combine(cachedHashCode) } static func calculateEmptyHashCode() -> Int { @@ -668,7 +668,7 @@ public class PredictionContext: Hashable, CustomStringConvertible { } public func toString(_ recog: Recognizer) -> String { - return NSStringFromClass(PredictionContext.self) + return String(describing: PredictionContext.self) // return toString(recog, ParserRuleContext.EMPTY); } diff --git a/runtime/Swift/Sources/Antlr4/atn/SemanticContext.swift b/runtime/Swift/Sources/Antlr4/atn/SemanticContext.swift index 118f549f5..dffe24a1f 100644 --- a/runtime/Swift/Sources/Antlr4/atn/SemanticContext.swift +++ b/runtime/Swift/Sources/Antlr4/atn/SemanticContext.swift @@ -61,7 +61,7 @@ public class SemanticContext: Hashable, CustomStringConvertible { return self } - public var hashValue: Int { + public func hash(into hasher: inout Hasher) { fatalError(#function + " must be overridden") } @@ -94,16 +94,12 @@ public class SemanticContext: Hashable, CustomStringConvertible { return try parser.sempred(localctx, ruleIndex, predIndex) } - override - public var hashValue: Int { - var hashCode = MurmurHash.initialize() - hashCode = MurmurHash.update(hashCode, ruleIndex) - hashCode = MurmurHash.update(hashCode, predIndex) - hashCode = MurmurHash.update(hashCode, isCtxDependent ? 
1 : 0) - return MurmurHash.finish(hashCode, 3) + public override func hash(into hasher: inout Hasher) { + hasher.combine(ruleIndex) + hasher.combine(predIndex) + hasher.combine(isCtxDependent) } - override public var description: String { return "{\(ruleIndex):\(predIndex)}?" @@ -138,11 +134,8 @@ public class SemanticContext: Hashable, CustomStringConvertible { } - override - public var hashValue: Int { - var hashCode: Int = 1 - hashCode = 31 * hashCode + precedence - return hashCode + public override func hash(into hasher: inout Hasher) { + hasher.combine(precedence) } override @@ -214,12 +207,8 @@ public class SemanticContext: Hashable, CustomStringConvertible { } - override - public var hashValue: Int { - //MurmurHash.hashCode(opnds, AND.class.hashCode()); - let seed = 1554547125 - //NSStringFromClass(AND.self).hashValue - return MurmurHash.hashCode(opnds, seed) + public override func hash(into hasher: inout Hasher) { + hasher.combine(opnds) } /// @@ -323,11 +312,8 @@ public class SemanticContext: Hashable, CustomStringConvertible { return opnds } - - override - public var hashValue: Int { - - return MurmurHash.hashCode(opnds, NSStringFromClass(OR.self).hashValue) + public override func hash(into hasher: inout Hasher) { + hasher.combine(opnds) } /// diff --git a/runtime/Swift/Sources/Antlr4/atn/Transition.swift b/runtime/Swift/Sources/Antlr4/atn/Transition.swift index 5af20e22a..af31c134e 100644 --- a/runtime/Swift/Sources/Antlr4/atn/Transition.swift +++ b/runtime/Swift/Sources/Antlr4/atn/Transition.swift @@ -54,16 +54,16 @@ public class Transition { public static let serializationTypes: Dictionary = [ - NSStringFromClass(EpsilonTransition.self): EPSILON, - NSStringFromClass(RangeTransition.self): RANGE, - NSStringFromClass(RuleTransition.self): RULE, - NSStringFromClass(PredicateTransition.self): PREDICATE, - NSStringFromClass(AtomTransition.self): ATOM, - NSStringFromClass(ActionTransition.self): ACTION, - NSStringFromClass(SetTransition.self): SET, - 
NSStringFromClass(NotSetTransition.self): NOT_SET, - NSStringFromClass(WildcardTransition.self): WILDCARD, - NSStringFromClass(PrecedencePredicateTransition.self): PRECEDENCE, + String(describing: EpsilonTransition.self): EPSILON, + String(describing: RangeTransition.self): RANGE, + String(describing: RuleTransition.self): RULE, + String(describing: PredicateTransition.self): PREDICATE, + String(describing: AtomTransition.self): ATOM, + String(describing: ActionTransition.self): ACTION, + String(describing: SetTransition.self): SET, + String(describing: NotSetTransition.self): NOT_SET, + String(describing: WildcardTransition.self): WILDCARD, + String(describing: PrecedencePredicateTransition.self): PRECEDENCE, ] diff --git a/runtime/Swift/Sources/Antlr4/dfa/DFAState.swift b/runtime/Swift/Sources/Antlr4/dfa/DFAState.swift index d44dd34ec..8a705872c 100644 --- a/runtime/Swift/Sources/Antlr4/dfa/DFAState.swift +++ b/runtime/Swift/Sources/Antlr4/dfa/DFAState.swift @@ -109,10 +109,8 @@ public final class DFAState: Hashable, CustomStringConvertible { } - public var hashValue: Int { - var hash = MurmurHash.initialize(7) - hash = MurmurHash.update(hash, configs.hashValue) - return MurmurHash.finish(hash, 1) + public func hash(into hasher: inout Hasher) { + hasher.combine(configs) } public var description: String { diff --git a/runtime/Swift/Sources/Antlr4/misc/BitSet.swift b/runtime/Swift/Sources/Antlr4/misc/BitSet.swift index ef03b8aa7..5e611efc2 100644 --- a/runtime/Swift/Sources/Antlr4/misc/BitSet.swift +++ b/runtime/Swift/Sources/Antlr4/misc/BitSet.swift @@ -1053,7 +1053,7 @@ public class BitSet: Hashable, CustomStringConvertible { /// /// - returns: the hash code value for this bit set /// - public var hashValue: Int { + private var hashCode: Int { var h: Int64 = 1234 var i: Int = wordsInUse i -= 1 @@ -1065,6 +1065,10 @@ public class BitSet: Hashable, CustomStringConvertible { return Int(Int32((h >> 32) ^ h)) } + public func hash(into hasher: inout Hasher) { + 
hasher.combine(hashCode) + } + /// /// Returns the number of bits of space actually in use by this /// `BitSet` to represent bit values. diff --git a/runtime/Swift/Sources/Antlr4/misc/Interval.swift b/runtime/Swift/Sources/Antlr4/misc/Interval.swift index e3b4ad49f..f03ad9b08 100644 --- a/runtime/Swift/Sources/Antlr4/misc/Interval.swift +++ b/runtime/Swift/Sources/Antlr4/misc/Interval.swift @@ -62,13 +62,12 @@ public class Interval: Hashable { } - public var hashValue: Int { - var hash: Int = 23 - hash = hash * 31 + a - hash = hash * 31 + b - return hash + public func hash(into hasher: inout Hasher) { + hasher.combine(a) + hasher.combine(b) } - /// + + /// /// Does this start completely before other? Disjoint /// public func startsBeforeDisjoint(_ other: Interval) -> Bool { diff --git a/runtime/Swift/Sources/Antlr4/misc/IntervalSet.swift b/runtime/Swift/Sources/Antlr4/misc/IntervalSet.swift index 04c590db0..16bbb929a 100644 --- a/runtime/Swift/Sources/Antlr4/misc/IntervalSet.swift +++ b/runtime/Swift/Sources/Antlr4/misc/IntervalSet.swift @@ -326,8 +326,8 @@ public class IntervalSet: IntSet, Hashable, CustomStringConvertible { return nil // nothing in common with null set } - var myIntervals = self.intervals - var theirIntervals = (other as! IntervalSet).intervals + let myIntervals = self.intervals + let theirIntervals = (other as! IntervalSet).intervals var intersection: IntervalSet? 
= nil let mySize = myIntervals.count let theirSize = theirIntervals.count @@ -470,25 +470,13 @@ public class IntervalSet: IntSet, Hashable, CustomStringConvertible { return intervals } - - public func hashCode() -> Int { - var hash = MurmurHash.initialize() - for I: Interval in intervals { - hash = MurmurHash.update(hash, I.a) - hash = MurmurHash.update(hash, I.b) + public func hash(into hasher: inout Hasher) { + for interval in intervals { + hasher.combine(interval.a) + hasher.combine(interval.b) } - - return MurmurHash.finish(hash, intervals.count * 2) } - public var hashValue: Int { - var hash = MurmurHash.initialize() - for I: Interval in intervals { - hash = MurmurHash.update(hash, I.a) - hash = MurmurHash.update(hash, I.b) - } - return MurmurHash.finish(hash, intervals.count * 2) - } /// /// Are two IntervalSets equal? Because all intervals are sorted /// and disjoint, equals is a simple linear walk over both lists diff --git a/runtime/Swift/Sources/Antlr4/tree/ParseTreeProperty.swift b/runtime/Swift/Sources/Antlr4/tree/ParseTreeProperty.swift new file mode 100644 index 000000000..49c98ddae --- /dev/null +++ b/runtime/Swift/Sources/Antlr4/tree/ParseTreeProperty.swift @@ -0,0 +1,15 @@ +/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ +import Foundation + +public class ParseTreeProperty { + var annotations = Dictionary() + + public init() {} + + open func get(_ node: ParseTree) -> V? 
{ return annotations[ObjectIdentifier(node)] } + open func put(_ node: ParseTree, _ value: V) { annotations[ObjectIdentifier(node)] = value } + open func removeFrom(_ node: ParseTree) { annotations.removeValue(forKey: ObjectIdentifier(node)) } +} diff --git a/runtime/Swift/Sources/Antlr4/tree/ParseTreeWalker.swift b/runtime/Swift/Sources/Antlr4/tree/ParseTreeWalker.swift index 2e4a84e5c..6a09b61b5 100644 --- a/runtime/Swift/Sources/Antlr4/tree/ParseTreeWalker.swift +++ b/runtime/Swift/Sources/Antlr4/tree/ParseTreeWalker.swift @@ -10,6 +10,14 @@ public class ParseTreeWalker { public init() { } + /** + * Performs a walk on the given parse tree starting at the root and going down recursively + * with depth-first search. On each node, ParseTreeWalker.enterRule is called before + * recursively walking down into child nodes, then + * ParseTreeWalker.exitRule is called after the recursive call to wind up. + * - Parameter listener: The listener used by the walker to process grammar rules + * - Parameter t: The parse tree to be walked on + */ public func walk(_ listener: ParseTreeListener, _ t: ParseTree) throws { if let errNode = t as? ErrorNode { listener.visitErrorNode(errNode) @@ -30,18 +38,24 @@ public class ParseTreeWalker { } } - /// - /// The discovery of a rule node, involves sending two events: the generic - /// _org.antlr.v4.runtime.tree.ParseTreeListener#enterEveryRule_ and a - /// _org.antlr.v4.runtime.RuleContext_-specific event. First we trigger the generic and then - /// the rule specific. We to them in reverse order upon finishing the node. 
- /// + /** + * Enters a grammar rule by first triggering the generic event ParseTreeListener.enterEveryRule + * then by triggering the event specific to the given parse tree node + * - Parameter listener: The listener responding to the trigger events + * - Parameter r: The grammar rule containing the rule context + */ internal func enterRule(_ listener: ParseTreeListener, _ r: RuleNode) throws { let ctx = r.getRuleContext() as! ParserRuleContext try listener.enterEveryRule(ctx) ctx.enterRule(listener) } + /** + * Exits a grammar rule by first triggering the event specific to the given parse tree node + * then by triggering the generic event ParseTreeListener.exitEveryRule + * - Parameter listener: The listener responding to the trigger events + * - Parameter r: The grammar rule containing the rule context + */ internal func exitRule(_ listener: ParseTreeListener, _ r: RuleNode) throws { let ctx = r.getRuleContext() as! ParserRuleContext ctx.exitRule(listener) diff --git a/runtime/Swift/Tests/Antlr4Tests/RuntimeMetaDataTests.swift b/runtime/Swift/Tests/Antlr4Tests/RuntimeMetaDataTests.swift index c3845c922..97ea0880a 100644 --- a/runtime/Swift/Tests/Antlr4Tests/RuntimeMetaDataTests.swift +++ b/runtime/Swift/Tests/Antlr4Tests/RuntimeMetaDataTests.swift @@ -15,6 +15,7 @@ class RuntimeMetaDataTests: XCTestCase { doGetMajorMinorVersionTest("4.7", "4.7") doGetMajorMinorVersionTest("4.7.1", "4.7") doGetMajorMinorVersionTest("4.7.2", "4.7") + doGetMajorMinorVersionTest("4.8", "4.8") doGetMajorMinorVersionTest("4-SNAPSHOT", "4") doGetMajorMinorVersionTest("4.-SNAPSHOT", "4.") doGetMajorMinorVersionTest("4.7-SNAPSHOT", "4.7") diff --git a/scripts/github_release_notes.py b/scripts/github_release_notes.py index 10b8cca8c..0d5a0a492 100644 --- a/scripts/github_release_notes.py +++ b/scripts/github_release_notes.py @@ -5,7 +5,7 @@ from github import Github from collections import Counter import sys -TARGETS = ['csharp', 'cpp', 'go', 'java', 'javascript', 'python2', 'python3', 
'swift'] +TARGETS = ['csharp', 'cpp', 'go', 'java', 'javascript', 'python2', 'python3', 'swift', 'php'] TOKEN=sys.argv[1] MILESTONE=sys.argv[2] diff --git a/tool-testsuite/pom.xml b/tool-testsuite/pom.xml index 4ef8bb482..0b8bf4d66 100644 --- a/tool-testsuite/pom.xml +++ b/tool-testsuite/pom.xml @@ -10,7 +10,7 @@ org.antlr antlr4-master - 4.7.3-SNAPSHOT + 4.8-2-SNAPSHOT antlr4-tool-testsuite ANTLR 4 Tool Tests @@ -26,7 +26,7 @@ org.antlr ST4 - 4.1 + 4.3 test diff --git a/tool/pom.xml b/tool/pom.xml index f7ca644dc..11edf014d 100644 --- a/tool/pom.xml +++ b/tool/pom.xml @@ -9,7 +9,7 @@ org.antlr antlr4-master - 4.7.3-SNAPSHOT + 4.8-2-SNAPSHOT antlr4 ANTLR 4 Tool @@ -29,7 +29,7 @@ org.antlr ST4 - 4.1 + 4.3 org.abego.treelayout diff --git a/tool/resources/org/antlr/v4/tool/templates/codegen/CSharp/CSharp.stg b/tool/resources/org/antlr/v4/tool/templates/codegen/CSharp/CSharp.stg index a665ba67b..13d497dcb 100644 --- a/tool/resources/org/antlr/v4/tool/templates/codegen/CSharp/CSharp.stg +++ b/tool/resources/org/antlr/v4/tool/templates/codegen/CSharp/CSharp.stg @@ -116,6 +116,7 @@ using ParserRuleContext = Antlr4.Runtime.ParserRuleContext; /// of the available methods. /// \
[System.CodeDom.Compiler.GeneratedCode("ANTLR", "")] +[System.Diagnostics.DebuggerNonUserCode] [System.CLSCompliant(false)] public partial class BaseListener : IListener { /// \The return type of the visit operation.\ [System.CodeDom.Compiler.GeneratedCode("ANTLR", "")] +[System.Diagnostics.DebuggerNonUserCode] [System.CLSCompliant(false)] public partial class BaseVisitor\ : AbstractParseTreeVisitor\, IVisitor\ { ContextTokenGetterDecl(t) ::= - "public ITerminalNode () { return GetToken(., 0); }" + "[System.Diagnostics.DebuggerNonUserCode] public ITerminalNode () { return GetToken(., 0); }" ContextTokenListGetterDecl(t) ::= << -public () { return GetTokens(.); } +[System.Diagnostics.DebuggerNonUserCode] public () { return GetTokens(.); } >> ContextTokenListIndexedGetterDecl(t) ::= << -public ITerminalNode (int i) { +[System.Diagnostics.DebuggerNonUserCode] public ITerminalNode (int i) { return GetToken(., i); } >> ContextRuleGetterDecl(r) ::= << -public () { +[System.Diagnostics.DebuggerNonUserCode] public () { return GetRuleContext\<\>(0); } >> ContextRuleListGetterDecl(r) ::= << -public })> () { +[System.Diagnostics.DebuggerNonUserCode] public })> () { return GetRuleContexts\<\>(); } >> ContextRuleListIndexedGetterDecl(r) ::= << -public (int i) { +[System.Diagnostics.DebuggerNonUserCode] public (int i) { return GetRuleContext\<\>(i); } >> @@ -887,6 +889,7 @@ public partial class : Context { >> ListenerDispatchMethod(method) ::= << +[System.Diagnostics.DebuggerNonUserCode] public override void EnterExitRule(IParseTreeListener listener) { IListener typedListener = listener as IListener; if (typedListener != null) typedListener.EnterExit(this); @@ -894,6 +897,7 @@ public override void EnterExitRule(IParseTreeLi >> VisitorDispatchMethod(method) ::= << +[System.Diagnostics.DebuggerNonUserCode] public override TResult Accept\(IParseTreeVisitor\ visitor) { IVisitor\ typedVisitor = visitor as IVisitor\; if (typedVisitor != null) return typedVisitor.Visit(this); diff 
--git a/tool/resources/org/antlr/v4/tool/templates/codegen/Cpp/Cpp.stg b/tool/resources/org/antlr/v4/tool/templates/codegen/Cpp/Cpp.stg index 4c367dda5..fb63f3d61 100644 --- a/tool/resources/org/antlr/v4/tool/templates/codegen/Cpp/Cpp.stg +++ b/tool/resources/org/antlr/v4/tool/templates/codegen/Cpp/Cpp.stg @@ -63,7 +63,7 @@ public: }; - (antlr4::CharStream *input); + explicit (antlr4::CharStream *input); ~(); @@ -192,23 +192,23 @@ atn::ATN ::_atn; std::vector\ ::_serializedATN; std::vector\ ::_ruleNames = { - "}; separator = ", ", wrap, anchor> + "}; separator = ", ", wrap, anchor> }; std::vector\ ::_channelNames = { - "DEFAULT_TOKEN_CHANNEL", "HIDDEN", "}; separator = ", ", wrap, anchor> + "DEFAULT_TOKEN_CHANNEL", "HIDDEN", "}; separator = ", ", wrap, anchor> }; std::vector\ ::_modeNames = { - "}; separator = ", ", wrap, anchor> + "}; separator = ", ", wrap, anchor> }; std::vector\ ::_literalNames = { - }; null = "\"\"", separator = ", ", wrap, anchor> + }; null = "\"\"", separator = ", ", wrap, anchor> }; std::vector\ ::_symbolicNames = { - }; null = "\"\"", separator = ", ", wrap, anchor> + }; null = "\"\"", separator = ", ", wrap, anchor> }; dfa::Vocabulary ::_vocabulary(_literalNames, _symbolicNames); @@ -290,7 +290,7 @@ public: }; - (antlr4::TokenStream *input); + explicit (antlr4::TokenStream *input); ~(); virtual std::string getGrammarFileName() const override; @@ -464,7 +464,11 @@ RuleFunction(currentRule, args, code, locals, ruleCtx, altLabelCtxs, namedAction +#if __cplusplus > 201703L + auto onExit = finally([=, this] { +#else auto onExit = finally([=] { +#endif exitRule(); }); @@ -517,7 +521,11 @@ LeftRecursiveRuleFunction(currentRule, args, code, locals, ruleCtx, altLabelCtxs +#if __cplusplus > 201703L + auto onExit = finally([=, this] { +#else auto onExit = finally([=] { +#endif unrollRecursionContexts(parentContext); }); @@ -539,7 +547,7 @@ LeftRecursiveRuleFunction(currentRule, args, code, locals, ruleCtx, altLabelCtxs StructDeclHeader(struct, 
ctorAttrs, attrs, getters, dispatchMethods, interfaces, extensionMembers) ::= << class : public antlr4::ParserRuleContext, { public: - ;}; separator="\n"> + ;}; separator = "\n"> (antlr4::ParserRuleContext *parent, size_t invokingState); (antlr4::ParserRuleContext *parent, size_t invokingState}>); @@ -593,7 +601,7 @@ class : public Context *ctx); - }; separator = "\n"> + ;}; separator = "\n"> }; separator = "\n"> }; @@ -710,6 +718,8 @@ switch (getInterpreter\()->adaptivePredict(_input, +default: + break; } >> @@ -724,6 +734,8 @@ switch (getInterpreter\()->adaptivePredict(_input, +default: + break; } >> @@ -961,19 +973,19 @@ AddToLabelList(a) ::= << TokenLabelType() ::= " *" -TokenDeclHeader(t) ::= "antlr4:: = nullptr;" +TokenDeclHeader(t) ::= "antlr4:: = nullptr" TokenDecl(t) ::= "" TokenTypeDeclHeader(t) ::= "" TokenTypeDecl(t) ::= "size_t = 0;" -TokenListDeclHeader(t) ::= "std::vector\ ;" +TokenListDeclHeader(t) ::= "std::vector\ " TokenListDecl(t) ::= "" -RuleContextDeclHeader(r) ::= ":: * = nullptr;" +RuleContextDeclHeader(r) ::= ":: * = nullptr" RuleContextDecl(r) ::= "" -RuleContextListDeclHeader(rdecl) ::= "std::vector\< *> ;" +RuleContextListDeclHeader(rdecl) ::= "std::vector\< *> " RuleContextListDecl(rdecl) ::= "" ContextTokenGetterDeclHeader(t) ::= "antlr4::tree::TerminalNode *();" diff --git a/tool/resources/org/antlr/v4/tool/templates/codegen/Dart/Dart.stg b/tool/resources/org/antlr/v4/tool/templates/codegen/Dart/Dart.stg new file mode 100644 index 000000000..39f453bec --- /dev/null +++ b/tool/resources/org/antlr/v4/tool/templates/codegen/Dart/Dart.stg @@ -0,0 +1,908 @@ +/* + * [The "BSD license"] + * Copyright (c) 2012 Terence Parr + * Copyright (c) 2012 Sam Harwell + * Copyright (c) 2014 Tiago Mazzutti + * Copyright (c) 2017 Tobe Osakwe + * Copyright (c) 2020 Larry Li + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +dartTypeInitMap ::= [ + "int":"0", + "double":"0.0", + "bool":"false", + default:"null" // anything other than a primitive type is an object +] + +// args must be , + +ParserFile(file, parser, namedActions, contextSuperClass) ::= << + + +library ; + +import 'package:antlr4/antlr4.dart'; +import 'dart:io'; + + +part 'Listener.dart'; +part 'BaseListener.dart'; + + +part 'Visitor.dart'; +part 'BaseVisitor.dart'; + +part 'Lexer.dart'; + +import 'package:antlr4/antlr4.dart'; +import 'dart:io'; + + +import 'Listener.dart'; +import 'BaseListener.dart'; + + +import 'Visitor.dart'; +import 'BaseVisitor.dart'; + + + + + +>> + +ListenerFile(file, header, namedActions) ::= << + + +part of ; + +import 'package:antlr4/antlr4.dart'; + +import '.dart'; + +
+ +/// This abstract class defines a complete listener for a parse tree produced by +/// []. +abstract class Listener extends ParseTreeListener { + + /// Enter a parse tree produced by the [] + /// labeled alternative in [file.parserName>.]. + + /// Enter a parse tree produced by [.]. + + /// [ctx] the parse tree + void enter(Context ctx); + + /// Exit a parse tree produced by the [] + /// labeled alternative in [.]. + + /// Exit a parse tree produced by [.]. + + /// [ctx] the parse tree + void exit(Context ctx);}; separator="\n"> +} +>> + +BaseListenerFile(file, header, namedActions) ::= << + + +part of ; + +import 'package:antlr4/antlr4.dart'; + +import '.dart'; +import 'Listener.dart'; + + +
+ +/// This class provides an empty implementation of [Listener], +/// which can be extended to create a listener which only needs to handle +/// a subset of the available methods. +class BaseListener implements Listener { +(Context ctx) {\} + + /// The default implementation does nothing. + @override + void exit(Context ctx) {\}}; separator="\n"> + + /// The default implementation does nothing. + @override + void enterEveryRule(ParserRuleContext ctx) {} + + /// The default implementation does nothing. + @override + void exitEveryRule(ParserRuleContext ctx) {} + + /// The default implementation does nothing. + @override + void visitTerminal(TerminalNode node) {} + + /// The default implementation does nothing. + @override + void visitErrorNode(ErrorNode node) {} +} + +>> + +VisitorFile(file, header, namedActions) ::= << + + +part of ; + +import 'package:antlr4/antlr4.dart'; + +import '.dart'; + +
+ +/// This abstract class defines a complete generic visitor for a parse tree +/// produced by []. +/// +/// [T] is the eturn type of the visit operation. Use `void` for +/// operations with no return type. +abstract class Visitor\ extends ParseTreeVisitor\ { + +/// Visit a parse tree produced by the {@code \} +/// labeled alternative in {@link #\}. + +/// Visit a parse tree produced by [.]. + +/// [ctx] the parse tree. +/// Return the visitor result. +T visit(Context ctx);}; separator="\n"> +} +>> + +BaseVisitorFile(file, header, namedActions) ::= << + + +part of ; + +import 'package:antlr4/antlr4.dart'; + +import '.dart'; +import 'Visitor.dart'; + +
+ +/// This class provides an empty implementation of [Visitor], +/// which can be extended to create a visitor which only needs to handle +/// a subset of the available methods. +/// +/// [T] is the return type of the visit operation. Use `void` for +/// operations with no return type. +class BaseVisitor\ extends ParseTreeVisitor\ implements Visitor\ { + (Context ctx) => visitChildren(ctx);}; separator="\n"> +} +>> + +fileHeader(grammarFileName, ANTLRVersion) ::= << +// Generated from by ANTLR +// ignore_for_file: unused_import, unused_local_variable, prefer_single_quotes +>> + +Parser(parser, funcs, atn, sempredFuncs, superClass) ::= << + +>> + +Parser_(parser, funcs, atn, sempredFuncs, ctor, superClass) ::= << + +const int = }; separator=", ", wrap, anchor>; +class extends { + static final checkVersion = () => RuntimeMetaData.checkVersion('', RuntimeMetaData.VERSION); + static const int TOKEN_EOF = IntStream.EOF; + + static final List\ _decisionToDFA = List.generate( + _ATN.numberOfDecisions, (i) => DFA(_ATN.getDecisionState(i), i)); + static final PredictionContextCache _sharedContextCache = PredictionContextCache(); + + static const int = }; separator=", ", wrap, anchor>; + + + @override + final List\ ruleNames = [ + '}; separator=", ", wrap, anchor> + ]; + + + + @override + String get grammarFileName => ''; + + @override + String get serializedATN => _serializedATN; + + @override + ATN getATN() { + return _ATN; + } + + + + + + + @override + bool sempred(RuleContext _localctx, int ruleIndex, int predIndex) { + switch (ruleIndex) { + : + return __sempred(_localctx, predIndex);}; separator="\n"> + } + return true; + } + + + + +} +}; separator="\n\n"> + +}; separator="\n\n">}> +>> + +vocabulary(literalNames, symbolicNames) ::= << +static final List\ _LITERAL_NAMES = [ + }; null="null", separator=", ", wrap, anchor> +]; +static final List\ _SYMBOLIC_NAMES = [ + }; null="null", separator=", ", wrap, anchor> +]; +static final Vocabulary VOCABULARY = 
VocabularyImpl(_LITERAL_NAMES, _SYMBOLIC_NAMES); + +@override +Vocabulary get vocabulary { + return VOCABULARY; +} +>> + +dumpActions(recog, argFuncs, actionFuncs, sempredFuncs) ::= << + +void action(RuleContext _localctx, int ruleIndex, int actionIndex) { + switch (ruleIndex) { + : + __action(_localctx, actionIndex); + break;}; separator="\n"> + } +} + + + +bool sempred(RuleContext _localctx, int ruleIndex, int predIndex) { + switch (ruleIndex) { + : + return __sempred(_localctx, predIndex);}; separator="\n"> + } + return true; +} + + +>> + +parser_ctor(p) ::= << +(TokenStream input) : super(input) { + interpreter = ParserATNSimulator(this, _ATN, _decisionToDFA, _sharedContextCache); +} +>> + +/// This generates a private method since the actionIndex is generated, making an +/// overriding implementation impossible to maintain. +RuleActionFunction(r, actions) ::= << +void __action( _localctx, int actionIndex) { + switch (actionIndex) { + : break;}; separator="\n"> + } +} +>> + +/// This generates a private method since the predIndex is generated, making an +/// overriding implementation impossible to maintain. 
+RuleSempredFunction(r, actions) ::= << +bool __sempred( _localctx, int predIndex) { + switch (predIndex) { + : return ;}; separator="\n"> + } + return true; +} +>> + +RuleFunction(currentRule,args,code,locals,ruleCtx,altLabelCtxs,namedActions,finallyAction,postamble,exceptions) ::= << + + }> () { + dynamic _localctx = (context, state}>); + enterRule(_localctx, , RULE_); + + + try { + + int _alt; + + + + + } on RecognitionException catch (re) { + _localctx.exception = re; + errorHandler.reportError(this, re); + errorHandler.recover(this, re); + } finally { + + exitRule(); + } + return _localctx; +} +>> + +LeftRecursiveRuleFunction(currentRule,args,code,locals,ruleCtx,altLabelCtxs, + namedActions,finallyAction,postamble) ::= +<< + + ([int _p = 0]}>) { + final _parentctx = context; + final _parentState = state; + dynamic _localctx = (context, _parentState}>); + var _prevctx = _localctx; + var _startState = ; + enterRecursionRule(_localctx, , RULE_, _p); + + + try { + + int _alt; + + + + + } on RecognitionException catch (re) { + _localctx.exception = re; + errorHandler.reportError(this, re); + errorHandler.recover(this, re); + } finally { + + unrollRecursionContexts(_parentctx); + } + return _localctx; +} +>> + +CodeBlockForOuterMostAlt(currentOuterMostAltCodeBlock, locals, preamble, ops) ::= << +_localctx = Context(_localctx); +enterOuterAlt(_localctx, ); + +>> + +CodeBlockForAlt(currentAltCodeBlock, locals, preamble, ops) ::= << + + + +>> + +LL1AltBlock(choice, preamble, alts, error) ::= << +state = ; +errorHandler.sync(this); + = tokenStream.LT(1); + +switch (tokenStream.LA(1)) { + + + break;}; separator="\n"> +default: + +} +>> + +LL1OptionalBlock(choice, alts, error) ::= << +state = ; +errorHandler.sync(this); +switch (tokenStream.LA(1)) { + + + break;}; separator="\n"> +default: + break; +} +>> + +LL1OptionalBlockSingleAlt(choice, expr, alts, preamble, error, followExpr) ::= << +state = ; +errorHandler.sync(this); + +if () { + +} +) ) !> +>> + 
+LL1StarBlockSingleAlt(choice, loopExpr, alts, preamble, iteration) ::= << +state = ; +errorHandler.sync(this); + +while () { + + state = ; + errorHandler.sync(this); + +} +>> + +LL1PlusBlockSingleAlt(choice, loopExpr, alts, preamble, iteration) ::= << +state = ; +errorHandler.sync(this); + +do { + + state = ; + errorHandler.sync(this); + +} while (); +>> + +// LL(*) stuff + +AltBlock(choice, preamble, alts, error) ::= << +state = ; +errorHandler.sync(this); + = tokenStream.LT(1); + +switch (interpreter.adaptivePredict(tokenStream, , context)) { +: + + break;}; separator="\n"> +} +>> + +OptionalBlock(choice, alts, error) ::= << +state = ; +errorHandler.sync(this); +switch (interpreter.adaptivePredict(tokenStream, , context)) { ++1: + + break;}; separator="\n"> +} +>> + +StarBlock(choice, alts, sync, iteration) ::= << +state = ; +errorHandler.sync(this); +_alt = interpreter.adaptivePredict(tokenStream, , context); +while (_alt != && _alt != ATN.INVALID_ALT_NUMBER) { + if (_alt == 1 + 1) { + + + } + state = ; + errorHandler.sync(this); + _alt = interpreter.adaptivePredict(tokenStream, , context); +} +>> + +PlusBlock(choice, alts, error) ::= << +state = ; +errorHandler.sync(this); +_alt = 1+1; +do { + switch (_alt) { + + 1: + + break;}; separator="\n"> + default: + + } + state = ; + errorHandler.sync(this); + _alt = interpreter.adaptivePredict(tokenStream, , context); +} while (_alt != && _alt != ATN.INVALID_ALT_NUMBER); +>> + +Sync(s) ::= "sync();" + +ThrowNoViableAlt(t) ::= "throw NoViableAltException(this);" + +TestSetInline(s) ::= << +}; separator=" || "> +>> + +// Java language spec 15.19 - shift operators mask operands rather than overflow to 0... 
need range test +testShiftInRange(shiftAmount) ::= << +(() & ~0x3f) == 0 +>> + +// produces smaller bytecode only when bits.ttypes contains more than two items +bitsetBitfieldComparison(s, bits) ::= <% +(})> && ((BigInt.one \<\< ) & (}, bits.shift)>)}; separator=" | ">)) != BigInt.zero) +%> + +isZero ::= [ +"0":true, +default:false +] + +offsetShift(shiftAmount, offset) ::= <% +( - ) +%> + +// produces more efficient bytecode when bits.ttypes contains at most two items +bitsetInlineComparison(s, bits) ::= <% + == TOKEN_}; separator=" || "> +%> + +cases(ttypes) ::= << +:}; separator="\n"> +>> + +InvokeRule(r, argExprsChunks) ::=<< +state = ; + = }>(,); +>> + +MatchToken(m) ::= << +state = ; + = }>match(TOKEN_); +>> + +MatchSet(m, expr, capture) ::= "" + +MatchNotSet(m, expr, capture) ::= "" + +CommonSetStuff(m, expr, capture, invert) ::= << +state = ; + = }>tokenStream.LT(1); + +if ( \<= 0 || !()) { + = }>errorHandler.recoverInline(this); +} else { + if ( tokenStream.LA(1)==IntStream.EOF ) matchedEOF = true; + errorHandler.reportMatch(this); + consume(); +} +>> + +Wildcard(w) ::= << +state = ; + = }>matchWildcard(); +>> + +// ACTION STUFF + +Action(a, foo, chunks) ::= "" + +ArgAction(a, chunks) ::= "" + +SemPred(p, chunks, failChunks) ::= << +state = ; +if (!()) { + throw FailedPredicateException(this, , , ); +} +>> + +ExceptionClause(e, catchArg, catchAction) ::= << +catch () { + +} +>> + +// lexer actions are not associated with model objects + +LexerSkipCommand() ::= "skip();" +LexerMoreCommand() ::= "more();" +LexerPopModeCommand() ::= "popMode();" + +LexerTypeCommand(arg, grammar) ::= "type = ;" +LexerChannelCommand(arg, grammar) ::= "channel = ;" +LexerModeCommand(arg, grammar) ::= "mode_ = ;" +LexerPushModeCommand(arg, grammar) ::= "pushMode();" + +ActionText(t) ::= "" +ActionTemplate(t) ::= "" +ArgRef(a) ::= "_localctx." +LocalRef(a) ::= "_localctx." +RetValueRef(a) ::= "_localctx." +QRetValueRef(a) ::= ".." 
+/** How to translate $tokenLabel */ +TokenRef(t) ::= "." +LabelRef(t) ::= "." +ListLabelRef(t) ::= "." +SetAttr(s,rhsChunks) ::= ". = ;" + +TokenLabelType() ::= "" +InputSymbolType() ::= "" + +TokenPropertyRef_text(t) ::= ".?.text" +TokenPropertyRef_type(t) ::= ". != null ? ..type : 0" +TokenPropertyRef_line(t) ::= ". != null ? ..line : 0" +TokenPropertyRef_pos(t) ::= ". != null ? ..charPositionInLine : 0" +TokenPropertyRef_channel(t) ::= ". != null ? ..channel : 0" +TokenPropertyRef_index(t) ::= ". != null ? ..tokenIndex : 0" +TokenPropertyRef_int(t) ::= ". != null ? int.parse(..text) : 0" + +RulePropertyRef_start(r) ::= ".?.start" +RulePropertyRef_stop(r) ::= ".?.stop" +RulePropertyRef_text(r) ::= "(. != null ? tokenStream.getTextRange(..start, ..stop) : null)" +RulePropertyRef_ctx(r) ::= "." +RulePropertyRef_parser(r) ::= "this" + +ThisRulePropertyRef_start(r) ::= "_localctx.start" +ThisRulePropertyRef_stop(r) ::= "_localctx.stop" +ThisRulePropertyRef_text(r) ::= "tokenStream.getTextRange(_localctx.start, tokenStream.LT(-1))" +ThisRulePropertyRef_ctx(r) ::= "_localctx" +ThisRulePropertyRef_parser(r) ::= "this" + +NonLocalAttrRef(s) ::= "(getInvokingContext() as Context)." +SetNonLocalAttr(s, rhsChunks) ::= + "(getInvokingContext() as Context). 
= ;" + +AddToLabelList(a) ::= "..add();" + +TokenDecl(t) ::= " " +TokenTypeDecl(t) ::= "int ;" +TokenListDecl(t) ::= "List\ = List\()" +RuleContextDecl(r) ::= " " +RuleContextListDecl(rdecl) ::= "List\<> = List\<>()" + +ContextTokenGetterDecl(t) ::= << +TerminalNode () => getToken(.TOKEN_, 0); +>> +ContextTokenListGetterDecl(t) ::= << +List\ s() => getTokens(.TOKEN_); +>> +ContextTokenListIndexedGetterDecl(t) ::= << +TerminalNode (int i) => getToken(.TOKEN_, i); +>> +ContextRuleGetterDecl(r) ::= << + () => getRuleContext\<\>(0); +>> +ContextRuleListGetterDecl(r) ::= << +List\<\> s() => getRuleContexts\<\>(); +>> +ContextRuleListIndexedGetterDecl(r) ::= << + (int i) => getRuleContext\<\>(i); +>> + +LexerRuleContext() ::= "RuleContext" + +/// The rule context name is the rule followed by a suffix; e.g., +/// r becomes rContext. +RuleContextNameSuffix() ::= "Context" + +ImplicitTokenLabel(tokenName) ::= "_" +ImplicitRuleLabel(ruleName) ::= "_" +ImplicitSetLabel(id) ::= "_tset" +ListLabelName(label) ::= "

." + +PositionAdjustingLexerDef() ::= << +class PositionAdjustingLexerATNSimulator extends LexerATNSimulator { + PositionAdjustingLexerATNSimulator(Lexer recog, ATN atn, + List\ decisionToDFA, PredictionContextCache sharedContextCache) + : super(atn, decisionToDFA, sharedContextCache, recog: recog); + + void resetAcceptPosition(CharStream input, int index, int line, + int charPositionInLine) { + input.seek(index); + this.line = line; + this.charPositionInLine = charPositionInLine; + consume(input); + } +} +>> + +PositionAdjustingLexer() ::= << +@override +Token nextToken() { + if (!(super.interpreter is PositionAdjustingLexerATNSimulator)) { + interpreter = new PositionAdjustingLexerATNSimulator( + this, _ATN, _decisionToDFA, _sharedContextCache); + } + + return super.nextToken(); +} + +@override +Token emit() { + switch (type) { + case TOKEN_TOKENS: + handleAcceptPositionForKeyword("tokens"); + break; + + case TOKEN_LABEL: + handleAcceptPositionForIdentifier(); + break; + + default: + break; + } + + return super.emit(); +} + +bool handleAcceptPositionForIdentifier() { + String tokenText = text; + int identifierLength = 0; + while (identifierLength \< tokenText.length && + isIdentifierChar(tokenText[identifierLength])) { + identifierLength++; + } + + if (inputStream.index > tokenStartCharIndex + identifierLength) { + int offset = identifierLength - 1; + interpreter.resetAcceptPosition(inputStream, tokenStartCharIndex + offset, + tokenStartLine, tokenStartCharPositionInLine + offset); + return true; + } + + return false; +} + +bool handleAcceptPositionForKeyword(String keyword) { + if (inputStream.index > tokenStartCharIndex + keyword.length) { + int offset = keyword.length - 1; + interpreter.resetAcceptPosition(inputStream, tokenStartCharIndex + offset, + tokenStartLine, tokenStartCharPositionInLine + offset); + return true; + } + + return false; +} + +@override +PositionAdjustingLexerATNSimulator get interpreter { + return super.interpreter as 
PositionAdjustingLexerATNSimulator; +} + +static bool isIdentifierChar(String c) { + return isLetterOrDigit(c) || c == '_'; +} + +static const ZERO = 48; +static const LOWER_A = 97; +static const LOWER_Z = 122; +static const UPPER_A = 65; +static const UPPER_Z = 90; + +static bool isLetterOrDigit(String char) => isLetter(char) || isDigit(char); + +// Note: this is intentially ASCII only +static bool isLetter(String char) { + if (char == null) return false; + var cc = char.codeUnitAt(0); + return cc >= LOWER_A && cc \<= LOWER_Z || cc >= UPPER_A && cc \<= UPPER_Z; +} + +static bool isDigit(String char) { + if (char == null) return false; + var cc = char.codeUnitAt(0); + return cc >= ZERO && cc \< ZERO + 10; +} +>> + +BasicListener(X) ::= << +@parser::definitions { +class LeafListener extends TBaseListener { + void visitTerminal(TerminalNode node) { + print(node.symbol.text); + } +} +} +>> + +WalkListener(s) ::= << +ParseTreeWalker walker = new ParseTreeWalker(); +walker.walk(new LeafListener(), ); +>> + +TreeNodeWithAltNumField(X) ::= << +@parser::definitions { +class MyRuleNode extends ParserRuleContext { + int altNum; + + MyRuleNode(ParserRuleContext parent, int invokingStateNumber) + : super(parent, invokingStateNumber); + + @override int get altNumber { + return altNum; + } + + @override void set altNumber(int altNum) { + this.altNum = altNum; + } +} +} +>> + +TokenGetterListener(X) ::= << +@parser::definitions { +class LeafListener extends TBaseListener { + void exitA(AContext ctx) { + if (ctx.childCount==2) + stdout.write("${ctx.INT(0).symbol.text} ${ctx.INT(1).symbol.text} ${ctx.INTs()}"); + else + print(ctx.ID().symbol); + } +} +} +>> + +RuleGetterListener(X) ::= << +@parser::definitions { +class LeafListener extends TBaseListener { + void exitA(AContext ctx) { + if (ctx.childCount==2) { + stdout.write("${ctx.b(0).start.text} ${ctx.b(1).start.text} ${ctx.bs()[0].start.text}"); + } else + print(ctx.b(0).start.text); + } +} +} +>> + + +LRListener(X) ::= << 
+@parser::definitions { +class LeafListener extends TBaseListener { + void exitE(EContext ctx) { + if (ctx.childCount==3) { + stdout.write("${ctx.e(0).start.text} ${ctx.e(1).start.text} ${ctx.es()[0].start.text}\n"); + } else + print(ctx.INT().symbol.text); + } +} +} +>> + +LRWithLabelsListener(X) ::= << +@parser::definitions { +class LeafListener extends TBaseListener { + void exitCall(CallContext ctx) { + stdout.write("${ctx.e().start.text} ${ctx.eList()}"); + } + void exitInt(IntContext ctx) { + print(ctx.INT().symbol.text); + } +} +} +>> + +DeclareContextListGettersFunction() ::= << +void foo() { + SContext s = null; + List\ a = s.as(); + List\ b = s.bs(); +} +>> + +Declare_foo() ::= << + void foo() {print("foo");} +>> + +Invoke_foo() ::= "foo();" + +Declare_pred() ::= <> + +Invoke_pred(v) ::= <)>> + +ParserTokenType(t) ::= "Parser." +ContextRuleFunction(ctx, rule) ::= "." +StringType() ::= "String" +ContextMember(ctx, subctx, member) ::= ".." diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/templates/Explorer.test.stg b/runtime-testsuite/resources/org/antlr/v4/test/runtime/templates/Explorer.test.stg index a3d4cb026..cf2802edc 100644 --- a/runtime-testsuite/resources/org/antlr/v4/test/runtime/templates/Explorer.test.stg +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/templates/Explorer.test.stg @@ -18,8 +18,6 @@ AppendStr(a,b) ::= <%%> Concat(a,b) ::= "" -DeclareLocal(s,v) ::= "var = ;" - AssertIsList(v) ::= <> AssignLocal(s,v) ::= " = ;" @@ -40,8 +38,6 @@ SetMember(n,v) ::= <%this. = ;%> AddMember(n,v) ::= <%this. += ;%> -PlusMember(v,n) ::= <% + this.%> - MemberEquals(n,v) ::= <%this. === %> ModMemberEquals(n,m,v) ::= <%this. 
% === %> diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/templates/Firefox.test.stg b/runtime-testsuite/resources/org/antlr/v4/test/runtime/templates/Firefox.test.stg index c8a6c1d57..3882eb059 100644 --- a/runtime-testsuite/resources/org/antlr/v4/test/runtime/templates/Firefox.test.stg +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/templates/Firefox.test.stg @@ -18,8 +18,6 @@ AppendStr(a,b) ::= <%%> Concat(a,b) ::= "" -DeclareLocal(s,v) ::= "var = ;" - AssertIsList(v) ::= <> AssignLocal(s,v) ::= " = ;" @@ -40,8 +38,6 @@ SetMember(n,v) ::= <%this. = ;%> AddMember(n,v) ::= <%this. += ;%> -PlusMember(v,n) ::= <% + this.%> - MemberEquals(n,v) ::= <%this. === %> ModMemberEquals(n,m,v) ::= <%this. % === %> @@ -80,11 +76,7 @@ LANotEquals(i, v) ::= <%this._input.LA()!=%> TokenStartColumnEquals(i) ::= <%this._tokenStartColumn===%> -ImportListener(X) ::= << -@parser::header { -var Listener = require('./Listener').Listener; -} ->> +ImportListener(X) ::= "" GetExpectedTokenNames() ::= "this.getExpectedTokens().toString(this.literalNames)" diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/templates/Go.test.stg b/runtime-testsuite/resources/org/antlr/v4/test/runtime/templates/Go.test.stg index b2909f4e1..eb8def195 100644 --- a/runtime-testsuite/resources/org/antlr/v4/test/runtime/templates/Go.test.stg +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/templates/Go.test.stg @@ -18,8 +18,6 @@ AppendStr(a,b) ::= " + " Concat(a,b) ::= "" -DeclareLocal(s, v) ::= "var = " - AssertIsList(v) ::= "" AssignLocal(s, v) ::= " = ;" @@ -40,8 +38,6 @@ SetMember(n, v) ::= <% = ;%> AddMember(n, v) ::= <% += ;%> -PlusMember(v, n) ::= <% + fmt.Sprint()%> - MemberEquals(n, v) ::= <% == %> ModMemberEquals(n, m, v) ::= <% % == %> @@ -107,6 +103,8 @@ PositionAdjustingLexerDef() ::= "" PositionAdjustingLexer() ::= << func (p *PositionAdjustingLexer) NextToken() antlr.Token { if _, ok := p.Interpreter.(*PositionAdjustingLexerATNSimulator); !ok { 
+ lexerDeserializer := antlr.NewATNDeserializer(nil) + lexerAtn := lexerDeserializer.DeserializeFromUInt16(serializedLexerAtn) p.Interpreter = NewPositionAdjustingLexerATNSimulator(p, lexerAtn, p.Interpreter.DecisionToDFA(), p.Interpreter.SharedContextCache()) p.Virt = p } diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/templates/Java.test.stg b/runtime-testsuite/resources/org/antlr/v4/test/runtime/templates/Java.test.stg index 11f159743..16607933f 100644 --- a/runtime-testsuite/resources/org/antlr/v4/test/runtime/templates/Java.test.stg +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/templates/Java.test.stg @@ -18,8 +18,6 @@ AppendStr(a,b) ::= <%%> Concat(a,b) ::= "" -DeclareLocal(s,v) ::= "Object = ;" - AssertIsList(v) ::= "List\ __ttt__ = ;" // just use static type system AssignLocal(s,v) ::= " = ;" @@ -40,8 +38,6 @@ SetMember(n,v) ::= <%this. = ;%> AddMember(n,v) ::= <%this. += ;%> -PlusMember(v,n) ::= <% + this.%> - MemberEquals(n,v) ::= <%this. == %> ModMemberEquals(n,m,v) ::= <%this. % == %> diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/templates/Node.test.stg b/runtime-testsuite/resources/org/antlr/v4/test/runtime/templates/Node.test.stg index 81573d6c0..709f13e39 100644 --- a/runtime-testsuite/resources/org/antlr/v4/test/runtime/templates/Node.test.stg +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/templates/Node.test.stg @@ -18,8 +18,6 @@ AppendStr(a,b) ::= <%%> Concat(a,b) ::= "" -DeclareLocal(s,v) ::= "var = ;" - AssertIsList(v) ::= <> AssignLocal(s,v) ::= " = ;" @@ -40,8 +38,6 @@ SetMember(n,v) ::= <%this. = ;%> AddMember(n,v) ::= <%this. += ;%> -PlusMember(v,n) ::= <% + this.%> - MemberEquals(n,v) ::= <%this. === %> ModMemberEquals(n,m,v) ::= <%this. 
% === %> @@ -78,11 +74,7 @@ LANotEquals(i, v) ::= <%this._input.LA()!=%> TokenStartColumnEquals(i) ::= <%this._tokenStartColumn===%> -ImportListener(X) ::= << -@parser::header { -var Listener = require('./Listener').Listener; -} ->> +ImportListener(X) ::= "" GetExpectedTokenNames() ::= "this.getExpectedTokens().toString(this.literalNames)" @@ -192,18 +184,21 @@ walker.walk(new this.LeafListener(), ); TreeNodeWithAltNumField(X) ::= << @parser::header { -MyRuleNode = function(parent, invokingState) { - antlr4.ParserRuleContext.call(this, parent, invokingState); +class MyRuleNode extends antlr4.ParserRuleContext { + constructor(parent, invokingState) { + super(parent, invokingState); + this.altNum = 0; + } - this.altNum = 0; - return this; + getAltNumber() { + return this.altNum; + } + + setAltNumber(altNumber){ + this.altNum = altNumber; + } }; -MyRuleNode.prototype = Object.create(antlr4.ParserRuleContext.prototype); -MyRuleNode.prototype.constructor = MyRuleNode; -MyRuleNode.prototype.getAltNumber = function() { return this.altNum; } -MyRuleNode.prototype.setAltNumber = function(altNumber) { this.altNum = altNumber; } - } >> diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/templates/Python2.test.stg b/runtime-testsuite/resources/org/antlr/v4/test/runtime/templates/Python2.test.stg index 3102f4120..2292cafe7 100644 --- a/runtime-testsuite/resources/org/antlr/v4/test/runtime/templates/Python2.test.stg +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/templates/Python2.test.stg @@ -18,8 +18,6 @@ AppendStr(a,b) ::= " + " Concat(a,b) ::= "" -DeclareLocal(s,v) ::= " = " - AssertIsList(v) ::= "assert isinstance(v, (list, tuple))" AssignLocal(s,v) ::= " = " @@ -40,8 +38,6 @@ SetMember(n,v) ::= <%self. = %> AddMember(n,v) ::= <%self. += %> -PlusMember(v,n) ::= <% + str(self.)%> - MemberEquals(n,v) ::= <%self. == %> ModMemberEquals(n,m,v) ::= <%self. 
% == %> diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/templates/Python3.test.stg b/runtime-testsuite/resources/org/antlr/v4/test/runtime/templates/Python3.test.stg index 858b76113..65dcdcd83 100644 --- a/runtime-testsuite/resources/org/antlr/v4/test/runtime/templates/Python3.test.stg +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/templates/Python3.test.stg @@ -18,8 +18,6 @@ AppendStr(a,b) ::= " + " Concat(a,b) ::= "" -DeclareLocal(s,v) ::= " = " - AssertIsList(v) ::= "assert isinstance(v, (list, tuple))" AssignLocal(s,v) ::= " = " @@ -40,8 +38,6 @@ SetMember(n,v) ::= <%self. = %> AddMember(n,v) ::= <%self. += %> -PlusMember(v,n) ::= <% + str(self.)%> - MemberEquals(n,v) ::= <%self. == %> ModMemberEquals(n,m,v) ::= <%self. % == %> diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/templates/Safari.test.stg b/runtime-testsuite/resources/org/antlr/v4/test/runtime/templates/Safari.test.stg index 2a09970c2..a33f612d3 100644 --- a/runtime-testsuite/resources/org/antlr/v4/test/runtime/templates/Safari.test.stg +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/templates/Safari.test.stg @@ -18,8 +18,6 @@ AppendStr(a,b) ::= <%%> Concat(a,b) ::= "" -DeclareLocal(s,v) ::= "var = ;" - AssertIsList(v) ::= <> AssignLocal(s,v) ::= " = ;" @@ -40,8 +38,6 @@ SetMember(n,v) ::= <%this. = ;%> AddMember(n,v) ::= <%this. += ;%> -PlusMember(v,n) ::= <% + this.%> - MemberEquals(n,v) ::= <%this. === %> ModMemberEquals(n,m,v) ::= <%this. 
% === %> @@ -78,11 +74,7 @@ LANotEquals(i, v) ::= <%this._input.LA()!=%> TokenStartColumnEquals(i) ::= <%this._tokenStartColumn===%> -ImportListener(X) ::= << -@parser::header { -var Listener = require('./Listener').Listener; -} ->> +ImportListener(X) ::= "" GetExpectedTokenNames() ::= "this.getExpectedTokens().toString(this.literalNames)" diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/templates/Swift.test.stg b/runtime-testsuite/resources/org/antlr/v4/test/runtime/templates/Swift.test.stg index d45168517..c2c12b63c 100755 --- a/runtime-testsuite/resources/org/antlr/v4/test/runtime/templates/Swift.test.stg +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/templates/Swift.test.stg @@ -18,8 +18,6 @@ AppendStr(a,b) ::= <%%> Concat(a,b) ::= "" -DeclareLocal(s,v) ::= "var = " - AssertIsList(v) ::= "var __ttt__ = ;" // just use static type system AssignLocal(s,v) ::= " = " @@ -40,8 +38,6 @@ SetMember(n,v) ::= <%self. = %> AddMember(n,v) ::= <%self. += %> -PlusMember(v,n) ::= <% + self.%> - MemberEquals(n,v) ::= <%self. == %> ModMemberEquals(n,m,v) ::= <%self. 
% == %> @@ -133,7 +129,7 @@ open func emit() -> Token { private func handleAcceptPositionForIdentifier() -> Bool { let tokenText = getText() var identifierLength = 0 - while ((identifierLength \< tokenText.characters.count) && isIdentifierChar(tokenText[tokenText.characters.index(tokenText.startIndex, offsetBy: identifierLength)])) { + while ((identifierLength \< tokenText.count) && isIdentifierChar(tokenText[tokenText.index(tokenText.startIndex, offsetBy: identifierLength)])) { identifierLength += 1 } @@ -147,8 +143,8 @@ private func handleAcceptPositionForIdentifier() -> Bool { } private func handleAcceptPositionForKeyword(_ keyword:String) -> Bool { - if getInputStream()!.index() > _tokenStartCharIndex + keyword.characters.count { - let offset = keyword.characters.count - 1 + if getInputStream()!.index() > _tokenStartCharIndex + keyword.count { + let offset = keyword.count - 1 (getInterpreter() as! PositionAdjustingLexerATNSimulator).resetAcceptPosition(getInputStream()!, _tokenStartCharIndex + offset, _tokenStartLine, _tokenStartCharPositionInLine + offset) return true } diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/BaseRuntimeTest.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/BaseRuntimeTest.java index f7874d671..501688388 100644 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/BaseRuntimeTest.java +++ b/runtime-testsuite/test/org/antlr/v4/test/runtime/BaseRuntimeTest.java @@ -46,15 +46,14 @@ import static org.junit.Assume.assumeFalse; public abstract class BaseRuntimeTest { public final static String[] Targets = { "Cpp", - "Java", - "Go", "CSharp", - "Python2", "Python3", + "Dart", + "Go", + "Java", + "Node", "PHP", - "Node", "Safari", "Firefox", "Explorer", "Chrome" - }; - public final static String[] JavaScriptTargets = { - "Node", "Safari", "Firefox", "Explorer", "Chrome" + "Python2", "Python3", + "Swift" }; static { @@ -97,10 +96,17 @@ public abstract class BaseRuntimeTest { public void setUp() throws Exception { // From 
http://junit.sourceforge.net/javadoc/org/junit/Assume.html // "The default JUnit runner treats tests with failing assumptions as ignored" - assumeFalse(descriptor.ignore(descriptor.getTarget())); + assumeFalse(checkIgnored()); delegate.testSetUp(); } + public boolean checkIgnored() { + boolean ignored = !TestContext.isSupportedTarget(descriptor.getTarget()) || descriptor.ignore(descriptor.getTarget()); + if(ignored) + System.out.println("Ignore " + descriptor); + return ignored; + } + @Rule public final TestRule testWatcher = new TestWatcher() { @Override @@ -114,7 +120,7 @@ public abstract class BaseRuntimeTest { public void testOne() throws Exception { // System.out.println(delegate.getTmpDir()); if ( descriptor.ignore(descriptor.getTarget()) ) { - System.out.printf("Ignore "+descriptor); + System.out.println("Ignore " + descriptor); return; } @@ -275,6 +281,8 @@ public abstract class BaseRuntimeTest { // ---- support ---- public static RuntimeTestDescriptor[] getRuntimeTestDescriptors(Class clazz, String targetName) { + if(!TestContext.isSupportedTarget(targetName)) + return new RuntimeTestDescriptor[0]; Class[] nestedClasses = clazz.getClasses(); List descriptors = new ArrayList(); for (Class nestedClass : nestedClasses) { @@ -282,8 +290,10 @@ public abstract class BaseRuntimeTest { if ( RuntimeTestDescriptor.class.isAssignableFrom(nestedClass) && !Modifier.isAbstract(modifiers) ) { try { RuntimeTestDescriptor d = (RuntimeTestDescriptor) nestedClass.newInstance(); - d.setTarget(targetName); - descriptors.add(d); + if(!d.ignore(targetName)) { + d.setTarget(targetName); + descriptors.add(d); + } } catch (Exception e) { e.printStackTrace(System.err); } @@ -302,6 +312,16 @@ public abstract class BaseRuntimeTest { } } + public static String readFile(String dir, String fileName) { + try { + return String.copyValueOf(Utils.readFile(dir+"/"+fileName, "UTF-8")); + } + catch (IOException ioe) { + System.err.println("can't read file"); + ioe.printStackTrace(System.err); + 
} + return null; + } protected static void assertCorrectOutput(RuntimeTestDescriptor descriptor, RuntimeTestSupport delegate, String actualOutput) { String actualParseErrors = delegate.getParseErrors(); diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/TestContext.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/TestContext.java new file mode 100644 index 000000000..bb1cb78f6 --- /dev/null +++ b/runtime-testsuite/test/org/antlr/v4/test/runtime/TestContext.java @@ -0,0 +1,19 @@ +package org.antlr.v4.test.runtime; + +public abstract class TestContext { + + public static boolean isTravisCI() { + return "true".equals(String.valueOf(System.getenv("TRAVIS")).toLowerCase()); + } + + public static boolean isAppVeyorCI() { + return "true".equals(String.valueOf(System.getenv("APPVEYOR")).toLowerCase()); + } + + public static boolean isSupportedTarget(String target) { + if(isAppVeyorCI()) + return !target.matches("Swift|Node"); + else + return true; + } +} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/dart/BaseDartTest.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/dart/BaseDartTest.java new file mode 100644 index 000000000..88c69d755 --- /dev/null +++ b/runtime-testsuite/test/org/antlr/v4/test/runtime/dart/BaseDartTest.java @@ -0,0 +1,1185 @@ +/* + * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. 
+ */ + +package org.antlr.v4.test.runtime.dart; + +import org.antlr.v4.Tool; +import org.antlr.v4.analysis.AnalysisPipeline; +import org.antlr.v4.automata.ATNFactory; +import org.antlr.v4.automata.ATNPrinter; +import org.antlr.v4.automata.LexerATNFactory; +import org.antlr.v4.automata.ParserATNFactory; +import org.antlr.v4.codegen.CodeGenerator; +import org.antlr.v4.misc.Utils; +import org.antlr.v4.runtime.*; +import org.antlr.v4.runtime.atn.*; +import org.antlr.v4.runtime.dfa.DFA; +import org.antlr.v4.runtime.misc.IntegerList; +import org.antlr.v4.runtime.misc.Interval; +import org.antlr.v4.runtime.misc.Pair; +import org.antlr.v4.runtime.tree.ParseTree; +import org.antlr.v4.semantics.SemanticPipeline; +import org.antlr.v4.test.runtime.BaseRuntimeTest; +import org.antlr.v4.test.runtime.ErrorQueue; +import org.antlr.v4.test.runtime.RuntimeTestSupport; +import org.antlr.v4.test.runtime.StreamVacuum; +import org.antlr.v4.test.runtime.descriptors.LexerExecDescriptors; +import org.antlr.v4.test.runtime.descriptors.PerformanceDescriptors; +import org.antlr.v4.tool.*; +import org.stringtemplate.v4.ST; +import org.stringtemplate.v4.STGroup; +import org.stringtemplate.v4.STGroupString; + +import java.io.*; +import java.lang.reflect.Constructor; +import java.lang.reflect.InvocationTargetException; +import java.lang.reflect.Method; +import java.net.URL; +import java.net.URLClassLoader; +import java.util.*; + +import static junit.framework.TestCase.*; +import static org.antlr.v4.test.runtime.BaseRuntimeTest.readFile; +import static org.antlr.v4.test.runtime.BaseRuntimeTest.writeFile; +import static org.junit.Assert.assertArrayEquals; + + +public class BaseDartTest implements RuntimeTestSupport { + private static final List AOT_COMPILE_TESTS = Arrays.asList( + new PerformanceDescriptors.DropLoopEntryBranchInLRRule_4().input, + new LexerExecDescriptors.LargeLexer().input + ); + + public static final String newline = System.getProperty("line.separator"); + public static final 
String pathSep = System.getProperty("path.separator"); + + + /** + * When the {@code antlr.preserve-test-dir} runtime property is set to + * {@code true}, the temporary directories created by the test run will not + * be removed at the end of the test run, even for tests that completed + * successfully. + *