diff --git a/.editorconfig b/.editorconfig index 53b65e9f3..daa6da0fb 100644 --- a/.editorconfig +++ b/.editorconfig @@ -1,5 +1,8 @@ root = true +[*] +tab_width = 4 + [*.{java,stg}] charset = utf-8 insert_final_newline = true diff --git a/.travis.yml b/.travis.yml index d27ee56b3..d9969b6eb 100644 --- a/.travis.yml +++ b/.travis.yml @@ -2,30 +2,26 @@ sudo: true language: java +cache: + directories: + - $HOME/.m2 + - $HOME/Library/Caches/Homebrew + +stages: + - smoke-test + - main-test + - extended-test + matrix: include: - os: linux compiler: clang - jdk: oraclejdk7 + jdk: openjdk7 env: - TARGET=cpp - CXX=g++-5 - - GROUP=ALL - addons: - apt: - sources: - - ubuntu-toolchain-r-test - - llvm-toolchain-precise-3.7 - packages: - - g++-5 - - uuid-dev - - clang-3.7 - - os: osx - compiler: clang - osx_image: xcode8.1 - env: - - TARGET=cpp - GROUP=LEXER + stage: main-test addons: apt: sources: @@ -35,106 +31,150 @@ matrix: - g++-5 - uuid-dev - clang-3.7 - - os: osx - compiler: clang - osx_image: xcode8.1 - env: - - TARGET=cpp - - GROUP=PARSER - addons: - apt: - sources: - - ubuntu-toolchain-r-test - - llvm-toolchain-precise-3.7 - packages: - - g++-5 - - uuid-dev - - clang-3.7 - - os: osx - compiler: clang - osx_image: xcode8.1 - env: - - TARGET=cpp - - GROUP=RECURSION - addons: - apt: - sources: - - ubuntu-toolchain-r-test - - llvm-toolchain-precise-3.7 - packages: - - g++-5 - - uuid-dev - - clang-3.7 - - os: osx - compiler: clang - osx_image: xcode8.1 - env: - - TARGET=swift - - GROUP=LEXER - - os: osx - compiler: clang - osx_image: xcode8.1 - env: - - TARGET=swift - - GROUP=PARSER - - os: osx - compiler: clang - osx_image: xcode8.1 - env: - - TARGET=swift - - GROUP=RECURSION - os: linux + compiler: clang + jdk: openjdk7 + env: + - TARGET=cpp + - CXX=g++-5 + - GROUP=PARSER + stage: main-test + addons: + apt: + sources: + - ubuntu-toolchain-r-test + - llvm-toolchain-precise-3.7 + packages: + - g++-5 + - uuid-dev + - clang-3.7 + - os: linux + compiler: clang + jdk: openjdk7 
+ env: + - TARGET=cpp + - CXX=g++-5 + - GROUP=RECURSION + stage: main-test + addons: + apt: + sources: + - ubuntu-toolchain-r-test + - llvm-toolchain-precise-3.7 + packages: + - g++-5 + - uuid-dev + - clang-3.7 + - os: osx + compiler: clang + osx_image: xcode9 + env: + - TARGET=cpp + - GROUP=LEXER + stage: extended-test + - os: osx + compiler: clang + osx_image: xcode9 + env: + - TARGET=cpp + - GROUP=PARSER + stage: extended-test + - os: osx + compiler: clang + osx_image: xcode9 + env: + - TARGET=cpp + - GROUP=RECURSION + stage: extended-test + - os: osx + compiler: clang + osx_image: xcode9 + env: + - TARGET=swift + - GROUP=LEXER + stage: main-test + - os: osx + compiler: clang + osx_image: xcode9 + env: + - TARGET=swift + - GROUP=PARSER + stage: main-test + - os: osx + compiler: clang + osx_image: xcode9 + env: + - TARGET=swift + - GROUP=RECURSION + stage: main-test + - os: linux + dist: trusty compiler: clang env: - TARGET=swift - GROUP=ALL + stage: extended-test - os: osx - osx_image: xcode8.2 + osx_image: xcode9 env: - TARGET=dotnet - GROUP=LEXER + stage: extended-test - os: osx - osx_image: xcode8.2 + osx_image: xcode9 env: - TARGET=dotnet - GROUP=PARSER + stage: extended-test - os: osx - osx_image: xcode8.2 + osx_image: xcode9 env: - TARGET=dotnet - GROUP=RECURSION + stage: extended-test - os: linux - jdk: oraclejdk7 + jdk: openjdk7 env: TARGET=java + stage: extended-test + - os: linux + jdk: openjdk8 + env: TARGET=java + stage: extended-test - os: linux jdk: oraclejdk8 env: TARGET=java + stage: smoke-test - os: linux - jdk: oraclejdk7 + jdk: openjdk7 env: TARGET=csharp + stage: extended-test - os: linux jdk: oraclejdk8 dist: trusty env: - TARGET=dotnet - GROUP=LEXER + stage: main-test - os: linux - jdk: oraclejdk8 + jdk: openjdk8 dist: trusty env: - TARGET=dotnet - GROUP=PARSER + stage: main-test - os: linux jdk: oraclejdk8 dist: trusty env: - TARGET=dotnet - GROUP=RECURSION + stage: main-test - os: linux - jdk: oraclejdk7 + jdk: openjdk7 env: 
TARGET=python2 + stage: extended-test - os: linux - jdk: oraclejdk7 + jdk: openjdk7 env: TARGET=python3 addons: apt: @@ -142,16 +182,20 @@ matrix: - deadsnakes # source required so it finds the package definition below packages: - python3.5 + stage: main-test - os: linux - jdk: oraclejdk7 + dist: trusty + jdk: openjdk8 env: TARGET=javascript + stage: main-test - os: linux - jdk: oraclejdk7 + dist: trusty + jdk: openjdk8 env: TARGET=go + stage: main-test before_install: - - ./.travis/before-install-$TRAVIS_OS_NAME-$TARGET.sh + - f="./.travis/before-install-$TRAVIS_OS_NAME-$TARGET.sh"; ! [ -x "$f" ] || "$f" script: - - cd runtime-testsuite; ../.travis/run-tests-$TARGET.sh - + - cd runtime-testsuite; travis_wait 40 ../.travis/run-tests-$TARGET.sh diff --git a/.travis/before-install-linux-swift.sh b/.travis/before-install-linux-swift.sh index 607f04449..1a2b2a555 100755 --- a/.travis/before-install-linux-swift.sh +++ b/.travis/before-install-linux-swift.sh @@ -1,14 +1,12 @@ set -euo pipefail -# make sure we use trusty repositories (travis by default uses precise) -curl https://repogen.simplylinux.ch/txt/trusty/sources_c4aa56bd26c0f54f391d8fae3e687ef5f6e97c26.txt | sudo tee /etc/apt/sources.list - # install dependencies # some packages below will be update, swift assumes newer versions # of, for example, sqlite3 and libicu, without the update some # tools will not work sudo apt-get update -sudo apt-get install clang libicu-dev libxml2 sqlite3 +sudo apt-get install clang-3.6 libxml2 +sudo update-alternatives --install /usr/bin/clang clang /usr/bin/clang-3.6 100 # This would fix a know linker issue mentioned in: # https://bugs.swift.org/browse/SR-2299 diff --git a/.travis/before-install-osx-cpp.sh b/.travis/before-install-osx-cpp.sh deleted file mode 100755 index 48152d221..000000000 --- a/.travis/before-install-osx-cpp.sh +++ /dev/null @@ -1,13 +0,0 @@ -#!/bin/bash - -set -euo pipefail - -thisdir=$(dirname "$0") - -brew update -brew install cmake - -# Work around 
apparent rvm bug that is in Travis's Xcode image. -# https://github.com/direnv/direnv/issues/210 -# https://github.com/travis-ci/travis-ci/issues/6307 -shell_session_update() { :; } diff --git a/.travis/before-install-osx-dotnet.sh b/.travis/before-install-osx-dotnet.sh index 428016fa6..c784ba091 100755 --- a/.travis/before-install-osx-dotnet.sh +++ b/.travis/before-install-osx-dotnet.sh @@ -4,9 +4,7 @@ set -euo pipefail thisdir=$(dirname "$0") -# pre-requisites for dotnet core -brew update -brew install openssl +# OpenSSL setup for dotnet core mkdir -p /usr/local/lib ln -s /usr/local/opt/openssl/lib/libcrypto.1.0.0.dylib /usr/local/lib/ ln -s /usr/local/opt/openssl/lib/libssl.1.0.0.dylib /usr/local/lib/ @@ -19,9 +17,3 @@ sudo installer -pkg /tmp/dotnet-dev-osx-x64.1.0.4.pkg -target / # make the link ln -s /usr/local/share/dotnet/dotnet /usr/local/bin/ - -# Work around apparent rvm bug that is in Travis's Xcode image. -# https://github.com/direnv/direnv/issues/210 -# https://github.com/travis-ci/travis-ci/issues/6307 -shell_session_update() { :; } - diff --git a/.travis/before-install-osx-swift.sh b/.travis/before-install-osx-swift.sh deleted file mode 100755 index 145a505c6..000000000 --- a/.travis/before-install-osx-swift.sh +++ /dev/null @@ -1,12 +0,0 @@ -#!/bin/bash - -set -euo pipefail - -thisdir=$(dirname "$0") - -brew update - -# Work around apparent rvm bug that is in Travis's Xcode image. 
-# https://github.com/direnv/direnv/issues/210 -# https://github.com/travis-ci/travis-ci/issues/6307 -shell_session_update() { :; } diff --git a/.travis/run-tests-swift.sh b/.travis/run-tests-swift.sh index 56d2cec65..8c63070aa 100755 --- a/.travis/run-tests-swift.sh +++ b/.travis/run-tests-swift.sh @@ -4,7 +4,7 @@ # here since environment variables doesn't pass # across scripts if [ $TRAVIS_OS_NAME == "linux" ]; then - export SWIFT_VERSION=swift-3.1.1 + export SWIFT_VERSION=swift-4.0 export SWIFT_HOME=$(pwd)/swift/$SWIFT_VERSION-RELEASE-ubuntu14.04/usr/bin/ export PATH=$SWIFT_HOME:$PATH diff --git a/antlr4-maven-plugin/src/main/java/org/antlr/mojo/antlr4/Antlr4Mojo.java b/antlr4-maven-plugin/src/main/java/org/antlr/mojo/antlr4/Antlr4Mojo.java index dcdc0a29d..c0926fe6c 100644 --- a/antlr4-maven-plugin/src/main/java/org/antlr/mojo/antlr4/Antlr4Mojo.java +++ b/antlr4-maven-plugin/src/main/java/org/antlr/mojo/antlr4/Antlr4Mojo.java @@ -395,7 +395,7 @@ public class Antlr4Mojo extends AbstractMojo { String tokensFileName = grammarFile.getName().split("\\.")[0] + ".tokens"; File outputFile = new File(outputDirectory, tokensFileName); if ( (! outputFile.exists()) || - outputFile.lastModified() < grammarFile.lastModified() || + outputFile.lastModified() <= grammarFile.lastModified() || dependencies.isDependencyChanged(grammarFile)) { grammarFilesToProcess.add(grammarFile); } @@ -412,10 +412,7 @@ public class Antlr4Mojo extends AbstractMojo { // Iterate each grammar file we were given and add it into the tool's list of // grammars to process. 
for (File grammarFile : grammarFiles) { - if (!buildContext.hasDelta(grammarFile)) { - continue; - } - + buildContext.refresh(grammarFile); buildContext.removeMessages(grammarFile); getLog().debug("Grammar file '" + grammarFile.getPath() + "' detected."); diff --git a/antlr4-maven-plugin/src/main/java/org/antlr/mojo/antlr4/GrammarDependencies.java b/antlr4-maven-plugin/src/main/java/org/antlr/mojo/antlr4/GrammarDependencies.java index 2e9e2472c..d21d1ab7f 100644 --- a/antlr4-maven-plugin/src/main/java/org/antlr/mojo/antlr4/GrammarDependencies.java +++ b/antlr4-maven-plugin/src/main/java/org/antlr/mojo/antlr4/GrammarDependencies.java @@ -216,14 +216,14 @@ class GrammarDependencies { return; for (GrammarAST importDecl : grammar.getAllChildrenWithType(ANTLRParser.IMPORT)) { - Tree id = importDecl.getFirstChildWithType(ANTLRParser.ID); + for (Tree id: importDecl.getAllChildrenWithType(ANTLRParser.ID)) { + // missing id is not valid, but we don't want to prevent the root cause from + // being reported by the ANTLR tool + if (id != null) { + String grammarPath = getRelativePath(grammarFile); - // missing id is not valid, but we don't want to prevent the root cause from - // being reported by the ANTLR tool - if (id != null) { - String grammarPath = getRelativePath(grammarFile); - - graph.addEdge(id.getText() + ".g4", grammarPath); + graph.addEdge(id.getText() + ".g4", grammarPath); + } } } diff --git a/antlr4-maven-plugin/src/test/java/org/antlr/mojo/antlr4/Antlr4MojoTest.java b/antlr4-maven-plugin/src/test/java/org/antlr/mojo/antlr4/Antlr4MojoTest.java index d90728922..da38c582a 100644 --- a/antlr4-maven-plugin/src/test/java/org/antlr/mojo/antlr4/Antlr4MojoTest.java +++ b/antlr4-maven-plugin/src/test/java/org/antlr/mojo/antlr4/Antlr4MojoTest.java @@ -202,6 +202,7 @@ public class Antlr4MojoTest { Path genHello = generatedSources.resolve("test/HelloParser.java"); Path baseGrammar = antlrDir.resolve("imports/TestBaseLexer.g4"); + Path baseGrammar2 = 
antlrDir.resolve("imports/TestBaseLexer2.g4"); Path lexerGrammar = antlrDir.resolve("test/TestLexer.g4"); Path parserGrammar = antlrDir.resolve("test/TestParser.g4"); @@ -222,21 +223,20 @@ public class Antlr4MojoTest { assertTrue(Files.exists(genHello)); assertTrue(Files.exists(genTestParser)); assertTrue(Files.exists(genTestLexer)); + byte[] origTestLexerSum = checksum(genTestLexer); + byte[] origTestParserSum = checksum(genTestParser); + byte[] origHelloSum = checksum(genHello); //////////////////////////////////////////////////////////////////////// // 2nd - nothing has been modified, no grammars have to be processed //////////////////////////////////////////////////////////////////////// { - byte[] testLexerSum = checksum(genTestLexer); - byte[] testParserSum = checksum(genTestParser); - byte[] helloSum = checksum(genHello); - maven.executeMojo(session, project, exec); - assertTrue(Arrays.equals(testLexerSum, checksum(genTestLexer))); - assertTrue(Arrays.equals(testParserSum, checksum(genTestParser))); - assertTrue(Arrays.equals(helloSum, checksum(genHello))); + assertTrue(Arrays.equals(origTestLexerSum, checksum(genTestLexer))); + assertTrue(Arrays.equals(origTestParserSum, checksum(genTestParser))); + assertTrue(Arrays.equals(origHelloSum, checksum(genHello))); } //////////////////////////////////////////////////////////////////////// @@ -245,50 +245,71 @@ public class Antlr4MojoTest { // modify the grammar to make checksum comparison detect a change try(Change change = Change.of(baseGrammar, "DOT: '.' 
;")) { - byte[] testLexerSum = checksum(genTestLexer); - byte[] testParserSum = checksum(genTestParser); - byte[] helloSum = checksum(genHello); - maven.executeMojo(session, project, exec); - assertFalse(Arrays.equals(testLexerSum, checksum(genTestLexer))); - assertFalse(Arrays.equals(testParserSum, checksum(genTestParser))); - assertTrue(Arrays.equals(helloSum, checksum(genHello))); + assertFalse(Arrays.equals(origTestLexerSum, checksum(genTestLexer))); + assertFalse(Arrays.equals(origTestParserSum, checksum(genTestParser))); + assertTrue(Arrays.equals(origHelloSum, checksum(genHello))); } + // Restore file and confirm it was restored. + maven.executeMojo(session, project, exec); + assertTrue(Arrays.equals(origTestLexerSum, checksum(genTestLexer))); + assertTrue(Arrays.equals(origTestParserSum, checksum(genTestParser))); + assertTrue(Arrays.equals(origHelloSum, checksum(genHello))); //////////////////////////////////////////////////////////////////////// - // 4th - the lexer grammar changed, the parser grammar has to be processed as well + // 4th - the second imported grammar changed, every dependency has to be processed //////////////////////////////////////////////////////////////////////// // modify the grammar to make checksum comparison detect a change - try(Change change = Change.of(lexerGrammar)) { - byte[] testLexerSum = checksum(genTestLexer); - byte[] testParserSum = checksum(genTestParser); - byte[] helloSum = checksum(genHello); - + try(Change change = Change.of(baseGrammar2, "BANG: '!' 
;")) { maven.executeMojo(session, project, exec); - assertFalse(Arrays.equals(testLexerSum, checksum(genTestLexer))); - assertFalse(Arrays.equals(testParserSum, checksum(genTestParser))); - assertTrue(Arrays.equals(helloSum, checksum(genHello))); + assertFalse(Arrays.equals(origTestLexerSum, checksum(genTestLexer))); + assertFalse(Arrays.equals(origTestParserSum, checksum(genTestParser))); + assertTrue(Arrays.equals(origHelloSum, checksum(genHello))); } + // Restore file and confirm it was restored. + maven.executeMojo(session, project, exec); + assertTrue(Arrays.equals(origTestLexerSum, checksum(genTestLexer))); + assertTrue(Arrays.equals(origTestParserSum, checksum(genTestParser))); + assertTrue(Arrays.equals(origHelloSum, checksum(genHello))); //////////////////////////////////////////////////////////////////////// - // 5th - the parser grammar changed, no other grammars have to be processed + // 5th - the lexer grammar changed, the parser grammar has to be processed as well + //////////////////////////////////////////////////////////////////////// + + // modify the grammar to make checksum comparison detect a change + try(Change change = Change.of(lexerGrammar, "FOO: 'foo' ;")) { + maven.executeMojo(session, project, exec); + + assertFalse(Arrays.equals(origTestLexerSum, checksum(genTestLexer))); + assertFalse(Arrays.equals(origTestParserSum, checksum(genTestParser))); + assertTrue(Arrays.equals(origHelloSum, checksum(genHello))); + } + // Restore file and confirm it was restored. 
+ maven.executeMojo(session, project, exec); + assertTrue(Arrays.equals(origTestLexerSum, checksum(genTestLexer))); + assertTrue(Arrays.equals(origTestParserSum, checksum(genTestParser))); + assertTrue(Arrays.equals(origHelloSum, checksum(genHello))); + + //////////////////////////////////////////////////////////////////////// + // 6th - the parser grammar changed, no other grammars have to be processed //////////////////////////////////////////////////////////////////////// // modify the grammar to make checksum comparison detect a change try(Change change = Change.of(parserGrammar, " t : WS* ;")) { - byte[] testLexerSum = checksum(genTestLexer); - byte[] testParserSum = checksum(genTestParser); - byte[] helloSum = checksum(genHello); - maven.executeMojo(session, project, exec); - assertTrue(Arrays.equals(testLexerSum, checksum(genTestLexer))); - assertFalse(Arrays.equals(testParserSum, checksum(genTestParser))); - assertTrue(Arrays.equals(helloSum, checksum(genHello))); + assertTrue(Arrays.equals(origTestLexerSum, checksum(genTestLexer))); + assertFalse(Arrays.equals(origTestParserSum, checksum(genTestParser))); + assertTrue(Arrays.equals(origHelloSum, checksum(genHello))); } + // Restore file and confirm it was restored. 
+ maven.executeMojo(session, project, exec); + assertTrue(Arrays.equals(origTestLexerSum, checksum(genTestLexer))); + assertTrue(Arrays.equals(origTestParserSum, checksum(genTestParser))); + assertTrue(Arrays.equals(origHelloSum, checksum(genHello))); } @Test diff --git a/antlr4-maven-plugin/src/test/projects/importsStandard/src/main/antlr4/imports/TestBaseLexer.g4 b/antlr4-maven-plugin/src/test/projects/importsStandard/src/main/antlr4/imports/TestBaseLexer.g4 index 5fcc6d353..6c3164de3 100644 --- a/antlr4-maven-plugin/src/test/projects/importsStandard/src/main/antlr4/imports/TestBaseLexer.g4 +++ b/antlr4-maven-plugin/src/test/projects/importsStandard/src/main/antlr4/imports/TestBaseLexer.g4 @@ -10,7 +10,4 @@ fragment Whitespace : ' ' | '\n' | '\t' | '\r' ; fragment -Hexdigit : [a-fA-F0-9] ; - -fragment -Digit : [0-9] ; +Hexdigit : [a-fA-F0-9] ; \ No newline at end of file diff --git a/antlr4-maven-plugin/src/test/projects/importsStandard/src/main/antlr4/imports/TestBaseLexer2.g4 b/antlr4-maven-plugin/src/test/projects/importsStandard/src/main/antlr4/imports/TestBaseLexer2.g4 new file mode 100644 index 000000000..18aa0c4f3 --- /dev/null +++ b/antlr4-maven-plugin/src/test/projects/importsStandard/src/main/antlr4/imports/TestBaseLexer2.g4 @@ -0,0 +1,4 @@ +lexer grammar TestBaseLexer2; + +fragment +Digit : [0-9] ; diff --git a/antlr4-maven-plugin/src/test/projects/importsStandard/src/main/antlr4/test/TestLexer.g4 b/antlr4-maven-plugin/src/test/projects/importsStandard/src/main/antlr4/test/TestLexer.g4 index 668b76496..b9c07b3df 100644 --- a/antlr4-maven-plugin/src/test/projects/importsStandard/src/main/antlr4/test/TestLexer.g4 +++ b/antlr4-maven-plugin/src/test/projects/importsStandard/src/main/antlr4/test/TestLexer.g4 @@ -1,6 +1,6 @@ lexer grammar TestLexer; -import TestBaseLexer; +import TestBaseLexer, TestBaseLexer2; WS : Whitespace+ -> skip; -TEXT : ~[<&]+ ; // match any 16 bit char other than < and & \ No newline at end of file +TEXT : ~[<&]+ ; // match any 16 
bit char other than < and & diff --git a/appveyor.yml b/appveyor.yml index 57184557b..bf850aac9 100644 --- a/appveyor.yml +++ b/appveyor.yml @@ -1,8 +1,8 @@ -version: '4.6-SNAPSHOT+AppVeyor.{build}' -os: Windows Server 2012 +version: '4.7.1-SNAPSHOT+AppVeyor.{build}' +build: off build_script: - - mvn -DskipTests install -q --batch-mode + - mvn -DskipTests install --batch-mode + - msbuild runtime/CSharp/runtime/CSharp/Antlr4.vs2013.sln /logger:"C:\Program Files\AppVeyor\BuildAgent\Appveyor.MSBuildLogger.dll" /verbosity:detailed + - msbuild ./runtime-testsuite/target/classes/CSharp/runtime/CSharp/Antlr4.vs2013.sln /logger:"C:\Program Files\AppVeyor\BuildAgent\Appveyor.MSBuildLogger.dll" /verbosity:detailed test_script: - - mvn install -q -Dantlr-python2-python="C:\Python27\python.exe" -Dantlr-python3-python="C:\Python35\python.exe" -Dantlr-javascript-nodejs="C:\Program Files (x86)\nodejs\node.exe" --batch-mode -build: - verbosity: minimal + - mvn install -Dantlr-python2-python="C:\Python27\python.exe" -Dantlr-python3-python="C:\Python35\python.exe" -Dantlr-javascript-nodejs="C:\Program Files (x86)\nodejs\node.exe" --batch-mode diff --git a/contributors.txt b/contributors.txt index 21a3c0cdb..caf196135 100644 --- a/contributors.txt +++ b/contributors.txt @@ -151,4 +151,20 @@ YYYY/MM/DD, github id, Full name, email 2017/06/11, erikbra, Erik A. 
Brandstadmoen, erik@brandstadmoen.net 2017/06/10, jm-mikkelsen, Jan Martin Mikkelsen, janm@transactionware.com 2017/06/25, alimg, Alim Gökkaya, alim.gokkaya@gmail.com +2017/06/28, jBugman, Sergey Parshukov, codedby@bugman.me +2017/07/09, neatnerd, Mike Arshinskiy, neatnerd@users.noreply.github.com +2017/07/11, dhalperi, Daniel Halperin, daniel@halper.in +2017/07/17, vaibhavaingankar09, Vaibhav Vaingankar, vbhvvaingankar9@gmail.com +2017/07/23, venkatperi, Venkat Peri, venkatperi@gmail.com +2017/07/27, shirou, WAKAYAMA Shirou, shirou.faw@gmail.com +2017/07/09, neatnerd, Mike Arshinskiy, neatnerd@users.noreply.github.com +2017/07/27, matthauck, Matt Hauck, matthauck@gmail.com +2017/07/27, shirou, WAKAYAMA Shirou, shirou.faw@gmail.com +2017/08/20, tiagomazzutti, Tiago Mazzutti, tiagomzt@gmail.com +2017/08/29, Eddy Reyes, eddy@mindsight.io +2017/09/09, brauliobz, Bráulio Bezerra, brauliobezerra@gmail.com +2017/09/11, sachinjain024, Sachin Jain, sachinjain024@gmail.com +2017/10/06, bramp, Andrew Brampton, brampton@gmail.com +2017/10/15, simkimsia, Sim Kim Sia, kimcity@gmail.com 2017/10/27, Griffon26, Maurice van der Pot, griffon26@kfk4ever.com +2017/05/29, rlfnb, Ralf Neeb, rlfnb@rlfnb.de \ No newline at end of file diff --git a/doc/getting-started.md b/doc/getting-started.md index eaf2141fb..5c57119f7 100644 --- a/doc/getting-started.md +++ b/doc/getting-started.md @@ -6,7 +6,7 @@ Hi and welcome to the version 4 release of ANTLR! It's named after the fearless ANTLR is really two things: a tool that translates your grammar to a parser/lexer in Java (or other target language) and the runtime needed by the generated parsers/lexers. Even if you are using the ANTLR Intellij plug-in or ANTLRWorks to run the ANTLR tool, the generated code will still need the runtime library. -The first thing you should do is probably download and install a development tool plug-in. Even if you only use such tools for editing, they are great. 
Then, follow the instructions below to get the runtime environment available to your system to run generated parsers/lexers. In what follows, I talk about antlr-4.5.3-complete.jar, which has the tool and the runtime and any other support libraries (e.g., ANTLR v4 is written in v3). +The first thing you should do is probably download and install a development tool plug-in. Even if you only use such tools for editing, they are great. Then, follow the instructions below to get the runtime environment available to your system to run generated parsers/lexers. In what follows, I talk about antlr-4.7-complete.jar, which has the tool and the runtime and any other support libraries (e.g., ANTLR v4 is written in v3). If you are going to integrate ANTLR into your existing build system using mvn, ant, or want to get ANTLR into your IDE such as eclipse or intellij, see Integrating ANTLR into Development Systems. @@ -16,19 +16,21 @@ If you are going to integrate ANTLR into your existing build system using mvn, a 1. Download ``` $ cd /usr/local/lib -$ curl -O http://www.antlr.org/download/antlr-4.5.3-complete.jar +$ curl -O http://www.antlr.org/download/antlr-4.7-complete.jar ``` Or just download in browser from website: [http://www.antlr.org/download.html](http://www.antlr.org/download.html) and put it somewhere rational like `/usr/local/lib`. -2. Add `antlr-4.5.3-complete.jar` to your `CLASSPATH`: + +2. Add `antlr-4.7-complete.jar` to your `CLASSPATH`: ``` -$ export CLASSPATH=".:/usr/local/lib/antlr-4.5.3-complete.jar:$CLASSPATH" +$ export CLASSPATH=".:/usr/local/lib/antlr-4.7-complete.jar:$CLASSPATH" ``` It's also a good idea to put this in your `.bash_profile` or whatever your startup script is. + 3. Create aliases for the ANTLR Tool, and `TestRig`. 
``` -$ alias antlr4='java -Xmx500M -cp "/usr/local/lib/antlr-4.5.3-complete.jar:$CLASSPATH" org.antlr.v4.Tool' +$ alias antlr4='java -Xmx500M -cp "/usr/local/lib/antlr-4.7-complete.jar:$CLASSPATH" org.antlr.v4.Tool' $ alias grun='java org.antlr.v4.gui.TestRig' ``` @@ -39,11 +41,11 @@ $ alias grun='java org.antlr.v4.gui.TestRig' 0. Install Java (version 1.6 or higher) 1. Download antlr-4.5.3-complete.jar (or whatever version) from [http://www.antlr.org/download/](http://www.antlr.org/download/) Save to your directory for 3rd party Java libraries, say `C:\Javalib` -2. Add `antlr-4.5-complete.jar` to CLASSPATH, either: +2. Add `antlr-4.5.3-complete.jar` to CLASSPATH, either: * Permanently: Using System Properties dialog > Environment variables > Create or append to `CLASSPATH` variable * Temporarily, at command line: ``` -SET CLASSPATH=.;C:\Javalib\antlr-4.5.3-complete.jar;%CLASSPATH% +SET CLASSPATH=.;C:\Javalib\antlr-4.7-complete.jar;%CLASSPATH% ``` 3. Create short convenient commands for the ANTLR Tool, and TestRig, using batch files or doskey commands: * Batch files (in directory in system PATH) antlr4.bat and grun.bat @@ -65,7 +67,7 @@ Either launch org.antlr.v4.Tool directly: ``` $ java org.antlr.v4.Tool -ANTLR Parser Generator Version 4.5.3 +ANTLR Parser Generator Version 4.7 -o ___ specify output directory where all output is generated -lib ___ specify location of .tokens files ... @@ -74,8 +76,8 @@ ANTLR Parser Generator Version 4.5.3 or use -jar option on java: ``` -$ java -jar /usr/local/lib/antlr-4.5.3-complete.jar -ANTLR Parser Generator Version 4.5.3 +$ java -jar /usr/local/lib/antlr-4.7-complete.jar +ANTLR Parser Generator Version 4.7 -o ___ specify output directory where all output is generated -lib ___ specify location of .tokens files ... 
diff --git a/doc/images/gen_spm_module.png b/doc/images/gen_spm_module.png new file mode 100644 index 000000000..0798c37b6 Binary files /dev/null and b/doc/images/gen_spm_module.png differ diff --git a/doc/swift-target.md b/doc/swift-target.md index 69eb88e4d..4f4e6e7c1 100644 --- a/doc/swift-target.md +++ b/doc/swift-target.md @@ -1,9 +1,15 @@ # ANTLR4 Language Target, Runtime for Swift +## Performance Note + +To use ANTLR4 Swift target in production environment, make sure to turn on compiler optimizations by following [these instructions](https://github.com/apple/swift-package-manager/blob/master/Documentation/Usage.md#build-configurations) if you use SwiftPM to build your project. If you are using Xcode to build your project, it's unlikely you will not use `release` build for production build. + +Conclusion is, you need to turn on `release` mode (which will have all the optimization pre configured for you) so the ANTLR4 Swift target can have reasonable parsing speed. + ## Install ANTLR4 Make sure you have the ANTLR -installed.[The getting started guide](getting-started.md) should get +installed. [The getting started guide](getting-started.md) should get you started. ## Create a Swift lexer or parser @@ -18,82 +24,120 @@ For a full list of antlr4 tool options, please visit the ## Build your Swift project with ANTLR runtime -The following instructions are assuming Xcode as the IDE: +### Note -* __Add parser/lexer to project__. Make sure the parsers/lexers +We use __boot.py__ script located at the root of the Swift runtime folder +`antlr4/runtime/Swift` to provide additional support for both Xcode-based +projects and SPM-based projects. Below sections are organized for both of +the flavors. If you want to quickly get started, try: + +``` +python boot.py --help +``` + +for information about this script. 
+ +### Xcode Projects + +Note that even if you are otherwise using ANTLR from a binary distribution, +you should compile the ANTLR Swift runtime from source, because the Swift +language does not yet have a stable ABI. + +ANTLR uses Swift Package Manager to generate Xcode project files. Note that +Swift Package Manager does not currently support iOS, watchOS, or tvOS, so +if you wish to use those platforms, you will need to alter the project build +settings manually as appropriate. + +#### Download source code for ANTLR + +``` +git clone https://github.com/antlr/antlr4 +``` + +#### Generate Xcode project for ANTLR runtime + +The `boot.py` script includes a wrapper around `swift package +generate-xcodeproj`. Use this to generate `Antlr4.xcodeproj` for the ANTLR +Swift runtime. (using _swift package generate-xcodeproj_ is not recommended) +since the project is dependent on some parser files generated by _boot.py_. + +``` +cd antlr4/runtime/Swift +python boot.py --gen-xcodeproj +``` + +#### Import ANTLR Swift runtime into your project + +Open your own project in Xcode. + +Open Finder in the `runtime/Swift` directory: + +``` +# From antlr4/runtime/Swift +open . +``` + +Drag `Antlr4.xcodeproj` into your project. + +After this is done, your Xcode project navigator will be something like the +screenshot below. In this example, your own project is "Smalltalk", and you +will be able to see `Antlr4.xcodeproj` shown as a contained project. + + + +#### Edit the build settings if necessary + +Swift Package Manager currently does not support iOS, watchOS, or tvOS. If +you wish to build for those platforms, you will need to alter the project +build settings manually. + +#### Add generated parser and lexer to project + +Make sure the parsers/lexers generated in __step 2__ are added to the project. To do this, you can drag the generated files from Finder to the Xcode IDE. 
Remember to check __Copy items if needed__ to make sure the files are actually moved into the project folder instead of symbolic links (see the screenshot below). After moving you will be able to see your files in -the project navigator. But when you open one of the files, you will -see Xcode complaining the module "Antlr4" could not be found at the -import statement. This is expected, since we still need the ANTLR -Swift runtime for those missing symbols. +the project navigator. Make sure that the Target Membership settings +are correct for your project. -* __Download ANTLR runtime__. Due to unstable ABI of Swift language, -there will not be a single "library" for the Swift ANTLR runtime for -now. To get Swift ANTLR runtime, clone the ANTLR repository. Open it -in finder. From the root directory of the repo, go to runtime/Swift -folder. You will see the Xcode project manifest file: -__Antlr4.xcodeproj__. +#### Add the ANTLR Swift runtime as a dependency -* __Import ANTLR Swift runtime into project__. Drag Antlr4.xcodeproj -into your project, after this is done, your Xcode project navigator -will be something like the screenshot below. In this case, your own -project is "Smalltalk", and you will be able to see the -Antlr4.xcodeproj shown as a contained project. The error message will -still be there, that's because we still need to tell Xcode how to find -the runtime. - - - -* __Build ANTLR runtime__. By expanding the "Products" folder in the -inner project (Antlr4.xcodeproj), you will see two Antlr4.framework -files. ".framework" file is the swift version of ".jar", ".a" as in -JAVA, C/C++ Initially those two files should be red, that's because -they are not built. To build, click the "target selection" button -right next to your Xcode run button. And in the drop down select the -target you want to build. And you will see the two Antlr4.framework -files are for iOS and OSX, as shown below. 
After target selection, -press "CMD+B", and Xcode will build the framework for you. Then you -will see one of the frameworks become black. - - - -* __Add dependencies__. Simply adding ANTLR Swift runtime and build -the artifact is not enough. You still need to specify -dependencies. Click your own project (Smalltalk), and you will see -project setting page. Go to "Build Phase", and inside it make sure -your ANTLR Swift runtime framework is added to both "__Target -Dependencies__" and "__Link Binary With Libraries__" sections, as -shown below. After correctly added dependencies, the error message for -importing library will be gone. +Select your own project in Xcode and go to the Build Phases settings panel. +Add the ANTLR runtime under __Target Dependencies__ and __Link Binary With +Libraries__. -## Example playground +#### Build your project -The Swift runtime includes an Xcode playground to get started with. +The runtime and generated grammar should now build correctly. -First go to the ANTLR4 repository, and open -`runtime/Swift/Antlr4.xcworkspace` in Xcode. Select "Antlr4 OSX > My -Mac" as the build target, and build the project as normal. The -playground should then be active. +### Swift Package Manager Projects -The playground includes a simple grammar called "Hello", and an -example for walking the parse tree. You should see in the playground -output that it is printing messages for each node in the parse tree as -it walks. +Since we cannot have a separate repository for Swift target (see issue [#1774](https://github.com/antlr/antlr4/issues/1774)), +and Swift is currently not ABI stable. We currently support support SPM-based +projects by creating temporary local repository. -The grammar is defined in the playground's `Resources/Hello.g4`. 
The -parser was generated from the grammar using ANTLR like this: +For people using [Swift Package Manager](https://swift.org/package-manager/), +the __boot.py__ script supports generating local repository that can be used +as a dependency to your project. Simply run: -``` -antlr4 -Dlanguage=Swift -visitor -o ../Sources/Autogen Hello.g4 +``` +python boot.py --gen-spm-module ``` -The example tree walker is in Sources/HelloWalker.swift. - +The prompt will show something like below: + + + +Put the SPM directive that contains the url to temporary repository to your +project's Package.swift. And run `swift build` in your project. + +The project is generated in your system's `/tmp/` directory, if you find it +inconvenient, consider copy that generated ANTLR repository to some place +that won't be cleaned automatically and update `url` parameter in your +`Package.swift` file. diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/templates/Swift.test.stg b/runtime-testsuite/resources/org/antlr/v4/test/runtime/templates/Swift.test.stg index 2a203e969..64d560df5 100755 --- a/runtime-testsuite/resources/org/antlr/v4/test/runtime/templates/Swift.test.stg +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/templates/Swift.test.stg @@ -72,7 +72,7 @@ TokenStartColumnEquals(i) ::= <%self._tokenStartCharPositionInLine == %> ImportListener(X) ::= "" -GetExpectedTokenNames() ::= "try self.getExpectedTokens().toString(self.tokenNames)" +GetExpectedTokenNames() ::= "try self.getExpectedTokens().toString(self.getVocabulary())" RuleInvocationStack() ::= "getRuleInvocationStack().description.replacingOccurrences(of: \"\\\"\", with: \"\")" diff --git a/runtime-testsuite/test/org/antlr/v4/runtime/TestCodePointCharStream.java b/runtime-testsuite/test/org/antlr/v4/runtime/TestCodePointCharStream.java index 25c4c0919..c40c4048c 100644 --- a/runtime-testsuite/test/org/antlr/v4/runtime/TestCodePointCharStream.java +++ 
b/runtime-testsuite/test/org/antlr/v4/runtime/TestCodePointCharStream.java @@ -23,6 +23,7 @@ public class TestCodePointCharStream { CodePointCharStream s = CharStreams.fromString(""); assertEquals(0, s.size()); assertEquals(0, s.index()); + assertEquals("", s.toString()); } @Test diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/descriptors/ParserErrorsDescriptors.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/descriptors/ParserErrorsDescriptors.java index 0b53e994e..26352d317 100644 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/descriptors/ParserErrorsDescriptors.java +++ b/runtime-testsuite/test/org/antlr/v4/test/runtime/descriptors/ParserErrorsDescriptors.java @@ -618,4 +618,28 @@ public class ParserErrorsDescriptors { public String grammar; } + + public static class ExtraneousInput extends BaseParserTestDescriptor { + public String input = "baa"; + public String output = null; + public String errors = "line 1:0 mismatched input 'b' expecting {, 'a'}\n"; + public String startRule = "file"; + public String grammarName = "T"; + + /** + grammar T; + + member : 'a'; + body : member*; + file : body EOF; + B : 'b'; + */ + @CommentHasStringValue + public String grammar; + + @Override + public boolean ignore(String targetName) { + return !"Java".equals(targetName); + } + } } diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/descriptors/SemPredEvalParserDescriptors.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/descriptors/SemPredEvalParserDescriptors.java index fbf6cfbfc..218bdb789 100644 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/descriptors/SemPredEvalParserDescriptors.java +++ b/runtime-testsuite/test/org/antlr/v4/test/runtime/descriptors/SemPredEvalParserDescriptors.java @@ -283,11 +283,16 @@ public class SemPredEvalParserDescriptors { public String input = "s\n\n\nx\n"; public String output = "(file_ (para (paraContent s) \\n \\n) (para (paraContent \\n x \\n)) )\n"; /** - line 5:0 mismatched input '' 
expecting ' - ' + line 5:0 mismatched input '' expecting {'s', ' + ', 'x'} */ @CommentHasStringValue public String errors; + + @Override + public boolean ignore(String targetName) { + return !"Java".equals(targetName); + } } public static class PredFromAltTestedInLoopBack_2 extends PredFromAltTestedInLoopBack { diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/swift/BaseSwiftTest.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/swift/BaseSwiftTest.java index f6b890931..90dc05245 100644 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/swift/BaseSwiftTest.java +++ b/runtime-testsuite/test/org/antlr/v4/test/runtime/swift/BaseSwiftTest.java @@ -145,7 +145,7 @@ public class BaseSwiftTest implements RuntimeTestSupport { String projectName = "testcase-" + System.currentTimeMillis(); String projectDir = getTmpDir() + "/" + projectName; - buildProject(projectDir); + buildProject(projectDir, projectName); return execTest(projectDir, projectName); } @@ -183,12 +183,12 @@ public class BaseSwiftTest implements RuntimeTestSupport { Collections.addAll(this.sourceFiles, files); } - private void buildProject(String projectDir) { + private void buildProject(String projectDir, String projectName) { mkdir(projectDir); fastFailRunProcess(projectDir, SWIFT_CMD, "package", "init", "--type", "executable"); for (String sourceFile: sourceFiles) { String absPath = getTmpDir() + "/" + sourceFile; - fastFailRunProcess(getTmpDir(), "mv", "-f", absPath, projectDir + "/Sources/"); + fastFailRunProcess(getTmpDir(), "mv", "-f", absPath, projectDir + "/Sources/" + projectName); } fastFailRunProcess(getTmpDir(), "mv", "-f", "input", projectDir); @@ -201,7 +201,7 @@ public class BaseSwiftTest implements RuntimeTestSupport { "-Xlinker", "-rpath", "-Xlinker", dylibPath); if (buildResult.b.length() > 0) { - throw new RuntimeException("unit test build failed: " + buildResult.b); + throw new RuntimeException("unit test build failed: " + buildResult.a + "\n" + buildResult.b); } } 
catch (IOException | InterruptedException e) { e.printStackTrace(); @@ -251,7 +251,7 @@ public class BaseSwiftTest implements RuntimeTestSupport { addSourceFiles("main.swift"); String projectName = "testcase-" + System.currentTimeMillis(); String projectDir = getTmpDir() + "/" + projectName; - buildProject(projectDir); + buildProject(projectDir, projectName); return execTest(projectDir, projectName); } diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Atn/ATNDeserializer.cs b/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Atn/ATNDeserializer.cs index 9009b9f43..3ce2e87d2 100644 --- a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Atn/ATNDeserializer.cs +++ b/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Atn/ATNDeserializer.cs @@ -1092,7 +1092,10 @@ nextTransition_continue: ; protected internal Guid ReadUUID() { byte[] d = BitConverter.GetBytes (ReadLong ()); - Array.Reverse(d); + if(BitConverter.IsLittleEndian) + { + Array.Reverse(d); + } short c = (short)ReadInt(); short b = (short)ReadInt(); int a = ReadInt32(); diff --git a/runtime/Cpp/CMakeLists.txt b/runtime/Cpp/CMakeLists.txt index 65e704516..c91e38e38 100644 --- a/runtime/Cpp/CMakeLists.txt +++ b/runtime/Cpp/CMakeLists.txt @@ -33,6 +33,7 @@ endif() if(CMAKE_VERSION VERSION_EQUAL "3.3.0" OR CMAKE_VERSION VERSION_GREATER "3.3.0") CMAKE_POLICY(SET CMP0059 OLD) + CMAKE_POLICY(SET CMP0054 OLD) endif() if(CMAKE_SYSTEM_NAME MATCHES "Linux") @@ -61,7 +62,11 @@ if (WITH_DEMO) endif() endif(WITH_DEMO) -set(MY_CXX_WARNING_FLAGS " -Wall -pedantic -W") +if (MSVC_VERSION) + set(MY_CXX_WARNING_FLAGS " /W4") +else() + set(MY_CXX_WARNING_FLAGS " -Wall -pedantic -W") +endif() # Initialize CXXFLAGS. 
if("${CMAKE_VERSION}" VERSION_GREATER 3.1.0) @@ -75,11 +80,18 @@ else() set(CMAKE_CXX_FLAGS_RELWITHDEBINFO "${CMAKE_CXX_FLAGS_RELWITHDEBINFO} -std=c++11") endif() -set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wall ${MY_CXX_WARNING_FLAGS}") -set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -O0 -g ${MY_CXX_WARNING_FLAGS}") -set(CMAKE_CXX_FLAGS_MINSIZEREL "${CMAKE_CXX_FLAGS_MINSIZEREL} -Os -DNDEBUG ${MY_CXX_WARNING_FLAGS}") -set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} -O3 -DNDEBUG ${MY_CXX_WARNING_FLGAS}") -set(CMAKE_CXX_FLAGS_RELWITHDEBINFO "${CMAKE_CXX_FLAGS_RELWITHDEBINFO} -O2 -g ${MY_CXX_WARNING_FLAGS}") +set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${MY_CXX_WARNING_FLAGS}") +if (MSVC_VERSION) + set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} /Od /Zi /MP ${MY_CXX_WARNING_FLAGS}") + set(CMAKE_CXX_FLAGS_MINSIZEREL "${CMAKE_CXX_FLAGS_MINSIZEREL} /O1 /Oi /Ob2 /Gy /MP /DNDEBUG ${MY_CXX_WARNING_FLAGS}") + set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} /O2 /Oi /Ob2 /Gy /MP /DNDEBUG ${MY_CXX_WARNING_FLGAS}") + set(CMAKE_CXX_FLAGS_RELWITHDEBINFO "${CMAKE_CXX_FLAGS_RELWITHDEBINFO} /O2 /Oi /Ob2 /Gy /MP /Zi ${MY_CXX_WARNING_FLAGS}") +else() + set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -O0 -g ${MY_CXX_WARNING_FLAGS}") + set(CMAKE_CXX_FLAGS_MINSIZEREL "${CMAKE_CXX_FLAGS_MINSIZEREL} -Os -DNDEBUG ${MY_CXX_WARNING_FLAGS}") + set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} -O3 -DNDEBUG ${MY_CXX_WARNING_FLGAS}") + set(CMAKE_CXX_FLAGS_RELWITHDEBINFO "${CMAKE_CXX_FLAGS_RELWITHDEBINFO} -O2 -g ${MY_CXX_WARNING_FLAGS}") +endif() # Compiler-specific C++11 activation. 
if ("${CMAKE_CXX_COMPILER_ID}" MATCHES "GNU") @@ -101,6 +113,8 @@ elseif ("${CMAKE_CXX_COMPILER_ID}" MATCHES "Clang" AND CMAKE_SYSTEM_NAME MATCHES if (WITH_LIBCXX) set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -stdlib=libc++") endif() +elseif ( MSVC_VERSION GREATER 1800 OR MSVC_VERSION EQUAL 1800 ) + # Visual Studio 2012+ supports c++11 features else () message(FATAL_ERROR "Your C++ compiler does not support C++11.") endif () diff --git a/runtime/Cpp/demo/Mac/antlrcpp Tests/InputHandlingTests.mm b/runtime/Cpp/demo/Mac/antlrcpp Tests/InputHandlingTests.mm index 7b13ae83d..647f73fed 100644 --- a/runtime/Cpp/demo/Mac/antlrcpp Tests/InputHandlingTests.mm +++ b/runtime/Cpp/demo/Mac/antlrcpp Tests/InputHandlingTests.mm @@ -91,7 +91,7 @@ using namespace antlr4::misc; - (void)testANTLRInputStreamUse { std::string text(u8"🚧Lorem ipsum dolor sit amet🕶"); - std::u32string wtext = utfConverter.from_bytes(text); // Convert to UTF-32. + std::u32string wtext = utf8_to_utf32(text.c_str(), text.c_str() + text.size()); // Convert to UTF-32. ANTLRInputStream stream(text); XCTAssertEqual(stream.index(), 0U); XCTAssertEqual(stream.size(), wtext.size()); @@ -116,8 +116,8 @@ using namespace antlr4::misc; XCTAssertEqual(stream.LA(0), 0ULL); for (size_t i = 1; i < wtext.size(); ++i) { - XCTAssertEqual(stream.LA((ssize_t)i), wtext[i - 1]); // LA(1) means: current char. - XCTAssertEqual(stream.LT((ssize_t)i), wtext[i - 1]); // LT is mapped to LA. + XCTAssertEqual(stream.LA(static_cast(i)), wtext[i - 1]); // LA(1) means: current char. + XCTAssertEqual(stream.LT(static_cast(i)), wtext[i - 1]); // LT is mapped to LA. XCTAssertEqual(stream.index(), 0U); // No consumption when looking ahead. 
} @@ -128,7 +128,7 @@ using namespace antlr4::misc; XCTAssertEqual(stream.index(), wtext.size() / 2); stream.seek(wtext.size() - 1); - for (ssize_t i = 1; i < (ssize_t)wtext.size() - 1; ++i) { + for (ssize_t i = 1; i < static_cast(wtext.size()) - 1; ++i) { XCTAssertEqual(stream.LA(-i), wtext[wtext.size() - i - 1]); // LA(-1) means: previous char. XCTAssertEqual(stream.LT(-i), wtext[wtext.size() - i - 1]); // LT is mapped to LA. XCTAssertEqual(stream.index(), wtext.size() - 1); // No consumption when looking ahead. @@ -150,7 +150,7 @@ using namespace antlr4::misc; misc::Interval interval1(2, 10UL); // From - to, inclusive. std::string output = stream.getText(interval1); - std::string sub = utfConverter.to_bytes(wtext.substr(2, 9)); + std::string sub = utf32_to_utf8(wtext.substr(2, 9)); XCTAssertEqual(output, sub); misc::Interval interval2(200, 10UL); // Start beyond bounds. diff --git a/runtime/Cpp/demo/Mac/antlrcpp Tests/MiscClassTests.mm b/runtime/Cpp/demo/Mac/antlrcpp Tests/MiscClassTests.mm index 063616a1d..58cac4be4 100644 --- a/runtime/Cpp/demo/Mac/antlrcpp Tests/MiscClassTests.mm +++ b/runtime/Cpp/demo/Mac/antlrcpp Tests/MiscClassTests.mm @@ -92,7 +92,7 @@ using namespace antlrcpp; // in a deterministic and a random sequence of 100K values each. 
std::set hashs; for (size_t i = 0; i < 100000; ++i) { - std::vector data = { i, (size_t)(i * M_PI), arc4random()}; + std::vector data = { i, static_cast(i * M_PI), arc4random() }; size_t hash = 0; for (auto value : data) hash = MurmurHash::update(hash, value); @@ -103,7 +103,7 @@ using namespace antlrcpp; hashs.clear(); for (size_t i = 0; i < 100000; ++i) { - std::vector data = { i, (size_t)(i * M_PI)}; + std::vector data = { i, static_cast(i * M_PI) }; size_t hash = 0; for (auto value : data) hash = MurmurHash::update(hash, value); @@ -232,19 +232,25 @@ using namespace antlrcpp; { 78, Interval(1000, 1000UL), Interval(20, 100UL), { false, false, true, true, false, true, false, false } }, // It's possible to add more tests with borders that touch each other (e.g. first starts before/on/after second - // and first ends directly before/after second. However, such cases are not handled differently in the Interval class + // and first ends directly before/after second. However, such cases are not handled differently in the Interval + // class // (only adjacent intervals, where first ends directly before second starts and vice versa. So I ommitted them here. 
}; for (auto &entry : testData) { - XCTAssert(entry.interval1.startsBeforeDisjoint(entry.interval2) == entry.results[0], @"entry: %zu", entry.runningNumber); - XCTAssert(entry.interval1.startsBeforeNonDisjoint(entry.interval2) == entry.results[1], @"entry: %zu", entry.runningNumber); + XCTAssert(entry.interval1.startsBeforeDisjoint(entry.interval2) == entry.results[0], @"entry: %zu", + entry.runningNumber); + XCTAssert(entry.interval1.startsBeforeNonDisjoint(entry.interval2) == entry.results[1], @"entry: %zu", + entry.runningNumber); XCTAssert(entry.interval1.startsAfter(entry.interval2) == entry.results[2], @"entry: %zu", entry.runningNumber); - XCTAssert(entry.interval1.startsAfterDisjoint(entry.interval2) == entry.results[3], @"entry: %zu", entry.runningNumber); - XCTAssert(entry.interval1.startsAfterNonDisjoint(entry.interval2) == entry.results[4], @"entry: %zu", entry.runningNumber); + XCTAssert(entry.interval1.startsAfterDisjoint(entry.interval2) == entry.results[3], @"entry: %zu", + entry.runningNumber); + XCTAssert(entry.interval1.startsAfterNonDisjoint(entry.interval2) == entry.results[4], @"entry: %zu", + entry.runningNumber); XCTAssert(entry.interval1.disjoint(entry.interval2) == entry.results[5], @"entry: %zu", entry.runningNumber); XCTAssert(entry.interval1.adjacent(entry.interval2) == entry.results[6], @"entry: %zu", entry.runningNumber); - XCTAssert(entry.interval1.properlyContains(entry.interval2) == entry.results[7], @"entry: %zu", entry.runningNumber); + XCTAssert(entry.interval1.properlyContains(entry.interval2) == entry.results[7], @"entry: %zu", + entry.runningNumber); } XCTAssert(Interval().Union(Interval(10, 100UL)) == Interval(-1L, 100)); @@ -327,30 +333,34 @@ using namespace antlrcpp; try { set4.clear(); XCTFail(@"Expected exception"); - } - catch (IllegalStateException &e) { + } catch (IllegalStateException &e) { } try { set4.setReadOnly(false); XCTFail(@"Expected exception"); + } catch (IllegalStateException &e) { } - catch 
(IllegalStateException &e) { - } - - set4 = IntervalSet::of(12345); - XCTAssertEqual(set4.getSingleElement(), 12345); - XCTAssertEqual(set4.getMinElement(), 12345); - XCTAssertEqual(set4.getMaxElement(), 12345); - IntervalSet set5(10, 5, 10, 15, 20, 25, 30, 35, 40, 45, 50); - XCTAssertEqual(set5.getMinElement(), 5); - XCTAssertEqual(set5.getMaxElement(), 50); - XCTAssertEqual(set5.size(), 10U); - set5.add(12, 18); - XCTAssertEqual(set5.size(), 16U); // (15, 15) replaced by (12, 18) - set5.add(9, 33); - XCTAssertEqual(set5.size(), 30U); // (10, 10), (12, 18), (20, 20), (25, 25) and (30, 30) replaced by (9, 33) + try { + set4 = IntervalSet::of(12345); + XCTFail(@"Expected exception"); + } catch (IllegalStateException &e) { + } + + IntervalSet set5 = IntervalSet::of(12345); + XCTAssertEqual(set5.getSingleElement(), 12345); + XCTAssertEqual(set5.getMinElement(), 12345); + XCTAssertEqual(set5.getMaxElement(), 12345); + + IntervalSet set6(10, 5, 10, 15, 20, 25, 30, 35, 40, 45, 50); + XCTAssertEqual(set6.getMinElement(), 5); + XCTAssertEqual(set6.getMaxElement(), 50); + XCTAssertEqual(set6.size(), 10U); + set6.add(12, 18); + XCTAssertEqual(set6.size(), 16U); // (15, 15) replaced by (12, 18) + set6.add(9, 33); + XCTAssertEqual(set6.size(), 30U); // (10, 10), (12, 18), (20, 20), (25, 25) and (30, 30) replaced by (9, 33) XCTAssert(IntervalSet(3, 1, 2, 10).Or(IntervalSet(3, 1, 2, 5)) == IntervalSet(4, 1, 2, 5, 10)); XCTAssert(IntervalSet({ Interval(2, 10UL) }).Or(IntervalSet({ Interval(5, 8UL) })) == IntervalSet({ Interval(2, 10UL) })); @@ -358,8 +368,10 @@ using namespace antlrcpp; XCTAssert(IntervalSet::of(1, 10).complement(IntervalSet::of(7, 55)) == IntervalSet::of(11, 55)); XCTAssert(IntervalSet::of(1, 10).complement(IntervalSet::of(20, 55)) == IntervalSet::of(20, 55)); XCTAssert(IntervalSet::of(1, 10).complement(IntervalSet::of(5, 6)) == IntervalSet::EMPTY_SET); - XCTAssert(IntervalSet::of(15, 20).complement(IntervalSet::of(7, 55)) == IntervalSet({ Interval(7, 14UL), 
Interval(21, 55UL) })); - XCTAssert(IntervalSet({ Interval(1, 10UL), Interval(30, 35UL) }).complement(IntervalSet::of(7, 55)) == IntervalSet({ Interval(11, 29UL), Interval(36, 55UL) })); + XCTAssert(IntervalSet::of(15, 20).complement(IntervalSet::of(7, 55)) == + IntervalSet({ Interval(7, 14UL), Interval(21, 55UL) })); + XCTAssert(IntervalSet({ Interval(1, 10UL), Interval(30, 35UL) }).complement(IntervalSet::of(7, 55)) == + IntervalSet({ Interval(11, 29UL), Interval(36, 55UL) })); XCTAssert(IntervalSet::of(1, 10).And(IntervalSet::of(7, 55)) == IntervalSet::of(7, 10)); XCTAssert(IntervalSet::of(1, 10).And(IntervalSet::of(20, 55)) == IntervalSet::EMPTY_SET); @@ -368,7 +380,8 @@ using namespace antlrcpp; XCTAssert(IntervalSet::of(1, 10).subtract(IntervalSet::of(7, 55)) == IntervalSet::of(1, 6)); XCTAssert(IntervalSet::of(1, 10).subtract(IntervalSet::of(20, 55)) == IntervalSet::of(1, 10)); - XCTAssert(IntervalSet::of(1, 10).subtract(IntervalSet::of(5, 6)) == IntervalSet({ Interval(1, 4UL), Interval(7, 10UL) })); + XCTAssert(IntervalSet::of(1, 10).subtract(IntervalSet::of(5, 6)) == + IntervalSet({ Interval(1, 4UL), Interval(7, 10UL) })); XCTAssert(IntervalSet::of(15, 20).subtract(IntervalSet::of(7, 55)) == IntervalSet::EMPTY_SET); } diff --git a/runtime/Cpp/deploy-windows.cmd b/runtime/Cpp/deploy-windows.cmd index ec81b5940..5660f26a2 100644 --- a/runtime/Cpp/deploy-windows.cmd +++ b/runtime/Cpp/deploy-windows.cmd @@ -12,7 +12,8 @@ rem Headers xcopy runtime\src\*.h antlr4-runtime\ /s rem Binaries -if exist "C:\Program Files (x86)\Microsoft Visual Studio 12.0\Common7\Tools\VsDevCmd.bat" ( +rem VS 2013 disabled by default. Change the X to a C to enable it. 
+if exist "X:\Program Files (x86)\Microsoft Visual Studio 12.0\Common7\Tools\VsDevCmd.bat" ( call "C:\Program Files (x86)\Microsoft Visual Studio 12.0\Common7\Tools\VsDevCmd.bat" pushd runtime diff --git a/runtime/Cpp/runtime/CMakeLists.txt b/runtime/Cpp/runtime/CMakeLists.txt index b2a4fbd02..dcd21b8b0 100644 --- a/runtime/Cpp/runtime/CMakeLists.txt +++ b/runtime/Cpp/runtime/CMakeLists.txt @@ -44,7 +44,11 @@ elseif(APPLE) target_link_libraries(antlr4_static ${COREFOUNDATION_LIBRARY}) endif() -set(disabled_compile_warnings "-Wno-overloaded-virtual") +if (MSVC_VERSION) + set(disabled_compile_warnings "/wd4251") +else() + set(disabled_compile_warnings "-Wno-overloaded-virtual") +endif() if ("${CMAKE_CXX_COMPILER_ID}" MATCHES "Clang") set(disabled_compile_warnings "${disabled_compile_warnings} -Wno-dollar-in-identifier-extension -Wno-four-char-constants") elseif("${CMAKE_CXX_COMPILER_ID}" MATCHES "GNU") @@ -57,6 +61,15 @@ if (WIN32) set(extra_share_compile_flags "-DANTLR4CPP_EXPORTS") set(extra_static_compile_flags "-DANTLR4CPP_STATIC") endif(WIN32) +if (MSVC_VERSION) + target_compile_options(antlr4_shared PRIVATE "/MD$<$:d>") + target_compile_options(antlr4_static PRIVATE "/MT$<$:d>") +endif() + +set(static_lib_suffix "") +if (MSVC_VERSION) + set(static_lib_suffix "-static") +endif() set_target_properties(antlr4_shared PROPERTIES VERSION ${ANTLR_VERSION} @@ -72,7 +85,7 @@ set_target_properties(antlr4_shared set_target_properties(antlr4_static PROPERTIES VERSION ${ANTLR_VERSION} SOVERSION ${ANTLR_VERSION} - OUTPUT_NAME antlr4-runtime + OUTPUT_NAME "antlr4-runtime${static_lib_suffix}" ARCHIVE_OUTPUT_DIRECTORY ${LIB_OUTPUT_DIR} COMPILE_FLAGS "${disabled_compile_warnings} ${extra_static_compile_flags}") diff --git a/runtime/Cpp/runtime/antlr4cpp-vs2013.vcxproj b/runtime/Cpp/runtime/antlr4cpp-vs2013.vcxproj index 50ab20c8b..80f9ebf77 100644 --- a/runtime/Cpp/runtime/antlr4cpp-vs2013.vcxproj +++ b/runtime/Cpp/runtime/antlr4cpp-vs2013.vcxproj @@ -321,6 +321,8 @@ + + @@ 
-339,6 +341,7 @@ + @@ -346,6 +349,7 @@ + @@ -412,6 +416,7 @@ + @@ -422,16 +427,23 @@ + + + + + + + @@ -439,6 +451,7 @@ + @@ -454,6 +467,7 @@ + @@ -620,4 +634,4 @@ - + \ No newline at end of file diff --git a/runtime/Cpp/runtime/antlr4cpp-vs2013.vcxproj.filters b/runtime/Cpp/runtime/antlr4cpp-vs2013.vcxproj.filters index d3b301654..499a82ed4 100644 --- a/runtime/Cpp/runtime/antlr4cpp-vs2013.vcxproj.filters +++ b/runtime/Cpp/runtime/antlr4cpp-vs2013.vcxproj.filters @@ -938,5 +938,47 @@ Source Files\tree + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files\tree + + + Source Files\tree + + + Source Files\tree + + + Source Files\tree + + + Source Files\support + + + Source Files\atn + + + Source Files\atn + + + Source Files\tree\pattern + + + Source Files\misc + - + \ No newline at end of file diff --git a/runtime/Cpp/runtime/antlr4cpp-vs2015.vcxproj b/runtime/Cpp/runtime/antlr4cpp-vs2015.vcxproj index e549a78b6..f9bebf6fe 100644 --- a/runtime/Cpp/runtime/antlr4cpp-vs2015.vcxproj +++ b/runtime/Cpp/runtime/antlr4cpp-vs2015.vcxproj @@ -334,6 +334,8 @@ + + @@ -352,6 +354,7 @@ + @@ -359,6 +362,7 @@ + @@ -425,6 +429,7 @@ + @@ -435,16 +440,23 @@ + + + + + + + @@ -452,6 +464,7 @@ + @@ -467,6 +480,7 @@ + @@ -633,4 +647,4 @@ - + \ No newline at end of file diff --git a/runtime/Cpp/runtime/antlr4cpp-vs2015.vcxproj.filters b/runtime/Cpp/runtime/antlr4cpp-vs2015.vcxproj.filters index 21eaaf722..26db5b9c4 100644 --- a/runtime/Cpp/runtime/antlr4cpp-vs2015.vcxproj.filters +++ b/runtime/Cpp/runtime/antlr4cpp-vs2015.vcxproj.filters @@ -938,5 +938,47 @@ Source Files\tree + + Source Files + + + Source Files + + + Source Files\atn + + + Source Files\atn + + + Source Files\misc + + + Source Files + + + Source Files + + + Source Files + + + Source Files\support + + + Source Files\tree + + + Source Files\tree + + + Source Files\tree + + + Source Files\tree + + + Source Files\tree\pattern + - + \ No newline at end of file diff --git 
a/runtime/Cpp/runtime/antlrcpp.xcodeproj/project.pbxproj b/runtime/Cpp/runtime/antlrcpp.xcodeproj/project.pbxproj index 643c05885..ced55cf90 100644 --- a/runtime/Cpp/runtime/antlrcpp.xcodeproj/project.pbxproj +++ b/runtime/Cpp/runtime/antlrcpp.xcodeproj/project.pbxproj @@ -534,9 +534,6 @@ 276E5F411CDB57AA003FF4B4 /* IntStream.h in Headers */ = {isa = PBXBuildFile; fileRef = 276E5CBF1CDB57AA003FF4B4 /* IntStream.h */; }; 276E5F421CDB57AA003FF4B4 /* IntStream.h in Headers */ = {isa = PBXBuildFile; fileRef = 276E5CBF1CDB57AA003FF4B4 /* IntStream.h */; }; 276E5F431CDB57AA003FF4B4 /* IntStream.h in Headers */ = {isa = PBXBuildFile; fileRef = 276E5CBF1CDB57AA003FF4B4 /* IntStream.h */; settings = {ATTRIBUTES = (Public, ); }; }; - 276E5F441CDB57AA003FF4B4 /* IRecognizer.h in Headers */ = {isa = PBXBuildFile; fileRef = 276E5CC01CDB57AA003FF4B4 /* IRecognizer.h */; }; - 276E5F451CDB57AA003FF4B4 /* IRecognizer.h in Headers */ = {isa = PBXBuildFile; fileRef = 276E5CC01CDB57AA003FF4B4 /* IRecognizer.h */; }; - 276E5F461CDB57AA003FF4B4 /* IRecognizer.h in Headers */ = {isa = PBXBuildFile; fileRef = 276E5CC01CDB57AA003FF4B4 /* IRecognizer.h */; settings = {ATTRIBUTES = (Public, ); }; }; 276E5F471CDB57AA003FF4B4 /* Lexer.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 276E5CC11CDB57AA003FF4B4 /* Lexer.cpp */; }; 276E5F481CDB57AA003FF4B4 /* Lexer.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 276E5CC11CDB57AA003FF4B4 /* Lexer.cpp */; }; 276E5F491CDB57AA003FF4B4 /* Lexer.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 276E5CC11CDB57AA003FF4B4 /* Lexer.cpp */; }; @@ -800,6 +797,45 @@ 27745F081CE49C000067C6A3 /* RuntimeMetaData.h in Headers */ = {isa = PBXBuildFile; fileRef = 27745EFC1CE49C000067C6A3 /* RuntimeMetaData.h */; }; 27874F1E1CCB7A0700AF1C53 /* CoreFoundation.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 27874F1D1CCB7A0700AF1C53 /* CoreFoundation.framework */; }; 27874F211CCB7B1700AF1C53 /* CoreFoundation.framework in Frameworks */ = {isa = 
PBXBuildFile; fileRef = 27874F1D1CCB7A0700AF1C53 /* CoreFoundation.framework */; }; + 2793DC851F08083F00A84290 /* TokenSource.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 2793DC841F08083F00A84290 /* TokenSource.cpp */; }; + 2793DC861F08083F00A84290 /* TokenSource.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 2793DC841F08083F00A84290 /* TokenSource.cpp */; }; + 2793DC871F08083F00A84290 /* TokenSource.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 2793DC841F08083F00A84290 /* TokenSource.cpp */; }; + 2793DC891F08087500A84290 /* Chunk.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 2793DC881F08087500A84290 /* Chunk.cpp */; }; + 2793DC8A1F08087500A84290 /* Chunk.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 2793DC881F08087500A84290 /* Chunk.cpp */; }; + 2793DC8B1F08087500A84290 /* Chunk.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 2793DC881F08087500A84290 /* Chunk.cpp */; }; + 2793DC8D1F08088F00A84290 /* ParseTreeListener.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 2793DC8C1F08088F00A84290 /* ParseTreeListener.cpp */; }; + 2793DC8E1F08088F00A84290 /* ParseTreeListener.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 2793DC8C1F08088F00A84290 /* ParseTreeListener.cpp */; }; + 2793DC8F1F08088F00A84290 /* ParseTreeListener.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 2793DC8C1F08088F00A84290 /* ParseTreeListener.cpp */; }; + 2793DC911F0808A200A84290 /* TerminalNode.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 2793DC901F0808A200A84290 /* TerminalNode.cpp */; }; + 2793DC921F0808A200A84290 /* TerminalNode.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 2793DC901F0808A200A84290 /* TerminalNode.cpp */; }; + 2793DC931F0808A200A84290 /* TerminalNode.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 2793DC901F0808A200A84290 /* TerminalNode.cpp */; }; + 2793DC961F0808E100A84290 /* ErrorNode.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 2793DC941F0808E100A84290 /* ErrorNode.cpp */; }; + 2793DC971F0808E100A84290 /* 
ErrorNode.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 2793DC941F0808E100A84290 /* ErrorNode.cpp */; }; + 2793DC981F0808E100A84290 /* ErrorNode.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 2793DC941F0808E100A84290 /* ErrorNode.cpp */; }; + 2793DC991F0808E100A84290 /* ParseTreeVisitor.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 2793DC951F0808E100A84290 /* ParseTreeVisitor.cpp */; }; + 2793DC9A1F0808E100A84290 /* ParseTreeVisitor.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 2793DC951F0808E100A84290 /* ParseTreeVisitor.cpp */; }; + 2793DC9B1F0808E100A84290 /* ParseTreeVisitor.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 2793DC951F0808E100A84290 /* ParseTreeVisitor.cpp */; }; + 2793DC9D1F08090D00A84290 /* Any.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 2793DC9C1F08090D00A84290 /* Any.cpp */; }; + 2793DC9E1F08090D00A84290 /* Any.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 2793DC9C1F08090D00A84290 /* Any.cpp */; }; + 2793DC9F1F08090D00A84290 /* Any.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 2793DC9C1F08090D00A84290 /* Any.cpp */; }; + 2793DCA41F08095F00A84290 /* ANTLRErrorListener.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 2793DCA01F08095F00A84290 /* ANTLRErrorListener.cpp */; }; + 2793DCA51F08095F00A84290 /* ANTLRErrorListener.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 2793DCA01F08095F00A84290 /* ANTLRErrorListener.cpp */; }; + 2793DCA61F08095F00A84290 /* ANTLRErrorListener.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 2793DCA01F08095F00A84290 /* ANTLRErrorListener.cpp */; }; + 2793DCA71F08095F00A84290 /* ANTLRErrorStrategy.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 2793DCA11F08095F00A84290 /* ANTLRErrorStrategy.cpp */; }; + 2793DCA81F08095F00A84290 /* ANTLRErrorStrategy.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 2793DCA11F08095F00A84290 /* ANTLRErrorStrategy.cpp */; }; + 2793DCA91F08095F00A84290 /* ANTLRErrorStrategy.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 
2793DCA11F08095F00A84290 /* ANTLRErrorStrategy.cpp */; }; + 2793DCAA1F08095F00A84290 /* Token.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 2793DCA21F08095F00A84290 /* Token.cpp */; }; + 2793DCAB1F08095F00A84290 /* Token.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 2793DCA21F08095F00A84290 /* Token.cpp */; }; + 2793DCAC1F08095F00A84290 /* Token.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 2793DCA21F08095F00A84290 /* Token.cpp */; }; + 2793DCAD1F08095F00A84290 /* WritableToken.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 2793DCA31F08095F00A84290 /* WritableToken.cpp */; }; + 2793DCAE1F08095F00A84290 /* WritableToken.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 2793DCA31F08095F00A84290 /* WritableToken.cpp */; }; + 2793DCAF1F08095F00A84290 /* WritableToken.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 2793DCA31F08095F00A84290 /* WritableToken.cpp */; }; + 2793DCB31F08099C00A84290 /* BlockStartState.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 2793DCB01F08099C00A84290 /* BlockStartState.cpp */; }; + 2793DCB41F08099C00A84290 /* BlockStartState.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 2793DCB01F08099C00A84290 /* BlockStartState.cpp */; }; + 2793DCB51F08099C00A84290 /* BlockStartState.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 2793DCB01F08099C00A84290 /* BlockStartState.cpp */; }; + 2793DCB61F08099C00A84290 /* LexerAction.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 2793DCB11F08099C00A84290 /* LexerAction.cpp */; }; + 2793DCB71F08099C00A84290 /* LexerAction.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 2793DCB11F08099C00A84290 /* LexerAction.cpp */; }; + 2793DCB81F08099C00A84290 /* LexerAction.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 2793DCB11F08099C00A84290 /* LexerAction.cpp */; }; 2794D8561CE7821B00FADD0F /* antlr4-common.h in Headers */ = {isa = PBXBuildFile; fileRef = 2794D8551CE7821B00FADD0F /* antlr4-common.h */; }; 2794D8571CE7821B00FADD0F /* antlr4-common.h in Headers */ = {isa = 
PBXBuildFile; fileRef = 2794D8551CE7821B00FADD0F /* antlr4-common.h */; }; 2794D8581CE7821B00FADD0F /* antlr4-common.h in Headers */ = {isa = PBXBuildFile; fileRef = 2794D8551CE7821B00FADD0F /* antlr4-common.h */; }; @@ -1061,7 +1097,6 @@ 276E5CBD1CDB57AA003FF4B4 /* InterpreterRuleContext.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = InterpreterRuleContext.h; sourceTree = ""; wrapsLines = 0; }; 276E5CBE1CDB57AA003FF4B4 /* IntStream.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = IntStream.cpp; sourceTree = ""; }; 276E5CBF1CDB57AA003FF4B4 /* IntStream.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = IntStream.h; sourceTree = ""; }; - 276E5CC01CDB57AA003FF4B4 /* IRecognizer.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = IRecognizer.h; sourceTree = ""; }; 276E5CC11CDB57AA003FF4B4 /* Lexer.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = Lexer.cpp; sourceTree = ""; wrapsLines = 0; }; 276E5CC21CDB57AA003FF4B4 /* Lexer.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = Lexer.h; sourceTree = ""; }; 276E5CC31CDB57AA003FF4B4 /* LexerInterpreter.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = LexerInterpreter.cpp; sourceTree = ""; wrapsLines = 0; }; @@ -1152,6 +1187,19 @@ 27874F1D1CCB7A0700AF1C53 /* CoreFoundation.framework */ = {isa = PBXFileReference; lastKnownFileType = wrapper.framework; name = CoreFoundation.framework; path = System/Library/Frameworks/CoreFoundation.framework; sourceTree = SDKROOT; }; 278E313E1D9D6534001C28F9 /* Tests.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; path = Tests.m; sourceTree = ""; }; 278E31401D9D6534001C28F9 /* Info.plist */ = {isa = PBXFileReference; lastKnownFileType = text.plist.xml; 
path = Info.plist; sourceTree = ""; }; + 2793DC841F08083F00A84290 /* TokenSource.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = TokenSource.cpp; sourceTree = ""; }; + 2793DC881F08087500A84290 /* Chunk.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = Chunk.cpp; sourceTree = ""; }; + 2793DC8C1F08088F00A84290 /* ParseTreeListener.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ParseTreeListener.cpp; sourceTree = ""; }; + 2793DC901F0808A200A84290 /* TerminalNode.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = TerminalNode.cpp; sourceTree = ""; }; + 2793DC941F0808E100A84290 /* ErrorNode.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ErrorNode.cpp; sourceTree = ""; }; + 2793DC951F0808E100A84290 /* ParseTreeVisitor.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ParseTreeVisitor.cpp; sourceTree = ""; }; + 2793DC9C1F08090D00A84290 /* Any.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = Any.cpp; sourceTree = ""; }; + 2793DCA01F08095F00A84290 /* ANTLRErrorListener.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ANTLRErrorListener.cpp; sourceTree = ""; }; + 2793DCA11F08095F00A84290 /* ANTLRErrorStrategy.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ANTLRErrorStrategy.cpp; sourceTree = ""; }; + 2793DCA21F08095F00A84290 /* Token.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = Token.cpp; sourceTree = ""; }; + 2793DCA31F08095F00A84290 /* WritableToken.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = WritableToken.cpp; sourceTree 
= ""; }; + 2793DCB01F08099C00A84290 /* BlockStartState.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = BlockStartState.cpp; sourceTree = ""; }; + 2793DCB11F08099C00A84290 /* LexerAction.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = LexerAction.cpp; sourceTree = ""; }; 2794D8551CE7821B00FADD0F /* antlr4-common.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "antlr4-common.h"; sourceTree = ""; }; 27AC52CF1CE773A80093AAAB /* antlr4-runtime.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "antlr4-runtime.h"; sourceTree = ""; }; 27B36AC41DACE7AF0069C868 /* RuleContextWithAltNum.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = RuleContextWithAltNum.cpp; sourceTree = ""; }; @@ -1230,7 +1278,9 @@ 276E5CF91CDB57AA003FF4B4 /* tree */, 2794D8551CE7821B00FADD0F /* antlr4-common.h */, 27AC52CF1CE773A80093AAAB /* antlr4-runtime.h */, + 2793DCA01F08095F00A84290 /* ANTLRErrorListener.cpp */, 276E5C0C1CDB57AA003FF4B4 /* ANTLRErrorListener.h */, + 2793DCA11F08095F00A84290 /* ANTLRErrorStrategy.cpp */, 276E5C0D1CDB57AA003FF4B4 /* ANTLRErrorStrategy.h */, 276E5C0E1CDB57AA003FF4B4 /* ANTLRFileStream.cpp */, 276E5C0F1CDB57AA003FF4B4 /* ANTLRFileStream.h */, @@ -1266,7 +1316,6 @@ 276E5CBD1CDB57AA003FF4B4 /* InterpreterRuleContext.h */, 276E5CBE1CDB57AA003FF4B4 /* IntStream.cpp */, 276E5CBF1CDB57AA003FF4B4 /* IntStream.h */, - 276E5CC01CDB57AA003FF4B4 /* IRecognizer.h */, 276E5CC11CDB57AA003FF4B4 /* Lexer.cpp */, 276E5CC21CDB57AA003FF4B4 /* Lexer.h */, 276E5CC31CDB57AA003FF4B4 /* LexerInterpreter.cpp */, @@ -1295,8 +1344,10 @@ 27B36AC51DACE7AF0069C868 /* RuleContextWithAltNum.h */, 27745EFB1CE49C000067C6A3 /* RuntimeMetaData.cpp */, 27745EFC1CE49C000067C6A3 /* RuntimeMetaData.h */, + 2793DCA21F08095F00A84290 /* Token.cpp */, 276E5CF01CDB57AA003FF4B4 
/* Token.h */, 276E5CF21CDB57AA003FF4B4 /* TokenFactory.h */, + 2793DC841F08083F00A84290 /* TokenSource.cpp */, 276E5CF41CDB57AA003FF4B4 /* TokenSource.h */, 276E5CF51CDB57AA003FF4B4 /* TokenStream.cpp */, 276E5CF61CDB57AA003FF4B4 /* TokenStream.h */, @@ -1308,6 +1359,7 @@ 276E5D251CDB57AA003FF4B4 /* UnbufferedTokenStream.h */, 276E5D271CDB57AA003FF4B4 /* Vocabulary.cpp */, 276E5D281CDB57AA003FF4B4 /* Vocabulary.h */, + 2793DCA31F08095F00A84290 /* WritableToken.cpp */, 276E5D2A1CDB57AA003FF4B4 /* WritableToken.h */, ); name = runtime; @@ -1350,6 +1402,7 @@ 276E5C321CDB57AA003FF4B4 /* BasicState.h */, 276E5C331CDB57AA003FF4B4 /* BlockEndState.cpp */, 276E5C341CDB57AA003FF4B4 /* BlockEndState.h */, + 2793DCB01F08099C00A84290 /* BlockStartState.cpp */, 276E5C351CDB57AA003FF4B4 /* BlockStartState.h */, 276E5C371CDB57AA003FF4B4 /* ContextSensitivityInfo.cpp */, 276E5C381CDB57AA003FF4B4 /* ContextSensitivityInfo.h */, @@ -1365,6 +1418,7 @@ 276E5C421CDB57AA003FF4B4 /* EpsilonTransition.h */, 276E5C431CDB57AA003FF4B4 /* ErrorInfo.cpp */, 276E5C441CDB57AA003FF4B4 /* ErrorInfo.h */, + 2793DCB11F08099C00A84290 /* LexerAction.cpp */, 276E5C451CDB57AA003FF4B4 /* LexerAction.h */, 276E5C461CDB57AA003FF4B4 /* LexerActionExecutor.cpp */, 276E5C471CDB57AA003FF4B4 /* LexerActionExecutor.h */, @@ -1483,6 +1537,7 @@ 276E5CE41CDB57AA003FF4B4 /* support */ = { isa = PBXGroup; children = ( + 2793DC9C1F08090D00A84290 /* Any.cpp */, 27F4A8551D4CEB2A00E067EE /* Any.h */, 276E5CE51CDB57AA003FF4B4 /* Arrays.cpp */, 276E5CE61CDB57AA003FF4B4 /* Arrays.h */, @@ -1504,6 +1559,7 @@ 276E5D061CDB57AA003FF4B4 /* pattern */, 27DB448A1D045537007E790B /* xpath */, 276E5CFA1CDB57AA003FF4B4 /* AbstractParseTreeVisitor.h */, + 2793DC941F0808E100A84290 /* ErrorNode.cpp */, 276E5CFB1CDB57AA003FF4B4 /* ErrorNode.h */, 276E5CFC1CDB57AA003FF4B4 /* ErrorNodeImpl.cpp */, 276E5CFD1CDB57AA003FF4B4 /* ErrorNodeImpl.h */, @@ -1511,11 +1567,14 @@ 27D414511DEB0D3D00D0F3F9 /* IterativeParseTreeWalker.h */, 
276566DF1DA93BFB000869BE /* ParseTree.cpp */, 276E5CFE1CDB57AA003FF4B4 /* ParseTree.h */, + 2793DC8C1F08088F00A84290 /* ParseTreeListener.cpp */, 276E5D001CDB57AA003FF4B4 /* ParseTreeListener.h */, 276E5D021CDB57AA003FF4B4 /* ParseTreeProperty.h */, + 2793DC951F0808E100A84290 /* ParseTreeVisitor.cpp */, 276E5D031CDB57AA003FF4B4 /* ParseTreeVisitor.h */, 276E5D041CDB57AA003FF4B4 /* ParseTreeWalker.cpp */, 276E5D051CDB57AA003FF4B4 /* ParseTreeWalker.h */, + 2793DC901F0808A200A84290 /* TerminalNode.cpp */, 276E5D181CDB57AA003FF4B4 /* TerminalNode.h */, 276E5D191CDB57AA003FF4B4 /* TerminalNodeImpl.cpp */, 276E5D1A1CDB57AA003FF4B4 /* TerminalNodeImpl.h */, @@ -1529,6 +1588,7 @@ isa = PBXGroup; children = ( 276E5D071CDB57AA003FF4B4 /* Chunk.h */, + 2793DC881F08087500A84290 /* Chunk.cpp */, 276E5D081CDB57AA003FF4B4 /* ParseTreeMatch.cpp */, 276E5D091CDB57AA003FF4B4 /* ParseTreeMatch.h */, 276E5D0A1CDB57AA003FF4B4 /* ParseTreePattern.cpp */, @@ -1707,7 +1767,6 @@ 27DB44CC1D0463DB007E790B /* XPathElement.h in Headers */, 276E5F581CDB57AA003FF4B4 /* LexerNoViableAltException.h in Headers */, 276E5D811CDB57AA003FF4B4 /* ATNSimulator.h in Headers */, - 276E5F461CDB57AA003FF4B4 /* IRecognizer.h in Headers */, 27DB44B61D0463CC007E790B /* XPathLexer.h in Headers */, 276E5FC41CDB57AA003FF4B4 /* guid.h in Headers */, 276E602D1CDB57AA003FF4B4 /* TagChunk.h in Headers */, @@ -1875,7 +1934,6 @@ 276E60141CDB57AA003FF4B4 /* ParseTreeMatch.h in Headers */, 276E5F571CDB57AA003FF4B4 /* LexerNoViableAltException.h in Headers */, 276E5D801CDB57AA003FF4B4 /* ATNSimulator.h in Headers */, - 276E5F451CDB57AA003FF4B4 /* IRecognizer.h in Headers */, 276E5FC31CDB57AA003FF4B4 /* guid.h in Headers */, 276E602C1CDB57AA003FF4B4 /* TagChunk.h in Headers */, 276E5E941CDB57AA003FF4B4 /* RuleStopState.h in Headers */, @@ -2033,7 +2091,6 @@ 276E60131CDB57AA003FF4B4 /* ParseTreeMatch.h in Headers */, 276E5F561CDB57AA003FF4B4 /* LexerNoViableAltException.h in Headers */, 276E5D7F1CDB57AA003FF4B4 /* 
ATNSimulator.h in Headers */, - 276E5F441CDB57AA003FF4B4 /* IRecognizer.h in Headers */, 276E5FC21CDB57AA003FF4B4 /* guid.h in Headers */, 276E602B1CDB57AA003FF4B4 /* TagChunk.h in Headers */, 276E5E931CDB57AA003FF4B4 /* RuleStopState.h in Headers */, @@ -2225,10 +2282,12 @@ 276E60451CDB57AA003FF4B4 /* TerminalNodeImpl.cpp in Sources */, 276E5DD21CDB57AA003FF4B4 /* ErrorInfo.cpp in Sources */, 276E5F551CDB57AA003FF4B4 /* LexerNoViableAltException.cpp in Sources */, + 2793DCB81F08099C00A84290 /* LexerAction.cpp in Sources */, 276E5E561CDB57AA003FF4B4 /* PlusBlockStartState.cpp in Sources */, 276E5E1D1CDB57AA003FF4B4 /* LexerSkipAction.cpp in Sources */, 276E5EBC1CDB57AA003FF4B4 /* StarLoopEntryState.cpp in Sources */, 276E5D721CDB57AA003FF4B4 /* ATNDeserializer.cpp in Sources */, + 2793DC8B1F08087500A84290 /* Chunk.cpp in Sources */, 276E5E2F1CDB57AA003FF4B4 /* LookaheadEventInfo.cpp in Sources */, 276E5DFF1CDB57AA003FF4B4 /* LexerIndexedCustomAction.cpp in Sources */, 276E60511CDB57AA003FF4B4 /* Trees.cpp in Sources */, @@ -2256,6 +2315,8 @@ 276E5E921CDB57AA003FF4B4 /* RuleStopState.cpp in Sources */, 276E60631CDB57AA003FF4B4 /* UnbufferedTokenStream.cpp in Sources */, 276E5DDB1CDB57AA003FF4B4 /* LexerActionExecutor.cpp in Sources */, + 2793DC981F0808E100A84290 /* ErrorNode.cpp in Sources */, + 2793DCAF1F08095F00A84290 /* WritableToken.cpp in Sources */, 276E5E9E1CDB57AA003FF4B4 /* SemanticContext.cpp in Sources */, 276E5EC81CDB57AA003FF4B4 /* Transition.cpp in Sources */, 276E601E1CDB57AA003FF4B4 /* ParseTreePatternMatcher.cpp in Sources */, @@ -2263,12 +2324,15 @@ 276E5D481CDB57AA003FF4B4 /* ActionTransition.cpp in Sources */, 276E5DC61CDB57AA003FF4B4 /* EmptyPredictionContext.cpp in Sources */, 276E5ED41CDB57AA003FF4B4 /* BailErrorStrategy.cpp in Sources */, + 2793DC9B1F0808E100A84290 /* ParseTreeVisitor.cpp in Sources */, + 2793DCAC1F08095F00A84290 /* Token.cpp in Sources */, 276E5FA31CDB57AA003FF4B4 /* Recognizer.cpp in Sources */, 276E5D6C1CDB57AA003FF4B4 /* 
ATNDeserializationOptions.cpp in Sources */, 276E60361CDB57AA003FF4B4 /* TokenTagToken.cpp in Sources */, 27DB44D51D0463DB007E790B /* XPathTokenElement.cpp in Sources */, 27DB44D11D0463DB007E790B /* XPathRuleElement.cpp in Sources */, 276E5DED1CDB57AA003FF4B4 /* LexerATNSimulator.cpp in Sources */, + 2793DCB51F08099C00A84290 /* BlockStartState.cpp in Sources */, 276E606C1CDB57AA003FF4B4 /* Vocabulary.cpp in Sources */, 276E5F1C1CDB57AA003FF4B4 /* LexerDFASerializer.cpp in Sources */, 276E60181CDB57AA003FF4B4 /* ParseTreePattern.cpp in Sources */, @@ -2293,7 +2357,9 @@ 276E5D781CDB57AA003FF4B4 /* ATNSerializer.cpp in Sources */, 27745F051CE49C000067C6A3 /* RuntimeMetaData.cpp in Sources */, 276E5DAE1CDB57AA003FF4B4 /* ContextSensitivityInfo.cpp in Sources */, + 2793DCA61F08095F00A84290 /* ANTLRErrorListener.cpp in Sources */, 276E5D661CDB57AA003FF4B4 /* ATNConfigSet.cpp in Sources */, + 2793DC9F1F08090D00A84290 /* Any.cpp in Sources */, 276E5FAF1CDB57AA003FF4B4 /* Arrays.cpp in Sources */, 276E5ECE1CDB57AA003FF4B4 /* WildcardTransition.cpp in Sources */, 276E5E861CDB57AA003FF4B4 /* RangeTransition.cpp in Sources */, @@ -2301,6 +2367,7 @@ 276E5D9C1CDB57AA003FF4B4 /* BasicState.cpp in Sources */, 276E5FC11CDB57AA003FF4B4 /* guid.cpp in Sources */, 276E5E801CDB57AA003FF4B4 /* ProfilingATNSimulator.cpp in Sources */, + 2793DCA91F08095F00A84290 /* ANTLRErrorStrategy.cpp in Sources */, 276E5F401CDB57AA003FF4B4 /* IntStream.cpp in Sources */, 276E5F5B1CDB57AA003FF4B4 /* ListTokenSource.cpp in Sources */, 276E5F6D1CDB57AA003FF4B4 /* MurmurHash.cpp in Sources */, @@ -2315,6 +2382,7 @@ 27DB44CF1D0463DB007E790B /* XPathRuleAnywhereElement.cpp in Sources */, 276E5E441CDB57AA003FF4B4 /* OrderedATNConfigSet.cpp in Sources */, 276E5DCC1CDB57AA003FF4B4 /* EpsilonTransition.cpp in Sources */, + 2793DC8F1F08088F00A84290 /* ParseTreeListener.cpp in Sources */, 276E5D5A1CDB57AA003FF4B4 /* ATN.cpp in Sources */, 276E5EE61CDB57AA003FF4B4 /* CharStream.cpp in Sources */, 
276E5EE01CDB57AA003FF4B4 /* BufferedTokenStream.cpp in Sources */, @@ -2333,6 +2401,8 @@ 276E5DC01CDB57AA003FF4B4 /* DecisionState.cpp in Sources */, 276E5E981CDB57AA003FF4B4 /* RuleTransition.cpp in Sources */, 276E5EF81CDB57AA003FF4B4 /* CommonTokenStream.cpp in Sources */, + 2793DC871F08083F00A84290 /* TokenSource.cpp in Sources */, + 2793DC931F0808A200A84290 /* TerminalNode.cpp in Sources */, 276E60121CDB57AA003FF4B4 /* ParseTreeMatch.cpp in Sources */, 276566E21DA93BFB000869BE /* ParseTree.cpp in Sources */, 276E5EEC1CDB57AA003FF4B4 /* CommonToken.cpp in Sources */, @@ -2365,10 +2435,12 @@ 276E60441CDB57AA003FF4B4 /* TerminalNodeImpl.cpp in Sources */, 276E5DD11CDB57AA003FF4B4 /* ErrorInfo.cpp in Sources */, 276E5F541CDB57AA003FF4B4 /* LexerNoViableAltException.cpp in Sources */, + 2793DCB71F08099C00A84290 /* LexerAction.cpp in Sources */, 276E5E551CDB57AA003FF4B4 /* PlusBlockStartState.cpp in Sources */, 276E5E1C1CDB57AA003FF4B4 /* LexerSkipAction.cpp in Sources */, 276E5EBB1CDB57AA003FF4B4 /* StarLoopEntryState.cpp in Sources */, 276E5D711CDB57AA003FF4B4 /* ATNDeserializer.cpp in Sources */, + 2793DC8A1F08087500A84290 /* Chunk.cpp in Sources */, 276E5E2E1CDB57AA003FF4B4 /* LookaheadEventInfo.cpp in Sources */, 276E5DFE1CDB57AA003FF4B4 /* LexerIndexedCustomAction.cpp in Sources */, 276E60501CDB57AA003FF4B4 /* Trees.cpp in Sources */, @@ -2396,6 +2468,8 @@ 276E5E911CDB57AA003FF4B4 /* RuleStopState.cpp in Sources */, 276E60621CDB57AA003FF4B4 /* UnbufferedTokenStream.cpp in Sources */, 276E5DDA1CDB57AA003FF4B4 /* LexerActionExecutor.cpp in Sources */, + 2793DC971F0808E100A84290 /* ErrorNode.cpp in Sources */, + 2793DCAE1F08095F00A84290 /* WritableToken.cpp in Sources */, 276E5E9D1CDB57AA003FF4B4 /* SemanticContext.cpp in Sources */, 276E5EC71CDB57AA003FF4B4 /* Transition.cpp in Sources */, 276E601D1CDB57AA003FF4B4 /* ParseTreePatternMatcher.cpp in Sources */, @@ -2403,12 +2477,15 @@ 276E5D471CDB57AA003FF4B4 /* ActionTransition.cpp in Sources */, 
276E5DC51CDB57AA003FF4B4 /* EmptyPredictionContext.cpp in Sources */, 276E5ED31CDB57AA003FF4B4 /* BailErrorStrategy.cpp in Sources */, + 2793DC9A1F0808E100A84290 /* ParseTreeVisitor.cpp in Sources */, + 2793DCAB1F08095F00A84290 /* Token.cpp in Sources */, 276E5FA21CDB57AA003FF4B4 /* Recognizer.cpp in Sources */, 276E5D6B1CDB57AA003FF4B4 /* ATNDeserializationOptions.cpp in Sources */, 276E60351CDB57AA003FF4B4 /* TokenTagToken.cpp in Sources */, 27DB44C31D0463DA007E790B /* XPathTokenElement.cpp in Sources */, 27DB44BF1D0463DA007E790B /* XPathRuleElement.cpp in Sources */, 276E5DEC1CDB57AA003FF4B4 /* LexerATNSimulator.cpp in Sources */, + 2793DCB41F08099C00A84290 /* BlockStartState.cpp in Sources */, 276E606B1CDB57AA003FF4B4 /* Vocabulary.cpp in Sources */, 276E5F1B1CDB57AA003FF4B4 /* LexerDFASerializer.cpp in Sources */, 276E60171CDB57AA003FF4B4 /* ParseTreePattern.cpp in Sources */, @@ -2433,7 +2510,9 @@ 276E5D771CDB57AA003FF4B4 /* ATNSerializer.cpp in Sources */, 27745F041CE49C000067C6A3 /* RuntimeMetaData.cpp in Sources */, 276E5DAD1CDB57AA003FF4B4 /* ContextSensitivityInfo.cpp in Sources */, + 2793DCA51F08095F00A84290 /* ANTLRErrorListener.cpp in Sources */, 276E5D651CDB57AA003FF4B4 /* ATNConfigSet.cpp in Sources */, + 2793DC9E1F08090D00A84290 /* Any.cpp in Sources */, 276E5FAE1CDB57AA003FF4B4 /* Arrays.cpp in Sources */, 276E5ECD1CDB57AA003FF4B4 /* WildcardTransition.cpp in Sources */, 276E5E851CDB57AA003FF4B4 /* RangeTransition.cpp in Sources */, @@ -2441,6 +2520,7 @@ 276E5D9B1CDB57AA003FF4B4 /* BasicState.cpp in Sources */, 276E5FC01CDB57AA003FF4B4 /* guid.cpp in Sources */, 276E5E7F1CDB57AA003FF4B4 /* ProfilingATNSimulator.cpp in Sources */, + 2793DCA81F08095F00A84290 /* ANTLRErrorStrategy.cpp in Sources */, 276E5F3F1CDB57AA003FF4B4 /* IntStream.cpp in Sources */, 276E5F5A1CDB57AA003FF4B4 /* ListTokenSource.cpp in Sources */, 276E5F6C1CDB57AA003FF4B4 /* MurmurHash.cpp in Sources */, @@ -2455,6 +2535,7 @@ 27DB44BD1D0463DA007E790B /* 
XPathRuleAnywhereElement.cpp in Sources */, 276E5E431CDB57AA003FF4B4 /* OrderedATNConfigSet.cpp in Sources */, 276E5DCB1CDB57AA003FF4B4 /* EpsilonTransition.cpp in Sources */, + 2793DC8E1F08088F00A84290 /* ParseTreeListener.cpp in Sources */, 276E5D591CDB57AA003FF4B4 /* ATN.cpp in Sources */, 276E5EE51CDB57AA003FF4B4 /* CharStream.cpp in Sources */, 276E5EDF1CDB57AA003FF4B4 /* BufferedTokenStream.cpp in Sources */, @@ -2473,6 +2554,8 @@ 276E5DBF1CDB57AA003FF4B4 /* DecisionState.cpp in Sources */, 276E5E971CDB57AA003FF4B4 /* RuleTransition.cpp in Sources */, 276E5EF71CDB57AA003FF4B4 /* CommonTokenStream.cpp in Sources */, + 2793DC861F08083F00A84290 /* TokenSource.cpp in Sources */, + 2793DC921F0808A200A84290 /* TerminalNode.cpp in Sources */, 276E60111CDB57AA003FF4B4 /* ParseTreeMatch.cpp in Sources */, 276566E11DA93BFB000869BE /* ParseTree.cpp in Sources */, 276E5EEB1CDB57AA003FF4B4 /* CommonToken.cpp in Sources */, @@ -2505,10 +2588,12 @@ 276E5DB21CDB57AA003FF4B4 /* DecisionEventInfo.cpp in Sources */, 276E60431CDB57AA003FF4B4 /* TerminalNodeImpl.cpp in Sources */, 276E5DD01CDB57AA003FF4B4 /* ErrorInfo.cpp in Sources */, + 2793DCB61F08099C00A84290 /* LexerAction.cpp in Sources */, 276E5F531CDB57AA003FF4B4 /* LexerNoViableAltException.cpp in Sources */, 276E5E541CDB57AA003FF4B4 /* PlusBlockStartState.cpp in Sources */, 276E5E1B1CDB57AA003FF4B4 /* LexerSkipAction.cpp in Sources */, 276E5EBA1CDB57AA003FF4B4 /* StarLoopEntryState.cpp in Sources */, + 2793DC891F08087500A84290 /* Chunk.cpp in Sources */, 276E5D701CDB57AA003FF4B4 /* ATNDeserializer.cpp in Sources */, 276E5E2D1CDB57AA003FF4B4 /* LookaheadEventInfo.cpp in Sources */, 276E5DFD1CDB57AA003FF4B4 /* LexerIndexedCustomAction.cpp in Sources */, @@ -2536,6 +2621,8 @@ 276E60611CDB57AA003FF4B4 /* UnbufferedTokenStream.cpp in Sources */, 276E5DD91CDB57AA003FF4B4 /* LexerActionExecutor.cpp in Sources */, 27DB449D1D045537007E790B /* XPath.cpp in Sources */, + 2793DC961F0808E100A84290 /* ErrorNode.cpp in Sources */, + 
2793DCAD1F08095F00A84290 /* WritableToken.cpp in Sources */, 276E5E9C1CDB57AA003FF4B4 /* SemanticContext.cpp in Sources */, 27DB44AD1D045537007E790B /* XPathWildcardElement.cpp in Sources */, 276E5EC61CDB57AA003FF4B4 /* Transition.cpp in Sources */, @@ -2543,12 +2630,15 @@ 27DB44A51D045537007E790B /* XPathRuleElement.cpp in Sources */, 276E5F201CDB57AA003FF4B4 /* DiagnosticErrorListener.cpp in Sources */, 276E5D461CDB57AA003FF4B4 /* ActionTransition.cpp in Sources */, + 2793DC991F0808E100A84290 /* ParseTreeVisitor.cpp in Sources */, + 2793DCAA1F08095F00A84290 /* Token.cpp in Sources */, 276E5DC41CDB57AA003FF4B4 /* EmptyPredictionContext.cpp in Sources */, 276E5ED21CDB57AA003FF4B4 /* BailErrorStrategy.cpp in Sources */, 276E5FA11CDB57AA003FF4B4 /* Recognizer.cpp in Sources */, 276E5D6A1CDB57AA003FF4B4 /* ATNDeserializationOptions.cpp in Sources */, 276E60341CDB57AA003FF4B4 /* TokenTagToken.cpp in Sources */, 276E5DEB1CDB57AA003FF4B4 /* LexerATNSimulator.cpp in Sources */, + 2793DCB31F08099C00A84290 /* BlockStartState.cpp in Sources */, 276E606A1CDB57AA003FF4B4 /* Vocabulary.cpp in Sources */, 276E5F1A1CDB57AA003FF4B4 /* LexerDFASerializer.cpp in Sources */, 276E60161CDB57AA003FF4B4 /* ParseTreePattern.cpp in Sources */, @@ -2573,7 +2663,9 @@ 276E5D761CDB57AA003FF4B4 /* ATNSerializer.cpp in Sources */, 27745F031CE49C000067C6A3 /* RuntimeMetaData.cpp in Sources */, 276E5DAC1CDB57AA003FF4B4 /* ContextSensitivityInfo.cpp in Sources */, + 2793DCA41F08095F00A84290 /* ANTLRErrorListener.cpp in Sources */, 276E5D641CDB57AA003FF4B4 /* ATNConfigSet.cpp in Sources */, + 2793DC9D1F08090D00A84290 /* Any.cpp in Sources */, 276E5FAD1CDB57AA003FF4B4 /* Arrays.cpp in Sources */, 276E5ECC1CDB57AA003FF4B4 /* WildcardTransition.cpp in Sources */, 276E5E841CDB57AA003FF4B4 /* RangeTransition.cpp in Sources */, @@ -2581,6 +2673,7 @@ 276E5D9A1CDB57AA003FF4B4 /* BasicState.cpp in Sources */, 276E5FBF1CDB57AA003FF4B4 /* guid.cpp in Sources */, 276E5E7E1CDB57AA003FF4B4 /* 
ProfilingATNSimulator.cpp in Sources */, + 2793DCA71F08095F00A84290 /* ANTLRErrorStrategy.cpp in Sources */, 276E5F3E1CDB57AA003FF4B4 /* IntStream.cpp in Sources */, 276E5F591CDB57AA003FF4B4 /* ListTokenSource.cpp in Sources */, 276E5F6B1CDB57AA003FF4B4 /* MurmurHash.cpp in Sources */, @@ -2595,6 +2688,7 @@ 276E5D581CDB57AA003FF4B4 /* ATN.cpp in Sources */, 276E5EE41CDB57AA003FF4B4 /* CharStream.cpp in Sources */, 27DB44AB1D045537007E790B /* XPathWildcardAnywhereElement.cpp in Sources */, + 2793DC8D1F08088F00A84290 /* ParseTreeListener.cpp in Sources */, 276E5EDE1CDB57AA003FF4B4 /* BufferedTokenStream.cpp in Sources */, 276E5F021CDB57AA003FF4B4 /* DefaultErrorStrategy.cpp in Sources */, 276E5D401CDB57AA003FF4B4 /* AbstractPredicateTransition.cpp in Sources */, @@ -2613,6 +2707,8 @@ 276E5DBE1CDB57AA003FF4B4 /* DecisionState.cpp in Sources */, 276E5E961CDB57AA003FF4B4 /* RuleTransition.cpp in Sources */, 276E5EF61CDB57AA003FF4B4 /* CommonTokenStream.cpp in Sources */, + 2793DC851F08083F00A84290 /* TokenSource.cpp in Sources */, + 2793DC911F0808A200A84290 /* TerminalNode.cpp in Sources */, 276E60101CDB57AA003FF4B4 /* ParseTreeMatch.cpp in Sources */, 276566E01DA93BFB000869BE /* ParseTree.cpp in Sources */, 276E5EEA1CDB57AA003FF4B4 /* CommonToken.cpp in Sources */, diff --git a/runtime/Cpp/runtime/src/ANTLRErrorListener.cpp b/runtime/Cpp/runtime/src/ANTLRErrorListener.cpp index ab0d40328..6ceadb87f 100644 --- a/runtime/Cpp/runtime/src/ANTLRErrorListener.cpp +++ b/runtime/Cpp/runtime/src/ANTLRErrorListener.cpp @@ -1,3 +1,8 @@ +/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. 
+ */ + #include "ANTLRErrorListener.h" antlr4::ANTLRErrorListener::~ANTLRErrorListener() diff --git a/runtime/Cpp/runtime/src/ANTLRErrorStrategy.cpp b/runtime/Cpp/runtime/src/ANTLRErrorStrategy.cpp index 04af575c3..1655a5731 100644 --- a/runtime/Cpp/runtime/src/ANTLRErrorStrategy.cpp +++ b/runtime/Cpp/runtime/src/ANTLRErrorStrategy.cpp @@ -1,3 +1,8 @@ +/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ + #include "ANTLRErrorStrategy.h" antlr4::ANTLRErrorStrategy::~ANTLRErrorStrategy() diff --git a/runtime/Cpp/runtime/src/IntStream.h b/runtime/Cpp/runtime/src/IntStream.h index 7c7401074..9932a9722 100755 --- a/runtime/Cpp/runtime/src/IntStream.h +++ b/runtime/Cpp/runtime/src/IntStream.h @@ -27,7 +27,7 @@ namespace antlr4 { /// class ANTLR4CPP_PUBLIC IntStream { public: - static const size_t EOF = std::numeric_limits::max(); + static const size_t EOF = static_cast(-1); // std::numeric_limits::max(); doesn't work in VS 2013 /// The value returned by when the end of the stream is /// reached. diff --git a/runtime/Cpp/runtime/src/Recognizer.h b/runtime/Cpp/runtime/src/Recognizer.h index dbffde2e7..8c0bcb0ba 100755 --- a/runtime/Cpp/runtime/src/Recognizer.h +++ b/runtime/Cpp/runtime/src/Recognizer.h @@ -11,7 +11,7 @@ namespace antlr4 { class ANTLR4CPP_PUBLIC Recognizer { public: - static const size_t EOF = std::numeric_limits::max(); + static const size_t EOF = static_cast(-1); // std::numeric_limits::max(); doesn't work in VS 2013. Recognizer(); Recognizer(Recognizer const&) = delete; diff --git a/runtime/Cpp/runtime/src/Token.cpp b/runtime/Cpp/runtime/src/Token.cpp index 06047867a..31266b42d 100644 --- a/runtime/Cpp/runtime/src/Token.cpp +++ b/runtime/Cpp/runtime/src/Token.cpp @@ -1,3 +1,8 @@ +/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. 
+ * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ + #include "Token.h" antlr4::Token::~Token() { diff --git a/runtime/Cpp/runtime/src/Token.h b/runtime/Cpp/runtime/src/Token.h index 2560c7f1b..a7c1594ff 100755 --- a/runtime/Cpp/runtime/src/Token.h +++ b/runtime/Cpp/runtime/src/Token.h @@ -18,7 +18,7 @@ namespace antlr4 { /// During lookahead operations, this "token" signifies we hit rule end ATN state /// and did not follow it despite needing to. - static const size_t EPSILON = std::numeric_limits::max() - 1; + static const size_t EPSILON = static_cast(-2); static const size_t MIN_USER_TOKEN_TYPE = 1; static const size_t EOF = IntStream::EOF; diff --git a/runtime/Cpp/runtime/src/TokenSource.cpp b/runtime/Cpp/runtime/src/TokenSource.cpp index 50b9684ec..6b9d7af2f 100644 --- a/runtime/Cpp/runtime/src/TokenSource.cpp +++ b/runtime/Cpp/runtime/src/TokenSource.cpp @@ -1,3 +1,8 @@ +/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ + #include "TokenSource.h" antlr4::TokenSource::~TokenSource() { diff --git a/runtime/Cpp/runtime/src/UnbufferedCharStream.cpp b/runtime/Cpp/runtime/src/UnbufferedCharStream.cpp index 6a9152b50..1f18d3843 100755 --- a/runtime/Cpp/runtime/src/UnbufferedCharStream.cpp +++ b/runtime/Cpp/runtime/src/UnbufferedCharStream.cpp @@ -52,7 +52,7 @@ void UnbufferedCharStream::sync(size_t want) { size_t UnbufferedCharStream::fill(size_t n) { for (size_t i = 0; i < n; i++) { - if (_data.size() > 0 && _data.back() == (uint32_t)EOF) { + if (_data.size() > 0 && _data.back() == 0xFFFF) { return i; } @@ -89,23 +89,23 @@ size_t UnbufferedCharStream::LA(ssize_t i) { } // We can look back only as many chars as we have buffered. 
- ssize_t index = (ssize_t)_p + i - 1; + ssize_t index = static_cast(_p) + i - 1; if (index < 0) { throw IndexOutOfBoundsException(); } if (i > 0) { - sync((size_t)i); // No need to sync if we look back. + sync(static_cast(i)); // No need to sync if we look back. } - if ((size_t)index >= _data.size()) { + if (static_cast(index) >= _data.size()) { return EOF; } - if (_data[(size_t)index] == (uint32_t)EOF) { + if (_data[static_cast(index)] == 0xFFFF) { return EOF; } - return _data[(size_t)index]; + return _data[static_cast(index)]; } ssize_t UnbufferedCharStream::mark() { @@ -113,13 +113,13 @@ ssize_t UnbufferedCharStream::mark() { _lastCharBufferStart = _lastChar; } - ssize_t mark = -(ssize_t)_numMarkers - 1; + ssize_t mark = -static_cast(_numMarkers) - 1; _numMarkers++; return mark; } void UnbufferedCharStream::release(ssize_t marker) { - ssize_t expectedMark = -(ssize_t)_numMarkers; + ssize_t expectedMark = -static_cast(_numMarkers); if (marker != expectedMark) { throw IllegalStateException("release() called with an invalid marker."); } @@ -147,16 +147,16 @@ void UnbufferedCharStream::seek(size_t index) { } // index == to bufferStartIndex should set p to 0 - ssize_t i = (ssize_t)index - (ssize_t)getBufferStartIndex(); + ssize_t i = static_cast(index) - static_cast(getBufferStartIndex()); if (i < 0) { throw IllegalArgumentException(std::string("cannot seek to negative index ") + std::to_string(index)); - } else if (i >= (ssize_t)_data.size()) { + } else if (i >= static_cast(_data.size())) { throw UnsupportedOperationException("Seek to index outside buffer: " + std::to_string(index) + " not in " + std::to_string(getBufferStartIndex()) + ".." 
+ std::to_string(getBufferStartIndex() + _data.size())); } - _p = (size_t)i; + _p = static_cast(i); _currentCharIndex = index; if (_p == 0) { _lastChar = _lastCharBufferStart; @@ -189,7 +189,7 @@ std::string UnbufferedCharStream::getText(const misc::Interval &interval) { } } - if (interval.a < (ssize_t)bufferStartIndex || interval.b >= ssize_t(bufferStartIndex + _data.size())) { + if (interval.a < static_cast(bufferStartIndex) || interval.b >= ssize_t(bufferStartIndex + _data.size())) { throw UnsupportedOperationException("interval " + interval.toString() + " outside buffer: " + std::to_string(bufferStartIndex) + ".." + std::to_string(bufferStartIndex + _data.size() - 1)); } diff --git a/runtime/Cpp/runtime/src/UnbufferedTokenStream.cpp b/runtime/Cpp/runtime/src/UnbufferedTokenStream.cpp index fb9a59f35..98e952a0a 100755 --- a/runtime/Cpp/runtime/src/UnbufferedTokenStream.cpp +++ b/runtime/Cpp/runtime/src/UnbufferedTokenStream.cpp @@ -46,17 +46,17 @@ Token* UnbufferedTokenStream::LT(ssize_t i) } sync(i); - ssize_t index = (ssize_t)_p + i - 1; + ssize_t index = static_cast(_p) + i - 1; if (index < 0) { throw IndexOutOfBoundsException(std::string("LT(") + std::to_string(i) + std::string(") gives negative index")); } - if (index >= (ssize_t)_tokens.size()) { + if (index >= static_cast(_tokens.size())) { assert(_tokens.size() > 0 && _tokens.back()->getType() == EOF); return _tokens.back().get(); } - return _tokens[(size_t)index].get(); + return _tokens[static_cast(index)].get(); } size_t UnbufferedTokenStream::LA(ssize_t i) @@ -113,9 +113,9 @@ void UnbufferedTokenStream::consume() /// void UnbufferedTokenStream::sync(ssize_t want) { - ssize_t need = ((ssize_t)_p + want - 1) - (ssize_t)_tokens.size() + 1; // how many more elements we need? + ssize_t need = (static_cast(_p) + want - 1) - static_cast(_tokens.size()) + 1; // how many more elements we need? 
if (need > 0) { - fill((size_t)need); + fill(static_cast(need)); } } @@ -177,7 +177,7 @@ void UnbufferedTokenStream::release(ssize_t marker) if (_p > 0) { // Copy tokens[p]..tokens[n-1] to tokens[0]..tokens[(n-1)-p], reset ptrs // p is last valid token; move nothing if p==n as we have no valid char - _tokens.erase(_tokens.begin(), _tokens.begin() + (ssize_t)_p); + _tokens.erase(_tokens.begin(), _tokens.begin() + static_cast(_p)); _p = 0; } diff --git a/runtime/Cpp/runtime/src/WritableToken.cpp b/runtime/Cpp/runtime/src/WritableToken.cpp index 2e3b01241..a30cd96f1 100644 --- a/runtime/Cpp/runtime/src/WritableToken.cpp +++ b/runtime/Cpp/runtime/src/WritableToken.cpp @@ -1,3 +1,8 @@ +/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ + #include "WritableToken.h" antlr4::WritableToken::~WritableToken() { diff --git a/runtime/Cpp/runtime/src/antlr4-common.h b/runtime/Cpp/runtime/src/antlr4-common.h index dc0596f1d..316256276 100644 --- a/runtime/Cpp/runtime/src/antlr4-common.h +++ b/runtime/Cpp/runtime/src/antlr4-common.h @@ -63,6 +63,8 @@ typedef std::basic_string<__int32> i32string; typedef i32string UTF32String; + #else + typedef std::u32string UTF32String; #endif #ifdef ANTLR4CPP_EXPORTS diff --git a/runtime/Cpp/runtime/src/atn/ATNDeserializer.cpp b/runtime/Cpp/runtime/src/atn/ATNDeserializer.cpp index ea2e79266..c6cceda13 100755 --- a/runtime/Cpp/runtime/src/atn/ATNDeserializer.cpp +++ b/runtime/Cpp/runtime/src/atn/ATNDeserializer.cpp @@ -752,6 +752,7 @@ Ref ATNDeserializer::lexerActionFactory(LexerActionType type, int d return std::make_shared(data1); default: - throw IllegalArgumentException("The specified lexer action type " + std::to_string((size_t)type) + " is not valid."); + throw IllegalArgumentException("The specified lexer action type " + std::to_string(static_cast(type)) + + " is not valid."); } } diff --git 
a/runtime/Cpp/runtime/src/atn/ATNSerializer.cpp b/runtime/Cpp/runtime/src/atn/ATNSerializer.cpp index 6eec3ed7f..206c74281 100755 --- a/runtime/Cpp/runtime/src/atn/ATNSerializer.cpp +++ b/runtime/Cpp/runtime/src/atn/ATNSerializer.cpp @@ -58,7 +58,7 @@ std::vector ATNSerializer::serialize() { serializeUUID(data, ATNDeserializer::SERIALIZED_UUID()); // convert grammar type to ATN const to avoid dependence on ANTLRParser - data.push_back((size_t)atn->grammarType); + data.push_back(static_cast(atn->grammarType)); data.push_back(atn->maxTokenType); size_t nedges = 0; @@ -288,7 +288,7 @@ std::vector ATNSerializer::serialize() { if (atn->grammarType == ATNType::LEXER) { data.push_back(atn->lexerActions.size()); for (Ref &action : atn->lexerActions) { - data.push_back((size_t)action->getActionType()); + data.push_back(static_cast(action->getActionType())); switch (action->getActionType()) { case LexerActionType::CHANNEL: { @@ -348,7 +348,8 @@ std::vector ATNSerializer::serialize() { default: throw IllegalArgumentException("The specified lexer action type " + - std::to_string((size_t)action->getActionType()) + " is not valid."); + std::to_string(static_cast(action->getActionType())) + + " is not valid."); } } } diff --git a/runtime/Cpp/runtime/src/atn/ATNState.h b/runtime/Cpp/runtime/src/atn/ATNState.h index a6035b4c6..96e8fedb7 100755 --- a/runtime/Cpp/runtime/src/atn/ATNState.h +++ b/runtime/Cpp/runtime/src/atn/ATNState.h @@ -77,7 +77,7 @@ namespace atn { virtual ~ATNState(); static const size_t INITIAL_NUM_TRANSITIONS = 4; - static const size_t INVALID_STATE_NUMBER = std::numeric_limits::max(); + static const size_t INVALID_STATE_NUMBER = static_cast(-1); // std::numeric_limits::max(); enum { ATN_INVALID_TYPE = 0, diff --git a/runtime/Cpp/runtime/src/atn/BlockStartState.cpp b/runtime/Cpp/runtime/src/atn/BlockStartState.cpp index b8ec09440..44cca8f77 100644 --- a/runtime/Cpp/runtime/src/atn/BlockStartState.cpp +++ b/runtime/Cpp/runtime/src/atn/BlockStartState.cpp @@ -1,3 
+1,8 @@ +/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ + #include "BlockStartState.h" antlr4::atn::BlockStartState::~BlockStartState() { diff --git a/runtime/Cpp/runtime/src/atn/LL1Analyzer.cpp b/runtime/Cpp/runtime/src/atn/LL1Analyzer.cpp index 6f39129e5..d7949cd1e 100755 --- a/runtime/Cpp/runtime/src/atn/LL1Analyzer.cpp +++ b/runtime/Cpp/runtime/src/atn/LL1Analyzer.cpp @@ -144,12 +144,12 @@ void LL1Analyzer::_LOOK(ATNState *s, ATNState *stopState, Ref } else if (t->isEpsilon()) { _LOOK(t->target, stopState, ctx, look, lookBusy, calledRuleStack, seeThruPreds, addEOF); } else if (t->getSerializationType() == Transition::WILDCARD) { - look.addAll(misc::IntervalSet::of(Token::MIN_USER_TOKEN_TYPE, (ssize_t)_atn.maxTokenType)); + look.addAll(misc::IntervalSet::of(Token::MIN_USER_TOKEN_TYPE, static_cast(_atn.maxTokenType))); } else { misc::IntervalSet set = t->label(); if (!set.isEmpty()) { if (is(t)) { - set = set.complement(misc::IntervalSet::of(Token::MIN_USER_TOKEN_TYPE, (ssize_t)_atn.maxTokenType)); + set = set.complement(misc::IntervalSet::of(Token::MIN_USER_TOKEN_TYPE, static_cast(_atn.maxTokenType))); } look.addAll(set); } diff --git a/runtime/Cpp/runtime/src/atn/LexerAction.cpp b/runtime/Cpp/runtime/src/atn/LexerAction.cpp index 5c98cfe43..983ba6d52 100644 --- a/runtime/Cpp/runtime/src/atn/LexerAction.cpp +++ b/runtime/Cpp/runtime/src/atn/LexerAction.cpp @@ -1,3 +1,8 @@ +/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. 
+ */ + #include "LexerAction.h" antlr4::atn::LexerAction::~LexerAction() { diff --git a/runtime/Cpp/runtime/src/atn/LexerChannelAction.cpp b/runtime/Cpp/runtime/src/atn/LexerChannelAction.cpp index dac78fe0c..959beab3d 100755 --- a/runtime/Cpp/runtime/src/atn/LexerChannelAction.cpp +++ b/runtime/Cpp/runtime/src/atn/LexerChannelAction.cpp @@ -32,7 +32,7 @@ void LexerChannelAction::execute(Lexer *lexer) { size_t LexerChannelAction::hashCode() const { size_t hash = MurmurHash::initialize(); - hash = MurmurHash::update(hash, (size_t)getActionType()); + hash = MurmurHash::update(hash, static_cast(getActionType())); hash = MurmurHash::update(hash, _channel); return MurmurHash::finish(hash, 2); } diff --git a/runtime/Cpp/runtime/src/atn/LexerCustomAction.cpp b/runtime/Cpp/runtime/src/atn/LexerCustomAction.cpp index 00df7df76..1e977a310 100755 --- a/runtime/Cpp/runtime/src/atn/LexerCustomAction.cpp +++ b/runtime/Cpp/runtime/src/atn/LexerCustomAction.cpp @@ -38,7 +38,7 @@ void LexerCustomAction::execute(Lexer *lexer) { size_t LexerCustomAction::hashCode() const { size_t hash = MurmurHash::initialize(); - hash = MurmurHash::update(hash, (size_t)getActionType()); + hash = MurmurHash::update(hash, static_cast(getActionType())); hash = MurmurHash::update(hash, _ruleIndex); hash = MurmurHash::update(hash, _actionIndex); return MurmurHash::finish(hash, 3); diff --git a/runtime/Cpp/runtime/src/atn/LexerModeAction.cpp b/runtime/Cpp/runtime/src/atn/LexerModeAction.cpp index bfd6ea9b3..0bda8b7af 100755 --- a/runtime/Cpp/runtime/src/atn/LexerModeAction.cpp +++ b/runtime/Cpp/runtime/src/atn/LexerModeAction.cpp @@ -33,7 +33,7 @@ void LexerModeAction::execute(Lexer *lexer) { size_t LexerModeAction::hashCode() const { size_t hash = MurmurHash::initialize(); - hash = MurmurHash::update(hash, (size_t)getActionType()); + hash = MurmurHash::update(hash, static_cast(getActionType())); hash = MurmurHash::update(hash, _mode); return MurmurHash::finish(hash, 2); } diff --git 
a/runtime/Cpp/runtime/src/atn/LexerMoreAction.cpp b/runtime/Cpp/runtime/src/atn/LexerMoreAction.cpp index e7b01e078..99b2dd99b 100755 --- a/runtime/Cpp/runtime/src/atn/LexerMoreAction.cpp +++ b/runtime/Cpp/runtime/src/atn/LexerMoreAction.cpp @@ -34,7 +34,7 @@ void LexerMoreAction::execute(Lexer *lexer) { size_t LexerMoreAction::hashCode() const { size_t hash = MurmurHash::initialize(); - hash = MurmurHash::update(hash, (size_t)getActionType()); + hash = MurmurHash::update(hash, static_cast(getActionType())); return MurmurHash::finish(hash, 1); } diff --git a/runtime/Cpp/runtime/src/atn/LexerPopModeAction.cpp b/runtime/Cpp/runtime/src/atn/LexerPopModeAction.cpp index 3d584a3d1..cac0996f4 100755 --- a/runtime/Cpp/runtime/src/atn/LexerPopModeAction.cpp +++ b/runtime/Cpp/runtime/src/atn/LexerPopModeAction.cpp @@ -34,7 +34,7 @@ void LexerPopModeAction::execute(Lexer *lexer) { size_t LexerPopModeAction::hashCode() const { size_t hash = MurmurHash::initialize(); - hash = MurmurHash::update(hash, (size_t)getActionType()); + hash = MurmurHash::update(hash, static_cast(getActionType())); return MurmurHash::finish(hash, 1); } diff --git a/runtime/Cpp/runtime/src/atn/LexerPushModeAction.cpp b/runtime/Cpp/runtime/src/atn/LexerPushModeAction.cpp index 641537a1b..017abed04 100755 --- a/runtime/Cpp/runtime/src/atn/LexerPushModeAction.cpp +++ b/runtime/Cpp/runtime/src/atn/LexerPushModeAction.cpp @@ -33,7 +33,7 @@ void LexerPushModeAction::execute(Lexer *lexer) { size_t LexerPushModeAction::hashCode() const { size_t hash = MurmurHash::initialize(); - hash = MurmurHash::update(hash, (size_t)getActionType()); + hash = MurmurHash::update(hash, static_cast(getActionType())); hash = MurmurHash::update(hash, _mode); return MurmurHash::finish(hash, 2); } diff --git a/runtime/Cpp/runtime/src/atn/LexerSkipAction.cpp b/runtime/Cpp/runtime/src/atn/LexerSkipAction.cpp index 28cda7cc3..01947ce78 100755 --- a/runtime/Cpp/runtime/src/atn/LexerSkipAction.cpp +++ 
b/runtime/Cpp/runtime/src/atn/LexerSkipAction.cpp @@ -34,7 +34,7 @@ void LexerSkipAction::execute(Lexer *lexer) { size_t LexerSkipAction::hashCode() const { size_t hash = MurmurHash::initialize(); - hash = MurmurHash::update(hash, (size_t)getActionType()); + hash = MurmurHash::update(hash, static_cast(getActionType())); return MurmurHash::finish(hash, 1); } diff --git a/runtime/Cpp/runtime/src/atn/LexerTypeAction.cpp b/runtime/Cpp/runtime/src/atn/LexerTypeAction.cpp index c1e054b68..006778adc 100755 --- a/runtime/Cpp/runtime/src/atn/LexerTypeAction.cpp +++ b/runtime/Cpp/runtime/src/atn/LexerTypeAction.cpp @@ -33,7 +33,7 @@ void LexerTypeAction::execute(Lexer *lexer) { size_t LexerTypeAction::hashCode() const { size_t hash = MurmurHash::initialize(); - hash = MurmurHash::update(hash, (size_t)getActionType()); + hash = MurmurHash::update(hash, static_cast(getActionType())); hash = MurmurHash::update(hash, _type); return MurmurHash::finish(hash, 2); } diff --git a/runtime/Cpp/runtime/src/atn/ParserATNSimulator.cpp b/runtime/Cpp/runtime/src/atn/ParserATNSimulator.cpp index 0d7d9c54d..5e82bbaff 100755 --- a/runtime/Cpp/runtime/src/atn/ParserATNSimulator.cpp +++ b/runtime/Cpp/runtime/src/atn/ParserATNSimulator.cpp @@ -184,7 +184,7 @@ size_t ParserATNSimulator::execATN(dfa::DFA &dfa, dfa::DFAState *s0, TokenStream throw e; } - if (D->requiresFullContext && mode != PredictionMode::SLL) { + if (D->requiresFullContext && _mode != PredictionMode::SLL) { // IF PREDS, MIGHT RESOLVE TO SINGLE ALT => SLL (or syntax error) BitSet conflictingAlts; if (D->predicates.size() != 0) { @@ -283,7 +283,7 @@ dfa::DFAState *ParserATNSimulator::computeTargetState(dfa::DFA &dfa, dfa::DFASta D->isAcceptState = true; D->configs->uniqueAlt = predictedAlt; D->prediction = predictedAlt; - } else if (PredictionModeClass::hasSLLConflictTerminatingPrediction(mode, D->configs.get())) { + } else if (PredictionModeClass::hasSLLConflictTerminatingPrediction(_mode, D->configs.get())) { // MORE THAN ONE 
VIABLE ALTERNATIVE D->configs->conflictingAlts = getConflictingAlts(D->configs.get()); D->requiresFullContext = true; @@ -370,7 +370,7 @@ size_t ParserATNSimulator::execATNWithFullContext(dfa::DFA &dfa, dfa::DFAState * predictedAlt = reach->uniqueAlt; break; } - if (mode != PredictionMode::LL_EXACT_AMBIG_DETECTION) { + if (_mode != PredictionMode::LL_EXACT_AMBIG_DETECTION) { predictedAlt = PredictionModeClass::resolvesToJustOneViableAlt(altSubSets); if (predictedAlt != ATN::INVALID_ALT_NUMBER) { break; @@ -1332,11 +1332,11 @@ void ParserATNSimulator::reportAmbiguity(dfa::DFA &dfa, dfa::DFAState * /*D*/, s } void ParserATNSimulator::setPredictionMode(PredictionMode newMode) { - mode = newMode; + _mode = newMode; } atn::PredictionMode ParserATNSimulator::getPredictionMode() { - return mode; + return _mode; } Parser* ParserATNSimulator::getParser() { @@ -1352,6 +1352,6 @@ bool ParserATNSimulator::getLrLoopSetting() { } void ParserATNSimulator::InitializeInstanceFields() { - mode = PredictionMode::LL; + _mode = PredictionMode::LL; _startIndex = 0; } diff --git a/runtime/Cpp/runtime/src/atn/ParserATNSimulator.h b/runtime/Cpp/runtime/src/atn/ParserATNSimulator.h index b5c6d98a9..e2a406324 100755 --- a/runtime/Cpp/runtime/src/atn/ParserATNSimulator.h +++ b/runtime/Cpp/runtime/src/atn/ParserATNSimulator.h @@ -243,20 +243,133 @@ namespace atn { * the input.

*/ class ANTLR4CPP_PUBLIC ParserATNSimulator : public ATNSimulator { - protected: - Parser *const parser; - public: + /// Testing only! + ParserATNSimulator(const ATN &atn, std::vector &decisionToDFA, + PredictionContextCache &sharedContextCache); + + ParserATNSimulator(Parser *parser, const ATN &atn, std::vector &decisionToDFA, + PredictionContextCache &sharedContextCache); + + virtual void reset() override; + virtual void clearDFA() override; + virtual size_t adaptivePredict(TokenStream *input, size_t decision, ParserRuleContext *outerContext); + static const bool TURN_OFF_LR_LOOP_ENTRY_BRANCH_OPT; std::vector &decisionToDFA; + + /** Implements first-edge (loop entry) elimination as an optimization + * during closure operations. See antlr/antlr4#1398. + * + * The optimization is to avoid adding the loop entry config when + * the exit path can only lead back to the same + * StarLoopEntryState after popping context at the rule end state + * (traversing only epsilon edges, so we're still in closure, in + * this same rule). + * + * We need to detect any state that can reach loop entry on + * epsilon w/o exiting rule. We don't have to look at FOLLOW + * links, just ensure that all stack tops for config refer to key + * states in LR rule. + * + * To verify we are in the right situation we must first check + * closure is at a StarLoopEntryState generated during LR removal. + * Then we check that each stack top of context is a return state + * from one of these cases: + * + * 1. 'not' expr, '(' type ')' expr. The return state points at loop entry state + * 2. expr op expr. The return state is the block end of internal block of (...)* + * 3. 'between' expr 'and' expr. The return state of 2nd expr reference. + * That state points at block end of internal block of (...)*. + * 4. expr '?' expr ':' expr. The return state points at block end, + * which points at loop entry state. 
+ * + * If any is true for each stack top, then closure does not add a + * config to the current config set for edge[0], the loop entry branch. + * + * Conditions fail if any context for the current config is: + * + * a. empty (we'd fall out of expr to do a global FOLLOW which could + * even be to some weird spot in expr) or, + * b. lies outside of expr or, + * c. lies within expr but at a state not the BlockEndState + * generated during LR removal + * + * Do we need to evaluate predicates ever in closure for this case? + * + * No. Predicates, including precedence predicates, are only + * evaluated when computing a DFA start state. I.e., only before + * the lookahead (but not parser) consumes a token. + * + * There are no epsilon edges allowed in LR rule alt blocks or in + * the "primary" part (ID here). If closure is in + * StarLoopEntryState any lookahead operation will have consumed a + * token as there are no epsilon-paths that lead to + * StarLoopEntryState. We do not have to evaluate predicates + * therefore if we are in the generated StarLoopEntryState of a LR + * rule. Note that when making a prediction starting at that + * decision point, decision d=2, compute-start-state performs + * closure starting at edges[0], edges[1] emanating from + * StarLoopEntryState. That means it is not performing closure on + * StarLoopEntryState during compute-start-state. + * + * How do we know this always gives same prediction answer? + * + * Without predicates, loop entry and exit paths are ambiguous + * upon remaining input +b (in, say, a+b). Either paths lead to + * valid parses. Closure can lead to consuming + immediately or by + * falling out of this call to expr back into expr and loop back + * again to StarLoopEntryState to match +b. In this special case, + * we choose the more efficient path, which is to take the bypass + * path. + * + * The lookahead language has not changed because closure chooses + * one path over the other. 
Both paths lead to consuming the same + * remaining input during a lookahead operation. If the next token + * is an operator, lookahead will enter the choice block with + * operators. If it is not, lookahead will exit expr. Same as if + * closure had chosen to enter the choice block immediately. + * + * Closure is examining one config (some loopentrystate, some alt, + * context) which means it is considering exactly one alt. Closure + * always copies the same alt to any derived configs. + * + * How do we know this optimization doesn't mess up precedence in + * our parse trees? + * + * Looking through expr from left edge of stat only has to confirm + * that an input, say, a+b+c; begins with any valid interpretation + * of an expression. The precedence actually doesn't matter when + * making a decision in stat seeing through expr. It is only when + * parsing rule expr that we must use the precedence to get the + * right interpretation and, hence, parse tree. + */ + bool canDropLoopEntryEdgeInLeftRecursiveRule(ATNConfig *config) const; + virtual std::string getRuleName(size_t index); + + virtual Ref precedenceTransition(Ref const& config, PrecedencePredicateTransition *pt, + bool collectPredicates, bool inContext, bool fullCtx); + + void setPredictionMode(PredictionMode newMode); + PredictionMode getPredictionMode(); + + Parser* getParser(); + + virtual std::string getTokenName(size_t t); + + virtual std::string getLookaheadName(TokenStream *input); - private: /// - /// SLL, LL, or LL + exact ambig detection? - PredictionMode mode; - + /// Used for debugging in adaptivePredict around execATN but I cut + /// it out for clarity now that alg. works well. We can leave this + /// "dead" code for a bit. + /// + virtual void dumpDeadEndConfigs(NoViableAltException &nvae); + protected: + Parser *const parser; + /// /// Each prediction operation uses a cache for merge of prediction contexts. /// Don't keep around as it wastes huge amounts of memory. 
The merge cache @@ -273,20 +386,7 @@ namespace atn { size_t _startIndex; ParserRuleContext *_outerContext; dfa::DFA *_dfa; // Reference into the decisionToDFA vector. - - public: - /// Testing only! - ParserATNSimulator(const ATN &atn, std::vector &decisionToDFA, - PredictionContextCache &sharedContextCache); - - ParserATNSimulator(Parser *parser, const ATN &atn, std::vector &decisionToDFA, - PredictionContextCache &sharedContextCache); - - virtual void reset() override; - virtual void clearDFA() override; - virtual size_t adaptivePredict(TokenStream *input, size_t decision, ParserRuleContext *outerContext); - - protected: + /// /// Performs ATN simulation to compute a predicted alternative based /// upon the remaining input, but also updates the DFA cache to avoid @@ -350,7 +450,7 @@ namespace atn { // comes back with reach.uniqueAlt set to a valid alt virtual size_t execATNWithFullContext(dfa::DFA &dfa, dfa::DFAState *D, ATNConfigSet *s0, - TokenStream *input, size_t startIndex, ParserRuleContext *outerContext); // how far we got before failing over + TokenStream *input, size_t startIndex, ParserRuleContext *outerContext); // how far we got before failing over virtual std::unique_ptr computeReachSet(ATNConfigSet *closure, size_t t, bool fullCtx); @@ -549,10 +649,10 @@ namespace atn { virtual ATNState *getReachableTarget(Transition *trans, size_t ttype); virtual std::vector> getPredsForAmbigAlts(const antlrcpp::BitSet &ambigAlts, - ATNConfigSet *configs, size_t nalts); + ATNConfigSet *configs, size_t nalts); virtual std::vector getPredicatePredictions(const antlrcpp::BitSet &ambigAlts, - std::vector> altToPred); + std::vector> altToPred); /** * This method is used to improve the localization of error messages by @@ -601,7 +701,7 @@ namespace atn { * identified and {@link #adaptivePredict} should report an error instead. 
*/ size_t getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule(ATNConfigSet *configs, - ParserRuleContext *outerContext); + ParserRuleContext *outerContext); virtual size_t getAltThatFinishedDecisionEntryRule(ATNConfigSet *configs); @@ -615,7 +715,7 @@ namespace atn { * prediction, which is where predicates need to evaluate. */ std::pair splitAccordingToSemanticValidity(ATNConfigSet *configs, - ParserRuleContext *outerContext); + ParserRuleContext *outerContext); /// /// Look through a list of predicate/alt pairs, returning alts for the @@ -627,7 +727,6 @@ namespace atn { virtual antlrcpp::BitSet evalSemanticContext(std::vector predPredictions, ParserRuleContext *outerContext, bool complete); - /** * Evaluate a semantic context within a specific parser context. * @@ -672,111 +771,15 @@ namespace atn { virtual void closureCheckingStopState(Ref const& config, ATNConfigSet *configs, ATNConfig::Set &closureBusy, bool collectPredicates, bool fullCtx, int depth, bool treatEofAsEpsilon); - + /// Do the actual work of walking epsilon edges. virtual void closure_(Ref const& config, ATNConfigSet *configs, ATNConfig::Set &closureBusy, bool collectPredicates, bool fullCtx, int depth, bool treatEofAsEpsilon); - - public: - /** Implements first-edge (loop entry) elimination as an optimization - * during closure operations. See antlr/antlr4#1398. - * - * The optimization is to avoid adding the loop entry config when - * the exit path can only lead back to the same - * StarLoopEntryState after popping context at the rule end state - * (traversing only epsilon edges, so we're still in closure, in - * this same rule). - * - * We need to detect any state that can reach loop entry on - * epsilon w/o exiting rule. We don't have to look at FOLLOW - * links, just ensure that all stack tops for config refer to key - * states in LR rule. - * - * To verify we are in the right situation we must first check - * closure is at a StarLoopEntryState generated during LR removal. 
- * Then we check that each stack top of context is a return state - * from one of these cases: - * - * 1. 'not' expr, '(' type ')' expr. The return state points at loop entry state - * 2. expr op expr. The return state is the block end of internal block of (...)* - * 3. 'between' expr 'and' expr. The return state of 2nd expr reference. - * That state points at block end of internal block of (...)*. - * 4. expr '?' expr ':' expr. The return state points at block end, - * which points at loop entry state. - * - * If any is true for each stack top, then closure does not add a - * config to the current config set for edge[0], the loop entry branch. - * - * Conditions fail if any context for the current config is: - * - * a. empty (we'd fall out of expr to do a global FOLLOW which could - * even be to some weird spot in expr) or, - * b. lies outside of expr or, - * c. lies within expr but at a state not the BlockEndState - * generated during LR removal - * - * Do we need to evaluate predicates ever in closure for this case? - * - * No. Predicates, including precedence predicates, are only - * evaluated when computing a DFA start state. I.e., only before - * the lookahead (but not parser) consumes a token. - * - * There are no epsilon edges allowed in LR rule alt blocks or in - * the "primary" part (ID here). If closure is in - * StarLoopEntryState any lookahead operation will have consumed a - * token as there are no epsilon-paths that lead to - * StarLoopEntryState. We do not have to evaluate predicates - * therefore if we are in the generated StarLoopEntryState of a LR - * rule. Note that when making a prediction starting at that - * decision point, decision d=2, compute-start-state performs - * closure starting at edges[0], edges[1] emanating from - * StarLoopEntryState. That means it is not performing closure on - * StarLoopEntryState during compute-start-state. - * - * How do we know this always gives same prediction answer? 
- * - * Without predicates, loop entry and exit paths are ambiguous - * upon remaining input +b (in, say, a+b). Either paths lead to - * valid parses. Closure can lead to consuming + immediately or by - * falling out of this call to expr back into expr and loop back - * again to StarLoopEntryState to match +b. In this special case, - * we choose the more efficient path, which is to take the bypass - * path. - * - * The lookahead language has not changed because closure chooses - * one path over the other. Both paths lead to consuming the same - * remaining input during a lookahead operation. If the next token - * is an operator, lookahead will enter the choice block with - * operators. If it is not, lookahead will exit expr. Same as if - * closure had chosen to enter the choice block immediately. - * - * Closure is examining one config (some loopentrystate, some alt, - * context) which means it is considering exactly one alt. Closure - * always copies the same alt to any derived configs. - * - * How do we know this optimization doesn't mess up precedence in - * our parse trees? - * - * Looking through expr from left edge of stat only has to confirm - * that an input, say, a+b+c; begins with any valid interpretation - * of an expression. The precedence actually doesn't matter when - * making a decision in stat seeing through expr. It is only when - * parsing rule expr that we must use the precedence to get the - * right interpretation and, hence, parse tree. 
- */ - bool canDropLoopEntryEdgeInLeftRecursiveRule(ATNConfig *config) const; - virtual std::string getRuleName(size_t index); - - protected: + virtual Ref getEpsilonTarget(Ref const& config, Transition *t, bool collectPredicates, bool inContext, bool fullCtx, bool treatEofAsEpsilon); virtual Ref actionTransition(Ref const& config, ActionTransition *t); - public: - virtual Ref precedenceTransition(Ref const& config, PrecedencePredicateTransition *pt, - bool collectPredicates, bool inContext, bool fullCtx); - - protected: virtual Ref predTransition(Ref const& config, PredicateTransition *pt, bool collectPredicates, bool inContext, bool fullCtx); @@ -832,19 +835,6 @@ namespace atn { virtual antlrcpp::BitSet getConflictingAltsOrUniqueAlt(ATNConfigSet *configs); - public: - virtual std::string getTokenName(size_t t); - - virtual std::string getLookaheadName(TokenStream *input); - - /// - /// Used for debugging in adaptivePredict around execATN but I cut - /// it out for clarity now that alg. works well. We can leave this - /// "dead" code for a bit. - /// - virtual void dumpDeadEndConfigs(NoViableAltException &nvae); - - protected: virtual NoViableAltException noViableAlt(TokenStream *input, ParserRuleContext *outerContext, ATNConfigSet *configs, size_t startIndex); @@ -901,13 +891,10 @@ namespace atn { const antlrcpp::BitSet &ambigAlts, ATNConfigSet *configs); // configs that LL not SLL considered conflicting - public: - void setPredictionMode(PredictionMode newMode); - PredictionMode getPredictionMode(); - - Parser* getParser(); - private: + // SLL, LL, or LL + exact ambig detection? 
+ PredictionMode _mode; + static bool getLrLoopSetting(); void InitializeInstanceFields(); }; diff --git a/runtime/Cpp/runtime/src/atn/PredictionContext.h b/runtime/Cpp/runtime/src/atn/PredictionContext.h index fb053f14a..9a52e00e5 100755 --- a/runtime/Cpp/runtime/src/atn/PredictionContext.h +++ b/runtime/Cpp/runtime/src/atn/PredictionContext.h @@ -17,7 +17,6 @@ namespace atn { class PredictionContextMergeCache; typedef std::unordered_set, PredictionContextHasher, PredictionContextComparer> PredictionContextCache; - //typedef std::map, Ref>, Ref> PredictionContextMergeCache; class ANTLR4CPP_PUBLIC PredictionContext { public: @@ -28,10 +27,10 @@ namespace atn { /// Represents $ in an array in full context mode, when $ /// doesn't mean wildcard: $ + x = [$,x]. Here, /// $ = EMPTY_RETURN_STATE. - // ml: originally Integer.MAX_VALUE, which would be (size_t)-1 for us, but this is already used in places where + // ml: originally Integer.MAX_VALUE, which would be -1 for us, but this is already used in places where // -1 is converted to unsigned, so we use a different value here. Any value does the job provided it doesn't // conflict with real return states. - static const size_t EMPTY_RETURN_STATE = std::numeric_limits::max() - 9; + static const size_t EMPTY_RETURN_STATE = static_cast(-10); // std::numeric_limits::max() - 9; private: static const size_t INITIAL_HASH = 1; diff --git a/runtime/Cpp/runtime/src/atn/PredictionMode.h b/runtime/Cpp/runtime/src/atn/PredictionMode.h index d3de2e952..726f4cf40 100755 --- a/runtime/Cpp/runtime/src/atn/PredictionMode.h +++ b/runtime/Cpp/runtime/src/atn/PredictionMode.h @@ -15,7 +15,7 @@ namespace atn { * utility methods for analyzing configuration sets for conflicts and/or * ambiguities. */ - enum class ANTLR4CPP_PUBLIC PredictionMode { + enum class PredictionMode { /** * The SLL(*) prediction mode. This prediction mode ignores the current * parser context when making predictions. 
This is the fastest prediction diff --git a/runtime/Cpp/runtime/src/atn/SemanticContext.cpp b/runtime/Cpp/runtime/src/atn/SemanticContext.cpp index fdc272f84..0531e37f8 100755 --- a/runtime/Cpp/runtime/src/atn/SemanticContext.cpp +++ b/runtime/Cpp/runtime/src/atn/SemanticContext.cpp @@ -82,7 +82,7 @@ int SemanticContext::PrecedencePredicate::compareTo(PrecedencePredicate *o) { size_t SemanticContext::PrecedencePredicate::hashCode() const { size_t hashCode = 1; - hashCode = 31 * hashCode + (size_t)precedence; + hashCode = 31 * hashCode + static_cast(precedence); return hashCode; } diff --git a/runtime/Cpp/runtime/src/misc/Interval.cpp b/runtime/Cpp/runtime/src/misc/Interval.cpp index 325b8621f..97486bf3f 100755 --- a/runtime/Cpp/runtime/src/misc/Interval.cpp +++ b/runtime/Cpp/runtime/src/misc/Interval.cpp @@ -10,16 +10,16 @@ using namespace antlr4::misc; Interval::~Interval() = default; size_t antlr4::misc::numericToSymbol(ssize_t v) { - return (size_t)v; + return static_cast(v); } ssize_t antlr4::misc::symbolToNumeric(size_t v) { - return (ssize_t)v; + return static_cast(v); } Interval const Interval::INVALID; -Interval::Interval() : Interval((ssize_t)-1, -2) { // Need an explicit cast here for VS. +Interval::Interval() : Interval(static_cast(-1), -2) { // Need an explicit cast here for VS. } Interval::Interval(size_t a_, size_t b_) : Interval(symbolToNumeric(a_), symbolToNumeric(b_)) { @@ -41,8 +41,8 @@ bool Interval::operator == (const Interval &other) const { size_t Interval::hashCode() const { size_t hash = 23; - hash = hash * 31 + (size_t)a; - hash = hash * 31 + (size_t)b; + hash = hash * 31 + static_cast(a); + hash = hash * 31 + static_cast(b); return hash; } diff --git a/runtime/Cpp/runtime/src/support/Any.cpp b/runtime/Cpp/runtime/src/support/Any.cpp index 1404343d3..3dd1a94bf 100644 --- a/runtime/Cpp/runtime/src/support/Any.cpp +++ b/runtime/Cpp/runtime/src/support/Any.cpp @@ -1,9 +1,16 @@ +/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. 
+ * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ + #include "Any.h" -antlrcpp::Any::~Any() +using namespace antlrcpp; + +Any::~Any() { delete _ptr; } -antlrcpp::Any::Base::~Base() { +Any::Base::~Base() { } diff --git a/runtime/Cpp/runtime/src/support/Any.h b/runtime/Cpp/runtime/src/support/Any.h index f9559b30d..3d8845c70 100644 --- a/runtime/Cpp/runtime/src/support/Any.h +++ b/runtime/Cpp/runtime/src/support/Any.h @@ -19,7 +19,7 @@ namespace antlrcpp { template using StorageType = typename std::decay::type; -struct Any +struct ANTLR4CPP_PUBLIC Any { bool isNull() const { return _ptr == nullptr; } bool isNotNull() const { return _ptr != nullptr; } diff --git a/runtime/Cpp/runtime/src/tree/ErrorNode.cpp b/runtime/Cpp/runtime/src/tree/ErrorNode.cpp index 685047d20..ade2539af 100644 --- a/runtime/Cpp/runtime/src/tree/ErrorNode.cpp +++ b/runtime/Cpp/runtime/src/tree/ErrorNode.cpp @@ -1,3 +1,8 @@ +/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ + #include "tree/ErrorNode.h" antlr4::tree::ErrorNode::~ErrorNode() { diff --git a/runtime/Cpp/runtime/src/tree/IterativeParseTreeWalker.cpp b/runtime/Cpp/runtime/src/tree/IterativeParseTreeWalker.cpp index 5ce30d3a7..a4b3efd73 100644 --- a/runtime/Cpp/runtime/src/tree/IterativeParseTreeWalker.cpp +++ b/runtime/Cpp/runtime/src/tree/IterativeParseTreeWalker.cpp @@ -1,31 +1,6 @@ -/* - * [The "BSD license"] - * Copyright (c) 2012 Terence Parr - * Copyright (c) 2012 Sam Harwell - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. 
Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. The name of the author may not be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR - * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. - * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. */ #include "support/CPPUtils.h" diff --git a/runtime/Cpp/runtime/src/tree/ParseTreeListener.cpp b/runtime/Cpp/runtime/src/tree/ParseTreeListener.cpp index 820962118..ce1229758 100644 --- a/runtime/Cpp/runtime/src/tree/ParseTreeListener.cpp +++ b/runtime/Cpp/runtime/src/tree/ParseTreeListener.cpp @@ -1,3 +1,8 @@ +/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. 
+ */ + #include "ParseTreeListener.h" antlr4::tree::ParseTreeListener::~ParseTreeListener() { diff --git a/runtime/Cpp/runtime/src/tree/ParseTreeVisitor.cpp b/runtime/Cpp/runtime/src/tree/ParseTreeVisitor.cpp index 5298eee09..a329919c1 100644 --- a/runtime/Cpp/runtime/src/tree/ParseTreeVisitor.cpp +++ b/runtime/Cpp/runtime/src/tree/ParseTreeVisitor.cpp @@ -1,3 +1,8 @@ +/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ + #include "ParseTreeVisitor.h" antlr4::tree::ParseTreeVisitor::~ParseTreeVisitor() { diff --git a/runtime/Cpp/runtime/src/tree/TerminalNode.cpp b/runtime/Cpp/runtime/src/tree/TerminalNode.cpp index e41ff7e9d..d630469c7 100644 --- a/runtime/Cpp/runtime/src/tree/TerminalNode.cpp +++ b/runtime/Cpp/runtime/src/tree/TerminalNode.cpp @@ -1,3 +1,8 @@ +/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ + #include "tree/TerminalNode.h" antlr4::tree::TerminalNode::~TerminalNode() { diff --git a/runtime/Cpp/runtime/src/tree/pattern/Chunk.cpp b/runtime/Cpp/runtime/src/tree/pattern/Chunk.cpp index 7997ce867..5320f910b 100644 --- a/runtime/Cpp/runtime/src/tree/pattern/Chunk.cpp +++ b/runtime/Cpp/runtime/src/tree/pattern/Chunk.cpp @@ -1,3 +1,8 @@ +/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. 
+ */ + #include "tree/pattern/Chunk.h" antlr4::tree::pattern::Chunk::~Chunk() { diff --git a/runtime/Go/antlr/common_token_stream.go b/runtime/Go/antlr/common_token_stream.go index 0121fe8e4..3154e00ac 100644 --- a/runtime/Go/antlr/common_token_stream.go +++ b/runtime/Go/antlr/common_token_stream.go @@ -337,8 +337,8 @@ func (c *CommonTokenStream) GetTextFromInterval(interval *Interval) string { interval = NewInterval(0, len(c.tokens)-1) } - start := interval.start - stop := interval.stop + start := interval.Start + stop := interval.Stop if start < 0 || stop < 0 { return "" diff --git a/runtime/Go/antlr/common_token_stream_test.go b/runtime/Go/antlr/common_token_stream_test.go new file mode 100644 index 000000000..27cf42111 --- /dev/null +++ b/runtime/Go/antlr/common_token_stream_test.go @@ -0,0 +1,154 @@ +// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. 
+ +package antlr + +import ( + "testing" +) + +type commonTokenStreamTestLexer struct { + *BaseLexer + + tokens []Token + i int +} + +func (l *commonTokenStreamTestLexer) NextToken() Token { + tmp := l.tokens[l.i] + l.i++ + return tmp +} + +func TestCommonTokenStreamOffChannel(t *testing.T) { + assert := assertNew(t) + lexEngine := &commonTokenStreamTestLexer{ + tokens: []Token{ + newTestCommonToken(1, " ", LexerHidden), // 0 + newTestCommonToken(1, "x", LexerDefaultTokenChannel), // 1 + newTestCommonToken(1, " ", LexerHidden), // 2 + newTestCommonToken(1, "=", LexerDefaultTokenChannel), // 3 + newTestCommonToken(1, "34", LexerDefaultTokenChannel), // 4 + newTestCommonToken(1, " ", LexerHidden), // 5 + newTestCommonToken(1, " ", LexerHidden), // 6 + newTestCommonToken(1, ";", LexerDefaultTokenChannel), // 7 + newTestCommonToken(1, "\n", LexerHidden), // 9 + newTestCommonToken(TokenEOF, "", LexerDefaultTokenChannel), // 10 + }, + } + tokens := NewCommonTokenStream(lexEngine, TokenDefaultChannel) + + assert.Equal("x", tokens.LT(1).GetText()) // must skip first off channel token + tokens.Consume() + assert.Equal("=", tokens.LT(1).GetText()) + assert.Equal("x", tokens.LT(-1).GetText()) + + tokens.Consume() + assert.Equal("34", tokens.LT(1).GetText()) + assert.Equal("=", tokens.LT(-1).GetText()) + + tokens.Consume() + assert.Equal(";", tokens.LT(1).GetText()) + assert.Equal("34", tokens.LT(-1).GetText()) + + tokens.Consume() + assert.Equal(TokenEOF, tokens.LT(1).GetTokenType()) + assert.Equal(";", tokens.LT(-1).GetText()) + + assert.Equal("34", tokens.LT(-2).GetText()) + assert.Equal("=", tokens.LT(-3).GetText()) + assert.Equal("x", tokens.LT(-4).GetText()) +} + +func TestCommonTokenStreamFetchOffChannel(t *testing.T) { + assert := assertNew(t) + lexEngine := &commonTokenStreamTestLexer{ + tokens: []Token{ + newTestCommonToken(1, " ", LexerHidden), // 0 + newTestCommonToken(1, "x", LexerDefaultTokenChannel), // 1 + newTestCommonToken(1, " ", LexerHidden), // 2 + 
newTestCommonToken(1, "=", LexerDefaultTokenChannel), // 3 + newTestCommonToken(1, "34", LexerDefaultTokenChannel), // 4 + newTestCommonToken(1, " ", LexerHidden), // 5 + newTestCommonToken(1, " ", LexerHidden), // 6 + newTestCommonToken(1, ";", LexerDefaultTokenChannel), // 7 + newTestCommonToken(1, " ", LexerHidden), // 8 + newTestCommonToken(1, "\n", LexerHidden), // 9 + newTestCommonToken(TokenEOF, "", LexerDefaultTokenChannel), // 10 + }, + } + tokens := NewCommonTokenStream(lexEngine, TokenDefaultChannel) + tokens.Fill() + + assert.Nil(tokens.getHiddenTokensToLeft(0, -1)) + assert.Nil(tokens.getHiddenTokensToRight(0, -1)) + + assert.Equal("[[@0,0:0=' ',<1>,channel=1,0:-1]]", tokensToString(tokens.getHiddenTokensToLeft(1, -1))) + assert.Equal("[[@2,0:0=' ',<1>,channel=1,0:-1]]", tokensToString(tokens.getHiddenTokensToRight(1, -1))) + + assert.Nil(tokens.getHiddenTokensToLeft(2, -1)) + assert.Nil(tokens.getHiddenTokensToRight(2, -1)) + + assert.Equal("[[@2,0:0=' ',<1>,channel=1,0:-1]]", tokensToString(tokens.getHiddenTokensToLeft(3, -1))) + assert.Nil(tokens.getHiddenTokensToRight(3, -1)) + + assert.Nil(tokens.getHiddenTokensToLeft(4, -1)) + assert.Equal("[[@5,0:0=' ',<1>,channel=1,0:-1], [@6,0:0=' ',<1>,channel=1,0:-1]]", + tokensToString(tokens.getHiddenTokensToRight(4, -1))) + + assert.Nil(tokens.getHiddenTokensToLeft(5, -1)) + assert.Equal("[[@6,0:0=' ',<1>,channel=1,0:-1]]", + tokensToString(tokens.getHiddenTokensToRight(5, -1))) + + assert.Equal("[[@5,0:0=' ',<1>,channel=1,0:-1]]", + tokensToString(tokens.getHiddenTokensToLeft(6, -1))) + assert.Nil(tokens.getHiddenTokensToRight(6, -1)) + + assert.Equal("[[@5,0:0=' ',<1>,channel=1,0:-1], [@6,0:0=' ',<1>,channel=1,0:-1]]", + tokensToString(tokens.getHiddenTokensToLeft(7, -1))) + assert.Equal("[[@8,0:0=' ',<1>,channel=1,0:-1], [@9,0:0='\\n',<1>,channel=1,0:-1]]", + tokensToString(tokens.getHiddenTokensToRight(7, -1))) + + assert.Nil(tokens.getHiddenTokensToLeft(8, -1)) + 
assert.Equal("[[@9,0:0='\\n',<1>,channel=1,0:-1]]", + tokensToString(tokens.getHiddenTokensToRight(8, -1))) + + assert.Equal("[[@8,0:0=' ',<1>,channel=1,0:-1]]", + tokensToString(tokens.getHiddenTokensToLeft(9, -1))) + assert.Nil(tokens.getHiddenTokensToRight(9, -1)) + +} + +type commonTokenStreamTestLexerSingleEOF struct { + *BaseLexer + + tokens []Token + i int +} + +func (l *commonTokenStreamTestLexerSingleEOF) NextToken() Token { + return newTestCommonToken(TokenEOF, "", LexerDefaultTokenChannel) +} + +func TestCommonTokenStreamSingleEOF(t *testing.T) { + assert := assertNew(t) + lexEngine := &commonTokenStreamTestLexerSingleEOF{} + tokens := NewCommonTokenStream(lexEngine, TokenDefaultChannel) + tokens.Fill() + + assert.Equal(TokenEOF, tokens.LA(1)) + assert.Equal(0, tokens.index) + assert.Equal(1, tokens.Size()) +} + +func TestCommonTokenStreamCannotConsumeEOF(t *testing.T) { + assert := assertNew(t) + lexEngine := &commonTokenStreamTestLexerSingleEOF{} + tokens := NewCommonTokenStream(lexEngine, TokenDefaultChannel) + tokens.Fill() + assert.Equal(TokenEOF, tokens.LA(1)) + assert.Equal(0, tokens.index) + assert.Equal(1, tokens.Size()) + assert.Panics(tokens.Consume) +} diff --git a/runtime/Go/antlr/input_stream.go b/runtime/Go/antlr/input_stream.go index da9d2f7f4..5ff270f53 100644 --- a/runtime/Go/antlr/input_stream.go +++ b/runtime/Go/antlr/input_stream.go @@ -101,7 +101,7 @@ func (is *InputStream) GetTextFromTokens(start, stop Token) string { } func (is *InputStream) GetTextFromInterval(i *Interval) string { - return is.GetText(i.start, i.stop) + return is.GetText(i.Start, i.Stop) } func (*InputStream) GetSourceName() string { diff --git a/runtime/Go/antlr/interval_set.go b/runtime/Go/antlr/interval_set.go index 749ec1cb3..510d90911 100644 --- a/runtime/Go/antlr/interval_set.go +++ b/runtime/Go/antlr/interval_set.go @@ -10,33 +10,33 @@ import ( ) type Interval struct { - start int - stop int + Start int + Stop int } /* stop is not included! 
*/ func NewInterval(start, stop int) *Interval { i := new(Interval) - i.start = start - i.stop = stop + i.Start = start + i.Stop = stop return i } -func (i *Interval) contains(item int) bool { - return item >= i.start && item < i.stop +func (i *Interval) Contains(item int) bool { + return item >= i.Start && item < i.Stop } func (i *Interval) String() string { - if i.start == i.stop-1 { - return strconv.Itoa(i.start) + if i.Start == i.Stop-1 { + return strconv.Itoa(i.Start) } - return strconv.Itoa(i.start) + ".." + strconv.Itoa(i.stop-1) + return strconv.Itoa(i.Start) + ".." + strconv.Itoa(i.Stop-1) } func (i *Interval) length() int { - return i.stop - i.start + return i.Stop - i.Start } type IntervalSet struct { @@ -59,7 +59,7 @@ func (i *IntervalSet) first() int { return TokenInvalidType } - return i.intervals[0].start + return i.intervals[0].Start } func (i *IntervalSet) addOne(v int) { @@ -78,24 +78,24 @@ func (i *IntervalSet) addInterval(v *Interval) { // find insert pos for k, interval := range i.intervals { // distinct range -> insert - if v.stop < interval.start { + if v.Stop < interval.Start { i.intervals = append(i.intervals[0:k], append([]*Interval{v}, i.intervals[k:]...)...) return - } else if v.stop == interval.start { - i.intervals[k].start = v.start + } else if v.Stop == interval.Start { + i.intervals[k].Start = v.Start return - } else if v.start <= interval.stop { - i.intervals[k] = NewInterval(intMin(interval.start, v.start), intMax(interval.stop, v.stop)) + } else if v.Start <= interval.Stop { + i.intervals[k] = NewInterval(intMin(interval.Start, v.Start), intMax(interval.Stop, v.Stop)) // if not applying to end, merge potential overlaps if k < len(i.intervals)-1 { l := i.intervals[k] r := i.intervals[k+1] // if r contained in l - if l.stop >= r.stop { + if l.Stop >= r.Stop { i.intervals = append(i.intervals[0:k+1], i.intervals[k+2:]...) 
- } else if l.stop >= r.start { // partial overlap - i.intervals[k] = NewInterval(l.start, r.stop) + } else if l.Stop >= r.Start { // partial overlap + i.intervals[k] = NewInterval(l.Start, r.Stop) i.intervals = append(i.intervals[0:k+1], i.intervals[k+2:]...) } } @@ -111,7 +111,7 @@ func (i *IntervalSet) addSet(other *IntervalSet) *IntervalSet { if other.intervals != nil { for k := 0; k < len(other.intervals); k++ { i2 := other.intervals[k] - i.addInterval(NewInterval(i2.start, i2.stop)) + i.addInterval(NewInterval(i2.Start, i2.Stop)) } } return i @@ -131,7 +131,7 @@ func (i *IntervalSet) contains(item int) bool { return false } for k := 0; k < len(i.intervals); k++ { - if i.intervals[k].contains(item) { + if i.intervals[k].Contains(item) { return true } } @@ -149,29 +149,29 @@ func (i *IntervalSet) length() int { } func (i *IntervalSet) removeRange(v *Interval) { - if v.start == v.stop-1 { - i.removeOne(v.start) + if v.Start == v.Stop-1 { + i.removeOne(v.Start) } else if i.intervals != nil { k := 0 for n := 0; n < len(i.intervals); n++ { ni := i.intervals[k] // intervals are ordered - if v.stop <= ni.start { + if v.Stop <= ni.Start { return - } else if v.start > ni.start && v.stop < ni.stop { - i.intervals[k] = NewInterval(ni.start, v.start) - x := NewInterval(v.stop, ni.stop) + } else if v.Start > ni.Start && v.Stop < ni.Stop { + i.intervals[k] = NewInterval(ni.Start, v.Start) + x := NewInterval(v.Stop, ni.Stop) // i.intervals.splice(k, 0, x) i.intervals = append(i.intervals[0:k], append([]*Interval{x}, i.intervals[k:]...)...) return - } else if v.start <= ni.start && v.stop >= ni.stop { + } else if v.Start <= ni.Start && v.Stop >= ni.Stop { // i.intervals.splice(k, 1) i.intervals = append(i.intervals[0:k], i.intervals[k+1:]...) 
k = k - 1 // need another pass - } else if v.start < ni.stop { - i.intervals[k] = NewInterval(ni.start, v.start) - } else if v.stop < ni.stop { - i.intervals[k] = NewInterval(v.stop, ni.stop) + } else if v.Start < ni.Stop { + i.intervals[k] = NewInterval(ni.Start, v.Start) + } else if v.Stop < ni.Stop { + i.intervals[k] = NewInterval(v.Stop, ni.Stop) } k++ } @@ -183,21 +183,21 @@ func (i *IntervalSet) removeOne(v int) { for k := 0; k < len(i.intervals); k++ { ki := i.intervals[k] // intervals i ordered - if v < ki.start { + if v < ki.Start { return - } else if v == ki.start && v == ki.stop-1 { + } else if v == ki.Start && v == ki.Stop-1 { // i.intervals.splice(k, 1) i.intervals = append(i.intervals[0:k], i.intervals[k+1:]...) return - } else if v == ki.start { - i.intervals[k] = NewInterval(ki.start+1, ki.stop) + } else if v == ki.Start { + i.intervals[k] = NewInterval(ki.Start+1, ki.Stop) return - } else if v == ki.stop-1 { - i.intervals[k] = NewInterval(ki.start, ki.stop-1) + } else if v == ki.Stop-1 { + i.intervals[k] = NewInterval(ki.Start, ki.Stop-1) return - } else if v < ki.stop-1 { - x := NewInterval(ki.start, v) - ki.start = v + 1 + } else if v < ki.Stop-1 { + x := NewInterval(ki.Start, v) + ki.Start = v + 1 // i.intervals.splice(k, 0, x) i.intervals = append(i.intervals[0:k], append([]*Interval{x}, i.intervals[k:]...)...) 
return @@ -228,14 +228,14 @@ func (i *IntervalSet) toCharString() string { for j := 0; j < len(i.intervals); j++ { v := i.intervals[j] - if v.stop == v.start+1 { - if v.start == TokenEOF { + if v.Stop == v.Start+1 { + if v.Start == TokenEOF { names = append(names, "") } else { - names = append(names, ("'" + string(v.start) + "'")) + names = append(names, ("'" + string(v.Start) + "'")) } } else { - names = append(names, "'"+string(v.start)+"'..'"+string(v.stop-1)+"'") + names = append(names, "'"+string(v.Start)+"'..'"+string(v.Stop-1)+"'") } } if len(names) > 1 { @@ -250,14 +250,14 @@ func (i *IntervalSet) toIndexString() string { names := make([]string, 0) for j := 0; j < len(i.intervals); j++ { v := i.intervals[j] - if v.stop == v.start+1 { - if v.start == TokenEOF { + if v.Stop == v.Start+1 { + if v.Start == TokenEOF { names = append(names, "") } else { - names = append(names, strconv.Itoa(v.start)) + names = append(names, strconv.Itoa(v.Start)) } } else { - names = append(names, strconv.Itoa(v.start)+".."+strconv.Itoa(v.stop-1)) + names = append(names, strconv.Itoa(v.Start)+".."+strconv.Itoa(v.Stop-1)) } } if len(names) > 1 { @@ -270,7 +270,7 @@ func (i *IntervalSet) toIndexString() string { func (i *IntervalSet) toTokenString(literalNames []string, symbolicNames []string) string { names := make([]string, 0) for _, v := range i.intervals { - for j := v.start; j < v.stop; j++ { + for j := v.Start; j < v.Stop; j++ { names = append(names, i.elementName(literalNames, symbolicNames, j)) } } diff --git a/runtime/Go/antlr/lexer.go b/runtime/Go/antlr/lexer.go index ec0e27945..02deaf99c 100644 --- a/runtime/Go/antlr/lexer.go +++ b/runtime/Go/antlr/lexer.go @@ -21,11 +21,11 @@ type Lexer interface { Emit() Token - setChannel(int) - pushMode(int) - popMode() int - setType(int) - setMode(int) + SetChannel(int) + PushMode(int) + PopMode() int + SetType(int) + SetMode(int) } type BaseLexer struct { @@ -150,7 +150,7 @@ func (b *BaseLexer) GetSourceName() string { return 
b.GrammarFileName } -func (b *BaseLexer) setChannel(v int) { +func (b *BaseLexer) SetChannel(v int) { b.channel = v } @@ -250,11 +250,11 @@ func (b *BaseLexer) More() { b.thetype = LexerMore } -func (b *BaseLexer) setMode(m int) { +func (b *BaseLexer) SetMode(m int) { b.mode = m } -func (b *BaseLexer) pushMode(m int) { +func (b *BaseLexer) PushMode(m int) { if LexerATNSimulatorDebug { fmt.Println("pushMode " + strconv.Itoa(m)) } @@ -262,7 +262,7 @@ func (b *BaseLexer) pushMode(m int) { b.mode = m } -func (b *BaseLexer) popMode() int { +func (b *BaseLexer) PopMode() int { if len(b.modeStack) == 0 { panic("Empty Stack") } @@ -331,7 +331,7 @@ func (b *BaseLexer) GetType() int { return b.thetype } -func (b *BaseLexer) setType(t int) { +func (b *BaseLexer) SetType(t int) { b.thetype = t } @@ -361,7 +361,7 @@ func (b *BaseLexer) GetATN() *ATN { // Return a list of all Token objects in input char stream. // Forces load of all tokens. Does not include EOF token. // / -func (b *BaseLexer) getAllTokens() []Token { +func (b *BaseLexer) GetAllTokens() []Token { vl := b.Virt tokens := make([]Token, 0) t := vl.NextToken() diff --git a/runtime/Go/antlr/lexer_action.go b/runtime/Go/antlr/lexer_action.go index 3ca5e9ff3..20df84f94 100644 --- a/runtime/Go/antlr/lexer_action.go +++ b/runtime/Go/antlr/lexer_action.go @@ -101,7 +101,7 @@ func NewLexerTypeAction(thetype int) *LexerTypeAction { } func (l *LexerTypeAction) execute(lexer Lexer) { - lexer.setType(l.thetype) + lexer.SetType(l.thetype) } func (l *LexerTypeAction) hash() int { @@ -145,7 +145,7 @@ func NewLexerPushModeAction(mode int) *LexerPushModeAction { //

This action is implemented by calling {@link Lexer//pushMode} with the // value provided by {@link //getMode}.

func (l *LexerPushModeAction) execute(lexer Lexer) { - lexer.pushMode(l.mode) + lexer.PushMode(l.mode) } func (l *LexerPushModeAction) hash() int { @@ -190,7 +190,7 @@ var LexerPopModeActionINSTANCE = NewLexerPopModeAction() //

This action is implemented by calling {@link Lexer//popMode}.

func (l *LexerPopModeAction) execute(lexer Lexer) { - lexer.popMode() + lexer.PopMode() } func (l *LexerPopModeAction) String() string { @@ -242,7 +242,7 @@ func NewLexerModeAction(mode int) *LexerModeAction { //

This action is implemented by calling {@link Lexer//mode} with the // value provided by {@link //getMode}.

func (l *LexerModeAction) execute(lexer Lexer) { - lexer.setMode(l.mode) + lexer.SetMode(l.mode) } func (l *LexerModeAction) hash() int { @@ -341,7 +341,7 @@ func NewLexerChannelAction(channel int) *LexerChannelAction { //

This action is implemented by calling {@link Lexer//setChannel} with the // value provided by {@link //getChannel}.

func (l *LexerChannelAction) execute(lexer Lexer) { - lexer.setChannel(l.channel) + lexer.SetChannel(l.channel) } func (l *LexerChannelAction) hash() int { diff --git a/runtime/Go/antlr/testing_assert_test.go b/runtime/Go/antlr/testing_assert_test.go new file mode 100644 index 000000000..f3ca0d341 --- /dev/null +++ b/runtime/Go/antlr/testing_assert_test.go @@ -0,0 +1,98 @@ +// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +// These assert functions are borrowed from https://github.com/stretchr/testify/ (MIT License) + +package antlr + +import ( + "fmt" + "reflect" + "testing" +) + +type assert struct { + t *testing.T +} + +func assertNew(t *testing.T) *assert { + return &assert{ + t: t, + } +} + +func (a *assert) Equal(expected, actual interface{}) bool { + if !objectsAreEqual(expected, actual) { + return a.Fail(fmt.Sprintf("Not equal:\n"+ + "expected: %#v\n"+ + " actual: %#v\n", expected, actual)) + } + return true +} + +func objectsAreEqual(expected, actual interface{}) bool { + if expected == nil || actual == nil { + return expected == actual + } + return reflect.DeepEqual(expected, actual) +} + +func (a *assert) Nil(object interface{}) bool { + if isNil(object) { + return true + } + return a.Fail(fmt.Sprintf("Expected nil, but got: %#v", object)) +} + +func (a *assert) NotNil(object interface{}) bool { + if !isNil(object) { + return true + } + return a.Fail("Expected value not to be nil.") +} + +// isNil checks if a specified object is nil or not, without Failing. 
+func isNil(object interface{}) bool {
+	if object == nil {
+		return true
+	}
+
+	value := reflect.ValueOf(object)
+	kind := value.Kind()
+	if kind >= reflect.Chan && kind <= reflect.Slice && value.IsNil() {
+		return true
+	}
+
+	return false
+}
+
+func (a *assert) Panics(f func()) bool {
+	if funcDidPanic, panicValue := didPanic(f); !funcDidPanic {
+		return a.Fail(fmt.Sprintf("func %#v should panic\n\r\tPanic value:\t%v", f, panicValue))
+	}
+
+	return true
+}
+
+// Fail reports a failure through the underlying *testing.T and returns false.
+func (a *assert) Fail(failureMessage string) bool {
+	a.t.Errorf("%s", failureMessage)
+	return false
+}
+
+// didPanic returns true if the function passed to it panics. Otherwise, it returns false.
+func didPanic(f func()) (bool, interface{}) {
+	didPanic := false
+	var message interface{}
+	func() {
+		defer func() {
+			if message = recover(); message != nil {
+				didPanic = true
+			}
+		}()
+		// call the target function
+		f()
+	}()
+	return didPanic, message
+}
diff --git a/runtime/Go/antlr/testing_lexer_b_test.go b/runtime/Go/antlr/testing_lexer_b_test.go
new file mode 100644
index 000000000..4ab9b340d
--- /dev/null
+++ b/runtime/Go/antlr/testing_lexer_b_test.go
@@ -0,0 +1,107 @@
+// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+/*
+LexerB is a lexer for testing purposes.
+
+This file is generated from this grammar.
+ +lexer grammar LexerB; + +ID : 'a'..'z'+; +INT : '0'..'9'+; +SEMI : ';'; +ASSIGN : '='; +PLUS : '+'; +MULT : '*'; +WS : ' '+; +*/ + +var lexerB_serializedLexerAtn = []uint16{ + 3, 24715, 42794, 33075, 47597, 16764, 15335, 30598, 22884, 2, 9, 40, 8, + 1, 4, 2, 9, 2, 4, 3, 9, 3, 4, 4, 9, 4, 4, 5, 9, 5, 4, 6, 9, 6, 4, 7, 9, + 7, 4, 8, 9, 8, 3, 2, 6, 2, 19, 10, 2, 13, 2, 14, 2, 20, 3, 3, 6, 3, 24, + 10, 3, 13, 3, 14, 3, 25, 3, 4, 3, 4, 3, 5, 3, 5, 3, 6, 3, 6, 3, 7, 3, 7, + 3, 8, 6, 8, 37, 10, 8, 13, 8, 14, 8, 38, 2, 2, 9, 3, 3, 5, 4, 7, 5, 9, + 6, 11, 7, 13, 8, 15, 9, 3, 2, 2, 2, 42, 2, 3, 3, 2, 2, 2, 2, 5, 3, 2, 2, + 2, 2, 7, 3, 2, 2, 2, 2, 9, 3, 2, 2, 2, 2, 11, 3, 2, 2, 2, 2, 13, 3, 2, + 2, 2, 2, 15, 3, 2, 2, 2, 3, 18, 3, 2, 2, 2, 5, 23, 3, 2, 2, 2, 7, 27, 3, + 2, 2, 2, 9, 29, 3, 2, 2, 2, 11, 31, 3, 2, 2, 2, 13, 33, 3, 2, 2, 2, 15, + 36, 3, 2, 2, 2, 17, 19, 4, 99, 124, 2, 18, 17, 3, 2, 2, 2, 19, 20, 3, 2, + 2, 2, 20, 18, 3, 2, 2, 2, 20, 21, 3, 2, 2, 2, 21, 4, 3, 2, 2, 2, 22, 24, + 4, 50, 59, 2, 23, 22, 3, 2, 2, 2, 24, 25, 3, 2, 2, 2, 25, 23, 3, 2, 2, + 2, 25, 26, 3, 2, 2, 2, 26, 6, 3, 2, 2, 2, 27, 28, 7, 61, 2, 2, 28, 8, 3, + 2, 2, 2, 29, 30, 7, 63, 2, 2, 30, 10, 3, 2, 2, 2, 31, 32, 7, 45, 2, 2, + 32, 12, 3, 2, 2, 2, 33, 34, 7, 44, 2, 2, 34, 14, 3, 2, 2, 2, 35, 37, 7, + 34, 2, 2, 36, 35, 3, 2, 2, 2, 37, 38, 3, 2, 2, 2, 38, 36, 3, 2, 2, 2, 38, + 39, 3, 2, 2, 2, 39, 16, 3, 2, 2, 2, 6, 2, 20, 25, 38, 2, +} + +var lexerB_lexerDeserializer = NewATNDeserializer(nil) +var lexerB_lexerAtn = lexerB_lexerDeserializer.DeserializeFromUInt16(lexerB_serializedLexerAtn) + +var lexerB_lexerChannelNames = []string{ + "DEFAULT_TOKEN_CHANNEL", "HIDDEN", +} + +var lexerB_lexerModeNames = []string{ + "DEFAULT_MODE", +} + +var lexerB_lexerLiteralNames = []string{ + "", "", "", "';'", "'='", "'+'", "'*'", +} + +var lexerB_lexerSymbolicNames = []string{ + "", "ID", "INT", "SEMI", "ASSIGN", "PLUS", "MULT", "WS", +} + +var lexerB_lexerRuleNames = []string{ + "ID", "INT", "SEMI", "ASSIGN", 
"PLUS", "MULT", "WS",
+}
+
+type LexerB struct {
+	*BaseLexer
+	channelNames []string
+	modeNames []string
+	// TODO: EOF string
+}
+
+var lexerB_lexerDecisionToDFA = make([]*DFA, len(lexerB_lexerAtn.DecisionToState))
+
+func init() {
+	for index, ds := range lexerB_lexerAtn.DecisionToState {
+		lexerB_lexerDecisionToDFA[index] = NewDFA(ds, index)
+	}
+}
+
+func NewLexerB(input CharStream) *LexerB {
+	l := new(LexerB)
+
+	l.BaseLexer = NewBaseLexer(input)
+	l.Interpreter = NewLexerATNSimulator(l, lexerB_lexerAtn, lexerB_lexerDecisionToDFA, NewPredictionContextCache())
+
+	l.channelNames = lexerB_lexerChannelNames
+	l.modeNames = lexerB_lexerModeNames
+	l.RuleNames = lexerB_lexerRuleNames
+	l.LiteralNames = lexerB_lexerLiteralNames
+	l.SymbolicNames = lexerB_lexerSymbolicNames
+	l.GrammarFileName = "LexerB.g4"
+	// TODO: l.EOF = TokenEOF
+
+	return l
+}
+
+// LexerB tokens.
+const (
+	LexerBID = 1
+	LexerBINT = 2
+	LexerBSEMI = 3
+	LexerBASSIGN = 4
+	LexerBPLUS = 5
+	LexerBMULT = 6
+	LexerBWS = 7
+)
diff --git a/runtime/Go/antlr/testing_util_test.go b/runtime/Go/antlr/testing_util_test.go
new file mode 100644
index 000000000..20428831b
--- /dev/null
+++ b/runtime/Go/antlr/testing_util_test.go
@@ -0,0 +1,30 @@
+package antlr
+
+import (
+	"fmt"
+	"strings"
+)
+
+// newTestCommonToken creates a common token with token type, text and channel
+// notice: test purpose only
+func newTestCommonToken(tokenType int, text string, channel int) *CommonToken {
+	t := new(CommonToken)
+	t.BaseToken = new(BaseToken)
+	t.tokenType = tokenType
+	t.channel = channel
+	t.text = text
+	t.line = 0
+	t.column = -1
+	return t
+}
+
+// tokensToString returns []Tokens string
+// notice: test purpose only
+func tokensToString(tokens []Token) string {
+	buf := make([]string, len(tokens))
+	for i, token := range tokens {
+		buf[i] = fmt.Sprintf("%v", token)
+	}
+
+	return "[" + strings.Join(buf, ", ") + "]"
+}
diff --git a/runtime/Go/antlr/tokenstream_rewriter.go
b/runtime/Go/antlr/tokenstream_rewriter.go new file mode 100644 index 000000000..96a03f02a --- /dev/null +++ b/runtime/Go/antlr/tokenstream_rewriter.go @@ -0,0 +1,649 @@ +// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. +package antlr + +import ( +"bytes" +"fmt" +) + + +// +// Useful for rewriting out a buffered input token stream after doing some +// augmentation or other manipulations on it. + +//

+// You can insert stuff, replace, and delete chunks. Note that the operations +// are done lazily--only if you convert the buffer to a {@link String} with +// {@link TokenStream#getText()}. This is very efficient because you are not +// moving data around all the time. As the buffer of tokens is converted to +// strings, the {@link #getText()} method(s) scan the input token stream and +// check to see if there is an operation at the current index. If so, the +// operation is done and then normal {@link String} rendering continues on the +// buffer. This is like having multiple Turing machine instruction streams +// (programs) operating on a single input tape. :)

+//

+ +// This rewriter makes no modifications to the token stream. It does not ask the +// stream to fill itself up nor does it advance the input cursor. The token +// stream {@link TokenStream#index()} will return the same value before and +// after any {@link #getText()} call.

+ +//

+// The rewriter only works on tokens that you have in the buffer and ignores the +// current input cursor. If you are buffering tokens on-demand, calling +// {@link #getText()} halfway through the input will only do rewrites for those +// tokens in the first half of the file.

+ +//

+// Since the operations are done lazily at {@link #getText}-time, operations do +// not screw up the token index values. That is, an insert operation at token +// index {@code i} does not change the index values for tokens +// {@code i}+1..n-1.

+ +//

+// Because operations never actually alter the buffer, you may always get the +// original token stream back without undoing anything. Since the instructions +// are queued up, you can easily simulate transactions and roll back any changes +// if there is an error just by removing instructions. For example,

+ +//
+// CharStream input = new ANTLRFileStream("input");
+// TLexer lex = new TLexer(input);
+// CommonTokenStream tokens = new CommonTokenStream(lex);
+// T parser = new T(tokens);
+// TokenStreamRewriter rewriter = new TokenStreamRewriter(tokens);
+// parser.startRule();
+// 
+ +//

+// Then in the rules, you can execute (assuming rewriter is visible):

+ +//
+// Token t,u;
+// ...
+// rewriter.insertAfter(t, "text to put after t");}
+// rewriter.insertAfter(u, "text after u");}
+// System.out.println(rewriter.getText());
+// 
+ +//

+// You can also have multiple "instruction streams" and get multiple rewrites +// from a single pass over the input. Just name the instruction streams and use +// that name again when printing the buffer. This could be useful for generating +// a C file and also its header file--all from the same buffer:

+ +//
+// rewriter.insertAfter("pass1", t, "text to put after t");}
+// rewriter.insertAfter("pass2", u, "text after u");}
+// System.out.println(rewriter.getText("pass1"));
+// System.out.println(rewriter.getText("pass2"));
+// 
+ +//

+// If you don't use named rewrite streams, a "default" stream is used as the +// first example shows.

+ + + +const( + Default_Program_Name = "default" + Program_Init_Size = 100 + Min_Token_Index = 0 +) + +// Define the rewrite operation hierarchy + +type RewriteOperation interface { + // Execute the rewrite operation by possibly adding to the buffer. + // Return the index of the next token to operate on. + Execute(buffer *bytes.Buffer) int + String() string + GetInstructionIndex() int + GetIndex() int + GetText() string + GetOpName() string + GetTokens() TokenStream + SetInstructionIndex(val int) + SetIndex(int) + SetText(string) + SetOpName(string) + SetTokens(TokenStream) +} + +type BaseRewriteOperation struct { + //Current index of rewrites list + instruction_index int + //Token buffer index + index int + //Substitution text + text string + //Actual operation name + op_name string + //Pointer to token steam + tokens TokenStream +} + +func (op *BaseRewriteOperation)GetInstructionIndex() int{ + return op.instruction_index +} + +func (op *BaseRewriteOperation)GetIndex() int{ + return op.index +} + +func (op *BaseRewriteOperation)GetText() string{ + return op.text +} + +func (op *BaseRewriteOperation)GetOpName() string{ + return op.op_name +} + +func (op *BaseRewriteOperation)GetTokens() TokenStream{ + return op.tokens +} + +func (op *BaseRewriteOperation)SetInstructionIndex(val int){ + op.instruction_index = val +} + +func (op *BaseRewriteOperation)SetIndex(val int) { + op.index = val +} + +func (op *BaseRewriteOperation)SetText(val string){ + op.text = val +} + +func (op *BaseRewriteOperation)SetOpName(val string){ + op.op_name = val +} + +func (op *BaseRewriteOperation)SetTokens(val TokenStream) { + op.tokens = val +} + + +func (op *BaseRewriteOperation) Execute(buffer *bytes.Buffer) int{ + return op.index +} + +func (op *BaseRewriteOperation) String() string { + return fmt.Sprintf("<%s@%d:\"%s\">", + op.op_name, + op.tokens.Get(op.GetIndex()), + op.text, + ) + +} + + +type InsertBeforeOp struct { + BaseRewriteOperation +} + +func NewInsertBeforeOp(index int, 
text string, stream TokenStream) *InsertBeforeOp{ + return &InsertBeforeOp{BaseRewriteOperation:BaseRewriteOperation{ + index:index, + text:text, + op_name:"InsertBeforeOp", + tokens:stream, + }} +} + +func (op *InsertBeforeOp) Execute(buffer *bytes.Buffer) int{ + buffer.WriteString(op.text) + if op.tokens.Get(op.index).GetTokenType() != TokenEOF{ + buffer.WriteString(op.tokens.Get(op.index).GetText()) + } + return op.index+1 +} + +func (op *InsertBeforeOp) String() string { + return op.BaseRewriteOperation.String() +} + +// Distinguish between insert after/before to do the "insert afters" +// first and then the "insert befores" at same index. Implementation +// of "insert after" is "insert before index+1". + +type InsertAfterOp struct { + BaseRewriteOperation +} + +func NewInsertAfterOp(index int, text string, stream TokenStream) *InsertAfterOp{ + return &InsertAfterOp{BaseRewriteOperation:BaseRewriteOperation{ + index:index+1, + text:text, + tokens:stream, + }} +} + +func (op *InsertAfterOp) Execute(buffer *bytes.Buffer) int { + buffer.WriteString(op.text) + if op.tokens.Get(op.index).GetTokenType() != TokenEOF{ + buffer.WriteString(op.tokens.Get(op.index).GetText()) + } + return op.index+1 +} + +func (op *InsertAfterOp) String() string { + return op.BaseRewriteOperation.String() +} + +// I'm going to try replacing range from x..y with (y-x)+1 ReplaceOp +// instructions. 
+type ReplaceOp struct{ + BaseRewriteOperation + LastIndex int +} + +func NewReplaceOp(from, to int, text string, stream TokenStream)*ReplaceOp { + return &ReplaceOp{ + BaseRewriteOperation:BaseRewriteOperation{ + index:from, + text:text, + op_name:"ReplaceOp", + tokens:stream, + }, + LastIndex:to, + } +} + +func (op *ReplaceOp)Execute(buffer *bytes.Buffer) int{ + if op.text != ""{ + buffer.WriteString(op.text) + } + return op.LastIndex +1 +} + +func (op *ReplaceOp) String() string { + if op.text == "" { + return fmt.Sprintf("", + op.tokens.Get(op.index), op.tokens.Get(op.LastIndex)) + } + return fmt.Sprintf("", + op.tokens.Get(op.index), op.tokens.Get(op.LastIndex), op.text) +} + + +type TokenStreamRewriter struct { + //Our source stream + tokens TokenStream + // You may have multiple, named streams of rewrite operations. + // I'm calling these things "programs." + // Maps String (name) → rewrite (List) + programs map[string][]RewriteOperation + last_rewrite_token_indexes map[string]int +} + +func NewTokenStreamRewriter(tokens TokenStream) *TokenStreamRewriter{ + return &TokenStreamRewriter{ + tokens: tokens, + programs: map[string][]RewriteOperation{ + Default_Program_Name:make([]RewriteOperation,0, Program_Init_Size), + }, + last_rewrite_token_indexes: map[string]int{}, + } +} + +func (tsr *TokenStreamRewriter) GetTokenStream() TokenStream{ + return tsr.tokens +} + +// Rollback the instruction stream for a program so that +// the indicated instruction (via instructionIndex) is no +// longer in the stream. UNTESTED! 
+func (tsr *TokenStreamRewriter) Rollback(program_name string, instruction_index int){ + is, ok := tsr.programs[program_name] + if ok{ + tsr.programs[program_name] = is[Min_Token_Index:instruction_index] + } +} + +func (tsr *TokenStreamRewriter) RollbackDefault(instruction_index int){ + tsr.Rollback(Default_Program_Name, instruction_index) +} +//Reset the program so that no instructions exist +func (tsr *TokenStreamRewriter) DeleteProgram(program_name string){ + tsr.Rollback(program_name, Min_Token_Index) //TODO: double test on that cause lower bound is not included +} + +func (tsr *TokenStreamRewriter) DeleteProgramDefault(){ + tsr.DeleteProgram(Default_Program_Name) +} + +func (tsr *TokenStreamRewriter) InsertAfter(program_name string, index int, text string){ + // to insert after, just insert before next index (even if past end) + var op RewriteOperation = NewInsertAfterOp(index, text, tsr.tokens) + rewrites := tsr.GetProgram(program_name) + op.SetInstructionIndex(len(rewrites)) + tsr.AddToProgram(program_name, op) +} + +func (tsr *TokenStreamRewriter) InsertAfterDefault(index int, text string){ + tsr.InsertAfter(Default_Program_Name, index, text) +} + +func (tsr *TokenStreamRewriter) InsertAfterToken(program_name string, token Token, text string){ + tsr.InsertAfter(program_name, token.GetTokenIndex(), text) +} + +func (tsr* TokenStreamRewriter) InsertBefore(program_name string, index int, text string){ + var op RewriteOperation = NewInsertBeforeOp(index, text, tsr.tokens) + rewrites := tsr.GetProgram(program_name) + op.SetInstructionIndex(len(rewrites)) + tsr.AddToProgram(program_name, op) +} + +func (tsr *TokenStreamRewriter) InsertBeforeDefault(index int, text string){ + tsr.InsertBefore(Default_Program_Name, index, text) +} + +func (tsr *TokenStreamRewriter) InsertBeforeToken(program_name string,token Token, text string){ + tsr.InsertBefore(program_name, token.GetTokenIndex(), text) +} + +func (tsr *TokenStreamRewriter) Replace(program_name string, from, to 
int, text string){ + if from > to || from < 0 || to < 0 || to >= tsr.tokens.Size(){ + panic(fmt.Sprintf("replace: range invalid: %d..%d(size=%d)", + from, to, tsr.tokens.Size())) + } + var op RewriteOperation = NewReplaceOp(from, to, text, tsr.tokens) + rewrites := tsr.GetProgram(program_name) + op.SetInstructionIndex(len(rewrites)) + tsr.AddToProgram(program_name, op) +} + +func (tsr *TokenStreamRewriter)ReplaceDefault(from, to int, text string) { + tsr.Replace(Default_Program_Name, from, to, text) +} + +func (tsr *TokenStreamRewriter)ReplaceDefaultPos(index int, text string){ + tsr.ReplaceDefault(index, index, text) +} + +func (tsr *TokenStreamRewriter)ReplaceToken(program_name string, from, to Token, text string){ + tsr.Replace(program_name, from.GetTokenIndex(), to.GetTokenIndex(), text) +} + +func (tsr *TokenStreamRewriter)ReplaceTokenDefault(from, to Token, text string){ + tsr.ReplaceToken(Default_Program_Name, from, to, text) +} + +func (tsr *TokenStreamRewriter)ReplaceTokenDefaultPos(index Token, text string){ + tsr.ReplaceTokenDefault(index, index, text) +} + +func (tsr *TokenStreamRewriter)Delete(program_name string, from, to int){ + tsr.Replace(program_name, from, to, "" ) +} + +func (tsr *TokenStreamRewriter)DeleteDefault(from, to int){ + tsr.Delete(Default_Program_Name, from, to) +} + +func (tsr *TokenStreamRewriter)DeleteDefaultPos(index int){ + tsr.DeleteDefault(index,index) +} + +func (tsr *TokenStreamRewriter)DeleteToken(program_name string, from, to Token) { + tsr.ReplaceToken(program_name, from, to, "") +} + +func (tsr *TokenStreamRewriter)DeleteTokenDefault(from,to Token){ + tsr.DeleteToken(Default_Program_Name, from, to) +} + +func (tsr *TokenStreamRewriter)GetLastRewriteTokenIndex(program_name string)int { + i, ok := tsr.last_rewrite_token_indexes[program_name] + if !ok{ + return -1 + } + return i +} + +func (tsr *TokenStreamRewriter)GetLastRewriteTokenIndexDefault()int{ + return tsr.GetLastRewriteTokenIndex(Default_Program_Name) +} + +func 
(tsr *TokenStreamRewriter)SetLastRewriteTokenIndex(program_name string, i int){ + tsr.last_rewrite_token_indexes[program_name] = i +} + +func (tsr *TokenStreamRewriter)InitializeProgram(name string)[]RewriteOperation{ + is := make([]RewriteOperation, 0, Program_Init_Size) + tsr.programs[name] = is + return is +} + +func (tsr *TokenStreamRewriter)AddToProgram(name string, op RewriteOperation){ + is := tsr.GetProgram(name) + is = append(is, op) + tsr.programs[name] = is +} + +func (tsr *TokenStreamRewriter)GetProgram(name string) []RewriteOperation { + is, ok := tsr.programs[name] + if !ok{ + is = tsr.InitializeProgram(name) + } + return is +} +// Return the text from the original tokens altered per the +// instructions given to this rewriter. +func (tsr *TokenStreamRewriter)GetTextDefault() string{ + return tsr.GetText( + Default_Program_Name, + NewInterval(0, tsr.tokens.Size()-1)) +} +// Return the text from the original tokens altered per the +// instructions given to this rewriter. +func (tsr *TokenStreamRewriter)GetText(program_name string, interval *Interval) string { + rewrites := tsr.programs[program_name] + start := interval.Start + stop := interval.Stop + // ensure start/end are in range + stop = min(stop, tsr.tokens.Size()-1) + start = max(start,0) + if rewrites == nil || len(rewrites) == 0{ + return tsr.tokens.GetTextFromInterval(interval) // no instructions to execute + } + buf := bytes.Buffer{} + // First, optimize instruction stream + indexToOp := reduceToSingleOperationPerIndex(rewrites) + // Walk buffer, executing instructions and emitting tokens + for i:=start; i<=stop && i= tsr.tokens.Size()-1 {buf.WriteString(op.GetText())} + } + } + return buf.String() +} + +// We need to combine operations and report invalid operations (like +// overlapping replaces that are not completed nested). Inserts to +// same index need to be combined etc... 
Here are the cases: +// +// I.i.u I.j.v leave alone, nonoverlapping +// I.i.u I.i.v combine: Iivu +// +// R.i-j.u R.x-y.v | i-j in x-y delete first R +// R.i-j.u R.i-j.v delete first R +// R.i-j.u R.x-y.v | x-y in i-j ERROR +// R.i-j.u R.x-y.v | boundaries overlap ERROR +// +// Delete special case of replace (text==null): +// D.i-j.u D.x-y.v | boundaries overlap combine to max(min)..max(right) +// +// I.i.u R.x-y.v | i in (x+1)-y delete I (since insert before +// we're not deleting i) +// I.i.u R.x-y.v | i not in (x+1)-y leave alone, nonoverlapping +// R.x-y.v I.i.u | i in x-y ERROR +// R.x-y.v I.x.u R.x-y.uv (combine, delete I) +// R.x-y.v I.i.u | i not in x-y leave alone, nonoverlapping +// +// I.i.u = insert u before op @ index i +// R.x-y.u = replace x-y indexed tokens with u +// +// First we need to examine replaces. For any replace op: +// +// 1. wipe out any insertions before op within that range. +// 2. Drop any replace op before that is contained completely within +// that range. +// 3. Throw exception upon boundary overlap with any previous replace. +// +// Then we can deal with inserts: +// +// 1. for any inserts to same index, combine even if not adjacent. +// 2. for any prior replace with same left boundary, combine this +// insert with replace and delete this replace. +// 3. throw exception if index in same range as previous replace +// +// Don't actually delete; make op null in list. Easier to walk list. +// Later we can throw as we add to index → op map. +// +// Note that I.2 R.2-2 will wipe out I.2 even though, technically, the +// inserted stuff would be before the replace range. But, if you +// add tokens in front of a method body '{' and then delete the method +// body, I think the stuff before the '{' you added should disappear too. +// +// Return a map from token index to operation. 
+// +func reduceToSingleOperationPerIndex(rewrites []RewriteOperation) map[int]RewriteOperation{ + // WALK REPLACES + for i:=0; i < len(rewrites); i++{ + op := rewrites[i] + if op == nil{continue} + rop, ok := op.(*ReplaceOp) + if !ok{continue} + // Wipe prior inserts within range + for j:=0; j rop.index && iop.index <=rop.LastIndex{ + // delete insert as it's a no-op. + rewrites[iop.instruction_index] = nil + } + } + } + // Drop any prior replaces contained within + for j:=0; j=rop.index && prevop.LastIndex <= rop.LastIndex{ + // delete replace as it's a no-op. + rewrites[prevop.instruction_index] = nil + continue + } + // throw exception unless disjoint or identical + disjoint := prevop.LastIndex < rop.index || prevop.index > rop.LastIndex + // Delete special case of replace (text==null): + // D.i-j.u D.x-y.v | boundaries overlap combine to max(min)..max(right) + if prevop.text == "" && rop.text == "" && !disjoint{ + rewrites[prevop.instruction_index] = nil + rop.index = min(prevop.index, rop.index) + rop.LastIndex = max(prevop.LastIndex, rop.LastIndex) + println("new rop" + rop.String()) //TODO: remove console write, taken from Java version + }else if !disjoint{ + panic("replace op boundaries of " + rop.String() + " overlap with previous " + prevop.String()) + } + } + } + } + // WALK INSERTS + for i:=0; i < len(rewrites); i++ { + op := rewrites[i] + if op == nil{continue} + //hack to replicate inheritance in composition + _, iok := rewrites[i].(*InsertBeforeOp) + _, aok := rewrites[i].(*InsertAfterOp) + if !iok && !aok{continue} + iop := rewrites[i] + // combine current insert with prior if any at same index + // deviating a bit from TokenStreamRewriter.java - hard to incorporate inheritance logic + for j:=0; j= rop.index && iop.GetIndex() <= rop.LastIndex{ + panic("insert op "+iop.String()+" within boundaries of previous "+rop.String()) + } + } + } + } + m := map[int]RewriteOperation{} + for i:=0; i < len(rewrites); i++{ + op := rewrites[i] + if op == nil 
{continue} + if _, ok := m[op.GetIndex()]; ok{ + panic("should only be one op per index") + } + m[op.GetIndex()] = op + } + return m +} + + +/* + Quick fixing Go lack of overloads + */ + +func max(a,b int)int{ + if a>b{ + return a + }else { + return b + } +} +func min(a,b int)int{ + if aaa", "DistinguishBetweenInsertAfterAndInsertBeforeToPreserverOrder", + func(r *TokenStreamRewriter){ + r.InsertBeforeDefault(0, "") + r.InsertAfterDefault(0, "") + r.InsertBeforeDefault(1, "") + r.InsertAfterDefault(1,"") + }), + NewLexerTest("aa", "

a

a", "DistinguishBetweenInsertAfterAndInsertBeforeToPreserverOrder2", + func(r *TokenStreamRewriter){ + r.InsertBeforeDefault(0, "

") + r.InsertBeforeDefault(0, "") + r.InsertAfterDefault(0, "

") + r.InsertAfterDefault(0, "") + r.InsertBeforeDefault(1, "") + r.InsertAfterDefault(1,"") + }), + NewLexerTest("ab", "

a

!b", "DistinguishBetweenInsertAfterAndInsertBeforeToPreserverOrder2", + func(r *TokenStreamRewriter){ + r.InsertBeforeDefault(0, "

") + r.InsertBeforeDefault(0, "") + r.InsertBeforeDefault(0, "

") + r.InsertAfterDefault(0, "

") + r.InsertAfterDefault(0, "
") + r.InsertAfterDefault(0, "
") + r.InsertBeforeDefault(1, "!") + }), + } + + + for _,c := range tests{ + t.Run(c.description,func(t *testing.T) { + rewriter := prepare_rewriter(c.input) + c.ops(rewriter) + if len(c.expected_exception)>0{ + panic_tester(t, c.expected_exception, rewriter) + }else{ + result := rewriter.GetTextDefault() + if result!=c.expected{ + t.Errorf("Expected:%s | Result: %s", c.expected, result) + } + } + } ) + } +} + + +// Suppress unused import error +var _ = fmt.Printf +var _ = unicode.IsLetter + +var serializedLexerAtn = []uint16{ + 3, 24715, 42794, 33075, 47597, 16764, 15335, 30598, 22884, 2, 5, 15, 8, + 1, 4, 2, 9, 2, 4, 3, 9, 3, 4, 4, 9, 4, 3, 2, 3, 2, 3, 3, 3, 3, 3, 4, 3, + 4, 2, 2, 5, 3, 3, 5, 4, 7, 5, 3, 2, 2, 2, 14, 2, 3, 3, 2, 2, 2, 2, 5, 3, + 2, 2, 2, 2, 7, 3, 2, 2, 2, 3, 9, 3, 2, 2, 2, 5, 11, 3, 2, 2, 2, 7, 13, + 3, 2, 2, 2, 9, 10, 7, 99, 2, 2, 10, 4, 3, 2, 2, 2, 11, 12, 7, 100, 2, 2, + 12, 6, 3, 2, 2, 2, 13, 14, 7, 101, 2, 2, 14, 8, 3, 2, 2, 2, 3, 2, 2, +} + +var lexerDeserializer = NewATNDeserializer(nil) +var lexerAtn = lexerDeserializer.DeserializeFromUInt16(serializedLexerAtn) + +var lexerChannelNames = []string{ + "DEFAULT_TOKEN_CHANNEL", "HIDDEN", +} + +var lexerModeNames = []string{ + "DEFAULT_MODE", +} + +var lexerLiteralNames = []string{ + "", "'a'", "'b'", "'c'", +} + +var lexerSymbolicNames = []string{ + "", "A", "B", "C", +} + +var lexerRuleNames = []string{ + "A", "B", "C", +} + +type LexerA struct { + *BaseLexer + channelNames []string + modeNames []string + // TODO: EOF string +} + +var lexerDecisionToDFA = make([]*DFA, len(lexerAtn.DecisionToState)) + +func init() { + for index, ds := range lexerAtn.DecisionToState { + lexerDecisionToDFA[index] = NewDFA(ds, index) + } +} + +func NewLexerA(input CharStream) *LexerA { + + l := new(LexerA) + + l.BaseLexer = NewBaseLexer(input) + l.Interpreter = NewLexerATNSimulator(l, lexerAtn, lexerDecisionToDFA, NewPredictionContextCache()) + + l.channelNames = lexerChannelNames + l.modeNames = lexerModeNames 
+ l.RuleNames = lexerRuleNames + l.LiteralNames = lexerLiteralNames + l.SymbolicNames = lexerSymbolicNames + l.GrammarFileName = "LexerA.g4" + // TODO: l.EOF = antlr.TokenEOF + + return l +} + +// LexerA tokens. +const ( + LexerAA = 1 + LexerAB = 2 + LexerAC = 3 +) + diff --git a/runtime/Java/pom.xml b/runtime/Java/pom.xml index c42015629..3eb60b2df 100644 --- a/runtime/Java/pom.xml +++ b/runtime/Java/pom.xml @@ -27,6 +27,7 @@ org.apache.maven.plugins maven-source-plugin + 3.0.1 diff --git a/runtime/Java/src/org/antlr/v4/runtime/CodePointCharStream.java b/runtime/Java/src/org/antlr/v4/runtime/CodePointCharStream.java index 491ef6918..107faa7b1 100644 --- a/runtime/Java/src/org/antlr/v4/runtime/CodePointCharStream.java +++ b/runtime/Java/src/org/antlr/v4/runtime/CodePointCharStream.java @@ -151,8 +151,8 @@ public abstract class CodePointCharStream implements CharStream { /** Return the UTF-16 encoded string for the given interval */ @Override public String getText(Interval interval) { - int startIdx = Math.min(interval.a, size - 1); - int len = Math.min(interval.b - interval.a + 1, size); + int startIdx = Math.min(interval.a, size); + int len = Math.min(interval.b - interval.a + 1, size - startIdx); // We know the maximum code point in byteArray is U+00FF, // so we can treat this as if it were ISO-8859-1, aka Latin-1, diff --git a/runtime/Java/src/org/antlr/v4/runtime/DefaultErrorStrategy.java b/runtime/Java/src/org/antlr/v4/runtime/DefaultErrorStrategy.java index 819538539..02b5ee510 100644 --- a/runtime/Java/src/org/antlr/v4/runtime/DefaultErrorStrategy.java +++ b/runtime/Java/src/org/antlr/v4/runtime/DefaultErrorStrategy.java @@ -36,6 +36,21 @@ public class DefaultErrorStrategy implements ANTLRErrorStrategy { protected IntervalSet lastErrorStates; + /** + * This field is used to propagate information about the lookahead following + * the previous match. 
Since prediction prefers completing the current rule + * to error recovery efforts, error reporting may occur later than the + * original point where it was discoverable. The original context is used to + * compute the true expected sets as though the reporting occurred as early + * as possible. + */ + protected ParserRuleContext nextTokensContext; + + /** + * @see #nextTokensContext + */ + protected int nextTokensState; + /** * {@inheritDoc} * @@ -225,7 +240,20 @@ public class DefaultErrorStrategy implements ANTLRErrorStrategy { // try cheaper subset first; might get lucky. seems to shave a wee bit off IntervalSet nextTokens = recognizer.getATN().nextTokens(s); - if (nextTokens.contains(Token.EPSILON) || nextTokens.contains(la)) { + if (nextTokens.contains(la)) { + // We are sure the token matches + nextTokensContext = null; + nextTokensState = ATNState.INVALID_STATE_NUMBER; + return; + } + + if (nextTokens.contains(Token.EPSILON)) { + if (nextTokensContext == null) { + // It's possible the next token won't match; information tracked + // by sync is restricted for performance. 
+ nextTokensContext = recognizer.getContext(); + nextTokensState = recognizer.getState(); + } return; } @@ -450,7 +478,14 @@ public class DefaultErrorStrategy implements ANTLRErrorStrategy { } // even that didn't work; must throw the exception - throw new InputMismatchException(recognizer); + InputMismatchException e; + if (nextTokensContext == null) { + e = new InputMismatchException(recognizer); + } else { + e = new InputMismatchException(recognizer, nextTokensState, nextTokensContext); + } + + throw e; } /** diff --git a/runtime/Java/src/org/antlr/v4/runtime/InputMismatchException.java b/runtime/Java/src/org/antlr/v4/runtime/InputMismatchException.java index fc4261558..08ef67c58 100644 --- a/runtime/Java/src/org/antlr/v4/runtime/InputMismatchException.java +++ b/runtime/Java/src/org/antlr/v4/runtime/InputMismatchException.java @@ -13,4 +13,10 @@ public class InputMismatchException extends RecognitionException { super(recognizer, recognizer.getInputStream(), recognizer._ctx); this.setOffendingToken(recognizer.getCurrentToken()); } + + public InputMismatchException(Parser recognizer, int state, ParserRuleContext ctx) { + super(recognizer, recognizer.getInputStream(), ctx); + this.setOffendingState(state); + this.setOffendingToken(recognizer.getCurrentToken()); + } } diff --git a/runtime/Java/src/org/antlr/v4/runtime/atn/ParserATNSimulator.java b/runtime/Java/src/org/antlr/v4/runtime/atn/ParserATNSimulator.java index 46840ab7c..76524ebd8 100755 --- a/runtime/Java/src/org/antlr/v4/runtime/atn/ParserATNSimulator.java +++ b/runtime/Java/src/org/antlr/v4/runtime/atn/ParserATNSimulator.java @@ -270,7 +270,7 @@ public class ParserATNSimulator extends ATNSimulator { public static final boolean retry_debug = false; /** Just in case this optimization is bad, add an ENV variable to turn it off */ - public static final boolean TURN_OFF_LR_LOOP_ENTRY_BRANCH_OPT = Boolean.parseBoolean(System.getenv("TURN_OFF_LR_LOOP_ENTRY_BRANCH_OPT")); + public static final boolean 
TURN_OFF_LR_LOOP_ENTRY_BRANCH_OPT = Boolean.parseBoolean(getSafeEnv("TURN_OFF_LR_LOOP_ENTRY_BRANCH_OPT")); protected final Parser parser; @@ -1541,11 +1541,6 @@ public class ParserATNSimulator extends ATNSimulator { ATNConfig c = getEpsilonTarget(config, t, continueCollecting, depth == 0, fullCtx, treatEofAsEpsilon); if ( c!=null ) { - if (!t.isEpsilon() && !closureBusy.add(c)) { - // avoid infinite recursion for EOF* and EOF+ - continue; - } - int newDepth = depth; if ( config.state instanceof RuleStopState) { assert !fullCtx; @@ -1555,11 +1550,6 @@ public class ParserATNSimulator extends ATNSimulator { // come in handy and we avoid evaluating context dependent // preds if this is > 0. - if (!closureBusy.add(c)) { - // avoid infinite recursion for right-recursive rules - continue; - } - if (_dfa != null && _dfa.isPrecedenceDfa()) { int outermostPrecedenceReturn = ((EpsilonTransition)t).outermostPrecedenceReturn(); if (outermostPrecedenceReturn == _dfa.atnStartState.ruleIndex) { @@ -1568,15 +1558,28 @@ public class ParserATNSimulator extends ATNSimulator { } c.reachesIntoOuterContext++; + + if (!closureBusy.add(c)) { + // avoid infinite recursion for right-recursive rules + continue; + } + configs.dipsIntoOuterContext = true; // TODO: can remove? 
only care when we add to set per middle of this method assert newDepth > Integer.MIN_VALUE; newDepth--; if ( debug ) System.out.println("dips into outer ctx: "+c); } - else if (t instanceof RuleTransition) { - // latch when newDepth goes negative - once we step out of the entry context we can't return - if (newDepth >= 0) { - newDepth++; + else { + if (!t.isEpsilon() && !closureBusy.add(c)) { + // avoid infinite recursion for EOF* and EOF+ + continue; + } + + if (t instanceof RuleTransition) { + // latch when newDepth goes negative - once we step out of the entry context we can't return + if (newDepth >= 0) { + newDepth++; + } } } @@ -2178,4 +2181,14 @@ public class ParserATNSimulator extends ATNSimulator { public Parser getParser() { return parser; } + + public static String getSafeEnv(String envName) { + try { + return System.getenv(envName); + } + catch(SecurityException e) { + // use the default value + } + return null; + } } diff --git a/runtime/JavaScript/src/antlr4/Utils.js b/runtime/JavaScript/src/antlr4/Utils.js index d7627be60..2cb939a66 100644 --- a/runtime/JavaScript/src/antlr4/Utils.js +++ b/runtime/JavaScript/src/antlr4/Utils.js @@ -401,11 +401,11 @@ DoubleDict.prototype.set = function (a, b, o) { function escapeWhitespace(s, escapeSpaces) { - s = s.replace("\t", "\\t"); - s = s.replace("\n", "\\n"); - s = s.replace("\r", "\\r"); + s = s.replace(/\t/g, "\\t") + .replace(/\n/g, "\\n") + .replace(/\r/g, "\\r"); if (escapeSpaces) { - s = s.replace(" ", "\u00B7"); + s = s.replace(/ /g, "\u00B7"); } return s; } @@ -443,4 +443,4 @@ exports.hashStuff = hashStuff; exports.escapeWhitespace = escapeWhitespace; exports.arrayToString = arrayToString; exports.titleCase = titleCase; -exports.equalArrays = equalArrays; \ No newline at end of file +exports.equalArrays = equalArrays; diff --git a/runtime/Python2/src/antlr4/Parser.py b/runtime/Python2/src/antlr4/Parser.py index d88f77918..69abe739b 100644 --- a/runtime/Python2/src/antlr4/Parser.py +++ 
b/runtime/Python2/src/antlr4/Parser.py @@ -218,6 +218,13 @@ class Parser (Recognizer): self._ctx.exitRule(listener) listener.exitEveryRule(self._ctx) + # Gets the number of syntax errors reported during parsing. This value is + # incremented each time {@link #notifyErrorListeners} is called. + # + # @see #notifyErrorListeners + # + def getNumberOfSyntaxErrors(self): + return self._syntaxErrors def getTokenFactory(self): return self._input.tokenSource._factory diff --git a/runtime/Python2/src/antlr4/tree/RuleTagToken.py b/runtime/Python2/src/antlr4/tree/RuleTagToken.py index 2043c1625..d63a3a53b 100644 --- a/runtime/Python2/src/antlr4/tree/RuleTagToken.py +++ b/runtime/Python2/src/antlr4/tree/RuleTagToken.py @@ -36,14 +36,13 @@ class RuleTagToken(Token): self.tokenIndex = -1 # from 0..n-1 of the token object in the input stream self.line = 0 # line=1..n of the 1st character self.column = -1 # beginning of the line at which it occurs, 0..n-1 - self.label = label + self.label = unicode(label) self._text = self.getText() # text of the token. 
- - self.ruleName = ruleName + self.ruleName = unicode(ruleName) def getText(self): if self.label is None: - return "<" + self.ruleName + ">" + return u"<" + self.ruleName + u">" else: - return "<" + self.label + ":" + self.ruleName + ">" + return u"<" + self.label + ":" + self.ruleName + u">" diff --git a/runtime/Python2/src/antlr4/tree/TokenTagToken.py b/runtime/Python2/src/antlr4/tree/TokenTagToken.py index 2ffc79f6f..dba41f785 100644 --- a/runtime/Python2/src/antlr4/tree/TokenTagToken.py +++ b/runtime/Python2/src/antlr4/tree/TokenTagToken.py @@ -24,8 +24,8 @@ class TokenTagToken(CommonToken): # def __init__(self, tokenName, type, label=None): super(TokenTagToken, self).__init__(type=type) - self.tokenName = tokenName - self.label = label + self.tokenName = unicode(tokenName) + self.label = unicode(label) self._text = self.getText() # @@ -36,9 +36,9 @@ class TokenTagToken(CommonToken): # def getText(self): if self.label is None: - return "<" + self.tokenName + ">" + return u"<" + self.tokenName + u">" else: - return "<" + self.label + ":" + self.tokenName + ">" + return u"<" + self.label + u":" + self.tokenName + u">" #

The implementation for {@link TokenTagToken} returns a string of the form # {@code tokenName:type}.

diff --git a/runtime/Python2/src/antlr4/tree/Tree.py b/runtime/Python2/src/antlr4/tree/Tree.py index 26e959612..14b5f29ec 100644 --- a/runtime/Python2/src/antlr4/tree/Tree.py +++ b/runtime/Python2/src/antlr4/tree/Tree.py @@ -108,13 +108,13 @@ class TerminalNodeImpl(TerminalNode): return visitor.visitTerminal(self) def getText(self): - return self.symbol.text + return unicode(self.symbol.text) def __unicode__(self): if self.symbol.type == Token.EOF: - return "" + return u"" else: - return self.symbol.text + return unicode(self.symbol.text) # Represents a token that was consumed during resynchronization # rather than during a valid match operation. For example, diff --git a/runtime/Python3/src/antlr4/Parser.py b/runtime/Python3/src/antlr4/Parser.py index 03f10a438..c461bbdc0 100644 --- a/runtime/Python3/src/antlr4/Parser.py +++ b/runtime/Python3/src/antlr4/Parser.py @@ -227,6 +227,14 @@ class Parser (Recognizer): listener.exitEveryRule(self._ctx) + # Gets the number of syntax errors reported during parsing. This value is + # incremented each time {@link #notifyErrorListeners} is called. 
+ # + # @see #notifyErrorListeners + # + def getNumberOfSyntaxErrors(self): + return self._syntaxErrors + def getTokenFactory(self): return self._input.tokenSource._factory diff --git a/runtime/Python3/src/antlr4/__init__.py b/runtime/Python3/src/antlr4/__init__.py index 4eac6c579..37c834202 100644 --- a/runtime/Python3/src/antlr4/__init__.py +++ b/runtime/Python3/src/antlr4/__init__.py @@ -12,7 +12,7 @@ from antlr4.atn.LexerATNSimulator import LexerATNSimulator from antlr4.atn.ParserATNSimulator import ParserATNSimulator from antlr4.atn.PredictionMode import PredictionMode from antlr4.PredictionContext import PredictionContextCache -from antlr4.ParserRuleContext import ParserRuleContext +from antlr4.ParserRuleContext import RuleContext, ParserRuleContext from antlr4.tree.Tree import ParseTreeListener, ParseTreeVisitor, ParseTreeWalker, TerminalNode, ErrorNode, RuleNode from antlr4.error.Errors import RecognitionException, IllegalStateException, NoViableAltException from antlr4.error.ErrorStrategy import BailErrorStrategy diff --git a/runtime/Swift/.gitignore b/runtime/Swift/.gitignore index c54511205..e4a84b226 100644 --- a/runtime/Swift/.gitignore +++ b/runtime/Swift/.gitignore @@ -1 +1,4 @@ +.build/ +Antlr4.xcodeproj/ +Tests/Antlr4Tests/gen/ xcuserdata/ diff --git a/runtime/Swift/Package.swift b/runtime/Swift/Package.swift index 5c2e28b12..0d72a47dd 100644 --- a/runtime/Swift/Package.swift +++ b/runtime/Swift/Package.swift @@ -1,3 +1,4 @@ +// swift-tools-version:4.0 // Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. // Use of this file is governed by the BSD 3-clause license that // can be found in the LICENSE.txt file in the project root. 
@@ -5,15 +6,19 @@ import PackageDescription let package = Package( - name: "Antlr4" -) - -products.append( - Product( - name: "Antlr4", - type: .Library(.Dynamic), - modules: [ - "Antlr4" - ] - ) + name: "Antlr4", + products: [ + .library( + name: "Antlr4", + type: .dynamic, + targets: ["Antlr4"]), + ], + targets: [ + .target( + name: "Antlr4", + dependencies: []), + .testTarget( + name: "Antlr4Tests", + dependencies: ["Antlr4"]), + ] ) diff --git a/runtime/Swift/Sources/Antlr4/ANTLRErrorListener.swift b/runtime/Swift/Sources/Antlr4/ANTLRErrorListener.swift index 942986b5e..486ae0f2c 100644 --- a/runtime/Swift/Sources/Antlr4/ANTLRErrorListener.swift +++ b/runtime/Swift/Sources/Antlr4/ANTLRErrorListener.swift @@ -1,20 +1,23 @@ +/// /// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. /// Use of this file is governed by the BSD 3-clause license that /// can be found in the LICENSE.txt file in the project root. /// How to emit recognition errors. +/// public protocol ANTLRErrorListener: class { + /// /// Upon syntax error, notify any interested parties. This is not how to - /// recover from errors or compute error messages. {@link org.antlr.v4.runtime.ANTLRErrorStrategy} + /// recover from errors or compute error messages. _org.antlr.v4.runtime.ANTLRErrorStrategy_ /// specifies how to recover from syntax errors and how to compute error /// messages. This listener's job is simply to emit a computed message, /// though it has enough information to create its own message in many cases. - /// - ///

The {@link org.antlr.v4.runtime.RecognitionException} is non-null for all syntax errors except + /// + /// The _RecognitionException_ is non-null for all syntax errors except /// when we discover mismatched token errors that we can recover from /// in-line, without returning from the surrounding rule (via the single - /// token insertion and deletion mechanism).

- /// + /// token insertion and deletion mechanism). + /// /// - parameter recognizer: /// What parser got the error. From this /// object, you can access the context as well @@ -22,7 +25,7 @@ public protocol ANTLRErrorListener: class { /// - parameter offendingSymbol: /// The offending token in the input token /// stream, unless recognizer is a lexer (then it's null). If - /// no viable alternative error, {@code e} has token at which we + /// no viable alternative error, `e` has token at which we /// started production for the decision. /// - parameter line: /// The line number in the input where the error occurred. @@ -35,116 +38,122 @@ public protocol ANTLRErrorListener: class { /// the reporting of an error. It is null in the case where /// the parser was able to recover in line without exiting the /// surrounding rule. - func syntaxError(_ recognizer: Recognizer, - _ offendingSymbol: AnyObject?, - _ line: Int, - _ charPositionInLine: Int, - _ msg: String, - _ e: AnyObject?// RecognitionException? + /// + func syntaxError(_ recognizer: Recognizer, + _ offendingSymbol: AnyObject?, + _ line: Int, + _ charPositionInLine: Int, + _ msg: String, + _ e: AnyObject? ) + /// /// This method is called by the parser when a full-context prediction /// results in an ambiguity. - /// - ///

Each full-context prediction which does not result in a syntax error - /// will call either {@link #reportContextSensitivity} or - /// {@link #reportAmbiguity}.

- /// - ///

When {@code ambigAlts} is not null, it contains the set of potentially + /// + /// Each full-context prediction which does not result in a syntax error + /// will call either _#reportContextSensitivity_ or + /// _#reportAmbiguity_. + /// + /// When `ambigAlts` is not null, it contains the set of potentially /// viable alternatives identified by the prediction algorithm. When - /// {@code ambigAlts} is null, use {@link org.antlr.v4.runtime.atn.ATNConfigSet#getAlts} to obtain the - /// represented alternatives from the {@code configs} argument.

- /// - ///

When {@code exact} is {@code true}, all of the potentially + /// `ambigAlts` is null, use _org.antlr.v4.runtime.atn.ATNConfigSet#getAlts_ to obtain the + /// represented alternatives from the `configs` argument. + /// + /// When `exact` is `true`, __all__ of the potentially /// viable alternatives are truly viable, i.e. this is reporting an exact - /// ambiguity. When {@code exact} is {@code false}, at least two of + /// ambiguity. When `exact` is `false`, __at least two__ of /// the potentially viable alternatives are viable for the current input, but /// the prediction algorithm terminated as soon as it determined that at - /// least the minimum potentially viable alternative is truly - /// viable.

- /// - ///

When the {@link org.antlr.v4.runtime.atn.PredictionMode#LL_EXACT_AMBIG_DETECTION} prediction + /// least the __minimum__ potentially viable alternative is truly + /// viable. + /// + /// When the _org.antlr.v4.runtime.atn.PredictionMode#LL_EXACT_AMBIG_DETECTION_ prediction /// mode is used, the parser is required to identify exact ambiguities so - /// {@code exact} will always be {@code true}.

- /// - ///

This method is not used by lexers.

- /// + /// `exact` will always be `true`. + /// + /// This method is not used by lexers. + /// /// - parameter recognizer: the parser instance /// - parameter dfa: the DFA for the current decision /// - parameter startIndex: the input index where the decision started /// - parameter stopIndex: the input input where the ambiguity was identified - /// - parameter exact: {@code true} if the ambiguity is exactly known, otherwise - /// {@code false}. This is always {@code true} when - /// {@link org.antlr.v4.runtime.atn.PredictionMode#LL_EXACT_AMBIG_DETECTION} is used. - /// - parameter ambigAlts: the potentially ambiguous alternatives, or {@code null} + /// - parameter exact: `true` if the ambiguity is exactly known, otherwise + /// `false`. This is always `true` when + /// _org.antlr.v4.runtime.atn.PredictionMode#LL_EXACT_AMBIG_DETECTION_ is used. + /// - parameter ambigAlts: the potentially ambiguous alternatives, or `null` /// to indicate that the potentially ambiguous alternatives are the complete - /// set of represented alternatives in {@code configs} + /// set of represented alternatives in `configs` /// - parameter configs: the ATN configuration set where the ambiguity was /// identified + /// func reportAmbiguity(_ recognizer: Parser, _ dfa: DFA, _ startIndex: Int, _ stopIndex: Int, _ exact: Bool, _ ambigAlts: BitSet, - _ configs: ATNConfigSet) throws + _ configs: ATNConfigSet) + /// /// This method is called when an SLL conflict occurs and the parser is about /// to use the full context information to make an LL decision. - /// - ///

If one or more configurations in {@code configs} contains a semantic + /// + /// If one or more configurations in `configs` contains a semantic /// predicate, the predicates are evaluated before this method is called. The /// subset of alternatives which are still viable after predicates are - /// evaluated is reported in {@code conflictingAlts}.

- /// - ///

This method is not used by lexers.

- /// + /// evaluated is reported in `conflictingAlts`. + /// + /// This method is not used by lexers. + /// /// - parameter recognizer: the parser instance /// - parameter dfa: the DFA for the current decision /// - parameter startIndex: the input index where the decision started /// - parameter stopIndex: the input index where the SLL conflict occurred /// - parameter conflictingAlts: The specific conflicting alternatives. If this is - /// {@code null}, the conflicting alternatives are all alternatives - /// represented in {@code configs}. At the moment, conflictingAlts is non-null + /// `null`, the conflicting alternatives are all alternatives + /// represented in `configs`. At the moment, conflictingAlts is non-null /// (for the reference implementation, but Sam's optimized version can see this /// as null). /// - parameter configs: the ATN configuration set where the SLL conflict was /// detected + /// func reportAttemptingFullContext(_ recognizer: Parser, _ dfa: DFA, _ startIndex: Int, _ stopIndex: Int, _ conflictingAlts: BitSet?, - _ configs: ATNConfigSet) throws + _ configs: ATNConfigSet) + /// /// This method is called by the parser when a full-context prediction has a /// unique result. - /// - ///

Each full-context prediction which does not result in a syntax error - /// will call either {@link #reportContextSensitivity} or - /// {@link #reportAmbiguity}.

- /// - ///

For prediction implementations that only evaluate full-context + /// + /// Each full-context prediction which does not result in a syntax error + /// will call either _#reportContextSensitivity_ or + /// _#reportAmbiguity_. + /// + /// For prediction implementations that only evaluate full-context /// predictions when an SLL conflict is found (including the default - /// {@link org.antlr.v4.runtime.atn.ParserATNSimulator} implementation), this method reports cases + /// _org.antlr.v4.runtime.atn.ParserATNSimulator_ implementation), this method reports cases /// where SLL conflicts were resolved to unique full-context predictions, /// i.e. the decision was context-sensitive. This report does not necessarily /// indicate a problem, and it may appear even in completely unambiguous - /// grammars.

- /// - ///

{@code configs} may have more than one represented alternative if the + /// grammars. + /// + /// `configs` may have more than one represented alternative if the /// full-context prediction algorithm does not evaluate predicates before /// beginning the full-context prediction. In all cases, the final prediction - /// is passed as the {@code prediction} argument.

- /// - ///

Note that the definition of "context sensitivity" in this method - /// differs from the concept in {@link org.antlr.v4.runtime.atn.DecisionInfo#contextSensitivities}. + /// is passed as the `prediction` argument. + /// + /// Note that the definition of "context sensitivity" in this method + /// differs from the concept in _org.antlr.v4.runtime.atn.DecisionInfo#contextSensitivities_. /// This method reports all instances where an SLL conflict occurred but LL /// parsing produced a unique result, whether or not that unique result - /// matches the minimum alternative in the SLL conflicting set.

- /// - ///

This method is not used by lexers.

- /// + /// matches the minimum alternative in the SLL conflicting set. + /// + /// This method is not used by lexers. + /// /// - parameter recognizer: the parser instance /// - parameter dfa: the DFA for the current decision /// - parameter startIndex: the input index where the decision started @@ -153,10 +162,11 @@ public protocol ANTLRErrorListener: class { /// - parameter prediction: the unambiguous result of the full-context prediction /// - parameter configs: the ATN configuration set where the unambiguous prediction /// was determined + /// func reportContextSensitivity(_ recognizer: Parser, _ dfa: DFA, _ startIndex: Int, _ stopIndex: Int, _ prediction: Int, - _ configs: ATNConfigSet) throws + _ configs: ATNConfigSet) } diff --git a/runtime/Swift/Sources/Antlr4/ANTLRErrorStrategy.swift b/runtime/Swift/Sources/Antlr4/ANTLRErrorStrategy.swift index 541a2a3b4..7ca228ff8 100644 --- a/runtime/Swift/Sources/Antlr4/ANTLRErrorStrategy.swift +++ b/runtime/Swift/Sources/Antlr4/ANTLRErrorStrategy.swift @@ -1,99 +1,119 @@ +/// +/// /// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. /// Use of this file is governed by the BSD 3-clause license that /// can be found in the LICENSE.txt file in the project root. -/// The interface for defining strategies to deal with syntax errors encountered -/// during a parse by ANTLR-generated parsers. We distinguish between three -/// different kinds of errors: -/// -///
    -///
  • The parser could not figure out which path to take in the ATN (none of -/// the available alternatives could possibly match)
  • -///
  • The current input does not match what we were looking for
  • -///
  • A predicate evaluated to false
  • -///
-/// -/// Implementations of this interface report syntax errors by calling -/// {@link org.antlr.v4.runtime.Parser#notifyErrorListeners}. -/// -///

TODO: what to do about lexers

+/// +/// + +/// +/// +/// The interface for defining strategies to deal with syntax errors +/// encountered during a parse by ANTLR-generated parsers. We distinguish between three +/// different kinds of errors: +/// +/// * The parser could not figure out which path to take in the ATN (none of +/// the available alternatives could possibly match) +/// * The current input does not match what we were looking for +/// * A predicate evaluated to false +/// +/// Implementations of this interface report syntax errors by calling +/// _org.antlr.v4.runtime.Parser#notifyErrorListeners_. +/// +/// TODO: what to do about lexers +/// public protocol ANTLRErrorStrategy { - /// Reset the error handler state for the specified {@code recognizer}. + /// + /// Reset the error handler state for the specified `recognizer`. /// - parameter recognizer: the parser instance + /// func reset(_ recognizer: Parser) + /// /// This method is called when an unexpected symbol is encountered during an - /// inline match operation, such as {@link org.antlr.v4.runtime.Parser#match}. If the error + /// inline match operation, such as _org.antlr.v4.runtime.Parser#match_. If the error /// strategy successfully recovers from the match failure, this method - /// returns the {@link org.antlr.v4.runtime.Token} instance which should be treated as the + /// returns the _org.antlr.v4.runtime.Token_ instance which should be treated as the /// successful result of the match. - /// - ///

This method handles the consumption of any tokens - the caller should - /// not call {@link org.antlr.v4.runtime.Parser#consume} after a successful recovery.

- /// - ///

Note that the calling code will not report an error if this method + /// + /// This method handles the consumption of any tokens - the caller should + /// __not__ call _org.antlr.v4.runtime.Parser#consume_ after a successful recovery. + /// + /// Note that the calling code will not report an error if this method /// returns successfully. The error strategy implementation is responsible - /// for calling {@link org.antlr.v4.runtime.Parser#notifyErrorListeners} as appropriate.

- /// + /// for calling _org.antlr.v4.runtime.Parser#notifyErrorListeners_ as appropriate. + /// /// - parameter recognizer: the parser instance - /// - org.antlr.v4.runtime.RecognitionException if the error strategy was not able to + /// - throws: _RecognitionException_ if the error strategy was not able to /// recover from the unexpected input symbol + /// @discardableResult - func recoverInline(_ recognizer: Parser) throws -> Token // RecognitionException; + func recoverInline(_ recognizer: Parser) throws -> Token - /// This method is called to recover from exception {@code e}. This method is - /// called after {@link #reportError} by the default exception handler + /// + /// This method is called to recover from exception `e`. This method is + /// called after _#reportError_ by the default exception handler /// generated for a rule method. - /// + /// /// - seealso: #reportError - /// + /// /// - parameter recognizer: the parser instance /// - parameter e: the recognition exception to recover from - /// - org.antlr.v4.runtime.RecognitionException if the error strategy could not recover from + /// - throws: _RecognitionException_ if the error strategy could not recover from /// the recognition exception - func recover(_ recognizer: Parser, _ e: AnyObject) throws // RecognitionException; + /// + func recover(_ recognizer: Parser, _ e: RecognitionException) throws + /// /// This method provides the error handler with an opportunity to handle /// syntactic or semantic errors in the input stream before they result in a - /// {@link org.antlr.v4.runtime.RecognitionException}. - /// - ///

The generated code currently contains calls to {@link #sync} after - /// entering the decision state of a closure block ({@code (...)*} or - /// {@code (...)+}).

- /// - ///

For an implementation based on Jim Idle's "magic sync" mechanism, see - /// {@link org.antlr.v4.runtime.DefaultErrorStrategy#sync}.

- /// + /// _org.antlr.v4.runtime.RecognitionException_. + /// + /// The generated code currently contains calls to _#sync_ after + /// entering the decision state of a closure block (`(...)*` or + /// `(...)+`). + /// + /// For an implementation based on Jim Idle's "magic sync" mechanism, see + /// _org.antlr.v4.runtime.DefaultErrorStrategy#sync_. + /// /// - seealso: org.antlr.v4.runtime.DefaultErrorStrategy#sync - /// + /// /// - parameter recognizer: the parser instance - /// - org.antlr.v4.runtime.RecognitionException if an error is detected by the error + /// - throws: _RecognitionException_ if an error is detected by the error /// strategy but cannot be automatically recovered at the current state in /// the parsing process - func sync(_ recognizer: Parser) throws // RecognitionException; + /// + func sync(_ recognizer: Parser) throws + /// /// Tests whether or not recognizer} is in the process of recovering - /// from an error. In error recovery mode, {@link org.antlr.v4.runtime.Parser#consume} adds + /// from an error. In error recovery mode, _org.antlr.v4.runtime.Parser#consume_ adds /// symbols to the parse tree by calling - /// {@link Parser#createErrorNode(ParserRuleContext, Token)} then - /// {@link ParserRuleContext#addErrorNode(ErrorNode)} instead of - /// {@link Parser#createTerminalNode(ParserRuleContext, Token)}. - /// + /// _Parser#createErrorNode(ParserRuleContext, Token)_ then + /// _ParserRuleContext#addErrorNode(ErrorNode)_ instead of + /// _Parser#createTerminalNode(ParserRuleContext, Token)_. + /// /// - parameter recognizer: the parser instance - /// - returns: {@code true} if the parser is currently recovering from a parse - /// error, otherwise {@code false} + /// - returns: `true` if the parser is currently recovering from a parse + /// error, otherwise `false` + /// func inErrorRecoveryMode(_ recognizer: Parser) -> Bool + /// /// This method is called by when the parser successfully matches an input /// symbol. 
- /// + /// /// - parameter recognizer: the parser instance + /// func reportMatch(_ recognizer: Parser) - /// Report any kind of {@link org.antlr.v4.runtime.RecognitionException}. This method is called by + /// + /// Report any kind of _org.antlr.v4.runtime.RecognitionException_. This method is called by /// the default exception handler generated for a rule method. - /// + /// /// - parameter recognizer: the parser instance /// - parameter e: the recognition exception to report - func reportError(_ recognizer: Parser, _ e: AnyObject) + /// + func reportError(_ recognizer: Parser, _ e: RecognitionException) } diff --git a/runtime/Swift/Sources/Antlr4/ANTLRFileStream.swift b/runtime/Swift/Sources/Antlr4/ANTLRFileStream.swift index 053b6348d..9ed7ac9ef 100644 --- a/runtime/Swift/Sources/Antlr4/ANTLRFileStream.swift +++ b/runtime/Swift/Sources/Antlr4/ANTLRFileStream.swift @@ -1,8 +1,10 @@ +/// /// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. /// Use of this file is governed by the BSD 3-clause license that /// can be found in the LICENSE.txt file in the project root. -/// This is an {@link org.antlr.v4.runtime.ANTLRInputStream} that is loaded from a file all at once +/// This is an _org.antlr.v4.runtime.ANTLRInputStream_ that is loaded from a file all at once /// when you construct the object. +/// import Foundation @@ -10,7 +12,6 @@ public class ANTLRFileStream: ANTLRInputStream { internal var fileName: String public convenience override init(_ fileName: String) { - // throws; IOException self.init(fileName, nil) } diff --git a/runtime/Swift/Sources/Antlr4/ANTLRInputStream.swift b/runtime/Swift/Sources/Antlr4/ANTLRInputStream.swift index 3b73981fd..17bbd8096 100644 --- a/runtime/Swift/Sources/Antlr4/ANTLRInputStream.swift +++ b/runtime/Swift/Sources/Antlr4/ANTLRInputStream.swift @@ -1,26 +1,36 @@ +/// /// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. 
/// Use of this file is governed by the BSD 3-clause license that /// can be found in the LICENSE.txt file in the project root. -/// Vacuum all input from a {@link java.io.Reader}/{@link java.io.InputStream} and then treat it -/// like a {@code char[]} buffer. Can also pass in a {@link String} or -/// {@code char[]} to use. -/// -///

If you need encoding, pass in stream/reader with correct encoding.

+/// Vacuum all input from a _java.io.Reader_/_java.io.InputStream_ and then treat it +/// like a `char[]` buffer. Can also pass in a _String_ or +/// `char[]` to use. +/// +/// If you need encoding, pass in stream/reader with correct encoding. +/// public class ANTLRInputStream: CharStream { public static let READ_BUFFER_SIZE: Int = 1024 public static let INITIAL_BUFFER_SIZE: Int = 1024 + /// /// The data being scanned + /// internal var data: [Character] + /// /// How many characters are actually in the buffer + /// internal var n: Int + /// /// 0..n-1 index into string of next char + /// internal var p: Int = 0 + /// /// What is name or source of this char stream? + /// public var name: String? public init() { @@ -28,87 +38,26 @@ public class ANTLRInputStream: CharStream { data = [Character]() } + /// /// Copy data in string to a local char array + /// public init(_ input: String) { self.data = Array(input.characters) // input.toCharArray(); self.n = input.length } + /// /// This is the preferred constructor for strings as no data is copied + /// public init(_ data: [Character], _ numberOfActualCharsInArray: Int) { self.data = data self.n = numberOfActualCharsInArray } - /// public convenience init(_ r : Reader) throws; IOException { - /// self.init(r, INITIAL_BUFFER_SIZE, READ_BUFFER_SIZE); - /// } - /// - /// public convenience init(_ r : Reader, _ initialSize : Int) throws; IOException { - /// self.init(r, initialSize, READ_BUFFER_SIZE); - /// } - /// - /// public init(_ r : Reader, _ initialSize : Int, _ readChunkSize : Int) throws; IOException { - /// load(r, initialSize, readChunkSize); - /// } - /// - /// public convenience init(_ input : InputStream) throws; IOException { - /// self.init(InputStreamReader(input), INITIAL_BUFFER_SIZE); - /// } - /// - /// public convenience init(_ input : InputStream, _ initialSize : Int) throws; IOException { - /// self.init(InputStreamReader(input), initialSize); - /// } - /// - /// public convenience init(_ input : 
InputStream, _ initialSize : Int, _ readChunkSize : Int) throws; IOException { - /// self.init(InputStreamReader(input), initialSize, readChunkSize); - /// } - /// - /// public func load(r : Reader, _ size : Int, _ readChunkSize : Int) - /// throws; IOException - /// { - /// if ( r==nil ) { - /// return; - /// } - /// if ( size<=0 ) { - /// size = INITIAL_BUFFER_SIZE; - /// } - /// if ( readChunkSize<=0 ) { - /// readChunkSize = READ_BUFFER_SIZE; - /// } - /// // print("load "+size+" in chunks of "+readChunkSize); - /// try { - /// // alloc initial buffer size. - /// data = new char[size]; - /// // read all the data in chunks of readChunkSize - /// var numRead : Int=0; - /// var p : Int = 0; - /// do { - /// if ( p+readChunkSize > data.length ) { // overflow? - /// // print("### overflow p="+p+", data.length="+data.length); - /// data = Arrays.copyOf(data, data.length * 2); - /// } - /// numRead = r.read(data, p, readChunkSize); - /// // print("read "+numRead+" chars; p was "+p+" is now "+(p+numRead)); - /// p += numRead; - /// } while (numRead!=-1); // while not EOF - /// // set the actual size of the data available; - /// // EOF subtracted one above in p+=numRead; add one back - /// n = p+1; - /// //print("n="+n); - /// } - /// finally { - /// r.close(); - /// } - /// } - /// Reset the stream so that it's in the same state it was - /// when the object was created *except* the data array is not - /// touched. public func reset() { p = 0 } - public func consume() throws { if p >= n { assert(LA(1) == ANTLRInputStream.EOF, "Expected: LA(1)==IntStream.EOF") @@ -124,7 +73,6 @@ public class ANTLRInputStream: CharStream { } } - public func LA(_ i: Int) -> Int { var i = i if i == 0 { @@ -150,9 +98,11 @@ public class ANTLRInputStream: CharStream { return LA(i) } + /// /// Return the current input symbol index 0..n where n indicates the /// last symbol has been read. The index is the index of char to /// be returned from LA(1). 
+ /// public func index() -> Int { return p } @@ -161,7 +111,9 @@ public class ANTLRInputStream: CharStream { return n } + /// /// mark/release do nothing; we have entire buffer + /// public func mark() -> Int { return -1 @@ -170,8 +122,10 @@ public class ANTLRInputStream: CharStream { public func release(_ marker: Int) { } + /// /// consume() ahead until p==index; can't just set p=index as we must /// update line and charPositionInLine. If we seek backwards, just set p + /// public func seek(_ index: Int) throws { var index = index @@ -186,7 +140,6 @@ public class ANTLRInputStream: CharStream { } } - public func getText(_ interval: Interval) -> String { let start: Int = interval.a var stop: Int = interval.b @@ -201,7 +154,6 @@ public class ANTLRInputStream: CharStream { return String(data[start ..< (start + count)]) } - public func getSourceName() -> String { guard let name = name , !name.isEmpty else { return ANTLRInputStream.UNKNOWN_SOURCE_NAME @@ -209,7 +161,6 @@ public class ANTLRInputStream: CharStream { return name } - public func toString() -> String { return String(data) } diff --git a/runtime/Swift/Sources/Antlr4/BailErrorStrategy.swift b/runtime/Swift/Sources/Antlr4/BailErrorStrategy.swift index eca1de705..d1e81140c 100644 --- a/runtime/Swift/Sources/Antlr4/BailErrorStrategy.swift +++ b/runtime/Swift/Sources/Antlr4/BailErrorStrategy.swift @@ -1,69 +1,74 @@ +/// /// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. /// Use of this file is governed by the BSD 3-clause license that /// can be found in the LICENSE.txt file in the project root. +/// - -/// This implementation of {@link org.antlr.v4.runtime.ANTLRErrorStrategy} responds to syntax errors +/// +/// +/// This implementation of _org.antlr.v4.runtime.ANTLRErrorStrategy_ responds to syntax errors /// by immediately canceling the parse operation with a -/// {@link org.antlr.v4.runtime.misc.ParseCancellationException}. 
The implementation ensures that the -/// {@link org.antlr.v4.runtime.ParserRuleContext#exception} field is set for all parse tree nodes +/// _org.antlr.v4.runtime.misc.ParseCancellationException_. The implementation ensures that the +/// _org.antlr.v4.runtime.ParserRuleContext#exception_ field is set for all parse tree nodes /// that were not completed prior to encountering the error. -/// -///

-/// This error strategy is useful in the following scenarios.

-/// -///
    -///
  • Two-stage parsing: This error strategy allows the first +/// +/// This error strategy is useful in the following scenarios. +/// +/// * __Two-stage parsing:__ This error strategy allows the first /// stage of two-stage parsing to immediately terminate if an error is /// encountered, and immediately fall back to the second stage. In addition to /// avoiding wasted work by attempting to recover from errors here, the empty -/// implementation of {@link org.antlr.v4.runtime.BailErrorStrategy#sync} improves the performance of -/// the first stage.
  • -///
  • Silent validation: When syntax errors are not being +/// implementation of _org.antlr.v4.runtime.BailErrorStrategy#sync_ improves the performance of +/// the first stage. +/// +/// * __Silent validation:__ When syntax errors are not being /// reported or logged, and the parse result is simply ignored if errors occur, -/// the {@link org.antlr.v4.runtime.BailErrorStrategy} avoids wasting work on recovering from errors -/// when the result will be ignored either way.
  • -///
-/// -///

-/// {@code myparser.setErrorHandler(new BailErrorStrategy());}

-/// +/// the _org.antlr.v4.runtime.BailErrorStrategy_ avoids wasting work on recovering from errors +/// when the result will be ignored either way. +/// +/// `myparser.setErrorHandler(new BailErrorStrategy());` +/// /// - seealso: org.antlr.v4.runtime.Parser#setErrorHandler(org.antlr.v4.runtime.ANTLRErrorStrategy) - +/// +/// public class BailErrorStrategy: DefaultErrorStrategy { public override init(){} - /// Instead of recovering from exception {@code e}, re-throw it wrapped - /// in a {@link org.antlr.v4.runtime.misc.ParseCancellationException} so it is not caught by the - /// rule function catches. Use {@link Exception#getCause()} to get the - /// original {@link org.antlr.v4.runtime.RecognitionException}. - override - public func recover(_ recognizer: Parser, _ e: AnyObject) throws { - var context: ParserRuleContext? = recognizer.getContext() - while let contextWrap = context{ + /// + /// Instead of recovering from exception `e`, re-throw it wrapped + /// in a _org.antlr.v4.runtime.misc.ParseCancellationException_ so it is not caught by the + /// rule function catches. Use _Exception#getCause()_ to get the + /// original _org.antlr.v4.runtime.RecognitionException_. + /// + override public func recover(_ recognizer: Parser, _ e: RecognitionException) throws { + var context = recognizer.getContext() + while let contextWrap = context { contextWrap.exception = e context = (contextWrap.getParent() as? ParserRuleContext) } - throw ANTLRException.recognition(e: e) + throw ANTLRException.recognition(e: e) } + /// /// Make sure we don't attempt to recover inline; if the parser /// successfully recovers, it won't throw an exception. + /// override public func recoverInline(_ recognizer: Parser) throws -> Token { - let e: InputMismatchException = try InputMismatchException(recognizer) - var context: ParserRuleContext? 
= recognizer.getContext() + let e = InputMismatchException(recognizer) + var context = recognizer.getContext() while let contextWrap = context { contextWrap.exception = e context = (contextWrap.getParent() as? ParserRuleContext) } - throw ANTLRException.recognition(e: e) - + throw ANTLRException.recognition(e: e) } + /// /// Make sure we don't attempt to recover from problems in subrules. + /// override public func sync(_ recognizer: Parser) { } diff --git a/runtime/Swift/Sources/Antlr4/BaseErrorListener.swift b/runtime/Swift/Sources/Antlr4/BaseErrorListener.swift index 5a4292c4e..8db84a00b 100644 --- a/runtime/Swift/Sources/Antlr4/BaseErrorListener.swift +++ b/runtime/Swift/Sources/Antlr4/BaseErrorListener.swift @@ -1,24 +1,28 @@ +/// /// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. /// Use of this file is governed by the BSD 3-clause license that /// can be found in the LICENSE.txt file in the project root. +/// -/// Provides an empty default implementation of {@link org.antlr.v4.runtime.ANTLRErrorListener}. The +/// +/// Provides an empty default implementation of _org.antlr.v4.runtime.ANTLRErrorListener_. The /// default implementation of each method does nothing, but can be overridden as /// necessary. -/// +/// /// - Sam Harwell +/// open class BaseErrorListener: ANTLRErrorListener { public init() { } - open func syntaxError(_ recognizer: Recognizer, - _ offendingSymbol: AnyObject?, - _ line: Int, - _ charPositionInLine: Int, - _ msg: String, - _ e: AnyObject?//RecognitionException + open func syntaxError(_ recognizer: Recognizer, + _ offendingSymbol: AnyObject?, + _ line: Int, + _ charPositionInLine: Int, + _ msg: String, + _ e: AnyObject? 
) { } @@ -29,7 +33,7 @@ open class BaseErrorListener: ANTLRErrorListener { _ stopIndex: Int, _ exact: Bool, _ ambigAlts: BitSet, - _ configs: ATNConfigSet) throws { + _ configs: ATNConfigSet) { } @@ -38,7 +42,7 @@ open class BaseErrorListener: ANTLRErrorListener { _ startIndex: Int, _ stopIndex: Int, _ conflictingAlts: BitSet?, - _ configs: ATNConfigSet) throws { + _ configs: ATNConfigSet) { } @@ -47,6 +51,6 @@ open class BaseErrorListener: ANTLRErrorListener { _ startIndex: Int, _ stopIndex: Int, _ prediction: Int, - _ configs: ATNConfigSet) throws { + _ configs: ATNConfigSet) { } } diff --git a/runtime/Swift/Sources/Antlr4/BufferedTokenStream.swift b/runtime/Swift/Sources/Antlr4/BufferedTokenStream.swift index 8ea114821..2805f8855 100644 --- a/runtime/Swift/Sources/Antlr4/BufferedTokenStream.swift +++ b/runtime/Swift/Sources/Antlr4/BufferedTokenStream.swift @@ -1,52 +1,63 @@ +/// /// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. /// Use of this file is governed by the BSD 3-clause license that /// can be found in the LICENSE.txt file in the project root. +/// -/// This implementation of {@link org.antlr.v4.runtime.TokenStream} loads tokens from a -/// {@link org.antlr.v4.runtime.TokenSource} on-demand, and places the tokens in a buffer to provide +/// +/// This implementation of _org.antlr.v4.runtime.TokenStream_ loads tokens from a +/// _org.antlr.v4.runtime.TokenSource_ on-demand, and places the tokens in a buffer to provide /// access to any previous token by index. -/// -///

-/// This token stream ignores the value of {@link org.antlr.v4.runtime.Token#getChannel}. If your +/// +/// +/// This token stream ignores the value of _org.antlr.v4.runtime.Token#getChannel_. If your /// parser requires the token stream filter tokens to only those on a particular -/// channel, such as {@link org.antlr.v4.runtime.Token#DEFAULT_CHANNEL} or -/// {@link org.antlr.v4.runtime.Token#HIDDEN_CHANNEL}, use a filtering token stream such a -/// {@link org.antlr.v4.runtime.CommonTokenStream}.

+/// channel, such as _org.antlr.v4.runtime.Token#DEFAULT_CHANNEL_ or +/// _org.antlr.v4.runtime.Token#HIDDEN_CHANNEL_, use a filtering token stream such a +/// _org.antlr.v4.runtime.CommonTokenStream_. +/// public class BufferedTokenStream: TokenStream { - /// The {@link org.antlr.v4.runtime.TokenSource} from which tokens for this stream are fetched. + /// + /// The _org.antlr.v4.runtime.TokenSource_ from which tokens for this stream are fetched. + /// internal var tokenSource: TokenSource + /// /// A collection of all tokens fetched from the token source. The list is - /// considered a complete view of the input once {@link #fetchedEOF} is set - /// to {@code true}. + /// considered a complete view of the input once _#fetchedEOF_ is set + /// to `true`. + /// internal var tokens: Array = Array() // Array(100 - /// The index into {@link #tokens} of the current token (next token to - /// {@link #consume}). {@link #tokens}{@code [}{@link #p}{@code ]} should be - /// {@link #LT LT(1)}. - /// - ///

This field is set to -1 when the stream is first constructed or when - /// {@link #setTokenSource} is called, indicating that the first token has + /// + /// The index into _#tokens_ of the current token (next token to + /// _#consume_). _#tokens_`[`_#p_`]` should be + /// _#LT LT(1)_. + /// + /// This field is set to -1 when the stream is first constructed or when + /// _#setTokenSource_ is called, indicating that the first token has /// not yet been fetched from the token source. For additional information, - /// see the documentation of {@link org.antlr.v4.runtime.IntStream} for a description of - /// Initializing Methods.

+ /// see the documentation of _org.antlr.v4.runtime.IntStream_ for a description of + /// Initializing Methods. + /// internal var p: Int = -1 - /// Indicates whether the {@link org.antlr.v4.runtime.Token#EOF} token has been fetched from - /// {@link #tokenSource} and added to {@link #tokens}. This field improves + /// + /// Indicates whether the _org.antlr.v4.runtime.Token#EOF_ token has been fetched from + /// _#tokenSource_ and added to _#tokens_. This field improves /// performance for the following cases: - /// - ///
    - ///
  • {@link #consume}: The lookahead check in {@link #consume} to prevent + /// + /// * _#consume_: The lookahead check in _#consume_ to prevent /// consuming the EOF symbol is optimized by checking the values of - /// {@link #fetchedEOF} and {@link #p} instead of calling {@link #LA}.
  • - ///
  • {@link #fetch}: The check to prevent adding multiple EOF symbols into - /// {@link #tokens} is trivial with this field.
  • - ///
      + /// _#fetchedEOF_ and _#p_ instead of calling _#LA_. + /// + /// * _#fetch_: The check to prevent adding multiple EOF symbols into + /// _#tokens_ is trivial with this field. + /// internal var fetchedEOF: Bool = false public init(_ tokenSource: TokenSource) { @@ -69,7 +80,6 @@ public class BufferedTokenStream: TokenStream { return 0 } - public func release(_ marker: Int) { // no resources to release } @@ -108,8 +118,6 @@ public class BufferedTokenStream: TokenStream { if try !skipEofCheck && LA(1) == BufferedTokenStream.EOF { throw ANTLRError.illegalState(msg: "cannot consume EOF") - //RuntimeException("cannot consume EOF") - //throw ANTLRError.IllegalState /* throw IllegalStateException("cannot consume EOF"); */ } if try sync(p + 1) { @@ -117,11 +125,13 @@ public class BufferedTokenStream: TokenStream { } } - /// Make sure index {@code i} in tokens has a token. - /// - /// - returns: {@code true} if a token is located at index {@code i}, otherwise - /// {@code false}. + /// + /// Make sure index `i` in tokens has a token. + /// + /// - returns: `true` if a token is located at index `i`, otherwise + /// `false`. /// - seealso: #get(int i) + /// @discardableResult internal func sync(_ i: Int) throws -> Bool { assert(i >= 0, "Expected: i>=0") @@ -135,9 +145,11 @@ public class BufferedTokenStream: TokenStream { return true } - /// Add {@code n} elements to buffer. - /// + /// + /// Add `n` elements to buffer. + /// /// - returns: The actual number of elements added to the buffer. 
+ /// internal func fetch(_ n: Int) throws -> Int { if fetchedEOF { return 0 @@ -159,16 +171,17 @@ public class BufferedTokenStream: TokenStream { return n } - public func get(_ i: Int) throws -> Token { if i < 0 || i >= tokens.count { let index = tokens.count - 1 throw ANTLRError.indexOutOfBounds(msg: "token index \(i) out of range 0..\(index)") } - return tokens[i] //tokens[i] + return tokens[i] } + /// /// Get all tokens from start..stop inclusively + /// public func get(_ start: Int,_ stop: Int) throws -> Array? { var stop = stop if start < 0 || stop < 0 { @@ -189,7 +202,6 @@ public class BufferedTokenStream: TokenStream { return subset } - //TODO: LT(i)!.getType(); public func LA(_ i: Int) throws -> Int { return try LT(i)!.getType() } @@ -222,17 +234,19 @@ public class BufferedTokenStream: TokenStream { return tokens[i] } + /// /// Allowed derived classes to modify the behavior of operations which change /// the current stream position by adjusting the target token index of a seek - /// operation. The default implementation simply returns {@code i}. If an + /// operation. The default implementation simply returns `i`. If an /// exception is thrown in this method, the current stream index should not be /// changed. - /// - ///

      For example, {@link org.antlr.v4.runtime.CommonTokenStream} overrides this method to ensure that - /// the seek target is always an on-channel token.

      - /// + /// + /// For example, _org.antlr.v4.runtime.CommonTokenStream_ overrides this method to ensure that + /// the seek target is always an on-channel token. + /// /// - parameter i: The target token index. /// - returns: The adjusted target token index. + /// internal func adjustSeekIndex(_ i: Int) throws -> Int { return i } @@ -248,7 +262,9 @@ public class BufferedTokenStream: TokenStream { p = try adjustSeekIndex(0) } + /// /// Reset this token stream by setting its token source. + /// public func setTokenSource(_ tokenSource: TokenSource) { self.tokenSource = tokenSource tokens.removeAll() @@ -256,57 +272,57 @@ public class BufferedTokenStream: TokenStream { fetchedEOF = false } - public func getTokens() -> Array { + public func getTokens() -> [Token] { return tokens } - public func getTokens(_ start: Int, _ stop: Int) throws -> Array? { + public func getTokens(_ start: Int, _ stop: Int) throws -> [Token]? { return try getTokens(start, stop, nil) } + /// /// Given a start and stop index, return a List of all tokens in /// the token type BitSet. Return null if no tokens were found. This /// method looks at both on and off channel tokens. - public func getTokens(_ start: Int, _ stop: Int, _ types: Set?) throws -> Array? { + /// + public func getTokens(_ start: Int, _ stop: Int, _ types: Set?) throws -> [Token]? 
{ try lazyInit() - if start < 0 || stop >= tokens.count || - stop < 0 || start >= tokens.count { - throw ANTLRError.indexOutOfBounds(msg: "start \(start) or stop \(stop) not in 0..\(tokens.count - 1)") + if start < 0 || start >= tokens.count || + stop < 0 || stop >= tokens.count { + throw ANTLRError.indexOutOfBounds(msg: "start \(start) or stop \(stop) not in 0...\(tokens.count - 1)") } if start > stop { return nil } - - var filteredTokens: Array = Array() + var filteredTokens = [Token]() for i in start...stop { - let t: Token = tokens[i] - if let types = types , !types.contains(t.getType()) { - }else { + let t = tokens[i] + if let types = types, !types.contains(t.getType()) { + } + else { filteredTokens.append(t) } - } if filteredTokens.isEmpty { return nil - //filteredTokens = nil; } return filteredTokens } - public func getTokens(_ start: Int, _ stop: Int, _ ttype: Int) throws -> Array? { - //TODO Set initialCapacity - var s: Set = Set() + public func getTokens(_ start: Int, _ stop: Int, _ ttype: Int) throws -> [Token]? { + var s = Set() s.insert(ttype) - //s.append(ttype); - return try getTokens(start, stop, s) + return try getTokens(start, stop, s) } + /// /// Given a starting index, return the index of the next token on channel. - /// Return {@code i} if {@code tokens[i]} is on channel. Return the index of - /// the EOF token if there are no tokens on channel between {@code i} and + /// Return `i` if `tokens[i]` is on channel. Return the index of + /// the EOF token if there are no tokens on channel between `i` and /// EOF. + /// internal func nextTokenOnChannel(_ i: Int, _ channel: Int) throws -> Int { var i = i try sync(i) @@ -328,14 +344,16 @@ public class BufferedTokenStream: TokenStream { return i } + /// /// Given a starting index, return the index of the previous token on - /// channel. Return {@code i} if {@code tokens[i]} is on channel. Return -1 - /// if there are no tokens on channel between {@code i} and 0. - /// - ///

      - /// If {@code i} specifies an index at or after the EOF token, the EOF token + /// channel. Return `i` if `tokens[i]` is on channel. Return -1 + /// if there are no tokens on channel between `i` and 0. + /// + /// + /// If `i` specifies an index at or after the EOF token, the EOF token /// index is returned. This is due to the fact that the EOF token is treated - /// as though it were on every channel.

      + /// as though it were on every channel. + /// internal func previousTokenOnChannel(_ i: Int, _ channel: Int) throws -> Int { var i = i try sync(i) @@ -356,9 +374,11 @@ public class BufferedTokenStream: TokenStream { return i } + /// /// Collect all tokens on specified channel to the right of /// the current token up until we see a token on DEFAULT_TOKEN_CHANNEL or /// EOF. If channel is -1, find any non default channel token. + /// public func getHiddenTokensToRight(_ tokenIndex: Int, _ channel: Int) throws -> Array? { try lazyInit() if tokenIndex < 0 || tokenIndex >= tokens.count { @@ -380,22 +400,24 @@ public class BufferedTokenStream: TokenStream { return filterForChannel(from, to, channel) } + /// /// Collect all hidden tokens (any off-default channel) to the right of /// the current token up until we see a token on DEFAULT_TOKEN_CHANNEL /// or EOF. + /// public func getHiddenTokensToRight(_ tokenIndex: Int) throws -> Array? { return try getHiddenTokensToRight(tokenIndex, -1) } + /// /// Collect all tokens on specified channel to the left of /// the current token up until we see a token on DEFAULT_TOKEN_CHANNEL. /// If channel is -1, find any non default channel token. + /// public func getHiddenTokensToLeft(_ tokenIndex: Int, _ channel: Int) throws -> Array? { try lazyInit() if tokenIndex < 0 || tokenIndex >= tokens.count { throw ANTLRError.indexOutOfBounds(msg: "\(tokenIndex) not in 0..\(tokens.count - 1)") - //RuntimeException("\(tokenIndex) not in 0..\(tokens.count-1)") - //throw ANTLRError.IndexOutOfBounds /* throw IndexOutOfBoundsException(tokenIndex+" not in 0.."+(tokens.count-1)); */ } if tokenIndex == 0 { @@ -415,8 +437,10 @@ public class BufferedTokenStream: TokenStream { return filterForChannel(from, to, channel) } + /// /// Collect all hidden tokens (any off-default channel) to the left of /// the current token up until we see a token on DEFAULT_TOKEN_CHANNEL. + /// public func getHiddenTokensToLeft(_ tokenIndex: Int) throws -> Array? 
{ return try getHiddenTokensToLeft(tokenIndex, -1) } @@ -435,7 +459,7 @@ public class BufferedTokenStream: TokenStream { } } } - if hidden.count == 0 { + if hidden.isEmpty { return nil } return hidden @@ -446,14 +470,13 @@ public class BufferedTokenStream: TokenStream { return tokenSource.getSourceName() } + /// /// Get the text of all tokens in this buffer. - - + /// public func getText() throws -> String { return try getText(Interval.of(0, size() - 1)) } - public func getText(_ interval: Interval) throws -> String { let start: Int = interval.a var stop: Int = interval.b @@ -490,7 +513,9 @@ public class BufferedTokenStream: TokenStream { return "" } + /// /// Get all tokens from lexer until EOF + /// public func fill() throws { try lazyInit() let blockSize: Int = 1000 diff --git a/runtime/Swift/Sources/Antlr4/CharStream.swift b/runtime/Swift/Sources/Antlr4/CharStream.swift index c4895112e..f21aabe3d 100644 --- a/runtime/Swift/Sources/Antlr4/CharStream.swift +++ b/runtime/Swift/Sources/Antlr4/CharStream.swift @@ -1,24 +1,29 @@ +/// /// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. /// Use of this file is governed by the BSD 3-clause license that /// can be found in the LICENSE.txt file in the project root. +/// +/// /// A source of characters for an ANTLR lexer. +/// public protocol CharStream: IntStream { + /// /// This method returns the text for a range of characters within this input /// stream. This method is guaranteed to not throw an exception if the - /// specified {@code interval} lies entirely within a marked range. For more - /// information about marked ranges, see {@link org.antlr.v4.runtime.IntStream#mark}. - /// + /// specified `interval` lies entirely within a marked range. For more + /// information about marked ranges, see _org.antlr.v4.runtime.IntStream#mark_. 
+ /// /// - parameter interval: an interval within the stream /// - returns: the text of the specified interval - /// - /// - NullPointerException if {@code interval} is {@code null} - /// - IllegalArgumentException if {@code interval.a < 0}, or if - /// {@code interval.b < interval.a - 1}, or if {@code interval.b} lies at or + /// + /// - throws: _ANTLRError.illegalArgument_ if `interval.a < 0`, or if + /// `interval.b < interval.a - 1`, or if `interval.b` lies at or /// past the end of the stream - /// - UnsupportedOperationException if the stream does not support + /// - throws: _ANTLRError.unsupportedOperation_ if the stream does not support /// getting the text of the specified interval - func getText(_ interval: Interval) -> String + /// + func getText(_ interval: Interval) throws -> String } diff --git a/runtime/Swift/Sources/Antlr4/CommonToken.swift b/runtime/Swift/Sources/Antlr4/CommonToken.swift index b7cbe3cc6..cef3973c0 100644 --- a/runtime/Swift/Sources/Antlr4/CommonToken.swift +++ b/runtime/Swift/Sources/Antlr4/CommonToken.swift @@ -1,104 +1,118 @@ +/// /// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. /// Use of this file is governed by the BSD 3-clause license that /// can be found in the LICENSE.txt file in the project root. +/// public class CommonToken: WritableToken { - /// An empty {@link org.antlr.v4.runtime.misc.Pair} which is used as the default value of - /// {@link #source} for tokens that do not have a source. - internal static let EMPTY_SOURCE: (TokenSource?, CharStream?) = (nil, nil) - - /// This is the backing field for {@link #getType} and {@link #setType}. + /// + /// This is the backing field for _#getType_ and _#setType_. + /// internal var type: Int - /// This is the backing field for {@link #getLine} and {@link #setLine}. - internal var line: Int = 0 + /// + /// This is the backing field for _#getLine_ and _#setLine_. 
+ /// + internal var line = 0 - /// This is the backing field for {@link #getCharPositionInLine} and - /// {@link #setCharPositionInLine}. - internal var charPositionInLine: Int = -1 + /// + /// This is the backing field for _#getCharPositionInLine_ and + /// _#setCharPositionInLine_. + /// + internal var charPositionInLine = -1 // set to invalid position - /// This is the backing field for {@link #getChannel} and - /// {@link #setChannel}. - internal var channel: Int = DEFAULT_CHANNEL + /// + /// This is the backing field for _#getChannel_ and + /// _#setChannel_. + /// + internal var channel = DEFAULT_CHANNEL - /// This is the backing field for {@link #getTokenSource} and - /// {@link #getInputStream}. - /// - ///

      + /// + /// This is the backing field for _#getTokenSource_ and + /// _#getInputStream_. + /// + /// /// These properties share a field to reduce the memory footprint of - /// {@link org.antlr.v4.runtime.CommonToken}. Tokens created by a {@link org.antlr.v4.runtime.CommonTokenFactory} from + /// _org.antlr.v4.runtime.CommonToken_. Tokens created by a _org.antlr.v4.runtime.CommonTokenFactory_ from /// the same source and input stream share a reference to the same - /// {@link org.antlr.v4.runtime.misc.Pair} containing these values.

      + /// _org.antlr.v4.runtime.misc.Pair_ containing these values. + /// - internal var source: (TokenSource?, CharStream?) + internal let source: TokenSourceAndStream - /// This is the backing field for {@link #getText} when the token text is - /// explicitly set in the constructor or via {@link #setText}. - /// + /// + /// This is the backing field for _#getText_ when the token text is + /// explicitly set in the constructor or via _#setText_. + /// /// - seealso: #getText() + /// internal var text: String? - /// This is the backing field for {@link #getTokenIndex} and - /// {@link #setTokenIndex}. - internal var index: Int = -1 + /// + /// This is the backing field for _#getTokenIndex_ and + /// _#setTokenIndex_. + /// + internal var index = -1 - /// This is the backing field for {@link #getStartIndex} and - /// {@link #setStartIndex}. - internal var start: Int = 0 + /// + /// This is the backing field for _#getStartIndex_ and + /// _#setStartIndex_. + /// + internal var start = 0 - /// This is the backing field for {@link #getStopIndex} and - /// {@link #setStopIndex}. - internal var stop: Int = 0 + /// + /// This is the backing field for _#getStopIndex_ and + /// _#setStopIndex_. + /// + internal var stop = 0 - /// Constructs a new {@link org.antlr.v4.runtime.CommonToken} with the specified token type. - /// + /// + /// Constructs a new _org.antlr.v4.runtime.CommonToken_ with the specified token type. + /// /// - parameter type: The token type. 
+ /// private var _visited: Bool = false public init(_ type: Int) { self.type = type - self.source = CommonToken.EMPTY_SOURCE + self.source = TokenSourceAndStream.EMPTY } - public init(_ source: (TokenSource?, CharStream?), _ type: Int, _ channel: Int, _ start: Int, _ stop: Int) { + public init(_ source: TokenSourceAndStream, _ type: Int, _ channel: Int, _ start: Int, _ stop: Int) { self.source = source self.type = type self.channel = channel self.start = start self.stop = stop - if let tsource = source.0 { + if let tsource = source.tokenSource { self.line = tsource.getLine() self.charPositionInLine = tsource.getCharPositionInLine() } } - /// Constructs a new {@link org.antlr.v4.runtime.CommonToken} with the specified token type and + /// + /// Constructs a new _org.antlr.v4.runtime.CommonToken_ with the specified token type and /// text. - /// + /// /// - parameter type: The token type. /// - parameter text: The text of the token. + /// public init(_ type: Int, _ text: String?) { self.type = type self.channel = CommonToken.DEFAULT_CHANNEL self.text = text - self.source = CommonToken.EMPTY_SOURCE + self.source = TokenSourceAndStream.EMPTY } - /// Constructs a new {@link org.antlr.v4.runtime.CommonToken} as a copy of another {@link org.antlr.v4.runtime.Token}. - /// - ///

      - /// If {@code oldToken} is also a {@link org.antlr.v4.runtime.CommonToken} instance, the newly - /// constructed token will share a reference to the {@link #text} field and - /// the {@link org.antlr.v4.runtime.misc.Pair} stored in {@link #source}. Otherwise, {@link #text} will - /// be assigned the result of calling {@link #getText}, and {@link #source} - /// will be constructed from the result of {@link org.antlr.v4.runtime.Token#getTokenSource} and - /// {@link org.antlr.v4.runtime.Token#getInputStream}.

      + /// + /// Constructs a new _org.antlr.v4.runtime.CommonToken_ as a copy of another _org.antlr.v4.runtime.Token_. /// /// - parameter oldToken: The token to copy. + /// public init(_ oldToken: Token) { type = oldToken.getType() line = oldToken.getLine() @@ -107,14 +121,8 @@ public class CommonToken: WritableToken { channel = oldToken.getChannel() start = oldToken.getStartIndex() stop = oldToken.getStopIndex() - - if oldToken is CommonToken { - text = (oldToken as! CommonToken).text - source = (oldToken as! CommonToken).source - } else { - text = oldToken.getText() - source = (oldToken.getTokenSource(), oldToken.getInputStream()) - } + text = oldToken.getText() + source = oldToken.getTokenSourceAndStream() } @@ -129,14 +137,19 @@ public class CommonToken: WritableToken { public func getText() -> String? { - if text != nil { - return text! + if let text = text { + return text } if let input = getInputStream() { - let n: Int = input.size() + let n = input.size() if start < n && stop < n { - return input.getText(Interval.of(start, stop)) + do { + return try input.getText(Interval.of(start, stop)) + } + catch { + return nil + } } else { return "" } @@ -146,13 +159,15 @@ public class CommonToken: WritableToken { } + /// /// Explicitly set the text for this token. If {code text} is not - /// {@code null}, then {@link #getText} will return this value rather than + /// `null`, then _#getText_ will return this value rather than /// extracting the text from the input. - /// - /// - parameter text: The explicit text of the token, or {@code null} if the text + /// + /// - parameter text: The explicit text of the token, or `null` if the text /// should be obtained from the input along with the start and stop indexes /// of the token. + /// public func setText(_ text: String) { self.text = text @@ -217,12 +232,16 @@ public class CommonToken: WritableToken { public func getTokenSource() -> TokenSource? 
{ - return source.0 + return source.tokenSource } public func getInputStream() -> CharStream? { - return source.1 + return source.stream + } + + public func getTokenSourceAndStream() -> TokenSourceAndStream { + return source } public var description: String { @@ -230,10 +249,8 @@ public class CommonToken: WritableToken { } public func toString(_ r: Recognizer?) -> String { - var channelStr: String = "" - if channel > 0 { - channelStr = ",channel=\(channel)" - } + let channelStr = (channel > 0 ? ",channel=\(channel)" : "") + var txt: String if let tokenText = getText() { txt = tokenText.replacingOccurrences(of: "\n", with: "\\n") @@ -242,12 +259,16 @@ public class CommonToken: WritableToken { } else { txt = "" } - var typeString = "\(type)" + let typeString: String if let r = r { - typeString = r.getVocabulary().getDisplayName(type); + typeString = r.getVocabulary().getDisplayName(type) + } + else { + typeString = "\(type)" } return "[@\(getTokenIndex()),\(start):\(stop)='\(txt)',<\(typeString)>\(channelStr),\(line):\(getCharPositionInLine())]" } + public var visited: Bool { get { return _visited diff --git a/runtime/Swift/Sources/Antlr4/CommonTokenFactory.swift b/runtime/Swift/Sources/Antlr4/CommonTokenFactory.swift index 92d40ca7f..d7922802f 100644 --- a/runtime/Swift/Sources/Antlr4/CommonTokenFactory.swift +++ b/runtime/Swift/Sources/Antlr4/CommonTokenFactory.swift @@ -1,70 +1,81 @@ +/// /// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. /// Use of this file is governed by the BSD 3-clause license that /// can be found in the LICENSE.txt file in the project root. +/// -/// This default implementation of {@link org.antlr.v4.runtime.TokenFactory} creates -/// {@link org.antlr.v4.runtime.CommonToken} objects. +/// +/// This default implementation of _org.antlr.v4.runtime.TokenFactory_ creates +/// _org.antlr.v4.runtime.CommonToken_ objects. 
+/// public class CommonTokenFactory: TokenFactory { - /// The default {@link org.antlr.v4.runtime.CommonTokenFactory} instance. - /// - ///

      + /// + /// The default _org.antlr.v4.runtime.CommonTokenFactory_ instance. + /// + /// /// This token factory does not explicitly copy token text when constructing - /// tokens.

      + /// tokens. + /// public static let DEFAULT: TokenFactory = CommonTokenFactory() - /// Indicates whether {@link org.antlr.v4.runtime.CommonToken#setText} should be called after + /// + /// Indicates whether _org.antlr.v4.runtime.CommonToken#setText_ should be called after /// constructing tokens to explicitly set the text. This is useful for cases /// where the input stream might not be able to provide arbitrary substrings /// of text from the input after the lexer creates a token (e.g. the - /// implementation of {@link org.antlr.v4.runtime.CharStream#getText} in - /// {@link org.antlr.v4.runtime.UnbufferedCharStream} throws an - /// {@link UnsupportedOperationException}). Explicitly setting the token text - /// allows {@link org.antlr.v4.runtime.Token#getText} to be called at any time regardless of the + /// implementation of _org.antlr.v4.runtime.CharStream#getText_ in + /// _org.antlr.v4.runtime.UnbufferedCharStream_ throws an + /// _UnsupportedOperationException_). Explicitly setting the token text + /// allows _org.antlr.v4.runtime.Token#getText_ to be called at any time regardless of the /// input stream implementation. - /// - ///

      - /// The default value is {@code false} to avoid the performance and memory - /// overhead of copying text for every token unless explicitly requested.

      + /// + /// + /// The default value is `false` to avoid the performance and memory + /// overhead of copying text for every token unless explicitly requested. + /// internal final var copyText: Bool - /// Constructs a {@link org.antlr.v4.runtime.CommonTokenFactory} with the specified value for - /// {@link #copyText}. - /// - ///

      - /// When {@code copyText} is {@code false}, the {@link #DEFAULT} instance - /// should be used instead of constructing a new instance.

      - /// - /// - parameter copyText: The value for {@link #copyText}. + /// + /// Constructs a _org.antlr.v4.runtime.CommonTokenFactory_ with the specified value for + /// _#copyText_. + /// + /// + /// When `copyText` is `false`, the _#DEFAULT_ instance + /// should be used instead of constructing a new instance. + /// + /// - parameter copyText: The value for _#copyText_. + /// public init(_ copyText: Bool) { self.copyText = copyText } - /// Constructs a {@link org.antlr.v4.runtime.CommonTokenFactory} with {@link #copyText} set to - /// {@code false}. - /// - ///

      - /// The {@link #DEFAULT} instance should be used instead of calling this - /// directly.

      + /// + /// Constructs a _org.antlr.v4.runtime.CommonTokenFactory_ with _#copyText_ set to + /// `false`. + /// + /// + /// The _#DEFAULT_ instance should be used instead of calling this + /// directly. + /// public convenience init() { self.init(false) } - public func create(_ source: (TokenSource?, CharStream?), _ type: Int, _ text: String?, + public func create(_ source: TokenSourceAndStream, _ type: Int, _ text: String?, _ channel: Int, _ start: Int, _ stop: Int, _ line: Int, _ charPositionInLine: Int) -> Token { - let t: CommonToken = CommonToken(source, type, channel, start, stop) + let t = CommonToken(source, type, channel, start, stop) t.setLine(line) t.setCharPositionInLine(charPositionInLine) - if text != nil { - t.setText(text!) - } else { - if let cStream = source.1 , copyText { - t.setText(cStream.getText(Interval.of(start, stop))) - } + if let text = text { + t.setText(text) + } + else if let cStream = source.stream, copyText { + t.setText(try! cStream.getText(Interval.of(start, stop))) } return t diff --git a/runtime/Swift/Sources/Antlr4/CommonTokenStream.swift b/runtime/Swift/Sources/Antlr4/CommonTokenStream.swift index 5095a1b25..8abaac82d 100644 --- a/runtime/Swift/Sources/Antlr4/CommonTokenStream.swift +++ b/runtime/Swift/Sources/Antlr4/CommonTokenStream.swift @@ -1,56 +1,66 @@ +/// /// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. /// Use of this file is governed by the BSD 3-clause license that /// can be found in the LICENSE.txt file in the project root. +/// -/// This class extends {@link org.antlr.v4.runtime.BufferedTokenStream} with functionality to filter +/// +/// This class extends _org.antlr.v4.runtime.BufferedTokenStream_ with functionality to filter /// token streams to tokens on a particular channel (tokens where -/// {@link org.antlr.v4.runtime.Token#getChannel} returns a particular value). -/// -///

      +/// _org.antlr.v4.runtime.Token#getChannel_ returns a particular value). +/// +/// /// This token stream provides access to all tokens by index or when calling -/// methods like {@link #getText}. The channel filtering is only used for code -/// accessing tokens via the lookahead methods {@link #LA}, {@link #LT}, and -/// {@link #LB}.

      -/// -///

      +/// methods like _#getText_. The channel filtering is only used for code +/// accessing tokens via the lookahead methods _#LA_, _#LT_, and +/// _#LB_. +/// +/// /// By default, tokens are placed on the default channel -/// ({@link org.antlr.v4.runtime.Token#DEFAULT_CHANNEL}), but may be reassigned by using the -/// {@code ->channel(HIDDEN)} lexer command, or by using an embedded action to -/// call {@link org.antlr.v4.runtime.Lexer#setChannel}. -///

      -/// -///

      -/// Note: lexer rules which use the {@code ->skip} lexer command or call -/// {@link org.antlr.v4.runtime.Lexer#skip} do not produce tokens at all, so input text matched by +/// (_org.antlr.v4.runtime.Token#DEFAULT_CHANNEL_), but may be reassigned by using the +/// `->channel(HIDDEN)` lexer command, or by using an embedded action to +/// call _org.antlr.v4.runtime.Lexer#setChannel_. +/// +/// +/// +/// Note: lexer rules which use the `->skip` lexer command or call +/// _org.antlr.v4.runtime.Lexer#skip_ do not produce tokens at all, so input text matched by /// such a rule will not be available as part of the token stream, regardless of -/// channel.

      +/// channel. +/// public class CommonTokenStream: BufferedTokenStream { + /// /// Specifies the channel to use for filtering tokens. - /// - ///

      - /// The default value is {@link org.antlr.v4.runtime.Token#DEFAULT_CHANNEL}, which matches the - /// default channel assigned to tokens created by the lexer.

      - internal var channel: Int = CommonToken.DEFAULT_CHANNEL + /// + /// + /// The default value is _org.antlr.v4.runtime.Token#DEFAULT_CHANNEL_, which matches the + /// default channel assigned to tokens created by the lexer. + /// + internal var channel = CommonToken.DEFAULT_CHANNEL - /// Constructs a new {@link org.antlr.v4.runtime.CommonTokenStream} using the specified token - /// source and the default token channel ({@link org.antlr.v4.runtime.Token#DEFAULT_CHANNEL}). - /// + /// + /// Constructs a new _org.antlr.v4.runtime.CommonTokenStream_ using the specified token + /// source and the default token channel (_org.antlr.v4.runtime.Token#DEFAULT_CHANNEL_). + /// /// - parameter tokenSource: The token source. + /// public override init(_ tokenSource: TokenSource) { super.init(tokenSource) } - /// Constructs a new {@link org.antlr.v4.runtime.CommonTokenStream} using the specified token + /// + /// Constructs a new _org.antlr.v4.runtime.CommonTokenStream_ using the specified token /// source and filtering tokens to the specified channel. Only tokens whose - /// {@link org.antlr.v4.runtime.Token#getChannel} matches {@code channel} or have the - /// {@link org.antlr.v4.runtime.Token#getType} equal to {@link org.antlr.v4.runtime.Token#EOF} will be returned by the + /// _org.antlr.v4.runtime.Token#getChannel_ matches `channel` or have the + /// _org.antlr.v4.runtime.Token#getType_ equal to _org.antlr.v4.runtime.Token#EOF_ will be returned by the /// token stream lookahead methods. - /// + /// /// - parameter tokenSource: The token source. /// - parameter channel: The channel to use for filtering tokens. 
+ /// public convenience init(_ tokenSource: TokenSource, _ channel: Int) { self.init(tokenSource) self.channel = channel @@ -67,8 +77,8 @@ public class CommonTokenStream: BufferedTokenStream { return nil } - var i: Int = p - var n: Int = 1 + var i = p + var n = 1 // find k good tokens looking backwards while n <= k { // skip off-channel tokens @@ -91,8 +101,8 @@ public class CommonTokenStream: BufferedTokenStream { if k < 0 { return try LB(-k) } - var i: Int = p - var n: Int = 1 // we know tokens[p] is a good one + var i = p + var n = 1 // we know tokens[p] is a good one // find k good tokens while n < k { // skip off-channel tokens, but make sure to not look past EOF @@ -105,13 +115,15 @@ public class CommonTokenStream: BufferedTokenStream { return tokens[i] } + /// /// Count EOF just once. + /// public func getNumberOfOnChannelTokens() throws -> Int { - var n: Int = 0 + var n = 0 try fill() let length = tokens.count for i in 0.. - /// This implementation prints messages to {@link System#err} containing the - /// values of {@code line}, {@code charPositionInLine}, and {@code msg} using - /// the following format.

      - /// - ///
      -    /// line line:charPositionInLine msg
      -    /// 
      - override - public func syntaxError(_ recognizer: Recognizer, - _ offendingSymbol: AnyObject?, - _ line: Int, - _ charPositionInLine: Int, - _ msg: String, - _ e: AnyObject? + /// + /// + /// This implementation prints messages to _System#err_ containing the + /// values of `line`, `charPositionInLine`, and `msg` using + /// the following format. + /// + /// line __line__:__charPositionInLine__ __msg__ + /// + /// + override public func syntaxError(_ recognizer: Recognizer, + _ offendingSymbol: AnyObject?, + _ line: Int, + _ charPositionInLine: Int, + _ msg: String, + _ e: AnyObject? ) { if Parser.ConsoleError { errPrint("line \(line):\(charPositionInLine) \(msg)") diff --git a/runtime/Swift/Sources/Antlr4/DefaultErrorStrategy.swift b/runtime/Swift/Sources/Antlr4/DefaultErrorStrategy.swift index a7221b8dc..d6ac65767 100644 --- a/runtime/Swift/Sources/Antlr4/DefaultErrorStrategy.swift +++ b/runtime/Swift/Sources/Antlr4/DefaultErrorStrategy.swift @@ -1,92 +1,96 @@ +/// /// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. /// Use of this file is governed by the BSD 3-clause license that /// can be found in the LICENSE.txt file in the project root. +/// - - -/// This is the default implementation of {@link org.antlr.v4.runtime.ANTLRErrorStrategy} used for +/// +/// This is the default implementation of _org.antlr.v4.runtime.ANTLRErrorStrategy_ used for /// error reporting and recovery in ANTLR parsers. +/// import Foundation public class DefaultErrorStrategy: ANTLRErrorStrategy { + /// /// Indicates whether the error strategy is currently "recovering from an /// error". This is used to suppress reporting multiple error messages while /// attempting to recover from a detected syntax error. - /// + /// /// - seealso: #inErrorRecoveryMode + /// internal var errorRecoveryMode: Bool = false + /// /// The index into the input stream where the last error occurred. 
/// This is used to prevent infinite loops where an error is found /// but no token is consumed during recovery...another error is found, /// ad nauseum. This is a failsafe mechanism to guarantee that at least /// one token/tree node is consumed for two errors. + /// internal var lastErrorIndex: Int = -1 internal var lastErrorStates: IntervalSet? - /// {@inheritDoc} - /// - ///

      The default implementation simply calls {@link #endErrorCondition} to - /// ensure that the handler is not in error recovery mode.

      - + /// + /// The default implementation simply calls _#endErrorCondition_ to + /// ensure that the handler is not in error recovery mode. + /// public func reset(_ recognizer: Parser) { endErrorCondition(recognizer) } + /// /// This method is called to enter error recovery mode when a recognition /// exception is reported. - /// + /// /// - parameter recognizer: the parser instance + /// internal func beginErrorCondition(_ recognizer: Parser) { errorRecoveryMode = true } - /// {@inheritDoc} - public func inErrorRecoveryMode(_ recognizer: Parser) -> Bool { return errorRecoveryMode } + /// /// This method is called to leave error recovery mode after recovering from /// a recognition exception. - /// + /// /// - parameter recognizer: + /// internal func endErrorCondition(_ recognizer: Parser) { errorRecoveryMode = false lastErrorStates = nil lastErrorIndex = -1 } - /// {@inheritDoc} - /// - ///

      The default implementation simply calls {@link #endErrorCondition}.

      - + /// + /// The default implementation simply calls _#endErrorCondition_. + /// public func reportMatch(_ recognizer: Parser) { endErrorCondition(recognizer) } - /// {@inheritDoc} - /// - ///

      The default implementation returns immediately if the handler is already - /// in error recovery mode. Otherwise, it calls {@link #beginErrorCondition} - /// and dispatches the reporting task based on the runtime type of {@code e} - /// according to the following table.

      - /// - ///
        - ///
      • {@link org.antlr.v4.runtime.NoViableAltException}: Dispatches the call to - /// {@link #reportNoViableAlternative}
      • - ///
      • {@link org.antlr.v4.runtime.InputMismatchException}: Dispatches the call to - /// {@link #reportInputMismatch}
      • - ///
      • {@link org.antlr.v4.runtime.FailedPredicateException}: Dispatches the call to - /// {@link #reportFailedPredicate}
      • - ///
      • All other types: calls {@link org.antlr.v4.runtime.Parser#notifyErrorListeners} to report - /// the exception
      • - ///
      - + /// + /// + /// The default implementation returns immediately if the handler is already + /// in error recovery mode. Otherwise, it calls _#beginErrorCondition_ + /// and dispatches the reporting task based on the runtime type of `e` + /// according to the following table. + /// + /// * _org.antlr.v4.runtime.NoViableAltException_: Dispatches the call to + /// _#reportNoViableAlternative_ + /// * _org.antlr.v4.runtime.InputMismatchException_: Dispatches the call to + /// _#reportInputMismatch_ + /// * _org.antlr.v4.runtime.FailedPredicateException_: Dispatches the call to + /// _#reportFailedPredicate_ + /// * All other types: calls _org.antlr.v4.runtime.Parser#notifyErrorListeners_ to report + /// the exception + /// public func reportError(_ recognizer: Parser, - _ e: AnyObject) { + _ e: RecognitionException) { // if we've already reported an error and have not matched a token // yet successfully, don't report any errors. if inErrorRecoveryMode(recognizer) { @@ -94,31 +98,27 @@ public class DefaultErrorStrategy: ANTLRErrorStrategy { return // don't report spurious errors } beginErrorCondition(recognizer) - //TODO: exception handler - if (e is NoViableAltException) { - try! reportNoViableAlternative(recognizer, e as! NoViableAltException); - } else { - if (e is InputMismatchException) { - reportInputMismatch(recognizer, e as! InputMismatchException); - } else { - if (e is FailedPredicateException) { - reportFailedPredicate(recognizer, e as! FailedPredicateException); - } else { - errPrint("unknown recognition error type: " + String(describing: type(of: e))); - let re = (e as! RecognitionException) - recognizer.notifyErrorListeners(re.getOffendingToken(), re.message ?? "", e); - } - } + if let nvae = e as? NoViableAltException { + reportNoViableAlternative(recognizer, nvae) + } + else if let ime = e as? InputMismatchException { + reportInputMismatch(recognizer, ime) + } + else if let fpe = e as? 
FailedPredicateException { + reportFailedPredicate(recognizer, fpe) + } + else { + errPrint("unknown recognition error type: " + String(describing: type(of: e))) + recognizer.notifyErrorListeners(e.getOffendingToken(), e.message ?? "", e) } } - /// {@inheritDoc} - /// - ///

      The default implementation resynchronizes the parser by consuming tokens + /// + /// The default implementation resynchronizes the parser by consuming tokens /// until we find one in the resynchronization set--loosely the set of tokens - /// that can follow the current rule.

      - - public func recover(_ recognizer: Parser, _ e: AnyObject) throws { + /// that can follow the current rule. + /// + public func recover(_ recognizer: Parser, _ e: RecognitionException) throws { // print("recover in "+recognizer.getRuleInvocationStack()+ // " index="+getTokenStream(recognizer).index()+ // ", lastErrorIndex="+ @@ -138,77 +138,74 @@ public class DefaultErrorStrategy: ANTLRErrorStrategy { } lastErrorIndex = getTokenStream(recognizer).index() if lastErrorStates == nil { - lastErrorStates = try IntervalSet() + lastErrorStates = IntervalSet() } try lastErrorStates!.add(recognizer.getState()) - let followSet: IntervalSet = try getErrorRecoverySet(recognizer) + let followSet = getErrorRecoverySet(recognizer) try consumeUntil(recognizer, followSet) } - /// The default implementation of {@link org.antlr.v4.runtime.ANTLRErrorStrategy#sync} makes sure + /// + /// The default implementation of _org.antlr.v4.runtime.ANTLRErrorStrategy#sync_ makes sure /// that the current lookahead symbol is consistent with what were expecting /// at this point in the ATN. You can call this anytime but ANTLR only /// generates code to check before subrules/loops and each iteration. - /// - ///

      Implements Jim Idle's magic sync mechanism in closures and optional - /// subrules. E.g.,

      - /// - ///
      +    /// 
      +    /// Implements Jim Idle's magic sync mechanism in closures and optional
      +    /// subrules. E.g.,
      +    /// 
      +    /// 
           /// a : sync ( stuff sync )* ;
           /// sync : {consume to what can follow sync} ;
      -    /// 
      - /// - /// At the start of a sub rule upon error, {@link #sync} performs single + /// + /// + /// At the start of a sub rule upon error, _#sync_ performs single /// token deletion, if possible. If it can't do that, it bails on the current /// rule and uses the default error recovery, which consumes until the /// resynchronization set of the current rule. - /// - ///

      If the sub rule is optional ({@code (...)?}, {@code (...)*}, or block + /// + /// If the sub rule is optional (`(...)?`, `(...)*`, or block /// with an empty alternative), then the expected set includes what follows - /// the subrule.

      - /// - ///

      During loop iteration, it consumes until it sees a token that can start a + /// the subrule. + /// + /// During loop iteration, it consumes until it sees a token that can start a /// sub rule or what follows loop. Yes, that is pretty aggressive. We opt to - /// stay in the loop as long as possible.

      - /// - ///

      ORIGINS

      - /// - ///

      Previous versions of ANTLR did a poor job of their recovery within loops. + /// stay in the loop as long as possible. + /// + /// __ORIGINS__ + /// + /// Previous versions of ANTLR did a poor job of their recovery within loops. /// A single mismatch token or missing token would force the parser to bail - /// out of the entire rules surrounding the loop. So, for rule

      - /// - ///
      +    /// out of the entire rules surrounding the loop. So, for rule
      +    /// 
      +    /// 
           /// classDef : 'class' ID '{' member* '}'
      -    /// 
      - /// + /// + /// /// input with an extra token between members would force the parser to /// consume until it found the next class definition rather than the next /// member definition of the current class. - /// - ///

      This functionality cost a little bit of effort because the parser has to + /// + /// This functionality cost a little bit of effort because the parser has to /// compare token set at the start of the loop and at each iteration. If for /// some reason speed is suffering for you, you can turn off this - /// functionality by simply overriding this method as a blank { }.

      + /// functionality by simply overriding this method as a blank { }. + /// public func sync(_ recognizer: Parser) throws { - let s: ATNState = recognizer.getInterpreter().atn.states[recognizer.getState()]! + let s = recognizer.getInterpreter().atn.states[recognizer.getState()]! // errPrint("sync @ "+s.stateNumber+"="+s.getClass().getSimpleName()); // If already recovering, don't try to sync if inErrorRecoveryMode(recognizer) { return } - let tokens: TokenStream = getTokenStream(recognizer) - let la: Int = try tokens.LA(1) + let tokens = getTokenStream(recognizer) + let la = try tokens.LA(1) // try cheaper subset first; might get lucky. seems to shave a wee bit off - //let set : IntervalSet = recognizer.getATN().nextTokens(s) - - if try recognizer.getATN().nextTokens(s).contains(CommonToken.EPSILON) { - return - } - - if try recognizer.getATN().nextTokens(s).contains(la) { + let nextToks = recognizer.getATN().nextTokens(s) + if nextToks.contains(CommonToken.EPSILON) || nextToks.contains(la) { return } @@ -221,15 +218,14 @@ public class DefaultErrorStrategy: ANTLRErrorStrategy { if try singleTokenDeletion(recognizer) != nil { return } - throw try ANTLRException.recognition(e: InputMismatchException(recognizer)) + throw ANTLRException.recognition(e: InputMismatchException(recognizer)) case ATNState.PLUS_LOOP_BACK: fallthrough case ATNState.STAR_LOOP_BACK: // errPrint("at loop back: "+s.getClass().getSimpleName()); - try reportUnwantedToken(recognizer) - let expecting: IntervalSet = try recognizer.getExpectedTokens() - let whatFollowsLoopIterationOrRule: IntervalSet = - try expecting.or(try getErrorRecoverySet(recognizer)) as! IntervalSet + reportUnwantedToken(recognizer) + let expecting = try recognizer.getExpectedTokens() + let whatFollowsLoopIterationOrRule = expecting.or(getErrorRecoverySet(recognizer)) as! 
IntervalSet try consumeUntil(recognizer, whatFollowsLoopIterationOrRule) break @@ -239,258 +235,274 @@ public class DefaultErrorStrategy: ANTLRErrorStrategy { } } - /// This is called by {@link #reportError} when the exception is a - /// {@link org.antlr.v4.runtime.NoViableAltException}. - /// + /// + /// This is called by _#reportError_ when the exception is a + /// _org.antlr.v4.runtime.NoViableAltException_. + /// /// - seealso: #reportError - /// + /// /// - parameter recognizer: the parser instance /// - parameter e: the recognition exception + /// internal func reportNoViableAlternative(_ recognizer: Parser, - _ e: NoViableAltException) throws { - let tokens: TokenStream? = getTokenStream(recognizer) + _ e: NoViableAltException) { + let tokens = getTokenStream(recognizer) var input: String - if let tokens = tokens { - if e.getStartToken().getType() == CommonToken.EOF { - input = "" - } else { + if e.getStartToken().getType() == CommonToken.EOF { + input = "" + } + else { + do { input = try tokens.getText(e.getStartToken(), e.getOffendingToken()) } - } else { - input = "" + catch { + input = "" + } } - let msg: String = "no viable alternative at input " + escapeWSAndQuote(input) + let msg = "no viable alternative at input " + escapeWSAndQuote(input) recognizer.notifyErrorListeners(e.getOffendingToken(), msg, e) } - /// This is called by {@link #reportError} when the exception is an - /// {@link org.antlr.v4.runtime.InputMismatchException}. - /// + /// + /// This is called by _#reportError_ when the exception is an + /// _org.antlr.v4.runtime.InputMismatchException_. 
+ /// /// - seealso: #reportError - /// + /// /// - parameter recognizer: the parser instance /// - parameter e: the recognition exception + /// internal func reportInputMismatch(_ recognizer: Parser, _ e: InputMismatchException) { - let msg: String = "mismatched input " + getTokenErrorDisplay(e.getOffendingToken()) + + let msg = "mismatched input " + getTokenErrorDisplay(e.getOffendingToken()) + " expecting " + e.getExpectedTokens()!.toString(recognizer.getVocabulary()) recognizer.notifyErrorListeners(e.getOffendingToken(), msg, e) } - /// This is called by {@link #reportError} when the exception is a - /// {@link org.antlr.v4.runtime.FailedPredicateException}. - /// + /// + /// This is called by _#reportError_ when the exception is a + /// _org.antlr.v4.runtime.FailedPredicateException_. + /// /// - seealso: #reportError - /// + /// /// - parameter recognizer: the parser instance /// - parameter e: the recognition exception + /// internal func reportFailedPredicate(_ recognizer: Parser, _ e: FailedPredicateException) { - let ruleName: String = recognizer.getRuleNames()[recognizer._ctx!.getRuleIndex()] - let msg: String = "rule " + ruleName + " " + e.message! // e.getMessage() + let ruleName = recognizer.getRuleNames()[recognizer._ctx!.getRuleIndex()] + let msg = "rule \(ruleName) \(e.message!)" recognizer.notifyErrorListeners(e.getOffendingToken(), msg, e) } + /// /// This method is called to report a syntax error which requires the removal /// of a token from the input stream. At the time this method is called, the - /// erroneous symbol is current {@code LT(1)} symbol and has not yet been + /// erroneous symbol is current `LT(1)` symbol and has not yet been /// removed from the input stream. When this method returns, - /// {@code recognizer} is in error recovery mode. - /// - ///

      This method is called when {@link #singleTokenDeletion} identifies + /// `recognizer` is in error recovery mode. + /// + /// This method is called when _#singleTokenDeletion_ identifies /// single-token deletion as a viable recovery strategy for a mismatched - /// input error.

      - /// - ///

      The default implementation simply returns if the handler is already in - /// error recovery mode. Otherwise, it calls {@link #beginErrorCondition} to + /// input error. + /// + /// The default implementation simply returns if the handler is already in + /// error recovery mode. Otherwise, it calls _#beginErrorCondition_ to /// enter error recovery mode, followed by calling - /// {@link org.antlr.v4.runtime.Parser#notifyErrorListeners}.

      - /// + /// _org.antlr.v4.runtime.Parser#notifyErrorListeners_. + /// /// - parameter recognizer: the parser instance - internal func reportUnwantedToken(_ recognizer: Parser) throws { + /// + internal func reportUnwantedToken(_ recognizer: Parser) { if inErrorRecoveryMode(recognizer) { return } beginErrorCondition(recognizer) - let t: Token = try recognizer.getCurrentToken() - let tokenName: String = getTokenErrorDisplay(t) - let expecting: IntervalSet = try getExpectedTokens(recognizer) - let msg: String = "extraneous input " + tokenName + " expecting " + - expecting.toString(recognizer.getVocabulary()) + let t = try? recognizer.getCurrentToken() + let tokenName = getTokenErrorDisplay(t) + let expecting = (try? getExpectedTokens(recognizer)) ?? IntervalSet.EMPTY_SET + let msg = "extraneous input \(tokenName) expecting \(expecting.toString(recognizer.getVocabulary()))" recognizer.notifyErrorListeners(t, msg, nil) } + /// /// This method is called to report a syntax error which requires the /// insertion of a missing token into the input stream. At the time this /// method is called, the missing token has not yet been inserted. When this - /// method returns, {@code recognizer} is in error recovery mode. - /// - ///

      This method is called when {@link #singleTokenInsertion} identifies + /// method returns, `recognizer` is in error recovery mode. + /// + /// This method is called when _#singleTokenInsertion_ identifies /// single-token insertion as a viable recovery strategy for a mismatched - /// input error.

      - /// - ///

      The default implementation simply returns if the handler is already in - /// error recovery mode. Otherwise, it calls {@link #beginErrorCondition} to + /// input error. + /// + /// The default implementation simply returns if the handler is already in + /// error recovery mode. Otherwise, it calls _#beginErrorCondition_ to /// enter error recovery mode, followed by calling - /// {@link org.antlr.v4.runtime.Parser#notifyErrorListeners}.

      - /// + /// _org.antlr.v4.runtime.Parser#notifyErrorListeners_. + /// /// - parameter recognizer: the parser instance - internal func reportMissingToken(_ recognizer: Parser) throws { + /// + internal func reportMissingToken(_ recognizer: Parser) { if inErrorRecoveryMode(recognizer) { return } beginErrorCondition(recognizer) - let t: Token = try recognizer.getCurrentToken() - let expecting: IntervalSet = try getExpectedTokens(recognizer) - let msg: String = "missing " + expecting.toString(recognizer.getVocabulary()) + - " at " + getTokenErrorDisplay(t) + let t = try? recognizer.getCurrentToken() + let expecting = (try? getExpectedTokens(recognizer)) ?? IntervalSet.EMPTY_SET + let msg = "missing \(expecting.toString(recognizer.getVocabulary())) at \(getTokenErrorDisplay(t))" recognizer.notifyErrorListeners(t, msg, nil) } - /// {@inheritDoc} - /// - ///

      The default implementation attempts to recover from the mismatched input + /// + /// + /// + /// The default implementation attempts to recover from the mismatched input /// by using single token insertion and deletion as described below. If the /// recovery attempt fails, this method throws an - /// {@link org.antlr.v4.runtime.InputMismatchException}.

      - /// - ///

      EXTRA TOKEN (single token deletion)

      - /// - ///

      {@code LA(1)} is not what we are looking for. If {@code LA(2)} has the - /// right token, however, then assume {@code LA(1)} is some extra spurious + /// _org.antlr.v4.runtime.InputMismatchException_. + /// + /// __EXTRA TOKEN__ (single token deletion) + /// + /// `LA(1)` is not what we are looking for. If `LA(2)` has the + /// right token, however, then assume `LA(1)` is some extra spurious /// token and delete it. Then consume and return the next token (which was - /// the {@code LA(2)} token) as the successful result of the match operation.

      - /// - ///

      This recovery strategy is implemented by {@link #singleTokenDeletion}.

      - /// - ///

      MISSING TOKEN (single token insertion)

      - /// - ///

      If current token (at {@code LA(1)}) is consistent with what could come - /// after the expected {@code LA(1)} token, then assume the token is missing - /// and use the parser's {@link org.antlr.v4.runtime.TokenFactory} to create it on the fly. The + /// the `LA(2)` token) as the successful result of the match operation. + /// + /// This recovery strategy is implemented by _#singleTokenDeletion_. + /// + /// __MISSING TOKEN__ (single token insertion) + /// + /// If current token (at `LA(1)`) is consistent with what could come + /// after the expected `LA(1)` token, then assume the token is missing + /// and use the parser's _org.antlr.v4.runtime.TokenFactory_ to create it on the fly. The /// "insertion" is performed by returning the created token as the successful - /// result of the match operation.

      - /// - ///

      This recovery strategy is implemented by {@link #singleTokenInsertion}.

      - /// - ///

      EXAMPLE

      - /// - ///

      For example, Input {@code i=(3;} is clearly missing the {@code ')'}. When - /// the parser returns from the nested call to {@code expr}, it will have - /// call chain:

      - /// - ///
      +    /// result of the match operation.
      +    /// 
      +    /// This recovery strategy is implemented by _#singleTokenInsertion_.
      +    /// 
      +    /// __EXAMPLE__
      +    /// 
      +    /// For example, Input `i=(3;` is clearly missing the `')'`. When
      +    /// the parser returns from the nested call to `expr`, it will have
      +    /// call chain:
      +    /// 
      +    /// 
           /// stat → expr → atom
      -    /// 
      - /// - /// and it will be trying to match the {@code ')'} at this point in the + /// + /// + /// and it will be trying to match the `')'` at this point in the /// derivation: - /// - ///
      +    /// 
      +    /// 
           /// => ID '=' '(' INT ')' ('+' atom)* ';'
           /// ^
      -    /// 
      - /// - /// The attempt to match {@code ')'} will fail when it sees {@code ';'} and - /// call {@link #recoverInline}. To recover, it sees that {@code LA(1)==';'} - /// is in the set of tokens that can follow the {@code ')'} token reference - /// in rule {@code atom}. It can assume that you forgot the {@code ')'}. + /// + /// + /// The attempt to match `')'` will fail when it sees `';'` and + /// call _#recoverInline_. To recover, it sees that `LA(1)==';'` + /// is in the set of tokens that can follow the `')'` token reference + /// in rule `atom`. It can assume that you forgot the `')'`. + /// public func recoverInline(_ recognizer: Parser) throws -> Token { // SINGLE TOKEN DELETION - let matchedSymbol: Token? = try singleTokenDeletion(recognizer) - if matchedSymbol != nil { + let matchedSymbol = try singleTokenDeletion(recognizer) + if let matchedSymbol = matchedSymbol { // we have deleted the extra token. // now, move past ttype token as if all were ok try recognizer.consume() - return matchedSymbol! + return matchedSymbol } // SINGLE TOKEN INSERTION if try singleTokenInsertion(recognizer) { return try getMissingSymbol(recognizer) } - throw try ANTLRException.recognition(e: InputMismatchException(recognizer)) - // throw try ANTLRException.InputMismatch(e: InputMismatchException(recognizer) ) - //RuntimeException("InputMismatchException") // even that didn't work; must throw the exception - //throwException() /* throw InputMismatchException(recognizer); */ + throw ANTLRException.recognition(e: InputMismatchException(recognizer)) } + /// /// This method implements the single-token insertion inline error recovery - /// strategy. It is called by {@link #recoverInline} if the single-token + /// strategy. It is called by _#recoverInline_ if the single-token /// deletion strategy fails to recover from the mismatched input. 
If this - /// method returns {@code true}, {@code recognizer} will be in error recovery + /// method returns `true`, `recognizer` will be in error recovery /// mode. - /// - ///

      This method determines whether or not single-token insertion is viable by - /// checking if the {@code LA(1)} input symbol could be successfully matched - /// if it were instead the {@code LA(2)} symbol. If this method returns - /// {@code true}, the caller is responsible for creating and inserting a - /// token with the correct type to produce this behavior.

      - /// + /// + /// This method determines whether or not single-token insertion is viable by + /// checking if the `LA(1)` input symbol could be successfully matched + /// if it were instead the `LA(2)` symbol. If this method returns + /// `true`, the caller is responsible for creating and inserting a + /// token with the correct type to produce this behavior. + /// /// - parameter recognizer: the parser instance - /// - returns: {@code true} if single-token insertion is a viable recovery - /// strategy for the current mismatched input, otherwise {@code false} + /// - returns: `true` if single-token insertion is a viable recovery + /// strategy for the current mismatched input, otherwise `false` + /// internal func singleTokenInsertion(_ recognizer: Parser) throws -> Bool { - let currentSymbolType: Int = try getTokenStream(recognizer).LA(1) + let currentSymbolType = try getTokenStream(recognizer).LA(1) // if current token is consistent with what could come after current // ATN state, then we know we're missing a token; error recovery // is free to conjure up and insert the missing token - let currentState: ATNState = recognizer.getInterpreter().atn.states[recognizer.getState()]! - let next: ATNState = currentState.transition(0).target - let atn: ATN = recognizer.getInterpreter().atn - let expectingAtLL2: IntervalSet = try atn.nextTokens(next, recognizer._ctx) + let currentState = recognizer.getInterpreter().atn.states[recognizer.getState()]! + let next = currentState.transition(0).target + let atn = recognizer.getInterpreter().atn + let expectingAtLL2 = atn.nextTokens(next, recognizer._ctx) // print("LT(2) set="+expectingAtLL2.toString(recognizer.getTokenNames())); if expectingAtLL2.contains(currentSymbolType) { - try reportMissingToken(recognizer) + reportMissingToken(recognizer) return true } return false } + /// /// This method implements the single-token deletion inline error recovery - /// strategy. 
It is called by {@link #recoverInline} to attempt to recover + /// strategy. It is called by _#recoverInline_ to attempt to recover /// from mismatched input. If this method returns null, the parser and error /// handler state will not have changed. If this method returns non-null, - /// {@code recognizer} will not be in error recovery mode since the + /// `recognizer` will __not__ be in error recovery mode since the /// returned token was a successful match. - /// - ///

      If the single-token deletion is successful, this method calls - /// {@link #reportUnwantedToken} to report the error, followed by - /// {@link org.antlr.v4.runtime.Parser#consume} to actually "delete" the extraneous token. Then, - /// before returning {@link #reportMatch} is called to signal a successful - /// match.

      - /// + /// + /// If the single-token deletion is successful, this method calls + /// _#reportUnwantedToken_ to report the error, followed by + /// _org.antlr.v4.runtime.Parser#consume_ to actually "delete" the extraneous token. Then, + /// before returning _#reportMatch_ is called to signal a successful + /// match. + /// /// - parameter recognizer: the parser instance - /// - returns: the successfully matched {@link org.antlr.v4.runtime.Token} instance if single-token + /// - returns: the successfully matched _org.antlr.v4.runtime.Token_ instance if single-token /// deletion successfully recovers from the mismatched input, otherwise - /// {@code null} + /// `null` + /// internal func singleTokenDeletion(_ recognizer: Parser) throws -> Token? { - let nextTokenType: Int = try getTokenStream(recognizer).LA(2) - let expecting: IntervalSet = try getExpectedTokens(recognizer) + let nextTokenType = try getTokenStream(recognizer).LA(2) + let expecting = try getExpectedTokens(recognizer) if expecting.contains(nextTokenType) { - try reportUnwantedToken(recognizer) + reportUnwantedToken(recognizer) + /// /// errPrint("recoverFromMismatchedToken deleting "+ /// ((TokenStream)getTokenStream(recognizer)).LT(1)+ /// " since "+((TokenStream)getTokenStream(recognizer)).LT(2)+ /// " is what we want"); + /// try recognizer.consume() // simply delete extra token // we want to return the token we're actually matching - let matchedSymbol: Token = try recognizer.getCurrentToken() + let matchedSymbol = try recognizer.getCurrentToken() reportMatch(recognizer) // we know current token is correct return matchedSymbol } return nil } + /// /// Conjure up a missing token during error recovery. - /// + /// /// The recognizer attempts to recover from single missing /// symbols. But, actions might refer to that missing symbol. /// For example, x=ID {f($x);}. 
The action clearly assumes @@ -507,31 +519,33 @@ public class DefaultErrorStrategy: ANTLRErrorStrategy { /// a CommonToken of the appropriate type. The text will be the token. /// If you change what tokens must be created by the lexer, /// override this method to create the appropriate tokens. - + /// internal func getTokenStream(_ recognizer: Parser) -> TokenStream { return recognizer.getInputStream() as! TokenStream } internal func getMissingSymbol(_ recognizer: Parser) throws -> Token { - let currentSymbol: Token = try recognizer.getCurrentToken() - let expecting: IntervalSet = try getExpectedTokens(recognizer) - let expectedTokenType: Int = expecting.getMinElement() // get any element + let currentSymbol = try recognizer.getCurrentToken() + let expecting = try getExpectedTokens(recognizer) + let expectedTokenType = expecting.getMinElement() // get any element var tokenText: String if expectedTokenType == CommonToken.EOF { tokenText = "" } else { tokenText = "" } - var current: Token = currentSymbol - let lookback: Token? = try getTokenStream(recognizer).LT(-1) + var current = currentSymbol + let lookback = try getTokenStream(recognizer).LT(-1) if current.getType() == CommonToken.EOF && lookback != nil { current = lookback! } - let token = recognizer.getTokenFactory().create((current.getTokenSource(), current.getTokenSource()!.getInputStream()), expectedTokenType, tokenText, - CommonToken.DEFAULT_CHANNEL, - -1, -1, - current.getLine(), current.getCharPositionInLine()) + let token = recognizer.getTokenFactory().create( + current.getTokenSourceAndStream(), + expectedTokenType, tokenText, + CommonToken.DEFAULT_CHANNEL, + -1, -1, + current.getLine(), current.getCharPositionInLine()) return token } @@ -541,6 +555,7 @@ public class DefaultErrorStrategy: ANTLRErrorStrategy { return try recognizer.getExpectedTokens() } + /// /// How should a token be displayed in an error message? 
The default /// is to display just the text, but during development you might /// want to have a lot of information spit out. Override in that case @@ -548,23 +563,24 @@ public class DefaultErrorStrategy: ANTLRErrorStrategy { /// the token). This is better than forcing you to override a method in /// your token objects because you don't have to go modify your lexer /// so that it creates a new Java type. + /// internal func getTokenErrorDisplay(_ t: Token?) -> String { - if t == nil { + guard let t = t else { return "" } - var s: String? = getSymbolText(t!) + var s = getSymbolText(t) if s == nil { - if getSymbolType(t!) == CommonToken.EOF { + if getSymbolType(t) == CommonToken.EOF { s = "" } else { - s = "<\(getSymbolType(t!))>" + s = "<\(getSymbolType(t))>" } } return escapeWSAndQuote(s!) } - internal func getSymbolText(_ symbol: Token) -> String { - return symbol.getText()! + internal func getSymbolText(_ symbol: Token) -> String? { + return symbol.getText() } internal func getSymbolType(_ symbol: Token) -> Int { @@ -580,6 +596,7 @@ public class DefaultErrorStrategy: ANTLRErrorStrategy { return "'" + s + "'" } + /// /// Compute the error recovery set for the current rule. During /// rule invocation, the parser pushes the set of tokens that can /// follow that rule reference on the stack; this amounts to @@ -588,9 +605,9 @@ public class DefaultErrorStrategy: ANTLRErrorStrategy { /// This local follow set only includes tokens /// from within the rule; i.e., the FIRST computation done by /// ANTLR stops at the end of a rule. - /// + /// /// EXAMPLE - /// + /// /// When you find a "no viable alt exception", the input is not /// consistent with any of the alternatives for rule r. 
The best /// thing to do is to consume tokens until you see something that @@ -598,9 +615,9 @@ public class DefaultErrorStrategy: ANTLRErrorStrategy { /// You don't want the exact set of viable next tokens because the /// input might just be missing a token--you might consume the /// rest of the input looking for one of the missing tokens. - /// + /// /// Consider grammar: - /// + /// /// a : '[' b ']' /// | '(' b ')' /// ; @@ -608,30 +625,30 @@ public class DefaultErrorStrategy: ANTLRErrorStrategy { /// c : ID /// | INT /// ; - /// + /// /// At each rule invocation, the set of tokens that could follow /// that rule is pushed on a stack. Here are the various /// context-sensitive follow sets: - /// + /// /// FOLLOW(b1_in_a) = FIRST(']') = ']' /// FOLLOW(b2_in_a) = FIRST(')') = ')' /// FOLLOW(c_in_b) = FIRST('^') = '^' - /// + /// /// Upon erroneous input "[]", the call chain is - /// + /// /// a -> b -> c - /// + /// /// and, hence, the follow context stack is: - /// + /// /// depth follow set start of rule execution /// 0 a (from main()) /// 1 ']' b /// 2 '^' c - /// + /// /// Notice that ')' is not included, because b would have to have /// been called from a different context in rule a for ')' to be /// included. - /// + /// /// For error recovery, we cannot consider FOLLOW(c) /// (context-sensitive or otherwise). We need the combined set of /// all context-sensitive FOLLOW sets--the set of all tokens that @@ -648,53 +665,55 @@ public class DefaultErrorStrategy: ANTLRErrorStrategy { /// the same recovery set and doesn't consume anything. Rule b /// exits normally returning to rule a. Now it finds the ']' (and /// with the successful match exits errorRecovery mode). - /// + /// /// So, you can see that the parser walks up the call chain looking /// for the token that was a member of the recovery set. - /// + /// /// Errors are not generated in errorRecovery mode. 
- /// + /// /// ANTLR's error recovery mechanism is based upon original ideas: - /// + /// /// "Algorithms + Data Structures = Programs" by Niklaus Wirth - /// + /// /// and - /// + /// /// "A note on error recovery in recursive descent parsers": /// http://portal.acm.org/citation.cfm?id=947902.947905 - /// + /// /// Later, Josef Grosch had some good ideas: - /// + /// /// "Efficient and Comfortable Error Recovery in Recursive Descent /// Parsers": /// ftp://www.cocolab.com/products/cocktail/doca4.ps/ell.ps.zip - /// + /// /// Like Grosch I implement context-sensitive FOLLOW sets that are combined /// at run-time upon error to avoid overhead during parsing. - internal func getErrorRecoverySet(_ recognizer: Parser) throws -> IntervalSet { - let atn: ATN = recognizer.getInterpreter().atn + /// + internal func getErrorRecoverySet(_ recognizer: Parser) -> IntervalSet { + let atn = recognizer.getInterpreter().atn var ctx: RuleContext? = recognizer._ctx - let recoverSet: IntervalSet = try IntervalSet() - while let ctxWrap = ctx , ctxWrap.invokingState >= 0 { + let recoverSet = IntervalSet() + while let ctxWrap = ctx, ctxWrap.invokingState >= 0 { // compute what follows who invoked us - let invokingState: ATNState = atn.states[ctxWrap.invokingState]! - let rt: RuleTransition = invokingState.transition(0) as! RuleTransition - let follow: IntervalSet = try atn.nextTokens(rt.followState) - try recoverSet.addAll(follow) + let invokingState = atn.states[ctxWrap.invokingState]! + let rt = invokingState.transition(0) as! RuleTransition + let follow = atn.nextTokens(rt.followState) + try! recoverSet.addAll(follow) ctx = ctxWrap.parent } - try recoverSet.remove(CommonToken.EPSILON) + try! recoverSet.remove(CommonToken.EPSILON) // print("recover set "+recoverSet.toString(recognizer.getTokenNames())); return recoverSet } + /// /// Consume tokens until one matches the given token set. 
+ /// internal func consumeUntil(_ recognizer: Parser, _ set: IntervalSet) throws { // errPrint("consumeUntil("+set.toString(recognizer.getTokenNames())+")"); - var ttype: Int = try getTokenStream(recognizer).LA(1) + var ttype = try getTokenStream(recognizer).LA(1) while ttype != CommonToken.EOF && !set.contains(ttype) { //print("consume during recover LA(1)="+getTokenNames()[input.LA(1)]); -// getTokenStream(recognizer).consume(); try recognizer.consume() ttype = try getTokenStream(recognizer).LA(1) } diff --git a/runtime/Swift/Sources/Antlr4/DiagnosticErrorListener.swift b/runtime/Swift/Sources/Antlr4/DiagnosticErrorListener.swift index 75681f617..4109f672b 100644 --- a/runtime/Swift/Sources/Antlr4/DiagnosticErrorListener.swift +++ b/runtime/Swift/Sources/Antlr4/DiagnosticErrorListener.swift @@ -1,45 +1,53 @@ +/// /// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. /// Use of this file is governed by the BSD 3-clause license that /// can be found in the LICENSE.txt file in the project root. +/// -/// This implementation of {@link org.antlr.v4.runtime.ANTLRErrorListener} can be used to identify +/// +/// This implementation of _org.antlr.v4.runtime.ANTLRErrorListener_ can be used to identify /// certain potential correctness and performance problems in grammars. "Reports" -/// are made by calling {@link org.antlr.v4.runtime.Parser#notifyErrorListeners} with the appropriate +/// are made by calling _org.antlr.v4.runtime.Parser#notifyErrorListeners_ with the appropriate /// message. -/// -///
        -///
      • Ambiguities: These are cases where more than one path through the -/// grammar can match the input.
      • -///
      • Weak context sensitivity: These are cases where full-context +/// +/// * __Ambiguities__: These are cases where more than one path through the +/// grammar can match the input. +/// * __Weak context sensitivity__: These are cases where full-context /// prediction resolved an SLL conflict to a unique alternative which equaled the -/// minimum alternative of the SLL conflict.
      • -///
      • Strong (forced) context sensitivity: These are cases where the +/// minimum alternative of the SLL conflict. +/// * __Strong (forced) context sensitivity__: These are cases where the /// full-context prediction resolved an SLL conflict to a unique alternative, -/// and the minimum alternative of the SLL conflict was found to not be +/// __and__ the minimum alternative of the SLL conflict was found to not be /// a truly viable alternative. Two-stage parsing cannot be used for inputs where -/// this situation occurs.
      • -///
      -/// +/// this situation occurs. +/// /// - Sam Harwell +/// import Foundation public class DiagnosticErrorListener: BaseErrorListener { - /// When {@code true}, only exactly known ambiguities are reported. + /// + /// When `true`, only exactly known ambiguities are reported. + /// internal final var exactOnly: Bool - /// Initializes a new instance of {@link org.antlr.v4.runtime.DiagnosticErrorListener} which only + /// + /// Initializes a new instance of _org.antlr.v4.runtime.DiagnosticErrorListener_ which only /// reports exact ambiguities. + /// public convenience override init() { self.init(true) } - /// Initializes a new instance of {@link org.antlr.v4.runtime.DiagnosticErrorListener}, specifying + /// + /// Initializes a new instance of _org.antlr.v4.runtime.DiagnosticErrorListener_, specifying /// whether all ambiguities or only exact ambiguities are reported. - /// - /// - parameter exactOnly: {@code true} to report only exact ambiguities, otherwise - /// {@code false} to report all ambiguities. + /// + /// - parameter exactOnly: `true` to report only exact ambiguities, otherwise + /// `false` to report all ambiguities. 
+ /// public init(_ exactOnly: Bool) { self.exactOnly = exactOnly } @@ -51,16 +59,16 @@ public class DiagnosticErrorListener: BaseErrorListener { _ stopIndex: Int, _ exact: Bool, _ ambigAlts: BitSet, - _ configs: ATNConfigSet) throws { + _ configs: ATNConfigSet) { if exactOnly && !exact { return } let decision = getDecisionDescription(recognizer, dfa) - let conflictingAlts = try getConflictingAlts(ambigAlts, configs) - let text = try recognizer.getTokenStream()!.getText(Interval.of(startIndex, stopIndex)) + let conflictingAlts = getConflictingAlts(ambigAlts, configs) + let text = getTextInInterval(recognizer, startIndex, stopIndex) let message = "reportAmbiguity d=\(decision): ambigAlts=\(conflictingAlts), input='\(text)'" - try recognizer.notifyErrorListeners(message) + recognizer.notifyErrorListeners(message) } override @@ -69,11 +77,11 @@ public class DiagnosticErrorListener: BaseErrorListener { _ startIndex: Int, _ stopIndex: Int, _ conflictingAlts: BitSet?, - _ configs: ATNConfigSet) throws { + _ configs: ATNConfigSet) { let decision = getDecisionDescription(recognizer, dfa) - let text = try recognizer.getTokenStream()!.getText(Interval.of(startIndex, stopIndex)) + let text = getTextInInterval(recognizer, startIndex, stopIndex) let message = "reportAttemptingFullContext d=\(decision), input='\(text)'" - try recognizer.notifyErrorListeners(message) + recognizer.notifyErrorListeners(message) } override @@ -82,11 +90,11 @@ public class DiagnosticErrorListener: BaseErrorListener { _ startIndex: Int, _ stopIndex: Int, _ prediction: Int, - _ configs: ATNConfigSet) throws { + _ configs: ATNConfigSet) { let decision = getDecisionDescription(recognizer, dfa) - let text = try recognizer.getTokenStream()!.getText(Interval.of(startIndex, stopIndex)) + let text = getTextInInterval(recognizer, startIndex, stopIndex) let message = "reportContextSensitivity d=\(decision), input='\(text)'" - try recognizer.notifyErrorListeners(message) + 
recognizer.notifyErrorListeners(message) } internal func getDecisionDescription(_ recognizer: Parser, _ dfa: DFA) -> String { @@ -106,21 +114,28 @@ public class DiagnosticErrorListener: BaseErrorListener { return "\(decision) (\(ruleName))" } + /// /// Computes the set of conflicting or ambiguous alternatives from a /// configuration set, if that information was not already provided by the /// parser. - /// + /// /// - parameter reportedAlts: The set of conflicting or ambiguous alternatives, as /// reported by the parser. /// - parameter configs: The conflicting or ambiguous configuration set. - /// - returns: Returns {@code reportedAlts} if it is not {@code null}, otherwise - /// returns the set of alternatives represented in {@code configs}. - internal func getConflictingAlts(_ reportedAlts: BitSet?, _ configs: ATNConfigSet) throws -> BitSet { - if reportedAlts != nil { - return reportedAlts! - } - let result = try configs.getAltBitSet() - return result + /// - returns: Returns `reportedAlts` if it is not `null`, otherwise + /// returns the set of alternatives represented in `configs`. + /// + internal func getConflictingAlts(_ reportedAlts: BitSet?, _ configs: ATNConfigSet) -> BitSet { + return reportedAlts ?? configs.getAltBitSet() + } +} + + +fileprivate func getTextInInterval(_ recognizer: Parser, _ startIndex: Int, _ stopIndex: Int) -> String { + do { + return try recognizer.getTokenStream()?.getText(Interval.of(startIndex, stopIndex)) ?? "" + } + catch { + return "" } - } diff --git a/runtime/Swift/Sources/Antlr4/FailedPredicateException.swift b/runtime/Swift/Sources/Antlr4/FailedPredicateException.swift index 529ce372f..c4e2bd310 100644 --- a/runtime/Swift/Sources/Antlr4/FailedPredicateException.swift +++ b/runtime/Swift/Sources/Antlr4/FailedPredicateException.swift @@ -1,36 +1,28 @@ +/// /// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. 
/// Use of this file is governed by the BSD 3-clause license that /// can be found in the LICENSE.txt file in the project root. +/// +/// /// A semantic predicate failed during validation. Validation of predicates /// occurs when normally parsing the alternative just like matching a token. /// Disambiguating predicate evaluation occurs when we test a predicate during /// prediction. -public class FailedPredicateException: RecognitionException { +/// +public class FailedPredicateException: RecognitionException { private final var ruleIndex: Int private final var predicateIndex: Int private final var predicate: String? - public convenience init(_ recognizer: Parser) throws { - try self.init(recognizer, nil) - } + public init(_ recognizer: Parser, _ predicate: String? = nil, _ message: String? = nil) { + let s = recognizer.getInterpreter().atn.states[recognizer.getState()]! - public convenience init(_ recognizer: Parser, _ predicate: String?)throws { - try self.init(recognizer, predicate, nil) - } - - public init(_ recognizer: Parser, - _ predicate: String?, - _ message: String?) throws - { - - let s: ATNState = recognizer.getInterpreter().atn.states[recognizer.getState()]! - - let trans: AbstractPredicateTransition = s.transition(0) as! AbstractPredicateTransition - if trans is PredicateTransition { - self.ruleIndex = (trans as! PredicateTransition).ruleIndex - self.predicateIndex = (trans as! PredicateTransition).predIndex + let trans = s.transition(0) as! AbstractPredicateTransition + if let predex = trans as? 
PredicateTransition { + self.ruleIndex = predex.ruleIndex + self.predicateIndex = predex.predIndex } else { self.ruleIndex = 0 @@ -39,9 +31,10 @@ public class FailedPredicateException: RecognitionException self.predicate = predicate - super.init(FailedPredicateException.formatMessage(predicate!, message), recognizer , recognizer.getInputStream()!, recognizer._ctx) - - try self.setOffendingToken(recognizer.getCurrentToken()) + super.init(recognizer, recognizer.getInputStream()!, recognizer._ctx, FailedPredicateException.formatMessage(predicate, message)) + if let token = try? recognizer.getCurrentToken() { + setOffendingToken(token) + } } public func getRuleIndex() -> Int { @@ -52,17 +45,17 @@ public class FailedPredicateException: RecognitionException return predicateIndex } - public func getPredicate() -> String? { return predicate } - private static func formatMessage(_ predicate: String, _ message: String?) -> String { + private static func formatMessage(_ predicate: String?, _ message: String?) -> String { if message != nil { return message! } - return "failed predicate: {predicate}?" //String.format(Locale.getDefault(), "failed predicate: {%s}?", predicate); + let predstr = predicate ?? "" + return "failed predicate: {\(predstr)}?" } } diff --git a/runtime/Swift/Sources/Antlr4/InputMismatchException.swift b/runtime/Swift/Sources/Antlr4/InputMismatchException.swift index 32f9ac89e..8af781702 100644 --- a/runtime/Swift/Sources/Antlr4/InputMismatchException.swift +++ b/runtime/Swift/Sources/Antlr4/InputMismatchException.swift @@ -1,14 +1,20 @@ +/// /// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. /// Use of this file is governed by the BSD 3-clause license that /// can be found in the LICENSE.txt file in the project root. +/// +/// /// This signifies any kind of mismatched input exceptions such as /// when the current input does not match the expected token. 
+/// -public class InputMismatchException: RecognitionException { - public init(_ recognizer: Parser) throws { +public class InputMismatchException: RecognitionException { + public init(_ recognizer: Parser) { super.init(recognizer, recognizer.getInputStream()!, recognizer._ctx) - self.setOffendingToken(try recognizer.getCurrentToken()) + if let token = try? recognizer.getCurrentToken() { + setOffendingToken(token) + } } } diff --git a/runtime/Swift/Sources/Antlr4/IntStream.swift b/runtime/Swift/Sources/Antlr4/IntStream.swift index 69af63969..d56a3f62c 100644 --- a/runtime/Swift/Sources/Antlr4/IntStream.swift +++ b/runtime/Swift/Sources/Antlr4/IntStream.swift @@ -1,117 +1,112 @@ +/// /// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. /// Use of this file is governed by the BSD 3-clause license that /// can be found in the LICENSE.txt file in the project root. +/// +/// /// A simple stream of symbols whose values are represented as integers. This -/// interface provides marked ranges with support for a minimum level +/// interface provides __marked ranges__ with support for a minimum level /// of buffering necessary to implement arbitrary lookahead during prediction. -/// For more information on marked ranges, see {@link #mark}. -/// -///

      Initializing Methods: Some methods in this interface have +/// For more information on marked ranges, see _#mark_. +/// +/// __Initializing Methods:__ Some methods in this interface have /// unspecified behavior if no call to an initializing method has occurred after -/// the stream was constructed. The following is a list of initializing methods:

      -/// -///
        -///
      • {@link #LA}
      • -///
      • {@link #consume}
      • -///
      • {@link #size}
      • -///
      - +/// the stream was constructed. The following is a list of initializing methods: +/// +/// * _#LA_ +/// * _#consume_ +/// * _#size_ +/// public protocol IntStream: class { - /// The value returned by {@link #LA LA()} when the end of the stream is - /// reached. - //let EOF : Int = -1; - - /// The value returned by {@link #getSourceName} when the actual name of the - /// underlying source is not known. - //let UNKNOWN_SOURCE_NAME : String = ""; + /// /// Consumes the current symbol in the stream. This method has the following /// effects: - /// - ///
        - ///
      • Forward movement: The value of {@link #index index()} - /// before calling this method is less than the value of {@code index()} - /// after calling this method.
      • - ///
      • Ordered lookahead: The value of {@code LA(1)} before - /// calling this method becomes the value of {@code LA(-1)} after calling - /// this method.
      • - ///
      - /// - /// Note that calling this method does not guarantee that {@code index()} is + /// + /// * __Forward movement:__ The value of _#index index()_ + /// before calling this method is less than the value of `index()` + /// after calling this method. + /// * __Ordered lookahead:__ The value of `LA(1)` before + /// calling this method becomes the value of `LA(-1)` after calling + /// this method. + /// + /// Note that calling this method does not guarantee that `index()` is /// incremented by exactly 1, as that would preclude the ability to implement - /// filtering streams (e.g. {@link org.antlr.v4.runtime.CommonTokenStream} which distinguishes + /// filtering streams (e.g. _org.antlr.v4.runtime.CommonTokenStream_ which distinguishes /// between "on-channel" and "off-channel" tokens). - /// - /// - IllegalStateException if an attempt is made to consume the the - /// end of the stream (i.e. if {@code LA(1)==}{@link #EOF EOF} before calling - /// {@code consume}). + /// + /// - throws: _ANTLRError.illegalState_ if an attempt is made to consume the the + /// end of the stream (i.e. if `LA(1)==`_#EOF EOF_ before calling + /// `consume`). + /// func consume() throws - /// Gets the value of the symbol at offset {@code i} from the current - /// position. When {@code i==1}, this method returns the value of the current + /// + /// Gets the value of the symbol at offset `i` from the current + /// position. When `i==1`, this method returns the value of the current /// symbol in the stream (which is the next symbol to be consumed). When - /// {@code i==-1}, this method returns the value of the previously read + /// `i==-1`, this method returns the value of the previously read /// symbol in the stream. It is not valid to call this method with - /// {@code i==0}, but the specific behavior is unspecified because this + /// `i==0`, but the specific behavior is unspecified because this /// method is frequently called from performance-critical code. - /// - ///

      This method is guaranteed to succeed if any of the following are true:

      - /// - ///
        - ///
      • {@code i>0}
      • - ///
      • {@code i==-1} and {@link #index index()} returns a value greater - /// than the value of {@code index()} after the stream was constructed - /// and {@code LA(1)} was called in that order. Specifying the current - /// {@code index()} relative to the index after the stream was created + /// + /// This method is guaranteed to succeed if any of the following are true: + /// + /// * `i>0` + /// * `i==-1` and _#index index()_ returns a value greater + /// than the value of `index()` after the stream was constructed + /// and `LA(1)` was called in that order. Specifying the current + /// `index()` relative to the index after the stream was created /// allows for filtering implementations that do not return every symbol - /// from the underlying source. Specifying the call to {@code LA(1)} - /// allows for lazily initialized streams.
      • - ///
      • {@code LA(i)} refers to a symbol consumed within a marked region - /// that has not yet been released.
      • - ///
      - /// - ///

      If {@code i} represents a position at or beyond the end of the stream, - /// this method returns {@link #EOF}.

      - /// - ///

      The return value is unspecified if {@code i<0} and fewer than {@code -i} - /// calls to {@link #consume consume()} have occurred from the beginning of - /// the stream before calling this method.

      - /// - /// - UnsupportedOperationException if the stream does not support + /// from the underlying source. Specifying the call to `LA(1)` + /// allows for lazily initialized streams. + /// * `LA(i)` refers to a symbol consumed within a marked region + /// that has not yet been released. + /// + /// If `i` represents a position at or beyond the end of the stream, + /// this method returns _#EOF_. + /// + /// The return value is unspecified if `i<0` and fewer than `-i` + /// calls to _#consume consume()_ have occurred from the beginning of + /// the stream before calling this method. + /// + /// - throws: _ANTLRError.unsupportedOperation_ if the stream does not support /// retrieving the value of the specified symbol + /// func LA(_ i: Int) throws -> Int - /// A mark provides a guarantee that {@link #seek seek()} operations will be - /// valid over a "marked range" extending from the index where {@code mark()} - /// was called to the current {@link #index index()}. This allows the use of + /// + /// A mark provides a guarantee that _#seek seek()_ operations will be + /// valid over a "marked range" extending from the index where `mark()` + /// was called to the current _#index index()_. This allows the use of /// streaming input sources by specifying the minimum buffering requirements /// to support arbitrary lookahead during prediction. - /// - ///

      The returned mark is an opaque handle (type {@code int}) which is passed - /// to {@link #release release()} when the guarantees provided by the marked + /// + /// The returned mark is an opaque handle (type `int`) which is passed + /// to _#release release()_ when the guarantees provided by the marked /// range are no longer necessary. When calls to - /// {@code mark()}/{@code release()} are nested, the marks must be released + /// `mark()`/`release()` are nested, the marks must be released /// in reverse order of which they were obtained. Since marked regions are /// used during performance-critical sections of prediction, the specific /// behavior of invalid usage is unspecified (i.e. a mark is not released, or /// a mark is released twice, or marks are not released in reverse order from - /// which they were created).

      - /// - ///

      The behavior of this method is unspecified if no call to an - /// {@link org.antlr.v4.runtime.IntStream initializing method} has occurred after this stream was - /// constructed.

      - /// - ///

      This method does not change the current position in the input stream.

      - /// - ///

      The following example shows the use of {@link #mark mark()}, - /// {@link #release release(mark)}, {@link #index index()}, and - /// {@link #seek seek(index)} as part of an operation to safely work within a + /// which they were created). + /// + /// The behavior of this method is unspecified if no call to an + /// _org.antlr.v4.runtime.IntStream initializing method_ has occurred after this stream was + /// constructed. + /// + /// This method does not change the current position in the input stream. + /// + /// The following example shows the use of _#mark mark()_, + /// _#release release(mark)_, _#index index()_, and + /// _#seek seek(index)_ as part of an operation to safely work within a /// marked region, then restore the stream position to its original value and - /// release the mark.

      - ///
      +    /// release the mark.
      +    /// 
           /// IntStream stream = ...;
           /// int index = -1;
           /// int mark = stream.mark();
      @@ -124,70 +119,78 @@ public protocol IntStream: class {
           /// }
           /// stream.release(mark);
           /// }
      -    /// 
      - /// + /// + /// /// - returns: An opaque marker which should be passed to - /// {@link #release release()} when the marked range is no longer required. + /// _#release release()_ when the marked range is no longer required. + /// func mark() -> Int + /// /// This method releases a marked range created by a call to - /// {@link #mark mark()}. Calls to {@code release()} must appear in the - /// reverse order of the corresponding calls to {@code mark()}. If a mark is + /// _#mark mark()_. Calls to `release()` must appear in the + /// reverse order of the corresponding calls to `mark()`. If a mark is /// released twice, or if marks are not released in reverse order of the - /// corresponding calls to {@code mark()}, the behavior is unspecified. - /// - ///

      For more information and an example, see {@link #mark}.

      - /// - /// - parameter marker: A marker returned by a call to {@code mark()}. + /// corresponding calls to `mark()`, the behavior is unspecified. + /// + /// For more information and an example, see _#mark_. + /// + /// - parameter marker: A marker returned by a call to `mark()`. /// - seealso: #mark + /// func release(_ marker: Int) throws + /// /// Return the index into the stream of the input symbol referred to by - /// {@code LA(1)}. - /// - ///

      The behavior of this method is unspecified if no call to an - /// {@link org.antlr.v4.runtime.IntStream initializing method} has occurred after this stream was - /// constructed.

      + /// `LA(1)`. + /// + /// The behavior of this method is unspecified if no call to an + /// _org.antlr.v4.runtime.IntStream initializing method_ has occurred after this stream was + /// constructed. + /// func index() -> Int - /// Set the input cursor to the position indicated by {@code index}. If the + /// + /// Set the input cursor to the position indicated by `index`. If the /// specified index lies past the end of the stream, the operation behaves as - /// though {@code index} was the index of the EOF symbol. After this method + /// though `index` was the index of the EOF symbol. After this method /// returns without throwing an exception, then at least one of the following /// will be true. - /// - ///
        - ///
      • {@link #index index()} will return the index of the first symbol - /// appearing at or after the specified {@code index}. Specifically, + /// + /// * _#index index()_ will return the index of the first symbol + /// appearing at or after the specified `index`. Specifically, /// implementations which filter their sources should automatically - /// adjust {@code index} forward the minimum amount required for the - /// operation to target a non-ignored symbol.
      • - ///
      • {@code LA(1)} returns {@link #EOF}
      • - ///
      - /// - /// This operation is guaranteed to not throw an exception if {@code index} + /// adjust `index` forward the minimum amount required for the + /// operation to target a non-ignored symbol. + /// * `LA(1)` returns _#EOF_ + /// + /// This operation is guaranteed to not throw an exception if `index` /// lies within a marked region. For more information on marked regions, see - /// {@link #mark}. The behavior of this method is unspecified if no call to - /// an {@link org.antlr.v4.runtime.IntStream initializing method} has occurred after this stream + /// _#mark_. The behavior of this method is unspecified if no call to + /// an _org.antlr.v4.runtime.IntStream initializing method_ has occurred after this stream /// was constructed. - /// + /// /// - parameter index: The absolute index to seek to. - /// - /// - IllegalArgumentException if {@code index} is less than 0 - /// - UnsupportedOperationException if the stream does not support + /// + /// - throws: _ANTLRError.illegalArgument_ if `index` is less than 0 + /// - throws: _ANTLRError.unsupportedOperation_ if the stream does not support /// seeking to the specified index + /// func seek(_ index: Int) throws + /// /// Returns the total number of symbols in the stream, including a single EOF /// symbol. - /// - /// - UnsupportedOperationException if the size of the stream is + /// + /// - throws: _ANTLRError.unsupportedOperation_ if the size of the stream is /// unknown. + /// func size() -> Int + /// /// Gets the name of the underlying symbol source. This method returns a /// non-null, non-empty string. If such a name is not known, this method - /// returns {@link #UNKNOWN_SOURCE_NAME}. - + /// returns _#UNKNOWN_SOURCE_NAME_. 
+ /// func getSourceName() -> String } diff --git a/runtime/Swift/Sources/Antlr4/InterpreterRuleContext.swift b/runtime/Swift/Sources/Antlr4/InterpreterRuleContext.swift index ea4789386..b881a94ea 100644 --- a/runtime/Swift/Sources/Antlr4/InterpreterRuleContext.swift +++ b/runtime/Swift/Sources/Antlr4/InterpreterRuleContext.swift @@ -1,33 +1,41 @@ +/// /// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. /// Use of this file is governed by the BSD 3-clause license that /// can be found in the LICENSE.txt file in the project root. +/// -/// This class extends {@link org.antlr.v4.runtime.ParserRuleContext} by allowing the value of -/// {@link #getRuleIndex} to be explicitly set for the context. -/// -///

      -/// {@link org.antlr.v4.runtime.ParserRuleContext} does not include field storage for the rule index +/// +/// This class extends _org.antlr.v4.runtime.ParserRuleContext_ by allowing the value of +/// _#getRuleIndex_ to be explicitly set for the context. +/// +/// +/// _org.antlr.v4.runtime.ParserRuleContext_ does not include field storage for the rule index /// since the context classes created by the code generator override the -/// {@link #getRuleIndex} method to return the correct value for that context. +/// _#getRuleIndex_ method to return the correct value for that context. /// Since the parser interpreter does not use the context classes generated for a /// parser, this class (with slightly more memory overhead per node) is used to -/// provide equivalent functionality.

      +/// provide equivalent functionality. +/// public class InterpreterRuleContext: ParserRuleContext { - /// This is the backing field for {@link #getRuleIndex}. + /// + /// This is the backing field for _#getRuleIndex_. + /// private var ruleIndex: Int = -1 public override init() { super.init() } - /// Constructs a new {@link org.antlr.v4.runtime.InterpreterRuleContext} with the specified + /// + /// Constructs a new _org.antlr.v4.runtime.InterpreterRuleContext_ with the specified /// parent, invoking state, and rule index. - /// + /// /// - parameter parent: The parent context. /// - parameter invokingStateNumber: The invoking state number. /// - parameter ruleIndex: The rule index for the current context. + /// public init(_ parent: ParserRuleContext?, _ invokingStateNumber: Int, _ ruleIndex: Int) { @@ -41,9 +49,11 @@ public class InterpreterRuleContext: ParserRuleContext { return ruleIndex } - /// Copy a {@link org.antlr.v4.runtime.ParserRuleContext} or {@link org.antlr.v4.runtime.InterpreterRuleContext} - /// stack to a {@link org.antlr.v4.runtime.InterpreterRuleContext} tree. - /// Return {@link null} if {@code ctx} is null. + /// + /// Copy a _org.antlr.v4.runtime.ParserRuleContext_ or _org.antlr.v4.runtime.InterpreterRuleContext_ + /// stack to a _org.antlr.v4.runtime.InterpreterRuleContext_ tree. + /// Return _null_ if `ctx` is null. + /// public static func fromParserRuleContext(_ ctx: ParserRuleContext?) -> InterpreterRuleContext? { guard let ctx = ctx else { return nil diff --git a/runtime/Swift/Sources/Antlr4/Lexer.swift b/runtime/Swift/Sources/Antlr4/Lexer.swift index 9c251ba66..e47aa1a93 100644 --- a/runtime/Swift/Sources/Antlr4/Lexer.swift +++ b/runtime/Swift/Sources/Antlr4/Lexer.swift @@ -1,35 +1,39 @@ +/// /// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. /// Use of this file is governed by the BSD 3-clause license that /// can be found in the LICENSE.txt file in the project root. 
+/// +/// /// A lexer is recognizer that draws input symbols from a character stream. /// lexer grammars result in a subclass of this object. A Lexer object /// uses simplified match() and error recovery mechanisms in the interest /// of speed. +/// import Foundation -//public class Lexer : Recognizer +open class Lexer: Recognizer, TokenSource { + public static let EOF = -1 + public static let DEFAULT_MODE = 0 + public static let MORE = -2 + public static let SKIP = -3 -open class Lexer: Recognizer - , TokenSource { - public static let EOF: Int = -1 - public static let DEFAULT_MODE: Int = 0 - public static let MORE: Int = -2 - public static let SKIP: Int = -3 - - public static let DEFAULT_TOKEN_CHANNEL: Int = CommonToken.DEFAULT_CHANNEL - public static let HIDDEN: Int = CommonToken.HIDDEN_CHANNEL - public static let MIN_CHAR_VALUE: Int = Character.MIN_VALUE; - public static let MAX_CHAR_VALUE: Int = Character.MAX_VALUE; + public static let DEFAULT_TOKEN_CHANNEL = CommonToken.DEFAULT_CHANNEL + public static let HIDDEN = CommonToken.HIDDEN_CHANNEL + public static let MIN_CHAR_VALUE = Character.MIN_VALUE; + public static let MAX_CHAR_VALUE = Character.MAX_VALUE; public var _input: CharStream? - internal var _tokenFactorySourcePair: (TokenSource?, CharStream?) + internal var _tokenFactorySourcePair: TokenSourceAndStream + /// /// How to create token objects - internal var _factory: TokenFactory = CommonTokenFactory.DEFAULT + /// + internal var _factory = CommonTokenFactory.DEFAULT + /// /// The goal of all lexer rules/methods is to create a token object. /// This is an instance variable as multiple rules may collaborate to /// create a single token. nextToken will return this object after @@ -37,44 +41,63 @@ open class Lexer: Recognizer /// emissions, then set this to the last token to be matched or /// something nonnull so that the auto token emit mechanism will not /// emit another token. + /// public var _token: Token? 
+ /// /// What character index in the stream did the current token start at? /// Needed, for example, to get the text for current token. Set at /// the start of nextToken. - public var _tokenStartCharIndex: Int = -1 + /// + public var _tokenStartCharIndex = -1 + /// /// The line on which the first character of the token resides - public var _tokenStartLine: Int = 0 + /// + public var _tokenStartLine = 0 + /// /// The character position of first character within the line - public var _tokenStartCharPositionInLine: Int = 0 + /// + public var _tokenStartCharPositionInLine = 0 + /// /// Once we see EOF on char stream, next token will be EOF. /// If you have DONE : EOF ; then you see DONE EOF. - public var _hitEOF: Bool = false + /// + public var _hitEOF = false + /// /// The channel number for the current token - public var _channel: Int = 0 + /// + public var _channel = 0 + /// /// The token type for the current token - public var _type: Int = 0 + /// + public var _type = 0 - public final var _modeStack: Stack = Stack() - public var _mode: Int = Lexer.DEFAULT_MODE + public final var _modeStack = Stack() + public var _mode = Lexer.DEFAULT_MODE + /// /// You can set the text for the current token to override what is in /// the input char buffer. Use setText() or can set this instance var. + /// public var _text: String? public override init() { + self._tokenFactorySourcePair = TokenSourceAndStream() + super.init() + self._tokenFactorySourcePair.tokenSource = self } public init(_ input: CharStream) { - - super.init() self._input = input - self._tokenFactorySourcePair = (self, input) + self._tokenFactorySourcePair = TokenSourceAndStream() + super.init() + self._tokenFactorySourcePair.tokenSource = self + self._tokenFactorySourcePair.stream = input } open func reset() throws { @@ -97,8 +120,10 @@ open class Lexer: Recognizer getInterpreter().reset() } + /// /// Return a token from this source; i.e., match a token on the char /// stream. 
+ /// open func nextToken() throws -> Token { guard let _input = _input else { @@ -107,7 +132,7 @@ open class Lexer: Recognizer // Mark start location in char stream so unbuffered streams are // guaranteed at least have text of current token - var tokenStartMarker: Int = _input.mark() + var tokenStartMarker = _input.mark() defer { // make sure we release marker after match or // unbuffered char stream will keep buffering @@ -158,11 +183,13 @@ open class Lexer: Recognizer } + /// /// Instruct the lexer to skip creating a token for current lexer rule /// and look for another token. nextToken() knows to keep looking when /// a lexer rule finishes with token set to SKIP_TOKEN. Recall that /// if token==null at end of any token rule, it creates one for you /// and emits it. + /// open func skip() { _type = Lexer.SKIP } @@ -205,14 +232,16 @@ open class Lexer: Recognizer return _factory } + /// /// Set the char stream and reset the lexer + /// open override func setInputStream(_ input: IntStream) throws { self._input = nil - self._tokenFactorySourcePair = (self, _input!) + self._tokenFactorySourcePair = makeTokenSourceAndStream() try reset() self._input = input as? CharStream - self._tokenFactorySourcePair = (self, _input!) + self._tokenFactorySourcePair = makeTokenSourceAndStream() } @@ -225,40 +254,45 @@ open class Lexer: Recognizer return _input } + /// /// By default does not support multiple emits per nextToken invocation /// for efficiency reasons. Subclass and override this method, nextToken, /// and getToken (to push tokens into a list and pull from that list /// rather than a single variable as this implementation does). + /// open func emit(_ token: Token) { //System.err.println("emit "+token); self._token = token } + /// /// The standard method called to automatically emit a token at the /// outermost lexical rule. The token object should point into the /// char buffer start..stop. If there is a text override in 'text', /// use that to set the token's text. 
Override this method to emit /// custom Token objects or provide a new factory. + /// @discardableResult open func emit() -> Token { - let t: Token = _factory.create(_tokenFactorySourcePair, _type, _text, _channel, _tokenStartCharIndex, getCharIndex() - 1, - _tokenStartLine, _tokenStartCharPositionInLine) + let t = _factory.create(_tokenFactorySourcePair, _type, _text, _channel, _tokenStartCharIndex, getCharIndex() - 1, _tokenStartLine, _tokenStartCharPositionInLine) emit(t) return t } + @discardableResult open func emitEOF() -> Token { - let cpos: Int = getCharPositionInLine() - let line: Int = getLine() - let eof: Token = _factory.create( - _tokenFactorySourcePair, - CommonToken.EOF, - nil, - CommonToken.DEFAULT_CHANNEL, - _input!.index(), - _input!.index() - 1, - line, - cpos) + let cpos = getCharPositionInLine() + let line = getLine() + let idx = _input!.index() + let eof = _factory.create( + _tokenFactorySourcePair, + CommonToken.EOF, + nil, + CommonToken.DEFAULT_CHANNEL, + idx, + idx - 1, + line, + cpos) emit(eof) return eof } @@ -281,13 +315,17 @@ open class Lexer: Recognizer getInterpreter().setCharPositionInLine(charPositionInLine) } + /// /// What is the index of the current character of lookahead? + /// open func getCharIndex() -> Int { return _input!.index() } + /// /// Return the text matched so far for the current token or any /// text override. + /// open func getText() -> String { if _text != nil { return _text! @@ -295,13 +333,17 @@ open class Lexer: Recognizer return getInterpreter().getText(_input!) } + /// /// Set the complete text of this token; it wipes any previous /// changes to the text. + /// open func setText(_ text: String) { self._text = text } + /// /// Override if emitting multiple tokens. + /// open func getToken() -> Token { return _token! } @@ -334,19 +376,13 @@ open class Lexer: Recognizer return nil } - /// Used to print out token names like ID during debugging and - /// error reporting. 
The generated parsers implement a method - /// that overrides this to point to their String[] tokenNames. - override - open func getTokenNames() -> [String?]? { - return nil - } - + /// /// Return a list of all Token objects in input char stream. /// Forces load of all tokens. Does not include EOF token. - open func getAllTokens() throws -> Array { - var tokens: Array = Array() - var t: Token = try nextToken() + /// + open func getAllTokens() throws -> [Token] { + var tokens = [Token]() + var t = try nextToken() while t.getType() != CommonToken.EOF { tokens.append(t) t = try nextToken() @@ -361,32 +397,35 @@ open class Lexer: Recognizer } } - open func notifyListeners(_ e: LexerNoViableAltException, recognizer: Recognizer) { + open func notifyListeners(_ e: LexerNoViableAltException, recognizer: Recognizer) { - let text: String = _input!.getText(Interval.of(_tokenStartCharIndex, _input!.index())) - let msg: String = "token recognition error at: '\(getErrorDisplay(text))'" + let text: String + do { + text = try _input!.getText(Interval.of(_tokenStartCharIndex, _input!.index())) + } + catch { + text = "" + } + let msg = "token recognition error at: '\(getErrorDisplay(text))'" - let listener: ANTLRErrorListener = getErrorListenerDispatch() + let listener = getErrorListenerDispatch() listener.syntaxError(recognizer, nil, _tokenStartLine, _tokenStartCharPositionInLine, msg, e) } open func getErrorDisplay(_ s: String) -> String { - let buf: StringBuilder = StringBuilder() - for c: Character in s.characters { + let buf = StringBuilder() + for c in s.characters { buf.append(getErrorDisplay(c)) } return buf.toString() } open func getErrorDisplay(_ c: Character) -> String { - var s: String = String(c) // String.valueOf(c as Character); + var s = String(c) if c.integerValue == CommonToken.EOF { s = "" } switch s { -// case CommonToken.EOF : -// s = ""; -// break; case "\n": s = "\\n" case "\t": @@ -404,16 +443,18 @@ open class Lexer: Recognizer return "'\(s)'" } + /// /// 
Lexers can normally match any char in it's vocabulary after matching /// a token, so do the easy thing and just kill a character and hope /// it all works out. You can instead use the rule invocation stack /// to do sophisticated error recovery if you are in a fragment rule. - //public func recover(re : RecognitionException) { - + /// open func recover(_ re: AnyObject) throws { - //System.out.println("consuming char "+(char)input.LA(1)+" during recovery"); - //re.printStackTrace(); // TODO: Do we lose character or line position information? try _input!.consume() } + + internal func makeTokenSourceAndStream() -> TokenSourceAndStream { + return TokenSourceAndStream(self, _input) + } } diff --git a/runtime/Swift/Sources/Antlr4/LexerInterpreter.swift b/runtime/Swift/Sources/Antlr4/LexerInterpreter.swift index 55c2f26e8..99d9f72d8 100644 --- a/runtime/Swift/Sources/Antlr4/LexerInterpreter.swift +++ b/runtime/Swift/Sources/Antlr4/LexerInterpreter.swift @@ -1,24 +1,23 @@ +/// /// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. /// Use of this file is governed by the BSD 3-clause license that /// can be found in the LICENSE.txt file in the project root. +/// public class LexerInterpreter: Lexer { internal final var grammarFileName: String internal final var atn: ATN - ////@Deprecated - internal final var tokenNames: [String?]? internal final var ruleNames: [String] internal final var channelNames: [String] internal final var modeNames: [String] - private final var vocabulary: Vocabulary? internal final var _decisionToDFA: [DFA] - internal final var _sharedContextCache: PredictionContextCache = - PredictionContextCache() + internal final var _sharedContextCache = PredictionContextCache() + // public override init() { // super.init()} @@ -36,13 +35,6 @@ public class LexerInterpreter: Lexer { self.grammarFileName = grammarFileName self.atn = atn - self.tokenNames = [String?]() - //new String[atn.maxTokenType]; - let length = tokenNames!.count - for i in 0.. 
[String?]? { - return tokenNames - } - override public func getRuleNames() -> [String] { return ruleNames diff --git a/runtime/Swift/Sources/Antlr4/LexerNoViableAltException.swift b/runtime/Swift/Sources/Antlr4/LexerNoViableAltException.swift index 392400e37..9f560c6c6 100644 --- a/runtime/Swift/Sources/Antlr4/LexerNoViableAltException.swift +++ b/runtime/Swift/Sources/Antlr4/LexerNoViableAltException.swift @@ -1,13 +1,19 @@ +/// /// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. /// Use of this file is governed by the BSD 3-clause license that /// can be found in the LICENSE.txt file in the project root. +/// -public class LexerNoViableAltException: RecognitionException, CustomStringConvertible { +public class LexerNoViableAltException: RecognitionException, CustomStringConvertible { + /// /// Matching attempted at what input index? + /// private final var startIndex: Int + /// /// Which configurations did we try at input.index() that couldn't match input.LA(1)? + /// private final var deadEndConfigs: ATNConfigSet public init(_ lexer: Lexer?, @@ -25,23 +31,15 @@ public class LexerNoViableAltException: RecognitionException, return startIndex } - public func getDeadEndConfigs() -> ATNConfigSet { return deadEndConfigs } - //override -// public func getInputStream() -> CharStream { -// return super.getInputStream() as! CharStream; -// } - - public var description: String { - var symbol: String = "" - if startIndex >= 0 && startIndex < getInputStream().size() { - let charStream: CharStream = getInputStream() as! CharStream - let interval: Interval = Interval.of(startIndex, startIndex) - symbol = charStream.getText(interval) + var symbol = "" + if let charStream = getInputStream() as? CharStream, startIndex >= 0 && startIndex < charStream.size() { + let interval = Interval.of(startIndex, startIndex) + symbol = try! 
charStream.getText(interval) symbol = Utils.escapeWhitespace(symbol, false) } diff --git a/runtime/Swift/Sources/Antlr4/ListTokenSource.swift b/runtime/Swift/Sources/Antlr4/ListTokenSource.swift index a553642cd..0863e16ee 100644 --- a/runtime/Swift/Sources/Antlr4/ListTokenSource.swift +++ b/runtime/Swift/Sources/Antlr4/ListTokenSource.swift @@ -1,90 +1,98 @@ +/// /// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. /// Use of this file is governed by the BSD 3-clause license that /// can be found in the LICENSE.txt file in the project root. +/// -/// Provides an implementation of {@link org.antlr.v4.runtime.TokenSource} as a wrapper around a list -/// of {@link org.antlr.v4.runtime.Token} objects. -/// -///

      If the final token in the list is an {@link org.antlr.v4.runtime.Token#EOF} token, it will be used -/// as the EOF token for every call to {@link #nextToken} after the end of the -/// list is reached. Otherwise, an EOF token will be created.

      +/// +/// Provides an implementation of _org.antlr.v4.runtime.TokenSource_ as a wrapper around a list +/// of _org.antlr.v4.runtime.Token_ objects. +/// +/// If the final token in the list is an _org.antlr.v4.runtime.Token#EOF_ token, it will be used +/// as the EOF token for every call to _#nextToken_ after the end of the +/// list is reached. Otherwise, an EOF token will be created. +/// public class ListTokenSource: TokenSource { - /// The wrapped collection of {@link org.antlr.v4.runtime.Token} objects to return. - internal final var tokens: Array + /// + /// The wrapped collection of _org.antlr.v4.runtime.Token_ objects to return. + /// + internal final var tokens: [Token] - /// The name of the input source. If this value is {@code null}, a call to - /// {@link #getSourceName} should return the source name used to create the - /// the next token in {@link #tokens} (or the previous token if the end of + /// + /// The name of the input source. If this value is `null`, a call to + /// _#getSourceName_ should return the source name used to create the + /// the next token in _#tokens_ (or the previous token if the end of /// the input has been reached). + /// private final var sourceName: String? - /// The index into {@link #tokens} of token to return by the next call to - /// {@link #nextToken}. The end of the input is indicated by this value - /// being greater than or equal to the number of items in {@link #tokens}. - internal var i: Int = 0 + /// + /// The index into _#tokens_ of token to return by the next call to + /// _#nextToken_. The end of the input is indicated by this value + /// being greater than or equal to the number of items in _#tokens_. + /// + internal var i = 0 + /// /// This field caches the EOF token for the token source. + /// internal var eofToken: Token? - /// This is the backing field for {@link #getTokenFactory} and - /// {@link setTokenFactory}. 
- private var _factory: TokenFactory = CommonTokenFactory.DEFAULT + /// + /// This is the backing field for _#getTokenFactory_ and + /// _setTokenFactory_. + /// + private var _factory = CommonTokenFactory.DEFAULT - /// Constructs a new {@link org.antlr.v4.runtime.ListTokenSource} instance from the specified - /// collection of {@link org.antlr.v4.runtime.Token} objects. - /// - /// - parameter tokens: The collection of {@link org.antlr.v4.runtime.Token} objects to provide as a - /// {@link org.antlr.v4.runtime.TokenSource}. - /// - NullPointerException if {@code tokens} is {@code null} - public convenience init(_ tokens: Array) { + /// + /// Constructs a new _org.antlr.v4.runtime.ListTokenSource_ instance from the specified + /// collection of _org.antlr.v4.runtime.Token_ objects. + /// + /// - parameter tokens: The collection of _org.antlr.v4.runtime.Token_ objects to provide as a + /// _org.antlr.v4.runtime.TokenSource_. + /// + public convenience init(_ tokens: [Token]) { self.init(tokens, nil) } - /// Constructs a new {@link org.antlr.v4.runtime.ListTokenSource} instance from the specified - /// collection of {@link org.antlr.v4.runtime.Token} objects and source name. - /// - /// - parameter tokens: The collection of {@link org.antlr.v4.runtime.Token} objects to provide as a - /// {@link org.antlr.v4.runtime.TokenSource}. - /// - parameter sourceName: The name of the {@link org.antlr.v4.runtime.TokenSource}. If this value is - /// {@code null}, {@link #getSourceName} will attempt to infer the name from - /// the next {@link org.antlr.v4.runtime.Token} (or the previous token if the end of the input has + /// + /// Constructs a new _org.antlr.v4.runtime.ListTokenSource_ instance from the specified + /// collection of _org.antlr.v4.runtime.Token_ objects and source name. + /// + /// - parameter tokens: The collection of _org.antlr.v4.runtime.Token_ objects to provide as a + /// _org.antlr.v4.runtime.TokenSource_. 
+ /// - parameter sourceName: The name of the _org.antlr.v4.runtime.TokenSource_. If this value is + /// `null`, _#getSourceName_ will attempt to infer the name from + /// the next _org.antlr.v4.runtime.Token_ (or the previous token if the end of the input has /// been reached). - /// - /// - NullPointerException if {@code tokens} is {@code null} - public init(_ tokens: Array, _ sourceName: String?) { - + /// + public init(_ tokens: [Token], _ sourceName: String?) { self.tokens = tokens self.sourceName = sourceName } - /// {@inheritDoc} - public func getCharPositionInLine() -> Int { if i < tokens.count { return tokens[i].getCharPositionInLine() - } else { - if let eofToken = eofToken { - return eofToken.getCharPositionInLine() - } else { - if tokens.count > 0 { - // have to calculate the result from the line/column of the previous - // token, along with the text of the token. - let lastToken: Token = tokens[tokens.count - 1] + } + else if let eofToken = eofToken { + return eofToken.getCharPositionInLine() + } + else if tokens.count > 0 { + // have to calculate the result from the line/column of the previous + // token, along with the text of the token. 
+ let lastToken = tokens[tokens.count - 1] - if let tokenText = lastToken.getText() { - let lastNewLine: Int = tokenText.lastIndexOf("\n") - if lastNewLine >= 0 { - return tokenText.length - lastNewLine - 1 - } - } - var position = lastToken.getCharPositionInLine() - position += lastToken.getStopIndex() - position -= lastToken.getStartIndex() - position += 1 - return position + if let tokenText = lastToken.getText() { + let lastNewLine = tokenText.lastIndexOf("\n") + if lastNewLine >= 0 { + return tokenText.length - lastNewLine - 1 } } + return (lastToken.getCharPositionInLine() + + lastToken.getStopIndex() - + lastToken.getStartIndex() + 1) } // only reach this if tokens is empty, meaning EOF occurs at the first @@ -92,27 +100,26 @@ public class ListTokenSource: TokenSource { return 0 } - /// {@inheritDoc} - public func nextToken() -> Token { if i >= tokens.count { if eofToken == nil { - var start: Int = -1 + var start = -1 if tokens.count > 0 { - let previousStop: Int = tokens[tokens.count - 1].getStopIndex() + let previousStop = tokens[tokens.count - 1].getStopIndex() if previousStop != -1 { start = previousStop + 1 } } - let stop: Int = max(-1, start - 1) - eofToken = _factory.create((self, getInputStream()!), CommonToken.EOF, "EOF", CommonToken.DEFAULT_CHANNEL, start, stop, getLine(), getCharPositionInLine()) + let stop = max(-1, start - 1) + let source = TokenSourceAndStream(self, getInputStream()) + eofToken = _factory.create(source, CommonToken.EOF, "EOF", CommonToken.DEFAULT_CHANNEL, start, stop, getLine(), getCharPositionInLine()) } return eofToken! 
} - let t: Token = tokens[i] + let t = tokens[i] if i == tokens.count - 1 && t.getType() == CommonToken.EOF { eofToken = t } @@ -121,8 +128,6 @@ public class ListTokenSource: TokenSource { return t } - /// {@inheritDoc} - public func getLine() -> Int { if i < tokens.count { return tokens[i].getLine() @@ -133,8 +138,8 @@ public class ListTokenSource: TokenSource { if tokens.count > 0 { // have to calculate the result from the line/column of the previous // token, along with the text of the token. - let lastToken: Token = tokens[tokens.count - 1] - var line: Int = lastToken.getLine() + let lastToken = tokens[tokens.count - 1] + var line = lastToken.getLine() if let tokenText = lastToken.getText() { let length = tokenText.length @@ -156,30 +161,24 @@ public class ListTokenSource: TokenSource { return 1 } - /// {@inheritDoc} - public func getInputStream() -> CharStream? { if i < tokens.count { return tokens[i].getInputStream() - } else { - if let eofToken = eofToken{ - return eofToken.getInputStream() - } else { - if tokens.count > 0 { - return tokens[tokens.count - 1].getInputStream() - } - } + } + else if let eofToken = eofToken { + return eofToken.getInputStream() + } + else if tokens.count > 0 { + return tokens[tokens.count - 1].getInputStream() } // no input stream information is available return nil } - /// {@inheritDoc} - public func getSourceName() -> String { - if sourceName != nil { - return sourceName! 
+ if let sourceName = sourceName { + return sourceName } if let inputStream = getInputStream() { @@ -189,14 +188,10 @@ public class ListTokenSource: TokenSource { return "List" } - /// {@inheritDoc} - public func setTokenFactory(_ factory: TokenFactory) { self._factory = factory } - /// {@inheritDoc} - public func getTokenFactory() -> TokenFactory { return _factory } diff --git a/runtime/Swift/Sources/Antlr4/NoViableAltException.swift b/runtime/Swift/Sources/Antlr4/NoViableAltException.swift index 8a41f8cca..bf3ab415f 100644 --- a/runtime/Swift/Sources/Antlr4/NoViableAltException.swift +++ b/runtime/Swift/Sources/Antlr4/NoViableAltException.swift @@ -4,48 +4,49 @@ */ -/** Indicates that the parser could not decide which of two or more paths - * to take based upon the remaining input. It tracks the starting token - * of the offending input and also knows where the parser was - * in the various paths when the error. Reported by reportNoViableAlternative() - */ +/// Indicates that the parser could not decide which of two or more paths +/// to take based upon the remaining input. It tracks the starting token +/// of the offending input and also knows where the parser was +/// in the various paths when the error. Reported by reportNoViableAlternative() +/// -public class NoViableAltException: RecognitionException { - /** Which configurations did we try at input.index() that couldn't match input.LT(1)? */ +public class NoViableAltException: RecognitionException { + /// Which configurations did we try at input.index() that couldn't match input.LT(1)? private final var deadEndConfigs: ATNConfigSet? - /** The token object at the start index; the input stream might - * not be buffering tokens so get a reference to it. (At the - * time the error occurred, of course the stream needs to keep a - * buffer all of the tokens but later we might not have access to those.) 
- */ - + /// The token object at the start index; the input stream might + /// not be buffering tokens so get a reference to it. (At the + /// time the error occurred, of course the stream needs to keep a + /// buffer all of the tokens but later we might not have access to those.) + /// private final var startToken: Token - public convenience init(_ recognizer: Parser?) throws { + public convenience init(_ recognizer: Parser) { // LL(1) error + let token = try! recognizer.getCurrentToken() self.init(recognizer, - recognizer!.getInputStream()!, - try recognizer!.getCurrentToken(), - try recognizer!.getCurrentToken(), + recognizer.getInputStream()!, + token, + token, nil, - recognizer!._ctx) + recognizer._ctx) } public init(_ recognizer: Parser?, _ input: IntStream, _ startToken: Token, - _ offendingToken: Token, + _ offendingToken: Token?, _ deadEndConfigs: ATNConfigSet?, _ ctx: ParserRuleContext?) { self.deadEndConfigs = deadEndConfigs self.startToken = startToken - // as? Recognizer super.init(recognizer, input, ctx) - self.setOffendingToken(offendingToken) + if let offendingToken = offendingToken { + setOffendingToken(offendingToken) + } } diff --git a/runtime/Swift/Sources/Antlr4/Parser.swift b/runtime/Swift/Sources/Antlr4/Parser.swift index 81ce2ac4f..a031aaf88 100644 --- a/runtime/Swift/Sources/Antlr4/Parser.swift +++ b/runtime/Swift/Sources/Antlr4/Parser.swift @@ -1,14 +1,18 @@ -/// +/// +/// /// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. /// Use of this file is governed by the BSD 3-clause license that /// can be found in the LICENSE.txt file in the project root. -/// +/// +/// import Foundation +/// /// This is all the parsing support code essentially; most of it is error recovery stuff. 
+/// open class Parser: Recognizer { - public static let EOF: Int = -1 + public static let EOF = -1 public static var ConsoleError = true public class TraceListener: ParseTreeListener { @@ -23,16 +27,13 @@ open class Parser: Recognizer { print("enter \(ruleName), LT(1)=\(lt1)") } - public func visitTerminal(_ node: TerminalNode) { print("consume \(String(describing: node.getSymbol())) rule \(host.getRuleNames()[host._ctx!.getRuleIndex()])") } - public func visitErrorNode(_ node: ErrorNode) { } - public func exitEveryRule(_ ctx: ParserRuleContext) throws { let ruleName = host.getRuleNames()[ctx.getRuleIndex()] let lt1 = try host._input.LT(1)!.getText()! @@ -41,58 +42,56 @@ open class Parser: Recognizer { } public class TrimToSizeListener: ParseTreeListener { - - - public static let INSTANCE: TrimToSizeListener = TrimToSizeListener() - + public static let INSTANCE = TrimToSizeListener() public func enterEveryRule(_ ctx: ParserRuleContext) { } - public func visitTerminal(_ node: TerminalNode) { } - public func visitErrorNode(_ node: ErrorNode) { } - public func exitEveryRule(_ ctx: ParserRuleContext) { // TODO: Print exit info. } } + /// /// mutex for bypassAltsAtnCache updates + /// private var bypassAltsAtnCacheMutex = Mutex() + /// /// mutex for decisionToDFA updates + /// private var decisionToDFAMutex = Mutex() - /** - * This field maps from the serialized ATN string to the deserialized {@link org.antlr.v4.runtime.atn.ATN} with - * bypass alternatives. - * - * @see org.antlr.v4.runtime.atn.ATNDeserializationOptions#isGenerateRuleBypassTransitions() - */ + /// + /// This field maps from the serialized ATN string to the deserialized _org.antlr.v4.runtime.atn.ATN_ with + /// bypass alternatives. + /// + /// - SeeAlso: org.antlr.v4.runtime.atn.ATNDeserializationOptions#isGenerateRuleBypassTransitions() + /// private let bypassAltsAtnCache: HashMap = HashMap() - /** - * The error handling strategy for the parser. 
The default value is a new - * instance of {@link org.antlr.v4.runtime.DefaultErrorStrategy}. - * - * @see #getErrorHandler - * @see #setErrorHandler - */ + /// + /// The error handling strategy for the parser. The default value is a new + /// instance of _org.antlr.v4.runtime.DefaultErrorStrategy_. + /// + /// - SeeAlso: #getErrorHandler + /// - SeeAlso: #setErrorHandler + /// public var _errHandler: ANTLRErrorStrategy = DefaultErrorStrategy() - /** - * The input stream. - * - * @see #getInputStream - * @see #setInputStream - */ + /// + /// The input stream. + /// + /// - SeeAlso: #getInputStream + /// - SeeAlso: #setInputStream + /// public var _input: TokenStream! internal var _precedenceStack: Stack = { @@ -102,42 +101,42 @@ open class Parser: Recognizer { }() - /** - * The {@link org.antlr.v4.runtime.ParserRuleContext} object for the currently executing rule. - * This is always non-null during the parsing process. - */ + /// + /// The _org.antlr.v4.runtime.ParserRuleContext_ object for the currently executing rule. + /// This is always non-null during the parsing process. + /// public var _ctx: ParserRuleContext? = nil - /** - * Specifies whether or not the parser should construct a parse tree during - * the parsing process. The default value is {@code true}. - * - * @see #getBuildParseTree - * @see #setBuildParseTree - */ + /// + /// Specifies whether or not the parser should construct a parse tree during + /// the parsing process. The default value is `true`. + /// + /// - SeeAlso: #getBuildParseTree + /// - SeeAlso: #setBuildParseTree + /// internal var _buildParseTrees: Bool = true - /** - * When {@link #setTrace}{@code (true)} is called, a reference to the - * {@link org.antlr.v4.runtime.Parser.TraceListener} is stored here so it can be easily removed in a - * later call to {@link #setTrace}{@code (false)}. The listener itself is - * implemented as a parser listener so this field is not directly used by - * other parser methods. 
- */ + /// + /// When _#setTrace_`(true)` is called, a reference to the + /// _org.antlr.v4.runtime.Parser.TraceListener_ is stored here so it can be easily removed in a + /// later call to _#setTrace_`(false)`. The listener itself is + /// implemented as a parser listener so this field is not directly used by + /// other parser methods. + /// private var _tracer: TraceListener? - /** - * The list of {@link org.antlr.v4.runtime.tree.ParseTreeListener} listeners registered to receive - * events during the parse. - * - * @see #addParseListener - */ + /// + /// The list of _org.antlr.v4.runtime.tree.ParseTreeListener_ listeners registered to receive + /// events during the parse. + /// + /// - SeeAlso: #addParseListener + /// public var _parseListeners: Array? - /** - * The number of syntax errors reported during parsing. This value is - * incremented each time {@link #notifyErrorListeners} is called. - */ + /// + /// The number of syntax errors reported during parsing. This value is + /// incremented each time _#notifyErrorListeners_ is called. + /// internal var _syntaxErrors: Int = 0 public init(_ input: TokenStream) throws { @@ -146,7 +145,7 @@ open class Parser: Recognizer { try setInputStream(input) } - /** reset the parser's state */ + /// reset the parser's state public func reset() throws { if (getInputStream() != nil) { try getInputStream()!.seek(0) @@ -164,28 +163,28 @@ open class Parser: Recognizer { } } - /** - * Match current input symbol against {@code ttype}. If the symbol type - * matches, {@link org.antlr.v4.runtime.ANTLRErrorStrategy#reportMatch} and {@link #consume} are - * called to complete the match process. - * - *

      If the symbol type does not match, - * {@link org.antlr.v4.runtime.ANTLRErrorStrategy#recoverInline} is called on the current error - * strategy to attempt recovery. If {@link #getBuildParseTree} is - * {@code true} and the token index of the symbol returned by - * {@link org.antlr.v4.runtime.ANTLRErrorStrategy#recoverInline} is -1, the symbol is added to - * the parse tree by calling {@link #createErrorNode(ParserRuleContext, Token)} then - * {@link ParserRuleContext#addErrorNode(ErrorNode)}.

      - * - * @param ttype the token type to match - * @return the matched symbol - * @throws org.antlr.v4.runtime.RecognitionException if the current input symbol did not match - * {@code ttype} and the error strategy could not recover from the - * mismatched symbol - */ + /// + /// Match current input symbol against `ttype`. If the symbol type + /// matches, _org.antlr.v4.runtime.ANTLRErrorStrategy#reportMatch_ and _#consume_ are + /// called to complete the match process. + /// + /// If the symbol type does not match, + /// _org.antlr.v4.runtime.ANTLRErrorStrategy#recoverInline_ is called on the current error + /// strategy to attempt recovery. If _#getBuildParseTree_ is + /// `true` and the token index of the symbol returned by + /// _org.antlr.v4.runtime.ANTLRErrorStrategy#recoverInline_ is -1, the symbol is added to + /// the parse tree by calling _#createErrorNode(ParserRuleContext, Token)_ then + /// _ParserRuleContext#addErrorNode(ErrorNode)_. + /// + /// - Parameter ttype: the token type to match + /// - Throws: org.antlr.v4.runtime.RecognitionException if the current input symbol did not match + /// `ttype` and the error strategy could not recover from the + /// mismatched symbol + /// - Returns: the matched symbol + /// @discardableResult public func match(_ ttype: Int) throws -> Token { - var t: Token = try getCurrentToken() + var t = try getCurrentToken() if t.getType() == ttype { _errHandler.reportMatch(self) try consume() @@ -200,27 +199,27 @@ open class Parser: Recognizer { return t } - /** - * Match current input symbol as a wildcard. If the symbol type matches - * (i.e. has a value greater than 0), {@link org.antlr.v4.runtime.ANTLRErrorStrategy#reportMatch} - * and {@link #consume} are called to complete the match process. - * - *

      If the symbol type does not match, - * {@link org.antlr.v4.runtime.ANTLRErrorStrategy#recoverInline} is called on the current error - * strategy to attempt recovery. If {@link #getBuildParseTree} is - * {@code true} and the token index of the symbol returned by - * {@link org.antlr.v4.runtime.ANTLRErrorStrategy#recoverInline} is -1, the symbol is added to - * the parse tree by calling {@link #createErrorNode(ParserRuleContext, Token)} then - * {@link ParserRuleContext#addErrorNode(ErrorNode)}.

      - * - * @return the matched symbol - * @throws org.antlr.v4.runtime.RecognitionException if the current input symbol did not match - * a wildcard and the error strategy could not recover from the mismatched - * symbol - *///; RecognitionException + /// + /// Match current input symbol as a wildcard. If the symbol type matches + /// (i.e. has a value greater than 0), _org.antlr.v4.runtime.ANTLRErrorStrategy#reportMatch_ + /// and _#consume_ are called to complete the match process. + /// + /// If the symbol type does not match, + /// _org.antlr.v4.runtime.ANTLRErrorStrategy#recoverInline_ is called on the current error + /// strategy to attempt recovery. If _#getBuildParseTree_ is + /// `true` and the token index of the symbol returned by + /// _org.antlr.v4.runtime.ANTLRErrorStrategy#recoverInline_ is -1, the symbol is added to + /// the parse tree by calling _#createErrorNode(ParserRuleContext, Token)_ then + /// _ParserRuleContext#addErrorNode(ErrorNode)_. + /// + /// - Throws: org.antlr.v4.runtime.RecognitionException if the current input symbol did not match + /// a wildcard and the error strategy could not recover from the mismatched + /// symbol + /// - Returns: the matched symbol + /// @discardableResult public func matchWildcard() throws -> Token { - var t: Token = try getCurrentToken() + var t = try getCurrentToken() if t.getType() > 0 { _errHandler.reportMatch(self) try consume() @@ -236,43 +235,43 @@ open class Parser: Recognizer { return t } - /** - * Track the {@link org.antlr.v4.runtime.ParserRuleContext} objects during the parse and hook - * them up using the {@link org.antlr.v4.runtime.ParserRuleContext#children} list so that it - * forms a parse tree. The {@link org.antlr.v4.runtime.ParserRuleContext} returned from the start - * rule represents the root of the parse tree. - * - *

      Note that if we are not building parse trees, rule contexts only point - * upwards. When a rule exits, it returns the context but that gets garbage - * collected if nobody holds a reference. It points upwards but nobody - * points at it.

      - * - *

      When we build parse trees, we are adding all of these contexts to - * {@link org.antlr.v4.runtime.ParserRuleContext#children} list. Contexts are then not candidates - * for garbage collection.

      - */ + /// + /// Track the _org.antlr.v4.runtime.ParserRuleContext_ objects during the parse and hook + /// them up using the _org.antlr.v4.runtime.ParserRuleContext#children_ list so that it + /// forms a parse tree. The _org.antlr.v4.runtime.ParserRuleContext_ returned from the start + /// rule represents the root of the parse tree. + /// + /// Note that if we are not building parse trees, rule contexts only point + /// upwards. When a rule exits, it returns the context but that gets garbage + /// collected if nobody holds a reference. It points upwards but nobody + /// points at it. + /// + /// When we build parse trees, we are adding all of these contexts to + /// _org.antlr.v4.runtime.ParserRuleContext#children_ list. Contexts are then not candidates + /// for garbage collection. + /// public func setBuildParseTree(_ buildParseTrees: Bool) { self._buildParseTrees = buildParseTrees } - /** - * Gets whether or not a complete parse tree will be constructed while - * parsing. This property is {@code true} for a newly constructed parser. - * - * @return {@code true} if a complete parse tree will be constructed while - * parsing, otherwise {@code false} - */ + /// + /// Gets whether or not a complete parse tree will be constructed while + /// parsing. This property is `true` for a newly constructed parser. + /// + /// - Returns: `true` if a complete parse tree will be constructed while + /// parsing, otherwise `false` + /// public func getBuildParseTree() -> Bool { return _buildParseTrees } - /** - * Trim the internal lists of the parse tree during parsing to conserve memory. - * This property is set to {@code false} by default for a newly constructed parser. - * - * @param trimParseTrees {@code true} to trim the capacity of the {@link org.antlr.v4.runtime.ParserRuleContext#children} - * list to its size after a rule is parsed. - */ + /// + /// Trim the internal lists of the parse tree during parsing to conserve memory. 
+ /// This property is set to `false` by default for a newly constructed parser. + /// + /// - Parameter trimParseTrees: `true` to trim the capacity of the _org.antlr.v4.runtime.ParserRuleContext#children_ + /// list to its size after a rule is parsed. + /// public func setTrimParseTree(_ trimParseTrees: Bool) { if trimParseTrees { if getTrimParseTree() { @@ -284,72 +283,61 @@ open class Parser: Recognizer { } } - /** - * @return {@code true} if the {@link org.antlr.v4.runtime.ParserRuleContext#children} list is trimmed - * using the default {@link org.antlr.v4.runtime.Parser.TrimToSizeListener} during the parse process. - */ + /// + /// - Returns: `true` if the _org.antlr.v4.runtime.ParserRuleContext#children_ list is trimmed + /// using the default _org.antlr.v4.runtime.Parser.TrimToSizeListener_ during the parse process. + /// public func getTrimParseTree() -> Bool { - return !getParseListeners().filter({ $0 === TrimToSizeListener.INSTANCE }).isEmpty } - - public func getParseListeners() -> Array { - let listeners: Array? = _parseListeners - if listeners == nil { - return Array() - } - - return listeners! + public func getParseListeners() -> [ParseTreeListener] { + return _parseListeners ?? [ParseTreeListener]() } - /** - * Registers {@code listener} to receive events during the parsing process. - * - *

      To support output-preserving grammar transformations (including but not - * limited to left-recursion removal, automated left-factoring, and - * optimized code generation), calls to listener methods during the parse - * may differ substantially from calls made by - * {@link org.antlr.v4.runtime.tree.ParseTreeWalker#DEFAULT} used after the parse is complete. In - * particular, rule entry and exit events may occur in a different order - * during the parse than after the parser. In addition, calls to certain - * rule entry methods may be omitted.

      - * - *

      With the following specific exceptions, calls to listener events are - * deterministic, i.e. for identical input the calls to listener - * methods will be the same.

      - * - *
        - *
      • Alterations to the grammar used to generate code may change the - * behavior of the listener calls.
      • - *
      • Alterations to the command line options passed to ANTLR 4 when - * generating the parser may change the behavior of the listener calls.
      • - *
      • Changing the version of the ANTLR Tool used to generate the parser - * may change the behavior of the listener calls.
      • - *
      - * - * @param listener the listener to add - * - * @throws NullPointerException if {@code} listener is {@code null} - */ + /// + /// Registers `listener` to receive events during the parsing process. + /// + /// To support output-preserving grammar transformations (including but not + /// limited to left-recursion removal, automated left-factoring, and + /// optimized code generation), calls to listener methods during the parse + /// may differ substantially from calls made by + /// _org.antlr.v4.runtime.tree.ParseTreeWalker#DEFAULT_ used after the parse is complete. In + /// particular, rule entry and exit events may occur in a different order + /// during the parse than after the parser. In addition, calls to certain + /// rule entry methods may be omitted. + /// + /// With the following specific exceptions, calls to listener events are + /// __deterministic__, i.e. for identical input the calls to listener + /// methods will be the same. + /// + /// * Alterations to the grammar used to generate code may change the + /// behavior of the listener calls. + /// * Alterations to the command line options passed to ANTLR 4 when + /// generating the parser may change the behavior of the listener calls. + /// * Changing the version of the ANTLR Tool used to generate the parser + /// may change the behavior of the listener calls. + /// + /// - Parameter listener: the listener to add + /// public func addParseListener(_ listener: ParseTreeListener) { if _parseListeners == nil { - _parseListeners = Array() + _parseListeners = [ParseTreeListener]() } - self._parseListeners!.append(listener) + _parseListeners!.append(listener) } - /** - * Remove {@code listener} from the list of parse listeners. - * - *

      If {@code listener} is {@code null} or has not been added as a parse - * listener, this method does nothing.

      - * - * @see #addParseListener - * - * @param listener the listener to remove - */ + /// + /// Remove `listener` from the list of parse listeners. + /// + /// If `listener` is `null` or has not been added as a parse + /// listener, this method does nothing. + /// + /// - SeeAlso: #addParseListener + /// + /// - Parameter listener: the listener to remove + /// public func removeParseListener(_ listener: ParseTreeListener?) { if _parseListeners != nil { @@ -364,20 +352,20 @@ open class Parser: Recognizer { } } - /** - * Remove all parse listeners. - * - * @see #addParseListener - */ + /// + /// Remove all parse listeners. + /// + /// - SeeAlso: #addParseListener + /// public func removeParseListeners() { _parseListeners = nil } - /** - * Notify any parse listeners of an enter rule event. - * - * @see #addParseListener - */ + /// + /// Notify any parse listeners of an enter rule event. + /// + /// - SeeAlso: #addParseListener + /// public func triggerEnterRuleEvent() throws { if let _parseListeners = _parseListeners, let _ctx = _ctx { for listener: ParseTreeListener in _parseListeners { @@ -387,63 +375,59 @@ open class Parser: Recognizer { } } - /** - * Notify any parse listeners of an exit rule event. - * - * @see #addParseListener - */ + /// + /// Notify any parse listeners of an exit rule event. + /// + /// - SeeAlso: #addParseListener + /// public func triggerExitRuleEvent() throws { // reverse order walk of listeners if let _parseListeners = _parseListeners, let _ctx = _ctx { - var i: Int = _parseListeners.count - 1 + var i = _parseListeners.count - 1 while i >= 0 { - let listener: ParseTreeListener = _parseListeners[i] + let listener = _parseListeners[i] _ctx.exitRule(listener) - try listener.exitEveryRule(_ctx) + try listener.exitEveryRule(_ctx) i -= 1 } } } - /** - * Gets the number of syntax errors reported during parsing. This value is - * incremented each time {@link #notifyErrorListeners} is called. 
- * - * @see #notifyErrorListeners - */ + /// + /// Gets the number of syntax errors reported during parsing. This value is + /// incremented each time _#notifyErrorListeners_ is called. + /// + /// - SeeAlso: #notifyErrorListeners + /// public func getNumberOfSyntaxErrors() -> Int { return _syntaxErrors } override open func getTokenFactory() -> TokenFactory { - // return _input.getTokenSource().getTokenFactory() } - /** Tell our token source and error strategy about a new way to create tokens. */ + /// Tell our token source and error strategy about a new way to create tokens. override open func setTokenFactory(_ factory: TokenFactory) { - // _input.getTokenSource().setTokenFactory(factory) } - /** - * The ATN with bypass alternatives is expensive to create so we create it - * lazily. - * - * @throws UnsupportedOperationException if the current parser does not - * implement the {@link #getSerializedATN()} method. - */ - + /// + /// The ATN with bypass alternatives is expensive to create so we create it + /// lazily. + /// + /// - Throws: _ANTLRError.unsupportedOperation_ if the current parser does not + /// implement the _#getSerializedATN()_ method. + /// public func getATNWithBypassAlts() -> ATN { - let serializedAtn: String = getSerializedATN() + let serializedAtn = getSerializedATN() - var result: ATN? = bypassAltsAtnCache[serializedAtn] - bypassAltsAtnCacheMutex.synchronized { - [unowned self] in + var result = bypassAltsAtnCache[serializedAtn] + bypassAltsAtnCacheMutex.synchronized { [unowned self] in if result == nil { - let deserializationOptions: ATNDeserializationOptions = ATNDeserializationOptions() + let deserializationOptions = ATNDeserializationOptions() try! deserializationOptions.setGenerateRuleBypassTransitions(true) result = try! ATNDeserializer(deserializationOptions).deserialize(Array(serializedAtn.characters)) self.bypassAltsAtnCache[serializedAtn] = result! @@ -452,36 +436,34 @@ open class Parser: Recognizer { return result! 
} - /** - * The preferred method of getting a tree pattern. For example, here's a - * sample use: - * - *
      -     * ParseTree t = parser.expr();
      -     * ParseTreePattern p = parser.compileParseTreePattern("<ID>+0", MyParser.RULE_expr);
      -     * ParseTreeMatch m = p.match(t);
      -     * String id = m.get("ID");
      -     * 
      - */ + /// + /// The preferred method of getting a tree pattern. For example, here's a + /// sample use: + /// + /// + /// ParseTree t = parser.expr(); + /// ParseTreePattern p = parser.compileParseTreePattern("<ID>+0", MyParser.RULE_expr); + /// ParseTreeMatch m = p.match(t); + /// String id = m.get("ID"); + /// + /// public func compileParseTreePattern(_ pattern: String, _ patternRuleIndex: Int) throws -> ParseTreePattern { if let tokenStream = getTokenStream() { - let tokenSource: TokenSource = tokenStream.getTokenSource() - if tokenSource is Lexer { - let lexer: Lexer = tokenSource as! Lexer + let tokenSource = tokenStream.getTokenSource() + if let lexer = tokenSource as? Lexer { return try compileParseTreePattern(pattern, patternRuleIndex, lexer) } } throw ANTLRError.unsupportedOperation(msg: "Parser can't discover a lexer to use") - } - /** - * The same as {@link #compileParseTreePattern(String, int)} but specify a - * {@link org.antlr.v4.runtime.Lexer} rather than trying to deduce it from this parser. - */ + /// + /// The same as _#compileParseTreePattern(String, int)_ but specify a + /// _org.antlr.v4.runtime.Lexer_ rather than trying to deduce it from this parser. + /// public func compileParseTreePattern(_ pattern: String, _ patternRuleIndex: Int, _ lexer: Lexer) throws -> ParseTreePattern { - let m: ParseTreePatternMatcher = ParseTreePatternMatcher(lexer, self) + let m = ParseTreePatternMatcher(lexer, self) return try m.compile(pattern, patternRuleIndex) } @@ -508,7 +490,7 @@ open class Parser: Recognizer { return _input } - /** Set the token stream and reset the parser. */ + /// Set the token stream and reset the parser. public func setTokenStream(_ input: TokenStream) throws { //TODO self._input = nil; self._input = nil; @@ -516,74 +498,76 @@ open class Parser: Recognizer { self._input = input } - /** Match needs to return the current input symbol, which gets put - * into the label for the associated token ref; e.g., x=ID. 
- */ + /// Match needs to return the current input symbol, which gets put + /// into the label for the associated token ref; e.g., x=ID. + /// public func getCurrentToken() throws -> Token { return try _input.LT(1)! } - public final func notifyErrorListeners(_ msg: String) throws { - try notifyErrorListeners(getCurrentToken(), msg, nil) + public final func notifyErrorListeners(_ msg: String) { + let token = try? getCurrentToken() + notifyErrorListeners(token, msg, nil) } - public func notifyErrorListeners(_ offendingToken: Token, _ msg: String, - _ e: AnyObject?) { + public func notifyErrorListeners(_ offendingToken: Token?, _ msg: String, _ e: AnyObject?) { _syntaxErrors += 1 - var line: Int = -1 - var charPositionInLine: Int = -1 - line = offendingToken.getLine() - charPositionInLine = offendingToken.getCharPositionInLine() + var line = -1 + var charPositionInLine = -1 + if let offendingToken = offendingToken { + line = offendingToken.getLine() + charPositionInLine = offendingToken.getCharPositionInLine() + } - let listener: ANTLRErrorListener = getErrorListenerDispatch() + let listener = getErrorListenerDispatch() listener.syntaxError(self, offendingToken, line, charPositionInLine, msg, e) } - /** - * Consume and return the {@linkplain #getCurrentToken current symbol}. - * - *

      E.g., given the following input with {@code A} being the current - * lookahead symbol, this function moves the cursor to {@code B} and returns - * {@code A}.

      - * - *
      -     *  A B
      -     *  ^
      -     * 
      - * - * If the parser is not in error recovery mode, the consumed symbol is added - * to the parse tree using {@link ParserRuleContext#addChild(TerminalNode)}, and - * {@link org.antlr.v4.runtime.tree.ParseTreeListener#visitTerminal} is called on any parse listeners. - * If the parser is in error recovery mode, the consumed symbol is - * added to the parse tree using {@link #createErrorNode(ParserRuleContext, Token)} then - * {@link ParserRuleContext#addErrorNode(ErrorNode)} and - * {@link org.antlr.v4.runtime.tree.ParseTreeListener#visitErrorNode} is called on any parse - * listeners. - */ + /// + /// Consume and return the |: #getCurrentToken current symbol:|. + /// + /// E.g., given the following input with `A` being the current + /// lookahead symbol, this function moves the cursor to `B` and returns + /// `A`. + /// + /// + /// A B + /// ^ + /// + /// + /// If the parser is not in error recovery mode, the consumed symbol is added + /// to the parse tree using _ParserRuleContext#addChild(TerminalNode)_, and + /// _org.antlr.v4.runtime.tree.ParseTreeListener#visitTerminal_ is called on any parse listeners. + /// If the parser __is__ in error recovery mode, the consumed symbol is + /// added to the parse tree using _#createErrorNode(ParserRuleContext, Token)_ then + /// _ParserRuleContext#addErrorNode(ErrorNode)_ and + /// _org.antlr.v4.runtime.tree.ParseTreeListener#visitErrorNode_ is called on any parse + /// listeners. 
+ /// @discardableResult public func consume() throws -> Token { - let o: Token = try getCurrentToken() + let o = try getCurrentToken() if o.getType() != Parser.EOF { try getInputStream()!.consume() } guard let _ctx = _ctx else { return o } - let hasListener: Bool = _parseListeners != nil && !_parseListeners!.isEmpty + let hasListener = _parseListeners != nil && !_parseListeners!.isEmpty if _buildParseTrees || hasListener { if _errHandler.inErrorRecoveryMode(self) { - let node: ErrorNode = _ctx.addErrorNode(createErrorNode(parent: _ctx, t: o)) + let node = _ctx.addErrorNode(createErrorNode(parent: _ctx, t: o)) if let _parseListeners = _parseListeners { - for listener: ParseTreeListener in _parseListeners { + for listener in _parseListeners { listener.visitErrorNode(node) } } } else { - let node: TerminalNode = _ctx.addChild(createTerminalNode(parent: _ctx, t: o)) + let node = _ctx.addChild(createTerminalNode(parent: _ctx, t: o)) if let _parseListeners = _parseListeners { - for listener: ParseTreeListener in _parseListeners { + for listener in _parseListeners { listener.visitTerminal(node) } } @@ -592,20 +576,20 @@ open class Parser: Recognizer { return o } - /** How to create a token leaf node associated with a parent. - * Typically, the terminal node to create is not a function of the parent. - * - * @since 4.7 - */ + /// How to create a token leaf node associated with a parent. + /// Typically, the terminal node to create is not a function of the parent. + /// + /// - Since: 4.7 + /// public func createTerminalNode(parent: ParserRuleContext, t: Token) -> TerminalNode { return TerminalNodeImpl(t); } - /** How to create an error node, given a token, associated with a parent. - * Typically, the error node to create is not a function of the parent. - * - * @since 4.7 - */ + /// How to create an error node, given a token, associated with a parent. + /// Typically, the error node to create is not a function of the parent. 
+ /// + /// - Since: 4.7 + /// public func createErrorNode(parent: ParserRuleContext, t: Token) -> ErrorNode { return ErrorNode(t); } @@ -618,10 +602,10 @@ open class Parser: Recognizer { } } - /** - * Always called by generated parsers upon entry to a rule. Access field - * {@link #_ctx} get the current context. - */ + /// + /// Always called by generated parsers upon entry to a rule. Access field + /// _#_ctx_ get the current context. + /// public func enterRule(_ localctx: ParserRuleContext, _ state: Int, _ ruleIndex: Int) throws { setState(state) _ctx = localctx @@ -660,12 +644,12 @@ open class Parser: Recognizer { } } - /** - * Get the precedence level for the top-most precedence rule. - * - * @return The precedence level for the top-most precedence rule, or -1 if - * the parser context is not nested within a precedence rule. - */ + /// + /// Get the precedence level for the top-most precedence rule. + /// + /// - Returns: The precedence level for the top-most precedence rule, or -1 if + /// the parser context is not nested within a precedence rule. + /// public final func getPrecedence() -> Int { if _precedenceStack.isEmpty { return -1 @@ -674,11 +658,13 @@ open class Parser: Recognizer { return _precedenceStack.peek() ?? -1 } - /** - * @deprecated Use - * {@link #enterRecursionRule(org.antlr.v4.runtime.ParserRuleContext, int, int, int)} instead. - */ - ////@Deprecated + /// + /// Use + /// _#enterRecursionRule(org.antlr.v4.runtime.ParserRuleContext, int, int, int)_ instead. + /// + /// + /// /@Deprecated + /// public func enterRecursionRule(_ localctx: ParserRuleContext, _ ruleIndex: Int) throws { try enterRecursionRule(localctx, getATN().ruleToStartState[ruleIndex].stateNumber, ruleIndex, 0) } @@ -693,11 +679,11 @@ open class Parser: Recognizer { } } - /** Like {@link #enterRule} but for recursive rules. - * Make the current context the child of the incoming localctx. - */ + /// Like _#enterRule_ but for recursive rules. 
+ /// Make the current context the child of the incoming localctx. + /// public func pushNewRecursionContext(_ localctx: ParserRuleContext, _ state: Int, _ ruleIndex: Int) throws { - let previous: ParserRuleContext = _ctx! + let previous = _ctx! previous.parent = localctx previous.invokingState = state previous.stop = try _input.LT(-1) @@ -716,12 +702,12 @@ open class Parser: Recognizer { public func unrollRecursionContexts(_ _parentctx: ParserRuleContext?) throws { _precedenceStack.pop() _ctx!.stop = try _input.LT(-1) - let retctx: ParserRuleContext = _ctx! // save current ctx (return value) + let retctx = _ctx! // save current ctx (return value) // unroll so _ctx is as it was before call to recursive method if _parseListeners != nil { - while let ctxWrap = _ctx , ctxWrap !== _parentctx { - try triggerExitRuleEvent() + while let ctxWrap = _ctx, ctxWrap !== _parentctx { + try triggerExitRuleEvent() _ctx = ctxWrap.parent as? ParserRuleContext } } else { @@ -738,7 +724,7 @@ open class Parser: Recognizer { } public func getInvokingContext(_ ruleIndex: Int) -> ParserRuleContext? { - var p: ParserRuleContext? = _ctx + var p = _ctx while let pWrap = p { if pWrap.getRuleIndex() == ruleIndex { return pWrap @@ -766,63 +752,63 @@ open class Parser: Recognizer { return false } - /** Given an AmbiguityInfo object that contains information about an - * ambiguous decision event, return the list of ambiguous parse trees. - * An ambiguity occurs when a specific token sequence can be recognized - * in more than one way by the grammar. These ambiguities are detected only - * at decision points. - * - * The list of trees includes the actual interpretation (that for - * the minimum alternative number) and all ambiguous alternatives. - * The actual interpretation is always first. - * - * This method reuses the same physical input token stream used to - * detect the ambiguity by the original parser in the first place. 
- * This method resets/seeks within but does not alter originalParser. - * The input position is restored upon exit from this method. - * Parsers using a {@link org.antlr.v4.runtime.UnbufferedTokenStream} may not be able to - * perform the necessary save index() / seek(saved_index) operation. - * - * The trees are rooted at the node whose start..stop token indices - * include the start and stop indices of this ambiguity event. That is, - * the trees returns will always include the complete ambiguous subphrase - * identified by the ambiguity event. - * - * Be aware that this method does NOT notify error or parse listeners as - * it would trigger duplicate or otherwise unwanted events. - * - * This uses a temporary ParserATNSimulator and a ParserInterpreter - * so we don't mess up any statistics, event lists, etc... - * The parse tree constructed while identifying/making ambiguityInfo is - * not affected by this method as it creates a new parser interp to - * get the ambiguous interpretations. - * - * Nodes in the returned ambig trees are independent of the original parse - * tree (constructed while identifying/creating ambiguityInfo). - * - * @since 4.5.1 - * - * @param originalParser The parser used to create ambiguityInfo; it - * is not modified by this routine and can be either - * a generated or interpreted parser. It's token - * stream *is* reset/seek()'d. - * @param ambiguityInfo The information about an ambiguous decision event - * for which you want ambiguous parse trees. - * @param startRuleIndex The start rule for the entire grammar, not - * the ambiguous decision. We re-parse the entire input - * and so we need the original start rule. - * - * @return The list of all possible interpretations of - * the input for the decision in ambiguityInfo. 
- * The actual interpretation chosen by the parser - * is always given first because this method - * retests the input in alternative order and - * ANTLR always resolves ambiguities by choosing - * the first alternative that matches the input. - * - * @throws org.antlr.v4.runtime.RecognitionException Throws upon syntax error while matching - * ambig input. - */ + /// Given an AmbiguityInfo object that contains information about an + /// ambiguous decision event, return the list of ambiguous parse trees. + /// An ambiguity occurs when a specific token sequence can be recognized + /// in more than one way by the grammar. These ambiguities are detected only + /// at decision points. + /// + /// The list of trees includes the actual interpretation (that for + /// the minimum alternative number) and all ambiguous alternatives. + /// The actual interpretation is always first. + /// + /// This method reuses the same physical input token stream used to + /// detect the ambiguity by the original parser in the first place. + /// This method resets/seeks within but does not alter originalParser. + /// The input position is restored upon exit from this method. + /// Parsers using a _org.antlr.v4.runtime.UnbufferedTokenStream_ may not be able to + /// perform the necessary save index() / seek(saved_index) operation. + /// + /// The trees are rooted at the node whose start..stop token indices + /// include the start and stop indices of this ambiguity event. That is, + /// the trees returns will always include the complete ambiguous subphrase + /// identified by the ambiguity event. + /// + /// Be aware that this method does NOT notify error or parse listeners as + /// it would trigger duplicate or otherwise unwanted events. + /// + /// This uses a temporary ParserATNSimulator and a ParserInterpreter + /// so we don't mess up any statistics, event lists, etc... 
+ /// The parse tree constructed while identifying/making ambiguityInfo is + /// not affected by this method as it creates a new parser interp to + /// get the ambiguous interpretations. + /// + /// Nodes in the returned ambig trees are independent of the original parse + /// tree (constructed while identifying/creating ambiguityInfo). + /// + /// - Since: 4.5.1 + /// + /// - Parameter originalParser: The parser used to create ambiguityInfo; it + /// is not modified by this routine and can be either + /// a generated or interpreted parser. Its token + /// stream *is* reset/seek()'d. + /// - Parameter ambiguityInfo: The information about an ambiguous decision event + /// for which you want ambiguous parse trees. + /// - Parameter startRuleIndex: The start rule for the entire grammar, not + /// the ambiguous decision. We re-parse the entire input + /// and so we need the original start rule. + /// + /// - Throws: org.antlr.v4.runtime.RecognitionException Throws upon syntax error while matching + /// ambig input. + /// - Returns: The list of all possible interpretations of + /// the input for the decision in ambiguityInfo. + /// The actual interpretation chosen by the parser + /// is always given first because this method + /// retests the input in alternative order and + /// ANTLR always resolves ambiguities by choosing + /// the first alternative that matches the input.
+ /// + /// // public class func getAmbiguousParseTrees(originalParser : Parser, // _ ambiguityInfo : AmbiguityInfo, // _ startRuleIndex : Int) throws -> Array //; RecognitionException @@ -851,7 +837,7 @@ open class Parser: Recognizer { // parser.getInterpreter()!.setPredictionMode(PredictionMode.LL_EXACT_AMBIG_DETECTION); // // // get ambig trees -// var alt : Int = ambiguityInfo.ambigAlts.nextSetBit(0); +// var alt : Int = ambiguityInfo.ambigAlts.firstSetBit(); // while alt>=0 { // // re-parse entire input for all ambiguous alternatives // // (don't have to do first as it's been parsed, but do again for simplicity @@ -875,26 +861,25 @@ open class Parser: Recognizer { // return trees; // } - /** - * Checks whether or not {@code symbol} can follow the current state in the - * ATN. The behavior of this method is equivalent to the following, but is - * implemented such that the complete context-sensitive follow set does not - * need to be explicitly constructed. - * - *
      -     * return getExpectedTokens().contains(symbol);
      -     * 
      - * - * @param symbol the symbol type to check - * @return {@code true} if {@code symbol} can follow the current state in - * the ATN, otherwise {@code false}. - */ - public func isExpectedToken(_ symbol: Int) throws -> Bool { -// return getInterpreter().atn.nextTokens(_ctx); - let atn: ATN = getInterpreter().atn + /// + /// Checks whether or not `symbol` can follow the current state in the + /// ATN. The behavior of this method is equivalent to the following, but is + /// implemented such that the complete context-sensitive follow set does not + /// need to be explicitly constructed. + /// + /// + /// return getExpectedTokens().contains(symbol); + /// + /// + /// - Parameter symbol: the symbol type to check + /// - Returns: `true` if `symbol` can follow the current state in + /// the ATN, otherwise `false`. + /// + public func isExpectedToken(_ symbol: Int) -> Bool { + let atn = getInterpreter().atn var ctx: ParserRuleContext? = _ctx - let s: ATNState = atn.states[getState()]! - var following: IntervalSet = try atn.nextTokens(s) + let s = atn.states[getState()]! + var following = atn.nextTokens(s) if following.contains(symbol) { return true } @@ -903,10 +888,10 @@ open class Parser: Recognizer { return false } - while let ctxWrap = ctx , ctxWrap.invokingState >= 0 && following.contains(CommonToken.EPSILON) { - let invokingState: ATNState = atn.states[ctxWrap.invokingState]! - let rt: RuleTransition = invokingState.transition(0) as! RuleTransition - following = try atn.nextTokens(rt.followState) + while let ctxWrap = ctx, ctxWrap.invokingState >= 0 && following.contains(CommonToken.EPSILON) { + let invokingState = atn.states[ctxWrap.invokingState]! + let rt = invokingState.transition(0) as! 
RuleTransition + following = atn.nextTokens(rt.followState) if following.contains(symbol) { return true } @@ -921,55 +906,51 @@ open class Parser: Recognizer { return false } - /** - * Computes the set of input symbols which could follow the current parser - * state and context, as given by {@link #getState} and {@link #getContext}, - * respectively. - * - * @see org.antlr.v4.runtime.atn.ATN#getExpectedTokens(int, org.antlr.v4.runtime.RuleContext) - */ + /// + /// Computes the set of input symbols which could follow the current parser + /// state and context, as given by _#getState_ and _#getContext_, + /// respectively. + /// + /// - SeeAlso: org.antlr.v4.runtime.atn.ATN#getExpectedTokens(int, org.antlr.v4.runtime.RuleContext) + /// public func getExpectedTokens() throws -> IntervalSet { return try getATN().getExpectedTokens(getState(), getContext()!) } - public func getExpectedTokensWithinCurrentRule() throws -> IntervalSet { - let atn: ATN = getInterpreter().atn - let s: ATNState = atn.states[getState()]! - return try atn.nextTokens(s) + public func getExpectedTokensWithinCurrentRule() -> IntervalSet { + let atn = getInterpreter().atn + let s = atn.states[getState()]! + return atn.nextTokens(s) } - /** Get a rule's index (i.e., {@code RULE_ruleName} field) or -1 if not found. */ + /// Get a rule's index (i.e., `RULE_ruleName` field) or -1 if not found. public func getRuleIndex(_ ruleName: String) -> Int { - let ruleIndex: Int? = getRuleIndexMap()[ruleName] - if ruleIndex != nil { - return ruleIndex! - } - return -1 + return getRuleIndexMap()[ruleName] ?? -1 } public func getRuleContext() -> ParserRuleContext? { return _ctx } - /** Return List<String> of the rule names in your parser instance - * leading up to a call to the current rule. You could override if - * you want more details such as the file/line info of where - * in the ATN a rule is invoked. - * - * This is very useful for error messages. 
- */ - public func getRuleInvocationStack() -> Array { + /// Return List<String> of the rule names in your parser instance + /// leading up to a call to the current rule. You could override if + /// you want more details such as the file/line info of where + /// in the ATN a rule is invoked. + /// + /// This is very useful for error messages. + /// + public func getRuleInvocationStack() -> [String] { return getRuleInvocationStack(_ctx) } - public func getRuleInvocationStack(_ p: RuleContext?) -> Array { + public func getRuleInvocationStack(_ p: RuleContext?) -> [String] { var p = p - var ruleNames: [String] = getRuleNames() - var stack: Array = Array() + var ruleNames = getRuleNames() + var stack = [String]() while let pWrap = p { // compute what follows who invoked us - let ruleIndex: Int = pWrap.getRuleIndex() + let ruleIndex = pWrap.getRuleIndex() if ruleIndex < 0 { stack.append("n/a") } else { @@ -980,17 +961,15 @@ open class Parser: Recognizer { return stack } - /** For debugging and other purposes. */ - public func getDFAStrings() -> Array { - var s: Array = Array() + /// For debugging and other purposes. + public func getDFAStrings() -> [String] { + var s = [String]() guard let _interp = _interp else { return s } - decisionToDFAMutex.synchronized { - [unowned self] in - + decisionToDFAMutex.synchronized { [unowned self] in for d in 0..<_interp.decisionToDFA.count { - let dfa: DFA = _interp.decisionToDFA[d] + let dfa = _interp.decisionToDFA[d] s.append(dfa.toString(self.getVocabulary())) } @@ -998,17 +977,15 @@ open class Parser: Recognizer { return s } - /** For debugging and other purposes. */ + /// For debugging and other purposes. 
public func dumpDFA() { - guard let _interp = _interp else { + guard let _interp = _interp else { return } - decisionToDFAMutex.synchronized { - [unowned self] in - var seenOne: Bool = false + decisionToDFAMutex.synchronized { [unowned self] in + var seenOne = false - for d in 0..<_interp.decisionToDFA.count { - let dfa: DFA = _interp.decisionToDFA[d] + for dfa in _interp.decisionToDFA { if !dfa.states.isEmpty { if seenOne { print("") @@ -1028,36 +1005,35 @@ open class Parser: Recognizer { override open func getParseInfo() -> ParseInfo? { - let interp: ParserATNSimulator? = getInterpreter() - if interp is ProfilingATNSimulator { - return ParseInfo(interp as! ProfilingATNSimulator) + let interp = getInterpreter() + if let interp = interp as? ProfilingATNSimulator { + return ParseInfo(interp) } return nil } - /** - * @since 4.3 - */ + /// + /// - Since: 4.3 + /// public func setProfile(_ profile: Bool) { - let interp: ParserATNSimulator = getInterpreter() - let saveMode: PredictionMode = interp.getPredictionMode() + let interp = getInterpreter() + let saveMode = interp.getPredictionMode() if profile { if !(interp is ProfilingATNSimulator) { setInterpreter(ProfilingATNSimulator(self)) } } else { if interp is ProfilingATNSimulator { - let sim: ParserATNSimulator = - ParserATNSimulator(self, getATN(), interp.decisionToDFA, interp.getSharedContextCache()!) + let sim = ParserATNSimulator(self, getATN(), interp.decisionToDFA, interp.getSharedContextCache()!) setInterpreter(sim) } } getInterpreter().setPredictionMode(saveMode) } - /** During a parse is sometimes useful to listen in on the rule entry and exit - * events as well as token matches. This is for quick and dirty debugging. - */ + /// During a parse is sometimes useful to listen in on the rule entry and exit + /// events as well as token matches. This is for quick and dirty debugging. 
+ /// public func setTrace(_ trace: Bool) { if !trace { removeParseListener(_tracer) @@ -1072,12 +1048,12 @@ open class Parser: Recognizer { } } - /** - * Gets whether a {@link org.antlr.v4.runtime.Parser.TraceListener} is registered as a parse listener - * for the parser. - * - * @see #setTrace(boolean) - */ + /// + /// Gets whether a _org.antlr.v4.runtime.Parser.TraceListener_ is registered as a parse listener + /// for the parser. + /// + /// - SeeAlso: #setTrace(boolean) + /// public func isTrace() -> Bool { return _tracer != nil } diff --git a/runtime/Swift/Sources/Antlr4/ParserInterpreter.swift b/runtime/Swift/Sources/Antlr4/ParserInterpreter.swift index 6f136cc68..1e760fc84 100644 --- a/runtime/Swift/Sources/Antlr4/ParserInterpreter.swift +++ b/runtime/Swift/Sources/Antlr4/ParserInterpreter.swift @@ -5,26 +5,26 @@ -/** A parser simulator that mimics what ANTLR's generated - * parser code does. A ParserATNSimulator is used to make - * predictions via adaptivePredict but this class moves a pointer through the - * ATN to simulate parsing. ParserATNSimulator just - * makes us efficient rather than having to backtrack, for example. - * - * This properly creates parse trees even for left recursive rules. - * - * We rely on the left recursive rule invocation and special predicate - * transitions to make left recursive rules work. - * - * See TestParserInterpreter for examples. - */ +/// A parser simulator that mimics what ANTLR's generated +/// parser code does. A ParserATNSimulator is used to make +/// predictions via adaptivePredict but this class moves a pointer through the +/// ATN to simulate parsing. ParserATNSimulator just +/// makes us efficient rather than having to backtrack, for example. +/// +/// This properly creates parse trees even for left recursive rules. +/// +/// We rely on the left recursive rule invocation and special predicate +/// transitions to make left recursive rules work. +/// +/// See TestParserInterpreter for examples. 
+/// public class ParserInterpreter: Parser { internal final var grammarFileName: String internal final var atn: ATN - /** This identifies StarLoopEntryState's that begin the (...)* - * precedence loops of left recursive rules. - */ + /// This identifies StarLoopEntryState's that begin the (...)* + /// precedence loops of left recursive rules. + /// internal final var statesNeedingLeftRecursionContext: BitSet internal final var decisionToDFA: [DFA] @@ -32,37 +32,34 @@ public class ParserInterpreter: Parser { internal final var sharedContextCache: PredictionContextCache = PredictionContextCache() - ////@Deprecated - internal final var tokenNames: [String] internal final var ruleNames: [String] private final var vocabulary: Vocabulary - /** Tracks LR rules for adjusting the contexts */ + /// Tracks LR rules for adjusting the contexts internal final var _parentContextStack: Array<(ParserRuleContext?, Int)> = Array<(ParserRuleContext?, Int)>() - /** We need a map from (decision,inputIndex)->forced alt for computing ambiguous - * parse trees. For now, we allow exactly one override. - */ + /// We need a map from (decision,inputIndex)->forced alt for computing ambiguous + /// parse trees. For now, we allow exactly one override. + /// internal var overrideDecision: Int = -1 internal var overrideDecisionInputIndex: Int = -1 internal var overrideDecisionAlt: Int = -1 - /** A copy constructor that creates a new parser interpreter by reusing - * the fields of a previous interpreter. - * - * @since 4.5.1 - * - * @param old The interpreter to copy - */ + /// A copy constructor that creates a new parser interpreter by reusing + /// the fields of a previous interpreter. 
+ /// + /// - Since: 4.5.1 + /// + /// - Parameter old: The interpreter to copy + /// public init(_ old: ParserInterpreter) throws { self.atn = old.atn self.grammarFileName = old.grammarFileName self.statesNeedingLeftRecursionContext = old.statesNeedingLeftRecursionContext self.decisionToDFA = old.decisionToDFA - self.tokenNames = old.tokenNames self.ruleNames = old.ruleNames self.vocabulary = old.vocabulary try super.init(old.getTokenStream()!) @@ -71,26 +68,11 @@ public class ParserInterpreter: Parser { sharedContextCache)) } - /** - * @deprecated Use {@link #ParserInterpreter(String, org.antlr.v4.runtime.Vocabulary, java.util.Collection, org.antlr.v4.runtime.atn.ATN, org.antlr.v4.runtime.TokenStream)} instead. - */ - //@Deprecated - public convenience init(_ grammarFileName: String, _ tokenNames: Array?, - _ ruleNames: Array, _ atn: ATN, _ input: TokenStream) throws { - try self.init(grammarFileName, Vocabulary.fromTokenNames(tokenNames), ruleNames, atn, input) - } - public init(_ grammarFileName: String, _ vocabulary: Vocabulary, _ ruleNames: Array, _ atn: ATN, _ input: TokenStream) throws { self.grammarFileName = grammarFileName self.atn = atn - self.tokenNames = [String]()// new String[atn.maxTokenType]; - let length = tokenNames.count - for i in 0.. 
[String] { - return tokenNames - } - override public func getVocabulary() -> Vocabulary { return vocabulary @@ -142,19 +118,19 @@ public class ParserInterpreter: Parser { return grammarFileName } - /** Begin parsing at startRuleIndex */ + /// Begin parsing at startRuleIndex public func parse(_ startRuleIndex: Int) throws -> ParserRuleContext { - let startRuleStartState: RuleStartState = atn.ruleToStartState[startRuleIndex] + let startRuleStartState = atn.ruleToStartState[startRuleIndex] - let rootContext: InterpreterRuleContext = InterpreterRuleContext(nil, ATNState.INVALID_STATE_NUMBER, startRuleIndex) + let rootContext = InterpreterRuleContext(nil, ATNState.INVALID_STATE_NUMBER, startRuleIndex) if startRuleStartState.isPrecedenceRule { - try enterRecursionRule(rootContext, startRuleStartState.stateNumber, startRuleIndex, 0) + try enterRecursionRule(rootContext, startRuleStartState.stateNumber, startRuleIndex, 0) } else { try enterRule(rootContext, startRuleStartState.stateNumber, startRuleIndex) } while true { - let p: ATNState = getATNState()! + let p = getATNState()! switch p.getStateType() { case ATNState.RULE_STOP: // pop; return from rule @@ -204,7 +180,7 @@ public class ParserInterpreter: Parser { var altNum: Int if p.getNumberOfTransitions() > 1 { try getErrorHandler().sync(self) - let decision: Int = (p as! DecisionState).decision + let decision = (p as! DecisionState).decision if decision == overrideDecision && _input.index() == overrideDecisionInputIndex { altNum = overrideDecisionAlt } else { @@ -214,7 +190,7 @@ public class ParserInterpreter: Parser { altNum = 1 } - let transition: Transition = p.transition(altNum - 1) + let transition = p.transition(altNum - 1) switch transition.getSerializationType() { case Transition.EPSILON: if try statesNeedingLeftRecursionContext.get(p.stateNumber) && @@ -248,9 +224,9 @@ public class ParserInterpreter: Parser { break case Transition.RULE: - let ruleStartState: RuleStartState = transition.target as! 
RuleStartState - let ruleIndex: Int = ruleStartState.ruleIndex! - let ctx: InterpreterRuleContext = InterpreterRuleContext(_ctx, p.stateNumber, ruleIndex) + let ruleStartState = transition.target as! RuleStartState + let ruleIndex = ruleStartState.ruleIndex! + let ctx = InterpreterRuleContext(_ctx, p.stateNumber, ruleIndex) if ruleStartState.isPrecedenceRule { try enterRecursionRule(ctx, ruleStartState.stateNumber, ruleIndex, (transition as! RuleTransition).precedence) } else { @@ -259,25 +235,20 @@ public class ParserInterpreter: Parser { break case Transition.PREDICATE: - let predicateTransition: PredicateTransition = transition as! PredicateTransition + let predicateTransition = transition as! PredicateTransition if try !sempred(_ctx!, predicateTransition.ruleIndex, predicateTransition.predIndex) { - - throw try ANTLRException.recognition(e: FailedPredicateException(self)) - + throw ANTLRException.recognition(e: FailedPredicateException(self)) } - break case Transition.ACTION: - let actionTransition: ActionTransition = transition as! ActionTransition + let actionTransition = transition as! ActionTransition try action(_ctx, actionTransition.ruleIndex, actionTransition.actionIndex) break case Transition.PRECEDENCE: if !precpred(_ctx!, (transition as! PrecedencePredicateTransition).precedence) { - - throw try ANTLRException.recognition(e: FailedPredicateException(self, "precpred(_ctx,\((transition as! PrecedencePredicateTransition).precedence))")) - + throw ANTLRException.recognition(e: FailedPredicateException(self, "precpred(_ctx,\((transition as! PrecedencePredicateTransition).precedence))")) } break @@ -290,59 +261,59 @@ public class ParserInterpreter: Parser { } internal func visitRuleStopState(_ p: ATNState) throws { - let ruleStartState: RuleStartState = atn.ruleToStartState[p.ruleIndex!] + let ruleStartState = atn.ruleToStartState[p.ruleIndex!] 
if ruleStartState.isPrecedenceRule { - let parentContext: (ParserRuleContext?, Int) = _parentContextStack.pop() - try unrollRecursionContexts(parentContext.0!) - setState(parentContext.1) + let (parentContext, parentState) = _parentContextStack.pop() + try unrollRecursionContexts(parentContext!) + setState(parentState) } else { try exitRule() } - let ruleTransition: RuleTransition = atn.states[getState()]!.transition(0) as! RuleTransition + let ruleTransition = atn.states[getState()]!.transition(0) as! RuleTransition setState(ruleTransition.followState.stateNumber) } - /** Override this parser interpreters normal decision-making process - * at a particular decision and input token index. Instead of - * allowing the adaptive prediction mechanism to choose the - * first alternative within a block that leads to a successful parse, - * force it to take the alternative, 1..n for n alternatives. - * - * As an implementation limitation right now, you can only specify one - * override. This is sufficient to allow construction of different - * parse trees for ambiguous input. It means re-parsing the entire input - * in general because you're never sure where an ambiguous sequence would - * live in the various parse trees. For example, in one interpretation, - * an ambiguous input sequence would be matched completely in expression - * but in another it could match all the way back to the root. - * - * s : e '!'? ; - * e : ID - * | ID '!' - * ; - * - * Here, x! can be matched as (s (e ID) !) or (s (e ID !)). In the first - * case, the ambiguous sequence is fully contained only by the root. - * In the second case, the ambiguous sequences fully contained within just - * e, as in: (e ID !). - * - * Rather than trying to optimize this and make - * some intelligent decisions for optimization purposes, I settled on - * just re-parsing the whole input and then using - * {link Trees#getRootOfSubtreeEnclosingRegion} to find the minimal - * subtree that contains the ambiguous sequence. 
I originally tried to - * record the call stack at the point the parser detected and ambiguity but - * left recursive rules create a parse tree stack that does not reflect - * the actual call stack. That impedance mismatch was enough to make - * it it challenging to restart the parser at a deeply nested rule - * invocation. - * - * Only parser interpreters can override decisions so as to avoid inserting - * override checking code in the critical ALL(*) prediction execution path. - * - * @since 4.5.1 - */ + /// Override this parser interpreters normal decision-making process + /// at a particular decision and input token index. Instead of + /// allowing the adaptive prediction mechanism to choose the + /// first alternative within a block that leads to a successful parse, + /// force it to take the alternative, 1..n for n alternatives. + /// + /// As an implementation limitation right now, you can only specify one + /// override. This is sufficient to allow construction of different + /// parse trees for ambiguous input. It means re-parsing the entire input + /// in general because you're never sure where an ambiguous sequence would + /// live in the various parse trees. For example, in one interpretation, + /// an ambiguous input sequence would be matched completely in expression + /// but in another it could match all the way back to the root. + /// + /// s : e '!'? ; + /// e : ID + /// | ID '!' + /// ; + /// + /// Here, x! can be matched as (s (e ID) !) or (s (e ID !)). In the first + /// case, the ambiguous sequence is fully contained only by the root. + /// In the second case, the ambiguous sequences fully contained within just + /// e, as in: (e ID !). + /// + /// Rather than trying to optimize this and make + /// some intelligent decisions for optimization purposes, I settled on + /// just re-parsing the whole input and then using + /// {link Trees#getRootOfSubtreeEnclosingRegion} to find the minimal + /// subtree that contains the ambiguous sequence. 
I originally tried to + /// record the call stack at the point the parser detected an ambiguity but + /// left recursive rules create a parse tree stack that does not reflect + /// the actual call stack. That impedance mismatch was enough to make + /// it challenging to restart the parser at a deeply nested rule + /// invocation. + /// + /// Only parser interpreters can override decisions so as to avoid inserting + /// override checking code in the critical ALL(*) prediction execution path. + /// + /// - Since: 4.5.1 + /// public func addDecisionOverride(_ decision: Int, _ tokenIndex: Int, _ forcedAlt: Int) { overrideDecision = decision overrideDecisionInputIndex = tokenIndex diff --git a/runtime/Swift/Sources/Antlr4/ParserRuleContext.swift b/runtime/Swift/Sources/Antlr4/ParserRuleContext.swift index 532018058..deabbc0bd 100644 --- a/runtime/Swift/Sources/Antlr4/ParserRuleContext.swift +++ b/runtime/Swift/Sources/Antlr4/ParserRuleContext.swift @@ -4,83 +4,79 @@ */ -/** A rule invocation record for parsing. - * - * Contains all of the information about the current rule not stored in the - * RuleContext. It handles parse tree children list, Any ATN state - * tracing, and the default values available for rule invocations: - * start, stop, rule index, current alt number. - * - * Subclasses made for each rule and grammar track the parameters, - * return values, locals, and labels specific to that rule. These - * are the objects that are returned from rules. - * - * Note text is not an actual field of a rule return value; it is computed - * from start and stop using the input stream's toString() method. I - * could add a ctor to this so that we can pass in and store the input - * stream, but I'm not sure we want to do that. It would seem to be undefined - * to get the .text property anyway if the rule matches tokens from multiple - * input streams. - * - * I do not use getters for fields of objects that are used simply to - * group values such as this aggregate. 
The getters/setters are there to - * satisfy the superclass interface. - */ - +/// A rule invocation record for parsing. +/// +/// Contains all of the information about the current rule not stored in the +/// RuleContext. It handles parse tree children list, Any ATN state +/// tracing, and the default values available for rule invocations: +/// start, stop, rule index, current alt number. +/// +/// Subclasses made for each rule and grammar track the parameters, +/// return values, locals, and labels specific to that rule. These +/// are the objects that are returned from rules. +/// +/// Note text is not an actual field of a rule return value; it is computed +/// from start and stop using the input stream's toString() method. I +/// could add a ctor to this so that we can pass in and store the input +/// stream, but I'm not sure we want to do that. It would seem to be undefined +/// to get the .text property anyway if the rule matches tokens from multiple +/// input streams. +/// +/// I do not use getters for fields of objects that are used simply to +/// group values such as this aggregate. The getters/setters are there to +/// satisfy the superclass interface. +/// open class ParserRuleContext: RuleContext { public var visited = false - /** If we are debugging or building a parse tree for a visitor, - * we need to track all of the tokens and rule invocations associated - * with this rule's context. This is empty for parsing w/o tree constr. - * operation because we don't the need to track the details about - * how we parse this rule. - */ + /// If we are debugging or building a parse tree for a visitor, + /// we need to track all of the tokens and rule invocations associated + /// with this rule's context. This is empty for parsing w/o tree constr. + /// operation because we don't need to track the details about + /// how we parse this rule. + /// public var children: Array? 
- /** For debugging/tracing purposes, we want to track all of the nodes in - * the ATN traversed by the parser for a particular rule. - * This list indicates the sequence of ATN nodes used to match - * the elements of the children list. This list does not include - * ATN nodes and other rules used to match rule invocations. It - * traces the rule invocation node itself but nothing inside that - * other rule's ATN submachine. - * - * There is NOT a one-to-one correspondence between the children and - * states list. There are typically many nodes in the ATN traversed - * for each element in the children list. For example, for a rule - * invocation there is the invoking state and the following state. - * - * The parser setState() method updates field s and adds it to this list - * if we are debugging/tracing. - * - * This does not trace states visited during prediction. - */ -// public List states; - + /// For debugging/tracing purposes, we want to track all of the nodes in + /// the ATN traversed by the parser for a particular rule. + /// This list indicates the sequence of ATN nodes used to match + /// the elements of the children list. This list does not include + /// ATN nodes and other rules used to match rule invocations. It + /// traces the rule invocation node itself but nothing inside that + /// other rule's ATN submachine. + /// + /// There is NOT a one-to-one correspondence between the children and + /// states list. There are typically many nodes in the ATN traversed + /// for each element in the children list. For example, for a rule + /// invocation there is the invoking state and the following state. + /// + /// The parser setState() method updates field s and adds it to this list + /// if we are debugging/tracing. + /// + /// This does not trace states visited during prediction. + /// public var start: Token?, stop: Token? - /** - * The exception that forced this rule to return. If the rule successfully - * completed, this is {@code null}. 
- */ + /// + /// The exception that forced this rule to return. If the rule successfully + /// completed, this is `null`. + /// public var exception: AnyObject! - //RecognitionException!; public override init() { super.init() } - /** COPY a ctx (I'm deliberately not using copy constructor) to avoid - * confusion with creating node with parent. Does not copy children. - * - * This is used in the generated parser code to flip a generic XContext - * node for rule X to a YContext for alt label Y. In that sense, it is - * not really a generic copy function. - * - * If we do an error sync() at start of a rule, we might add error nodes - * to the generic XContext so this function must copy those nodes to - * the YContext as well else they are lost! - */ + /// COPY a ctx (I'm deliberately not using copy constructor) to avoid + /// confusion with creating node with parent. Does not copy children. + /// + /// This is used in the generated parser code to flip a generic XContext + /// node for rule X to a YContext for alt label Y. In that sense, it is + /// not really a generic copy function. + /// + /// If we do an error sync() at start of a rule, we might add error nodes + /// to the generic XContext so this function must copy those nodes to + /// the YContext as well else they are lost! + /// open func copyFrom(_ ctx: ParserRuleContext) { self.parent = ctx.parent self.invokingState = ctx.invokingState @@ -112,17 +108,17 @@ open class ParserRuleContext: RuleContext { open func exitRule(_ listener: ParseTreeListener) { } - /** Add a parse tree node to this as a child. Works for - * internal and leaf nodes. Does not set parent link; - * other add methods must do that. Other addChild methods - * call this. - * - * We cannot set the parent pointer of the incoming node - * because the existing interfaces do not have a setParent() - * method and I don't want to break backward compatibility for this. - * - * @since 4.7 - */ + /// Add a parse tree node to this as a child. 
Works for + /// internal and leaf nodes. Does not set parent link; + /// other add methods must do that. Other addChild methods + /// call this. + /// + /// We cannot set the parent pointer of the incoming node + /// because the existing interfaces do not have a setParent() + /// method and I don't want to break backward compatibility for this. + /// + /// - Since: 4.7 + /// @discardableResult open func addAnyChild(_ t: T) -> T { if children == nil { @@ -137,28 +133,28 @@ open class ParserRuleContext: RuleContext { return addAnyChild(ruleInvocation) } - /** Add a token leaf node child and force its parent to be this node. */ + /// Add a token leaf node child and force its parent to be this node. @discardableResult open func addChild(_ t: TerminalNode) -> TerminalNode { t.setParent(self) return addAnyChild(t) } - /** Add an error node child and force its parent to be this node. - * - * @since 4.7 - */ + /// Add an error node child and force its parent to be this node. + /// + /// - Since: 4.7 + /// @discardableResult open func addErrorNode(_ errorNode: ErrorNode) -> ErrorNode { errorNode.setParent(self) return addAnyChild(errorNode) } - /** Add a child to this node based upon matchedToken. It - * creates a TerminalNodeImpl rather than using - * {@link Parser#createTerminalNode(ParserRuleContext, Token)}. I'm leaving this - * in for compatibility but the parser doesn't use this anymore. - */ + /// Add a child to this node based upon matchedToken. It + /// creates a TerminalNodeImpl rather than using + /// _Parser#createTerminalNode(ParserRuleContext, Token)_. I'm leaving this + /// in for compatibility but the parser doesn't use this anymore. + /// @available(*, deprecated) open func addChild(_ matchedToken: Token) -> TerminalNode { let t: TerminalNodeImpl = TerminalNodeImpl(matchedToken) @@ -167,11 +163,11 @@ open class ParserRuleContext: RuleContext { return t } - /** Add a child to this node based upon badToken. 
It - * creates a ErrorNodeImpl rather than using - * {@link Parser#createErrorNode(ParserRuleContext, Token)}. I'm leaving this - * in for compatibility but the parser doesn't use this anymore. - */ + /// Add a child to this node based upon badToken. It + /// creates a ErrorNodeImpl rather than using + /// _Parser#createErrorNode(ParserRuleContext, Token)_. I'm leaving this + /// in for compatibility but the parser doesn't use this anymore. + /// @discardableResult @available(*, deprecated) open func addErrorNode(_ badToken: Token) -> ErrorNode { @@ -186,10 +182,10 @@ open class ParserRuleContext: RuleContext { // states.add(s); // } - /** Used by enterOuterAlt to toss out a RuleContext previously added as - * we entered a rule. If we have # label, we will need to remove - * generic ruleContext object. - */ + /// Used by enterOuterAlt to toss out a RuleContext previously added as + /// we entered a rule. If we have # label, we will need to remove + /// generic ruleContext object. + /// open func removeLastChild() { if children != nil { children!.remove(at: children!.count-1) @@ -198,7 +194,9 @@ open class ParserRuleContext: RuleContext { override - /** Override to make type more specific */ + /// + /// Override to make type more specific + /// open func getParent() -> Tree? { return super.getParent() } @@ -307,24 +305,24 @@ open class ParserRuleContext: RuleContext { return Interval.of(start.getTokenIndex(), stop.getTokenIndex()) } - /** - * Get the initial token in this context. - * Note that the range from start to stop is inclusive, so for rules that do not consume anything - * (for example, zero length or error productions) this token may exceed stop. - */ + /// + /// Get the initial token in this context. + /// Note that the range from start to stop is inclusive, so for rules that do not consume anything + /// (for example, zero length or error productions) this token may exceed stop. + /// open func getStart() -> Token? 
{ return start } - /** - * Get the final token in this context. - * Note that the range from start to stop is inclusive, so for rules that do not consume anything - * (for example, zero length or error productions) this token may precede start. - */ + /// + /// Get the final token in this context. + /// Note that the range from start to stop is inclusive, so for rules that do not consume anything + /// (for example, zero length or error productions) this token may precede start. + /// open func getStop() -> Token? { return stop } - /** Used for rule context info debugging during parse-time, not so much for ATN debugging */ + /// Used for rule context info debugging during parse-time, not so much for ATN debugging open func toInfoString(_ recognizer: Parser) -> String { var rules: Array = recognizer.getRuleInvocationStack(self) // Collections.reverse(rules); diff --git a/runtime/Swift/Sources/Antlr4/ProxyErrorListener.swift b/runtime/Swift/Sources/Antlr4/ProxyErrorListener.swift index 53f172b13..17d6b21b7 100644 --- a/runtime/Swift/Sources/Antlr4/ProxyErrorListener.swift +++ b/runtime/Swift/Sources/Antlr4/ProxyErrorListener.swift @@ -4,31 +4,29 @@ */ -/** - * This implementation of {@link org.antlr.v4.runtime.ANTLRErrorListener} dispatches all calls to a - * collection of delegate listeners. This reduces the effort required to support multiple - * listeners. - * - * @author Sam Harwell - */ +/// +/// This implementation of _org.antlr.v4.runtime.ANTLRErrorListener_ dispatches all calls to a +/// collection of delegate listeners. This reduces the effort required to support multiple +/// listeners. 
+/// +/// - Author: Sam Harwell +/// public class ProxyErrorListener: ANTLRErrorListener { - private final var delegates: Array - - public init(_ delegates: Array) { + private final var delegates: [ANTLRErrorListener] + public init(_ delegates: [ANTLRErrorListener]) { self.delegates = delegates } - //_ e : RecognitionException - public func syntaxError(_ recognizer: Recognizer, - _ offendingSymbol: AnyObject?, - _ line: Int, - _ charPositionInLine: Int, - _ msg: String, - _ e: AnyObject?) - { - for listener: ANTLRErrorListener in delegates { + public func syntaxError(_ recognizer: Recognizer, + _ offendingSymbol: AnyObject?, + _ line: Int, + _ charPositionInLine: Int, + _ msg: String, + _ e: AnyObject?) + { + for listener in delegates { listener.syntaxError(recognizer, offendingSymbol, line, charPositionInLine, msg, e) } } @@ -40,9 +38,9 @@ public class ProxyErrorListener: ANTLRErrorListener { _ stopIndex: Int, _ exact: Bool, _ ambigAlts: BitSet, - _ configs: ATNConfigSet) throws { - for listener: ANTLRErrorListener in delegates { - try listener.reportAmbiguity(recognizer, dfa, startIndex, stopIndex, exact, ambigAlts, configs) + _ configs: ATNConfigSet) { + for listener in delegates { + listener.reportAmbiguity(recognizer, dfa, startIndex, stopIndex, exact, ambigAlts, configs) } } @@ -52,9 +50,9 @@ public class ProxyErrorListener: ANTLRErrorListener { _ startIndex: Int, _ stopIndex: Int, _ conflictingAlts: BitSet?, - _ configs: ATNConfigSet) throws { - for listener: ANTLRErrorListener in delegates { - try listener.reportAttemptingFullContext(recognizer, dfa, startIndex, stopIndex, conflictingAlts, configs) + _ configs: ATNConfigSet) { + for listener in delegates { + listener.reportAttemptingFullContext(recognizer, dfa, startIndex, stopIndex, conflictingAlts, configs) } } @@ -64,9 +62,9 @@ public class ProxyErrorListener: ANTLRErrorListener { _ startIndex: Int, _ stopIndex: Int, _ prediction: Int, - _ configs: ATNConfigSet) throws { - for listener: 
ANTLRErrorListener in delegates { - try listener.reportContextSensitivity(recognizer, dfa, startIndex, stopIndex, prediction, configs) + _ configs: ATNConfigSet) { + for listener in delegates { + listener.reportContextSensitivity(recognizer, dfa, startIndex, stopIndex, prediction, configs) } } } diff --git a/runtime/Swift/Sources/Antlr4/RecognitionException.swift b/runtime/Swift/Sources/Antlr4/RecognitionException.swift index ea3348b6a..49a8807f9 100644 --- a/runtime/Swift/Sources/Antlr4/RecognitionException.swift +++ b/runtime/Swift/Sources/Antlr4/RecognitionException.swift @@ -4,65 +4,56 @@ */ -/** The root of the ANTLR exception hierarchy. In general, ANTLR tracks just - * 3 kinds of errors: prediction errors, failed predicate errors, and - * mismatched input errors. In each case, the parser knows where it is - * in the input, where it is in the ATN, the rule invocation stack, - * and what kind of problem occurred. - */ +/// The root of the ANTLR exception hierarchy. In general, ANTLR tracks just +/// 3 kinds of errors: prediction errors, failed predicate errors, and +/// mismatched input errors. In each case, the parser knows where it is +/// in the input, where it is in the ATN, the rule invocation stack, +/// and what kind of problem occurred. +/// -public class RecognitionException { - /** The {@link org.antlr.v4.runtime.Recognizer} where this exception originated. */ - private final var recognizer: Recognizer? - //Recognizer? ; +public class RecognitionException { + /// + /// The _org.antlr.v4.runtime.Recognizer_ where this exception originated. + /// + private final var recognizer: RecognizerProtocol? - private final var ctx: RuleContext? + private final weak var ctx: RuleContext? - private final var input: IntStream + private final var input: IntStream? - /** - * The current {@link org.antlr.v4.runtime.Token} when an error occurred. 
Since not all streams - * support accessing symbols by index, we have to track the {@link org.antlr.v4.runtime.Token} - * instance itself. - */ + /// + /// The current _org.antlr.v4.runtime.Token_ when an error occurred. Since not all streams + /// support accessing symbols by index, we have to track the _org.antlr.v4.runtime.Token_ + /// instance itself. + /// private var offendingToken: Token! - private var offendingState: Int = -1 + private var offendingState = -1 public var message: String? - public init(_ recognizer: Recognizer?, + + public init(_ recognizer: RecognizerProtocol?, _ input: IntStream, - _ ctx: ParserRuleContext?) { + _ ctx: ParserRuleContext? = nil, + _ message: String? = nil) { self.recognizer = recognizer self.input = input self.ctx = ctx - if let recognizer = recognizer { - self.offendingState = recognizer.getState() - } - } - - public init(_ message: String, - _ recognizer: Recognizer?, - _ input: IntStream, - _ ctx: ParserRuleContext?) { self.message = message - self.recognizer = recognizer - self.input = input - self.ctx = ctx if let recognizer = recognizer { self.offendingState = recognizer.getState() } } - /** - * Get the ATN state number the parser was in at the time the error - * occurred. For {@link org.antlr.v4.runtime.NoViableAltException} and - * {@link org.antlr.v4.runtime.LexerNoViableAltException} exceptions, this is the - * {@link org.antlr.v4.runtime.atn.DecisionState} number. For others, it is the state whose outgoing - * edge we couldn't match. - * - *

      If the state number is not known, this method returns -1.

      - */ + /// + /// Get the ATN state number the parser was in at the time the error + /// occurred. For _org.antlr.v4.runtime.NoViableAltException_ and + /// _org.antlr.v4.runtime.LexerNoViableAltException_ exceptions, this is the + /// _org.antlr.v4.runtime.atn.DecisionState_ number. For others, it is the state whose outgoing + /// edge we couldn't match. + /// + /// If the state number is not known, this method returns -1. + /// public func getOffendingState() -> Int { return offendingState } @@ -71,50 +62,52 @@ public class RecognitionException { self.offendingState = offendingState } - /** - * Gets the set of input symbols which could potentially follow the - * previously matched symbol at the time this exception was thrown. - * - *

      If the set of expected tokens is not known and could not be computed, - * this method returns {@code null}.

      - * - * @return The set of token types that could potentially follow the current - * state in the ATN, or {@code null} if the information is not available. - */ + /// + /// Gets the set of input symbols which could potentially follow the + /// previously matched symbol at the time this exception was thrown. + /// + /// If the set of expected tokens is not known and could not be computed, + /// this method returns `null`. + /// + /// - Returns: The set of token types that could potentially follow the current + /// state in the ATN, or `null` if the information is not available. + /// public func getExpectedTokens() -> IntervalSet? { if let recognizer = recognizer { return try? recognizer.getATN().getExpectedTokens(offendingState, ctx!) } - return nil } - /** - * Gets the {@link org.antlr.v4.runtime.RuleContext} at the time this exception was thrown. - * - *

      If the context is not available, this method returns {@code null}.

      - * - * @return The {@link org.antlr.v4.runtime.RuleContext} at the time this exception was thrown. - * If the context is not available, this method returns {@code null}. - */ + /// + /// Gets the _org.antlr.v4.runtime.RuleContext_ at the time this exception was thrown. + /// + /// If the context is not available, this method returns `null`. + /// + /// - Returns: The _org.antlr.v4.runtime.RuleContext_ at the time this exception was thrown. + /// If the context is not available, this method returns `null`. + /// public func getCtx() -> RuleContext? { return ctx } - /** - * Gets the input stream which is the symbol source for the recognizer where - * this exception was thrown. - * - *

      If the input stream is not available, this method returns {@code null}.

      - * - * @return The input stream which is the symbol source for the recognizer - * where this exception was thrown, or {@code null} if the stream is not - * available. - */ - public func getInputStream() -> IntStream { + /// + /// Gets the input stream which is the symbol source for the recognizer where + /// this exception was thrown. + /// + /// If the input stream is not available, this method returns `null`. + /// + /// - Returns: The input stream which is the symbol source for the recognizer + /// where this exception was thrown, or `null` if the stream is not + /// available. + /// + public func getInputStream() -> IntStream? { return input } + public func clearInputStream() { + input = nil + } public func getOffendingToken() -> Token { return offendingToken @@ -124,15 +117,19 @@ public class RecognitionException { self.offendingToken = offendingToken } - /** - * Gets the {@link org.antlr.v4.runtime.Recognizer} where this exception occurred. - * - *

      If the recognizer is not available, this method returns {@code null}.

      - * - * @return The recognizer where this exception occurred, or {@code null} if - * the recognizer is not available. - */ - public func getRecognizer() -> Recognizer? { + /// + /// Gets the _org.antlr.v4.runtime.Recognizer_ where this exception occurred. + /// + /// If the recognizer is not available, this method returns `null`. + /// + /// - Returns: The recognizer where this exception occurred, or `null` if + /// the recognizer is not available. + /// + public func getRecognizer() -> RecognizerProtocol? { return recognizer } + + public func clearRecognizer() { + self.recognizer = nil + } } diff --git a/runtime/Swift/Sources/Antlr4/Recognizer.swift b/runtime/Swift/Sources/Antlr4/Recognizer.swift index 525e46756..6b3c60088 100644 --- a/runtime/Swift/Sources/Antlr4/Recognizer.swift +++ b/runtime/Swift/Sources/Antlr4/Recognizer.swift @@ -5,227 +5,172 @@ import Foundation -open class Recognizer { - //public static let EOF: Int = -1 + +public protocol RecognizerProtocol { + func getATN() -> ATN + func getGrammarFileName() -> String + func getParseInfo() -> ParseInfo? + func getRuleNames() -> [String] + func getSerializedATN() -> String + func getState() -> Int + func getTokenType(_ tokenName: String) -> Int + func getVocabulary() -> Vocabulary +} + + +open class Recognizer: RecognizerProtocol { //TODO: WeakKeyDictionary NSMapTable Dictionary MapTable> - private let tokenTypeMapCache = HashMap>() + private let tokenTypeMapCache = HashMap() - private let ruleIndexMapCache = HashMap,Dictionary>() - - - private var _listeners: Array = [ConsoleErrorListener.INSTANCE] + private let ruleIndexMapCache = HashMap, [String : Int]>() + private var _listeners: [ANTLRErrorListener] = [ConsoleErrorListener.INSTANCE] public var _interp: ATNInterpreter! 
- private var _stateNumber: Int = -1 + private var _stateNumber = -1 + /// /// mutex for tokenTypeMapCache updates - private var tokenTypeMapCacheMutex = Mutex() + /// + private let tokenTypeMapCacheMutex = Mutex() + /// /// mutex for ruleIndexMapCacheMutex updates - private var ruleIndexMapCacheMutex = Mutex() - - /** Used to print out token names like ID during debugging and - * error reporting. The generated parsers implement a method - * that overrides this to point to their String[] tokenNames. - * - * @deprecated Use {@link #getVocabulary()} instead. - */ - ////@Deprecated - open func getTokenNames() -> [String?]? { - RuntimeException(#function + " must be overridden") - return [] - } + /// + private let ruleIndexMapCacheMutex = Mutex() open func getRuleNames() -> [String] { - RuntimeException(#function + " must be overridden") - return [] + fatalError(#function + " must be overridden") } - - /** - * Get the vocabulary used by the recognizer. - * - * @return A {@link org.antlr.v4.runtime.Vocabulary} instance providing information about the - * vocabulary used by the grammar. - */ - + /// + /// Get the vocabulary used by the recognizer. + /// + /// - Returns: A _org.antlr.v4.runtime.Vocabulary_ instance providing information about the + /// vocabulary used by the grammar. + /// open func getVocabulary() -> Vocabulary { - return Vocabulary.fromTokenNames(getTokenNames()) + fatalError(#function + " must be overridden") } - /** - * Get a map from token names to token types. - * - *

      Used for XPath and tree pattern compilation.

      - */ - public func getTokenTypeMap() -> Dictionary { - let vocabulary: Vocabulary = getVocabulary() - var result: Dictionary? = self.tokenTypeMapCache[vocabulary] - tokenTypeMapCacheMutex.synchronized { - [unowned self] in + /// + /// Get a map from token names to token types. + /// + /// Used for XPath and tree pattern compilation. + /// + public func getTokenTypeMap() -> [String : Int] { + let vocabulary = getVocabulary() + var result = tokenTypeMapCache[vocabulary] + tokenTypeMapCacheMutex.synchronized { [unowned self] in if result == nil { - result = Dictionary() + result = [String : Int]() let length = self.getATN().maxTokenType for i in 0...length { - let literalName: String? = vocabulary.getLiteralName(i) - if literalName != nil { - result![literalName!] = i + if let literalName = vocabulary.getLiteralName(i) { + result![literalName] = i } - let symbolicName: String? = vocabulary.getSymbolicName(i) - if symbolicName != nil { - result![symbolicName!] = i + if let symbolicName = vocabulary.getSymbolicName(i) { + result![symbolicName] = i } } result!["EOF"] = CommonToken.EOF - //TODO Result Collections.unmodifiableMap - self.tokenTypeMapCache[vocabulary] = result! } } return result! - } - /** - * Get a map from rule names to rule indexes. - * - *

      Used for XPath and tree pattern compilation.

      - */ - public func getRuleIndexMap() -> Dictionary { - let ruleNames: [String] = getRuleNames() + /// + /// Get a map from rule names to rule indexes. + /// + /// Used for XPath and tree pattern compilation. + /// + public func getRuleIndexMap() -> [String : Int] { + let ruleNames = getRuleNames() - let result: Dictionary? = self.ruleIndexMapCache[ArrayWrapper(ruleNames)] - ruleIndexMapCacheMutex.synchronized { - [unowned self] in + let result = ruleIndexMapCache[ArrayWrapper(ruleNames)] + ruleIndexMapCacheMutex.synchronized { [unowned self] in if result == nil { self.ruleIndexMapCache[ArrayWrapper(ruleNames)] = Utils.toMap(ruleNames) } } return result! - } public func getTokenType(_ tokenName: String) -> Int { - let ttype: Int? = getTokenTypeMap()[tokenName] - if ttype != nil { - return ttype! - } - return CommonToken.INVALID_TYPE + return getTokenTypeMap()[tokenName] ?? CommonToken.INVALID_TYPE } - /** - * If this recognizer was generated, it will have a serialized ATN - * representation of the grammar. - * - *

      For interpreters, we don't know their serialized ATN despite having - * created the interpreter from it.

      - */ + /// + /// If this recognizer was generated, it will have a serialized ATN + /// representation of the grammar. + /// + /// For interpreters, we don't know their serialized ATN despite having + /// created the interpreter from it. + /// open func getSerializedATN() -> String { - RuntimeException("there is no serialized ATN") - fatalError() - ///throw ANTLRError.UnsupportedOperation /* throw UnsupportedOperationException("there is no /serialized ATN"); */ + fatalError("there is no serialized ATN") } - /** For debugging and other purposes, might want the grammar name. - * Have ANTLR generate an implementation for this method. - */ + /// For debugging and other purposes, might want the grammar name. + /// Have ANTLR generate an implementation for this method. + /// open func getGrammarFileName() -> String { - RuntimeException(#function + " must be overridden") - return "" + fatalError(#function + " must be overridden") } - /** - * Get the {@link org.antlr.v4.runtime.atn.ATN} used by the recognizer for prediction. - * - * @return The {@link org.antlr.v4.runtime.atn.ATN} used by the recognizer for prediction. - */ + /// + /// Get the _org.antlr.v4.runtime.atn.ATN_ used by the recognizer for prediction. + /// + /// - Returns: The _org.antlr.v4.runtime.atn.ATN_ used by the recognizer for prediction. + /// open func getATN() -> ATN { - RuntimeException(#function + " must be overridden") - fatalError() + fatalError(#function + " must be overridden") } - /** - * Get the ATN interpreter used by the recognizer for prediction. - * - * @return The ATN interpreter used by the recognizer for prediction. - */ + /// + /// Get the ATN interpreter used by the recognizer for prediction. + /// + /// - Returns: The ATN interpreter used by the recognizer for prediction. + /// open func getInterpreter() -> ATNInterpreter { return _interp } - /** If profiling during the parse/lex, this will return DecisionInfo records - * for each decision in recognizer in a ParseInfo object. 
- * - * @since 4.3 - */ + /// If profiling during the parse/lex, this will return DecisionInfo records + /// for each decision in recognizer in a ParseInfo object. + /// + /// - Since: 4.3 + /// open func getParseInfo() -> ParseInfo? { return nil } - /** - * Set the ATN interpreter used by the recognizer for prediction. - * - * @param interpreter The ATN interpreter used by the recognizer for - * prediction. - */ + /// + /// Set the ATN interpreter used by the recognizer for prediction. + /// + /// - Parameter interpreter: The ATN interpreter used by the recognizer for + /// prediction. + /// open func setInterpreter(_ interpreter: ATNInterpreter) { _interp = interpreter } - /** What is the error header, normally line/character position information? */ - //public func getErrorHeader(e : RecognitionException - - open func getErrorHeader(_ e: AnyObject) -> String { - let line: Int = (e as! RecognitionException).getOffendingToken().getLine() - let charPositionInLine: Int = (e as! RecognitionException).getOffendingToken().getCharPositionInLine() - return "line " + String(line) + ":" + String(charPositionInLine) + /// + /// What is the error header, normally line/character position information? + /// + open func getErrorHeader(_ e: RecognitionException) -> String { + let offending = e.getOffendingToken() + let line = offending.getLine() + let charPositionInLine = offending.getCharPositionInLine() + return "line \(line):\(charPositionInLine)" } - /** How should a token be displayed in an error message? The default - * is to display just the text, but during development you might - * want to have a lot of information spit out. Override in that case - * to use t.toString() (which, for CommonToken, dumps everything about - * the token). This is better than forcing you to override a method in - * your token objects because you don't have to go modify your lexer - * so that it creates a new Java type. - * - * @deprecated This method is not called by the ANTLR 4 Runtime. 
Specific - * implementations of {@link org.antlr.v4.runtime.ANTLRErrorStrategy} may provide a similar - * feature when necessary. For example, see - * {@link org.antlr.v4.runtime.DefaultErrorStrategy#getTokenErrorDisplay}. - */ - ////@Deprecated - open func getTokenErrorDisplay(_ t: Token?) -> String { - guard let t = t else { - return "" - } - var s: String - - if let text = t.getText() { - s = text - } else { - if t.getType() == CommonToken.EOF { - s = "" - } else { - s = "<\(t.getType())>" - } - } - s = s.replacingOccurrences(of: "\n", with: "\\n") - s = s.replacingOccurrences(of: "\r", with: "\\r") - s = s.replacingOccurrences(of: "\t", with: "\\t") - return "\(s)" - } - - /** - * @exception NullPointerException if {@code listener} is {@code null}. - */ open func addErrorListener(_ listener: ANTLRErrorListener) { - _listeners.append(listener) } @@ -233,16 +178,13 @@ open class Recognizer { _listeners = _listeners.filter() { $0 !== listener } - - // _listeners.removeObject(listener); } open func removeErrorListeners() { _listeners.removeAll() } - - open func getErrorListeners() -> Array { + open func getErrorListeners() -> [ANTLRErrorListener] { return _listeners } @@ -256,7 +198,7 @@ open class Recognizer { return true } - open func precpred(_ localctx: RuleContext?, _ precedence: Int) throws -> Bool { + open func precpred(_ localctx: RuleContext?, _ precedence: Int) -> Bool { return true } @@ -267,13 +209,13 @@ open class Recognizer { return _stateNumber } - /** Indicate that the recognizer has changed internal state that is - * consistent with the ATN state passed in. This way we always know - * where we are in the ATN as the parser goes along. The rule - * context objects form a stack that lets us see the stack of - * invoking rules. Combine this and we have complete ATN - * configuration information. - */ + /// Indicate that the recognizer has changed internal state that is + /// consistent with the ATN state passed in. 
This way we always know + /// where we are in the ATN as the parser goes along. The rule + /// context objects form a stack that lets us see the stack of + /// invoking rules. Combine this and we have complete ATN + /// configuration information. + /// public final func setState(_ atnState: Int) { // System.err.println("setState "+atnState); _stateNumber = atnState @@ -281,26 +223,18 @@ open class Recognizer { } open func getInputStream() -> IntStream? { - RuntimeException(#function + "Must be overridden") - fatalError() + fatalError(#function + " must be overridden") } - open func setInputStream(_ input: IntStream) throws { - RuntimeException(#function + "Must be overridden") - + fatalError(#function + " must be overridden") } - open func getTokenFactory() -> TokenFactory { - RuntimeException(#function + "Must be overridden") - fatalError() + fatalError(#function + " must be overridden") } - open func setTokenFactory(_ input: TokenFactory) { - RuntimeException(#function + "Must be overridden") - + fatalError(#function + " must be overridden") } - } diff --git a/runtime/Swift/Sources/Antlr4/RuleContext.swift b/runtime/Swift/Sources/Antlr4/RuleContext.swift index ce19d0362..a44ce5373 100644 --- a/runtime/Swift/Sources/Antlr4/RuleContext.swift +++ b/runtime/Swift/Sources/Antlr4/RuleContext.swift @@ -4,69 +4,70 @@ */ -/** A rule context is a record of a single rule invocation. -* -* We form a stack of these context objects using the parent -* pointer. A parent pointer of null indicates that the current -* context is the bottom of the stack. The ParserRuleContext subclass -* as a children list so that we can turn this data structure into a -* tree. -* -* The root node always has a null pointer and invokingState of -1. -* -* Upon entry to parsing, the first invoked rule function creates a -* context object (asubclass specialized for that rule such as -* SContext) and makes it the root of a parse tree, recorded by field -* Parser._ctx. 
-* -* public final SContext s() throws RecognitionException { -* SContext _localctx = new SContext(_ctx, getState()); <-- create new node -* enterRule(_localctx, 0, RULE_s); <-- push it -* ... -* exitRule(); <-- pop back to _localctx -* return _localctx; -* } -* -* A subsequent rule invocation of r from the start rule s pushes a -* new context object for r whose parent points at s and use invoking -* state is the state with r emanating as edge label. -* -* The invokingState fields from a context object to the root -* together form a stack of rule indication states where the root -* (bottom of the stack) has a -1 sentinel value. If we invoke start -* symbol s then call r1, which calls r2, the would look like -* this: -* -* SContext[-1] <- root node (bottom of the stack) -* R1Context[p] <- p in rule s called r1 -* R2Context[q] <- q in rule r1 called r2 -* -* So the top of the stack, _ctx, represents a call to the current -* rule and it holds the return address from another rule that invoke -* to this rule. To invoke a rule, we must always have a current context. -* -* The parent contexts are useful for computing lookahead sets and -* getting error information. -* -* These objects are used during parsing and prediction. -* For the special case of parsers, we use the subclass -* ParserRuleContext. -* -* @see org.antlr.v4.runtime.ParserRuleContext -*/ +/// A rule context is a record of a single rule invocation. +/// +/// We form a stack of these context objects using the parent +/// pointer. A parent pointer of null indicates that the current +/// context is the bottom of the stack. The ParserRuleContext subclass +/// as a children list so that we can turn this data structure into a +/// tree. +/// +/// The root node always has a null pointer and invokingState of -1. 
+/// +/// Upon entry to parsing, the first invoked rule function creates a +/// context object (asubclass specialized for that rule such as +/// SContext) and makes it the root of a parse tree, recorded by field +/// Parser._ctx. +/// +/// public final SContext s() throws RecognitionException { +/// SContext _localctx = new SContext(_ctx, getState()); <-- create new node +/// enterRule(_localctx, 0, RULE_s); <-- push it +/// ... +/// exitRule(); <-- pop back to _localctx +/// return _localctx; +/// } +/// +/// A subsequent rule invocation of r from the start rule s pushes a +/// new context object for r whose parent points at s and use invoking +/// state is the state with r emanating as edge label. +/// +/// The invokingState fields from a context object to the root +/// together form a stack of rule indication states where the root +/// (bottom of the stack) has a -1 sentinel value. If we invoke start +/// symbol s then call r1, which calls r2, the would look like +/// this: +/// +/// SContext[-1] <- root node (bottom of the stack) +/// R1Context[p] <- p in rule s called r1 +/// R2Context[q] <- q in rule r1 called r2 +/// +/// So the top of the stack, _ctx, represents a call to the current +/// rule and it holds the return address from another rule that invoke +/// to this rule. To invoke a rule, we must always have a current context. +/// +/// The parent contexts are useful for computing lookahead sets and +/// getting error information. +/// +/// These objects are used during parsing and prediction. +/// For the special case of parsers, we use the subclass +/// ParserRuleContext. +/// +/// - SeeAlso: org.antlr.v4.runtime.ParserRuleContext +/// open class RuleContext: RuleNode { - public static let EMPTY: ParserRuleContext = ParserRuleContext() + public static let EMPTY = ParserRuleContext() - /** What context invoked this rule? */ - public var parent: RuleContext? + /// What context invoked this rule? + public weak var parent: RuleContext? 
+ + /// What state invoked the rule associated with this context? + /// The "return address" is the followState of invokingState + /// If parent is null, this should be -1 this context object represents + /// the start rule. + /// + public var invokingState = -1 - /** What state invoked the rule associated with this context? - * The "return address" is the followState of invokingState - * If parent is null, this should be -1 this context object represents - * the start rule. - */ - public var invokingState: Int = -1 override public init() { super.init() @@ -79,7 +80,7 @@ open class RuleContext: RuleNode { } open func depth() -> Int { - var n: Int = 0 + var n = 0 var p: RuleContext? = self while let pWrap = p { p = pWrap.parent @@ -88,9 +89,9 @@ open class RuleContext: RuleNode { return n } - /** A context is empty if there is no invoking state; meaning nobody called - * current context. - */ + /// A context is empty if there is no invoking state; meaning nobody called + /// current context. + /// open func isEmpty() -> Bool { return invokingState == -1 } @@ -117,13 +118,13 @@ open class RuleContext: RuleNode { return self } - /** Return the combined text of all child nodes. This method only considers - * tokens which have been added to the parse tree. - *

      - * Since tokens on hidden channels (e.g. whitespace or comments) are not - * added to the parse trees, they will not appear in the output of this - * method. - */ + /// Return the combined text of all child nodes. This method only considers + /// tokens which have been added to the parse tree. + /// + /// Since tokens on hidden channels (e.g. whitespace or comments) are not + /// added to the parse trees, they will not appear in the output of this + /// method. + /// open override func getText() -> String { let length = getChildCount() @@ -131,7 +132,7 @@ open class RuleContext: RuleNode { return "" } - let builder: StringBuilder = StringBuilder() + let builder = StringBuilder() for i in 0.. Future { - var ruleNames : Array = parser != nil ? Arrays.asList(parser.getRuleNames()) : null; - return inspect(ruleNames); - } - - public func inspect(ruleNames : Array) -> Future { - var viewer : TreeViewer = TreeViewer(ruleNames, self); - return viewer.open(); - } - - /** Save this tree in a postscript file */ - public func save(parser : Parser, _ fileName : String) - throws; IOException, PrintException - { - var ruleNames : Array = parser != nil ? Arrays.asList(parser.getRuleNames()) : null; - save(ruleNames, fileName); - } - - /** Save this tree in a postscript file using a particular font name and size */ - public func save(parser : Parser, _ fileName : String, - _ fontName : String, _ fontSize : Int) - throws; IOException - { - var ruleNames : Array = parser != nil ? 
Arrays.asList(parser.getRuleNames()) : null; - save(ruleNames, fileName, fontName, fontSize); - } - - /** Save this tree in a postscript file */ - public func save(ruleNames : Array, _ fileName : String) - throws; IOException, PrintException - { - Trees.writePS(self, ruleNames, fileName); - } - - /** Save this tree in a postscript file using a particular font name and size */ - public func save(ruleNames : Array, _ fileName : String, - _ fontName : String, _ fontSize : Int) - throws; IOException - { - Trees.writePS(self, ruleNames, fileName, fontName, fontSize); - } - */ - /** Print out a whole tree, not just a node, in LISP format - * (root child1 .. childN). Print just a node if this is a leaf. - * We have to know the recognizer so we can get rule names. - */ - + /// Print out a whole tree, not just a node, in LISP format + /// (root child1 .. childN). Print just a node if this is a leaf. + /// We have to know the recognizer so we can get rule names. + /// open override func toStringTree(_ recog: Parser) -> String { return Trees.toStringTree(self, recog) } - /** Print out a whole tree, not just a node, in LISP format - * (root child1 .. childN). Print just a node if this is a leaf. - */ - public func toStringTree(_ ruleNames: Array?) -> String { + /// Print out a whole tree, not just a node, in LISP format + /// (root child1 .. childN). Print just a node if this is a leaf. + /// + public func toStringTree(_ ruleNames: [String]?) -> String { return Trees.toStringTree(self, ruleNames) } - open override func toStringTree() -> String { - let info: Array? = nil - return toStringTree(info) + return toStringTree(nil) } + open override var description: String { - let p1: Array? = nil - let p2: RuleContext? 
= nil - return toString(p1, p2) + return toString(nil, nil) } open override var debugDescription: String { return description } - public final func toString(_ recog: Recognizer) -> String { + public final func toString(_ recog: Recognizer) -> String { return toString(recog, ParserRuleContext.EMPTY) } - public final func toString(_ ruleNames: Array) -> String { + public final func toString(_ ruleNames: [String]) -> String { return toString(ruleNames, nil) } // recog null unless ParserRuleContext, in which case we use subclass toString(...) - open func toString(_ recog: Recognizer?, _ stop: RuleContext) -> String { - let ruleNames: [String]? = recog != nil ? recog!.getRuleNames() : nil - let ruleNamesList: Array? = ruleNames ?? nil - return toString(ruleNamesList, stop) + open func toString(_ recog: Recognizer?, _ stop: RuleContext) -> String { + let ruleNames = recog?.getRuleNames() + return toString(ruleNames, stop) } - open func toString(_ ruleNames: Array?, _ stop: RuleContext?) -> String { - let buf: StringBuilder = StringBuilder() + open func toString(_ ruleNames: [String]?, _ stop: RuleContext?) -> String { + let buf = StringBuilder() var p: RuleContext? = self buf.append("[") - while let pWrap = p , pWrap !== stop { - if ruleNames == nil { + while let pWrap = p, pWrap !== stop { + if let ruleNames = ruleNames { + let ruleIndex = pWrap.getRuleIndex() + let ruleIndexInRange = (ruleIndex >= 0 && ruleIndex < ruleNames.count) + let ruleName = (ruleIndexInRange ? ruleNames[ruleIndex] : String(ruleIndex)) + buf.append(ruleName) + } + else { if !pWrap.isEmpty() { buf.append(pWrap.invokingState) } - } else { - let ruleIndex: Int = pWrap.getRuleIndex() - let ruleIndexInRange: Bool = ruleIndex >= 0 && ruleIndex < ruleNames!.count - let ruleName: String = ruleIndexInRange ? 
ruleNames![ruleIndex] : String(ruleIndex) - buf.append(ruleName) } if pWrap.parent != nil && (ruleNames != nil || !pWrap.parent!.isEmpty()) { diff --git a/runtime/Swift/Sources/Antlr4/RuntimeMetaData.swift b/runtime/Swift/Sources/Antlr4/RuntimeMetaData.swift index edabad1c3..384fa6d11 100644 --- a/runtime/Swift/Sources/Antlr4/RuntimeMetaData.swift +++ b/runtime/Swift/Sources/Antlr4/RuntimeMetaData.swift @@ -4,142 +4,132 @@ */ -/** - * This class provides access to the current version of the ANTLR 4 runtime - * library as compile-time and runtime constants, along with methods for - * checking for matching version numbers and notifying listeners in the case - * where a version mismatch is detected. - * - *

      - * The runtime version information is provided by {@link #VERSION} and - * {@link #getRuntimeVersion()}. Detailed information about these values is - * provided in the documentation for each member.

      - * - *

      - * The runtime version check is implemented by {@link #checkVersion}. Detailed - * information about incorporating this call into user code, as well as its use - * in generated code, is provided in the documentation for the method.

      - * - *

      - * Version strings x.y and x.y.z are considered "compatible" and no error - * would be generated. Likewise, version strings x.y-SNAPSHOT and x.y.z are - * considered "compatible" because the major and minor components x.y - * are the same in each.

      - * - *

      - * To trap any error messages issued by this code, use System.setErr() - * in your main() startup code. - *

      - * - * @since 4.3 - */ +/// +/// This class provides access to the current version of the ANTLR 4 runtime +/// library as compile-time and runtime constants, along with methods for +/// checking for matching version numbers and notifying listeners in the case +/// where a version mismatch is detected. +/// +/// +/// The runtime version information is provided by _#VERSION_ and +/// _#getRuntimeVersion()_. Detailed information about these values is +/// provided in the documentation for each member. +/// +/// +/// The runtime version check is implemented by _#checkVersion_. Detailed +/// information about incorporating this call into user code, as well as its use +/// in generated code, is provided in the documentation for the method. +/// +/// +/// Version strings x.y and x.y.z are considered "compatible" and no error +/// would be generated. Likewise, version strings x.y-SNAPSHOT and x.y.z are +/// considered "compatible" because the major and minor components x.y +/// are the same in each. +/// +/// +/// To trap any error messages issued by this code, use System.setErr() +/// in your main() startup code. +/// +/// +/// - Since: 4.3 +/// public class RuntimeMetaData { - /** - * A compile-time constant containing the current version of the ANTLR 4 - * runtime library. - * - *

      - * This compile-time constant value allows generated parsers and other - * libraries to include a literal reference to the version of the ANTLR 4 - * runtime library the code was compiled against. At each release, we - * change this value.

      - * - *

      Version numbers are assumed to have the form - * - * major.minor.patch.revision-suffix, - * - * with the individual components defined as follows.

      - * - *
        - *
      • major is a required non-negative integer, and is equal to - * {@code 4} for ANTLR 4.
      • - *
      • minor is a required non-negative integer.
      • - *
      • patch is an optional non-negative integer. When - * patch is omitted, the {@code .} (dot) appearing before it is - * also omitted.
      • - *
      • revision is an optional non-negative integer, and may only - * be included when patch is also included. When revision - * is omitted, the {@code .} (dot) appearing before it is also omitted.
      • - *
      • suffix is an optional string. When suffix is - * omitted, the {@code -} (hyphen-minus) appearing before it is also - * omitted.
      • - *
      - */ + /// + /// A compile-time constant containing the current version of the ANTLR 4 + /// runtime library. + /// + /// This compile-time constant value allows generated parsers and other + /// libraries to include a literal reference to the version of the ANTLR 4 + /// runtime library the code was compiled against. At each release, we + /// change this value. + /// + /// Version numbers are assumed to have the form + /// + /// __major__.__minor__.__patch__.__revision__-__suffix__, + /// + /// with the individual components defined as follows. + /// + /// * __major__ is a required non-negative integer, and is equal to + /// `4` for ANTLR 4. + /// * __minor__ is a required non-negative integer. + /// * __patch__ is an optional non-negative integer. When + /// patch is omitted, the `.` (dot) appearing before it is + /// also omitted. + /// * __revision__ is an optional non-negative integer, and may only + /// be included when __patch__ is also included. When __revision__ + /// is omitted, the `.` (dot) appearing before it is also omitted. + /// * __suffix__ is an optional string. When __suffix__ is + /// omitted, the `-` (hyphen-minus) appearing before it is also + /// omitted. + /// public static let VERSION: String = "4.7" - /** - * Gets the currently executing version of the ANTLR 4 runtime library. - * - *

      - * This method provides runtime access to the {@link #VERSION} field, as - * opposed to directly referencing the field as a compile-time constant.

      - * - * @return The currently executing version of the ANTLR 4 library - */ + /// + /// Gets the currently executing version of the ANTLR 4 runtime library. + /// + /// + /// This method provides runtime access to the _#VERSION_ field, as + /// opposed to directly referencing the field as a compile-time constant. + /// + /// - Returns: The currently executing version of the ANTLR 4 library + /// public static func getRuntimeVersion() -> String { return RuntimeMetaData.VERSION } - /** - * This method provides the ability to detect mismatches between the version - * of ANTLR 4 used to generate a parser, the version of the ANTLR runtime a - * parser was compiled against, and the version of the ANTLR runtime which - * is currently executing. - * - *

      - * The version check is designed to detect the following two specific - * scenarios.

      - * - *
        - *
      • The ANTLR Tool version used for code generation does not match the - * currently executing runtime version.
      • - *
      • The ANTLR Runtime version referenced at the time a parser was - * compiled does not match the currently executing runtime version.
      • - *
      - * - *

      - * Starting with ANTLR 4.3, the code generator emits a call to this method - * using two constants in each generated lexer and parser: a hard-coded - * constant indicating the version of the tool used to generate the parser - * and a reference to the compile-time constant {@link #VERSION}. At - * runtime, this method is called during the initialization of the generated - * parser to detect mismatched versions, and notify the registered listeners - * prior to creating instances of the parser.

      - * - *

      - * This method does not perform any detection or filtering of semantic - * changes between tool and runtime versions. It simply checks for a - * version match and emits an error to stderr if a difference - * is detected.

      - * - *

      - * Note that some breaking changes between releases could result in other - * types of runtime exceptions, such as a {@link LinkageError}, prior to - * calling this method. In these cases, the underlying version mismatch will - * not be reported here. This method is primarily intended to - * notify users of potential semantic changes between releases that do not - * result in binary compatibility problems which would be detected by the - * class loader. As with semantic changes, changes that break binary - * compatibility between releases are mentioned in the release notes - * accompanying the affected release.

      - * - *

      - * Additional note for target developers: The version check - * implemented by this class is designed to address specific compatibility - * concerns that may arise during the execution of Java applications. Other - * targets should consider the implementation of this method in the context - * of that target's known execution environment, which may or may not - * resemble the design provided for the Java target.

      - * - * @param generatingToolVersion The version of the tool used to generate a parser. - * This value may be null when called from user code that was not generated - * by, and does not reference, the ANTLR 4 Tool itself. - * @param compileTimeVersion The version of the runtime the parser was - * compiled against. This should always be passed using a direct reference - * to {@link #VERSION}. - */ + /// + /// This method provides the ability to detect mismatches between the version + /// of ANTLR 4 used to generate a parser, the version of the ANTLR runtime a + /// parser was compiled against, and the version of the ANTLR runtime which + /// is currently executing. + /// + /// The version check is designed to detect the following two specific + /// scenarios. + /// + /// * The ANTLR Tool version used for code generation does not match the + /// currently executing runtime version. + /// * The ANTLR Runtime version referenced at the time a parser was + /// compiled does not match the currently executing runtime version. + /// + /// Starting with ANTLR 4.3, the code generator emits a call to this method + /// using two constants in each generated lexer and parser: a hard-coded + /// constant indicating the version of the tool used to generate the parser + /// and a reference to the compile-time constant _#VERSION_. At + /// runtime, this method is called during the initialization of the generated + /// parser to detect mismatched versions, and notify the registered listeners + /// prior to creating instances of the parser. + /// + /// This method does not perform any detection or filtering of semantic + /// changes between tool and runtime versions. It simply checks for a + /// version match and emits an error to stderr if a difference + /// is detected. + /// + /// Note that some breaking changes between releases could result in other + /// types of runtime exceptions, such as a _LinkageError_, prior to + /// calling this method. 
In these cases, the underlying version mismatch will + /// not be reported here. This method is primarily intended to + /// notify users of potential semantic changes between releases that do not + /// result in binary compatibility problems which would be detected by the + /// class loader. As with semantic changes, changes that break binary + /// compatibility between releases are mentioned in the release notes + /// accompanying the affected release. + /// + /// __ Additional note for target developers:__ The version check + /// implemented by this class is designed to address specific compatibility + /// concerns that may arise during the execution of Java applications. Other + /// targets should consider the implementation of this method in the context + /// of that target's known execution environment, which may or may not + /// resemble the design provided for the Java target. + /// + /// - Parameter generatingToolVersion: The version of the tool used to generate a parser. + /// This value may be null when called from user code that was not generated + /// by, and does not reference, the ANTLR 4 Tool itself. + /// - Parameter compileTimeVersion: The version of the runtime the parser was + /// compiled against. This should always be passed using a direct reference + /// to _#VERSION_. + /// public static func checkVersion(_ generatingToolVersion: String, _ compileTimeVersion: String) { let runtimeVersion: String = RuntimeMetaData.VERSION var runtimeConflictsWithGeneratingTool: Bool = false @@ -163,15 +153,15 @@ public class RuntimeMetaData { } } - /** - * Gets the major and minor version numbers from a version string. For - * details about the syntax of the input {@code version}. - * E.g., from x.y.z return x.y. - * - * @param version The complete version string. - * @return A string of the form major.minor containing - * only the major and minor components of the version string. - */ + /// + /// Gets the major and minor version numbers from a version string. 
For + /// details about the syntax of the input `version`. + /// E.g., from x.y.z return x.y. + /// + /// - Parameter version: The complete version string. + /// - Returns: A string of the form __major__.__minor__ containing + /// only the major and minor components of the version string. + /// public static func getMajorMinorVersion(_ version: String) -> String { let firstDot: Int = version.indexOf(".") let secondDot: Int = firstDot >= 0 ? version.indexOf(".", startIndex: firstDot + 1) : -1 diff --git a/runtime/Swift/Sources/Antlr4/Token.swift b/runtime/Swift/Sources/Antlr4/Token.swift index e41868fcb..264318ddd 100644 --- a/runtime/Swift/Sources/Antlr4/Token.swift +++ b/runtime/Swift/Sources/Antlr4/Token.swift @@ -5,98 +5,100 @@ -/** A token has properties: text, type, line, character position in the line - * (so we can ignore tabs), token channel, index, and source from which - * we obtained this token. - */ +/// A token has properties: text, type, line, character position in the line +/// (so we can ignore tabs), token channel, index, and source from which +/// we obtained this token. +/// public protocol Token: class, CustomStringConvertible { //let INVALID_TYPE : Int = 0; - /** During lookahead operations, this "token" signifies we hit rule end ATN state - * and did not follow it despite needing to. - */ + /// During lookahead operations, this "token" signifies we hit rule end ATN state + /// and did not follow it despite needing to. + /// //let EPSILON : Int = -2; //let MIN_USER_TOKEN_TYPE : Int = 1; //let EOF : Int = IntStream.EOF; - /** All tokens go to the parser (unless skip() is called in that rule) - * on a particular "channel". The parser tunes to a particular channel - * so that whitespace etc... can go to the parser on a "hidden" channel. - */ + /// All tokens go to the parser (unless skip() is called in that rule) + /// on a particular "channel". The parser tunes to a particular channel + /// so that whitespace etc... 
can go to the parser on a "hidden" channel. + /// //let DEFAULT_CHANNEL : Int = 0; - /** Anything on different channel than DEFAULT_CHANNEL is not parsed - * by parser. - */ + /// Anything on different channel than DEFAULT_CHANNEL is not parsed + /// by parser. + /// //let HIDDEN_CHANNEL : Int = 1; - /** - * This is the minimum constant value which can be assigned to a - * user-defined token channel. - * - *

      - * The non-negative numbers less than {@link #MIN_USER_CHANNEL_VALUE} are - * assigned to the predefined channels {@link #DEFAULT_CHANNEL} and - * {@link #HIDDEN_CHANNEL}.

      - * - * @see org.antlr.v4.runtime.Token#getChannel() - */ + /// + /// This is the minimum constant value which can be assigned to a + /// user-defined token channel. + /// + /// + /// The non-negative numbers less than _#MIN_USER_CHANNEL_VALUE_ are + /// assigned to the predefined channels _#DEFAULT_CHANNEL_ and + /// _#HIDDEN_CHANNEL_. + /// + /// - SeeAlso: org.antlr.v4.runtime.Token#getChannel() + /// //let MIN_USER_CHANNEL_VALUE : Int = 2; - /** - * Get the text of the token. - */ + /// + /// Get the text of the token. + /// func getText() -> String? - /** Get the token type of the token */ + /// Get the token type of the token func getType() -> Int - /** The line number on which the 1st character of this token was matched, - * line=1..n - */ + /// The line number on which the 1st character of this token was matched, + /// line=1..n + /// func getLine() -> Int - /** The index of the first character of this token relative to the - * beginning of the line at which it occurs, 0..n-1 - */ + /// The index of the first character of this token relative to the + /// beginning of the line at which it occurs, 0..n-1 + /// func getCharPositionInLine() -> Int - /** Return the channel this token. Each token can arrive at the parser - * on a different channel, but the parser only "tunes" to a single channel. - * The parser ignores everything not on DEFAULT_CHANNEL. - */ + /// Return the channel this token. Each token can arrive at the parser + /// on a different channel, but the parser only "tunes" to a single channel. + /// The parser ignores everything not on DEFAULT_CHANNEL. + /// func getChannel() -> Int - /** An index from 0..n-1 of the token object in the input stream. - * This must be valid in order to print token streams and - * use TokenRewriteStream. - * - * Return -1 to indicate that this token was conjured up since - * it doesn't have a valid index. - */ + /// An index from 0..n-1 of the token object in the input stream. 
+ /// This must be valid in order to print token streams and + /// use TokenRewriteStream. + /// + /// Return -1 to indicate that this token was conjured up since + /// it doesn't have a valid index. + /// func getTokenIndex() -> Int - /** The starting character index of the token - * This method is optional; return -1 if not implemented. - */ + /// The starting character index of the token + /// This method is optional; return -1 if not implemented. + /// func getStartIndex() -> Int - /** The last character index of the token. - * This method is optional; return -1 if not implemented. - */ + /// The last character index of the token. + /// This method is optional; return -1 if not implemented. + /// func getStopIndex() -> Int - /** Gets the {@link org.antlr.v4.runtime.TokenSource} which created this token. - */ + /// Gets the _org.antlr.v4.runtime.TokenSource_ which created this token. + /// func getTokenSource() -> TokenSource? - /** - * Gets the {@link org.antlr.v4.runtime.CharStream} from which this token was derived. - */ + /// + /// Gets the _org.antlr.v4.runtime.CharStream_ from which this token was derived. + /// func getInputStream() -> CharStream? + func getTokenSourceAndStream() -> TokenSourceAndStream + var visited: Bool { get set } } diff --git a/runtime/Swift/Sources/Antlr4/TokenFactory.swift b/runtime/Swift/Sources/Antlr4/TokenFactory.swift index b9501b6a5..63fa74b3c 100644 --- a/runtime/Swift/Sources/Antlr4/TokenFactory.swift +++ b/runtime/Swift/Sources/Antlr4/TokenFactory.swift @@ -4,22 +4,44 @@ */ -/** The default mechanism for creating tokens. It's used by default in Lexer and - * the error handling strategy (to create missing tokens). Notifying the parser - * of a new factory means that it notifies it's token source and error strategy. - */ - +/// The default mechanism for creating tokens. It's used by default in Lexer and +/// the error handling strategy (to create missing tokens). 
Notifying the parser +/// of a new factory means that it notifies it's token source and error strategy. +/// public protocol TokenFactory { //typealias Symbol - /** This is the method used to create tokens in the lexer and in the - * error handling strategy. If text!=null, than the start and stop positions - * are wiped to -1 in the text override is set in the CommonToken. - */ - func create(_ source: (TokenSource?, CharStream?), _ type: Int, _ text: String?, + /// This is the method used to create tokens in the lexer and in the + /// error handling strategy. If text!=null, than the start and stop positions + /// are wiped to -1 in the text override is set in the CommonToken. + /// + func create(_ source: TokenSourceAndStream, _ type: Int, _ text: String?, _ channel: Int, _ start: Int, _ stop: Int, _ line: Int, _ charPositionInLine: Int) -> Token - /** Generically useful */ + /// Generically useful func create(_ type: Int, _ text: String) -> Token } + + +/** + Holds the references to the TokenSource and CharStream used to create a Token. + These are together to reduce memory footprint by having one instance of + TokenSourceAndStream shared across many tokens. The references here are weak + to avoid retain cycles. + */ +public class TokenSourceAndStream { + /// + /// An empty TokenSourceAndStream which is used as the default value of + /// _#source_ for tokens that do not have a source. + /// + public static let EMPTY = TokenSourceAndStream() + + public weak var tokenSource: TokenSource? + public weak var stream: CharStream? + + public init(_ tokenSource: TokenSource? = nil, _ stream: CharStream? 
= nil) { + self.tokenSource = tokenSource + self.stream = stream + } +} diff --git a/runtime/Swift/Sources/Antlr4/TokenSource.swift b/runtime/Swift/Sources/Antlr4/TokenSource.swift index bbbd813b2..5b3780d76 100644 --- a/runtime/Swift/Sources/Antlr4/TokenSource.swift +++ b/runtime/Swift/Sources/Antlr4/TokenSource.swift @@ -4,79 +4,79 @@ */ -/** - * A source of tokens must provide a sequence of tokens via {@link #nextToken()} - * and also must reveal it's source of characters; {@link org.antlr.v4.runtime.CommonToken}'s text is - * computed from a {@link org.antlr.v4.runtime.CharStream}; it only store indices into the char - * stream. - * - *

      Errors from the lexer are never passed to the parser. Either you want to keep - * going or you do not upon token recognition error. If you do not want to - * continue lexing then you do not want to continue parsing. Just throw an - * exception not under {@link org.antlr.v4.runtime.RecognitionException} and Java will naturally toss - * you all the way out of the recognizers. If you want to continue lexing then - * you should not throw an exception to the parser--it has already requested a - * token. Keep lexing until you get a valid one. Just report errors and keep - * going, looking for a valid token.

      - */ +/// +/// A source of tokens must provide a sequence of tokens via _#nextToken()_ +/// and also must reveal it's source of characters; _org.antlr.v4.runtime.CommonToken_'s text is +/// computed from a _org.antlr.v4.runtime.CharStream_; it only store indices into the char +/// stream. +/// +/// Errors from the lexer are never passed to the parser. Either you want to keep +/// going or you do not upon token recognition error. If you do not want to +/// continue lexing then you do not want to continue parsing. Just throw an +/// exception not under _org.antlr.v4.runtime.RecognitionException_ and Java will naturally toss +/// you all the way out of the recognizers. If you want to continue lexing then +/// you should not throw an exception to the parser--it has already requested a +/// token. Keep lexing until you get a valid one. Just report errors and keep +/// going, looking for a valid token. +/// public protocol TokenSource: class { - /** - * Return a {@link org.antlr.v4.runtime.Token} object from your input stream (usually a - * {@link org.antlr.v4.runtime.CharStream}). Do not fail/return upon lexing error; keep chewing - * on the characters until you get a good one; errors are not passed through - * to the parser. - */ + /// + /// Return a _org.antlr.v4.runtime.Token_ object from your input stream (usually a + /// _org.antlr.v4.runtime.CharStream_). Do not fail/return upon lexing error; keep chewing + /// on the characters until you get a good one; errors are not passed through + /// to the parser. + /// func nextToken() throws -> Token - /** - * Get the line number for the current position in the input stream. The - * first line in the input is line 1. - * - * @return The line number for the current position in the input stream, or - * 0 if the current token source does not track line numbers. - */ + /// + /// Get the line number for the current position in the input stream. The + /// first line in the input is line 1. 
+ /// + /// - Returns: The line number for the current position in the input stream, or + /// 0 if the current token source does not track line numbers. + /// func getLine() -> Int - /** - * Get the index into the current line for the current position in the input - * stream. The first character on a line has position 0. - * - * @return The line number for the current position in the input stream, or - * -1 if the current token source does not track character positions. - */ + /// + /// Get the index into the current line for the current position in the input + /// stream. The first character on a line has position 0. + /// + /// - Returns: The line number for the current position in the input stream, or + /// -1 if the current token source does not track character positions. + /// func getCharPositionInLine() -> Int - /** - * Get the {@link org.antlr.v4.runtime.CharStream} from which this token source is currently - * providing tokens. - * - * @return The {@link org.antlr.v4.runtime.CharStream} associated with the current position in - * the input, or {@code null} if no input stream is available for the token - * source. - */ + /// + /// Get the _org.antlr.v4.runtime.CharStream_ from which this token source is currently + /// providing tokens. + /// + /// - Returns: The _org.antlr.v4.runtime.CharStream_ associated with the current position in + /// the input, or `null` if no input stream is available for the token + /// source. + /// func getInputStream() -> CharStream? - /** - * Gets the name of the underlying input source. This method returns a - * non-null, non-empty string. If such a name is not known, this method - * returns {@link org.antlr.v4.runtime.IntStream#UNKNOWN_SOURCE_NAME}. - */ + /// + /// Gets the name of the underlying input source. This method returns a + /// non-null, non-empty string. If such a name is not known, this method + /// returns _org.antlr.v4.runtime.IntStream#UNKNOWN_SOURCE_NAME_. 
+ /// func getSourceName() -> String - /** - * Set the {@link org.antlr.v4.runtime.TokenFactory} this token source should use for creating - * {@link org.antlr.v4.runtime.Token} objects from the input. - * - * @param factory The {@link org.antlr.v4.runtime.TokenFactory} to use for creating tokens. - */ + /// + /// Set the _org.antlr.v4.runtime.TokenFactory_ this token source should use for creating + /// _org.antlr.v4.runtime.Token_ objects from the input. + /// + /// - Parameter factory: The _org.antlr.v4.runtime.TokenFactory_ to use for creating tokens. + /// func setTokenFactory(_ factory: TokenFactory) - /** - * Gets the {@link org.antlr.v4.runtime.TokenFactory} this token source is currently using for - * creating {@link org.antlr.v4.runtime.Token} objects from the input. - * - * @return The {@link org.antlr.v4.runtime.TokenFactory} currently used by this token source. - */ + /// + /// Gets the _org.antlr.v4.runtime.TokenFactory_ this token source is currently using for + /// creating _org.antlr.v4.runtime.Token_ objects from the input. + /// + /// - Returns: The _org.antlr.v4.runtime.TokenFactory_ currently used by this token source. + /// func getTokenFactory() -> TokenFactory } diff --git a/runtime/Swift/Sources/Antlr4/TokenStream.swift b/runtime/Swift/Sources/Antlr4/TokenStream.swift index 92c35e549..536f33b90 100644 --- a/runtime/Swift/Sources/Antlr4/TokenStream.swift +++ b/runtime/Swift/Sources/Antlr4/TokenStream.swift @@ -5,134 +5,133 @@ -/** - * An {@link org.antlr.v4.runtime.IntStream} whose symbols are {@link org.antlr.v4.runtime.Token} instances. - */ +/// +/// An _org.antlr.v4.runtime.IntStream_ whose symbols are _org.antlr.v4.runtime.Token_ instances. +/// public protocol TokenStream: IntStream { - /** - * Get the {@link org.antlr.v4.runtime.Token} instance associated with the value returned by - * {@link #LA LA(k)}. This method has the same pre- and post-conditions as - * {@link org.antlr.v4.runtime.IntStream#LA}. 
In addition, when the preconditions of this method - * are met, the return value is non-null and the value of - * {@code LT(k).getType()==LA(k)}. - * - * @see org.antlr.v4.runtime.IntStream#LA - */ + /// + /// Get the _org.antlr.v4.runtime.Token_ instance associated with the value returned by + /// _#LA LA(k)_. This method has the same pre- and post-conditions as + /// _org.antlr.v4.runtime.IntStream#LA_. In addition, when the preconditions of this method + /// are met, the return value is non-null and the value of + /// `LT(k).getType()==LA(k)`. + /// + /// - SeeAlso: org.antlr.v4.runtime.IntStream#LA + /// func LT(_ k: Int) throws -> Token? - /** - * Gets the {@link org.antlr.v4.runtime.Token} at the specified {@code index} in the stream. When - * the preconditions of this method are met, the return value is non-null. - * - *

      The preconditions for this method are the same as the preconditions of - * {@link org.antlr.v4.runtime.IntStream#seek}. If the behavior of {@code seek(index)} is - * unspecified for the current state and given {@code index}, then the - * behavior of this method is also unspecified.

      - * - *

      The symbol referred to by {@code index} differs from {@code seek()} only - * in the case of filtering streams where {@code index} lies before the end - * of the stream. Unlike {@code seek()}, this method does not adjust - * {@code index} to point to a non-ignored symbol.

      - * - * @throws IllegalArgumentException if {code index} is less than 0 - * @throws UnsupportedOperationException if the stream does not support - * retrieving the token at the specified index - */ + /// + /// Gets the _org.antlr.v4.runtime.Token_ at the specified `index` in the stream. When + /// the preconditions of this method are met, the return value is non-null. + /// + /// The preconditions for this method are the same as the preconditions of + /// _org.antlr.v4.runtime.IntStream#seek_. If the behavior of `seek(index)` is + /// unspecified for the current state and given `index`, then the + /// behavior of this method is also unspecified. + /// + /// The symbol referred to by `index` differs from `seek()` only + /// in the case of filtering streams where `index` lies before the end + /// of the stream. Unlike `seek()`, this method does not adjust + /// `index` to point to a non-ignored symbol. + /// + /// - Throws: ANTLRError.illegalArgument if {code index} is less than 0 + /// - Throws: ANTLRError.unsupportedOperation if the stream does not support + /// retrieving the token at the specified index + /// func get(_ index: Int) throws -> Token - /** - * Gets the underlying {@link org.antlr.v4.runtime.TokenSource} which provides tokens for this - * stream. - */ + /// + /// Gets the underlying _org.antlr.v4.runtime.TokenSource_ which provides tokens for this + /// stream. + /// func getTokenSource() -> TokenSource - /** - * Return the text of all tokens within the specified {@code interval}. This - * method behaves like the following code (including potential exceptions - * for violating preconditions of {@link #get}, but may be optimized by the - * specific implementation. - * - *
      -     * TokenStream stream = ...;
      -     * String text = "";
      -     * for (int i = interval.a; i <= interval.b; i++) {
      -     *   text += stream.get(i).getText();
      -     * }
      -     * 
      - * - * @param interval The interval of tokens within this stream to get text - * for. - * @return The text of all tokens within the specified interval in this - * stream. - * - * @throws NullPointerException if {@code interval} is {@code null} - */ + /// + /// Return the text of all tokens within the specified `interval`. This + /// method behaves like the following code (including potential exceptions + /// for violating preconditions of _#get_, but may be optimized by the + /// specific implementation. + /// + /// + /// TokenStream stream = ...; + /// String text = ""; + /// for (int i = interval.a; i <= interval.b; i++) { + /// text += stream.get(i).getText(); + /// } + /// + /// + /// - Parameter interval: The interval of tokens within this stream to get text + /// for. + /// - Returns: The text of all tokens within the specified interval in this + /// stream. + /// + /// func getText(_ interval: Interval) throws -> String - /** - * Return the text of all tokens in the stream. This method behaves like the - * following code, including potential exceptions from the calls to - * {@link org.antlr.v4.runtime.IntStream#size} and {@link #getText(org.antlr.v4.runtime.misc.Interval)}, but may be - * optimized by the specific implementation. - * - *
      -     * TokenStream stream = ...;
      -     * String text = stream.getText(new Interval(0, stream.size()));
      -     * 
      - * - * @return The text of all tokens in the stream. - */ + /// + /// Return the text of all tokens in the stream. This method behaves like the + /// following code, including potential exceptions from the calls to + /// _org.antlr.v4.runtime.IntStream#size_ and _#getText(org.antlr.v4.runtime.misc.Interval)_, but may be + /// optimized by the specific implementation. + /// + /// + /// TokenStream stream = ...; + /// String text = stream.getText(new Interval(0, stream.size())); + /// + /// + /// - Returns: The text of all tokens in the stream. + /// func getText() throws -> String - /** - * Return the text of all tokens in the source interval of the specified - * context. This method behaves like the following code, including potential - * exceptions from the call to {@link #getText(org.antlr.v4.runtime.misc.Interval)}, but may be - * optimized by the specific implementation. - * - *

      If {@code ctx.getSourceInterval()} does not return a valid interval of - * tokens provided by this stream, the behavior is unspecified.

      - * - *
      -     * TokenStream stream = ...;
      -     * String text = stream.getText(ctx.getSourceInterval());
      -     * 
      - * - * @param ctx The context providing the source interval of tokens to get - * text for. - * @return The text of all tokens within the source interval of {@code ctx}. - */ + /// + /// Return the text of all tokens in the source interval of the specified + /// context. This method behaves like the following code, including potential + /// exceptions from the call to _#getText(org.antlr.v4.runtime.misc.Interval)_, but may be + /// optimized by the specific implementation. + /// + /// If `ctx.getSourceInterval()` does not return a valid interval of + /// tokens provided by this stream, the behavior is unspecified. + /// + /// + /// TokenStream stream = ...; + /// String text = stream.getText(ctx.getSourceInterval()); + /// + /// + /// - Parameter ctx: The context providing the source interval of tokens to get + /// text for. + /// - Returns: The text of all tokens within the source interval of `ctx`. + /// func getText(_ ctx: RuleContext) throws -> String - /** - * Return the text of all tokens in this stream between {@code start} and - * {@code stop} (inclusive). - * - *

      If the specified {@code start} or {@code stop} token was not provided by - * this stream, or if the {@code stop} occurred before the {@code start} - * token, the behavior is unspecified.

      - * - *

      For streams which ensure that the {@link org.antlr.v4.runtime.Token#getTokenIndex} method is - * accurate for all of its provided tokens, this method behaves like the - * following code. Other streams may implement this method in other ways - * provided the behavior is consistent with this at a high level.

      - * - *
      -     * TokenStream stream = ...;
      -     * String text = "";
      -     * for (int i = start.getTokenIndex(); i <= stop.getTokenIndex(); i++) {
      -     *   text += stream.get(i).getText();
      -     * }
      -     * 
      - * - * @param start The first token in the interval to get text for. - * @param stop The last token in the interval to get text for (inclusive). - * @return The text of all tokens lying between the specified {@code start} - * and {@code stop} tokens. - * - * @throws UnsupportedOperationException if this stream does not support - * this method for the specified tokens - */ + /// + /// Return the text of all tokens in this stream between `start` and + /// `stop` (inclusive). + /// + /// If the specified `start` or `stop` token was not provided by + /// this stream, or if the `stop` occurred before the `start` + /// token, the behavior is unspecified. + /// + /// For streams which ensure that the _org.antlr.v4.runtime.Token#getTokenIndex_ method is + /// accurate for all of its provided tokens, this method behaves like the + /// following code. Other streams may implement this method in other ways + /// provided the behavior is consistent with this at a high level. + /// + /// + /// TokenStream stream = ...; + /// String text = ""; + /// for (int i = start.getTokenIndex(); i <= stop.getTokenIndex(); i++) { + /// text += stream.get(i).getText(); + /// } + /// + /// + /// - Parameter start: The first token in the interval to get text for. + /// - Parameter stop: The last token in the interval to get text for (inclusive). + /// - Throws: ANTLRError.unsupportedOperation if this stream does not support + /// this method for the specified tokens + /// - Returns: The text of all tokens lying between the specified `start` + /// and `stop` tokens. + /// + /// func getText(_ start: Token?, _ stop: Token?) 
throws -> String } diff --git a/runtime/Swift/Sources/Antlr4/TokenStreamRewriter.swift b/runtime/Swift/Sources/Antlr4/TokenStreamRewriter.swift index 9423ba7b7..2a74c3681 100644 --- a/runtime/Swift/Sources/Antlr4/TokenStreamRewriter.swift +++ b/runtime/Swift/Sources/Antlr4/TokenStreamRewriter.swift @@ -4,82 +4,82 @@ */ -/** - * Useful for rewriting out a buffered input token stream after doing some - * augmentation or other manipulations on it. - * - *

      - * You can insert stuff, replace, and delete chunks. Note that the operations - * are done lazily--only if you convert the buffer to a {@link String} with - * {@link org.antlr.v4.runtime.TokenStream#getText()}. This is very efficient because you are not - * moving data around all the time. As the buffer of tokens is converted to - * strings, the {@link #getText()} method(s) scan the input token stream and - * check to see if there is an operation at the current index. If so, the - * operation is done and then normal {@link String} rendering continues on the - * buffer. This is like having multiple Turing machine instruction streams - * (programs) operating on a single input tape. :)

      - * - *

      - * This rewriter makes no modifications to the token stream. It does not ask the - * stream to fill itself up nor does it advance the input cursor. The token - * stream {@link org.antlr.v4.runtime.TokenStream#index()} will return the same value before and - * after any {@link #getText()} call.

      - * - *

      - * The rewriter only works on tokens that you have in the buffer and ignores the - * current input cursor. If you are buffering tokens on-demand, calling - * {@link #getText()} halfway through the input will only do rewrites for those - * tokens in the first half of the file.

      - * - *

      - * Since the operations are done lazily at {@link #getText}-time, operations do - * not screw up the token index values. That is, an insert operation at token - * index {@code i} does not change the index values for tokens - * {@code i}+1..n-1.

      - * - *

      - * Because operations never actually alter the buffer, you may always get the - * original token stream back without undoing anything. Since the instructions - * are queued up, you can easily simulate transactions and roll back any changes - * if there is an error just by removing instructions. For example,

      - * - *
      - * CharStream input = new ANTLRFileStream("input");
      - * TLexer lex = new TLexer(input);
      - * CommonTokenStream tokens = new CommonTokenStream(lex);
      - * T parser = new T(tokens);
      - * TokenStreamRewriter rewriter = new TokenStreamRewriter(tokens);
      - * parser.startRule();
      - * 
      - * - *

      - * Then in the rules, you can execute (assuming rewriter is visible):

      - * - *
      - * Token t,u;
      - * ...
      - * rewriter.insertAfter(t, "text to put after t");}
      - * rewriter.insertAfter(u, "text after u");}
      - * System.out.println(rewriter.getText());
      - * 
      - * - *

      - * You can also have multiple "instruction streams" and get multiple rewrites - * from a single pass over the input. Just name the instruction streams and use - * that name again when printing the buffer. This could be useful for generating - * a C file and also its header file--all from the same buffer:

      - * - *
      - * rewriter.insertAfter("pass1", t, "text to put after t");}
      - * rewriter.insertAfter("pass2", u, "text after u");}
      - * System.out.println(rewriter.getText("pass1"));
      - * System.out.println(rewriter.getText("pass2"));
      - * 
      - * - *

      - * If you don't use named rewrite streams, a "default" stream is used as the - * first example shows.

      - */ +/// +/// Useful for rewriting out a buffered input token stream after doing some +/// augmentation or other manipulations on it. +/// +/// +/// You can insert stuff, replace, and delete chunks. Note that the operations +/// are done lazily--only if you convert the buffer to a _String_ with +/// _org.antlr.v4.runtime.TokenStream#getText()_. This is very efficient because you are not +/// moving data around all the time. As the buffer of tokens is converted to +/// strings, the _#getText()_ method(s) scan the input token stream and +/// check to see if there is an operation at the current index. If so, the +/// operation is done and then normal _String_ rendering continues on the +/// buffer. This is like having multiple Turing machine instruction streams +/// (programs) operating on a single input tape. :) +/// +/// +/// This rewriter makes no modifications to the token stream. It does not ask the +/// stream to fill itself up nor does it advance the input cursor. The token +/// stream _org.antlr.v4.runtime.TokenStream#index()_ will return the same value before and +/// after any _#getText()_ call. +/// +/// +/// The rewriter only works on tokens that you have in the buffer and ignores the +/// current input cursor. If you are buffering tokens on-demand, calling +/// _#getText()_ halfway through the input will only do rewrites for those +/// tokens in the first half of the file. +/// +/// +/// Since the operations are done lazily at _#getText_-time, operations do +/// not screw up the token index values. That is, an insert operation at token +/// index `i` does not change the index values for tokens +/// `i`+1..n-1. +/// +/// +/// Because operations never actually alter the buffer, you may always get the +/// original token stream back without undoing anything. Since the instructions +/// are queued up, you can easily simulate transactions and roll back any changes +/// if there is an error just by removing instructions. 
For example, +/// +/// +/// CharStream input = new ANTLRFileStream("input"); +/// TLexer lex = new TLexer(input); +/// CommonTokenStream tokens = new CommonTokenStream(lex); +/// T parser = new T(tokens); +/// TokenStreamRewriter rewriter = new TokenStreamRewriter(tokens); +/// parser.startRule(); +/// +/// +/// +/// Then in the rules, you can execute (assuming rewriter is visible): +/// +/// +/// Token t,u; +/// ... +/// rewriter.insertAfter(t, "text to put after t");} +/// rewriter.insertAfter(u, "text after u");} +/// System.out.println(rewriter.getText()); +/// +/// +/// +/// You can also have multiple "instruction streams" and get multiple rewrites +/// from a single pass over the input. Just name the instruction streams and use +/// that name again when printing the buffer. This could be useful for generating +/// a C file and also its header file--all from the same buffer: +/// +/// +/// rewriter.insertAfter("pass1", t, "text to put after t");} +/// rewriter.insertAfter("pass2", u, "text after u");} +/// System.out.println(rewriter.getText("pass1")); +/// System.out.println(rewriter.getText("pass2")); +/// +/// +/// +/// If you don't use named rewrite streams, a "default" stream is used as the +/// first example shows. +/// import Foundation @@ -90,9 +90,9 @@ public class TokenStreamRewriter { // Define the rewrite operation hierarchy public class RewriteOperation: CustomStringConvertible { - /** What index into rewrites List are we? */ + /// What index into rewrites List are we? internal var instructionIndex: Int = 0 - /** Token buffer index. */ + /// Token buffer index. internal var index: Int internal var text: String? internal var lastIndex: Int = 0 @@ -109,9 +109,9 @@ public class TokenStreamRewriter { self.tokens = tokens } - /** Execute the rewrite operation by possibly adding to the buffer. - * Return the index of the next token to operate on. - */ + /// Execute the rewrite operation by possibly adding to the buffer. 
+ /// Return the index of the next token to operate on. + /// public func execute(_ buf: StringBuilder) throws -> Int { return index } @@ -144,9 +144,9 @@ public class TokenStreamRewriter { } } - /** I'm going to try replacing range from x..y with (y-x)+1 ReplaceOp - * instructions. - */ + /// I'm going to try replacing range from x..y with (y-x)+1 ReplaceOp + /// instructions. + /// public class ReplaceOp: RewriteOperation { @@ -190,55 +190,55 @@ public class TokenStreamRewriter { final var isEmpty: Bool{ return rewrites.isEmpty } - /** We need to combine operations and report invalid operations (like - * overlapping replaces that are not completed nested). Inserts to - * same index need to be combined etc... Here are the cases: - * - * I.i.u I.j.v leave alone, nonoverlapping - * I.i.u I.i.v combine: Iivu - * - * R.i-j.u R.x-y.v | i-j in x-y delete first R - * R.i-j.u R.i-j.v delete first R - * R.i-j.u R.x-y.v | x-y in i-j ERROR - * R.i-j.u R.x-y.v | boundaries overlap ERROR - * - * Delete special case of replace (text==null): - * D.i-j.u D.x-y.v | boundaries overlap combine to max(min)..max(right) - * - * I.i.u R.x-y.v | i in (x+1)-y delete I (since insert before - * we're not deleting i) - * I.i.u R.x-y.v | i not in (x+1)-y leave alone, nonoverlapping - * R.x-y.v I.i.u | i in x-y ERROR - * R.x-y.v I.x.u R.x-y.uv (combine, delete I) - * R.x-y.v I.i.u | i not in x-y leave alone, nonoverlapping - * - * I.i.u = insert u before op @ index i - * R.x-y.u = replace x-y indexed tokens with u - * - * First we need to examine replaces. For any replace op: - * - * 1. wipe out any insertions before op within that range. - * 2. Drop any replace op before that is contained completely within - * that range. - * 3. Throw exception upon boundary overlap with any previous replace. - * - * Then we can deal with inserts: - * - * 1. for any inserts to same index, combine even if not adjacent. - * 2. 
for any prior replace with same left boundary, combine this - * insert with replace and delete this replace. - * 3. throw exception if index in same range as previous replace - * - * Don't actually delete; make op null in list. Easier to walk list. - * Later we can throw as we add to index → op map. - * - * Note that I.2 R.2-2 will wipe out I.2 even though, technically, the - * inserted stuff would be before the replace range. But, if you - * add tokens in front of a method body '{' and then delete the method - * body, I think the stuff before the '{' you added should disappear too. - * - * Return a map from token index to operation. - */ + /// We need to combine operations and report invalid operations (like + /// overlapping replaces that are not completed nested). Inserts to + /// same index need to be combined etc... Here are the cases: + /// + /// I.i.u I.j.v leave alone, nonoverlapping + /// I.i.u I.i.v combine: Iivu + /// + /// R.i-j.u R.x-y.v | i-j in x-y delete first R + /// R.i-j.u R.i-j.v delete first R + /// R.i-j.u R.x-y.v | x-y in i-j ERROR + /// R.i-j.u R.x-y.v | boundaries overlap ERROR + /// + /// Delete special case of replace (text==null): + /// D.i-j.u D.x-y.v | boundaries overlap combine to max(min)..max(right) + /// + /// I.i.u R.x-y.v | i in (x+1)-y delete I (since insert before + /// we're not deleting i) + /// I.i.u R.x-y.v | i not in (x+1)-y leave alone, nonoverlapping + /// R.x-y.v I.i.u | i in x-y ERROR + /// R.x-y.v I.x.u R.x-y.uv (combine, delete I) + /// R.x-y.v I.i.u | i not in x-y leave alone, nonoverlapping + /// + /// I.i.u = insert u before op @ index i + /// R.x-y.u = replace x-y indexed tokens with u + /// + /// First we need to examine replaces. For any replace op: + /// + /// 1. wipe out any insertions before op within that range. + /// 2. Drop any replace op before that is contained completely within + /// that range. + /// 3. Throw exception upon boundary overlap with any previous replace. 
+ /// + /// Then we can deal with inserts: + /// + /// 1. for any inserts to same index, combine even if not adjacent. + /// 2. for any prior replace with same left boundary, combine this + /// insert with replace and delete this replace. + /// 3. throw exception if index in same range as previous replace + /// + /// Don't actually delete; make op null in list. Easier to walk list. + /// Later we can throw as we add to index → op map. + /// + /// Note that I.2 R.2-2 will wipe out I.2 even though, technically, the + /// inserted stuff would be before the replace range. But, if you + /// add tokens in front of a method body '{' and then delete the method + /// body, I think the stuff before the '{' you added should disappear too. + /// + /// Return a map from token index to operation. + /// final func reduceToSingleOperationPerIndex() throws -> Dictionary { let rewritesCount = rewrites.count @@ -361,7 +361,7 @@ public class TokenStreamRewriter { return x + y } - /** Get all operations before an index of a particular kind */ + /// Get all operations before an index of a particular kind final func getKindOfOps(_ rewrites: inout [RewriteOperation?], _ kind: T.Type, _ before: Int ) -> [Int] { @@ -378,16 +378,16 @@ public class TokenStreamRewriter { } - /** Our source stream */ + /// Our source stream internal var tokens: TokenStream - /** You may have multiple, named streams of rewrite operations. - * I'm calling these things "programs." - * Maps String (name) → rewrite (List) - */ + /// You may have multiple, named streams of rewrite operations. + /// I'm calling these things "programs." 
+ /// Maps String (name) → rewrite (List) + /// internal var programs: Dictionary //Array - /** Map String (program name) → Integer index */ + /// Map String (program name) → Integer index internal final var lastRewriteTokenIndexes: Dictionary public init(_ tokens: TokenStream) { @@ -405,10 +405,10 @@ public class TokenStreamRewriter { rollback(DEFAULT_PROGRAM_NAME, instructionIndex) } - /** Rollback the instruction stream for a program so that - * the indicated instruction (via instructionIndex) is no - * longer in the stream. UNTESTED! - */ + /// Rollback the instruction stream for a program so that + /// the indicated instruction (via instructionIndex) is no + /// longer in the stream. UNTESTED! + /// public func rollback(_ programName: String, _ instructionIndex: Int) { if let program = programs[programName] { program.rollback(instructionIndex) @@ -419,7 +419,7 @@ public class TokenStreamRewriter { deleteProgram(DEFAULT_PROGRAM_NAME) } - /** Reset the program so that no instructions exist */ + /// Reset the program so that no instructions exist public func deleteProgram(_ programName: String) { rollback(programName, TokenStreamRewriter.MIN_TOKEN_INDEX) } @@ -551,29 +551,29 @@ public class TokenStreamRewriter { return program } - /** Return the text from the original tokens altered per the - * instructions given to this rewriter. - */ + /// Return the text from the original tokens altered per the + /// instructions given to this rewriter. + /// public func getText() throws -> String { return try getText(DEFAULT_PROGRAM_NAME, Interval.of(0, tokens.size() - 1)) } - /** Return the text from the original tokens altered per the - * instructions given to this rewriter in programName. - */ + /// Return the text from the original tokens altered per the + /// instructions given to this rewriter in programName. 
+ /// public func getText(_ programName: String) throws -> String { return try getText(programName, Interval.of(0, tokens.size() - 1)) } - /** Return the text associated with the tokens in the interval from the - * original token stream but with the alterations given to this rewriter. - * The interval refers to the indexes in the original token stream. - * We do not alter the token stream in any way, so the indexes - * and intervals are still consistent. Includes any operations done - * to the first and last token in the interval. So, if you did an - * insertBefore on the first token, you would get that insertion. - * The same is true if you do an insertAfter the stop token. - */ + /// Return the text associated with the tokens in the interval from the + /// original token stream but with the alterations given to this rewriter. + /// The interval refers to the indexes in the original token stream. + /// We do not alter the token stream in any way, so the indexes + /// and intervals are still consistent. Includes any operations done + /// to the first and last token in the interval. So, if you did an + /// insertBefore on the first token, you would get that insertion. + /// The same is true if you do an insertAfter the stop token. + /// public func getText(_ interval: Interval) throws -> String { return try getText(DEFAULT_PROGRAM_NAME, interval) } diff --git a/runtime/Swift/Sources/Antlr4/UnbufferedCharStream.swift b/runtime/Swift/Sources/Antlr4/UnbufferedCharStream.swift new file mode 100644 index 000000000..900a62d32 --- /dev/null +++ b/runtime/Swift/Sources/Antlr4/UnbufferedCharStream.swift @@ -0,0 +1,385 @@ +/* + * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ + +import Foundation + + +/** Do not buffer up the entire char stream. 
It does keep a small buffer
+ * for efficiency and also buffers while a mark exists (set by the
+ * lookahead prediction in parser). "Unbuffered" here refers to the fact
+ * that it doesn't buffer all data, not that it's on-demand loading of chars.
+ *
+ * Before 4.7, this class used the default environment encoding to convert
+ * bytes to UTF-16, and held the UTF-16 bytes in the buffer as chars.
+ *
+ * As of 4.7, the class uses UTF-8 by default, and the buffer holds Unicode
+ * code points in the buffer as ints.
+ */
+open class UnbufferedCharStream: CharStream {
+    /**
+     * A moving window buffer of the data being scanned. While there's a marker,
+     * we keep adding to buffer. Otherwise, {@link #consume consume()} resets so
+     * we start filling at index 0 again.
+     */
+    internal var data: [Int]
+
+    /**
+     * The number of characters currently in {@link #data data}.
+     *
+     *

      This is not the buffer capacity, that's {@code data.length}.

      + */ + internal var n = 0 + + /** + * 0..n-1 index into {@link #data data} of next character. + * + *

      The {@code LA(1)} character is {@code data[p]}. If {@code p == n}, we are + * out of buffered characters.

      + */ + internal var p = 0 + + /** + * Count up with {@link #mark mark()} and down with + * {@link #release release()}. When we {@code release()} the last mark, + * {@code numMarkers} reaches 0 and we reset the buffer. Copy + * {@code data[p]..data[n-1]} to {@code data[0]..data[(n-1)-p]}. + */ + internal var numMarkers = 0 + + /** + * This is the {@code LA(-1)} character for the current position. + */ + internal var lastChar = -1 + + /** + * When {@code numMarkers > 0}, this is the {@code LA(-1)} character for the + * first character in {@link #data data}. Otherwise, this is unspecified. + */ + internal var lastCharBufferStart = 0 + + /** + * Absolute character index. It's the index of the character about to be + * read via {@code LA(1)}. Goes from 0 to the number of characters in the + * entire stream, although the stream size is unknown before the end is + * reached. + */ + internal var currentCharIndex = 0 + + internal let input: InputStream + private var unicodeIterator: UnicodeScalarStreamIterator + + + /** The name or source of this char stream. */ + public var name: String = "" + + public init(_ input: InputStream, _ bufferSize: Int = 256) { + self.input = input + self.data = [Int](repeating: 0, count: bufferSize) + let si = UInt8StreamIterator(input) + self.unicodeIterator = UnicodeScalarStreamIterator(si) + } + + public func consume() throws { + if try LA(1) == CommonToken.EOF { + throw ANTLRError.illegalState(msg: "cannot consume EOF") + } + + // buf always has at least data[p==0] in this method due to ctor + lastChar = data[p] // track last char for LA(-1) + + if p == n - 1 && numMarkers == 0 { + n = 0 + p = -1 // p++ will leave this at 0 + lastCharBufferStart = lastChar + } + + p += 1 + currentCharIndex += 1 + sync(1) + } + + /** + * Make sure we have 'need' elements from current position {@link #p p}. + * Last valid {@code p} index is {@code data.length-1}. {@code p+need-1} is + * the char index 'need' elements ahead. 
If we need 1 element,
+     * {@code (p+1-1)==p} must be less than {@code data.length}.
+     */
+    internal func sync(_ want: Int) {
+        let need = (p + want - 1) - n + 1 // how many more elements we need?
+        if need > 0 {
+            fill(need)
+        }
+    }
+
+    /**
+     * Add {@code n} characters to the buffer. Returns the number of characters
+     * actually added to the buffer. If the return value is less than {@code n},
+     * then EOF was reached before {@code n} characters could be added.
+     */
+    @discardableResult internal func fill(_ toAdd: Int) -> Int {
+        for i in 0 ..< toAdd {
+            // Stop early once EOF is already buffered; `i` chars were added.
+            if n > 0 && data[n - 1] == CommonToken.EOF {
+                return i
+            }
+
+            guard let c = nextChar() else {
+                return i
+            }
+            add(c)
+        }
+
+        // Return the requested count (all `toAdd` chars were added), NOT the
+        // instance buffer size `n`: `n` also counts characters that were
+        // already buffered before this call, which violates the documented
+        // "number of characters actually added" contract.
+        return toAdd
+    }
+
+    /**
+     * Override to provide different source of characters than
+     * {@link #input input}.
+     */
+    internal func nextChar() -> Int? {
+        if let next = unicodeIterator.next() {
+            return Int(next.value)
+        }
+        // nil on both EOF and decode error; the caller (fill) treats both as
+        // "no more input". Check unicodeIterator.hasErrorOccurred to tell
+        // them apart if needed.
+        return nil
+    }
+
+    internal func add(_ c: Int) {
+        // Grow geometrically (double) when the window is full.
+        if n >= data.count {
+            data += [Int](repeating: 0, count: data.count)
+        }
+        data[n] = c
+        n += 1
+    }
+
+    /// Lookahead: LA(1) is the next character, LA(-1) the last one consumed.
+    /// (Leftover debug wrapper that printed every lookahead call removed;
+    /// the former private LA_ body is inlined here.)
+    public func LA(_ i: Int) throws -> Int {
+        if i == -1 {
+            return lastChar // special case
+        }
+        sync(i)
+        let index = p + i - 1
+        if index < 0 {
+            throw ANTLRError.indexOutOfBounds(msg: "")
+        }
+        if index >= n {
+            return CommonToken.EOF
+        }
+        return data[index]
+    }
+
+    /**
+     * Return a marker that we can release later.
+     *
+     *

      The specific marker value used for this class allows for some level of + * protection against misuse where {@code seek()} is called on a mark or + * {@code release()} is called in the wrong order.

      + */ + public func mark() -> Int { + if numMarkers == 0 { + lastCharBufferStart = lastChar + } + + let mark = -numMarkers - 1 + numMarkers += 1 + return mark + } + + /** Decrement number of markers, resetting buffer if we hit 0. + * @param marker + */ + public func release(_ marker: Int) throws { + let expectedMark = -numMarkers + if marker != expectedMark { + preconditionFailure("release() called with an invalid marker.") + } + + numMarkers -= 1 + if numMarkers == 0 && p > 0 { + // release buffer when we can, but don't do unnecessary work + + // Copy data[p]..data[n-1] to data[0]..data[(n-1)-p], reset ptrs + // p is last valid char; move nothing if p==n as we have no valid char + let dataCapacity = data.capacity + data = Array(data[p ..< n]) + data += [Int](repeating: 0, count: dataCapacity - (n - p)) + precondition(data.capacity == dataCapacity) + n = n - p + p = 0 + lastCharBufferStart = lastChar + } + } + + public func index() -> Int { + return currentCharIndex + } + + /** Seek to absolute character index, which might not be in the current + * sliding window. Move {@code p} to {@code index-bufferStartIndex}. 
+ */ + public func seek(_ index_: Int) throws { + var index = index_ + + if index == currentCharIndex { + return + } + + if index > currentCharIndex { + sync(index - currentCharIndex) + index = min(index, getBufferStartIndex() + n - 1) + } + + // index == to bufferStartIndex should set p to 0 + let i = index - getBufferStartIndex() + if i < 0 { + throw ANTLRError.illegalArgument(msg: "cannot seek to negative index \(index)") + } + else if i >= n { + let si = getBufferStartIndex() + let ei = si + n + let msg = "seek to index outside buffer: \(index) not in \(si)..\(ei)" + throw ANTLRError.unsupportedOperation(msg: msg) + } + + p = i + currentCharIndex = index + if p == 0 { + lastChar = lastCharBufferStart + } + else { + lastChar = data[p - 1] + } + } + + public func size() -> Int { + preconditionFailure("Unbuffered stream cannot know its size") + } + + public func getSourceName() -> String { + return name + } + + public func getText(_ interval: Interval) throws -> String { + if interval.a < 0 || interval.b < interval.a - 1 { + throw ANTLRError.illegalArgument(msg: "invalid interval") + } + + let bufferStartIndex = getBufferStartIndex() + if n > 0 && + data[n - 1] == CommonToken.EOF && + interval.a + interval.length() > bufferStartIndex + n { + throw ANTLRError.illegalArgument(msg: "the interval extends past the end of the stream") + } + + if interval.a < bufferStartIndex || interval.b >= bufferStartIndex + n { + let msg = "interval \(interval) outside buffer: \(bufferStartIndex)...\(bufferStartIndex + n - 1)" + throw ANTLRError.unsupportedOperation(msg: msg) + } + + if interval.b < interval.a { + // The EOF token. + return "" + } + + // convert from absolute to local index + let i = interval.a - bufferStartIndex + let j = interval.b - bufferStartIndex + + // Convert from Int codepoints to a String. + let codepoints = data[i ... j].map { Character(Unicode.Scalar($0)!) 
} + return String(codepoints) + } + + internal func getBufferStartIndex() -> Int { + return currentCharIndex - p + } +} + + +fileprivate struct UInt8StreamIterator: IteratorProtocol { + private static let bufferSize = 1024 + + private let stream: InputStream + private var buffer = [UInt8](repeating: 0, count: UInt8StreamIterator.bufferSize) + private var buffGen: IndexingIterator> + + var hasErrorOccurred = false + + + init(_ stream: InputStream) { + self.stream = stream + self.buffGen = buffer[0..<0].makeIterator() + } + + mutating func next() -> UInt8? { + if let result = buffGen.next() { + return result + } + + if hasErrorOccurred { + return nil + } + + switch stream.streamStatus { + case .notOpen, .writing, .closed: + preconditionFailure() + case .atEnd: + return nil + case .error: + hasErrorOccurred = true + return nil + case .opening, .open, .reading: + break + } + + let count = stream.read(&buffer, maxLength: buffer.capacity) + if count <= 0 { + hasErrorOccurred = true + return nil + } + + buffGen = buffer.prefix(count).makeIterator() + return buffGen.next() + } +} + + +fileprivate struct UnicodeScalarStreamIterator: IteratorProtocol { + private var streamIterator: UInt8StreamIterator + private var codec = Unicode.UTF8() + + var hasErrorOccurred = false + + init(_ streamIterator: UInt8StreamIterator) { + self.streamIterator = streamIterator + } + + mutating func next() -> Unicode.Scalar? 
{ + if streamIterator.hasErrorOccurred { + hasErrorOccurred = true + return nil + } + + switch codec.decode(&streamIterator) { + case .scalarValue(let scalar): + return scalar + case .emptyInput: + return nil + case .error: + hasErrorOccurred = true + return nil + } + } +} diff --git a/runtime/Swift/Sources/Antlr4/UnbufferedTokenStream.swift b/runtime/Swift/Sources/Antlr4/UnbufferedTokenStream.swift index 75b7008b2..da1c8476a 100644 --- a/runtime/Swift/Sources/Antlr4/UnbufferedTokenStream.swift +++ b/runtime/Swift/Sources/Antlr4/UnbufferedTokenStream.swift @@ -4,58 +4,58 @@ */ -public class UnbufferedTokenStream: TokenStream { +public class UnbufferedTokenStream: TokenStream { internal var tokenSource: TokenSource - /** - * A moving window buffer of the data being scanned. While there's a marker, - * we keep adding to buffer. Otherwise, {@link #consume consume()} resets so - * we start filling at index 0 again. - */ + /// + /// A moving window buffer of the data being scanned. While there's a marker, + /// we keep adding to buffer. Otherwise, _#consume consume()_ resets so + /// we start filling at index 0 again. + /// internal var tokens: [Token] - /** - * The number of tokens currently in {@link #tokens tokens}. - * - *

      This is not the buffer capacity, that's {@code tokens.length}.

      - */ + /// + /// The number of tokens currently in _#tokens tokens_. + /// + /// This is not the buffer capacity, that's `tokens.length`. + /// internal var n: Int - /** - * 0..n-1 index into {@link #tokens tokens} of next token. - * - *

      The {@code LT(1)} token is {@code tokens[p]}. If {@code p == n}, we are - * out of buffered tokens.

      - */ + /// + /// 0..n-1 index into _#tokens tokens_ of next token. + /// + /// The `LT(1)` token is `tokens[p]`. If `p == n`, we are + /// out of buffered tokens. + /// internal var p: Int = 0 - /** - * Count up with {@link #mark mark()} and down with - * {@link #release release()}. When we {@code release()} the last mark, - * {@code numMarkers} reaches 0 and we reset the buffer. Copy - * {@code tokens[p]..tokens[n-1]} to {@code tokens[0]..tokens[(n-1)-p]}. - */ + /// + /// Count up with _#mark mark()_ and down with + /// _#release release()_. When we `release()` the last mark, + /// `numMarkers` reaches 0 and we reset the buffer. Copy + /// `tokens[p]..tokens[n-1]` to `tokens[0]..tokens[(n-1)-p]`. + /// internal var numMarkers: Int = 0 - /** - * This is the {@code LT(-1)} token for the current position. - */ + /// + /// This is the `LT(-1)` token for the current position. + /// internal var lastToken: Token! - /** - * When {@code numMarkers > 0}, this is the {@code LT(-1)} token for the - * first token in {@link #tokens}. Otherwise, this is {@code null}. - */ + /// + /// When `numMarkers > 0`, this is the `LT(-1)` token for the + /// first token in _#tokens_. Otherwise, this is `null`. + /// internal var lastTokenBufferStart: Token! - /** - * Absolute token index. It's the index of the token about to be read via - * {@code LT(1)}. Goes from 0 to the number of tokens in the entire stream, - * although the stream size is unknown before the end is reached. - * - *

      This value is used to set the token indexes if the stream provides tokens - * that implement {@link org.antlr.v4.runtime.WritableToken}.

      - */ + /// + /// Absolute token index. It's the index of the token about to be read via + /// `LT(1)`. Goes from 0 to the number of tokens in the entire stream, + /// although the stream size is unknown before the end is reached. + /// + /// This value is used to set the token indexes if the stream provides tokens + /// that implement _org.antlr.v4.runtime.WritableToken_. + /// internal var currentTokenIndex: Int = 0 public convenience init(_ tokenSource: TokenSource) throws { @@ -148,10 +148,10 @@ public class UnbufferedTokenStream: TokenStream { try sync(1) } - /** Make sure we have 'need' elements from current position {@link #p p}. Last valid - * {@code p} index is {@code tokens.length-1}. {@code p+need-1} is the tokens index 'need' elements - * ahead. If we need 1 element, {@code (p+1-1)==p} must be less than {@code tokens.length}. - */ + /// Make sure we have 'need' elements from current position _#p p_. Last valid + /// `p` index is `tokens.length-1`. `p+need-1` is the tokens index 'need' elements + /// ahead. If we need 1 element, `(p+1-1)==p` must be less than `tokens.length`. + /// internal func sync(_ want: Int) throws { let need: Int = (p + want - 1) - n + 1 // how many more elements we need? if need > 0 { @@ -159,11 +159,11 @@ public class UnbufferedTokenStream: TokenStream { } } - /** - * Add {@code n} elements to the buffer. Returns the number of tokens - * actually added to the buffer. If the return value is less than {@code n}, - * then EOF was reached before {@code n} tokens could be added. - */ + /// + /// Add `n` elements to the buffer. Returns the number of tokens + /// actually added to the buffer. If the return value is less than `n`, + /// then EOF was reached before `n` tokens could be added. + /// @discardableResult internal func fill(_ n: Int) throws -> Int { for i in 0..: TokenStream { n += 1 } - /** - * Return a marker that we can release later. - * - *

      The specific marker value used for this class allows for some level of - * protection against misuse where {@code seek()} is called on a mark or - * {@code release()} is called in the wrong order.

      - */ + /// + /// Return a marker that we can release later. + /// + /// The specific marker value used for this class allows for some level of + /// protection against misuse where `seek()` is called on a mark or + /// `release()` is called in the wrong order. + /// public func mark() -> Int { if numMarkers == 0 { @@ -274,10 +274,7 @@ public class UnbufferedTokenStream: TokenStream { public func size() -> Int { - - RuntimeException("Unbuffered stream cannot know its size") - fatalError() - + fatalError("Unbuffered stream cannot know its size") } diff --git a/runtime/Swift/Sources/Antlr4/VocabularySingle.swift b/runtime/Swift/Sources/Antlr4/VocabularySingle.swift index 49abcb0ee..5b63ff4d4 100644 --- a/runtime/Swift/Sources/Antlr4/VocabularySingle.swift +++ b/runtime/Swift/Sources/Antlr4/VocabularySingle.swift @@ -4,24 +4,24 @@ */ -/** -* This class provides a default implementation of the {@link org.antlr.v4.runtime.Vocabulary} -* interface. -* -* @author Sam Harwell -*/ +/// +/// This class provides a default implementation of the _org.antlr.v4.runtime.Vocabulary_ +/// interface. +/// +/// - Author: Sam Harwell +/// public class Vocabulary: Hashable { private static let EMPTY_NAMES: [String?] = [String?](repeating: "", count: 1) - /** - * Gets an empty {@link org.antlr.v4.runtime.Vocabulary} instance. - * - *

      - * No literal or symbol names are assigned to token types, so - * {@link #getDisplayName(int)} returns the numeric value for all tokens - * except {@link org.antlr.v4.runtime.Token#EOF}.

      - */ + /// + /// Gets an empty _org.antlr.v4.runtime.Vocabulary_ instance. + /// + /// + /// No literal or symbol names are assigned to token types, so + /// _#getDisplayName(int)_ returns the numeric value for all tokens + /// except _org.antlr.v4.runtime.Token#EOF_. + /// public static let EMPTY_VOCABULARY: Vocabulary = Vocabulary(EMPTY_NAMES, EMPTY_NAMES, EMPTY_NAMES) @@ -31,59 +31,59 @@ public class Vocabulary: Hashable { private final var displayNames: [String?] - /** - * Constructs a new instance of {@link org.antlr.v4.runtime.Vocabulary} from the specified - * literal and symbolic token names. - * - * @param literalNames The literal names assigned to tokens, or {@code null} - * if no literal names are assigned. - * @param symbolicNames The symbolic names assigned to tokens, or - * {@code null} if no symbolic names are assigned. - * - * @see #getLiteralName(int) - * @see #getSymbolicName(int) - */ + /// + /// Constructs a new instance of _org.antlr.v4.runtime.Vocabulary_ from the specified + /// literal and symbolic token names. + /// + /// - SeeAlso: #getLiteralName(int) + /// - SeeAlso: #getSymbolicName(int) + /// - Parameter literalNames: The literal names assigned to tokens, or `null` + /// if no literal names are assigned. + /// - Parameter symbolicNames: The symbolic names assigned to tokens, or + /// `null` if no symbolic names are assigned. + /// + /// public convenience init(_ literalNames: [String?], _ symbolicNames: [String?]) { self.init(literalNames, symbolicNames, nil) } - /** - * Constructs a new instance of {@link org.antlr.v4.runtime.Vocabulary} from the specified - * literal, symbolic, and display token names. - * - * @param literalNames The literal names assigned to tokens, or {@code null} - * if no literal names are assigned. - * @param symbolicNames The symbolic names assigned to tokens, or - * {@code null} if no symbolic names are assigned. 
- * @param displayNames The display names assigned to tokens, or {@code null} - * to use the values in {@code literalNames} and {@code symbolicNames} as - * the source of display names, as described in - * {@link #getDisplayName(int)}. - * - * @see #getLiteralName(int) - * @see #getSymbolicName(int) - * @see #getDisplayName(int) - */ + /// + /// Constructs a new instance of _org.antlr.v4.runtime.Vocabulary_ from the specified + /// literal, symbolic, and display token names. + /// + /// - SeeAlso: #getLiteralName(int) + /// - SeeAlso: #getSymbolicName(int) + /// - SeeAlso: #getDisplayName(int) + /// - Parameter literalNames: The literal names assigned to tokens, or `null` + /// if no literal names are assigned. + /// - Parameter symbolicNames: The symbolic names assigned to tokens, or + /// `null` if no symbolic names are assigned. + /// - Parameter displayNames: The display names assigned to tokens, or `null` + /// to use the values in `literalNames` and `symbolicNames` as + /// the source of display names, as described in + /// _#getDisplayName(int)_. + /// + /// public init(_ literalNames: [String?]?, _ symbolicNames: [String?]?, _ displayNames: [String?]?) { self.literalNames = literalNames != nil ? literalNames! : Vocabulary.EMPTY_NAMES self.symbolicNames = symbolicNames != nil ? symbolicNames! : Vocabulary.EMPTY_NAMES self.displayNames = displayNames != nil ? displayNames! : Vocabulary.EMPTY_NAMES } - /** - * Returns a {@link org.antlr.v4.runtime.Vocabulary} instance from the specified set of token - * names. This method acts as a compatibility layer for the single - * {@code tokenNames} array generated by previous releases of ANTLR. - * - *

      The resulting vocabulary instance returns {@code null} for - * {@link #getLiteralName(int)} and {@link #getSymbolicName(int)}, and the - * value from {@code tokenNames} for the display names.

      - * - * @param tokenNames The token names, or {@code null} if no token names are - * available. - * @return A {@link org.antlr.v4.runtime.Vocabulary} instance which uses {@code tokenNames} for - * the display names of tokens. - */ + /// + /// Returns a _org.antlr.v4.runtime.Vocabulary_ instance from the specified set of token + /// names. This method acts as a compatibility layer for the single + /// `tokenNames` array generated by previous releases of ANTLR. + /// + /// The resulting vocabulary instance returns `null` for + /// _#getLiteralName(int)_ and _#getSymbolicName(int)_, and the + /// value from `tokenNames` for the display names. + /// + /// - Parameter tokenNames: The token names, or `null` if no token names are + /// available. + /// - Returns: A _org.antlr.v4.runtime.Vocabulary_ instance which uses `tokenNames` for + /// the display names of tokens. + /// public static func fromTokenNames(_ tokenNames: [String?]?) -> Vocabulary { guard let tokenNames = tokenNames , tokenNames.count > 0 else { return EMPTY_VOCABULARY diff --git a/runtime/Swift/Sources/Antlr4/atn/ATN.swift b/runtime/Swift/Sources/Antlr4/atn/ATN.swift index e67343fb6..bdfc938cd 100644 --- a/runtime/Swift/Sources/Antlr4/atn/ATN.swift +++ b/runtime/Swift/Sources/Antlr4/atn/ATN.swift @@ -1,6 +1,8 @@ +/// /// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. /// Use of this file is governed by the BSD 3-clause license that /// can be found in the LICENSE.txt file in the project root. +/// public class ATN { @@ -9,67 +11,87 @@ public class ATN { public final var states: Array = Array() + /// /// Each subrule/rule is a decision point and we must track them so we /// can go back later and build DFA predictors for them. This includes /// all the rules, subrules, optional blocks, ()+, ()* etc... + /// public final var decisionToState: Array = Array() + /// /// Maps from rule index to starting state number. + /// public final var ruleToStartState: [RuleStartState]! 
+ /// /// Maps from rule index to stop state number. + /// public final var ruleToStopState: [RuleStopState]! public final let modeNameToStartState: Dictionary = Dictionary() //LinkedHashMap(); + /// /// The type of the ATN. + /// public let grammarType: ATNType! + /// /// The maximum value for any symbol recognized by a transition in the ATN. + /// public let maxTokenType: Int + /// /// For lexer ATNs, this maps the rule index to the resulting token type. /// For parser ATNs, this maps the rule index to the generated bypass token /// type if the - /// {@link org.antlr.v4.runtime.atn.ATNDeserializationOptions#isGenerateRuleBypassTransitions} - /// deserialization option was specified; otherwise, this is {@code null}. + /// _org.antlr.v4.runtime.atn.ATNDeserializationOptions#isGenerateRuleBypassTransitions_ + /// deserialization option was specified; otherwise, this is `null`. + /// public final var ruleToTokenType: [Int]! - /// For lexer ATNs, this is an array of {@link org.antlr.v4.runtime.atn.LexerAction} objects which may + /// + /// For lexer ATNs, this is an array of _org.antlr.v4.runtime.atn.LexerAction_ objects which may /// be referenced by action transitions in the ATN. + /// public final var lexerActions: [LexerAction]! public final var modeToStartState: Array = Array() + /// /// Used for runtime deserialization of ATNs from strings + /// public init(_ grammarType: ATNType, _ maxTokenType: Int) { self.grammarType = grammarType self.maxTokenType = maxTokenType } - /// Compute the set of valid tokens that can occur starting in state {@code s}. - /// If {@code ctx} is null, the set of tokens will not include what can follow - /// the rule surrounding {@code s}. In other words, the set will be - /// restricted to tokens reachable staying within {@code s}'s rule. 
- public func nextTokens(_ s: ATNState, _ ctx: RuleContext?)throws -> IntervalSet { - let anal: LL1Analyzer = LL1Analyzer(self) - let next: IntervalSet = try anal.LOOK(s, ctx) + /// + /// Compute the set of valid tokens that can occur starting in state `s`. + /// If `ctx` is null, the set of tokens will not include what can follow + /// the rule surrounding `s`. In other words, the set will be + /// restricted to tokens reachable staying within `s`'s rule. + /// + public func nextTokens(_ s: ATNState, _ ctx: RuleContext?) -> IntervalSet { + let anal = LL1Analyzer(self) + let next = anal.LOOK(s, ctx) return next } - /// Compute the set of valid tokens that can occur starting in {@code s} and - /// staying in same rule. {@link org.antlr.v4.runtime.Token#EPSILON} is in set if we reach end of + /// + /// Compute the set of valid tokens that can occur starting in `s` and + /// staying in same rule. _org.antlr.v4.runtime.Token#EPSILON_ is in set if we reach end of /// rule. - public func nextTokens(_ s: ATNState) throws -> IntervalSet { + /// + public func nextTokens(_ s: ATNState) -> IntervalSet { if let nextTokenWithinRule = s.nextTokenWithinRule { return nextTokenWithinRule } - let intervalSet = try nextTokens(s, nil) + let intervalSet = nextTokens(s, nil) s.nextTokenWithinRule = intervalSet - try intervalSet.setReadonly(true) + try! intervalSet.setReadonly(true) return intervalSet } @@ -104,52 +126,52 @@ public class ATN { return decisionToState.count } + /// /// Computes the set of input symbols which could follow ATN state number - /// {@code stateNumber} in the specified full {@code context}. This method + /// `stateNumber` in the specified full `context`. This method /// considers the complete parser context, but does not evaluate semantic /// predicates (i.e. all predicates encountered during the calculation are /// assumed true). 
If a path in the ATN exists from the starting state to the - /// {@link org.antlr.v4.runtime.atn.RuleStopState} of the outermost context without matching any - /// symbols, {@link org.antlr.v4.runtime.Token#EOF} is added to the returned set. - /// - ///

      If {@code context} is {@code null}, it is treated as - /// {@link org.antlr.v4.runtime.ParserRuleContext#EMPTY}.

      - /// + /// _org.antlr.v4.runtime.atn.RuleStopState_ of the outermost context without matching any + /// symbols, _org.antlr.v4.runtime.Token#EOF_ is added to the returned set. + /// + /// If `context` is `null`, it is treated as + /// _org.antlr.v4.runtime.ParserRuleContext#EMPTY_. + /// /// - parameter stateNumber: the ATN state number /// - parameter context: the full parse context /// - returns: The set of potentially valid input symbols which could follow the /// specified state in the specified context. - /// - IllegalArgumentException if the ATN does not contain a state with - /// number {@code stateNumber} + /// - throws: _ANTLRError.illegalArgument_ if the ATN does not contain a state with + /// number `stateNumber` + /// public func getExpectedTokens(_ stateNumber: Int, _ context: RuleContext) throws -> IntervalSet { if stateNumber < 0 || stateNumber >= states.count { throw ANTLRError.illegalArgument(msg: "Invalid state number.") - /// throw IllegalArgumentException("Invalid state number."); } var ctx: RuleContext? = context - //TODO: s may be nil - let s: ATNState = states[stateNumber]! - var following: IntervalSet = try nextTokens(s) + let s = states[stateNumber]! + var following = nextTokens(s) if !following.contains(CommonToken.EPSILON) { return following } - let expected: IntervalSet = try IntervalSet() - try expected.addAll(following) - try expected.remove(CommonToken.EPSILON) + let expected = IntervalSet() + try! expected.addAll(following) + try! expected.remove(CommonToken.EPSILON) - while let ctxWrap = ctx , ctxWrap.invokingState >= 0 && following.contains(CommonToken.EPSILON) { - let invokingState: ATNState = states[ctxWrap.invokingState]! - let rt: RuleTransition = invokingState.transition(0) as! 
RuleTransition - following = try nextTokens(rt.followState) - try expected.addAll(following) - try expected.remove(CommonToken.EPSILON) + while let ctxWrap = ctx, ctxWrap.invokingState >= 0 && following.contains(CommonToken.EPSILON) { + let invokingState = states[ctxWrap.invokingState]! + let rt = invokingState.transition(0) as! RuleTransition + following = nextTokens(rt.followState) + try! expected.addAll(following) + try! expected.remove(CommonToken.EPSILON) ctx = ctxWrap.parent } if following.contains(CommonToken.EPSILON) { - try expected.add(CommonToken.EOF) + try! expected.add(CommonToken.EOF) } return expected diff --git a/runtime/Swift/Sources/Antlr4/atn/ATNConfig.swift b/runtime/Swift/Sources/Antlr4/atn/ATNConfig.swift index 9ee49ecd1..f91854531 100644 --- a/runtime/Swift/Sources/Antlr4/atn/ATNConfig.swift +++ b/runtime/Swift/Sources/Antlr4/atn/ATNConfig.swift @@ -1,53 +1,67 @@ +/// /// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. /// Use of this file is governed by the BSD 3-clause license that /// can be found in the LICENSE.txt file in the project root. +/// +/// /// A tuple: (ATN state, predicted alt, syntactic, semantic context). /// The syntactic context is a graph-structured stack node whose /// path(s) to the root is the rule invocation(s) /// chain used to arrive at the state. The semantic context is /// the tree of semantic predicates encountered before reaching /// an ATN state. +/// public class ATNConfig: Hashable, CustomStringConvertible { + /// /// This field stores the bit mask for implementing the - /// {@link #isPrecedenceFilterSuppressed} property as a bit within the - /// existing {@link #reachesIntoOuterContext} field. + /// _#isPrecedenceFilterSuppressed_ property as a bit within the + /// existing _#reachesIntoOuterContext_ field. 
+ /// private final let SUPPRESS_PRECEDENCE_FILTER: Int = 0x40000000 + /// /// The ATN state associated with this configuration + /// public final let state: ATNState + /// /// What alt (or lexer rule) is predicted by this configuration + /// public final let alt: Int + /// /// The stack of invoking states leading to the rule/states associated /// with this config. We track only those contexts pushed during /// execution of the ATN simulator. + /// public final var context: PredictionContext? + /// /// We cannot execute predicates dependent upon local context unless /// we know for sure we are in the correct context. Because there is /// no way to do this efficiently, we simply cannot evaluate /// dependent predicates unless we are in the rule that initially /// invokes the ATN simulator. - /// - ///

      + /// + /// /// closure() tracks the depth of how far we dip into the outer context: /// depth > 0. Note that it may not be totally accurate depth since I - /// don't ever decrement. TODO: make it a boolean then

      - /// - ///

      - /// For memory efficiency, the {@link #isPrecedenceFilterSuppressed} method + /// don't ever decrement. TODO: make it a boolean then + /// + /// + /// For memory efficiency, the _#isPrecedenceFilterSuppressed_ method /// is also backed by this field. Since the field is publicly accessible, the /// highest bit which would not cause the value to become negative is used to /// store this field. This choice minimizes the risk that code which only /// compares this value to 0 would be affected by the new purpose of the - /// flag. It also ensures the performance of the existing {@link org.antlr.v4.runtime.atn.ATNConfig} + /// flag. It also ensures the performance of the existing _org.antlr.v4.runtime.atn.ATNConfig_ /// constructors as well as certain operations like - /// {@link org.antlr.v4.runtime.atn.ATNConfigSet#add(org.antlr.v4.runtime.atn.ATNConfig, DoubleKeyMap)} method are - /// completely unaffected by the change.

      + /// _org.antlr.v4.runtime.atn.ATNConfigSet#add(org.antlr.v4.runtime.atn.ATNConfig, DoubleKeyMap)_ method are + /// __completely__ unaffected by the change. + /// public final var reachesIntoOuterContext: Int = 0 //=0 intital by janyou @@ -108,9 +122,11 @@ public class ATNConfig: Hashable, CustomStringConvertible { self.reachesIntoOuterContext = c.reachesIntoOuterContext } - /// This method gets the value of the {@link #reachesIntoOuterContext} field + /// + /// This method gets the value of the _#reachesIntoOuterContext_ field /// as it existed prior to the introduction of the - /// {@link #isPrecedenceFilterSuppressed} method. + /// _#isPrecedenceFilterSuppressed_ method. + /// public final func getOuterContextDepth() -> Int { return reachesIntoOuterContext & ~SUPPRESS_PRECEDENCE_FILTER } @@ -127,18 +143,19 @@ public class ATNConfig: Hashable, CustomStringConvertible { } } + /// /// An ATN configuration is equal to another if both have /// the same state, they predict the same alternative, and /// syntactic/semantic contexts are the same. 
+ /// public var hashValue: Int { - var hashCode: Int = MurmurHash.initialize(7) + var hashCode = MurmurHash.initialize(7) hashCode = MurmurHash.update(hashCode, state.stateNumber) hashCode = MurmurHash.update(hashCode, alt) hashCode = MurmurHash.update(hashCode, context) hashCode = MurmurHash.update(hashCode, semanticContext) - hashCode = MurmurHash.finish(hashCode, 4) - return hashCode + return MurmurHash.finish(hashCode, 4) } @@ -149,26 +166,21 @@ public class ATNConfig: Hashable, CustomStringConvertible { //return "MyClass \(string)" return toString(nil, true) } - public func toString(_ recog: Recognizer?, _ showAlt: Bool) -> String { + public func toString(_ recog: Recognizer?, _ showAlt: Bool) -> String { let buf: StringBuilder = StringBuilder() -// if ( state.ruleIndex>=0 ) { -// if ( recog!=null ) buf.append(recog.getRuleNames()[state.ruleIndex]+":"); -// else buf.append(state.ruleIndex+":"); -// } buf.append("(") buf.append(state) if showAlt { buf.append(",") buf.append(alt) } - //TODO: context can be nil ? + if context != nil { buf.append(",[") buf.append(context!) buf.append("]") } - //TODO: semanticContext can be nil ? - //if ( semanticContext != nil && semanticContext != SemanticContext.NONE ) { + if semanticContext != SemanticContext.NONE { buf.append(",") buf.append(semanticContext) @@ -186,10 +198,7 @@ public func ==(lhs: ATNConfig, rhs: ATNConfig) -> Bool { if lhs === rhs { return true } - //TODO : rhs nil? - /// else { if (other == nil) { - /// return false; - /// } + if (lhs is LexerATNConfig) && (rhs is LexerATNConfig) { return (lhs as! LexerATNConfig) == (rhs as! LexerATNConfig) diff --git a/runtime/Swift/Sources/Antlr4/atn/ATNConfigSet.swift b/runtime/Swift/Sources/Antlr4/atn/ATNConfigSet.swift index 91643513f..3c52bcd14 100644 --- a/runtime/Swift/Sources/Antlr4/atn/ATNConfigSet.swift +++ b/runtime/Swift/Sources/Antlr4/atn/ATNConfigSet.swift @@ -1,58 +1,72 @@ +/// /// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. 
/// Use of this file is governed by the BSD 3-clause license that /// can be found in the LICENSE.txt file in the project root. +/// -/// Specialized {@link java.util.Set}{@code <}{@link org.antlr.v4.runtime.atn.ATNConfig}{@code >} that can track +/// +/// Specialized _java.util.Set_`<`_org.antlr.v4.runtime.atn.ATNConfig_`>` that can track /// info about the set, with support for combining similar configurations using a /// graph-structured stack. -//: Set - +/// public class ATNConfigSet: Hashable, CustomStringConvertible { + /// /// The reason that we need this is because we don't want the hash map to use /// the standard hash code and equals. We need all configurations with the same - /// {@code (s,i,_,semctx)} to be equal. Unfortunately, this key effectively doubles + /// `(s,i,_,semctx)` to be equal. Unfortunately, this key effectively doubles /// the number of objects associated with ATNConfigs. The other solution is to /// use a hash table that lets us specify the equals/hashcode operation. + /// - /// Indicates that the set of configurations is read-only. Do not + /// + /// Indicates that the set of configurations is read-only. Do not /// allow any code to manipulate the set; DFA states will point at /// the sets and they must not change. This does not protect the other /// fields; in particular, conflictingAlts is set after /// we've made this readonly. - internal final var readonly: Bool = false + /// + internal final var readonly = false + /// /// All configs but hashed by (s, i, _, pi) not including context. Wiped out /// when we go readonly as this set becomes a DFA state. 
+ /// public final var configLookup: LookupDictionary + /// /// Track the elements as they are added to the set; supports get(i) - public final var configs: Array = Array() + /// + public final var configs = [ATNConfig]() // TODO: these fields make me pretty uncomfortable but nice to pack up info together, saves recomputation // TODO: can we track conflicts as they are added to save scanning configs later? - public final var uniqueAlt: Int = 0 + public final var uniqueAlt = 0 //TODO no default + /// /// Currently this is only used when we detect SLL conflict; this does /// not necessarily represent the ambiguous alternatives. In fact, /// I should also point out that this seems to include predicated alternatives /// that have predicates that evaluate to false. Computed in computeTargetState(). + /// internal final var conflictingAlts: BitSet? // Used in parser and lexer. In lexer, it indicates we hit a pred // while computing a closure operation. Don't make a DFA state from this. - public final var hasSemanticContext: Bool = false + public final var hasSemanticContext = false //TODO no default - public final var dipsIntoOuterContext: Bool = false + public final var dipsIntoOuterContext = false //TODO no default + /// /// Indicates that this configuration set is part of a full context /// LL prediction. It will be used to determine how to merge $. With SLL /// it's a wildcard whereas it is not for LL context merge. + /// public final var fullCtx: Bool - private var cachedHashCode: Int = -1 + private var cachedHashCode = -1 public init(_ fullCtx: Bool) { configLookup = LookupDictionary() @@ -62,9 +76,9 @@ public class ATNConfigSet: Hashable, CustomStringConvertible { self.init(true) } - public convenience init(_ old: ATNConfigSet) throws { + public convenience init(_ old: ATNConfigSet) { self.init(old.fullCtx) - try addAll(old) + try! 
addAll(old) self.uniqueAlt = old.uniqueAlt self.conflictingAlts = old.conflictingAlts self.hasSemanticContext = old.hasSemanticContext @@ -78,21 +92,22 @@ public class ATNConfigSet: Hashable, CustomStringConvertible { return try add(config, &mergeCache) } + /// /// Adding a new config means merging contexts with existing configs for - /// {@code (s, i, pi, _)}, where {@code s} is the - /// {@link org.antlr.v4.runtime.atn.ATNConfig#state}, {@code i} is the {@link org.antlr.v4.runtime.atn.ATNConfig#alt}, and - /// {@code pi} is the {@link org.antlr.v4.runtime.atn.ATNConfig#semanticContext}. We use - /// {@code (s,i,pi)} as key. - /// - ///

      This method updates {@link #dipsIntoOuterContext} and - /// {@link #hasSemanticContext} when necessary.

      + /// `(s, i, pi, _)`, where `s` is the + /// _org.antlr.v4.runtime.atn.ATNConfig#state_, `i` is the _org.antlr.v4.runtime.atn.ATNConfig#alt_, and + /// `pi` is the _org.antlr.v4.runtime.atn.ATNConfig#semanticContext_. We use + /// `(s,i,pi)` as key. + /// + /// This method updates _#dipsIntoOuterContext_ and + /// _#hasSemanticContext_ when necessary. + /// @discardableResult public final func add( _ config: ATNConfig, _ mergeCache: inout DoubleKeyMap?) throws -> Bool { if readonly { throw ANTLRError.illegalState(msg: "This set is readonly") - } if config.semanticContext != SemanticContext.NONE { @@ -109,10 +124,9 @@ public class ATNConfigSet: Hashable, CustomStringConvertible { return true } // a previous (s,i,pi,_), merge with it and save result - let rootIsWildcard: Bool = !fullCtx + let rootIsWildcard = !fullCtx - let merged: PredictionContext = - PredictionContext.merge(existing.context!, config.context!, rootIsWildcard, &mergeCache) + let merged = PredictionContext.merge(existing.context!, config.context!, rootIsWildcard, &mergeCache) // no need to check for existing.context, config.context in cache // since only way to create new graphs is "call rule" and here. We @@ -135,43 +149,42 @@ public class ATNConfigSet: Hashable, CustomStringConvertible { } + /// /// Return a List holding list of configs - public final func elements() -> Array { + /// + public final func elements() -> [ATNConfig] { return configs } public final func getStates() -> Set { - - let length = configs.count - var states: Set = Set(minimumCapacity: length) - for i in 0..(minimumCapacity: configs.count) + for config in configs { + states.insert(config.state) } return states } + /// /// Gets the complete set of represented alternatives for the configuration /// set. 
- /// + /// /// - returns: the set of represented alternatives in this configuration set - /// - /// - 4.3 - - public final func getAlts() throws -> BitSet { - let alts: BitSet = BitSet() - let length = configs.count - for i in 0.. BitSet { + let alts = BitSet() + for config in configs { + try! alts.set(config.alt) } return alts } - public final func getPredicates() -> Array { - var preds: Array = Array() - let length = configs.count - for i in 0.. [SemanticContext] { + var preds = [SemanticContext]() + for config in configs { + if config.semanticContext != SemanticContext.NONE { + preds.append(config.semanticContext) } } return preds @@ -184,22 +197,20 @@ public class ATNConfigSet: Hashable, CustomStringConvertible { public final func optimizeConfigs(_ interpreter: ATNSimulator) throws { if readonly { throw ANTLRError.illegalState(msg: "This set is readonly") - } if configLookup.isEmpty { return } - let length = configs.count - for i in 0.. Bool { - for c: ATNConfig in coll.configs { - try add(c) + for c in coll.configs { + try add(c) } return false } @@ -219,12 +230,9 @@ public class ATNConfigSet: Hashable, CustomStringConvertible { private var configsHashValue: Int { var hashCode = 1 for item in configs { - hashCode = Int.multiplyWithOverflow(3, hashCode).0 - hashCode = Int.addWithOverflow(hashCode, item.hashValue).0 - + hashCode = hashCode &* 3 &+ item.hashValue } return hashCode - } public final var count: Int { @@ -250,7 +258,6 @@ public class ATNConfigSet: Hashable, CustomStringConvertible { public final func clear() throws { if readonly { throw ANTLRError.illegalState(msg: "This set is readonly") - } configs.removeAll() cachedHashCode = -1 @@ -268,7 +275,7 @@ public class ATNConfigSet: Hashable, CustomStringConvertible { } public var description: String { - let buf: StringBuilder = StringBuilder() + let buf = StringBuilder() buf.append(elements().map({ $0.description })) if hasSemanticContext { buf.append(",hasSemanticContext=") @@ -291,27 +298,21 @@ public 
class ATNConfigSet: Hashable, CustomStringConvertible { return description } - // satisfy interface - // public func toArray() -> [ATNConfig] { - // return Array( configLookup.map{$0.config}) ; - // } - + /// /// override /// public func toArray(a : [T]) -> [T] { /// return configLookup.toArray(a); + /// private final func configHash(_ stateNumber: Int,_ context: PredictionContext?) -> Int{ - - var hashCode: Int = MurmurHash.initialize(7) + var hashCode = MurmurHash.initialize(7) hashCode = MurmurHash.update(hashCode, stateNumber) hashCode = MurmurHash.update(hashCode, context) - hashCode = MurmurHash.finish(hashCode, 2) - - return hashCode - + return MurmurHash.finish(hashCode, 2) } - public final func getConflictingAltSubsets() throws -> Array { + + public final func getConflictingAltSubsets() -> [BitSet] { let length = configs.count - let configToAlts: HashMap = HashMap(count: length) + let configToAlts = HashMap(count: length) for i in 0.. HashMap { + + public final func getStateToAltMap() -> HashMap { let length = configs.count - let m: HashMap = HashMap(count: length) //minimumCapacity: length) + let m = HashMap(count: length) for i in 0.. Set? { - var alts: Set = Set() - let length = configs.count - for i in 0..() + for config in configs { + alts.insert(config.alt) + } return alts } //for DiagnosticErrorListener - public final func getAltBitSet() throws -> BitSet { - let result: BitSet = BitSet() - let length = configs.count - for i in 0.. BitSet { + let result = BitSet() + for config in configs { + try! result.set(config.alt) } - return result } //LexerATNSimulator - public final var firstConfigWithRuleStopState: ATNConfig? { - let length = configs.count - for i in 0.. Int { - var alt: Int = ATN.INVALID_ALT_NUMBER - let length = configs.count - for i in 0.. 
Int { + var alt = ATN.INVALID_ALT_NUMBER + for config in configs { if alt == ATN.INVALID_ALT_NUMBER { - alt = configs[i].alt // found first alt - } else { - if configs[i].alt != alt { - return ATN.INVALID_ALT_NUMBER - } + alt = config.alt // found first alt + } else if config.alt != alt { + return ATN.INVALID_ALT_NUMBER } } return alt } - public final func removeAllConfigsNotInRuleStopState(_ mergeCache: inout DoubleKeyMap?,_ lookToEndOfRule: Bool,_ atn: ATN) throws -> ATNConfigSet { + + public final func removeAllConfigsNotInRuleStopState(_ mergeCache: inout DoubleKeyMap?,_ lookToEndOfRule: Bool,_ atn: ATN) -> ATNConfigSet { if PredictionMode.allConfigsInRuleStopStates(self) { return self } - let result: ATNConfigSet = ATNConfigSet(fullCtx) - let length = configs.count - for i in 0..?,_ parser: Parser,_ _outerContext: ParserRuleContext!) throws -> ATNConfigSet { - let configSet: ATNConfigSet = ATNConfigSet(fullCtx) - let length = configs.count - let statesFromAlt1: HashMap = HashMap(count: length) - for i in 0..(count: configs.count) + for config in configs { // handle alt 1 first - if configs[i].alt != 1 { + if config.alt != 1 { continue } - let updatedContext: SemanticContext? = try configs[i].semanticContext.evalPrecedence(parser, _outerContext) + let updatedContext = try config.semanticContext.evalPrecedence(parser, _outerContext) if updatedContext == nil { // the configuration was eliminated continue } - statesFromAlt1[configs[i].state.stateNumber] = configs[i].context - if updatedContext != configs[i].semanticContext { - try configSet.add(ATNConfig(configs[i], updatedContext!), &mergeCache) + statesFromAlt1[config.state.stateNumber] = config.context + if updatedContext != config.semanticContext { + try! configSet.add(ATNConfig(config, updatedContext!), &mergeCache) } else { - try configSet.add(configs[i],&mergeCache) + try! configSet.add(config, &mergeCache) } } - for i in 0..1 /// (basically a graph subtraction algorithm). - let context: PredictionContext? 
= statesFromAlt1[configs[i].state.stateNumber] - if context != nil && context == configs[i].context { + /// + let context = statesFromAlt1[config.state.stateNumber] + if context != nil && context == config.context { // eliminated continue } } - try configSet.add(configs[i], &mergeCache) + try! configSet.add(config, &mergeCache) } return configSet } - internal func getPredsForAmbigAlts(_ ambigAlts: BitSet, - _ nalts: Int) throws -> [SemanticContext?]? { - var altToPred: [SemanticContext?]? = [SemanticContext?](repeating: nil, count: nalts + 1) //new SemanticContext[nalts + 1]; - let length = configs.count - for i in 0.. [SemanticContext?]? { + var altToPred = [SemanticContext?](repeating: nil, count: nalts + 1) + for config in configs { + if try! ambigAlts.get(config.alt) { + altToPred[config.alt] = SemanticContext.or(altToPred[config.alt], config.semanticContext) } - var nPredAlts: Int = 0 - for i in 1...nalts { - if altToPred![i] == nil { - altToPred![i] = SemanticContext.NONE - } else { - if altToPred![i] != SemanticContext.NONE { - nPredAlts += 1 - } - } + } + var nPredAlts = 0 + for i in 1...nalts { + if altToPred[i] == nil { + altToPred[i] = SemanticContext.NONE } - - // // Optimize away p||p and p&&p TODO: optimize() was a no-op - // for (int i = 0; i < altToPred.length; i++) { - // altToPred[i] = altToPred[i].optimize(); - // } - - // nonambig alts are null in altToPred - if nPredAlts == 0 { - altToPred = nil + else if altToPred[i] != SemanticContext.NONE { + nPredAlts += 1 } + } - return altToPred + // // Optimize away p||p and p&&p TODO: optimize() was a no-op + // for (int i = 0; i < altToPred.length; i++) { + // altToPred[i] = altToPred[i].optimize(); + // } + // nonambig alts are null in altToPred + return (nPredAlts == 0 ? nil : altToPred) } - public final func getAltThatFinishedDecisionEntryRule() throws -> Int { - let alts: IntervalSet = try IntervalSet() - let length = configs.count - for i in 0.. 
0 || - (configs[i].state is RuleStopState && - configs[i].context!.hasEmptyPath()) { - try alts.add(configs[i].alt) + + public final func getAltThatFinishedDecisionEntryRule() -> Int { + let alts = IntervalSet() + for config in configs { + if config.getOuterContextDepth() > 0 || + (config.state is RuleStopState && + config.context!.hasEmptyPath()) { + try! alts.add(config.alt) } } if alts.size() == 0 { @@ -521,49 +508,48 @@ public class ATNConfigSet: Hashable, CustomStringConvertible { return alts.getMinElement() } + /// /// Walk the list of configurations and split them according to /// those that have preds evaluating to true/false. If no pred, assume /// true pred and include in succeeded set. Returns Pair of sets. - /// + /// /// Create a new set so as not to alter the incoming parameter. - /// + /// /// Assumption: the input stream has been restored to the starting point /// prediction, which is where predicates need to evaluate. + /// public final func splitAccordingToSemanticValidity( _ outerContext: ParserRuleContext, - _ evalSemanticContext:( SemanticContext,ParserRuleContext,Int,Bool) throws -> Bool) throws -> (ATNConfigSet, ATNConfigSet) { - let succeeded: ATNConfigSet = ATNConfigSet(fullCtx) - let failed: ATNConfigSet = ATNConfigSet(fullCtx) - let length = configs.count - for i in 0.. Bool) rethrows -> (ATNConfigSet, ATNConfigSet) { + let succeeded = ATNConfigSet(fullCtx) + let failed = ATNConfigSet(fullCtx) + for config in configs { + if config.semanticContext != SemanticContext.NONE { + let predicateEvaluationResult = try evalSemanticContext(config.semanticContext, outerContext, config.alt,fullCtx) + if predicateEvaluationResult { + try! succeeded.add(config) } else { - try succeeded.add(configs[i]) + try! failed.add(config) } + } else { + try! 
succeeded.add(config) } - return (succeeded, failed) + } + return (succeeded, failed) } - //public enum PredictionMode - public final func dupConfigsWithoutSemanticPredicates() throws -> ATNConfigSet { - let dup: ATNConfigSet = ATNConfigSet() - let length = configs.count - for i in 0.. ATNConfigSet { + let dup = ATNConfigSet() + for config in configs { + let c = ATNConfig(config, SemanticContext.NONE) + try! dup.add(c) } return dup } + public final var hasConfigInRuleStopState: Bool { - let length = configs.count - for i in 0.. = { var suuid = Array() suuid.append(ATNDeserializer.BASE_SERIALIZED_UUID) @@ -46,7 +58,9 @@ public class ATNDeserializer { }() + /// /// This is the current serialized UUID. + /// public static let SERIALIZED_UUID: UUID = { // SERIALIZED_UUID = ADDED_UNICODE_SMP; return UUID(uuidString: "59627784-3BE5-417A-B9EB-8131A7286089")! @@ -70,17 +84,19 @@ public class ATNDeserializer { } + /// /// Determines if a particular serialized representation of an ATN supports - /// a particular feature, identified by the {@link java.util.UUID} used for serializing + /// a particular feature, identified by the _java.util.UUID_ used for serializing /// the ATN at the time the feature was first introduced. - /// - /// - parameter feature: The {@link java.util.UUID} marking the first time the feature was + /// + /// - parameter feature: The _java.util.UUID_ marking the first time the feature was /// supported in the serialized ATN. - /// - parameter actualUuid: The {@link java.util.UUID} of the actual serialized ATN which is + /// - parameter actualUuid: The _java.util.UUID_ of the actual serialized ATN which is /// currently being deserialized. - /// - returns: {@code true} if the {@code actualUuid} value represents a - /// serialized ATN at or after the feature identified by {@code feature} was - /// introduced; otherwise, {@code false}. 
+ /// - returns: `true` if the `actualUuid` value represents a + /// serialized ATN at or after the feature identified by `feature` was + /// introduced; otherwise, `false`. + /// internal func isFeatureSupported(_ feature: UUID, _ actualUuid: UUID) -> Bool { let featureIndex: Int = ATNDeserializer.SUPPORTED_UUIDS.index(of: feature)! if featureIndex < 0 { @@ -101,26 +117,24 @@ public class ATNDeserializer { } var p: Int = 0 - let version: Int = data[p].unicodeValue //toInt(data[p++]); + let version = data[p].unicodeValue p += 1 if version != ATNDeserializer.SERIALIZED_VERSION { - - let reason: String = "Could not deserialize ATN with version \(version) (expected \(ATNDeserializer.SERIALIZED_VERSION))." - + let reason = "Could not deserialize ATN with version \(version) (expected \(ATNDeserializer.SERIALIZED_VERSION))." throw ANTLRError.unsupportedOperation(msg: reason) } let uuid: UUID = toUUID(data, p) p += 8 if !ATNDeserializer.SUPPORTED_UUIDS.contains(uuid) { - let reason: String = "Could not deserialize ATN with UUID \(uuid) (expected \(ATNDeserializer.SERIALIZED_UUID) or a legacy UUID)." + let reason = "Could not deserialize ATN with UUID \(uuid) (expected \(ATNDeserializer.SERIALIZED_UUID) or a legacy UUID)." throw ANTLRError.unsupportedOperation(msg: reason) } - let supportsPrecedencePredicates: Bool = isFeatureSupported(ATNDeserializer.ADDED_PRECEDENCE_TRANSITIONS, uuid) - let supportsLexerActions: Bool = isFeatureSupported(ATNDeserializer.ADDED_LEXER_ACTIONS, uuid) + let supportsPrecedencePredicates = isFeatureSupported(ATNDeserializer.ADDED_PRECEDENCE_TRANSITIONS, uuid) + let supportsLexerActions = isFeatureSupported(ATNDeserializer.ADDED_LEXER_ACTIONS, uuid) - let grammarType: ATNType = ATNType(rawValue: toInt(data[p]))! + let grammarType = ATNType(rawValue: toInt(data[p]))! 
p += 1 let maxTokenType: Int = toInt(data[p]) p += 1 @@ -129,12 +143,12 @@ public class ATNDeserializer { // // STATES // - var loopBackStateNumbers: Array<(LoopEndState, Int)> = Array<(LoopEndState, Int)>() - var endStateNumbers: Array<(BlockStartState, Int)> = Array<(BlockStartState, Int)>() - let nstates: Int = toInt(data[p]) + var loopBackStateNumbers = [(LoopEndState, Int)]() + var endStateNumbers = [(BlockStartState, Int)]() + let nstates = toInt(data[p]) p += 1 for _ in 0.. = Array() // First, deserialize sets with 16-bit arguments <= U+FFFF. - try readSets(data, &p, &sets, readUnicodeInt) + readSets(data, &p, &sets, readUnicodeInt) // Next, if the ATN was serialized with the Unicode SMP feature, // deserialize sets with 32-bit arguments <= U+10FFFF. if isFeatureSupported(ATNDeserializer.ADDED_UNICODE_SMP, uuid) { - try readSets(data, &p, &sets, readUnicodeInt32) + readSets(data, &p, &sets, readUnicodeInt32) } // @@ -532,23 +544,23 @@ public class ATNDeserializer { return result } - private func readSets(_ data: [Character], _ p: inout Int, _ sets: inout Array, _ readUnicode: ([Character], inout Int) -> Int) throws { - let nsets: Int = toInt(data[p]) + private func readSets(_ data: [Character], _ p: inout Int, _ sets: inout Array, _ readUnicode: ([Character], inout Int) -> Int) { + let nsets = toInt(data[p]) p += 1 for _ in 0.. = Array() - let nsets: Int = dict["nsets"] as! Int + var sets = [IntervalSet]() + let nsets = dict["nsets"] as! Int let intervalSet = dict["IntervalSet"] as! [Dictionary] for i in 0..] + let intervalsBuilder = setBuilder["Intervals"] as! [[String : Any]] for j in 0..] + let lexerActionsBuilder = dict["lexerActions"] as! [[String : Any]] if supportsLexerActions { - atn.lexerActions = [LexerAction](repeating: LexerAction(), count: lexerActionsBuilder.count) //[toInt(data[p++])]; + atn.lexerActions = [LexerAction](repeating: LexerAction(), count: lexerActionsBuilder.count) let length = atn.lexerActions.count for i in 0.. 
= Array() - for state: ATNState? in atn.states { + var legacyLexerActions = [LexerAction]() + for state in atn.states { if let state = state { let length = state.getNumberOfTransitions() for i in 0.. 0 { - let transition: Transition = atn.ruleToStartState[i].removeTransition(atn.ruleToStartState[i].getNumberOfTransitions() - 1) + let transition = atn.ruleToStartState[i].removeTransition(atn.ruleToStartState[i].getNumberOfTransitions() - 1) bypassStart.addTransition(transition) } @@ -971,7 +973,7 @@ public class ATNDeserializer { atn.ruleToStartState[i].addTransition(EpsilonTransition(bypassStart)) bypassStop.addTransition(EpsilonTransition(endState!)) - let matchState: ATNState = BasicState() + let matchState = BasicState() atn.addState(matchState) matchState.addTransition(AtomTransition(bypassStop, atn.ruleToTokenType[i])) bypassStart.addTransition(EpsilonTransition(matchState)) @@ -987,21 +989,25 @@ public class ATNDeserializer { } - /// Analyze the {@link org.antlr.v4.runtime.atn.StarLoopEntryState} states in the specified ATN to set - /// the {@link org.antlr.v4.runtime.atn.StarLoopEntryState#precedenceRuleDecision} field to the + /// + /// Analyze the _org.antlr.v4.runtime.atn.StarLoopEntryState_ states in the specified ATN to set + /// the _org.antlr.v4.runtime.atn.StarLoopEntryState#precedenceRuleDecision_ field to the /// correct value. - /// + /// /// - parameter atn: The ATN. + /// internal func markPrecedenceDecisions(_ atn: ATN) { - for state: ATNState? in atn.states { + for state in atn.states { if let state = state as? StarLoopEntryState { + /// /// We analyze the ATN to determine if this ATN decision state is the /// decision for the closure block that determines whether a /// precedence rule should continue or complete. 
+ /// if let stateRuleIndex = state.ruleIndex { if atn.ruleToStartState[stateRuleIndex].isPrecedenceRule { - let maybeLoopEndState: ATNState = state.transition(state.getNumberOfTransitions() - 1).target + let maybeLoopEndState = state.transition(state.getNumberOfTransitions() - 1).target if maybeLoopEndState is LoopEndState { if maybeLoopEndState.epsilonOnlyTransitions && maybeLoopEndState.transition(0).target is RuleStopState { state.precedenceRuleDecision = true @@ -1015,7 +1021,7 @@ public class ATNDeserializer { internal func verifyATN(_ atn: ATN) throws { // verify assumptions - for state: ATNState? in atn.states { + for state in atn.states { guard let state = state else { continue } @@ -1064,8 +1070,7 @@ public class ATNDeserializer { try checkCondition((state as! BlockEndState).startState != nil) } - if state is DecisionState { - let decisionState: DecisionState = state as! DecisionState + if let decisionState = state as? DecisionState { try checkCondition(decisionState.getNumberOfTransitions() <= 1 || decisionState.decision >= 0) } else { try checkCondition(state.getNumberOfTransitions() <= 1 || state is RuleStopState) @@ -1089,7 +1094,7 @@ public class ATNDeserializer { _ type: Int, _ src: Int, _ trg: Int, _ arg1: Int, _ arg2: Int, _ arg3: Int, _ sets: Array) throws -> Transition { - let target: ATNState = atn.states[trg]! + let target = atn.states[trg]! switch type { case Transition.EPSILON: return EpsilonTransition(target) case Transition.RANGE: @@ -1099,10 +1104,10 @@ public class ATNDeserializer { return RangeTransition(target, arg1, arg2) } case Transition.RULE: - let rt: RuleTransition = RuleTransition(atn.states[arg1] as! RuleStartState, arg2, arg3, target) + let rt = RuleTransition(atn.states[arg1] as! 
RuleStartState, arg2, arg3, target) return rt case Transition.PREDICATE: - let pt: PredicateTransition = PredicateTransition(target, arg1, arg2, arg3 != 0) + let pt = PredicateTransition(target, arg1, arg2, arg3 != 0) return pt case Transition.PRECEDENCE: return PrecedencePredicateTransition(target, arg1) @@ -1113,17 +1118,14 @@ public class ATNDeserializer { return AtomTransition(target, arg1) } case Transition.ACTION: - let a: ActionTransition = ActionTransition(target, arg1, arg2, arg3 != 0) - return a + return ActionTransition(target, arg1, arg2, arg3 != 0) + case Transition.SET: return SetTransition(target, sets[arg1]) case Transition.NOT_SET: return NotSetTransition(target, sets[arg1]) case Transition.WILDCARD: return WildcardTransition(target) default: throw ANTLRError.illegalState(msg: "The specified transition type is not valid.") - - } - } internal func stateFactory(_ type: Int, _ ruleIndex: Int) throws -> ATNState? { @@ -1177,12 +1179,6 @@ public class ATNDeserializer { case .type: return LexerTypeAction(data1) - - //default: - } - // let message : String = "The specified lexer action type \(type) is not valid." - // RuntimeException(message) - } } diff --git a/runtime/Swift/Sources/Antlr4/atn/ATNSimulator.swift b/runtime/Swift/Sources/Antlr4/atn/ATNSimulator.swift index c8ef2da90..39631229b 100644 --- a/runtime/Swift/Sources/Antlr4/atn/ATNSimulator.swift +++ b/runtime/Swift/Sources/Antlr4/atn/ATNSimulator.swift @@ -1,60 +1,47 @@ +/// /// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. /// Use of this file is governed by the BSD 3-clause license that /// can be found in the LICENSE.txt file in the project root. +/// import Foundation open class ATNSimulator { - /// - Use {@link org.antlr.v4.runtime.atn.ATNDeserializer#SERIALIZED_VERSION} instead. - public static let SERIALIZED_VERSION: Int = { - return ATNDeserializer.SERIALIZED_VERSION - }() - - - /// This is the current serialized UUID. 
- /// - Use {@link org.antlr.v4.runtime.atn.ATNDeserializer#checkCondition(boolean)} instead. - public static let SERIALIZED_UUID: UUID = { - return (ATNDeserializer.SERIALIZED_UUID as UUID) - }() - - + /// /// Must distinguish between missing edge and edge we know leads nowhere - + /// public static let ERROR: DFAState = { let error = DFAState(ATNConfigSet()) error.stateNumber = Int.max return error }() - public var atn: ATN + public let atn: ATN + /// /// The context cache maps all PredictionContext objects that are equals() /// to a single cached copy. This cache is shared across all contexts /// in all ATNConfigs in all DFA states. We rebuild each ATNConfigSet /// to use only cached nodes/graphs in addDFAState(). We don't want to /// fill this during closure() since there are lots of contexts that /// pop up but are not used ever again. It also greatly slows down closure(). - /// - ///

      This cache makes a huge difference in memory and a little bit in speed. + /// + /// This cache makes a huge difference in memory and a little bit in speed. /// For the Java grammar on java.*, it dropped the memory requirements /// at the end from 25M to 16M. We don't store any of the full context /// graphs in the DFA because they are limited to local context only, /// but apparently there's a lot of repetition there as well. We optimize /// the config contexts before storing the config set in the DFA states - /// by literally rebuilding them with cached subgraphs only.

      - /// - ///

      I tried a cache for use during closure operations, that was + /// by literally rebuilding them with cached subgraphs only. + /// + /// I tried a cache for use during closure operations, that was /// whacked after each adaptivePredict(). It cost a little bit /// more time I think and doesn't save on the overall footprint - /// so it's not worth the complexity.

      + /// so it's not worth the complexity. + /// internal final var sharedContextCache: PredictionContextCache? - //static; { - //ERROR = DFAState(ATNConfigSet()); - // ERROR.stateNumber = Integer.MAX_VALUE; - //} - public init(_ atn: ATN, _ sharedContextCache: PredictionContextCache) { @@ -63,18 +50,20 @@ open class ATNSimulator { } open func reset() { - RuntimeException(" must overriden ") + fatalError(#function + " must be overridden") } + /// /// Clear the DFA cache used by the current instance. Since the DFA cache may /// be shared by multiple ATN simulators, this method may affect the /// performance (but not accuracy) of other parsers which are being used /// concurrently. - /// - /// - UnsupportedOperationException if the current instance does not + /// + /// - throws: ANTLRError.unsupportedOperation if the current instance does not /// support clearing the DFA. - /// - /// - 4.3 + /// + /// - since: 4.3 + /// open func clearDFA() throws { throw ANTLRError.unsupportedOperation(msg: "This ATN simulator does not support clearing the DFA. ") } @@ -90,71 +79,17 @@ open class ATNSimulator { //TODO: synced (sharedContextCache!) //synced (sharedContextCache!) { - let visited: HashMap = - HashMap() + let visited = HashMap() return PredictionContext.getCachedContext(context, sharedContextCache!, visited) - //} } - /// - Use {@link org.antlr.v4.runtime.atn.ATNDeserializer#deserialize} instead. - ////@Deprecated - public static func deserialize(_ data: [Character]) throws -> ATN { - return try ATNDeserializer().deserialize(data) - } - - /// - Use {@link org.antlr.v4.runtime.atn.ATNDeserializer#checkCondition(boolean)} instead. - ////@Deprecated - public static func checkCondition(_ condition: Bool) throws { - try ATNDeserializer().checkCondition(condition) - } - - /// - Use {@link org.antlr.v4.runtime.atn.ATNDeserializer#checkCondition(boolean, String)} instead. 
- ////@Deprecated - public static func checkCondition(_ condition: Bool, _ message: String) throws { - try ATNDeserializer().checkCondition(condition, message) - } - - /// - Use {@link org.antlr.v4.runtime.atn.ATNDeserializer#toInt} instead. - ////@Deprecated - public func toInt(_ c: Character) -> Int { - return toInt(c) - } - - /// - Use {@link org.antlr.v4.runtime.atn.ATNDeserializer#toInt32} instead. - ////@Deprecated - public func toInt32(_ data: [Character], _ offset: Int) -> Int { - return toInt32(data, offset) - } - - /// - Use {@link org.antlr.v4.runtime.atn.ATNDeserializer#toLong} instead. - ////@Deprecated - public func toLong(_ data: [Character], _ offset: Int) -> Int64 { - return toLong(data, offset) - } - - /// - Use {@link org.antlr.v4.runtime.atn.ATNDeserializer#toUUID} instead. - ////@Deprecated - //public class func toUUID(data : [Character], _ offset : Int) -> NSUUID { - //return ATNDeserializer.toUUID(data, offset); - //} - - /// - Use {@link org.antlr.v4.runtime.atn.ATNDeserializer#edgeFactory} instead. - ////@Deprecated - public static func edgeFactory(_ atn: ATN, _ type: Int, _ src: Int, _ trg: Int, _ arg1: Int, _ arg2: Int, _ arg3: Int, _ sets: Array) throws -> Transition { return try ATNDeserializer().edgeFactory(atn, type, src, trg, arg1, arg2, arg3, sets) } - - /// - Use {@link org.antlr.v4.runtime.atn.ATNDeserializer#stateFactory} instead. - ////@Deprecated - public static func stateFactory(_ type: Int, _ ruleIndex: Int) throws -> ATNState { - return try ATNDeserializer().stateFactory(type, ruleIndex)! - } - } diff --git a/runtime/Swift/Sources/Antlr4/atn/ATNState.swift b/runtime/Swift/Sources/Antlr4/atn/ATNState.swift index d6c7c2aa4..88cf1a183 100644 --- a/runtime/Swift/Sources/Antlr4/atn/ATNState.swift +++ b/runtime/Swift/Sources/Antlr4/atn/ATNState.swift @@ -1,68 +1,70 @@ +/// /// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. 
/// Use of this file is governed by the BSD 3-clause license that /// can be found in the LICENSE.txt file in the project root. +/// - +/// +/// /// The following images show the relation of states and -/// {@link org.antlr.v4.runtime.atn.ATNState#transitions} for various grammar constructs. -/// -///
        -/// -///
      • Solid edges marked with an ε indicate a required -/// {@link org.antlr.v4.runtime.atn.EpsilonTransition}.
      • -/// -///
      • Dashed edges indicate locations where any transition derived from -/// {@link org.antlr.v4.runtime.atn.Transition} might appear.
      • -/// -///
      • Dashed nodes are place holders for either a sequence of linked -/// {@link org.antlr.v4.runtime.atn.BasicState} states or the inclusion of a block representing a nested -/// construct in one of the forms below.
      • -/// -///
      • Nodes showing multiple outgoing alternatives with a {@code ...} support -/// any number of alternatives (one or more). Nodes without the {@code ...} only -/// support the exact number of alternatives shown in the diagram.
      • -/// -///
      -/// -///

      Basic Blocks

      -/// -///

      Rule

      -/// -/// -/// -///

      Block of 1 or more alternatives

      -/// -/// -/// -///

      Greedy Loops

      -/// -///

      Greedy Closure: {@code (...)*}

      -/// -/// -/// -///

      Greedy Positive Closure: {@code (...)+}

      -/// -/// -/// -///

      Greedy Optional: {@code (...)?}

      -/// -/// -/// -///

      Non-Greedy Loops

      -/// -///

      Non-Greedy Closure: {@code (...)*?}

      -/// -/// -/// -///

      Non-Greedy Positive Closure: {@code (...)+?}

      -/// -/// -/// -///

      Non-Greedy Optional: {@code (...)??}

      -/// -/// - +/// _org.antlr.v4.runtime.atn.ATNState#transitions_ for various grammar constructs. +/// +/// +/// * Solid edges marked with an ε indicate a required +/// _org.antlr.v4.runtime.atn.EpsilonTransition_. +/// +/// * Dashed edges indicate locations where any transition derived from +/// _org.antlr.v4.runtime.atn.Transition_ might appear. +/// +/// * Dashed nodes are place holders for either a sequence of linked +/// _org.antlr.v4.runtime.atn.BasicState_ states or the inclusion of a block representing a nested +/// construct in one of the forms below. +/// +/// * Nodes showing multiple outgoing alternatives with a `...` support +/// any number of alternatives (one or more). Nodes without the `...` only +/// support the exact number of alternatives shown in the diagram. +/// +/// +/// ## Basic Blocks +/// +/// ### Rule +/// +/// +/// +/// ## Block of 1 or more alternatives +/// +/// +/// +/// ## Greedy Loops +/// +/// ### Greedy Closure: `(...)*` +/// +/// +/// +/// ### Greedy Positive Closure: `(...)+` +/// +/// +/// +/// ### Greedy Optional: `(...)?` +/// +/// +/// +/// ## Non-Greedy Loops +/// +/// ### Non-Greedy Closure: `(...)*?` +/// +/// +/// +/// ### Non-Greedy Positive Closure: `(...)+?` +/// +/// +/// +/// ### Non-Greedy Optional: `(...)??` +/// +/// +/// +/// public class ATNState: Hashable, CustomStringConvertible { public static let INITIAL_NUM_TRANSITIONS: Int = 4 @@ -100,7 +102,9 @@ public class ATNState: Hashable, CustomStringConvertible { public static let INVALID_STATE_NUMBER: Int = -1 + /// /// Which ATN are we in? + /// public final var atn: ATN? = nil public final var stateNumber: Int = INVALID_STATE_NUMBER @@ -110,11 +114,15 @@ public class ATNState: Hashable, CustomStringConvertible { public final var epsilonOnlyTransitions: Bool = false + /// /// Track the transitions emanating from this ATN state. 
+ /// internal final var transitions: Array = Array() //Array(INITIAL_NUM_TRANSITIONS); + /// /// Used to cache lookahead during parsing, not used during construction + /// public final var nextTokenWithinRule: IntervalSet? @@ -175,8 +183,7 @@ public class ATNState: Hashable, CustomStringConvertible { } public func getStateType() -> Int { - RuntimeException(#function + " must be overridden") - return 0 + fatalError(#function + " must be overridden") } public final func onlyHasEpsilonTransitions() -> Bool { diff --git a/runtime/Swift/Sources/Antlr4/atn/ATNType.swift b/runtime/Swift/Sources/Antlr4/atn/ATNType.swift index 6faeb81a0..80cfab2e8 100644 --- a/runtime/Swift/Sources/Antlr4/atn/ATNType.swift +++ b/runtime/Swift/Sources/Antlr4/atn/ATNType.swift @@ -1,23 +1,27 @@ +/// /// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. /// Use of this file is governed by the BSD 3-clause license that /// can be found in the LICENSE.txt file in the project root. +/// +/// /// Represents the type of recognizer an ATN applies to. -/// +/// /// - Sam Harwell +/// public enum ATNType: Int { - /** - * A lexer grammar. - */ + /// + /// A lexer grammar. + /// case lexer = 0 - /** - * A parser grammar. - */ + /// + /// A parser grammar. + /// case parser } diff --git a/runtime/Swift/Sources/Antlr4/atn/AbstractPredicateTransition.swift b/runtime/Swift/Sources/Antlr4/atn/AbstractPredicateTransition.swift index 996e33dae..48af389e9 100644 --- a/runtime/Swift/Sources/Antlr4/atn/AbstractPredicateTransition.swift +++ b/runtime/Swift/Sources/Antlr4/atn/AbstractPredicateTransition.swift @@ -1,11 +1,15 @@ +/// /// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. /// Use of this file is governed by the BSD 3-clause license that /// can be found in the LICENSE.txt file in the project root. 
+/// -/// +/// +/// /// - Sam Harwell +/// public class AbstractPredicateTransition: Transition { diff --git a/runtime/Swift/Sources/Antlr4/atn/ActionTransition.swift b/runtime/Swift/Sources/Antlr4/atn/ActionTransition.swift index 9c0aef76c..b16de5725 100644 --- a/runtime/Swift/Sources/Antlr4/atn/ActionTransition.swift +++ b/runtime/Swift/Sources/Antlr4/atn/ActionTransition.swift @@ -1,6 +1,8 @@ +/// /// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. /// Use of this file is governed by the BSD 3-clause license that /// can be found in the LICENSE.txt file in the project root. +/// public final class ActionTransition: Transition, CustomStringConvertible { diff --git a/runtime/Swift/Sources/Antlr4/atn/AmbiguityInfo.swift b/runtime/Swift/Sources/Antlr4/atn/AmbiguityInfo.swift index 1698ee187..28ec4ba10 100644 --- a/runtime/Swift/Sources/Antlr4/atn/AmbiguityInfo.swift +++ b/runtime/Swift/Sources/Antlr4/atn/AmbiguityInfo.swift @@ -1,44 +1,51 @@ +/// /// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. /// Use of this file is governed by the BSD 3-clause license that /// can be found in the LICENSE.txt file in the project root. +/// +/// /// This class represents profiling event information for an ambiguity. /// Ambiguities are decisions where a particular input resulted in an SLL /// conflict, followed by LL prediction also reaching a conflict state /// (indicating a true ambiguity in the grammar). -/// -///

      +/// +/// /// This event may be reported during SLL prediction in cases where the /// conflicting SLL configuration set provides sufficient information to /// determine that the SLL conflict is truly an ambiguity. For example, if none /// of the ATN configurations in the conflicting SLL configuration set have /// traversed a global follow transition (i.e. -/// {@link org.antlr.v4.runtime.atn.ATNConfig#reachesIntoOuterContext} is 0 for all configurations), then +/// _org.antlr.v4.runtime.atn.ATNConfig#reachesIntoOuterContext_ is 0 for all configurations), then /// the result of SLL prediction for that input is known to be equivalent to the -/// result of LL prediction for that input.

      -/// -///

      +/// result of LL prediction for that input. +/// +/// /// In some cases, the minimum represented alternative in the conflicting LL /// configuration set is not equal to the minimum represented alternative in the /// conflicting SLL configuration set. Grammars and inputs which result in this -/// scenario are unable to use {@link org.antlr.v4.runtime.atn.PredictionMode#SLL}, which in turn means +/// scenario are unable to use _org.antlr.v4.runtime.atn.PredictionMode#SLL_, which in turn means /// they cannot use the two-stage parsing strategy to improve parsing performance -/// for that input.

      -/// +/// for that input. +/// /// - seealso: org.antlr.v4.runtime.atn.ParserATNSimulator#reportAmbiguity /// - seealso: org.antlr.v4.runtime.ANTLRErrorListener#reportAmbiguity -/// +/// /// - 4.3 +/// public class AmbiguityInfo: DecisionEventInfo { + /// /// The set of alternative numbers for this decision event that lead to a valid parse. + /// public var ambigAlts: BitSet - /// Constructs a new instance of the {@link org.antlr.v4.runtime.atn.AmbiguityInfo} class with the + /// + /// Constructs a new instance of the _org.antlr.v4.runtime.atn.AmbiguityInfo_ class with the /// specified detailed ambiguity information. - /// + /// /// - parameter decision: The decision number /// - parameter configs: The final configuration set identifying the ambiguous /// alternatives for the current input @@ -47,9 +54,10 @@ public class AmbiguityInfo: DecisionEventInfo { /// - parameter startIndex: The start index for the current prediction /// - parameter stopIndex: The index at which the ambiguity was identified during /// prediction - /// - parameter fullCtx: {@code true} if the ambiguity was identified during LL - /// prediction; otherwise, {@code false} if the ambiguity was identified + /// - parameter fullCtx: `true` if the ambiguity was identified during LL + /// prediction; otherwise, `false` if the ambiguity was identified /// during SLL prediction + /// public init(_ decision: Int, _ configs: ATNConfigSet, _ ambigAlts: BitSet, diff --git a/runtime/Swift/Sources/Antlr4/atn/ArrayPredictionContext.swift b/runtime/Swift/Sources/Antlr4/atn/ArrayPredictionContext.swift index 62d7421ca..9d0168036 100644 --- a/runtime/Swift/Sources/Antlr4/atn/ArrayPredictionContext.swift +++ b/runtime/Swift/Sources/Antlr4/atn/ArrayPredictionContext.swift @@ -1,23 +1,25 @@ +/// /// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. /// Use of this file is governed by the BSD 3-clause license that /// can be found in the LICENSE.txt file in the project root. 
+/// public class ArrayPredictionContext: PredictionContext { + /// /// Parent can be null only if full ctx mode and we make an array - /// from {@link #EMPTY} and non-empty. We merge {@link #EMPTY} by using null parent and - /// returnState == {@link #EMPTY_RETURN_STATE}. + /// from _#EMPTY_ and non-empty. We merge _#EMPTY_ by using null parent and + /// returnState == _#EMPTY_RETURN_STATE_. + /// public final var parents: [PredictionContext?] + /// /// Sorted for merge, no duplicates; if present, - /// {@link #EMPTY_RETURN_STATE} is always last. + /// _#EMPTY_RETURN_STATE_ is always last. + /// public final let returnStates: [Int] public convenience init(_ a: SingletonPredictionContext) { -// if a.parent == nil { -// // print("parent is nil") -// } - //self.init(new, PredictionContext[] {a.parent}, new, int[] {a.returnState}); let parents = [a.parent] self.init(parents, [a.returnState]) } @@ -51,12 +53,6 @@ public class ArrayPredictionContext: PredictionContext { return returnStates[index] } - // @Override - // public int findReturnState(int returnState) { - // return Arrays.binarySearch(returnStates, returnState); - // } - - override public var description: String { if isEmpty() { diff --git a/runtime/Swift/Sources/Antlr4/atn/AtomTransition.swift b/runtime/Swift/Sources/Antlr4/atn/AtomTransition.swift index 6aa33289f..b51c7147e 100644 --- a/runtime/Swift/Sources/Antlr4/atn/AtomTransition.swift +++ b/runtime/Swift/Sources/Antlr4/atn/AtomTransition.swift @@ -1,12 +1,18 @@ +/// /// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. /// Use of this file is governed by the BSD 3-clause license that /// can be found in the LICENSE.txt file in the project root. +/// +/// /// TODO: make all transitions sets? no, should remove set edges +/// public final class AtomTransition: Transition, CustomStringConvertible { + /// /// The token type or character value; or, signifies special label. 
+ /// public let label: Int public init(_ target: ATNState, _ label: Int) { @@ -21,8 +27,8 @@ public final class AtomTransition: Transition, CustomStringConvertible { } override - public func labelIntervalSet() throws -> IntervalSet? { - return try IntervalSet.of(label) + public func labelIntervalSet() -> IntervalSet? { + return IntervalSet(label) } override diff --git a/runtime/Swift/Sources/Antlr4/atn/BasicBlockStartState.swift b/runtime/Swift/Sources/Antlr4/atn/BasicBlockStartState.swift index 5b9e53691..e735a1416 100644 --- a/runtime/Swift/Sources/Antlr4/atn/BasicBlockStartState.swift +++ b/runtime/Swift/Sources/Antlr4/atn/BasicBlockStartState.swift @@ -1,11 +1,15 @@ +/// /// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. /// Use of this file is governed by the BSD 3-clause license that /// can be found in the LICENSE.txt file in the project root. +/// -/// +/// +/// /// - Sam Harwell +/// public final class BasicBlockStartState: BlockStartState { override diff --git a/runtime/Swift/Sources/Antlr4/atn/BasicState.swift b/runtime/Swift/Sources/Antlr4/atn/BasicState.swift index dcd77dcfd..5f40c3840 100644 --- a/runtime/Swift/Sources/Antlr4/atn/BasicState.swift +++ b/runtime/Swift/Sources/Antlr4/atn/BasicState.swift @@ -1,11 +1,15 @@ +/// /// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. /// Use of this file is governed by the BSD 3-clause license that /// can be found in the LICENSE.txt file in the project root. +/// -/// +/// +/// /// - Sam Harwell +/// public final class BasicState: ATNState { diff --git a/runtime/Swift/Sources/Antlr4/atn/BlockEndState.swift b/runtime/Swift/Sources/Antlr4/atn/BlockEndState.swift index d5c37e6fe..d198a63ef 100644 --- a/runtime/Swift/Sources/Antlr4/atn/BlockEndState.swift +++ b/runtime/Swift/Sources/Antlr4/atn/BlockEndState.swift @@ -1,10 +1,14 @@ +/// /// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. 
/// Use of this file is governed by the BSD 3-clause license that /// can be found in the LICENSE.txt file in the project root. +/// -/// Terminal node of a simple {@code (a|b|c)} block. +/// +/// Terminal node of a simple `(a|b|c)` block. +/// public final class BlockEndState: ATNState { public var startState: BlockStartState? diff --git a/runtime/Swift/Sources/Antlr4/atn/BlockStartState.swift b/runtime/Swift/Sources/Antlr4/atn/BlockStartState.swift index 21617cb27..b1dabb687 100644 --- a/runtime/Swift/Sources/Antlr4/atn/BlockStartState.swift +++ b/runtime/Swift/Sources/Antlr4/atn/BlockStartState.swift @@ -1,9 +1,13 @@ +/// /// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. /// Use of this file is governed by the BSD 3-clause license that /// can be found in the LICENSE.txt file in the project root. +/// -/// The start of a regular {@code (...)} block. +/// +/// The start of a regular `(...)` block. +/// public class BlockStartState: DecisionState { public var endState: BlockEndState? diff --git a/runtime/Swift/Sources/Antlr4/atn/ContextSensitivityInfo.swift b/runtime/Swift/Sources/Antlr4/atn/ContextSensitivityInfo.swift index 3fe4044f0..b13391173 100644 --- a/runtime/Swift/Sources/Antlr4/atn/ContextSensitivityInfo.swift +++ b/runtime/Swift/Sources/Antlr4/atn/ContextSensitivityInfo.swift @@ -1,29 +1,34 @@ +/// /// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. /// Use of this file is governed by the BSD 3-clause license that /// can be found in the LICENSE.txt file in the project root. +/// +/// /// This class represents profiling event information for a context sensitivity. /// Context sensitivities are decisions where a particular input resulted in an /// SLL conflict, but LL prediction produced a single unique alternative. -/// -///

      +/// +/// /// In some cases, the unique alternative identified by LL prediction is not /// equal to the minimum represented alternative in the conflicting SLL /// configuration set. Grammars and inputs which result in this scenario are -/// unable to use {@link org.antlr.v4.runtime.atn.PredictionMode#SLL}, which in turn means they cannot use +/// unable to use _org.antlr.v4.runtime.atn.PredictionMode#SLL_, which in turn means they cannot use /// the two-stage parsing strategy to improve parsing performance for that -/// input.

      -/// +/// input. +/// /// - seealso: org.antlr.v4.runtime.atn.ParserATNSimulator#reportContextSensitivity /// - seealso: org.antlr.v4.runtime.ANTLRErrorListener#reportContextSensitivity -/// +/// /// - 4.3 +/// public class ContextSensitivityInfo: DecisionEventInfo { - /// Constructs a new instance of the {@link org.antlr.v4.runtime.atn.ContextSensitivityInfo} class + /// + /// Constructs a new instance of the _org.antlr.v4.runtime.atn.ContextSensitivityInfo_ class /// with the specified detailed context sensitivity information. - /// + /// /// - parameter decision: The decision number /// - parameter configs: The final configuration set containing the unique /// alternative identified by full-context prediction @@ -31,6 +36,7 @@ public class ContextSensitivityInfo: DecisionEventInfo { /// - parameter startIndex: The start index for the current prediction /// - parameter stopIndex: The index at which the context sensitivity was /// identified during full-context prediction + /// public init(_ decision: Int, _ configs: ATNConfigSet, _ input: TokenStream, _ startIndex: Int, _ stopIndex: Int) { diff --git a/runtime/Swift/Sources/Antlr4/atn/DecisionEventInfo.swift b/runtime/Swift/Sources/Antlr4/atn/DecisionEventInfo.swift index 1424b60bd..7b6a3e49e 100644 --- a/runtime/Swift/Sources/Antlr4/atn/DecisionEventInfo.swift +++ b/runtime/Swift/Sources/Antlr4/atn/DecisionEventInfo.swift @@ -1,46 +1,62 @@ +/// /// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. /// Use of this file is governed by the BSD 3-clause license that /// can be found in the LICENSE.txt file in the project root. +/// +/// /// This is the base class for gathering detailed information about prediction /// events which occur during parsing. -/// +/// /// Note that we could record the parser call stack at the time this event /// occurred but in the presence of left recursive rules, the stack is kind of /// meaningless. 
It's better to look at the individual configurations for their -/// individual stacks. Of course that is a {@link org.antlr.v4.runtime.atn.PredictionContext} object +/// individual stacks. Of course that is a _org.antlr.v4.runtime.atn.PredictionContext_ object /// not a parse tree node and so it does not have information about the extent /// (start...stop) of the various subtrees. Examining the stack tops of all /// configurations provide the return states for the rule invocations. /// From there you can get the enclosing rule. -/// +/// /// - 4.3 +/// public class DecisionEventInfo { + /// /// The invoked decision number which this event is related to. - /// + /// /// - seealso: org.antlr.v4.runtime.atn.ATN#decisionToState + /// public let decision: Int + /// /// The configuration set containing additional information relevant to the - /// prediction state when the current event occurred, or {@code null} if no + /// prediction state when the current event occurred, or `null` if no /// additional information is relevant or available. + /// public let configs: ATNConfigSet? + /// /// The input token stream which is being parsed. + /// public let input: TokenStream + /// /// The token index in the input stream at which the current prediction was /// originally invoked. + /// public let startIndex: Int + /// /// The token index in the input stream at which the current event occurred. + /// public let stopIndex: Int - /// {@code true} if the current event occurred during LL prediction; - /// otherwise, {@code false} if the input occurred during SLL prediction. + /// + /// `true` if the current event occurred during LL prediction; + /// otherwise, `false` if the input occurred during SLL prediction. 
+ /// public let fullCtx: Bool public init(_ decision: Int, diff --git a/runtime/Swift/Sources/Antlr4/atn/DecisionInfo.swift b/runtime/Swift/Sources/Antlr4/atn/DecisionInfo.swift index 5bf2f27fd..ceb939099 100644 --- a/runtime/Swift/Sources/Antlr4/atn/DecisionInfo.swift +++ b/runtime/Swift/Sources/Antlr4/atn/DecisionInfo.swift @@ -1,178 +1,224 @@ +/// /// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. /// Use of this file is governed by the BSD 3-clause license that /// can be found in the LICENSE.txt file in the project root. +/// +/// /// This class contains profiling gathered for a particular decision. -/// -///

      +/// +/// /// Parsing performance in ANTLR 4 is heavily influenced by both static factors /// (e.g. the form of the rules in the grammar) and dynamic factors (e.g. the /// choice of input and the state of the DFA cache at the time profiling /// operations are started). For best results, gather and use aggregate /// statistics from a large sample of inputs representing the inputs expected in -/// production before using the results to make changes in the grammar.

      -/// +/// production before using the results to make changes in the grammar. +/// /// - 4.3 +/// public class DecisionInfo: CustomStringConvertible { - /// The decision number, which is an index into {@link org.antlr.v4.runtime.atn.ATN#decisionToState}. + /// + /// The decision number, which is an index into _org.antlr.v4.runtime.atn.ATN#decisionToState_. + /// public final var decision: Int - /// The total number of times {@link org.antlr.v4.runtime.atn.ParserATNSimulator#adaptivePredict} was + /// + /// The total number of times _org.antlr.v4.runtime.atn.ParserATNSimulator#adaptivePredict_ was /// invoked for this decision. + /// public var invocations: Int64 = 0 - /// The total time spent in {@link org.antlr.v4.runtime.atn.ParserATNSimulator#adaptivePredict} for + /// + /// The total time spent in _org.antlr.v4.runtime.atn.ParserATNSimulator#adaptivePredict_ for /// this decision, in nanoseconds. - /// - ///

      + /// + /// /// The value of this field contains the sum of differential results obtained - /// by {@link System#nanoTime()}, and is not adjusted to compensate for JIT + /// by _System#nanoTime()_, and is not adjusted to compensate for JIT /// and/or garbage collection overhead. For best accuracy, use a modern JVM /// implementation that provides precise results from - /// {@link System#nanoTime()}, and perform profiling in a separate process + /// _System#nanoTime()_, and perform profiling in a separate process /// which is warmed up by parsing the input prior to profiling. If desired, - /// call {@link org.antlr.v4.runtime.atn.ATNSimulator#clearDFA} to reset the DFA cache to its initial - /// state before starting the profiling measurement pass.

      + /// call _org.antlr.v4.runtime.atn.ATNSimulator#clearDFA_ to reset the DFA cache to its initial + /// state before starting the profiling measurement pass. + /// public var timeInPrediction: Int64 = 0 + /// /// The sum of the lookahead required for SLL prediction for this decision. /// Note that SLL prediction is used before LL prediction for performance - /// reasons even when {@link org.antlr.v4.runtime.atn.PredictionMode#LL} or - /// {@link org.antlr.v4.runtime.atn.PredictionMode#LL_EXACT_AMBIG_DETECTION} is used. + /// reasons even when _org.antlr.v4.runtime.atn.PredictionMode#LL_ or + /// _org.antlr.v4.runtime.atn.PredictionMode#LL_EXACT_AMBIG_DETECTION_ is used. + /// public var SLL_TotalLook: Int64 = 0 + /// /// Gets the minimum lookahead required for any single SLL prediction to /// complete for this decision, by reaching a unique prediction, reaching an /// SLL conflict state, or encountering a syntax error. + /// public var SLL_MinLook: Int64 = 0 + /// /// Gets the maximum lookahead required for any single SLL prediction to /// complete for this decision, by reaching a unique prediction, reaching an /// SLL conflict state, or encountering a syntax error. + /// public var SLL_MaxLook: Int64 = 0 - /// Gets the {@link org.antlr.v4.runtime.atn.LookaheadEventInfo} associated with the event where the - /// {@link #SLL_MaxLook} value was set. + /// + /// Gets the _org.antlr.v4.runtime.atn.LookaheadEventInfo_ associated with the event where the + /// _#SLL_MaxLook_ value was set. + /// public var SLL_MaxLookEvent: LookaheadEventInfo! + /// /// The sum of the lookahead required for LL prediction for this decision. /// Note that LL prediction is only used when SLL prediction reaches a /// conflict state. + /// public var LL_TotalLook: Int64 = 0 + /// /// Gets the minimum lookahead required for any single LL prediction to /// complete for this decision. 
An LL prediction completes when the algorithm /// reaches a unique prediction, a conflict state (for - /// {@link org.antlr.v4.runtime.atn.PredictionMode#LL}, an ambiguity state (for - /// {@link org.antlr.v4.runtime.atn.PredictionMode#LL_EXACT_AMBIG_DETECTION}, or a syntax error. + /// _org.antlr.v4.runtime.atn.PredictionMode#LL_, an ambiguity state (for + /// _org.antlr.v4.runtime.atn.PredictionMode#LL_EXACT_AMBIG_DETECTION_, or a syntax error. + /// public var LL_MinLook: Int64 = 0 + /// /// Gets the maximum lookahead required for any single LL prediction to /// complete for this decision. An LL prediction completes when the algorithm /// reaches a unique prediction, a conflict state (for - /// {@link org.antlr.v4.runtime.atn.PredictionMode#LL}, an ambiguity state (for - /// {@link org.antlr.v4.runtime.atn.PredictionMode#LL_EXACT_AMBIG_DETECTION}, or a syntax error. + /// _org.antlr.v4.runtime.atn.PredictionMode#LL_, an ambiguity state (for + /// _org.antlr.v4.runtime.atn.PredictionMode#LL_EXACT_AMBIG_DETECTION_, or a syntax error. + /// public var LL_MaxLook: Int64 = 0 - /// Gets the {@link org.antlr.v4.runtime.atn.LookaheadEventInfo} associated with the event where the - /// {@link #LL_MaxLook} value was set. + /// + /// Gets the _org.antlr.v4.runtime.atn.LookaheadEventInfo_ associated with the event where the + /// _#LL_MaxLook_ value was set. + /// public var LL_MaxLookEvent: LookaheadEventInfo! - /// A collection of {@link org.antlr.v4.runtime.atn.ContextSensitivityInfo} instances describing the + /// + /// A collection of _org.antlr.v4.runtime.atn.ContextSensitivityInfo_ instances describing the /// context sensitivities encountered during LL prediction for this decision. 
- /// + /// /// - seealso: org.antlr.v4.runtime.atn.ContextSensitivityInfo + /// public final var contextSensitivities: Array = Array() - /// A collection of {@link org.antlr.v4.runtime.atn.ErrorInfo} instances describing the parse errors - /// identified during calls to {@link org.antlr.v4.runtime.atn.ParserATNSimulator#adaptivePredict} for + /// + /// A collection of _org.antlr.v4.runtime.atn.ErrorInfo_ instances describing the parse errors + /// identified during calls to _org.antlr.v4.runtime.atn.ParserATNSimulator#adaptivePredict_ for /// this decision. - /// + /// /// - seealso: org.antlr.v4.runtime.atn.ErrorInfo + /// public final var errors: Array = Array() - /// A collection of {@link org.antlr.v4.runtime.atn.AmbiguityInfo} instances describing the + /// + /// A collection of _org.antlr.v4.runtime.atn.AmbiguityInfo_ instances describing the /// ambiguities encountered during LL prediction for this decision. - /// + /// /// - seealso: org.antlr.v4.runtime.atn.AmbiguityInfo + /// public final var ambiguities: Array = Array() - /// A collection of {@link org.antlr.v4.runtime.atn.PredicateEvalInfo} instances describing the + /// + /// A collection of _org.antlr.v4.runtime.atn.PredicateEvalInfo_ instances describing the /// results of evaluating individual predicates during prediction for this /// decision. - /// + /// /// - seealso: org.antlr.v4.runtime.atn.PredicateEvalInfo + /// public final var predicateEvals: Array = Array() + /// /// The total number of ATN transitions required during SLL prediction for /// this decision. An ATN transition is determined by the number of times the /// DFA does not contain an edge that is required for prediction, resulting /// in on-the-fly computation of that edge. - /// - ///

      + /// + /// /// If DFA caching of SLL transitions is employed by the implementation, ATN /// computation may cache the computed edge for efficient lookup during /// future parsing of this decision. Otherwise, the SLL parsing algorithm - /// will use ATN transitions exclusively.

      - /// + /// will use ATN transitions exclusively. + /// /// - seealso: #SLL_ATNTransitions /// - seealso: org.antlr.v4.runtime.atn.ParserATNSimulator#computeTargetState /// - seealso: org.antlr.v4.runtime.atn.LexerATNSimulator#computeTargetState + /// public var SLL_ATNTransitions: Int64 = 0 + /// /// The total number of DFA transitions required during SLL prediction for /// this decision. - /// - ///

      If the ATN simulator implementation does not use DFA caching for SLL - /// transitions, this value will be 0.

      - /// + /// + /// If the ATN simulator implementation does not use DFA caching for SLL + /// transitions, this value will be 0. + /// /// - seealso: org.antlr.v4.runtime.atn.ParserATNSimulator#getExistingTargetState /// - seealso: org.antlr.v4.runtime.atn.LexerATNSimulator#getExistingTargetState + /// public var SLL_DFATransitions: Int64 = 0 + /// /// Gets the total number of times SLL prediction completed in a conflict /// state, resulting in fallback to LL prediction. - /// - ///

      Note that this value is not related to whether or not - /// {@link org.antlr.v4.runtime.atn.PredictionMode#SLL} may be used successfully with a particular + /// + /// Note that this value is not related to whether or not + /// _org.antlr.v4.runtime.atn.PredictionMode#SLL_ may be used successfully with a particular /// grammar. If the ambiguity resolution algorithm applied to the SLL /// conflicts for this decision produce the same result as LL prediction for - /// this decision, {@link org.antlr.v4.runtime.atn.PredictionMode#SLL} would produce the same overall - /// parsing result as {@link org.antlr.v4.runtime.atn.PredictionMode#LL}.

      + /// this decision, _org.antlr.v4.runtime.atn.PredictionMode#SLL_ would produce the same overall + /// parsing result as _org.antlr.v4.runtime.atn.PredictionMode#LL_. + /// public var LL_Fallback: Int64 = 0 + /// /// The total number of ATN transitions required during LL prediction for /// this decision. An ATN transition is determined by the number of times the /// DFA does not contain an edge that is required for prediction, resulting /// in on-the-fly computation of that edge. - /// - ///

      + /// + /// /// If DFA caching of LL transitions is employed by the implementation, ATN /// computation may cache the computed edge for efficient lookup during /// future parsing of this decision. Otherwise, the LL parsing algorithm will - /// use ATN transitions exclusively.

      - /// + /// use ATN transitions exclusively. + /// /// - seealso: #LL_DFATransitions /// - seealso: org.antlr.v4.runtime.atn.ParserATNSimulator#computeTargetState /// - seealso: org.antlr.v4.runtime.atn.LexerATNSimulator#computeTargetState + /// public var LL_ATNTransitions: Int64 = 0 + /// /// The total number of DFA transitions required during LL prediction for /// this decision. - /// - ///

      If the ATN simulator implementation does not use DFA caching for LL - /// transitions, this value will be 0.

      - /// + /// + /// If the ATN simulator implementation does not use DFA caching for LL + /// transitions, this value will be 0. + /// /// - seealso: org.antlr.v4.runtime.atn.ParserATNSimulator#getExistingTargetState /// - seealso: org.antlr.v4.runtime.atn.LexerATNSimulator#getExistingTargetState + /// public var LL_DFATransitions: Int64 = 0 - /// Constructs a new instance of the {@link org.antlr.v4.runtime.atn.DecisionInfo} class to contain + /// + /// Constructs a new instance of the _org.antlr.v4.runtime.atn.DecisionInfo_ class to contain /// statistics for a particular decision. - /// + /// /// - parameter decision: The decision number + /// public init(_ decision: Int) { self.decision = decision } diff --git a/runtime/Swift/Sources/Antlr4/atn/DecisionState.swift b/runtime/Swift/Sources/Antlr4/atn/DecisionState.swift index 9a718c40e..10cfdad22 100644 --- a/runtime/Swift/Sources/Antlr4/atn/DecisionState.swift +++ b/runtime/Swift/Sources/Antlr4/atn/DecisionState.swift @@ -1,6 +1,8 @@ +/// /// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. /// Use of this file is governed by the BSD 3-clause license that /// can be found in the LICENSE.txt file in the project root. +/// public class DecisionState: ATNState { diff --git a/runtime/Swift/Sources/Antlr4/atn/DefaultATNConfig.swift b/runtime/Swift/Sources/Antlr4/atn/DefaultATNConfig.swift index ab70fcda4..b6d93be5b 100644 --- a/runtime/Swift/Sources/Antlr4/atn/DefaultATNConfig.swift +++ b/runtime/Swift/Sources/Antlr4/atn/DefaultATNConfig.swift @@ -1,6 +1,8 @@ +/// /// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. /// Use of this file is governed by the BSD 3-clause license that /// can be found in the LICENSE.txt file in the project root. 
+/// // // DefaultATNConfig.swift diff --git a/runtime/Swift/Sources/Antlr4/atn/EmptyPredictionContext.swift b/runtime/Swift/Sources/Antlr4/atn/EmptyPredictionContext.swift index 5df9cc536..57deedbb0 100644 --- a/runtime/Swift/Sources/Antlr4/atn/EmptyPredictionContext.swift +++ b/runtime/Swift/Sources/Antlr4/atn/EmptyPredictionContext.swift @@ -1,6 +1,8 @@ +/// /// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. /// Use of this file is governed by the BSD 3-clause license that /// can be found in the LICENSE.txt file in the project root. +/// public class EmptyPredictionContext: SingletonPredictionContext { diff --git a/runtime/Swift/Sources/Antlr4/atn/EpsilonTransition.swift b/runtime/Swift/Sources/Antlr4/atn/EpsilonTransition.swift index f0fe94f18..a3dbb7b2d 100644 --- a/runtime/Swift/Sources/Antlr4/atn/EpsilonTransition.swift +++ b/runtime/Swift/Sources/Antlr4/atn/EpsilonTransition.swift @@ -1,6 +1,8 @@ +/// /// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. /// Use of this file is governed by the BSD 3-clause license that /// can be found in the LICENSE.txt file in the project root. +/// public final class EpsilonTransition: Transition, CustomStringConvertible { @@ -17,12 +19,14 @@ public final class EpsilonTransition: Transition, CustomStringConvertible { super.init(target) } + /// /// - returns: the rule index of a precedence rule for which this transition is /// returning from, where the precedence value is 0; otherwise, -1. 
- /// + /// /// - seealso: org.antlr.v4.runtime.atn.ATNConfig#isPrecedenceFilterSuppressed() /// - seealso: org.antlr.v4.runtime.atn.ParserATNSimulator#applyPrecedenceFilter(org.antlr.v4.runtime.atn.ATNConfigSet) /// - 4.4.1 + /// public func outermostPrecedenceReturn() -> Int { return outermostPrecedenceReturnInside } diff --git a/runtime/Swift/Sources/Antlr4/atn/ErrorInfo.swift b/runtime/Swift/Sources/Antlr4/atn/ErrorInfo.swift index a6c44f044..556ba2a00 100644 --- a/runtime/Swift/Sources/Antlr4/atn/ErrorInfo.swift +++ b/runtime/Swift/Sources/Antlr4/atn/ErrorInfo.swift @@ -1,32 +1,38 @@ +/// /// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. /// Use of this file is governed by the BSD 3-clause license that /// can be found in the LICENSE.txt file in the project root. +/// +/// /// This class represents profiling event information for a syntax error /// identified during prediction. Syntax errors occur when the prediction /// algorithm is unable to identify an alternative which would lead to a /// successful parse. -/// +/// /// - seealso: org.antlr.v4.runtime.Parser#notifyErrorListeners(org.antlr.v4.runtime.Token, String, org.antlr.v4.runtime.RecognitionException) /// - seealso: org.antlr.v4.runtime.ANTLRErrorListener#syntaxError -/// +/// /// - 4.3 +/// public class ErrorInfo: DecisionEventInfo { - /// Constructs a new instance of the {@link org.antlr.v4.runtime.atn.ErrorInfo} class with the + /// + /// Constructs a new instance of the _org.antlr.v4.runtime.atn.ErrorInfo_ class with the /// specified detailed syntax error information. 
- /// + /// /// - parameter decision: The decision number /// - parameter configs: The final configuration set reached during prediction - /// prior to reaching the {@link org.antlr.v4.runtime.atn.ATNSimulator#ERROR} state + /// prior to reaching the _org.antlr.v4.runtime.atn.ATNSimulator#ERROR_ state /// - parameter input: The input token stream /// - parameter startIndex: The start index for the current prediction /// - parameter stopIndex: The index at which the syntax error was identified - /// - parameter fullCtx: {@code true} if the syntax error was identified during LL - /// prediction; otherwise, {@code false} if the syntax error was identified + /// - parameter fullCtx: `true` if the syntax error was identified during LL + /// prediction; otherwise, `false` if the syntax error was identified /// during SLL prediction + /// public init(_ decision: Int, _ configs: ATNConfigSet, _ input: TokenStream, _ startIndex: Int, _ stopIndex: Int, diff --git a/runtime/Swift/Sources/Antlr4/atn/LL1Analyzer.swift b/runtime/Swift/Sources/Antlr4/atn/LL1Analyzer.swift index a57490f00..3f594f234 100644 --- a/runtime/Swift/Sources/Antlr4/atn/LL1Analyzer.swift +++ b/runtime/Swift/Sources/Antlr4/atn/LL1Analyzer.swift @@ -1,11 +1,15 @@ +/// /// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. /// Use of this file is governed by the BSD 3-clause license that /// can be found in the LICENSE.txt file in the project root. +/// public class LL1Analyzer { + /// /// Special value added to the lookahead sets to indicate that we hit - /// a predicate during analysis if {@code seeThruPreds==false}. + /// a predicate during analysis if `seeThruPreds==false`. + /// public let HIT_PRED: Int = CommonToken.INVALID_TYPE public let atn: ATN @@ -14,26 +18,28 @@ public class LL1Analyzer { self.atn = atn } + /// /// Calculates the SLL(1) expected lookahead set for each outgoing transition - /// of an {@link org.antlr.v4.runtime.atn.ATNState}. 
The returned array has one element for each - /// outgoing transition in {@code s}. If the closure from transition - /// i leads to a semantic predicate before matching a symbol, the - /// element at index i of the result will be {@code null}. - /// + /// of an _org.antlr.v4.runtime.atn.ATNState_. The returned array has one element for each + /// outgoing transition in `s`. If the closure from transition + /// __i__ leads to a semantic predicate before matching a symbol, the + /// element at index __i__ of the result will be `null`. + /// /// - parameter s: the ATN state - /// - returns: the expected symbols for each outgoing transition of {@code s}. - public func getDecisionLookahead(_ s: ATNState?) throws -> [IntervalSet?]? { + /// - returns: the expected symbols for each outgoing transition of `s`. + /// + public func getDecisionLookahead(_ s: ATNState?) -> [IntervalSet?]? { guard let s = s else { return nil } let length = s.getNumberOfTransitions() - var look: [IntervalSet?] = [IntervalSet?](repeating: nil, count: length) + var look = [IntervalSet?](repeating: nil, count: length) for alt in 0.. = Set() - let seeThruPreds: Bool = false // fail to get lookahead upon pred - try _LOOK(s.transition(alt).target, nil, PredictionContext.EMPTY, + look[alt] = IntervalSet() + var lookBusy = Set() + let seeThruPreds = false // fail to get lookahead upon pred + _LOOK(s.transition(alt).target, nil, PredictionContext.EMPTY, look[alt]!, &lookBusy, BitSet(), seeThruPreds, false) // Wipe out lookahead for this alternative if we found nothing // or we had a predicate when we !seeThruPreds @@ -44,92 +50,94 @@ public class LL1Analyzer { return look } - /// Compute set of tokens that can follow {@code s} in the ATN in the - /// specified {@code ctx}. - /// - ///

      If {@code ctx} is {@code null} and the end of the rule containing - /// {@code s} is reached, {@link org.antlr.v4.runtime.Token#EPSILON} is added to the result set. - /// If {@code ctx} is not {@code null} and the end of the outermost rule is - /// reached, {@link org.antlr.v4.runtime.Token#EOF} is added to the result set.

      - /// + /// + /// Compute set of tokens that can follow `s` in the ATN in the + /// specified `ctx`. + /// + /// If `ctx` is `null` and the end of the rule containing + /// `s` is reached, _org.antlr.v4.runtime.Token#EPSILON_ is added to the result set. + /// If `ctx` is not `null` and the end of the outermost rule is + /// reached, _org.antlr.v4.runtime.Token#EOF_ is added to the result set. + /// /// - parameter s: the ATN state - /// - parameter ctx: the complete parser context, or {@code null} if the context + /// - parameter ctx: the complete parser context, or `null` if the context /// should be ignored - /// - /// - returns: The set of tokens that can follow {@code s} in the ATN in the - /// specified {@code ctx}. - public func LOOK(_ s: ATNState, _ ctx: RuleContext?) throws -> IntervalSet { - return try LOOK(s, nil, ctx) + /// + /// - returns: The set of tokens that can follow `s` in the ATN in the + /// specified `ctx`. + /// + public func LOOK(_ s: ATNState, _ ctx: RuleContext?) -> IntervalSet { + return LOOK(s, nil, ctx) } - /// Compute set of tokens that can follow {@code s} in the ATN in the - /// specified {@code ctx}. - /// - ///

      If {@code ctx} is {@code null} and the end of the rule containing - /// {@code s} is reached, {@link org.antlr.v4.runtime.Token#EPSILON} is added to the result set. - /// If {@code ctx} is not {@code null} and the end of the outermost rule is - /// reached, {@link org.antlr.v4.runtime.Token#EOF} is added to the result set.

      - /// + /// + /// Compute set of tokens that can follow `s` in the ATN in the + /// specified `ctx`. + /// + /// If `ctx` is `null` and the end of the rule containing + /// `s` is reached, _org.antlr.v4.runtime.Token#EPSILON_ is added to the result set. + /// If `ctx` is not `null` and the end of the outermost rule is + /// reached, _org.antlr.v4.runtime.Token#EOF_ is added to the result set. + /// /// - parameter s: the ATN state /// - parameter stopState: the ATN state to stop at. This can be a - /// {@link org.antlr.v4.runtime.atn.BlockEndState} to detect epsilon paths through a closure. - /// - parameter ctx: the complete parser context, or {@code null} if the context + /// _org.antlr.v4.runtime.atn.BlockEndState_ to detect epsilon paths through a closure. + /// - parameter ctx: the complete parser context, or `null` if the context /// should be ignored - /// - /// - returns: The set of tokens that can follow {@code s} in the ATN in the - /// specified {@code ctx}. + /// + /// - returns: The set of tokens that can follow `s` in the ATN in the + /// specified `ctx`. + /// - public func LOOK(_ s: ATNState, _ stopState: ATNState?, _ ctx: RuleContext?) throws -> IntervalSet { - let r: IntervalSet = try IntervalSet() - let seeThruPreds: Bool = true // ignore preds; get all lookahead - let lookContext: PredictionContext? = ctx != nil ? PredictionContext.fromRuleContext(s.atn!, ctx) : nil + public func LOOK(_ s: ATNState, _ stopState: ATNState?, _ ctx: RuleContext?) -> IntervalSet { + let r = IntervalSet() + let seeThruPreds = true // ignore preds; get all lookahead + let lookContext = ctx != nil ? PredictionContext.fromRuleContext(s.atn!, ctx) : nil var config = Set() - try _LOOK(s, stopState, lookContext, - r, &config, BitSet(), seeThruPreds, true) + _LOOK(s, stopState, lookContext, r, &config, BitSet(), seeThruPreds, true) return r } - /// Compute set of tokens that can follow {@code s} in the ATN in the - /// specified {@code ctx}. - /// - ///

      If {@code ctx} is {@code null} and {@code stopState} or the end of the - /// rule containing {@code s} is reached, {@link org.antlr.v4.runtime.Token#EPSILON} is added to - /// the result set. If {@code ctx} is not {@code null} and {@code addEOF} is - /// {@code true} and {@code stopState} or the end of the outermost rule is - /// reached, {@link org.antlr.v4.runtime.Token#EOF} is added to the result set.

      - /// + /// + /// Compute set of tokens that can follow `s` in the ATN in the + /// specified `ctx`. + /// + /// If `ctx` is `null` and `stopState` or the end of the + /// rule containing `s` is reached, _org.antlr.v4.runtime.Token#EPSILON_ is added to + /// the result set. If `ctx` is not `null` and `addEOF` is + /// `true` and `stopState` or the end of the outermost rule is + /// reached, _org.antlr.v4.runtime.Token#EOF_ is added to the result set. + /// /// - parameter s: the ATN state. /// - parameter stopState: the ATN state to stop at. This can be a - /// {@link org.antlr.v4.runtime.atn.BlockEndState} to detect epsilon paths through a closure. - /// - parameter ctx: The outer context, or {@code null} if the outer context should + /// _org.antlr.v4.runtime.atn.BlockEndState_ to detect epsilon paths through a closure. + /// - parameter ctx: The outer context, or `null` if the outer context should /// not be used. /// - parameter look: The result lookahead set. /// - parameter lookBusy: A set used for preventing epsilon closures in the ATN /// from causing a stack overflow. Outside code should pass - /// {@code new HashSet} for this argument. + /// `new HashSet` for this argument. /// - parameter calledRuleStack: A set used for preventing left recursion in the /// ATN from causing a stack overflow. Outside code should pass - /// {@code new BitSet()} for this argument. - /// - parameter seeThruPreds: {@code true} to true semantic predicates as - /// implicitly {@code true} and "see through them", otherwise {@code false} - /// to treat semantic predicates as opaque and add {@link #HIT_PRED} to the + /// `new BitSet()` for this argument. + /// - parameter seeThruPreds: `true` to true semantic predicates as + /// implicitly `true` and "see through them", otherwise `false` + /// to treat semantic predicates as opaque and add _#HIT_PRED_ to the /// result if one is encountered. 
- /// - parameter addEOF: Add {@link org.antlr.v4.runtime.Token#EOF} to the result if the end of the - /// outermost context is reached. This parameter has no effect if {@code ctx} - /// is {@code null}. + /// - parameter addEOF: Add _org.antlr.v4.runtime.Token#EOF_ to the result if the end of the + /// outermost context is reached. This parameter has no effect if `ctx` + /// is `null`. + /// internal func _LOOK(_ s: ATNState, _ stopState: ATNState?, _ ctx: PredictionContext?, _ look: IntervalSet, _ lookBusy: inout Set, _ calledRuleStack: BitSet, - _ seeThruPreds: Bool, _ addEOF: Bool) throws { + _ seeThruPreds: Bool, + _ addEOF: Bool) { // print ("_LOOK(\(s.stateNumber), ctx=\(ctx)"); - //TODO var c : ATNConfig = ATNConfig(s, 0, ctx); - if s.description == "273" { - var s = 0 - } - var c: ATNConfig = ATNConfig(s, 0, ctx) + let c = ATNConfig(s, 0, ctx) if lookBusy.contains(c) { return } else { @@ -138,12 +146,12 @@ public class LL1Analyzer { if s == stopState { guard let ctx = ctx else { - try look.add(CommonToken.EPSILON) + try! look.add(CommonToken.EPSILON) return } if ctx.isEmpty() && addEOF { - try look.add(CommonToken.EOF) + try! look.add(CommonToken.EOF) return } @@ -151,75 +159,64 @@ public class LL1Analyzer { if s is RuleStopState { guard let ctx = ctx else { - try look.add(CommonToken.EPSILON) + try! look.add(CommonToken.EPSILON) return } if ctx.isEmpty() && addEOF { - try look.add(CommonToken.EOF) + try! look.add(CommonToken.EOF) return } - if ctx != PredictionContext.EMPTY { // run thru all possible stack tops in ctx let length = ctx.size() for i in 0.. LexerActionExecutor? 
{ return lexerActionExecutor } @@ -71,15 +77,14 @@ public class LexerATNConfig: ATNConfig { }*/ public var hashValue: Int { - var hashCode: Int = MurmurHash.initialize(7) + var hashCode = MurmurHash.initialize(7) hashCode = MurmurHash.update(hashCode, state.stateNumber) hashCode = MurmurHash.update(hashCode, alt) hashCode = MurmurHash.update(hashCode, context) hashCode = MurmurHash.update(hashCode, semanticContext) hashCode = MurmurHash.update(hashCode, passedThroughNonGreedyDecision ? 1 : 0) hashCode = MurmurHash.update(hashCode, lexerActionExecutor) - hashCode = MurmurHash.finish(hashCode, 6) - return hashCode + return MurmurHash.finish(hashCode, 6) } diff --git a/runtime/Swift/Sources/Antlr4/atn/LexerATNSimulator.swift b/runtime/Swift/Sources/Antlr4/atn/LexerATNSimulator.swift index 4d8694a96..5f7f619bf 100644 --- a/runtime/Swift/Sources/Antlr4/atn/LexerATNSimulator.swift +++ b/runtime/Swift/Sources/Antlr4/atn/LexerATNSimulator.swift @@ -1,33 +1,39 @@ +/// /// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. /// Use of this file is governed by the BSD 3-clause license that /// can be found in the LICENSE.txt file in the project root. +/// +/// /// "dup" of ParserInterpreter +/// open class LexerATNSimulator: ATNSimulator { - public static let debug: Bool = false - public let dfa_debug: Bool = false + public static let debug = false + public let dfa_debug = false - public static let MIN_DFA_EDGE: Int = 0 - public static let MAX_DFA_EDGE: Int = 127 + public static let MIN_DFA_EDGE = 0 + public static let MAX_DFA_EDGE = 127 // forces unicode to stay in ATN + /// /// When we hit an accept state in either the DFA or the ATN, we /// have to notify the character stream to start buffering characters - /// via {@link org.antlr.v4.runtime.IntStream#mark} and record the current state. The current sim state + /// via _org.antlr.v4.runtime.IntStream#mark_ and record the current state. 
The current sim state /// includes the current index into the input, the current line, /// and current character position in that line. Note that the Lexer is /// tracking the starting line and characterization of the token. These /// variables track the "state" of the simulator when it hits an accept state. - /// - ///

      We track these variables separately for the DFA and ATN simulation + /// + /// We track these variables separately for the DFA and ATN simulation /// because the DFA simulation often has to fail over to the ATN /// simulation. If the ATN simulation fails, we need the DFA to fall /// back to its previously accepted state, if any. If the ATN succeeds, /// then the ATN does the accept and the DFA simulator that invoked it - /// can simply return the predicted token type.

      + /// can simply return the predicted token type. + /// internal class SimState { internal var index: Int = -1 @@ -44,35 +50,47 @@ open class LexerATNSimulator: ATNSimulator { } - internal let recog: Lexer? + internal weak var recog: Lexer? + /// /// The current token's starting index into the character stream. /// Shared across DFA to ATN simulation in case the ATN fails and the /// DFA did not have a previous accept state. In this case, we use the /// ATN-generated exception object. - internal var startIndex: Int = -1 + /// + internal var startIndex = -1 + /// /// line number 1..n within the input - public var line: Int = 1 + /// + public var line = 1 + /// /// The index of the character relative to the beginning of the line 0..n-1 - public var charPositionInLine: Int = 0 + /// + public var charPositionInLine = 0 public final var decisionToDFA: [DFA] - internal var mode: Int = Lexer.DEFAULT_MODE + internal var mode = Lexer.DEFAULT_MODE + /// /// mutex for DFAState change + /// private var dfaStateMutex = Mutex() + /// /// mutex for changes to all DFAStates map + /// private var dfaStatesMutex = Mutex() + /// /// Used during DFA/ATN exec to record the most recent accept configuration info + /// - internal final var prevAccept: SimState = SimState() + internal final var prevAccept = SimState() - public static var match_calls: Int = 0 + public static var match_calls = 0 public convenience init(_ atn: ATN, _ decisionToDFA: [DFA], _ sharedContextCache: PredictionContextCache) { @@ -98,11 +116,11 @@ open class LexerATNSimulator: ATNSimulator { open func match(_ input: CharStream, _ mode: Int) throws -> Int { LexerATNSimulator.match_calls += 1 self.mode = mode - var mark: Int = input.mark() + var mark = input.mark() do { self.startIndex = input.index() self.prevAccept.reset() - var dfa: DFA = decisionToDFA[mode] + var dfa = decisionToDFA[mode] defer { try! 
input.release(mark) } @@ -128,31 +146,30 @@ open class LexerATNSimulator: ATNSimulator { override open func clearDFA() { - for d in 0.. Int { - let startState: ATNState = atn.modeToStartState[mode] + let startState = atn.modeToStartState[mode] if LexerATNSimulator.debug { print("matchATN mode \(mode) start: \(startState)\n") } - let old_mode: Int = mode + let old_mode = mode - let s0_closure: ATNConfigSet = try computeStartState(input, startState) - let suppressEdge: Bool = s0_closure.hasSemanticContext + let s0_closure = try computeStartState(input, startState) + let suppressEdge = s0_closure.hasSemanticContext s0_closure.hasSemanticContext = false - let next: DFAState = addDFAState(s0_closure) + let next = addDFAState(s0_closure) if !suppressEdge { decisionToDFA[mode].s0 = next } - let predict: Int = try execATN(input, next) + let predict = try execATN(input, next) if LexerATNSimulator.debug { print("DFA after matchATN: \(decisionToDFA[old_mode].toLexerString())") @@ -172,14 +189,13 @@ open class LexerATNSimulator: ATNSimulator { captureSimState(prevAccept, input, ds0) } - var t: Int = try input.LA(1) + var t = try input.LA(1) - var s: DFAState = ds0 // s is current/from DFA state + var s = ds0 // s is current/from DFA state while true { // while more work if LexerATNSimulator.debug { - print("execATN loop starting closure: \(s.configs)\n") } @@ -233,22 +249,24 @@ open class LexerATNSimulator: ATNSimulator { return try failOrAccept(prevAccept, input, s.configs, t) } + /// /// Get an existing target state for an edge in the DFA. If the target state /// for the edge has not yet been computed or is otherwise not available, - /// this method returns {@code null}. - /// + /// this method returns `null`. 
+ /// /// - parameter s: The current DFA state /// - parameter t: The next input symbol /// - returns: The existing target DFA state for the given input symbol - /// {@code t}, or {@code null} if the target state for this edge is not + /// `t`, or `null` if the target state for this edge is not /// already cached + /// internal func getExistingTargetState(_ s: DFAState, _ t: Int) -> DFAState? { if s.edges == nil || t < LexerATNSimulator.MIN_DFA_EDGE || t > LexerATNSimulator.MAX_DFA_EDGE { return nil } - let target: DFAState? = s.edges[t - LexerATNSimulator.MIN_DFA_EDGE] + let target = s.edges[t - LexerATNSimulator.MIN_DFA_EDGE] if LexerATNSimulator.debug && target != nil { print("reuse state \(s.stateNumber) edge to \(target!.stateNumber)") } @@ -256,19 +274,21 @@ open class LexerATNSimulator: ATNSimulator { return target } + /// /// Compute a target state for an edge in the DFA, and attempt to add the /// computed state and corresponding edge to the DFA. - /// + /// /// - parameter input: The input stream /// - parameter s: The current DFA state /// - parameter t: The next input symbol - /// + /// /// - returns: The computed target DFA state for the given input symbol - /// {@code t}. If {@code t} does not lead to a valid DFA state, this method - /// returns {@link #ERROR}. + /// `t`. If `t` does not lead to a valid DFA state, this method + /// returns _#ERROR_. + /// internal func computeTargetState(_ input: CharStream, _ s: DFAState, _ t: Int) throws -> DFAState { - let reach: ATNConfigSet = OrderedATNConfigSet() + let reach = OrderedATNConfigSet() // if we don't find an existing DFA state // Fill reach starting from closure, following t transitions @@ -294,7 +314,7 @@ open class LexerATNSimulator: ATNSimulator { internal func failOrAccept(_ prevAccept: SimState, _ input: CharStream, _ reach: ATNConfigSet, _ t: Int) throws -> Int { if let dfaState = prevAccept.dfaState { - let lexerActionExecutor: LexerActionExecutor? 
= dfaState.lexerActionExecutor + let lexerActionExecutor = dfaState.lexerActionExecutor try accept(input, lexerActionExecutor, startIndex, prevAccept.index, prevAccept.line, prevAccept.charPos) return dfaState.prediction @@ -303,23 +323,24 @@ open class LexerATNSimulator: ATNSimulator { if t == BufferedTokenStream.EOF && input.index() == startIndex { return CommonToken.EOF } - throw ANTLRException.recognition(e: LexerNoViableAltException(recog, input, startIndex, reach)) - + throw ANTLRException.recognition(e: LexerNoViableAltException(recog, input, startIndex, reach)) } } + /// /// Given a starting configuration set, figure out all ATN configurations - /// we can reach upon input {@code t}. Parameter {@code reach} is a return + /// we can reach upon input `t`. Parameter `reach` is a return /// parameter. + /// internal func getReachableConfigSet(_ input: CharStream, _ closureConfig: ATNConfigSet, _ reach: ATNConfigSet, _ t: Int) throws { // this is used to skip processing for configs which have a lower priority // than a config that already reached an accept state for the same rule - var skipAlt: Int = ATN.INVALID_ALT_NUMBER - for c: ATNConfig in closureConfig.configs { + var skipAlt = ATN.INVALID_ALT_NUMBER + for c in closureConfig.configs { guard let c = c as? LexerATNConfig else { continue } - let currentAltReachedAcceptState: Bool = c.alt == skipAlt + let currentAltReachedAcceptState = (c.alt == skipAlt) if currentAltReachedAcceptState && c.hasPassedThroughNonGreedyDecision() { continue } @@ -329,17 +350,17 @@ open class LexerATNSimulator: ATNSimulator { } - let n: Int = c.state.getNumberOfTransitions() + let n = c.state.getNumberOfTransitions() for ti in 0.. ATNConfigSet { - let initialContext: PredictionContext = PredictionContext.EMPTY - let configs: ATNConfigSet = OrderedATNConfigSet() + let initialContext = PredictionContext.EMPTY + let configs = OrderedATNConfigSet() let length = p.getNumberOfTransitions() for i in 0.. 
Bool { var currentAltReachedAcceptState = currentAltReachedAcceptState @@ -415,10 +437,8 @@ open class LexerATNSimulator: ATNSimulator { if LexerATNSimulator.debug { if recog != nil { print("closure at \(recog!.getRuleNames()[config.state.ruleIndex!]) rule stop \(config)\n") - } else { print("closure at rule stop \(config)\n") - } } @@ -436,9 +456,9 @@ open class LexerATNSimulator: ATNSimulator { let length = configContext.size() for i in 0..If {@code speculative} is {@code true}, this method was called before - /// {@link #consume} for the matched character. This method should call - /// {@link #consume} before evaluating the predicate to ensure position - /// sensitive values, including {@link org.antlr.v4.runtime.Lexer#getText}, {@link org.antlr.v4.runtime.Lexer#getLine}, - /// and {@link org.antlr.v4.runtime.Lexer#getCharPositionInLine}, properly reflect the current - /// lexer state. This method should restore {@code input} and the simulator + /// + /// If `speculative` is `true`, this method was called before + /// _#consume_ for the matched character. This method should call + /// _#consume_ before evaluating the predicate to ensure position + /// sensitive values, including _org.antlr.v4.runtime.Lexer#getText_, _org.antlr.v4.runtime.Lexer#getLine_, + /// and _org.antlr.v4.runtime.Lexer#getCharPositionInLine_, properly reflect the current + /// lexer state. This method should restore `input` and the simulator /// to the original state before returning (i.e. undo the actions made by the - /// call to {@link #consume}.

      - /// + /// call to _#consume_. + /// /// - parameter input: The input stream. /// - parameter ruleIndex: The rule containing the predicate. /// - parameter predIndex: The index of the predicate within the rule. - /// - parameter speculative: {@code true} if the current index in {@code input} is + /// - parameter speculative: `true` if the current index in `input` is /// one character before the predicate's location. - /// - /// - returns: {@code true} if the specified predicate evaluates to - /// {@code true}. + /// + /// - returns: `true` if the specified predicate evaluates to + /// `true`. + /// final func evaluatePredicate(_ input: CharStream, _ ruleIndex: Int, _ predIndex: Int, _ speculative: Bool) throws -> Bool { // assume true if no recognizer was provided guard let recog = recog else { @@ -589,10 +611,10 @@ open class LexerATNSimulator: ATNSimulator { return try recog.sempred(nil, ruleIndex, predIndex) } - var savedCharPositionInLine: Int = charPositionInLine - var savedLine: Int = line - var index: Int = input.index() - var marker: Int = input.mark() + var savedCharPositionInLine = charPositionInLine + var savedLine = line + var index = input.index() + var marker = input.mark() do { try consume(input) defer @@ -621,21 +643,21 @@ open class LexerATNSimulator: ATNSimulator { final func addDFAEdge(_ from: DFAState, _ t: Int, _ q: ATNConfigSet) -> DFAState { + /// /// leading to this call, ATNConfigSet.hasSemanticContext is used as a /// marker indicating dynamic predicate evaluation makes this edge /// dependent on the specific input sequence, so the static edge in the /// DFA should be omitted. The target DFAState is still created since /// execATN has the ability to resynchronize with the DFA state cache /// following the predicate evaluation step. - /// + /// /// TJP notes: next time through the DFA, we see a pred again and eval. /// If that gets us to a previously created (but dangling) DFA /// state, we can continue in pure DFA mode from there. 
- let suppressEdge: Bool = q.hasSemanticContext + /// + let suppressEdge = q.hasSemanticContext q.hasSemanticContext = false - - - let to: DFAState = addDFAState(q) + let to = addDFAState(q) if suppressEdge { return to @@ -658,25 +680,28 @@ open class LexerATNSimulator: ATNSimulator { dfaStateMutex.synchronized { if p.edges == nil { // make room for tokens 1..n and -1 masquerading as index 0 - //TODO ARRAY COUNT - p.edges = [DFAState?](repeating: nil, count: LexerATNSimulator.MAX_DFA_EDGE - LexerATNSimulator.MIN_DFA_EDGE + 1) //new DFAState[MAX_DFA_EDGE-MIN_DFA_EDGE+1]; + p.edges = [DFAState?](repeating: nil, count: LexerATNSimulator.MAX_DFA_EDGE - LexerATNSimulator.MIN_DFA_EDGE + 1) } p.edges[t - LexerATNSimulator.MIN_DFA_EDGE] = q // connect } } + /// /// Add a new DFA state if there isn't one with this set of /// configurations already. This method also detects the first /// configuration containing an ATN rule stop state. Later, when /// traversing the DFA, we will know which rule to accept. + /// final func addDFAState(_ configs: ATNConfigSet) -> DFAState { + /// /// the lexer evaluates predicates on-the-fly; by this point configs /// should not contain any configurations with unevaluated predicates. + /// assert(!configs.hasSemanticContext, "Expected: !configs.hasSemanticContext") - let proposed: DFAState = DFAState(configs) - let firstConfigWithRuleStopState: ATNConfig? = configs.firstConfigWithRuleStopState + let proposed = DFAState(configs) + let firstConfigWithRuleStopState = configs.firstConfigWithRuleStopState if firstConfigWithRuleStopState != nil { proposed.isAcceptState = true @@ -684,14 +709,14 @@ open class LexerATNSimulator: ATNSimulator { proposed.prediction = atn.ruleToTokenType[firstConfigWithRuleStopState!.state.ruleIndex!] } - let dfa: DFA = decisionToDFA[mode] + let dfa = decisionToDFA[mode] return dfaStatesMutex.synchronized { if let existing = dfa.states[proposed] { return existing! 
} - let newState: DFAState = proposed + let newState = proposed newState.stateNumber = dfa.states.count configs.setReadonly(true) newState.configs = configs @@ -705,11 +730,13 @@ open class LexerATNSimulator: ATNSimulator { return decisionToDFA[mode] } + /// /// Get the text matched so far for the current token. + /// public func getText(_ input: CharStream) -> String { // index is first lookahead char, don't include. - return input.getText(Interval.of(startIndex, input.index() - 1)) + return try! input.getText(Interval.of(startIndex, input.index() - 1)) } public func getLine() -> Int { @@ -729,7 +756,7 @@ open class LexerATNSimulator: ATNSimulator { } public func consume(_ input: CharStream) throws { - let curChar: Int = try input.LA(1) + let curChar = try input.LA(1) if String(Character(integerLiteral: curChar)) == "\n" { line += 1 charPositionInLine = 0 diff --git a/runtime/Swift/Sources/Antlr4/atn/LexerAction.swift b/runtime/Swift/Sources/Antlr4/atn/LexerAction.swift index 6fb85d9bc..99d604da5 100644 --- a/runtime/Swift/Sources/Antlr4/atn/LexerAction.swift +++ b/runtime/Swift/Sources/Antlr4/atn/LexerAction.swift @@ -1,56 +1,63 @@ +/// /// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. /// Use of this file is governed by the BSD 3-clause license that /// can be found in the LICENSE.txt file in the project root. +/// +/// /// Represents a single action which can be executed following the successful /// match of a lexer rule. Lexer actions are used for both embedded action syntax /// and ANTLR 4's new lexer command syntax. -/// +/// /// - Sam Harwell /// - 4.2 +/// public class LexerAction: Hashable { + /// /// Gets the serialization type of the lexer action. - /// + /// /// - returns: The serialization type of the lexer action. + /// public func getActionType() -> LexerActionType { - RuntimeException(" must overriden ") - fatalError() + fatalError(#function + " must be overridden") } + /// /// Gets whether the lexer action is position-dependent. 
Position-dependent - /// actions may have different semantics depending on the {@link org.antlr.v4.runtime.CharStream} + /// actions may have different semantics depending on the _org.antlr.v4.runtime.CharStream_ /// index at the time the action is executed. - /// - ///

      Many lexer commands, including {@code type}, {@code skip}, and - /// {@code more}, do not check the input index during their execution. + /// + /// Many lexer commands, including `type`, `skip`, and + /// `more`, do not check the input index during their execution. /// Actions like this are position-independent, and may be stored more - /// efficiently as part of the {@link org.antlr.v4.runtime.atn.LexerATNConfig#lexerActionExecutor}.

      - /// - /// - returns: {@code true} if the lexer action semantics can be affected by the - /// position of the input {@link org.antlr.v4.runtime.CharStream} at the time it is executed; - /// otherwise, {@code false}. + /// efficiently as part of the _org.antlr.v4.runtime.atn.LexerATNConfig#lexerActionExecutor_. + /// + /// - returns: `true` if the lexer action semantics can be affected by the + /// position of the input _org.antlr.v4.runtime.CharStream_ at the time it is executed; + /// otherwise, `false`. + /// public func isPositionDependent() -> Bool { - RuntimeException(" must overriden ") - fatalError() + fatalError(#function + " must be overridden") } - /// Execute the lexer action in the context of the specified {@link org.antlr.v4.runtime.Lexer}. - /// - ///

      For position-dependent actions, the input stream must already be - /// positioned correctly prior to calling this method.

      - /// + /// + /// Execute the lexer action in the context of the specified _org.antlr.v4.runtime.Lexer_. + /// + /// For position-dependent actions, the input stream must already be + /// positioned correctly prior to calling this method. + /// /// - parameter lexer: The lexer instance. + /// public func execute(_ lexer: Lexer) throws { - RuntimeException(" must overriden ") + fatalError(#function + " must be overridden") } public var hashValue: Int { - RuntimeException(" must overriden ") - fatalError() + fatalError(#function + " must be overridden") } } diff --git a/runtime/Swift/Sources/Antlr4/atn/LexerActionExecutor.swift b/runtime/Swift/Sources/Antlr4/atn/LexerActionExecutor.swift index 0870916d7..e33e92e9b 100644 --- a/runtime/Swift/Sources/Antlr4/atn/LexerActionExecutor.swift +++ b/runtime/Swift/Sources/Antlr4/atn/LexerActionExecutor.swift @@ -1,34 +1,42 @@ +/// /// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. /// Use of this file is governed by the BSD 3-clause license that /// can be found in the LICENSE.txt file in the project root. +/// +/// /// Represents an executor for a sequence of lexer actions which traversed during /// the matching operation of a lexer rule (token). -/// -///

      The executor tracks position information for position-dependent lexer actions +/// +/// The executor tracks position information for position-dependent lexer actions /// efficiently, ensuring that actions appearing only at the end of the rule do -/// not cause bloating of the {@link org.antlr.v4.runtime.dfa.DFA} created for the lexer.

      -/// +/// not cause bloating of the _org.antlr.v4.runtime.dfa.DFA_ created for the lexer. +/// /// - Sam Harwell /// - 4.2 +/// public class LexerActionExecutor: Hashable { fileprivate final var lexerActions: [LexerAction] - /// Caches the result of {@link #hashCode} since the hash code is an element - /// of the performance-critical {@link org.antlr.v4.runtime.atn.LexerATNConfig#hashCode} operation. + /// + /// Caches the result of _#hashCode_ since the hash code is an element + /// of the performance-critical _org.antlr.v4.runtime.atn.LexerATNConfig#hashCode_ operation. + /// fileprivate final var hashCode: Int - /// Constructs an executor for a sequence of {@link org.antlr.v4.runtime.atn.LexerAction} actions. + /// + /// Constructs an executor for a sequence of _org.antlr.v4.runtime.atn.LexerAction_ actions. /// - parameter lexerActions: The lexer actions to execute. + /// public init(_ lexerActions: [LexerAction]) { self.lexerActions = lexerActions - var hash: Int = MurmurHash.initialize() + var hash = MurmurHash.initialize() for lexerAction: LexerAction in lexerActions { hash = MurmurHash.update(hash, lexerAction) } @@ -36,19 +44,21 @@ public class LexerActionExecutor: Hashable { self.hashCode = MurmurHash.finish(hash, lexerActions.count) } - /// Creates a {@link org.antlr.v4.runtime.atn.LexerActionExecutor} which executes the actions for - /// the input {@code lexerActionExecutor} followed by a specified - /// {@code lexerAction}. - /// + /// + /// Creates a _org.antlr.v4.runtime.atn.LexerActionExecutor_ which executes the actions for + /// the input `lexerActionExecutor` followed by a specified + /// `lexerAction`. + /// /// - parameter lexerActionExecutor: The executor for actions already traversed by /// the lexer while matching a token within a particular - /// {@link org.antlr.v4.runtime.atn.LexerATNConfig}. If this is {@code null}, the method behaves as + /// _org.antlr.v4.runtime.atn.LexerATNConfig_. 
If this is `null`, the method behaves as /// though it were an empty executor. /// - parameter lexerAction: The lexer action to execute after the actions - /// specified in {@code lexerActionExecutor}. - /// - /// - returns: A {@link org.antlr.v4.runtime.atn.LexerActionExecutor} for executing the combine actions - /// of {@code lexerActionExecutor} and {@code lexerAction}. + /// specified in `lexerActionExecutor`. + /// + /// - returns: A _org.antlr.v4.runtime.atn.LexerActionExecutor_ for executing the combine actions + /// of `lexerActionExecutor` and `lexerAction`. + /// public static func append(_ lexerActionExecutor: LexerActionExecutor?, _ lexerAction: LexerAction) -> LexerActionExecutor { if lexerActionExecutor == nil { return LexerActionExecutor([lexerAction]) @@ -61,33 +71,35 @@ public class LexerActionExecutor: Hashable { return LexerActionExecutor(lexerActions) } - /// Creates a {@link org.antlr.v4.runtime.atn.LexerActionExecutor} which encodes the current offset + /// + /// Creates a _org.antlr.v4.runtime.atn.LexerActionExecutor_ which encodes the current offset /// for position-dependent lexer actions. - /// - ///

      Normally, when the executor encounters lexer actions where - /// {@link org.antlr.v4.runtime.atn.LexerAction#isPositionDependent} returns {@code true}, it calls - /// {@link org.antlr.v4.runtime.IntStream#seek} on the input {@link org.antlr.v4.runtime.CharStream} to set the input - /// position to the end of the current token. This behavior provides + /// + /// Normally, when the executor encounters lexer actions where + /// _org.antlr.v4.runtime.atn.LexerAction#isPositionDependent_ returns `true`, it calls + /// _org.antlr.v4.runtime.IntStream#seek_ on the input _org.antlr.v4.runtime.CharStream_ to set the input + /// position to the __end__ of the current token. This behavior provides /// for efficient DFA representation of lexer actions which appear at the end /// of a lexer rule, even when the lexer rule matches a variable number of - /// characters.

      - /// - ///

      Prior to traversing a match transition in the ATN, the current offset + /// characters. + /// + /// Prior to traversing a match transition in the ATN, the current offset /// from the token start index is assigned to all position-dependent lexer /// actions which have not already been assigned a fixed offset. By storing /// the offsets relative to the token start index, the DFA representation of /// lexer actions which appear in the middle of tokens remains efficient due /// to sharing among tokens of the same length, regardless of their absolute - /// position in the input stream.

      - /// - ///

      If the current executor already has offsets assigned to all - /// position-dependent lexer actions, the method returns {@code this}.

      - /// + /// position in the input stream. + /// + /// If the current executor already has offsets assigned to all + /// position-dependent lexer actions, the method returns `this`. + /// /// - parameter offset: The current offset to assign to all position-dependent /// lexer actions which do not already have offsets assigned. - /// - /// - returns: A {@link org.antlr.v4.runtime.atn.LexerActionExecutor} which stores input stream offsets + /// + /// - returns: A _org.antlr.v4.runtime.atn.LexerActionExecutor_ which stores input stream offsets /// for all position-dependent lexer actions. + /// public func fixOffsetBeforeMatch(_ offset: Int) -> LexerActionExecutor { var updatedLexerActions: [LexerAction]? = nil let length = lexerActions.count @@ -108,29 +120,33 @@ public class LexerActionExecutor: Hashable { return LexerActionExecutor(updatedLexerActions!) } + /// /// Gets the lexer actions to be executed by this executor. /// - returns: The lexer actions to be executed by this executor. + /// public func getLexerActions() -> [LexerAction] { return lexerActions } + /// /// Execute the actions encapsulated by this executor within the context of a - /// particular {@link org.antlr.v4.runtime.Lexer}. - /// - ///

      This method calls {@link org.antlr.v4.runtime.IntStream#seek} to set the position of the - /// {@code input} {@link org.antlr.v4.runtime.CharStream} prior to calling - /// {@link org.antlr.v4.runtime.atn.LexerAction#execute} on a position-dependent action. Before the + /// particular _org.antlr.v4.runtime.Lexer_. + /// + /// This method calls _org.antlr.v4.runtime.IntStream#seek_ to set the position of the + /// `input` _org.antlr.v4.runtime.CharStream_ prior to calling + /// _org.antlr.v4.runtime.atn.LexerAction#execute_ on a position-dependent action. Before the /// method returns, the input position will be restored to the same position - /// it was in when the method was invoked.

      - /// + /// it was in when the method was invoked. + /// /// - parameter lexer: The lexer instance. /// - parameter input: The input stream which is the source for the current token. - /// When this method is called, the current {@link org.antlr.v4.runtime.IntStream#index} for - /// {@code input} should be the start of the following token, i.e. 1 + /// When this method is called, the current _org.antlr.v4.runtime.IntStream#index_ for + /// `input` should be the start of the following token, i.e. 1 /// character past the end of the current token. /// - parameter startIndex: The token start index. This value may be passed to - /// {@link org.antlr.v4.runtime.IntStream#seek} to set the {@code input} position to the beginning + /// _org.antlr.v4.runtime.IntStream#seek_ to set the `input` position to the beginning /// of the token. + /// public func execute(_ lexer: Lexer, _ input: CharStream, _ startIndex: Int) throws { var requiresSeek: Bool = false var stopIndex: Int = input.index() diff --git a/runtime/Swift/Sources/Antlr4/atn/LexerActionType.swift b/runtime/Swift/Sources/Antlr4/atn/LexerActionType.swift index e440809cc..8b824d8c1 100644 --- a/runtime/Swift/Sources/Antlr4/atn/LexerActionType.swift +++ b/runtime/Swift/Sources/Antlr4/atn/LexerActionType.swift @@ -1,45 +1,49 @@ +/// /// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. /// Use of this file is governed by the BSD 3-clause license that /// can be found in the LICENSE.txt file in the project root. +/// -/// Represents the serialization type of a {@link org.antlr.v4.runtime.atn.LexerAction}. -/// +/// +/// Represents the serialization type of a _org.antlr.v4.runtime.atn.LexerAction_. +/// /// - Sam Harwell /// - 4.2 +/// public enum LexerActionType: Int { - /** - * The type of a {@link org.antlr.v4.runtime.atn.LexerChannelAction} action. - */ + /// + /// The type of a _org.antlr.v4.runtime.atn.LexerChannelAction_ action. 
+ /// case channel = 0 - /** - * The type of a {@link org.antlr.v4.runtime.atn.LexerCustomAction} action. - */ + /// + /// The type of a _org.antlr.v4.runtime.atn.LexerCustomAction_ action. + /// case custom - /** - * The type of a {@link org.antlr.v4.runtime.atn.LexerModeAction} action. - */ + /// + /// The type of a _org.antlr.v4.runtime.atn.LexerModeAction_ action. + /// case mode - /** - * The type of a {@link org.antlr.v4.runtime.atn.LexerMoreAction} action. - */ + /// + /// The type of a _org.antlr.v4.runtime.atn.LexerMoreAction_ action. + /// case more - /** - * The type of a {@link org.antlr.v4.runtime.atn.LexerPopModeAction} action. - */ + /// + /// The type of a _org.antlr.v4.runtime.atn.LexerPopModeAction_ action. + /// case popMode - /** - * The type of a {@link org.antlr.v4.runtime.atn.LexerPushModeAction} action. - */ + /// + /// The type of a _org.antlr.v4.runtime.atn.LexerPushModeAction_ action. + /// case pushMode - /** - * The type of a {@link org.antlr.v4.runtime.atn.LexerSkipAction} action. - */ + /// + /// The type of a _org.antlr.v4.runtime.atn.LexerSkipAction_ action. + /// case skip - /** - * The type of a {@link org.antlr.v4.runtime.atn.LexerTypeAction} action. - */ + /// + /// The type of a _org.antlr.v4.runtime.atn.LexerTypeAction_ action. + /// case type } diff --git a/runtime/Swift/Sources/Antlr4/atn/LexerChannelAction.swift b/runtime/Swift/Sources/Antlr4/atn/LexerChannelAction.swift index 150365632..4d099f28c 100644 --- a/runtime/Swift/Sources/Antlr4/atn/LexerChannelAction.swift +++ b/runtime/Swift/Sources/Antlr4/atn/LexerChannelAction.swift @@ -1,48 +1,62 @@ +/// /// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. /// Use of this file is governed by the BSD 3-clause license that /// can be found in the LICENSE.txt file in the project root. +/// -/// Implements the {@code channel} lexer action by calling -/// {@link org.antlr.v4.runtime.Lexer#setChannel} with the assigned channel. 
-/// +/// +/// Implements the `channel` lexer action by calling +/// _org.antlr.v4.runtime.Lexer#setChannel_ with the assigned channel. +/// /// - Sam Harwell /// - 4.2 +/// public final class LexerChannelAction: LexerAction, CustomStringConvertible { fileprivate let channel: Int - /// Constructs a new {@code channel} action with the specified channel value. - /// - parameter channel: The channel value to pass to {@link org.antlr.v4.runtime.Lexer#setChannel}. + /// + /// Constructs a new `channel` action with the specified channel value. + /// - parameter channel: The channel value to pass to _org.antlr.v4.runtime.Lexer#setChannel_. + /// public init(_ channel: Int) { self.channel = channel } - /// Gets the channel to use for the {@link org.antlr.v4.runtime.Token} created by the lexer. - /// - /// - returns: The channel to use for the {@link org.antlr.v4.runtime.Token} created by the lexer. + /// + /// Gets the channel to use for the _org.antlr.v4.runtime.Token_ created by the lexer. + /// + /// - returns: The channel to use for the _org.antlr.v4.runtime.Token_ created by the lexer. + /// public func getChannel() -> Int { return channel } - /// {@inheritDoc} - /// - returns: This method returns {@link org.antlr.v4.runtime.atn.LexerActionType#CHANNEL}. + /// + /// + /// - returns: This method returns _org.antlr.v4.runtime.atn.LexerActionType#CHANNEL_. + /// public override func getActionType() -> LexerActionType { return LexerActionType.channel } - /// {@inheritDoc} - /// - returns: This method returns {@code false}. + /// + /// + /// - returns: This method returns `false`. + /// public override func isPositionDependent() -> Bool { return false } - /// {@inheritDoc} - /// - ///

      This action is implemented by calling {@link org.antlr.v4.runtime.Lexer#setChannel} with the - /// value provided by {@link #getChannel}.

      + /// + /// + /// + /// This action is implemented by calling _org.antlr.v4.runtime.Lexer#setChannel_ with the + /// value provided by _#getChannel_. + /// public override func execute(_ lexer: Lexer) { lexer.setChannel(channel) @@ -51,7 +65,7 @@ public final class LexerChannelAction: LexerAction, CustomStringConvertible { override public var hashValue: Int { - var hash: Int = MurmurHash.initialize() + var hash = MurmurHash.initialize() hash = MurmurHash.update(hash, getActionType().rawValue) hash = MurmurHash.update(hash, channel) return MurmurHash.finish(hash, 2) diff --git a/runtime/Swift/Sources/Antlr4/atn/LexerCustomAction.swift b/runtime/Swift/Sources/Antlr4/atn/LexerCustomAction.swift index b25d80993..3fe954236 100644 --- a/runtime/Swift/Sources/Antlr4/atn/LexerCustomAction.swift +++ b/runtime/Swift/Sources/Antlr4/atn/LexerCustomAction.swift @@ -1,76 +1,92 @@ +/// /// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. /// Use of this file is governed by the BSD 3-clause license that /// can be found in the LICENSE.txt file in the project root. +/// -/// Executes a custom lexer action by calling {@link org.antlr.v4.runtime.Recognizer#action} with the +/// +/// Executes a custom lexer action by calling _org.antlr.v4.runtime.Recognizer#action_ with the /// rule and action indexes assigned to the custom action. The implementation of /// a custom action is added to the generated code for the lexer in an override -/// of {@link org.antlr.v4.runtime.Recognizer#action} when the grammar is compiled. -/// -///

      This class may represent embedded actions created with the {...} +/// of _org.antlr.v4.runtime.Recognizer#action_ when the grammar is compiled. +/// +/// This class may represent embedded actions created with the {...} /// syntax in ANTLR 4, as well as actions created for lexer commands where the -/// command argument could not be evaluated when the grammar was compiled.

      -/// +/// command argument could not be evaluated when the grammar was compiled. +/// /// - Sam Harwell /// - 4.2 +/// public final class LexerCustomAction: LexerAction { fileprivate let ruleIndex: Int fileprivate let actionIndex: Int + /// /// Constructs a custom lexer action with the specified rule and action /// indexes. - /// + /// /// - parameter ruleIndex: The rule index to use for calls to - /// {@link org.antlr.v4.runtime.Recognizer#action}. + /// _org.antlr.v4.runtime.Recognizer#action_. /// - parameter actionIndex: The action index to use for calls to - /// {@link org.antlr.v4.runtime.Recognizer#action}. + /// _org.antlr.v4.runtime.Recognizer#action_. + /// public init(_ ruleIndex: Int, _ actionIndex: Int) { self.ruleIndex = ruleIndex self.actionIndex = actionIndex } - /// Gets the rule index to use for calls to {@link org.antlr.v4.runtime.Recognizer#action}. - /// + /// + /// Gets the rule index to use for calls to _org.antlr.v4.runtime.Recognizer#action_. + /// /// - returns: The rule index for the custom action. + /// public func getRuleIndex() -> Int { return ruleIndex } - /// Gets the action index to use for calls to {@link org.antlr.v4.runtime.Recognizer#action}. - /// + /// + /// Gets the action index to use for calls to _org.antlr.v4.runtime.Recognizer#action_. + /// /// - returns: The action index for the custom action. + /// public func getActionIndex() -> Int { return actionIndex } - /// {@inheritDoc} - /// - /// - returns: This method returns {@link org.antlr.v4.runtime.atn.LexerActionType#CUSTOM}. + /// + /// + /// + /// - returns: This method returns _org.antlr.v4.runtime.atn.LexerActionType#CUSTOM_. + /// public override func getActionType() -> LexerActionType { return LexerActionType.custom } + /// /// Gets whether the lexer action is position-dependent. 
Position-dependent - /// actions may have different semantics depending on the {@link org.antlr.v4.runtime.CharStream} + /// actions may have different semantics depending on the _org.antlr.v4.runtime.CharStream_ /// index at the time the action is executed. - /// - ///

      Custom actions are position-dependent since they may represent a + /// + /// Custom actions are position-dependent since they may represent a /// user-defined embedded action which makes calls to methods like - /// {@link org.antlr.v4.runtime.Lexer#getText}.

      - /// - /// - returns: This method returns {@code true}. + /// _org.antlr.v4.runtime.Lexer#getText_. + /// + /// - returns: This method returns `true`. + /// override public func isPositionDependent() -> Bool { return true } - /// {@inheritDoc} - /// - ///

      Custom actions are implemented by calling {@link org.antlr.v4.runtime.Lexer#action} with the - /// appropriate rule and action indexes.

      + /// + /// + /// + /// Custom actions are implemented by calling _org.antlr.v4.runtime.Lexer#action_ with the + /// appropriate rule and action indexes. + /// override public func execute(_ lexer: Lexer) throws { try lexer.action(nil, ruleIndex, actionIndex) @@ -78,7 +94,7 @@ public final class LexerCustomAction: LexerAction { override public var hashValue: Int { - var hash: Int = MurmurHash.initialize() + var hash = MurmurHash.initialize() hash = MurmurHash.update(hash, getActionType().rawValue) hash = MurmurHash.update(hash, ruleIndex) hash = MurmurHash.update(hash, actionIndex) diff --git a/runtime/Swift/Sources/Antlr4/atn/LexerIndexedCustomAction.swift b/runtime/Swift/Sources/Antlr4/atn/LexerIndexedCustomAction.swift index b520e290e..501b2e637 100644 --- a/runtime/Swift/Sources/Antlr4/atn/LexerIndexedCustomAction.swift +++ b/runtime/Swift/Sources/Antlr4/atn/LexerIndexedCustomAction.swift @@ -1,78 +1,94 @@ +/// /// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. /// Use of this file is governed by the BSD 3-clause license that /// can be found in the LICENSE.txt file in the project root. +/// -/// This implementation of {@link org.antlr.v4.runtime.atn.LexerAction} is used for tracking input offsets -/// for position-dependent actions within a {@link org.antlr.v4.runtime.atn.LexerActionExecutor}. -/// -///

      This action is not serialized as part of the ATN, and is only required for +/// +/// This implementation of _org.antlr.v4.runtime.atn.LexerAction_ is used for tracking input offsets +/// for position-dependent actions within a _org.antlr.v4.runtime.atn.LexerActionExecutor_. +/// +/// This action is not serialized as part of the ATN, and is only required for /// position-dependent lexer actions which appear at a location other than the /// end of a rule. For more information about DFA optimizations employed for -/// lexer actions, see {@link org.antlr.v4.runtime.atn.LexerActionExecutor#append} and -/// {@link org.antlr.v4.runtime.atn.LexerActionExecutor#fixOffsetBeforeMatch}.

      -/// +/// lexer actions, see _org.antlr.v4.runtime.atn.LexerActionExecutor#append_ and +/// _org.antlr.v4.runtime.atn.LexerActionExecutor#fixOffsetBeforeMatch_. +/// /// - Sam Harwell /// - 4.2 +/// public final class LexerIndexedCustomAction: LexerAction { fileprivate let offset: Int fileprivate let action: LexerAction + /// /// Constructs a new indexed custom action by associating a character offset - /// with a {@link org.antlr.v4.runtime.atn.LexerAction}. - /// - ///

      Note: This class is only required for lexer actions for which - /// {@link org.antlr.v4.runtime.atn.LexerAction#isPositionDependent} returns {@code true}.

      - /// - /// - parameter offset: The offset into the input {@link org.antlr.v4.runtime.CharStream}, relative to + /// with a _org.antlr.v4.runtime.atn.LexerAction_. + /// + /// Note: This class is only required for lexer actions for which + /// _org.antlr.v4.runtime.atn.LexerAction#isPositionDependent_ returns `true`. + /// + /// - parameter offset: The offset into the input _org.antlr.v4.runtime.CharStream_, relative to /// the token start index, at which the specified lexer action should be /// executed. /// - parameter action: The lexer action to execute at a particular offset in the - /// input {@link org.antlr.v4.runtime.CharStream}. + /// input _org.antlr.v4.runtime.CharStream_. + /// public init(_ offset: Int, _ action: LexerAction) { self.offset = offset self.action = action } - /// Gets the location in the input {@link org.antlr.v4.runtime.CharStream} at which the lexer + /// + /// Gets the location in the input _org.antlr.v4.runtime.CharStream_ at which the lexer /// action should be executed. The value is interpreted as an offset relative /// to the token start index. - /// - /// - returns: The location in the input {@link org.antlr.v4.runtime.CharStream} at which the lexer + /// + /// - returns: The location in the input _org.antlr.v4.runtime.CharStream_ at which the lexer /// action should be executed. + /// public func getOffset() -> Int { return offset } + /// /// Gets the lexer action to execute. - /// - /// - returns: A {@link org.antlr.v4.runtime.atn.LexerAction} object which executes the lexer action. + /// + /// - returns: A _org.antlr.v4.runtime.atn.LexerAction_ object which executes the lexer action. + /// public func getAction() -> LexerAction { return action } - /// {@inheritDoc} - /// - /// - returns: This method returns the result of calling {@link #getActionType} - /// on the {@link org.antlr.v4.runtime.atn.LexerAction} returned by {@link #getAction}. 
+ /// + /// + /// + /// - returns: This method returns the result of calling _#getActionType_ + /// on the _org.antlr.v4.runtime.atn.LexerAction_ returned by _#getAction_. + /// public override func getActionType() -> LexerActionType { return action.getActionType() } - /// {@inheritDoc} - /// - returns: This method returns {@code true}. + /// + /// + /// - returns: This method returns `true`. + /// public override func isPositionDependent() -> Bool { return true } - /// {@inheritDoc} - /// - ///

      This method calls {@link #execute} on the result of {@link #getAction} - /// using the provided {@code lexer}.

      + /// + /// + /// + /// This method calls _#execute_ on the result of _#getAction_ + /// using the provided `lexer`. + /// public override func execute(_ lexer: Lexer) throws { // assume the input stream position was properly set by the calling code @@ -81,7 +97,7 @@ public final class LexerIndexedCustomAction: LexerAction { public override var hashValue: Int { - var hash: Int = MurmurHash.initialize() + var hash = MurmurHash.initialize() hash = MurmurHash.update(hash, offset) hash = MurmurHash.update(hash, action) return MurmurHash.finish(hash, 2) diff --git a/runtime/Swift/Sources/Antlr4/atn/LexerModeAction.swift b/runtime/Swift/Sources/Antlr4/atn/LexerModeAction.swift index 7e829dee8..7bab1e713 100644 --- a/runtime/Swift/Sources/Antlr4/atn/LexerModeAction.swift +++ b/runtime/Swift/Sources/Antlr4/atn/LexerModeAction.swift @@ -1,56 +1,70 @@ +/// /// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. /// Use of this file is governed by the BSD 3-clause license that /// can be found in the LICENSE.txt file in the project root. +/// -/// Implements the {@code mode} lexer action by calling {@link org.antlr.v4.runtime.Lexer#mode} with +/// +/// Implements the `mode` lexer action by calling _org.antlr.v4.runtime.Lexer#mode_ with /// the assigned mode. -/// +/// /// - Sam Harwell /// - 4.2 +/// public final class LexerModeAction: LexerAction, CustomStringConvertible { fileprivate final var mode: Int - /// Constructs a new {@code mode} action with the specified mode value. - /// - parameter mode: The mode value to pass to {@link org.antlr.v4.runtime.Lexer#mode}. + /// + /// Constructs a new `mode` action with the specified mode value. + /// - parameter mode: The mode value to pass to _org.antlr.v4.runtime.Lexer#mode_. + /// public init(_ mode: Int) { self.mode = mode } + /// /// Get the lexer mode this action should transition the lexer to. - /// - /// - returns: The lexer mode for this {@code mode} command. 
+ /// + /// - returns: The lexer mode for this `mode` command. + /// public func getMode() -> Int { return mode } - /// {@inheritDoc} - /// - returns: This method returns {@link org.antlr.v4.runtime.atn.LexerActionType#MODE}. + /// + /// + /// - returns: This method returns _org.antlr.v4.runtime.atn.LexerActionType#MODE_. + /// public override func getActionType() -> LexerActionType { return LexerActionType.mode } - /// {@inheritDoc} - /// - returns: This method returns {@code false}. + /// + /// + /// - returns: This method returns `false`. + /// public override func isPositionDependent() -> Bool { return false } - /// {@inheritDoc} - /// - ///

      This action is implemented by calling {@link org.antlr.v4.runtime.Lexer#mode} with the - /// value provided by {@link #getMode}.

      + /// + /// + /// + /// This action is implemented by calling _org.antlr.v4.runtime.Lexer#mode_ with the + /// value provided by _#getMode_. + /// override public func execute(_ lexer: Lexer) { lexer.mode(mode) } override public var hashValue: Int { - var hash: Int = MurmurHash.initialize() + var hash = MurmurHash.initialize() hash = MurmurHash.update(hash, getActionType().rawValue) hash = MurmurHash.update(hash, mode) return MurmurHash.finish(hash, 2) diff --git a/runtime/Swift/Sources/Antlr4/atn/LexerMoreAction.swift b/runtime/Swift/Sources/Antlr4/atn/LexerMoreAction.swift index c5e043443..bb9f197f3 100644 --- a/runtime/Swift/Sources/Antlr4/atn/LexerMoreAction.swift +++ b/runtime/Swift/Sources/Antlr4/atn/LexerMoreAction.swift @@ -1,41 +1,55 @@ +/// /// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. /// Use of this file is governed by the BSD 3-clause license that /// can be found in the LICENSE.txt file in the project root. +/// -/// Implements the {@code more} lexer action by calling {@link org.antlr.v4.runtime.Lexer#more}. -/// -///

      The {@code more} command does not have any parameters, so this action is -/// implemented as a singleton instance exposed by {@link #INSTANCE}.

      -/// +/// +/// Implements the `more` lexer action by calling _org.antlr.v4.runtime.Lexer#more_. +/// +/// The `more` command does not have any parameters, so this action is +/// implemented as a singleton instance exposed by _#INSTANCE_. +/// /// - Sam Harwell /// - 4.2 +/// public final class LexerMoreAction: LexerAction, CustomStringConvertible { + /// /// Provides a singleton instance of this parameterless lexer action. + /// public static let INSTANCE: LexerMoreAction = LexerMoreAction() - /// Constructs the singleton instance of the lexer {@code more} command. + /// + /// Constructs the singleton instance of the lexer `more` command. + /// private override init() { } - /// {@inheritDoc} - /// - returns: This method returns {@link org.antlr.v4.runtime.atn.LexerActionType#MORE}. + /// + /// + /// - returns: This method returns _org.antlr.v4.runtime.atn.LexerActionType#MORE_. + /// override public func getActionType() -> LexerActionType { return LexerActionType.more } - /// {@inheritDoc} - /// - returns: This method returns {@code false}. + /// + /// + /// - returns: This method returns `false`. + /// override public func isPositionDependent() -> Bool { return false } - /// {@inheritDoc} - /// - ///

      This action is implemented by calling {@link org.antlr.v4.runtime.Lexer#more}.

      + /// + /// + /// + /// This action is implemented by calling _org.antlr.v4.runtime.Lexer#more_. + /// override public func execute(_ lexer: Lexer) { lexer.more() @@ -44,7 +58,7 @@ public final class LexerMoreAction: LexerAction, CustomStringConvertible { override public var hashValue: Int { - var hash: Int = MurmurHash.initialize() + var hash = MurmurHash.initialize() hash = MurmurHash.update(hash, getActionType().rawValue) return MurmurHash.finish(hash, 1) diff --git a/runtime/Swift/Sources/Antlr4/atn/LexerPopModeAction.swift b/runtime/Swift/Sources/Antlr4/atn/LexerPopModeAction.swift index fec0ead6e..f35e78304 100644 --- a/runtime/Swift/Sources/Antlr4/atn/LexerPopModeAction.swift +++ b/runtime/Swift/Sources/Antlr4/atn/LexerPopModeAction.swift @@ -1,42 +1,56 @@ +/// /// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. /// Use of this file is governed by the BSD 3-clause license that /// can be found in the LICENSE.txt file in the project root. +/// -/// Implements the {@code popMode} lexer action by calling {@link org.antlr.v4.runtime.Lexer#popMode}. -/// -///

      The {@code popMode} command does not have any parameters, so this action is -/// implemented as a singleton instance exposed by {@link #INSTANCE}.

      -/// +/// +/// Implements the `popMode` lexer action by calling _org.antlr.v4.runtime.Lexer#popMode_. +/// +/// The `popMode` command does not have any parameters, so this action is +/// implemented as a singleton instance exposed by _#INSTANCE_. +/// /// - Sam Harwell /// - 4.2 +/// public final class LexerPopModeAction: LexerAction, CustomStringConvertible { + /// /// Provides a singleton instance of this parameterless lexer action. + /// public static let INSTANCE: LexerPopModeAction = LexerPopModeAction() - /// Constructs the singleton instance of the lexer {@code popMode} command. + /// + /// Constructs the singleton instance of the lexer `popMode` command. + /// private override init() { } - /// {@inheritDoc} - /// - returns: This method returns {@link org.antlr.v4.runtime.atn.LexerActionType#popMode}. + /// + /// + /// - returns: This method returns _org.antlr.v4.runtime.atn.LexerActionType#popMode_. + /// override public func getActionType() -> LexerActionType { return LexerActionType.popMode } - /// {@inheritDoc} - /// - returns: This method returns {@code false}. + /// + /// + /// - returns: This method returns `false`. + /// public override func isPositionDependent() -> Bool { return false } - /// {@inheritDoc} - /// - ///

      This action is implemented by calling {@link org.antlr.v4.runtime.Lexer#popMode}.

      + /// + /// + /// + /// This action is implemented by calling _org.antlr.v4.runtime.Lexer#popMode_. + /// public override func execute(_ lexer: Lexer) throws { try lexer.popMode() @@ -45,7 +59,7 @@ public final class LexerPopModeAction: LexerAction, CustomStringConvertible { override public var hashValue: Int { - var hash: Int = MurmurHash.initialize() + var hash = MurmurHash.initialize() hash = MurmurHash.update(hash, getActionType().rawValue) return MurmurHash.finish(hash, 1) diff --git a/runtime/Swift/Sources/Antlr4/atn/LexerPushModeAction.swift b/runtime/Swift/Sources/Antlr4/atn/LexerPushModeAction.swift index 28bc75a98..33d5bf79b 100644 --- a/runtime/Swift/Sources/Antlr4/atn/LexerPushModeAction.swift +++ b/runtime/Swift/Sources/Antlr4/atn/LexerPushModeAction.swift @@ -1,49 +1,63 @@ +/// /// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. /// Use of this file is governed by the BSD 3-clause license that /// can be found in the LICENSE.txt file in the project root. +/// -/// Implements the {@code pushMode} lexer action by calling -/// {@link org.antlr.v4.runtime.Lexer#pushMode} with the assigned mode. -/// +/// +/// Implements the `pushMode` lexer action by calling +/// _org.antlr.v4.runtime.Lexer#pushMode_ with the assigned mode. +/// /// - Sam Harwell /// - 4.2 +/// public final class LexerPushModeAction: LexerAction, CustomStringConvertible { fileprivate final var mode: Int - /// Constructs a new {@code pushMode} action with the specified mode value. - /// - parameter mode: The mode value to pass to {@link org.antlr.v4.runtime.Lexer#pushMode}. + /// + /// Constructs a new `pushMode` action with the specified mode value. + /// - parameter mode: The mode value to pass to _org.antlr.v4.runtime.Lexer#pushMode_. + /// public init(_ mode: Int) { self.mode = mode } + /// /// Get the lexer mode this action should transition the lexer to. - /// - /// - returns: The lexer mode for this {@code pushMode} command. 
+ /// + /// - returns: The lexer mode for this `pushMode` command. + /// public func getMode() -> Int { return mode } - /// {@inheritDoc} - /// - returns: This method returns {@link org.antlr.v4.runtime.atn.LexerActionType#pushMode}. + /// + /// + /// - returns: This method returns _org.antlr.v4.runtime.atn.LexerActionType#pushMode_. + /// public override func getActionType() -> LexerActionType { return LexerActionType.pushMode } - /// {@inheritDoc} - /// - returns: This method returns {@code false}. + /// + /// + /// - returns: This method returns `false`. + /// public override func isPositionDependent() -> Bool { return false } - /// {@inheritDoc} - /// - ///

      This action is implemented by calling {@link org.antlr.v4.runtime.Lexer#pushMode} with the - /// value provided by {@link #getMode}.

      + /// + /// + /// + /// This action is implemented by calling _org.antlr.v4.runtime.Lexer#pushMode_ with the + /// value provided by _#getMode_. + /// override public func execute(_ lexer: Lexer) { lexer.pushMode(mode) @@ -52,7 +66,7 @@ public final class LexerPushModeAction: LexerAction, CustomStringConvertible { override public var hashValue: Int { - var hash: Int = MurmurHash.initialize() + var hash = MurmurHash.initialize() hash = MurmurHash.update(hash, getActionType().rawValue) hash = MurmurHash.update(hash, mode) return MurmurHash.finish(hash, 2) diff --git a/runtime/Swift/Sources/Antlr4/atn/LexerSkipAction.swift b/runtime/Swift/Sources/Antlr4/atn/LexerSkipAction.swift index 87716aeea..bbdd06d2f 100644 --- a/runtime/Swift/Sources/Antlr4/atn/LexerSkipAction.swift +++ b/runtime/Swift/Sources/Antlr4/atn/LexerSkipAction.swift @@ -1,41 +1,55 @@ +/// /// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. /// Use of this file is governed by the BSD 3-clause license that /// can be found in the LICENSE.txt file in the project root. +/// -/// Implements the {@code skip} lexer action by calling {@link org.antlr.v4.runtime.Lexer#skip}. -/// -///

      The {@code skip} command does not have any parameters, so this action is -/// implemented as a singleton instance exposed by {@link #INSTANCE}.

      -/// +/// +/// Implements the `skip` lexer action by calling _org.antlr.v4.runtime.Lexer#skip_. +/// +/// The `skip` command does not have any parameters, so this action is +/// implemented as a singleton instance exposed by _#INSTANCE_. +/// /// - Sam Harwell /// - 4.2 +/// public final class LexerSkipAction: LexerAction, CustomStringConvertible { + /// /// Provides a singleton instance of this parameterless lexer action. + /// public static let INSTANCE: LexerSkipAction = LexerSkipAction() - /// Constructs the singleton instance of the lexer {@code skip} command. + /// + /// Constructs the singleton instance of the lexer `skip` command. + /// private override init() { } - /// {@inheritDoc} - /// - returns: This method returns {@link org.antlr.v4.runtime.atn.LexerActionType#SKIP}. + /// + /// + /// - returns: This method returns _org.antlr.v4.runtime.atn.LexerActionType#SKIP_. + /// override public func getActionType() -> LexerActionType { return LexerActionType.skip } - /// {@inheritDoc} - /// - returns: This method returns {@code false}. + /// + /// + /// - returns: This method returns `false`. + /// override public func isPositionDependent() -> Bool { return false } - /// {@inheritDoc} - /// - ///

      This action is implemented by calling {@link org.antlr.v4.runtime.Lexer#skip}.

      + /// + /// + /// + /// This action is implemented by calling _org.antlr.v4.runtime.Lexer#skip_. + /// override public func execute(_ lexer: Lexer) { lexer.skip() @@ -44,7 +58,7 @@ public final class LexerSkipAction: LexerAction, CustomStringConvertible { override public var hashValue: Int { - var hash: Int = MurmurHash.initialize() + var hash = MurmurHash.initialize() hash = MurmurHash.update(hash, getActionType().rawValue) return MurmurHash.finish(hash, 1) } diff --git a/runtime/Swift/Sources/Antlr4/atn/LexerTypeAction.swift b/runtime/Swift/Sources/Antlr4/atn/LexerTypeAction.swift index 725942bea..10b41698f 100644 --- a/runtime/Swift/Sources/Antlr4/atn/LexerTypeAction.swift +++ b/runtime/Swift/Sources/Antlr4/atn/LexerTypeAction.swift @@ -1,47 +1,61 @@ +/// /// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. /// Use of this file is governed by the BSD 3-clause license that /// can be found in the LICENSE.txt file in the project root. +/// -/// Implements the {@code type} lexer action by calling {@link org.antlr.v4.runtime.Lexer#setType} +/// +/// Implements the `type` lexer action by calling _org.antlr.v4.runtime.Lexer#setType_ /// with the assigned type. -/// +/// /// - Sam Harwell /// - 4.2 +/// public class LexerTypeAction: LexerAction, CustomStringConvertible { fileprivate final var type: Int - /// Constructs a new {@code type} action with the specified token type value. - /// - parameter type: The type to assign to the token using {@link org.antlr.v4.runtime.Lexer#setType}. + /// + /// Constructs a new `type` action with the specified token type value. + /// - parameter type: The type to assign to the token using _org.antlr.v4.runtime.Lexer#setType_. + /// public init(_ type: Int) { self.type = type } + /// /// Gets the type to assign to a token created by the lexer. /// - returns: The type to assign to a token created by the lexer. 
+ /// public func getType() -> Int { return type } - /// {@inheritDoc} - /// - returns: This method returns {@link org.antlr.v4.runtime.atn.LexerActionType#TYPE}. + /// + /// + /// - returns: This method returns _org.antlr.v4.runtime.atn.LexerActionType#TYPE_. + /// public override func getActionType() -> LexerActionType { return LexerActionType.type } - /// {@inheritDoc} - /// - returns: This method returns {@code false}. + /// + /// + /// - returns: This method returns `false`. + /// override public func isPositionDependent() -> Bool { return false } - /// {@inheritDoc} - /// - ///

      This action is implemented by calling {@link org.antlr.v4.runtime.Lexer#setType} with the - /// value provided by {@link #getType}.

      + /// + /// + /// + /// This action is implemented by calling _org.antlr.v4.runtime.Lexer#setType_ with the + /// value provided by _#getType_. + /// public override func execute(_ lexer: Lexer) { lexer.setType(type) @@ -50,7 +64,7 @@ public class LexerTypeAction: LexerAction, CustomStringConvertible { override public var hashValue: Int { - var hash: Int = MurmurHash.initialize() + var hash = MurmurHash.initialize() hash = MurmurHash.update(hash, getActionType().rawValue) hash = MurmurHash.update(hash, type) return MurmurHash.finish(hash, 2) diff --git a/runtime/Swift/Sources/Antlr4/atn/LookaheadEventInfo.swift b/runtime/Swift/Sources/Antlr4/atn/LookaheadEventInfo.swift index 92a5cf40f..045bc121f 100644 --- a/runtime/Swift/Sources/Antlr4/atn/LookaheadEventInfo.swift +++ b/runtime/Swift/Sources/Antlr4/atn/LookaheadEventInfo.swift @@ -1,27 +1,33 @@ +/// /// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. /// Use of this file is governed by the BSD 3-clause license that /// can be found in the LICENSE.txt file in the project root. +/// +/// /// This class represents profiling event information for tracking the lookahead /// depth required in order to make a prediction. -/// +/// /// - 4.3 +/// public class LookaheadEventInfo: DecisionEventInfo { - /// Constructs a new instance of the {@link org.antlr.v4.runtime.atn.LookaheadEventInfo} class with + /// + /// Constructs a new instance of the _org.antlr.v4.runtime.atn.LookaheadEventInfo_ class with /// the specified detailed lookahead information. 
- /// + /// /// - parameter decision: The decision number /// - parameter configs: The final configuration set containing the necessary - /// information to determine the result of a prediction, or {@code null} if + /// information to determine the result of a prediction, or `null` if /// the final configuration set is not available /// - parameter input: The input token stream /// - parameter startIndex: The start index for the current prediction /// - parameter stopIndex: The index at which the prediction was finally made - /// - parameter fullCtx: {@code true} if the current lookahead is part of an LL - /// prediction; otherwise, {@code false} if the current lookahead is part of + /// - parameter fullCtx: `true` if the current lookahead is part of an LL + /// prediction; otherwise, `false` if the current lookahead is part of /// an SLL prediction + /// public override init(_ decision: Int, _ configs: ATNConfigSet?, _ input: TokenStream, _ startIndex: Int, _ stopIndex: Int, diff --git a/runtime/Swift/Sources/Antlr4/atn/LookupATNConfig.swift b/runtime/Swift/Sources/Antlr4/atn/LookupATNConfig.swift index ca26684c1..b8c2d1d4f 100644 --- a/runtime/Swift/Sources/Antlr4/atn/LookupATNConfig.swift +++ b/runtime/Swift/Sources/Antlr4/atn/LookupATNConfig.swift @@ -1,6 +1,8 @@ +/// /// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. /// Use of this file is governed by the BSD 3-clause license that /// can be found in the LICENSE.txt file in the project root. +/// // // LookupATNConfig.swift diff --git a/runtime/Swift/Sources/Antlr4/atn/LookupDictionary.swift b/runtime/Swift/Sources/Antlr4/atn/LookupDictionary.swift index fc0843b17..1f8e1e21e 100644 --- a/runtime/Swift/Sources/Antlr4/atn/LookupDictionary.swift +++ b/runtime/Swift/Sources/Antlr4/atn/LookupDictionary.swift @@ -1,6 +1,8 @@ +/// /// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. 
/// Use of this file is governed by the BSD 3-clause license that /// can be found in the LICENSE.txt file in the project root. +/// // // LookupDictionary.swift diff --git a/runtime/Swift/Sources/Antlr4/atn/LoopEndState.swift b/runtime/Swift/Sources/Antlr4/atn/LoopEndState.swift index 93456480e..d0cc9f3d4 100644 --- a/runtime/Swift/Sources/Antlr4/atn/LoopEndState.swift +++ b/runtime/Swift/Sources/Antlr4/atn/LoopEndState.swift @@ -1,10 +1,14 @@ +/// /// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. /// Use of this file is governed by the BSD 3-clause license that /// can be found in the LICENSE.txt file in the project root. +/// +/// /// Mark the end of a * or + loop. +/// public final class LoopEndState: ATNState { public var loopBackState: ATNState? diff --git a/runtime/Swift/Sources/Antlr4/atn/NotSetTransition.swift b/runtime/Swift/Sources/Antlr4/atn/NotSetTransition.swift index d109484b0..4a2fcc13a 100644 --- a/runtime/Swift/Sources/Antlr4/atn/NotSetTransition.swift +++ b/runtime/Swift/Sources/Antlr4/atn/NotSetTransition.swift @@ -1,6 +1,8 @@ +/// /// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. /// Use of this file is governed by the BSD 3-clause license that /// can be found in the LICENSE.txt file in the project root. +/// public final class NotSetTransition: SetTransition { diff --git a/runtime/Swift/Sources/Antlr4/atn/OrderedATNConfig.swift b/runtime/Swift/Sources/Antlr4/atn/OrderedATNConfig.swift index 4ccb03ace..f73501008 100644 --- a/runtime/Swift/Sources/Antlr4/atn/OrderedATNConfig.swift +++ b/runtime/Swift/Sources/Antlr4/atn/OrderedATNConfig.swift @@ -1,6 +1,8 @@ +/// /// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. /// Use of this file is governed by the BSD 3-clause license that /// can be found in the LICENSE.txt file in the project root. 
+/// // // OrderedATNConfig.swift diff --git a/runtime/Swift/Sources/Antlr4/atn/OrderedATNConfigSet.swift b/runtime/Swift/Sources/Antlr4/atn/OrderedATNConfigSet.swift index 45b7d38a3..9706e6e16 100644 --- a/runtime/Swift/Sources/Antlr4/atn/OrderedATNConfigSet.swift +++ b/runtime/Swift/Sources/Antlr4/atn/OrderedATNConfigSet.swift @@ -1,10 +1,14 @@ +/// /// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. /// Use of this file is governed by the BSD 3-clause license that /// can be found in the LICENSE.txt file in the project root. +/// -/// +/// +/// /// - Sam Harwell +/// public class OrderedATNConfigSet: ATNConfigSet { diff --git a/runtime/Swift/Sources/Antlr4/atn/ParseInfo.swift b/runtime/Swift/Sources/Antlr4/atn/ParseInfo.swift index a1529902a..244226a5c 100644 --- a/runtime/Swift/Sources/Antlr4/atn/ParseInfo.swift +++ b/runtime/Swift/Sources/Antlr4/atn/ParseInfo.swift @@ -1,13 +1,17 @@ +/// /// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. /// Use of this file is governed by the BSD 3-clause license that /// can be found in the LICENSE.txt file in the project root. +/// +/// /// This class provides access to specific and aggregate statistics gathered /// during profiling of a parser. -/// +/// /// - 4.3 +/// public class ParseInfo { internal let atnSimulator: ProfilingATNSimulator @@ -16,21 +20,25 @@ public class ParseInfo { self.atnSimulator = atnSimulator } - /// Gets an array of {@link org.antlr.v4.runtime.atn.DecisionInfo} instances containing the profiling + /// + /// Gets an array of _org.antlr.v4.runtime.atn.DecisionInfo_ instances containing the profiling /// information gathered for each decision in the ATN. - /// - /// - returns: An array of {@link org.antlr.v4.runtime.atn.DecisionInfo} instances, indexed by decision + /// + /// - returns: An array of _org.antlr.v4.runtime.atn.DecisionInfo_ instances, indexed by decision /// number. 
+ /// public func getDecisionInfo() -> [DecisionInfo] { return atnSimulator.getDecisionInfo() } + /// /// Gets the decision numbers for decisions that required one or more /// full-context predictions during parsing. These are decisions for which - /// {@link org.antlr.v4.runtime.atn.DecisionInfo#LL_Fallback} is non-zero. - /// + /// _org.antlr.v4.runtime.atn.DecisionInfo#LL_Fallback_ is non-zero. + /// /// - returns: A list of decision numbers which required one or more /// full-context predictions during parsing. + /// public func getLLDecisions() -> Array { var decisions: [DecisionInfo] = atnSimulator.getDecisionInfo() var LL: Array = Array() @@ -45,9 +53,11 @@ public class ParseInfo { return LL } + /// /// Gets the total time spent during prediction across all decisions made /// during parsing. This value is the sum of - /// {@link org.antlr.v4.runtime.atn.DecisionInfo#timeInPrediction} for all decisions. + /// _org.antlr.v4.runtime.atn.DecisionInfo#timeInPrediction_ for all decisions. + /// public func getTotalTimeInPrediction() -> Int64 { var decisions: [DecisionInfo] = atnSimulator.getDecisionInfo() var t: Int64 = 0 @@ -58,9 +68,11 @@ public class ParseInfo { return t } + /// /// Gets the total number of SLL lookahead operations across all decisions /// made during parsing. This value is the sum of - /// {@link org.antlr.v4.runtime.atn.DecisionInfo#SLL_TotalLook} for all decisions. + /// _org.antlr.v4.runtime.atn.DecisionInfo#SLL_TotalLook_ for all decisions. + /// public func getTotalSLLLookaheadOps() -> Int64 { var decisions: [DecisionInfo] = atnSimulator.getDecisionInfo() var k: Int64 = 0 @@ -71,9 +83,11 @@ public class ParseInfo { return k } + /// /// Gets the total number of LL lookahead operations across all decisions /// made during parsing. This value is the sum of - /// {@link org.antlr.v4.runtime.atn.DecisionInfo#LL_TotalLook} for all decisions. + /// _org.antlr.v4.runtime.atn.DecisionInfo#LL_TotalLook_ for all decisions. 
+ /// public func getTotalLLLookaheadOps() -> Int64 { var decisions: [DecisionInfo] = atnSimulator.getDecisionInfo() var k: Int64 = 0 @@ -84,8 +98,10 @@ public class ParseInfo { return k } + /// /// Gets the total number of ATN lookahead operations for SLL prediction /// across all decisions made during parsing. + /// public func getTotalSLLATNLookaheadOps() -> Int64 { var decisions: [DecisionInfo] = atnSimulator.getDecisionInfo() var k: Int64 = 0 @@ -96,8 +112,10 @@ public class ParseInfo { return k } + /// /// Gets the total number of ATN lookahead operations for LL prediction /// across all decisions made during parsing. + /// public func getTotalLLATNLookaheadOps() -> Int64 { var decisions: [DecisionInfo] = atnSimulator.getDecisionInfo() var k: Int64 = 0 @@ -108,12 +126,14 @@ public class ParseInfo { return k } + /// /// Gets the total number of ATN lookahead operations for SLL and LL /// prediction across all decisions made during parsing. - /// - ///

      - /// This value is the sum of {@link #getTotalSLLATNLookaheadOps} and - /// {@link #getTotalLLATNLookaheadOps}.

      + /// + /// + /// This value is the sum of _#getTotalSLLATNLookaheadOps_ and + /// _#getTotalLLATNLookaheadOps_. + /// public func getTotalATNLookaheadOps() -> Int64 { var decisions: [DecisionInfo] = atnSimulator.getDecisionInfo() var k: Int64 = 0 @@ -125,8 +145,10 @@ public class ParseInfo { return k } + /// /// Gets the total number of DFA states stored in the DFA cache for all /// decisions in the ATN. + /// public func getDFASize() -> Int { var n: Int = 0 let decisionToDFA: [DFA] = atnSimulator.decisionToDFA @@ -137,8 +159,10 @@ public class ParseInfo { return n } + /// /// Gets the total number of DFA states stored in the DFA cache for a /// particular decision. + /// public func getDFASize(_ decision: Int) -> Int { let decisionToDFA: DFA = atnSimulator.decisionToDFA[decision] return decisionToDFA.states.count diff --git a/runtime/Swift/Sources/Antlr4/atn/ParserATNSimulator.swift b/runtime/Swift/Sources/Antlr4/atn/ParserATNSimulator.swift index edc3c38fc..530738b22 100644 --- a/runtime/Swift/Sources/Antlr4/atn/ParserATNSimulator.swift +++ b/runtime/Swift/Sources/Antlr4/atn/ParserATNSimulator.swift @@ -1,20 +1,23 @@ +/// /// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. /// Use of this file is governed by the BSD 3-clause license that /// can be found in the LICENSE.txt file in the project root. +/// +/// /// The embodiment of the adaptive LL(*), ALL(*), parsing strategy. -/// -///

      +/// +/// /// The basic complexity of the adaptive strategy makes it harder to understand. /// We begin with ATN simulation to build paths in a DFA. Subsequent prediction /// requests go through the DFA first. If they reach a state without an edge for /// the current symbol, the algorithm fails over to the ATN simulation to /// complete the DFA path for the current input (until it finds a conflict state -/// or uniquely predicting state).

      -/// -///

      +/// or uniquely predicting state). +/// +/// /// All of that is done without using the outer context because we want to create /// a DFA that is not dependent upon the rule invocation stack when we do a /// prediction. One DFA works in all contexts. We avoid using context not @@ -24,9 +27,9 @@ /// prediction occurs without invoking another rule's ATN, there are no context /// stacks in the configurations. When lack of context leads to a conflict, we /// don't know if it's an ambiguity or a weakness in the strong LL(*) parsing -/// strategy (versus full LL(*)).

      -/// -///

      +/// strategy (versus full LL(*)). +/// +/// /// When SLL yields a configuration set with conflict, we rewind the input and /// retry the ATN simulation, this time using full outer context without adding /// to the DFA. Configuration context stacks will be the full invocation stacks @@ -34,18 +37,18 @@ /// definitively say we have a true ambiguity for that input sequence. If we /// don't get a conflict, it implies that the decision is sensitive to the outer /// context. (It is not context-sensitive in the sense of context-sensitive -/// grammars.)

      -/// -///

      +/// grammars.) +/// +/// /// The next time we reach this DFA state with an SLL conflict, through DFA /// simulation, we will again retry the ATN simulation using full context mode. /// This is slow because we can't save the results and have to "interpret" the -/// ATN each time we get that input.

      -/// -///

      -/// CACHING FULL CONTEXT PREDICTIONS

      -/// -///

      +/// ATN each time we get that input. +/// +/// +/// __CACHING FULL CONTEXT PREDICTIONS__ +/// +/// /// We could cache results from full context to predicted alternative easily and /// that saves a lot of time but doesn't work in presence of predicates. The set /// of visible predicates from the ATN start state changes depending on the @@ -54,9 +57,9 @@ /// than interpreting and much more complicated. Also required a huge amount of /// memory. The goal is not to create the world's fastest parser anyway. I'd like /// to keep this algorithm simple. By launching multiple threads, we can improve -/// the speed of parsing across a large number of files.

      -/// -///

      +/// the speed of parsing across a large number of files. +/// +/// /// There is no strict ordering between the amount of input used by SLL vs LL, /// which makes it really hard to build a cache for full context. Let's say that /// we have input A B C that leads to an SLL conflict with full context X. That @@ -67,140 +70,140 @@ /// full context prediction, which would lead us to requiring more input than the /// original A B C. To make a prediction cache work, we have to track the exact /// input used during the previous prediction. That amounts to a cache that maps -/// X to a specific DFA for that context.

      -/// -///

      +/// X to a specific DFA for that context. +/// +/// /// Something should be done for left-recursive expression predictions. They are /// likely LL(1) + pred eval. Easier to do the whole SLL unless error and retry -/// with full LL thing Sam does.

      -/// -///

      -/// AVOIDING FULL CONTEXT PREDICTION

      -/// -///

      +/// with full LL thing Sam does. +/// +/// +/// __AVOIDING FULL CONTEXT PREDICTION__ +/// +/// /// We avoid doing full context retry when the outer context is empty, we did not /// dip into the outer context by falling off the end of the decision state rule, -/// or when we force SLL mode.

      -/// -///

      +/// or when we force SLL mode. +/// +/// /// As an example of the not dip into outer context case, consider as super /// constructor calls versus function calls. One grammar might look like -/// this:

      -/// -///
      +/// this:
      +/// 
      +/// 
       /// ctorBody
       /// : '{' superCall? stat* '}'
       /// ;
      -/// 
      -/// -///

      -/// Or, you might see something like

      -/// -///
      +/// 
      +/// 
      +/// 
      +/// Or, you might see something like
      +/// 
      +/// 
       /// stat
       /// : superCall ';'
       /// | expression ';'
       /// | ...
       /// ;
      -/// 
      -/// -///

      +/// +/// +/// /// In both cases I believe that no closure operations will dip into the outer /// context. In the first case ctorBody in the worst case will stop at the '}'. /// In the 2nd case it should stop at the ';'. Both cases should stay within the -/// entry rule and not dip into the outer context.

      -/// -///

      -/// PREDICATES

      -/// -///

      +/// entry rule and not dip into the outer context. +/// +/// +/// __PREDICATES__ +/// +/// /// Predicates are always evaluated if present in either SLL or LL both. SLL and /// LL simulation deals with predicates differently. SLL collects predicates as /// it performs closure operations like ANTLR v3 did. It delays predicate /// evaluation until it reaches and accept state. This allows us to cache the SLL /// ATN simulation whereas, if we had evaluated predicates on-the-fly during /// closure, the DFA state configuration sets would be different and we couldn't -/// build up a suitable DFA.

      -/// -///

      +/// build up a suitable DFA. +/// +/// /// When building a DFA accept state during ATN simulation, we evaluate any /// predicates and return the sole semantically valid alternative. If there is /// more than 1 alternative, we report an ambiguity. If there are 0 alternatives, /// we throw an exception. Alternatives without predicates act like they have /// true predicates. The simple way to think about it is to strip away all /// alternatives with false predicates and choose the minimum alternative that -/// remains.

      -/// -///

      +/// remains. +/// +/// /// When we start in the DFA and reach an accept state that's predicated, we test /// those and return the minimum semantically viable alternative. If no -/// alternatives are viable, we throw an exception.

      -/// -///

      +/// alternatives are viable, we throw an exception. +/// +/// /// During full LL ATN simulation, closure always evaluates predicates and /// on-the-fly. This is crucial to reducing the configuration set size during /// closure. It hits a landmine when parsing with the Java grammar, for example, -/// without this on-the-fly evaluation.

      -/// -///

      -/// SHARING DFA

      -/// -///

      +/// without this on-the-fly evaluation. +/// +/// +/// __SHARING DFA__ +/// +/// /// All instances of the same parser share the same decision DFAs through a /// static field. Each instance gets its own ATN simulator but they share the -/// same {@link #decisionToDFA} field. They also share a -/// {@link org.antlr.v4.runtime.atn.PredictionContextCache} object that makes sure that all -/// {@link org.antlr.v4.runtime.atn.PredictionContext} objects are shared among the DFA states. This makes -/// a big size difference.

      -/// -///

      -/// THREAD SAFETY

      -/// -///

      -/// The {@link org.antlr.v4.runtime.atn.ParserATNSimulator} locks on the {@link #decisionToDFA} field when -/// it adds a new DFA object to that array. {@link #addDFAEdge} +/// same _#decisionToDFA_ field. They also share a +/// _org.antlr.v4.runtime.atn.PredictionContextCache_ object that makes sure that all +/// _org.antlr.v4.runtime.atn.PredictionContext_ objects are shared among the DFA states. This makes +/// a big size difference. +/// +/// +/// __THREAD SAFETY__ +/// +/// +/// The _org.antlr.v4.runtime.atn.ParserATNSimulator_ locks on the _#decisionToDFA_ field when +/// it adds a new DFA object to that array. _#addDFAEdge_ /// locks on the DFA for the current decision when setting the -/// {@link org.antlr.v4.runtime.dfa.DFAState#edges} field. {@link #addDFAState} locks on +/// _org.antlr.v4.runtime.dfa.DFAState#edges_ field. _#addDFAState_ locks on /// the DFA for the current decision when looking up a DFA state to see if it /// already exists. We must make sure that all requests to add DFA states that /// are equivalent result in the same shared DFA object. This is because lots of /// threads will be trying to update the DFA at once. The -/// {@link #addDFAState} method also locks inside the DFA lock +/// _#addDFAState_ method also locks inside the DFA lock /// but this time on the shared context cache when it rebuilds the -/// configurations' {@link org.antlr.v4.runtime.atn.PredictionContext} objects using cached +/// configurations' _org.antlr.v4.runtime.atn.PredictionContext_ objects using cached /// subgraphs/nodes. No other locking occurs, even during DFA simulation. This is /// safe as long as we can guarantee that all threads referencing -/// {@code s.edge[t]} get the same physical target {@link org.antlr.v4.runtime.dfa.DFAState}, or -/// {@code null}. Once into the DFA, the DFA simulation does not reference the -/// {@link org.antlr.v4.runtime.dfa.DFA#states} map. 
It follows the {@link org.antlr.v4.runtime.dfa.DFAState#edges} field to new -/// targets. The DFA simulator will either find {@link org.antlr.v4.runtime.dfa.DFAState#edges} to be -/// {@code null}, to be non-{@code null} and {@code dfa.edges[t]} null, or -/// {@code dfa.edges[t]} to be non-null. The -/// {@link #addDFAEdge} method could be racing to set the field -/// but in either case the DFA simulator works; if {@code null}, and requests ATN -/// simulation. It could also race trying to get {@code dfa.edges[t]}, but either -/// way it will work because it's not doing a test and set operation.

      -/// -///

      -/// Starting with SLL then failing to combined SLL/LL (Two-Stage -/// Parsing)

      -/// -///

      +/// `s.edge[t]` get the same physical target _org.antlr.v4.runtime.dfa.DFAState_, or +/// `null`. Once into the DFA, the DFA simulation does not reference the +/// _org.antlr.v4.runtime.dfa.DFA#states_ map. It follows the _org.antlr.v4.runtime.dfa.DFAState#edges_ field to new +/// targets. The DFA simulator will either find _org.antlr.v4.runtime.dfa.DFAState#edges_ to be +/// `null`, to be non-`null` and `dfa.edges[t]` null, or +/// `dfa.edges[t]` to be non-null. The +/// _#addDFAEdge_ method could be racing to set the field +/// but in either case the DFA simulator works; if `null`, and requests ATN +/// simulation. It could also race trying to get `dfa.edges[t]`, but either +/// way it will work because it's not doing a test and set operation. +/// +/// +/// __Starting with SLL then failing to combined SLL/LL (Two-Stage +/// Parsing)__ +/// +/// /// Sam pointed out that if SLL does not give a syntax error, then there is no /// point in doing full LL, which is slower. We only have to try LL if we get a /// syntax error. For maximum speed, Sam starts the parser set to pure SLL -/// mode with the {@link org.antlr.v4.runtime.BailErrorStrategy}:

      -/// -///
      -/// parser.{@link org.antlr.v4.runtime.Parser#getInterpreter() getInterpreter()}.{@link #setPredictionMode setPredictionMode}{@code (}{@link PredictionMode#SLL}{@code )};
      -/// parser.{@link org.antlr.v4.runtime.Parser#setErrorHandler setErrorHandler}(new {@link org.antlr.v4.runtime.BailErrorStrategy}());
      -/// 
      -/// -///

      +/// mode with the _org.antlr.v4.runtime.BailErrorStrategy_: +/// +/// +/// parser._org.antlr.v4.runtime.Parser#getInterpreter() getInterpreter()_._#setPredictionMode setPredictionMode_`(`_PredictionMode#SLL_`)`; +/// parser._org.antlr.v4.runtime.Parser#setErrorHandler setErrorHandler_(new _org.antlr.v4.runtime.BailErrorStrategy_()); +/// +/// +/// /// If it does not get a syntax error, then we're done. If it does get a syntax -/// error, we need to retry with the combined SLL/LL strategy.

      -/// -///

      +/// error, we need to retry with the combined SLL/LL strategy. +/// +/// /// The reason this works is as follows. If there are no SLL conflicts, then the /// grammar is SLL (at least for that input set). If there is an SLL conflict, /// the full LL analysis must yield a set of viable alternatives which is a @@ -214,43 +217,51 @@ /// analysis says it's not viable. If SLL conflict resolution chooses an /// alternative within the LL set, them both SLL and LL would choose the same /// alternative because they both choose the minimum of multiple conflicting -/// alternatives.

      -/// -///

      -/// Let's say we have a set of SLL conflicting alternatives {@code {1, 2, 3}} and -/// a smaller LL set called s. If s is {@code {2, 3}}, then SLL +/// alternatives. +/// +/// +/// Let's say we have a set of SLL conflicting alternatives `{1, 2, 3`} and +/// a smaller LL set called __s__. If __s__ is `{2, 3`}, then SLL /// parsing will get an error because SLL will pursue alternative 1. If -/// s is {@code {1, 2}} or {@code {1, 3}} then both SLL and LL will +/// __s__ is `{1, 2`} or `{1, 3`} then both SLL and LL will /// choose the same alternative because alternative one is the minimum of either -/// set. If s is {@code {2}} or {@code {3}} then SLL will get a syntax -/// error. If s is {@code {1}} then SLL will succeed.

      -/// -///

      +/// set. If __s__ is `{2`} or `{3`} then SLL will get a syntax +/// error. If __s__ is `{1`} then SLL will succeed. +/// +/// /// Of course, if the input is invalid, then we will get an error for sure in /// both SLL and LL parsing. Erroneous input will therefore require 2 passes over -/// the input.

      +/// the input. +/// import Foundation open class ParserATNSimulator: ATNSimulator { - public let debug: Bool = false - public let debug_list_atn_decisions: Bool = false - public let dfa_debug: Bool = false - public let retry_debug: Bool = false + public let debug = false + public let debug_list_atn_decisions = false + public let dfa_debug = false + public let retry_debug = false + + /// /// Just in case this optimization is bad, add an ENV variable to turn it off + /// public static let TURN_OFF_LR_LOOP_ENTRY_BRANCH_OPT: Bool = { if let value = ProcessInfo.processInfo.environment["TURN_OFF_LR_LOOP_ENTRY_BRANCH_OPT"] { return NSString(string: value).boolValue } return false }() - internal final var parser: Parser + + internal final unowned let parser: Parser public final var decisionToDFA: [DFA] + /// /// SLL, LL, or LL + exact ambig detection? + /// - private var mode: PredictionMode = PredictionMode.LL + private var mode = PredictionMode.LL + /// /// Each prediction operation uses a cache for merge of prediction contexts. /// Don't keep around as it wastes huge amounts of memory. DoubleKeyMap /// isn't synchronized but we're ok since two threads shouldn't reuse same @@ -258,18 +269,23 @@ open class ParserATNSimulator: ATNSimulator { /// This maps graphs a and b to merged result c. (a,b)→c. We can avoid /// the merge if we ever see a and b again. Note that (b,a)→c should /// also be examined during cache lookup. + /// internal final var mergeCache: DoubleKeyMap? // LAME globals to avoid parameters!!!!! I need these down deep in predTransition internal var _input: TokenStream! - internal var _startIndex: Int = 0 + internal var _startIndex = 0 internal var _outerContext: ParserRuleContext! internal var _dfa: DFA? + /// /// mutex for DFAState change + /// private var dfaStateMutex = Mutex() + /// /// mutex for changes in a DFAStates map + /// private var dfaStatesMutex = Mutex() // /// Testing only! 
@@ -296,7 +312,6 @@ open class ParserATNSimulator: ATNSimulator { override open func clearDFA() { - //for var d: Int = 0; d < decisionToDFA.count; d++ { for d in 0.. Int { var outerContext = outerContext - if debug || debug_list_atn_decisions { - var debugInfo = "adaptivePredict decision \(decision) " - debugInfo += "exec LA(1)==\(try getLookaheadName(input)) " - debugInfo += "line \(try input.LT(1)!.getLine()):" - debugInfo += "\(try input.LT(1)!.getCharPositionInLine())" - print(debugInfo) + if debug || debug_list_atn_decisions { + var debugInfo = "adaptivePredict decision \(decision) " + debugInfo += "exec LA(1)==\(try getLookaheadName(input)) " + debugInfo += "line \(try input.LT(1)!.getLine()):" + debugInfo += "\(try input.LT(1)!.getCharPositionInLine())" + print(debugInfo) + } + + + _input = input + _startIndex = input.index() + _outerContext = outerContext + let dfa = decisionToDFA[decision] + _dfa = dfa + + let m = input.mark() + let index = _startIndex + + // Now we are certain to have a specific decision's DFA + // But, do we still need an initial state? + //TODO: exception handler + do { + var s0: DFAState? + if dfa.isPrecedenceDfa() { + // the start state for a precedence DFA depends on the current + // parser precedence, and is provided by a DFA method. 
+ s0 = try dfa.getPrecedenceStartState(parser.getPrecedence()) + } else { + // the start state for a "regular" DFA is just s0 + s0 = dfa.s0 } + if s0 == nil { + //BIG BUG + if outerContext == nil { + outerContext = ParserRuleContext.EMPTY + } + if debug || debug_list_atn_decisions { + var debugInfo = "predictATN decision \(dfa.decision) " + debugInfo += "exec LA(1)==\(try getLookaheadName(input)), " + debugInfo += "outerContext=\(outerContext!.toString(parser))" + print(debugInfo) + } - _input = input - _startIndex = input.index() - _outerContext = outerContext - var dfa: DFA = decisionToDFA[decision] - _dfa = dfa + let fullCtx = false + var s0_closure = try computeStartState(dfa.atnStartState, ParserRuleContext.EMPTY, fullCtx) - var m: Int = input.mark() - var index: Int = _startIndex - - // Now we are certain to have a specific decision's DFA - // But, do we still need an initial state? - //TODO: exception handler - do { - var s0: DFAState? if dfa.isPrecedenceDfa() { - // the start state for a precedence DFA depends on the current - // parser precedence, and is provided by a DFA method. - s0 = try dfa.getPrecedenceStartState(parser.getPrecedence()) + /// + /// If this is a precedence DFA, we use applyPrecedenceFilter + /// to convert the computed start state to a precedence start + /// state. We then use DFA.setPrecedenceStartState to set the + /// appropriate start state for the precedence level rather + /// than simply setting DFA.s0. + /// + //added by janyou 20160224 + // dfa.s0!.configs = s0_closure // not used for prediction but useful to know start configs anyway + s0_closure = try applyPrecedenceFilter(s0_closure) + s0 = addDFAState(dfa, DFAState(s0_closure)) + try dfa.setPrecedenceStartState(parser.getPrecedence(), s0!) 
} else { - // the start state for a "regular" DFA is just s0 - s0 = dfa.s0 + s0 = addDFAState(dfa, DFAState(s0_closure)) + dfa.s0 = s0 } - - if s0 == nil { - //BIG BUG - if outerContext == nil { - outerContext = ParserRuleContext.EMPTY - } - if debug || debug_list_atn_decisions { - var debugInfo = "predictATN decision \(dfa.decision) " - debugInfo += "exec LA(1)==\(try getLookaheadName(input)), " - debugInfo += "outerContext=\(outerContext!.toString(parser))" - print(debugInfo) - } - - var fullCtx: Bool = false - var s0_closure: ATNConfigSet = try computeStartState(dfa.atnStartState, - ParserRuleContext.EMPTY, - fullCtx) - - if dfa.isPrecedenceDfa() { - /// If this is a precedence DFA, we use applyPrecedenceFilter - /// to convert the computed start state to a precedence start - /// state. We then use DFA.setPrecedenceStartState to set the - /// appropriate start state for the precedence level rather - /// than simply setting DFA.s0. - //added by janyou 20160224 - // dfa.s0!.configs = s0_closure // not used for prediction but useful to know start configs anyway - s0_closure = try applyPrecedenceFilter(s0_closure) - s0 = try addDFAState(dfa, DFAState(s0_closure)) - try dfa.setPrecedenceStartState(parser.getPrecedence(), s0!) - } else { - s0 = try addDFAState(dfa, DFAState(s0_closure)) - dfa.s0 = s0 - } - } - - var alt: Int = try execATN(dfa, s0!, input, index, outerContext!) - if debug { - print("DFA after predictATN: \(dfa.toString(parser.getVocabulary()))") - } - defer { - mergeCache = nil // wack cache after each prediction - _dfa = nil - try! input.seek(index) - try! input.release(m) - } - return alt } + let alt = try execATN(dfa, s0!, input, index, outerContext!) + if debug { + print("DFA after predictATN: \(dfa.toString(parser.getVocabulary()))") + } + mergeCache = nil // wack cache after each prediction + _dfa = nil + try! input.seek(index) + try! 
input.release(m) + return alt + } + } + /// /// Performs ATN simulation to compute a predicted alternative based /// upon the remaining input, but also updates the DFA cache to avoid /// having to traverse the ATN again for the same input sequence. - /// + /// /// There are some key conditions we're looking for after computing a new /// set of ATN configs (proposed DFA state): /// if the set is empty, there is no viable alternative for current symbol /// does the state uniquely predict an alternative? /// does the state have a conflict that would prevent us from /// putting it on the work list? - /// + /// /// We also have some key operations to do: /// add an edge from previous DFA state to potentially new DFA state, D, /// upon current symbol but only if adding to work list, which means in all @@ -408,13 +422,14 @@ open class ParserATNSimulator: ATNSimulator { /// reporting an ambiguity /// reporting a context sensitivity /// reporting insufficient predicates - /// + /// /// cover these cases: /// dead end /// single alt /// single alt + preds /// conflict /// conflict + preds + /// final func execATN(_ dfa: DFA, _ s0: DFAState, _ input: TokenStream, _ startIndex: Int, _ outerContext: ParserRuleContext) throws -> Int { @@ -422,13 +437,13 @@ open class ParserATNSimulator: ATNSimulator { try print("execATN decision \(dfa.decision) exec LA(1)==\(getLookaheadName(input)) line \(input.LT(1)!.getLine()):\(input.LT(1)!.getCharPositionInLine())") } - var previousD: DFAState = s0 + var previousD = s0 if debug { print("s0 = \(s0)") } - var t: Int = try input.LA(1) + var t = try input.LA(1) while true { // while more work @@ -449,9 +464,9 @@ open class ParserATNSimulator: ATNSimulator { // ATN states in SLL implies LL will also get nowhere. // If conflict in states that dip out, choose min since we // will get error no matter what. 
- let e: NoViableAltException = try noViableAlt(input, outerContext, previousD.configs, startIndex) + let e = noViableAlt(input, outerContext, previousD.configs, startIndex) try input.seek(startIndex) - let alt: Int = try getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule(previousD.configs, outerContext) + let alt = try getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule(previousD.configs, outerContext) if alt != ATN.INVALID_ALT_NUMBER { return alt } @@ -462,12 +477,12 @@ open class ParserATNSimulator: ATNSimulator { if D.requiresFullContext && (mode != PredictionMode.SLL) { // IF PREDS, MIGHT RESOLVE TO SINGLE ALT => SLL (or syntax error) - var conflictingAlts: BitSet = D.configs.conflictingAlts! + var conflictingAlts = D.configs.conflictingAlts! if D.predicates != nil { if debug { print("DFA state has preds in DFA sim LL failover") } - let conflictIndex: Int = input.index() + let conflictIndex = input.index() if conflictIndex != startIndex { try input.seek(startIndex) } @@ -477,7 +492,7 @@ open class ParserATNSimulator: ATNSimulator { if debug { print("Full LL avoided") } - return try conflictingAlts.nextSetBit(0) + return conflictingAlts.firstSetBit() } if conflictIndex != startIndex { @@ -490,12 +505,10 @@ open class ParserATNSimulator: ATNSimulator { if dfa_debug { print("ctx sensitive state \(outerContext) in \(D)") } - let fullCtx: Bool = true - let s0_closure: ATNConfigSet = - try computeStartState(dfa.atnStartState, outerContext, - fullCtx) - try reportAttemptingFullContext(dfa, conflictingAlts, D.configs, startIndex, input.index()) - let alt: Int = try execATNWithFullContext(dfa, D, s0_closure, + let fullCtx = true + let s0_closure = try computeStartState(dfa.atnStartState, outerContext, fullCtx) + reportAttemptingFullContext(dfa, conflictingAlts, D.configs, startIndex, input.index()) + let alt = try execATNWithFullContext(dfa, D, s0_closure, input, startIndex, outerContext) return alt @@ -506,22 +519,22 @@ open class ParserATNSimulator: 
ATNSimulator { return D.prediction } - let stopIndex: Int = input.index() + let stopIndex = input.index() try input.seek(startIndex) - let alts: BitSet = try evalSemanticContext(D.predicates!, outerContext, true) + let alts = try evalSemanticContext(D.predicates!, outerContext, true) switch alts.cardinality() { case 0: - throw try ANTLRException.recognition(e: noViableAlt(input, outerContext, D.configs, startIndex)) + throw ANTLRException.recognition(e: noViableAlt(input, outerContext, D.configs, startIndex)) case 1: - return try alts.nextSetBit(0) + return alts.firstSetBit() default: // report ambiguity after predicate evaluation to make sure the correct // set of ambig alts is reported. - try reportAmbiguity(dfa, D, startIndex, stopIndex, false, alts, D.configs) - return try alts.nextSetBit(0) + reportAmbiguity(dfa, D, startIndex, stopIndex, false, alts, D.configs) + return alts.firstSetBit() } } @@ -534,17 +547,19 @@ open class ParserATNSimulator: ATNSimulator { } } + /// /// Get an existing target state for an edge in the DFA. If the target state /// for the edge has not yet been computed or is otherwise not available, - /// this method returns {@code null}. - /// + /// this method returns `null`. + /// /// - parameter previousD: The current DFA state /// - parameter t: The next input symbol /// - returns: The existing target DFA state for the given input symbol - /// {@code t}, or {@code null} if the target state for this edge is not + /// `t`, or `null` if the target state for this edge is not /// already cached + /// func getExistingTargetState(_ previousD: DFAState, _ t: Int) -> DFAState? { - var edges: [DFAState?]? = previousD.edges + var edges = previousD.edges if edges == nil || (t + 1) < 0 || (t + 1) >= (edges!.count) { return nil } @@ -552,21 +567,23 @@ open class ParserATNSimulator: ATNSimulator { return edges![t + 1] } + /// /// Compute a target state for an edge in the DFA, and attempt to add the /// computed state and corresponding edge to the DFA. 
- /// + /// /// - parameter dfa: The DFA /// - parameter previousD: The current DFA state /// - parameter t: The next input symbol - /// + /// /// - returns: The computed target DFA state for the given input symbol - /// {@code t}. If {@code t} does not lead to a valid DFA state, this method - /// returns {@link #ERROR}. + /// `t`. If `t` does not lead to a valid DFA state, this method + /// returns _#ERROR_. + /// func computeTargetState(_ dfa: DFA, _ previousD: DFAState, _ t: Int) throws -> DFAState { - let reach: ATNConfigSet? = try computeReachSet(previousD.configs, t, false) + let reach = try computeReachSet(previousD.configs, t, false) if reach == nil { - try addDFAEdge(dfa, previousD, t, ATNSimulator.ERROR) + addDFAEdge(dfa, previousD, t, ATNSimulator.ERROR) return ATNSimulator.ERROR } @@ -576,8 +593,8 @@ open class ParserATNSimulator: ATNSimulator { let predictedAlt: Int = ParserATNSimulator.getUniqueAlt(reach!) if debug { - let altSubSets: Array = try PredictionMode.getConflictingAltSubsets(reach!) - print("SLL altSubSets=\(altSubSets), configs=\(reach!), predict=\(predictedAlt), allSubsetsConflict=\(PredictionMode.allSubsetsConflict(altSubSets)), conflictingAlts=\(try! getConflictingAlts(reach!))") + let altSubSets = PredictionMode.getConflictingAltSubsets(reach!) + print("SLL altSubSets=\(altSubSets), configs=\(reach!), predict=\(predictedAlt), allSubsetsConflict=\(PredictionMode.allSubsetsConflict(altSubSets)), conflictingAlts=\(getConflictingAlts(reach!))") } if predictedAlt != ATN.INVALID_ALT_NUMBER { @@ -586,44 +603,43 @@ open class ParserATNSimulator: ATNSimulator { D.configs.uniqueAlt = predictedAlt D.prediction = predictedAlt } else { - if try PredictionMode.hasSLLConflictTerminatingPrediction(mode, reach!) { + if PredictionMode.hasSLLConflictTerminatingPrediction(mode, reach!) { // MORE THAN ONE VIABLE ALTERNATIVE - D.configs.conflictingAlts = try getConflictingAlts(reach!) + D.configs.conflictingAlts = getConflictingAlts(reach!) 
D.requiresFullContext = true // in SLL-only mode, we will stop at this state and return the minimum alt D.isAcceptState = true - D.prediction = try D.configs.conflictingAlts!.nextSetBit(0) + D.prediction = D.configs.conflictingAlts!.firstSetBit() } } if D.isAcceptState && D.configs.hasSemanticContext { - try predicateDFAState(D, atn.getDecisionState(dfa.decision)!) + predicateDFAState(D, atn.getDecisionState(dfa.decision)!) if D.predicates != nil { D.prediction = ATN.INVALID_ALT_NUMBER } } // all adds to dfa are done after we've created full D state - D = try addDFAEdge(dfa, previousD, t, D)! + D = addDFAEdge(dfa, previousD, t, D)! return D } - final func predicateDFAState(_ dfaState: DFAState, _ decisionState: DecisionState) throws { + final func predicateDFAState(_ dfaState: DFAState, _ decisionState: DecisionState) { // We need to test all predicates, even in DFA states that // uniquely predict alternative. - let nalts: Int = decisionState.getNumberOfTransitions() + let nalts = decisionState.getNumberOfTransitions() // Update DFA so reach becomes accept state with (predicate,alt) // pairs if preds found for conflicting alts - let altsToCollectPredsFrom: BitSet = try getConflictingAltsOrUniqueAlt(dfaState.configs) - let altToPred: [SemanticContext?]? = try getPredsForAmbigAlts(altsToCollectPredsFrom, dfaState.configs, nalts) - if altToPred != nil { - dfaState.predicates = try getPredicatePredictions(altsToCollectPredsFrom, altToPred!) + let altsToCollectPredsFrom = getConflictingAltsOrUniqueAlt(dfaState.configs) + if let altToPred = getPredsForAmbigAlts(altsToCollectPredsFrom, dfaState.configs, nalts) { + dfaState.predicates = getPredicatePredictions(altsToCollectPredsFrom, altToPred) dfaState.prediction = ATN.INVALID_ALT_NUMBER // make sure we use preds } else { // There are preds in configs but they might go away // when OR'd together like {p}? || NONE == NONE. 
If neither // alt has preds, resolve to min alt - dfaState.prediction = try altsToCollectPredsFrom.nextSetBit(0) + dfaState.prediction = altsToCollectPredsFrom.firstSetBit() } } @@ -636,13 +652,13 @@ open class ParserATNSimulator: ATNSimulator { if debug || debug_list_atn_decisions { print("execATNWithFullContext \(s0)") } - let fullCtx: Bool = true - var foundExactAmbig: Bool = false + let fullCtx = true + var foundExactAmbig = false var reach: ATNConfigSet? = nil - var previous: ATNConfigSet = s0 + var previous = s0 try input.seek(startIndex) - var t: Int = try input.LA(1) - var predictedAlt: Int = 0 + var t = try input.LA(1) + var predictedAlt = 0 while true { // while more work if let computeReach = try computeReachSet(previous, t, fullCtx) { @@ -657,9 +673,9 @@ open class ParserATNSimulator: ATNSimulator { // ATN states in SLL implies LL will also get nowhere. // If conflict in states that dip out, choose min since we // will get error no matter what. - let e: NoViableAltException = try noViableAlt(input, outerContext, previous, startIndex) + let e = noViableAlt(input, outerContext, previous, startIndex) try input.seek(startIndex) - let alt: Int = try getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule(previous, outerContext) + let alt = try getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule(previous, outerContext) if alt != ATN.INVALID_ALT_NUMBER { return alt } @@ -667,9 +683,9 @@ open class ParserATNSimulator: ATNSimulator { } if let reach = reach { - let altSubSets: Array = try PredictionMode.getConflictingAltSubsets(reach) + let altSubSets = PredictionMode.getConflictingAltSubsets(reach) if debug { - print("LL altSubSets=\(altSubSets), predict=\(try PredictionMode.getUniqueAlt(altSubSets)), resolvesToJustOneViableAlt=\(try PredictionMode.resolvesToJustOneViableAlt(altSubSets))") + print("LL altSubSets=\(altSubSets), predict=\(PredictionMode.getUniqueAlt(altSubSets)), 
resolvesToJustOneViableAlt=\(PredictionMode.resolvesToJustOneViableAlt(altSubSets))") } @@ -680,7 +696,7 @@ open class ParserATNSimulator: ATNSimulator { break } if mode != PredictionMode.LL_EXACT_AMBIG_DETECTION { - predictedAlt = try PredictionMode.resolvesToJustOneViableAlt(altSubSets) + predictedAlt = PredictionMode.resolvesToJustOneViableAlt(altSubSets) if predictedAlt != ATN.INVALID_ALT_NUMBER { break } @@ -690,7 +706,7 @@ open class ParserATNSimulator: ATNSimulator { if PredictionMode.allSubsetsConflict(altSubSets) && PredictionMode.allSubsetsEqual(altSubSets) { foundExactAmbig = true - predictedAlt = try PredictionMode.getSingleViableAlt(altSubSets) + predictedAlt = PredictionMode.getSingleViableAlt(altSubSets) break } // else there are multiple non-conflicting subsets or @@ -710,37 +726,39 @@ open class ParserATNSimulator: ATNSimulator { // without conflict, then we know that it's a full LL decision // not SLL. if reach.uniqueAlt != ATN.INVALID_ALT_NUMBER { - try reportContextSensitivity(dfa, predictedAlt, reach, startIndex, input.index()) + reportContextSensitivity(dfa, predictedAlt, reach, startIndex, input.index()) return predictedAlt } // We do not check predicates here because we have checked them // on-the-fly when doing full context prediction. + /// /// In non-exact ambiguity detection mode, we might actually be able to /// detect an exact ambiguity, but I'm not going to spend the cycles /// needed to check. We only emit ambiguity warnings in exact ambiguity /// mode. - /// + /// /// For example, we might know that we have conflicting configurations. /// But, that does not mean that there is no way forward without a /// conflict. 
It's possible to have nonconflicting alt subsets as in: - /// + /// /// LL altSubSets=[{1, 2}, {1, 2}, {1}, {1, 2}] - /// + /// /// from - /// + /// /// [(17,1,[5 $]), (13,1,[5 10 $]), (21,1,[5 10 $]), (11,1,[$]), /// (13,2,[5 10 $]), (21,2,[5 10 $]), (11,2,[$])] - /// + /// /// In this case, (17,1,[5 $]) indicates there is some next sequence that /// would resolve this without conflict to alternative 1. Any other viable /// next sequence, however, is associated with a conflict. We stop /// looking for input because no amount of further lookahead will alter /// the fact that we should predict alternative 1. We just can't say for /// sure that there is an ambiguity without looking further. - try reportAmbiguity(dfa, D, startIndex, input.index(), foundExactAmbig, - reach.getAlts(), reach) + /// + reportAmbiguity(dfa, D, startIndex, input.index(), foundExactAmbig, + reach.getAlts(), reach) } return predictedAlt } @@ -756,50 +774,46 @@ open class ParserATNSimulator: ATNSimulator { mergeCache = DoubleKeyMap() } - let intermediate: ATNConfigSet = ATNConfigSet(fullCtx) + let intermediate = ATNConfigSet(fullCtx) + /// /// Configurations already in a rule stop state indicate reaching the end /// of the decision rule (local context) or end of the start rule (full /// context). Once reached, these configurations are never updated by a /// closure operation, so they are handled separately for the performance /// advantage of having a smaller intermediate set when calling closure. - /// + /// /// For full-context reach operations, separate handling is required to /// ensure that the alternative matching the longest overall sequence is /// chosen when multiple such configurations can match the input. - var skippedStopStates: Array? = nil + /// + var skippedStopStates: [ATNConfig]? 
= nil // First figure out where we can reach on input t - let length = closureConfigSet.configs.count let configs = closureConfigSet.configs - for i in 0..() + skippedStopStates = [ATNConfig]() } - - skippedStopStates?.append(configs[i]) + skippedStopStates!.append(config) } continue } - let n: Int = configs[i].state.getNumberOfTransitions() + let n = config.state.getNumberOfTransitions() for ti in 0.. = Set() - let treatEofAsEpsilon: Bool = t == CommonToken.EOF - let configs = intermediate.configs - let length = configs.count - for i in 0..() + let treatEofAsEpsilon = (t == CommonToken.EOF) + for config in intermediate.configs { + try closure(config, reach!, &closureBusy, false, fullCtx, treatEofAsEpsilon) } } if t == BufferedTokenStream.EOF { + /// /// After consuming EOF no additional input is possible, so we are /// only interested in configurations which reached the end of the /// decision rule (local context) or end of the start rule (full /// context). Update reach to contain only these configurations. This /// handles both explicit EOF transitions in the grammar and implicit /// EOF transitions following the end of the decision or start rule. - /// + /// /// When reach==intermediate, no closure operation was performed. In /// this case, removeAllConfigsNotInRuleStopState needs to check for /// reachable rule stop states as well as configurations already in /// a rule stop state. - /// + /// /// This is handled before the configurations in skippedStopStates, /// because any configurations potentially added from that list are /// already guaranteed to meet this condition whether or not it's /// required. - reach = try removeAllConfigsNotInRuleStopState(reach!, reach! === intermediate) + /// + reach = removeAllConfigsNotInRuleStopState(reach!, reach! === intermediate) } + /// /// If skippedStopStates is not null, then it contains at least one /// configuration. 
For full-context reach operations, these /// configurations reached the end of the start rule, in which case we @@ -874,11 +891,12 @@ open class ParserATNSimulator: ATNSimulator { /// closure operation reached such a state. This ensures adaptivePredict /// chooses an alternative matching the longest overall sequence when /// multiple alternatives are viable. + /// if let reach = reach { - if skippedStopStates != nil && (!fullCtx || !PredictionMode.hasConfigInRuleStopState(reach)) { - assert(!skippedStopStates!.isEmpty, "Expected: !skippedStopStates.isEmpty()") - for c: ATNConfig in skippedStopStates! { - try reach.add(c, &mergeCache) + if let skippedStopStates = skippedStopStates, (!fullCtx || !PredictionMode.hasConfigInRuleStopState(reach)) { + assert(!skippedStopStates.isEmpty, "Expected: !skippedStopStates.isEmpty()") + for c in skippedStopStates { + try! reach.add(c, &mergeCache) } } @@ -889,53 +907,49 @@ open class ParserATNSimulator: ATNSimulator { return reach } + /// /// Return a configuration set containing only the configurations from - /// {@code configs} which are in a {@link org.antlr.v4.runtime.atn.RuleStopState}. If all - /// configurations in {@code configs} are already in a rule stop state, this - /// method simply returns {@code configs}. - /// - ///

      When {@code lookToEndOfRule} is true, this method uses - /// {@link org.antlr.v4.runtime.atn.ATN#nextTokens} for each configuration in {@code configs} which is + /// `configs` which are in a _org.antlr.v4.runtime.atn.RuleStopState_. If all + /// configurations in `configs` are already in a rule stop state, this + /// method simply returns `configs`. + /// + /// When `lookToEndOfRule` is true, this method uses + /// _org.antlr.v4.runtime.atn.ATN#nextTokens_ for each configuration in `configs` which is /// not already in a rule stop state to see if a rule stop state is reachable - /// from the configuration via epsilon-only transitions.

      - /// + /// from the configuration via epsilon-only transitions. + /// /// - parameter configs: the configuration set to update /// - parameter lookToEndOfRule: when true, this method checks for rule stop states /// reachable by epsilon-only transitions from each configuration in - /// {@code configs}. - /// - /// - returns: {@code configs} if all configurations in {@code configs} are in a + /// `configs`. + /// + /// - returns: `configs` if all configurations in `configs` are in a /// rule stop state, otherwise return a new configuration set containing only - /// the configurations from {@code configs} which are in a rule stop state - final func removeAllConfigsNotInRuleStopState(_ configs: ATNConfigSet, _ lookToEndOfRule: Bool) throws -> ATNConfigSet { - - let result = try configs.removeAllConfigsNotInRuleStopState(&mergeCache,lookToEndOfRule,atn) - return result + /// the configurations from `configs` which are in a rule stop state + /// + final func removeAllConfigsNotInRuleStopState(_ configs: ATNConfigSet, _ lookToEndOfRule: Bool) -> ATNConfigSet { + return configs.removeAllConfigsNotInRuleStopState(&mergeCache,lookToEndOfRule,atn) } - final func computeStartState(_ p: ATNState, - _ ctx: RuleContext, - _ fullCtx: Bool) throws -> ATNConfigSet { - - - let initialContext: PredictionContext = PredictionContext.fromRuleContext(atn, ctx) - let configs: ATNConfigSet = ATNConfigSet(fullCtx) + final func computeStartState(_ p: ATNState, _ ctx: RuleContext, _ fullCtx: Bool) throws -> ATNConfigSet { + let initialContext = PredictionContext.fromRuleContext(atn, ctx) + let configs = ATNConfigSet(fullCtx) let length = p.getNumberOfTransitions() for i in 0.. = Set() + let target = p.transition(i).target + let c = ATNConfig(target, i + 1, initialContext) + var closureBusy = Set() try closure(c, configs, &closureBusy, true, fullCtx, false) } - return configs } + /// /// parrt internal source braindump that doesn't mess up /// external API spec. 
- /// + /// /// applyPrecedenceFilter is an optimization to avoid highly /// nonlinear prediction of expressions and other left recursive /// rules. The precedence predicates such as {3>=prec}? Are highly @@ -946,23 +960,23 @@ open class ParserATNSimulator: ATNSimulator { /// these predicates out of context, the resulting conflict leads /// to full LL evaluation and nonlinear prediction which shows up /// very clearly with fairly large expressions. - /// + /// /// Example grammar: - /// + /// /// e : e '*' e /// | e '+' e /// | INT /// ; - /// + /// /// We convert that to the following: - /// + /// /// e[int prec] /// : INT /// ( {3>=prec}? '*' e[4] /// | {2>=prec}? '+' e[3] /// )* /// ; - /// + /// /// The (..)* loop has a decision for the inner block as well as /// an enter or exit decision, which is what concerns us here. At /// the 1st + of input 1+2+3, the loop entry sees both predicates @@ -974,7 +988,7 @@ open class ParserATNSimulator: ATNSimulator { /// cannot evaluate those predicates because we have fallen off /// the edge of the stack and will in general not know which prec /// parameter is the right one to use in the predicate. - /// + /// /// Because we have special information, that these are precedence /// predicates, we can resolve them without failing over to full /// LL despite their context sensitive nature. We make an @@ -989,7 +1003,7 @@ open class ParserATNSimulator: ATNSimulator { /// the same value and so we can decide to enter the loop instead /// of matching it later. That means we can strip out the other /// configuration for the exit branch. - /// + /// /// So imagine we have (14,1,$,{2>=prec}?) and then /// (14,2,$-dipsIntoOuterContext,{2>=prec}?). The optimization /// allows us to collapse these two configurations. We know that @@ -1001,33 +1015,33 @@ open class ParserATNSimulator: ATNSimulator { /// enter the loop as it is consistent with the notion of operator /// precedence. 
It's also how the full LL conflict resolution /// would work. - /// + /// /// The solution requires a different DFA start state for each /// precedence level. - /// + /// /// The basic filter mechanism is to remove configurations of the /// form (p, 2, pi) if (p, 1, pi) exists for the same p and pi. In /// other words, for the same ATN state and predicate context, /// remove any configuration associated with an exit branch if /// there is a configuration associated with the enter branch. - /// + /// /// It's also the case that the filter evaluates precedence /// predicates and resolves conflicts according to precedence /// levels. For example, for input 1+2+3 at the first +, we see /// prediction filtering - /// + /// /// [(11,1,[$],{3>=prec}?), (14,1,[$],{2>=prec}?), (5,2,[$],up=1), /// (11,2,[$],up=1), (14,2,[$],up=1)],hasSemanticContext=true,dipsIntoOuterContext - /// + /// /// to - /// + /// /// [(11,1,[$]), (14,1,[$]), (5,2,[$],up=1)],dipsIntoOuterContext - /// + /// /// This filters because {3>=prec}? evals to true and collapses /// (11,1,[$],{3>=prec}?) and (11,2,[$],up=1) since early conflict /// resolution based upon rules of operator precedence fits with /// our usual match first alt upon conflict. - /// + /// /// We noticed a problem where a recursive call resets precedence /// to 0. Sam's fix: each config has flag indicating if it has /// returned from an expr[0] call. then just don't filter any @@ -1038,69 +1052,60 @@ open class ParserATNSimulator: ATNSimulator { /// after leaving the rule stop state of the LR rule containing /// state p, corresponding to a rule invocation with precedence /// level 0" + /// + /// /// This method transforms the start state computed by - /// {@link #computeStartState} to the special start state used by a + /// _#computeStartState_ to the special start state used by a /// precedence DFA for a particular precedence value. 
The transformation /// process applies the following changes to the start state's configuration /// set. - /// - ///
        - ///
      1. Evaluate the precedence predicates for each configuration using - /// {@link org.antlr.v4.runtime.atn.SemanticContext#evalPrecedence}.
      2. - ///
      3. When {@link org.antlr.v4.runtime.atn.ATNConfig#isPrecedenceFilterSuppressed} is {@code false}, + /// + /// * Evaluate the precedence predicates for each configuration using + /// _org.antlr.v4.runtime.atn.SemanticContext#evalPrecedence_. + /// * When _org.antlr.v4.runtime.atn.ATNConfig#isPrecedenceFilterSuppressed_ is `false`, /// remove all configurations which predict an alternative greater than 1, /// for which another configuration that predicts alternative 1 is in the /// same ATN state with the same prediction context. This transformation is /// valid for the following reasons: - ///
          - ///
        • The closure block cannot contain any epsilon transitions which bypass + /// + /// * The closure block cannot contain any epsilon transitions which bypass /// the body of the closure, so all states reachable via alternative 1 are /// part of the precedence alternatives of the transformed left-recursive - /// rule.
        • - ///
        • The "primary" portion of a left recursive rule cannot contain an + /// rule. + /// * The "primary" portion of a left recursive rule cannot contain an /// epsilon transition, so the only way an alternative other than 1 can exist /// in a state that is also reachable via alternative 1 is by nesting calls /// to the left-recursive rule, with the outer calls not being at the /// preferred precedence level. The - /// {@link org.antlr.v4.runtime.atn.ATNConfig#isPrecedenceFilterSuppressed} property marks ATN + /// _org.antlr.v4.runtime.atn.ATNConfig#isPrecedenceFilterSuppressed_ property marks ATN /// configurations which do not meet this condition, and therefore are not - /// eligible for elimination during the filtering process.
        • - ///
        - ///
      4. - ///
      - /// - ///

      + /// eligible for elimination during the filtering process. + /// /// The prediction context must be considered by this filter to address /// situations like the following. - ///

      - /// - ///
      +    /// ```
           /// grammar TA;
           /// prog: statement* EOF;
           /// statement: letterA | statement letterA 'b' ;
           /// letterA: 'a';
      -    /// 
      - ///
      - ///

      + /// ``` /// If the above grammar, the ATN state immediately before the token - /// reference {@code 'a'} in {@code letterA} is reachable from the left edge + /// reference `'a'` in `letterA` is reachable from the left edge /// of both the primary and closure blocks of the left-recursive rule - /// {@code statement}. The prediction context associated with each of these + /// `statement`. The prediction context associated with each of these /// configurations distinguishes between them, and prevents the alternative - /// which stepped out to {@code prog} (and then back in to {@code statement} + /// which stepped out to `prog` (and then back in to `statement` /// from being eliminated by the filter. - ///

      - /// + /// /// - parameter configs: The configuration set computed by - /// {@link #computeStartState} as the start state for the DFA. + /// _#computeStartState_ as the start state for the DFA. /// - returns: The transformed configuration set representing the start state /// for a precedence DFA at a particular precedence level (determined by - /// calling {@link org.antlr.v4.runtime.Parser#getPrecedence}). - final internal func applyPrecedenceFilter(_ configs: ATNConfigSet) throws -> ATNConfigSet { - - let configSet = try configs.applyPrecedenceFilter(&mergeCache,parser,_outerContext) - return configSet + /// calling _org.antlr.v4.runtime.Parser#getPrecedence_). + /// + final internal func applyPrecedenceFilter(_ configs: ATNConfigSet) throws -> ATNConfigSet { + return try configs.applyPrecedenceFilter(&mergeCache,parser,_outerContext) } final internal func getReachableTarget(_ trans: Transition, _ ttype: Int) -> ATNState? { @@ -1114,8 +1119,9 @@ open class ParserATNSimulator: ATNSimulator { final internal func getPredsForAmbigAlts(_ ambigAlts: BitSet, _ configs: ATNConfigSet, - _ nalts: Int) throws -> [SemanticContext?]? { + _ nalts: Int) -> [SemanticContext?]? { // REACH=[1|1|[]|0:0, 1|2|[]|0:1] + /// /// altToPred starts as an array of all null contexts. The entry at index i /// corresponds to alternative i. altToPred[i] may have one of three values: /// 1. null: no ATNConfig c is found such that c.alt==i @@ -1124,10 +1130,10 @@ open class ParserATNSimulator: ATNSimulator { /// alt i has at least one unpredicated config. /// 3. Non-NONE Semantic Context: There exists at least one, and for all /// ATNConfig c such that c.alt==i, c.semanticContext!=SemanticContext.NONE. - /// + /// /// From this, it is clear that NONE||anything==NONE. - let altToPred: [SemanticContext?]? 
= try configs.getPredsForAmbigAlts(ambigAlts,nalts) - + /// + let altToPred = configs.getPredsForAmbigAlts(ambigAlts,nalts) if debug { print("getPredsForAmbigAlts result \(String(describing: altToPred))") } @@ -1135,17 +1141,17 @@ open class ParserATNSimulator: ATNSimulator { } final internal func getPredicatePredictions(_ ambigAlts: BitSet?, - _ altToPred: [SemanticContext?]) throws -> [DFAState.PredPrediction]? { - var pairs: Array = Array() - var containsPredicate: Bool = false + _ altToPred: [SemanticContext?]) -> [DFAState.PredPrediction]? { + var pairs = [DFAState.PredPrediction]() + var containsPredicate = false let length = altToPred.count for i in 1.. + /// _org.antlr.v4.runtime.NoViableAltException_ in particular prediction scenarios where the + /// _#ERROR_ state was reached during ATN simulation. + /// /// The default implementation of this method uses the following /// algorithm to identify an ATN configuration which successfully parsed the /// decision entry rule. Choosing such an alternative ensures that the - /// {@link org.antlr.v4.runtime.ParserRuleContext} returned by the calling rule will be complete + /// _org.antlr.v4.runtime.ParserRuleContext_ returned by the calling rule will be complete /// and valid, and the syntax error will be reported later at a more - /// localized location.

      - /// - ///
        - ///
      • If a syntactically valid path or paths reach the end of the decision rule and - /// they are semantically valid if predicated, return the min associated alt.
      • - ///
      • Else, if a semantically invalid but syntactically valid path exist + /// localized location. + /// + /// * If a syntactically valid path or paths reach the end of the decision rule and + /// they are semantically valid if predicated, return the min associated alt. + /// * Else, if a semantically invalid but syntactically valid path exist /// or paths exist, return the minimum associated alt. - ///
      • - ///
      • Otherwise, return {@link org.antlr.v4.runtime.atn.ATN#INVALID_ALT_NUMBER}.
      • - ///
      - /// - ///

      + /// * Otherwise, return _org.antlr.v4.runtime.atn.ATN#INVALID_ALT_NUMBER_. + /// /// In some scenarios, the algorithm described above could predict an - /// alternative which will result in a {@link org.antlr.v4.runtime.FailedPredicateException} in - /// the parser. Specifically, this could occur if the only configuration + /// alternative which will result in a _org.antlr.v4.runtime.FailedPredicateException_ in + /// the parser. Specifically, this could occur if the __only__ configuration /// capable of successfully parsing to the end of the decision rule is /// blocked by a semantic predicate. By choosing this alternative within - /// {@link #adaptivePredict} instead of throwing a - /// {@link org.antlr.v4.runtime.NoViableAltException}, the resulting - /// {@link org.antlr.v4.runtime.FailedPredicateException} in the parser will identify the specific + /// _#adaptivePredict_ instead of throwing a + /// _org.antlr.v4.runtime.NoViableAltException_, the resulting + /// _org.antlr.v4.runtime.FailedPredicateException_ in the parser will identify the specific /// predicate which is preventing the parser from successfully parsing the /// decision rule, which helps developers identify and correct logic errors /// in semantic predicates. - ///

      - /// + /// /// - parameter configs: The ATN configurations which were valid immediately before - /// the {@link #ERROR} state was reached + /// the _#ERROR_ state was reached /// - parameter outerContext: The is the \gamma_0 initial parser context from the paper /// or the parser stack at the instant before prediction commences. - /// - /// - returns: The value to return from {@link #adaptivePredict}, or - /// {@link org.antlr.v4.runtime.atn.ATN#INVALID_ALT_NUMBER} if a suitable alternative was not - /// identified and {@link #adaptivePredict} should report an error instead. + /// + /// - returns: The value to return from _#adaptivePredict_, or + /// _org.antlr.v4.runtime.atn.ATN#INVALID_ALT_NUMBER_ if a suitable alternative was not + /// identified and _#adaptivePredict_ should report an error instead. + /// final internal func getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule(_ configs: ATNConfigSet, _ outerContext: ParserRuleContext) throws -> Int { - let sets: (ATNConfigSet, ATNConfigSet) = try - splitAccordingToSemanticValidity(configs, outerContext) - let semValidConfigs: ATNConfigSet = sets.0 - let semInvalidConfigs: ATNConfigSet = sets.1 - var alt: Int = try getAltThatFinishedDecisionEntryRule(semValidConfigs) + let (semValidConfigs, semInvalidConfigs) = try splitAccordingToSemanticValidity(configs, outerContext) + var alt = getAltThatFinishedDecisionEntryRule(semValidConfigs) if alt != ATN.INVALID_ALT_NUMBER { // semantically/syntactically viable path exists return alt } // Is there a syntactically valid path with a failed pred? 
if semInvalidConfigs.size() > 0 { - alt = try getAltThatFinishedDecisionEntryRule(semInvalidConfigs) + alt = getAltThatFinishedDecisionEntryRule(semInvalidConfigs) if alt != ATN.INVALID_ALT_NUMBER { // syntactically viable path exists return alt @@ -1226,46 +1225,50 @@ open class ParserATNSimulator: ATNSimulator { return ATN.INVALID_ALT_NUMBER } - final internal func getAltThatFinishedDecisionEntryRule(_ configs: ATNConfigSet) throws -> Int { + final internal func getAltThatFinishedDecisionEntryRule(_ configs: ATNConfigSet) -> Int { - return try configs.getAltThatFinishedDecisionEntryRule() + return configs.getAltThatFinishedDecisionEntryRule() } + /// /// Walk the list of configurations and split them according to /// those that have preds evaluating to true/false. If no pred, assume /// true pred and include in succeeded set. Returns Pair of sets. - /// + /// /// Create a new set so as not to alter the incoming parameter. - /// + /// /// Assumption: the input stream has been restored to the starting point /// prediction, which is where predicates need to evaluate. + /// final internal func splitAccordingToSemanticValidity( _ configs: ATNConfigSet, _ outerContext: ParserRuleContext) throws -> (ATNConfigSet, ATNConfigSet) { - return try configs.splitAccordingToSemanticValidity(outerContext,evalSemanticContext) + return try configs.splitAccordingToSemanticValidity(outerContext, evalSemanticContext) } + /// /// Look through a list of predicate/alt pairs, returning alts for the - /// pairs that win. A {@code NONE} predicate indicates an alt containing an + /// pairs that win. A `NONE` predicate indicates an alt containing an /// unpredicated config which behaves as "always true." If !complete /// then we stop at the first predicate that evaluates to true. This /// includes pairs with null predicates. 
+ /// final internal func evalSemanticContext(_ predPredictions: [DFAState.PredPrediction], _ outerContext: ParserRuleContext, _ complete: Bool) throws -> BitSet { - let predictions: BitSet = BitSet() - for pair: DFAState.PredPrediction in predPredictions { + let predictions = BitSet() + for pair in predPredictions { if pair.pred == SemanticContext.NONE { - try predictions.set(pair.alt) + try! predictions.set(pair.alt) if !complete { break } continue } - let fullCtx: Bool = false // in dfa - let predicateEvaluationResult: Bool = try evalSemanticContext(pair.pred, outerContext, pair.alt, fullCtx) + let fullCtx = false // in dfa + let predicateEvaluationResult = try evalSemanticContext(pair.pred, outerContext, pair.alt, fullCtx) if debug || dfa_debug { print("eval pred \(pair)= \(predicateEvaluationResult)") } @@ -1274,7 +1277,7 @@ open class ParserATNSimulator: ATNSimulator { if debug || dfa_debug { print("PREDICT \(pair.alt)") } - try predictions.set(pair.alt) + try! predictions.set(pair.alt) if !complete { break } @@ -1284,54 +1287,52 @@ open class ParserATNSimulator: ATNSimulator { return predictions } + /// /// Evaluate a semantic context within a specific parser context. - /// - ///

      + /// /// This method might not be called for every semantic context evaluated /// during the prediction process. In particular, we currently do not - /// evaluate the following but it may change in the future:

      - /// - ///
        - ///
      • Precedence predicates (represented by - /// {@link org.antlr.v4.runtime.atn.SemanticContext.PrecedencePredicate}) are not currently evaluated - /// through this method.
      • - ///
      • Operator predicates (represented by {@link org.antlr.v4.runtime.atn.SemanticContext.AND} and - /// {@link org.antlr.v4.runtime.atn.SemanticContext.OR}) are evaluated as a single semantic + /// evaluate the following but it may change in the future: + /// + /// * Precedence predicates (represented by + /// _org.antlr.v4.runtime.atn.SemanticContext.PrecedencePredicate_) are not currently evaluated + /// through this method. + /// * Operator predicates (represented by _org.antlr.v4.runtime.atn.SemanticContext.AND_ and + /// _org.antlr.v4.runtime.atn.SemanticContext.OR_) are evaluated as a single semantic /// context, rather than evaluating the operands individually. /// Implementations which require evaluation results from individual /// predicates should override this method to explicitly handle evaluation of - /// the operands within operator predicates.
      • - ///
      - /// + /// the operands within operator predicates. + /// /// - parameter pred: The semantic context to evaluate /// - parameter parserCallStack: The parser context in which to evaluate the /// semantic context - /// - parameter alt: The alternative which is guarded by {@code pred} - /// - parameter fullCtx: {@code true} if the evaluation is occurring during LL - /// prediction; otherwise, {@code false} if the evaluation is occurring + /// - parameter alt: The alternative which is guarded by `pred` + /// - parameter fullCtx: `true` if the evaluation is occurring during LL + /// prediction; otherwise, `false` if the evaluation is occurring /// during SLL prediction - /// - /// - 4.3 + /// + /// - since: 4.3 + /// internal func evalSemanticContext(_ pred: SemanticContext, _ parserCallStack: ParserRuleContext, _ alt: Int, _ fullCtx: Bool) throws -> Bool { return try pred.eval(parser, parserCallStack) } + /// /// TODO: If we are doing predicates, there is no point in pursuing /// closure operations if we reach a DFA state that uniquely predicts /// alternative. We will not be caching that DFA state and it is a /// waste to pursue the closure. 
Might have to advance when we do /// ambig detection thought :( - + /// final internal func closure(_ config: ATNConfig, _ configs: ATNConfigSet, _ closureBusy: inout Set, _ collectPredicates: Bool, _ fullCtx: Bool, _ treatEofAsEpsilon: Bool) throws { - let initialDepth: Int = 0 - try closureCheckingStopState(config, configs, &closureBusy, collectPredicates, - fullCtx, - initialDepth, treatEofAsEpsilon) + let initialDepth = 0 + try closureCheckingStopState(config, configs, &closureBusy, collectPredicates, fullCtx, initialDepth, treatEofAsEpsilon) assert(!fullCtx || !configs.dipsIntoOuterContext, "Expected: !fullCtx||!configs.dipsIntoOuterContext") } @@ -1357,7 +1358,7 @@ open class ParserATNSimulator: ATNSimulator { for i in 0.., @@ -1413,10 +1414,10 @@ open class ParserATNSimulator: ATNSimulator { _ treatEofAsEpsilon: Bool) throws { // print(__FUNCTION__) //long startTime = System.currentTimeMillis(); - let p: ATNState = config.state + let p = config.state // optimization if !p.onlyHasEpsilonTransitions() { - try configs.add(config, &mergeCache) + try! configs.add(config, &mergeCache) // make sure to not return here, because EOF transitions can act as // both epsilon transitions and non-epsilon transitions. // if ( debug ) print("added config "+configs); @@ -1427,11 +1428,9 @@ open class ParserATNSimulator: ATNSimulator { canDropLoopEntryEdgeInLeftRecursiveRule(config) { continue } - let t: Transition = p.transition(i) - let continueCollecting: Bool = - !(t is ActionTransition) && collectPredicates - let c: ATNConfig? 
= try getEpsilonTarget(config, t, continueCollecting, - depth == 0, fullCtx, treatEofAsEpsilon) + let t = p.transition(i) + let continueCollecting = !(t is ActionTransition) && collectPredicates + let c = try getEpsilonTarget(config, t, continueCollecting, depth == 0, fullCtx, treatEofAsEpsilon) if let c = c { if !t.isEpsilon() { // avoid infinite recursion for EOF* and EOF+ @@ -1442,7 +1441,7 @@ open class ParserATNSimulator: ATNSimulator { } } - var newDepth: Int = depth + var newDepth = depth if config.state is RuleStopState { assert(!fullCtx, "Expected: !fullCtx") // target fell off end of rule; mark resulting c as having dipped into outer context @@ -1492,49 +1491,50 @@ open class ParserATNSimulator: ATNSimulator { //print("That took: "+(finishTime-startTime)+ " ms"); } + /// /// Implements first-edge (loop entry) elimination as an optimization /// during closure operations. See antlr/antlr4#1398. - /// + /// /// The optimization is to avoid adding the loop entry config when /// the exit path can only lead back to the same /// StarLoopEntryState after popping context at the rule end state /// (traversing only epsilon edges, so we're still in closure, in /// this same rule). - /// + /// /// We need to detect any state that can reach loop entry on /// epsilon w/o exiting rule. We don't have to look at FOLLOW /// links, just ensure that all stack tops for config refer to key /// states in LR rule. - /// + /// /// To verify we are in the right situation we must first check /// closure is at a StarLoopEntryState generated during LR removal. /// Then we check that each stack top of context is a return state /// from one of these cases: - /// + /// /// 1. 'not' expr, '(' type ')' expr. The return state points at loop entry state /// 2. expr op expr. The return state is the block end of internal block of (...)* /// 3. 'between' expr 'and' expr. The return state of 2nd expr reference. /// That state points at block end of internal block of (...)*. /// 4. expr '?' 
expr ':' expr. The return state points at block end, /// which points at loop entry state. - /// + /// /// If any is true for each stack top, then closure does not add a /// config to the current config set for edge[0], the loop entry branch. - /// + /// /// Conditions fail if any context for the current config is: - /// + /// /// a. empty (we'd fall out of expr to do a global FOLLOW which could /// even be to some weird spot in expr) or, /// b. lies outside of expr or, /// c. lies within expr but at a state not the BlockEndState /// generated during LR removal - /// + /// /// Do we need to evaluate predicates ever in closure for this case? - /// + /// /// No. Predicates, including precedence predicates, are only /// evaluated when computing a DFA start state. I.e., only before /// the lookahead (but not parser) consumes a token. - /// + /// /// There are no epsilon edges allowed in LR rule alt blocks or in /// the "primary" part (ID here). If closure is in /// StarLoopEntryState any lookahead operation will have consumed a @@ -1546,9 +1546,9 @@ open class ParserATNSimulator: ATNSimulator { /// closure starting at edges[0], edges[1] emanating from /// StarLoopEntryState. That means it is not performing closure on /// StarLoopEntryState during compute-start-state. - /// + /// /// How do we know this always gives same prediction answer? - /// + /// /// Without predicates, loop entry and exit paths are ambiguous /// upon remaining input +b (in, say, a+b). Either paths lead to /// valid parses. Closure can lead to consuming + immediately or by @@ -1556,34 +1556,35 @@ open class ParserATNSimulator: ATNSimulator { /// again to StarLoopEntryState to match +b. In this special case, /// we choose the more efficient path, which is to take the bypass /// path. - /// + /// /// The lookahead language has not changed because closure chooses /// one path over the other. Both paths lead to consuming the same /// remaining input during a lookahead operation. 
If the next token /// is an operator, lookahead will enter the choice block with /// operators. If it is not, lookahead will exit expr. Same as if /// closure had chosen to enter the choice block immediately. - /// + /// /// Closure is examining one config (some loopentrystate, some alt, /// context) which means it is considering exactly one alt. Closure /// always copies the same alt to any derived configs. - /// + /// /// How do we know this optimization doesn't mess up precedence in /// our parse trees? - /// + /// /// Looking through expr from left edge of stat only has to confirm /// that an input, say, a+b+c; begins with any valid interpretation /// of an expression. The precedence actually doesn't matter when /// making a decision in stat seeing through expr. It is only when /// parsing rule expr that we must use the precedence to get the /// right interpretation and, hence, parse tree. - /// + /// /// - 4.6 + /// internal func canDropLoopEntryEdgeInLeftRecursiveRule(_ config: ATNConfig) -> Bool { if ParserATNSimulator.TURN_OFF_LR_LOOP_ENTRY_BRANCH_OPT { return false } - let p: ATNState = config.state + let p = config.state guard let configContext = config.context else { return false } @@ -1591,7 +1592,7 @@ open class ParserATNSimulator: ATNSimulator { // left-recursion elimination. For efficiency, also check if // the context has an empty stack case. If so, it would mean // global FOLLOW so we can't perform optimization - if p.getStateType() != ATNState.STAR_LOOP_ENTRY || + if p.getStateType() != ATNState.STAR_LOOP_ENTRY || !( (p as! StarLoopEntryState)).precedenceRuleDecision || // Are we the special loop entry/exit state? configContext.isEmpty() || // If SLL wildcard configContext.hasEmptyPath(){ @@ -1600,41 +1601,41 @@ open class ParserATNSimulator: ATNSimulator { // Require all return states to return back to the same rule // that p is in. 
- let numCtxs: Int = configContext.size() + let numCtxs = configContext.size() for i in 0 ..< numCtxs { // for each stack context - let returnState: ATNState = atn.states[configContext.getReturnState(i)]! + let returnState = atn.states[configContext.getReturnState(i)]! if returnState.ruleIndex != p.ruleIndex {return false} } - let decisionStartState: BlockStartState = (p.transition(0).target as! BlockStartState) - let blockEndStateNum: Int = decisionStartState.endState!.stateNumber - let blockEndState: BlockEndState = (atn.states[blockEndStateNum] as! BlockEndState) + let decisionStartState = (p.transition(0).target as! BlockStartState) + let blockEndStateNum = decisionStartState.endState!.stateNumber + let blockEndState = (atn.states[blockEndStateNum] as! BlockEndState) // Verify that the top of each stack context leads to loop entry/exit // state through epsilon edges and w/o leaving rule. for i in 0 ..< numCtxs { // for each stack context - let returnStateNumber: Int = configContext.getReturnState(i) - let returnState: ATNState = atn.states[returnStateNumber]! + let returnStateNumber = configContext.getReturnState(i) + let returnState = atn.states[returnStateNumber]! // all states must have single outgoing epsilon edge if returnState.getNumberOfTransitions() != 1 || !returnState.transition(0).isEpsilon(){ return false } // Look for prefix op case like 'not expr', (' type ')' expr - let returnStateTarget: ATNState = returnState.transition(0).target - if returnState.getStateType() == ATNState.BLOCK_END && + let returnStateTarget = returnState.transition(0).target + if returnState.getStateType() == ATNState.BLOCK_END && returnStateTarget == p { continue } // Look for 'expr op expr' or case where expr's return state is block end // of (...)* internal block; the block end points to loop back // which points to p but we don't need to check that - if returnState == blockEndState{ + if returnState == blockEndState { continue } // Look for ternary expr ? expr : expr. 
The return state points at block end, // which points at loop entry state - if returnStateTarget == blockEndState{ + if returnStateTarget == blockEndState { continue } // Look for complex prefix 'between expr and expr' case where 2nd expr's @@ -1717,87 +1718,87 @@ open class ParserATNSimulator: ATNSimulator { final func precedenceTransition(_ config: ATNConfig, - _ pt: PrecedencePredicateTransition, - _ collectPredicates: Bool, - _ inContext: Bool, - _ fullCtx: Bool) throws -> ATNConfig { - if debug { - print("PRED (collectPredicates=\(collectPredicates)) \(pt.precedence)>=_p, ctx dependent=true") - //if ( parser != nil ) { - print("context surrounding pred is \(parser.getRuleInvocationStack())") - // } - } + _ pt: PrecedencePredicateTransition, + _ collectPredicates: Bool, + _ inContext: Bool, + _ fullCtx: Bool) throws -> ATNConfig { + if debug { + print("PRED (collectPredicates=\(collectPredicates)) \(pt.precedence)>=_p, ctx dependent=true") + //if ( parser != nil ) { + print("context surrounding pred is \(parser.getRuleInvocationStack())") + // } + } - var c: ATNConfig? = nil - if collectPredicates && inContext { - if fullCtx { - // In full context mode, we can evaluate predicates on-the-fly - // during closure, which dramatically reduces the size of - // the config sets. It also obviates the need to test predicates - // later during conflict resolution. - let currentPosition: Int = _input.index() - try _input.seek(_startIndex) - let predSucceeds: Bool = try evalSemanticContext(pt.getPredicate(), _outerContext, config.alt, fullCtx) - try _input.seek(currentPosition) - if predSucceeds { - c = ATNConfig(config, pt.target) // no pred context - } - } else { - let newSemCtx: SemanticContext = - SemanticContext.and(config.semanticContext, pt.getPredicate()) - c = ATNConfig(config, pt.target, newSemCtx) + var c: ATNConfig? 
= nil + if collectPredicates && inContext { + if fullCtx { + // In full context mode, we can evaluate predicates on-the-fly + // during closure, which dramatically reduces the size of + // the config sets. It also obviates the need to test predicates + // later during conflict resolution. + let currentPosition = _input.index() + try _input.seek(_startIndex) + let predSucceeds = try evalSemanticContext(pt.getPredicate(), _outerContext, config.alt, fullCtx) + try _input.seek(currentPosition) + if predSucceeds { + c = ATNConfig(config, pt.target) // no pred context } - } else { - c = ATNConfig(config, pt.target) } + else { + let newSemCtx = SemanticContext.and(config.semanticContext, pt.getPredicate()) + c = ATNConfig(config, pt.target, newSemCtx) + } + } + else { + c = ATNConfig(config, pt.target) + } - if debug { - print("config from pred transition=\(String(describing: c))") - } - return c! + if debug { + print("config from pred transition=\(String(describing: c))") + } + return c! } final func predTransition(_ config: ATNConfig, - _ pt: PredicateTransition, - _ collectPredicates: Bool, - _ inContext: Bool, - _ fullCtx: Bool) throws -> ATNConfig? { - if debug { - print("PRED (collectPredicates=\(collectPredicates)) \(pt.ruleIndex):\(pt.predIndex), ctx dependent=\(pt.isCtxDependent)") - //if ( parser != nil ) { - print("context surrounding pred is \(parser.getRuleInvocationStack())") - //} - } + _ pt: PredicateTransition, + _ collectPredicates: Bool, + _ inContext: Bool, + _ fullCtx: Bool) throws -> ATNConfig? { + if debug { + print("PRED (collectPredicates=\(collectPredicates)) \(pt.ruleIndex):\(pt.predIndex), ctx dependent=\(pt.isCtxDependent)") + //if ( parser != nil ) { + print("context surrounding pred is \(parser.getRuleInvocationStack())") + //} + } - var c: ATNConfig? 
= nil - if collectPredicates && - (!pt.isCtxDependent || (pt.isCtxDependent && inContext)) { - if fullCtx { - // In full context mode, we can evaluate predicates on-the-fly - // during closure, which dramatically reduces the size of - // the config sets. It also obviates the need to test predicates - // later during conflict resolution. - let currentPosition: Int = _input.index() - try _input.seek(_startIndex) - let predSucceeds: Bool = try evalSemanticContext(pt.getPredicate(), _outerContext, config.alt, fullCtx) - try _input.seek(currentPosition) - if predSucceeds { - c = ATNConfig(config, pt.target) // no pred context - } - } else { - let newSemCtx: SemanticContext = - SemanticContext.and(config.semanticContext, pt.getPredicate()) - c = ATNConfig(config, pt.target, newSemCtx) - } + var c: ATNConfig? = nil + if collectPredicates && + (!pt.isCtxDependent || (pt.isCtxDependent && inContext)) { + if fullCtx { + // In full context mode, we can evaluate predicates on-the-fly + // during closure, which dramatically reduces the size of + // the config sets. It also obviates the need to test predicates + // later during conflict resolution. 
+ let currentPosition = _input.index() + try _input.seek(_startIndex) + let predSucceeds = try evalSemanticContext(pt.getPredicate(), _outerContext, config.alt, fullCtx) + try _input.seek(currentPosition) + if predSucceeds { + c = ATNConfig(config, pt.target) // no pred context + } } else { - c = ATNConfig(config, pt.target) + let newSemCtx = SemanticContext.and(config.semanticContext, pt.getPredicate()) + c = ATNConfig(config, pt.target, newSemCtx) } + } else { + c = ATNConfig(config, pt.target) + } - if debug { - print("config from pred transition=\(String(describing: c))") - } - return c + if debug { + print("config from pred transition=\(String(describing: c))") + } + return c } @@ -1806,30 +1807,32 @@ open class ParserATNSimulator: ATNSimulator { print("CALL rule \(getRuleName(t.target.ruleIndex!)), ctx=\(String(describing: config.context))") } - let returnState: ATNState = t.followState - let newContext: PredictionContext = - SingletonPredictionContext.create(config.context, returnState.stateNumber) + let returnState = t.followState + let newContext = SingletonPredictionContext.create(config.context, returnState.stateNumber) return ATNConfig(config, t.target, newContext) } - /// Gets a {@link java.util.BitSet} containing the alternatives in {@code configs} + /// + /// Gets a _java.util.BitSet_ containing the alternatives in `configs` /// which are part of one or more conflicting alternative subsets. - /// - /// - parameter configs: The {@link org.antlr.v4.runtime.atn.ATNConfigSet} to analyze. - /// - returns: The alternatives in {@code configs} which are part of one or more - /// conflicting alternative subsets. If {@code configs} does not contain any - /// conflicting subsets, this method returns an empty {@link java.util.BitSet}. 
- final func getConflictingAlts(_ configs: ATNConfigSet) throws -> BitSet { - let altsets: Array = try PredictionMode.getConflictingAltSubsets(configs) + /// + /// - parameter configs: The _org.antlr.v4.runtime.atn.ATNConfigSet_ to analyze. + /// - returns: The alternatives in `configs` which are part of one or more + /// conflicting alternative subsets. If `configs` does not contain any + /// conflicting subsets, this method returns an empty _java.util.BitSet_. + /// + final func getConflictingAlts(_ configs: ATNConfigSet) -> BitSet { + let altsets = PredictionMode.getConflictingAltSubsets(configs) return PredictionMode.getAlts(altsets) } + /// /// Sam pointed out a problem with the previous definition, v3, of /// ambiguous states. If we have another state associated with conflicting /// alternatives, we should keep going. For example, the following grammar - /// + /// /// s : (ID | ID ID?) ';' ; - /// + /// /// When the ATN simulation reaches the state before ';', it has a DFA /// state that looks like: [12|1|[], 6|2|[], 12|2|[]]. Naturally /// 12|1|[] and 12|2|[] conflict, but we cannot stop processing this node @@ -1841,13 +1844,13 @@ open class ParserATNSimulator: ATNSimulator { /// ignore the conflict between alts 1 and 2. We ignore a set of /// conflicting alts when there is an intersection with an alternative /// associated with a single alt state in the state→config-list map. - /// + /// /// It's also the case that we might have two conflicting configurations but /// also a 3rd nonconflicting configuration for a different alternative: /// [1|1|[], 1|2|[], 8|3|[]]. This can come about from grammar: - /// + /// /// a : A | A | A B ; - /// + /// /// After matching input A, we reach the stop state for rule A, state 1. /// State 8 is the state right before B. Clearly alternatives 1 and 2 /// conflict and no amount of further lookahead will separate the two. 
@@ -1858,11 +1861,12 @@ open class ParserATNSimulator: ATNSimulator { /// looking for input reasonably, I don't declare the state done. We /// ignore a set of conflicting alts when we have an alternative /// that we still need to pursue. - final func getConflictingAltsOrUniqueAlt(_ configs: ATNConfigSet) throws -> BitSet { + /// + final func getConflictingAltsOrUniqueAlt(_ configs: ATNConfigSet) -> BitSet { var conflictingAlts: BitSet if configs.uniqueAlt != ATN.INVALID_ALT_NUMBER { conflictingAlts = BitSet() - try conflictingAlts.set(configs.uniqueAlt) + try! conflictingAlts.set(configs.uniqueAlt) } else { conflictingAlts = configs.conflictingAlts! } @@ -1874,9 +1878,8 @@ open class ParserATNSimulator: ATNSimulator { if t == CommonToken.EOF { return "EOF" } - //var vocabulary : Vocabulary = parser != nil ? parser.getVocabulary() : Vocabulary.EMPTY_VOCABULARY; - let vocabulary: Vocabulary = parser.getVocabulary() - let displayName: String = vocabulary.getDisplayName(t) + let vocabulary = parser.getVocabulary() + let displayName = vocabulary.getDisplayName(t) if displayName == String(t) { return displayName } @@ -1888,24 +1891,23 @@ open class ParserATNSimulator: ATNSimulator { return try getTokenName(input.LA(1)) } + /// /// Used for debugging in adaptivePredict around execATN but I cut /// it out for clarity now that alg. works well. We can leave this /// "dead" code for a bit. + /// public final func dumpDeadEndConfigs(_ nvae: NoViableAltException) { errPrint("dead end configs: ") - for c: ATNConfig in nvae.getDeadEndConfigs()!.configs { - var trans: String = "no edges" + for c in nvae.getDeadEndConfigs()!.configs { + var trans = "no edges" if c.state.getNumberOfTransitions() > 0 { - let t: Transition = c.state.transition(0) - if t is AtomTransition { - let at: AtomTransition = t as! AtomTransition + let t = c.state.transition(0) + if let at = t as? 
AtomTransition { trans = "Atom " + getTokenName(at.label) - } else { - if t is SetTransition { - let st: SetTransition = t as! SetTransition - let not: Bool = st is NotSetTransition - trans = (not ? "~" : "") + "Set " + st.set.toString() - } + } + else if let st = t as? SetTransition { + let not = st is NotSetTransition + trans = (not ? "~" : "") + "Set " + st.set.toString() } } errPrint("\(c.toString(parser, true)):\(trans)") @@ -1914,53 +1916,49 @@ open class ParserATNSimulator: ATNSimulator { final func noViableAlt(_ input: TokenStream, - _ outerContext: ParserRuleContext, - _ configs: ATNConfigSet, - _ startIndex: Int) throws -> NoViableAltException { - return try NoViableAltException(parser, input, - input.get(startIndex), - input.LT(1)!, - configs, outerContext) + _ outerContext: ParserRuleContext, + _ configs: ATNConfigSet, + _ startIndex: Int) -> NoViableAltException { + let startToken = try! input.get(startIndex) + var offendingToken: Token? = nil + do { + offendingToken = try input.LT(1) + } + catch { + } + return NoViableAltException(parser, input, startToken, offendingToken, configs, outerContext) } internal static func getUniqueAlt(_ configs: ATNConfigSet) -> Int { - // var alt: Int = ATN.INVALID_ALT_NUMBER - // for c: ATNConfig in configs.configs { - // if alt == ATN.INVALID_ALT_NUMBER { - // alt = c.alt // found first alt - // } else { - // if c.alt != alt { - // return ATN.INVALID_ALT_NUMBER - // } - // } - // } let alt = configs.getUniqueAlt() return alt } + /// /// Add an edge to the DFA, if possible. This method calls - /// {@link #addDFAState} to ensure the {@code to} state is present in the - /// DFA. If {@code from} is {@code null}, or if {@code t} is outside the + /// _#addDFAState_ to ensure the `to` state is present in the + /// DFA. If `from` is `null`, or if `t` is outside the /// range of edges that can be represented in the DFA tables, this method /// returns without adding the edge to the DFA. - /// - ///

      If {@code to} is {@code null}, this method returns {@code null}. - /// Otherwise, this method returns the {@link org.antlr.v4.runtime.dfa.DFAState} returned by calling - /// {@link #addDFAState} for the {@code to} state.

      - /// + /// + /// If `to` is `null`, this method returns `null`. + /// Otherwise, this method returns the _org.antlr.v4.runtime.dfa.DFAState_ returned by calling + /// _#addDFAState_ for the `to` state. + /// /// - parameter dfa: The DFA /// - parameter from: The source state for the edge /// - parameter t: The input symbol /// - parameter to: The target state for the edge - /// - /// - returns: If {@code to} is {@code null}, this method returns {@code null}; - /// otherwise this method returns the result of calling {@link #addDFAState} - /// on {@code to} + /// + /// - returns: If `to` is `null`, this method returns `null`; + /// otherwise this method returns the result of calling _#addDFAState_ + /// on `to` + /// @discardableResult final func addDFAEdge(_ dfa: DFA, _ from: DFAState?, _ t: Int, - _ to: DFAState?) throws -> DFAState? { + _ to: DFAState?) -> DFAState? { var to = to if debug { print("EDGE \(String(describing: from)) -> \(String(describing: to)) upon \(getTokenName(t))") @@ -1970,7 +1968,7 @@ open class ParserATNSimulator: ATNSimulator { return nil } - to = try addDFAState(dfa, to!) // used existing if possible not incoming + to = addDFAState(dfa, to!) // used existing if possible not incoming if from == nil || t < -1 || t > atn.maxTokenType { return to } @@ -1987,32 +1985,33 @@ open class ParserATNSimulator: ATNSimulator { } if debug { - // print ("DFA=\n"+dfa.toString(parser != nil ? parser.getVocabulary() : Vocabulary.EMPTY_VOCABULARY)); print("DFA=\n" + dfa.toString(parser.getVocabulary())) } return to } - /// Add state {@code D} to the DFA if it is not already present, and return - /// the actual instance stored in the DFA. If a state equivalent to {@code D} + /// + /// Add state `D` to the DFA if it is not already present, and return + /// the actual instance stored in the DFA. If a state equivalent to `D` /// is already in the DFA, the existing state is returned. Otherwise this - /// method returns {@code D} after adding it to the DFA. 
- /// - ///

      If {@code D} is {@link #ERROR}, this method returns {@link #ERROR} and - /// does not change the DFA.

      - /// + /// method returns `D` after adding it to the DFA. + /// + /// If `D` is _#ERROR_, this method returns _#ERROR_ and + /// does not change the DFA. + /// /// - parameter dfa: The dfa /// - parameter D: The DFA state to add /// - returns: The state stored in the DFA. This will be either the existing - /// state if {@code D} is already in the DFA, or {@code D} itself if the + /// state if `D` is already in the DFA, or `D` itself if the /// state was not already present. - final func addDFAState(_ dfa: DFA, _ D: DFAState) throws -> DFAState { + /// + final func addDFAState(_ dfa: DFA, _ D: DFAState) -> DFAState { if D == ATNSimulator.ERROR { return D } - return try dfaStatesMutex.synchronized { + return dfaStatesMutex.synchronized { if let existing = dfa.states[D] { return existing! } @@ -2020,7 +2019,7 @@ open class ParserATNSimulator: ATNSimulator { D.stateNumber = dfa.states.count if !D.configs.isReadonly() { - try D.configs.optimizeConfigs(self) + try! D.configs.optimizeConfigs(self) D.configs.setReadonly(true) } @@ -2033,44 +2032,49 @@ open class ParserATNSimulator: ATNSimulator { } } - func reportAttemptingFullContext(_ dfa: DFA, _ conflictingAlts: BitSet?, _ configs: ATNConfigSet, _ startIndex: Int, _ stopIndex: Int) throws { + func reportAttemptingFullContext(_ dfa: DFA, _ conflictingAlts: BitSet?, _ configs: ATNConfigSet, _ startIndex: Int, _ stopIndex: Int) { if debug || retry_debug { - let interval: Interval = Interval.of(startIndex, stopIndex) - try print("reportAttemptingFullContext decision=\(dfa.decision):\(configs), input=\(parser.getTokenStream()!.getText(interval))") + let input = getTextInInterval(startIndex, stopIndex) + print("reportAttemptingFullContext decision=\(dfa.decision):\(configs), input=\(input)") } - // if ( parser=nil ) { - try parser.getErrorListenerDispatch().reportAttemptingFullContext(parser, dfa, startIndex, stopIndex, conflictingAlts, configs) - // } + 
parser.getErrorListenerDispatch().reportAttemptingFullContext(parser, dfa, startIndex, stopIndex, conflictingAlts, configs) } - func reportContextSensitivity(_ dfa: DFA, _ prediction: Int, _ configs: ATNConfigSet, _ startIndex: Int, _ stopIndex: Int) throws { + func reportContextSensitivity(_ dfa: DFA, _ prediction: Int, _ configs: ATNConfigSet, _ startIndex: Int, _ stopIndex: Int) { if debug || retry_debug { - let interval: Interval = Interval.of(startIndex, stopIndex) - try print("reportContextSensitivity decision=\(dfa.decision):\(configs), input=\(parser.getTokenStream()!.getText(interval))") + let input = getTextInInterval(startIndex, stopIndex) + print("reportContextSensitivity decision=\(dfa.decision):\(configs), input=\(input)") } - //if ( parser=nil ) { - try parser.getErrorListenerDispatch().reportContextSensitivity(parser, dfa, startIndex, stopIndex, prediction, configs) - // } + parser.getErrorListenerDispatch().reportContextSensitivity(parser, dfa, startIndex, stopIndex, prediction, configs) } + /// /// If context sensitive parsing, we know it's ambiguity not conflict + /// // configs that LL not SLL considered conflictin internal func reportAmbiguity(_ dfa: DFA, _ D: DFAState, // the DFA state from execATN() that had SLL conflicts _ startIndex: Int, _ stopIndex: Int, _ exact: Bool, _ ambigAlts: BitSet, - _ configs: ATNConfigSet) throws + _ configs: ATNConfigSet) { if debug || retry_debug { - let interval: Interval = Interval.of(startIndex, stopIndex) - try print("reportAmbiguity \(ambigAlts):\(configs), input=\(parser.getTokenStream()!.getText(interval))") + let input = getTextInInterval(startIndex, stopIndex) + print("reportAmbiguity \(ambigAlts):\(configs), input=\(input)") } - //TODO ( parser != nil ? 
- //if ( parser != nil ) { - try parser .getErrorListenerDispatch().reportAmbiguity(parser, dfa, startIndex, stopIndex, + parser.getErrorListenerDispatch().reportAmbiguity(parser, dfa, startIndex, stopIndex, exact, ambigAlts, configs) - //} + } + + private func getTextInInterval(_ startIndex: Int, _ stopIndex: Int) -> String { + let interval = Interval.of(startIndex, stopIndex) + do { + return try parser.getTokenStream()?.getText(interval) ?? "" + } + catch { + return "" + } } public final func setPredictionMode(_ mode: PredictionMode) { @@ -2082,7 +2086,6 @@ open class ParserATNSimulator: ATNSimulator { return mode } - /// - 4.3 public final func getParser() -> Parser { return parser } diff --git a/runtime/Swift/Sources/Antlr4/atn/PlusBlockStartState.swift b/runtime/Swift/Sources/Antlr4/atn/PlusBlockStartState.swift index a3d086502..06dac1f24 100644 --- a/runtime/Swift/Sources/Antlr4/atn/PlusBlockStartState.swift +++ b/runtime/Swift/Sources/Antlr4/atn/PlusBlockStartState.swift @@ -1,13 +1,17 @@ +/// /// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. /// Use of this file is governed by the BSD 3-clause license that /// can be found in the LICENSE.txt file in the project root. +/// -/// Start of {@code (A|B|...)+} loop. Technically a decision state, but +/// +/// Start of `(A|B|...)+` loop. Technically a decision state, but /// we don't use for code generation; somebody might need it, so I'm defining -/// it for completeness. In reality, the {@link org.antlr.v4.runtime.atn.PlusLoopbackState} node is the -/// real decision-making note for {@code A+}. +/// it for completeness. In reality, the _org.antlr.v4.runtime.atn.PlusLoopbackState_ node is the +/// real decision-making note for `A+`. +/// public final class PlusBlockStartState: BlockStartState { public var loopBackState: PlusLoopbackState? 
diff --git a/runtime/Swift/Sources/Antlr4/atn/PlusLoopbackState.swift b/runtime/Swift/Sources/Antlr4/atn/PlusLoopbackState.swift index e850d3ce4..d716ce60a 100644 --- a/runtime/Swift/Sources/Antlr4/atn/PlusLoopbackState.swift +++ b/runtime/Swift/Sources/Antlr4/atn/PlusLoopbackState.swift @@ -1,11 +1,15 @@ +/// /// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. /// Use of this file is governed by the BSD 3-clause license that /// can be found in the LICENSE.txt file in the project root. +/// -/// Decision state for {@code A+} and {@code (A|B)+}. It has two transitions: +/// +/// Decision state for `A+` and `(A|B)+`. It has two transitions: /// one to the loop back to start of the block and one to exit. +/// public final class PlusLoopbackState: DecisionState { diff --git a/runtime/Swift/Sources/Antlr4/atn/PrecedencePredicateTransition.swift b/runtime/Swift/Sources/Antlr4/atn/PrecedencePredicateTransition.swift index ee39eed0f..5a8565faf 100644 --- a/runtime/Swift/Sources/Antlr4/atn/PrecedencePredicateTransition.swift +++ b/runtime/Swift/Sources/Antlr4/atn/PrecedencePredicateTransition.swift @@ -1,11 +1,15 @@ +/// /// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. /// Use of this file is governed by the BSD 3-clause license that /// can be found in the LICENSE.txt file in the project root. +/// -/// +/// +/// /// - Sam Harwell +/// public final class PrecedencePredicateTransition: AbstractPredicateTransition, CustomStringConvertible { public final var precedence: Int diff --git a/runtime/Swift/Sources/Antlr4/atn/PredicateEvalInfo.swift b/runtime/Swift/Sources/Antlr4/atn/PredicateEvalInfo.swift index 6ee7039b5..4c3c223ad 100644 --- a/runtime/Swift/Sources/Antlr4/atn/PredicateEvalInfo.swift +++ b/runtime/Swift/Sources/Antlr4/atn/PredicateEvalInfo.swift @@ -1,30 +1,41 @@ +/// /// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. 
/// Use of this file is governed by the BSD 3-clause license that /// can be found in the LICENSE.txt file in the project root. +/// +/// /// This class represents profiling event information for semantic predicate /// evaluations which occur during prediction. -/// +/// /// - seealso: org.antlr.v4.runtime.atn.ParserATNSimulator#evalSemanticContext -/// +/// /// - 4.3 +/// public class PredicateEvalInfo: DecisionEventInfo { + /// /// The semantic context which was evaluated. + /// public private(set) var semctx: SemanticContext + /// /// The alternative number for the decision which is guarded by the semantic - /// context {@link #semctx}. Note that other ATN + /// context _#semctx_. Note that other ATN /// configurations may predict the same alternative which are guarded by - /// other semantic contexts and/or {@link org.antlr.v4.runtime.atn.SemanticContext#NONE}. + /// other semantic contexts and/or _org.antlr.v4.runtime.atn.SemanticContext#NONE_. + /// public private(set) var predictedAlt: Int - /// The result of evaluating the semantic context {@link #semctx}. + /// + /// The result of evaluating the semantic context _#semctx_. + /// public private(set) var evalResult: Bool - /// Constructs a new instance of the {@link org.antlr.v4.runtime.atn.PredicateEvalInfo} class with the + /// + /// Constructs a new instance of the _org.antlr.v4.runtime.atn.PredicateEvalInfo_ class with the /// specified detailed predicate evaluation information. - /// + /// /// - parameter decision: The decision number /// - parameter input: The input token stream /// - parameter startIndex: The start index for the current prediction @@ -34,14 +45,15 @@ public class PredicateEvalInfo: DecisionEventInfo { /// - parameter semctx: The semantic context which was evaluated /// - parameter evalResult: The results of evaluating the semantic context /// - parameter predictedAlt: The alternative number for the decision which is - /// guarded by the semantic context {@code semctx}. 
See {@link #predictedAlt} + /// guarded by the semantic context `semctx`. See _#predictedAlt_ /// for more information. - /// - parameter fullCtx: {@code true} if the semantic context was - /// evaluated during LL prediction; otherwise, {@code false} if the semantic + /// - parameter fullCtx: `true` if the semantic context was + /// evaluated during LL prediction; otherwise, `false` if the semantic /// context was evaluated during SLL prediction - /// + /// /// - seealso: org.antlr.v4.runtime.atn.ParserATNSimulator#evalSemanticContext(org.antlr.v4.runtime.atn.SemanticContext, org.antlr.v4.runtime.ParserRuleContext, int, boolean) /// - seealso: org.antlr.v4.runtime.atn.SemanticContext#eval(org.antlr.v4.runtime.Recognizer, org.antlr.v4.runtime.RuleContext) + /// public init(_ decision: Int, _ input: TokenStream, _ startIndex: Int, diff --git a/runtime/Swift/Sources/Antlr4/atn/PredicateTransition.swift b/runtime/Swift/Sources/Antlr4/atn/PredicateTransition.swift index 2fa0bab1b..aa608857c 100644 --- a/runtime/Swift/Sources/Antlr4/atn/PredicateTransition.swift +++ b/runtime/Swift/Sources/Antlr4/atn/PredicateTransition.swift @@ -1,14 +1,18 @@ +/// /// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. /// Use of this file is governed by the BSD 3-clause license that /// can be found in the LICENSE.txt file in the project root. +/// +/// /// TODO: this is old comment: /// A tree of semantic predicates from the grammar AST if label==SEMPRED. /// In the ATN, labels will always be exactly one predicate, but the DFA /// may have to combine a bunch of them as it collects predicates from /// multiple ATN configurations into a single DFA state. 
+/// public final class PredicateTransition: AbstractPredicateTransition { public let ruleIndex: Int diff --git a/runtime/Swift/Sources/Antlr4/atn/PredictionContext.swift b/runtime/Swift/Sources/Antlr4/atn/PredictionContext.swift index 9fb06ebc6..660528da2 100644 --- a/runtime/Swift/Sources/Antlr4/atn/PredictionContext.swift +++ b/runtime/Swift/Sources/Antlr4/atn/PredictionContext.swift @@ -1,63 +1,69 @@ +/// /// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. /// Use of this file is governed by the BSD 3-clause license that /// can be found in the LICENSE.txt file in the project root. +/// import Foundation public class PredictionContext: Hashable, CustomStringConvertible { - /// Represents {@code $} in local context prediction, which means wildcard. - /// {@code *+x = *}. - public static let EMPTY: EmptyPredictionContext = EmptyPredictionContext() + /// + /// Represents `$` in local context prediction, which means wildcard. + /// `+x = *`. + /// + public static let EMPTY = EmptyPredictionContext() - /// Represents {@code $} in an array in full context mode, when {@code $} - /// doesn't mean wildcard: {@code $ + x = [$,x]}. Here, - /// {@code $} = {@link #EMPTY_RETURN_STATE}. - public static let EMPTY_RETURN_STATE: Int = Int(Int32.max) + /// + /// Represents `$` in an array in full context mode, when `$` + /// doesn't mean wildcard: `$ + x = [$,x]`. Here, + /// `$` = _#EMPTY_RETURN_STATE_. + /// + public static let EMPTY_RETURN_STATE = Int(Int32.max) - private static let INITIAL_HASH: Int = 1 + private static let INITIAL_HASH = UInt32(1) + + public static var globalNodeCount = 0 - public static var globalNodeCount: Int = 0 public final let id: Int = { let oldGlobalNodeCount = globalNodeCount globalNodeCount += 1 return oldGlobalNodeCount }() - /// Stores the computed hash code of this {@link org.antlr.v4.runtime.atn.PredictionContext}. The hash + /// + /// Stores the computed hash code of this _org.antlr.v4.runtime.atn.PredictionContext_. 
The hash /// code is computed in parts to match the following reference algorithm. - /// - ///
      +    /// 
      +    /// 
           /// private int referenceHashCode() {
      -    /// int hash = {@link org.antlr.v4.runtime.misc.MurmurHash#initialize MurmurHash.initialize}({@link #INITIAL_HASH});
      -    ///
      -    /// for (int i = 0; i < {@link #size()}; i++) {
      -    /// hash = {@link org.antlr.v4.runtime.misc.MurmurHash#update MurmurHash.update}(hash, {@link #getParent getParent}(i));
      +    /// int hash = _org.antlr.v4.runtime.misc.MurmurHash#initialize MurmurHash.initialize_(_#INITIAL_HASH_);
      +    /// 
      +    /// for (int i = 0; i < _#size()_; i++) {
      +    /// hash = _org.antlr.v4.runtime.misc.MurmurHash#update MurmurHash.update_(hash, _#getParent getParent_(i));
           /// }
      -    ///
      -    /// for (int i = 0; i < {@link #size()}; i++) {
      -    /// hash = {@link org.antlr.v4.runtime.misc.MurmurHash#update MurmurHash.update}(hash, {@link #getReturnState getReturnState}(i));
      +    /// 
      +    /// for (int i = 0; i < _#size()_; i++) {
      +    /// hash = _org.antlr.v4.runtime.misc.MurmurHash#update MurmurHash.update_(hash, _#getReturnState getReturnState_(i));
           /// }
      -    ///
      -    /// hash = {@link org.antlr.v4.runtime.misc.MurmurHash#finish MurmurHash.finish}(hash, 2 * {@link #size()});
      +    /// 
      +    /// hash = _org.antlr.v4.runtime.misc.MurmurHash#finish MurmurHash.finish_(hash, 2 * _#size()_);
           /// return hash;
           /// }
      -    /// 
      + /// + /// public let cachedHashCode: Int init(_ cachedHashCode: Int) { self.cachedHashCode = cachedHashCode } - /// Convert a {@link org.antlr.v4.runtime.RuleContext} tree to a {@link org.antlr.v4.runtime.atn.PredictionContext} graph. - /// Return {@link #EMPTY} if {@code outerContext} is empty or null. + /// + /// Convert a _org.antlr.v4.runtime.RuleContext_ tree to a _org.antlr.v4.runtime.atn.PredictionContext_ graph. + /// Return _#EMPTY_ if `outerContext` is empty or null. + /// public static func fromRuleContext(_ atn: ATN, _ outerContext: RuleContext?) -> PredictionContext { - var _outerContext: RuleContext - if let outerContext = outerContext { - _outerContext = outerContext - }else { - _outerContext = RuleContext.EMPTY - } + let _outerContext = outerContext ?? RuleContext.EMPTY // if we are in RuleContext of start rule, s, then PredictionContext // is EMPTY. Nobody called us. (if we are empty, return empty) @@ -66,33 +72,31 @@ public class PredictionContext: Hashable, CustomStringConvertible { } // If we have a parent, convert it to a PredictionContext graph - var parent: PredictionContext = EMPTY - parent = PredictionContext.fromRuleContext(atn, _outerContext.parent) + let parent = PredictionContext.fromRuleContext(atn, _outerContext.parent) - let state: ATNState = atn.states[_outerContext.invokingState]! - let transition: RuleTransition = state.transition(0) as! RuleTransition + let state = atn.states[_outerContext.invokingState]! + let transition = state.transition(0) as! RuleTransition return SingletonPredictionContext.create(parent, transition.followState.stateNumber) } public func size() -> Int { - RuntimeException(#function + " must be overridden") - return 0 + fatalError(#function + " must be overridden") } public func getParent(_ index: Int) -> PredictionContext? 
{ - RuntimeException(#function + " must be overridden") - return nil + fatalError(#function + " must be overridden") } public func getReturnState(_ index: Int) -> Int { - RuntimeException(#function + " must be overridden") - return 0 + fatalError(#function + " must be overridden") } - /// This means only the {@link #EMPTY} context is in set. + /// + /// This means only the _#EMPTY_ context is in set. + /// public func isEmpty() -> Bool { return self === PredictionContext.EMPTY } @@ -106,21 +110,19 @@ public class PredictionContext: Hashable, CustomStringConvertible { } static func calculateEmptyHashCode() -> Int { - var hash: Int = MurmurHash.initialize(INITIAL_HASH) - hash = MurmurHash.finish(hash, 0) - return hash + let hash = MurmurHash.initialize(INITIAL_HASH) + return MurmurHash.finish(hash, 0) } static func calculateHashCode(_ parent: PredictionContext?, _ returnState: Int) -> Int { - var hash: Int = MurmurHash.initialize(INITIAL_HASH) + var hash = MurmurHash.initialize(INITIAL_HASH) hash = MurmurHash.update(hash, parent) hash = MurmurHash.update(hash, returnState) - hash = MurmurHash.finish(hash, 2) - return hash + return MurmurHash.finish(hash, 2) } static func calculateHashCode(_ parents: [PredictionContext?], _ returnStates: [Int]) -> Int { - var hash: Int = MurmurHash.initialize(INITIAL_HASH) + var hash = MurmurHash.initialize(INITIAL_HASH) var length = parents.count for i in 0..Stack tops equal, parents merge is same; return left graph.
      - ///

      - /// - ///

      Same stack top, parents differ; merge parents giving array node, then + /// + /// Merge two _org.antlr.v4.runtime.atn.SingletonPredictionContext_ instances. + /// + /// Stack tops equal, parents merge is same; return left graph. + /// + /// + /// Same stack top, parents differ; merge parents giving array node, then /// remainders of those graphs. A new root node is created to point to the - /// merged parents.
      - ///

      - /// - ///

      Different stack tops pointing to same parent. Make array node for the + /// merged parents. + /// + /// + /// Different stack tops pointing to same parent. Make array node for the /// root where both element in the root point to the same (original) - /// parent.
      - ///

      - /// - ///

      Different stack tops pointing to different parents. Make array node for + /// parent. + /// + /// + /// Different stack tops pointing to different parents. Make array node for /// the root where each element points to the corresponding original - /// parent.
      - ///

      - /// - /// - parameter a: the first {@link org.antlr.v4.runtime.atn.SingletonPredictionContext} - /// - parameter b: the second {@link org.antlr.v4.runtime.atn.SingletonPredictionContext} - /// - parameter rootIsWildcard: {@code true} if this is a local-context merge, + /// parent. + /// + /// + /// - parameter a: the first _org.antlr.v4.runtime.atn.SingletonPredictionContext_ + /// - parameter b: the second _org.antlr.v4.runtime.atn.SingletonPredictionContext_ + /// - parameter rootIsWildcard: `true` if this is a local-context merge, /// otherwise false to indicate a full-context merge /// - parameter mergeCache: + /// public static func mergeSingletons( _ a: SingletonPredictionContext, _ b: SingletonPredictionContext, @@ -211,7 +212,7 @@ public class PredictionContext: Hashable, CustomStringConvertible { _ mergeCache: inout DoubleKeyMap?) -> PredictionContext { if let mergeCache = mergeCache { - var previous: PredictionContext? = mergeCache.get(a, b) + var previous = mergeCache.get(a, b) if previous != nil { return previous! } @@ -229,45 +230,45 @@ public class PredictionContext: Hashable, CustomStringConvertible { return rootMerge } - if (a.returnState == b.returnState) { + if a.returnState == b.returnState { // a == b - let parent: PredictionContext = merge(a.parent!, b.parent!, rootIsWildcard, &mergeCache); + let parent = merge(a.parent!, b.parent!, rootIsWildcard, &mergeCache) // if parent is same as existing a or b parent or reduced to a parent, return it - if (parent === a.parent!) { + if parent === a.parent! { return a } // ax + bx = ax, if a=b - if (parent === b.parent!) { + if parent === b.parent! { return b } // ax + bx = bx, if a=b // else: ax + ay = a'[x,y] // merge parents x and y, giving array node with x,y then remainders // of those graphs. 
dup a, a' points at merged array // new joined parent so create new singleton pointing to it, a' - let a_: PredictionContext = SingletonPredictionContext.create(parent, a.returnState); - if (mergeCache != nil) { + let a_ = SingletonPredictionContext.create(parent, a.returnState); + if mergeCache != nil { mergeCache!.put(a, b, a_) } return a_ } else { // a != b payloads differ // see if we can collapse parents due to $+x parents if local ctx - var singleParent: PredictionContext? = nil; + var singleParent: PredictionContext? = nil //added by janyou if a === b || (a.parent != nil && a.parent! == b.parent) { // ax + bx = [a,b]x singleParent = a.parent } - if (singleParent != nil) { + if singleParent != nil { // parents are same // sort payloads and use same parent - var payloads: [Int] = [a.returnState, b.returnState]; - if (a.returnState > b.returnState) { + var payloads = [a.returnState, b.returnState] + if a.returnState > b.returnState { payloads[0] = b.returnState payloads[1] = a.returnState } - let parents: [PredictionContext?] = [singleParent, singleParent] - let a_: PredictionContext = ArrayPredictionContext(parents, payloads) - if (mergeCache != nil) { + let parents = [singleParent, singleParent] + let a_ = ArrayPredictionContext(parents, payloads) + if mergeCache != nil { mergeCache!.put(a, b, a_) } return a_ @@ -275,142 +276,143 @@ public class PredictionContext: Hashable, CustomStringConvertible { // parents differ and can't merge them. Just pack together // into array; can't merge. // ax + by = [ax,by] - var payloads: [Int] = [a.returnState, b.returnState] - var parents: [PredictionContext?] 
= [a.parent, b.parent]; - if (a.returnState > b.returnState) { + var payloads = [a.returnState, b.returnState] + var parents = [a.parent, b.parent] + if a.returnState > b.returnState { // sort by payload payloads[0] = b.returnState payloads[1] = a.returnState parents = [b.parent, a.parent] } if a is EmptyPredictionContext { - // print("parenet is null") + // print("parent is null") } - let a_: PredictionContext = ArrayPredictionContext(parents, payloads); - if (mergeCache != nil) { + let a_ = ArrayPredictionContext(parents, payloads) + if mergeCache != nil { mergeCache!.put(a, b, a_) } return a_ } } - /// Handle case where at least one of {@code a} or {@code b} is - /// {@link #EMPTY}. In the following diagrams, the symbol {@code $} is used - /// to represent {@link #EMPTY}. - /// - ///

      Local-Context Merges

      - /// - ///

      These local-context merge operations are used when {@code rootIsWildcard} - /// is true.

      - /// - ///

      {@link #EMPTY} is superset of any graph; return {@link #EMPTY}.
      - ///

      - /// - ///

      {@link #EMPTY} and anything is {@code #EMPTY}, so merged parent is - /// {@code #EMPTY}; return left graph.
      - ///

      - /// - ///

      Special case of last merge if local context.
      - ///

      - /// - ///

      Full-Context Merges

      - /// - ///

      These full-context merge operations are used when {@code rootIsWildcard} - /// is false.

      - /// - ///

      - /// - ///

      Must keep all contexts; {@link #EMPTY} in array is a special value (and - /// null parent).
      - ///

      - /// - ///

      - /// - /// - parameter a: the first {@link org.antlr.v4.runtime.atn.SingletonPredictionContext} - /// - parameter b: the second {@link org.antlr.v4.runtime.atn.SingletonPredictionContext} - /// - parameter rootIsWildcard: {@code true} if this is a local-context merge, + /// + /// Handle case where at least one of `a` or `b` is + /// _#EMPTY_. In the following diagrams, the symbol `$` is used + /// to represent _#EMPTY_. + /// + /// Local-Context Merges + /// + /// These local-context merge operations are used when `rootIsWildcard` + /// is true. + /// + /// _#EMPTY_ is superset of any graph; return _#EMPTY_. + /// + /// + /// _#EMPTY_ and anything is `#EMPTY`, so merged parent is + /// `#EMPTY`; return left graph. + /// + /// + /// Special case of last merge if local context. + /// + /// + /// Full-Context Merges + /// + /// These full-context merge operations are used when `rootIsWildcard` + /// is false. + /// + /// + /// + /// Must keep all contexts; _#EMPTY_ in array is a special value (and + /// null parent). + /// + /// + /// + /// + /// - parameter a: the first _org.antlr.v4.runtime.atn.SingletonPredictionContext_ + /// - parameter b: the second _org.antlr.v4.runtime.atn.SingletonPredictionContext_ + /// - parameter rootIsWildcard: `true` if this is a local-context merge, /// otherwise false to indicate a full-context merge + /// public static func mergeRoot(_ a: SingletonPredictionContext, _ b: SingletonPredictionContext, _ rootIsWildcard: Bool) -> PredictionContext? 
{ - if (rootIsWildcard) { - if (a === PredictionContext.EMPTY) { + if rootIsWildcard { + if a === PredictionContext.EMPTY { return PredictionContext.EMPTY } // * + b = * - if (b === PredictionContext.EMPTY) { + if b === PredictionContext.EMPTY { return PredictionContext.EMPTY } // a + * = * } else { - if (a === PredictionContext.EMPTY && b === PredictionContext.EMPTY) { + if a === PredictionContext.EMPTY && b === PredictionContext.EMPTY { return PredictionContext.EMPTY } // $ + $ = $ - if (a === PredictionContext.EMPTY) { + if a === PredictionContext.EMPTY { // $ + x = [$,x] - let payloads: [Int] = [b.returnState, EMPTY_RETURN_STATE] - let parents: [PredictionContext?] = [b.parent, nil] - let joined: PredictionContext = - ArrayPredictionContext(parents, payloads) - return joined; + let payloads = [b.returnState, EMPTY_RETURN_STATE] + let parents = [b.parent, nil] + let joined = ArrayPredictionContext(parents, payloads) + return joined } - if (b === PredictionContext.EMPTY) { + if b === PredictionContext.EMPTY { // x + $ = [$,x] ($ is always first if present) - let payloads: [Int] = [a.returnState, EMPTY_RETURN_STATE] - let parents: [PredictionContext?] = [a.parent, nil] - let joined: PredictionContext = - ArrayPredictionContext(parents, payloads) + let payloads = [a.returnState, EMPTY_RETURN_STATE] + let parents = [a.parent, nil] + let joined = ArrayPredictionContext(parents, payloads) return joined } } return nil } - /// Merge two {@link org.antlr.v4.runtime.atn.ArrayPredictionContext} instances. - /// - ///

      Different tops, different parents.
      - ///

      - /// - ///

      Shared top, same parents.
      - ///

      - /// - ///

      Shared top, different parents.
      - ///

      - /// - ///

      Shared top, all shared parents.
      - ///

      - /// - ///

      Equal tops, merge parents and reduce top to - /// {@link org.antlr.v4.runtime.atn.SingletonPredictionContext}.
      - ///

      + /// + /// Merge two _org.antlr.v4.runtime.atn.ArrayPredictionContext_ instances. + /// + /// Different tops, different parents. + /// + /// + /// Shared top, same parents. + /// + /// + /// Shared top, different parents. + /// + /// + /// Shared top, all shared parents. + /// + /// + /// Equal tops, merge parents and reduce top to + /// _org.antlr.v4.runtime.atn.SingletonPredictionContext_. + /// + /// public static func mergeArrays( _ a: ArrayPredictionContext, _ b: ArrayPredictionContext, _ rootIsWildcard: Bool, _ mergeCache: inout DoubleKeyMap?) -> PredictionContext { - if (mergeCache != nil) { - var previous: PredictionContext? = mergeCache!.get(a, b) - if (previous != nil) { + if mergeCache != nil { + var previous = mergeCache!.get(a, b) + if previous != nil { return previous! } previous = mergeCache!.get(b, a) - if (previous != nil) { + if previous != nil { return previous! } } // merge sorted payloads a + b => M - var i: Int = 0 // walks a - var j: Int = 0 // walks b - var k: Int = 0// walks target M array + var i = 0 // walks a + var j = 0 // walks b + var k = 0 // walks target M array let aReturnStatesLength = a.returnStates.count let bReturnStatesLength = b.returnStates.count let mergedReturnStatesLength = aReturnStatesLength + bReturnStatesLength - var mergedReturnStates: [Int] = [Int](repeating: 0, count: mergedReturnStatesLength) + var mergedReturnStates = [Int](repeating: 0, count: mergedReturnStatesLength) - var mergedParents: [PredictionContext?] 
= [PredictionContext?](repeating: nil, count: mergedReturnStatesLength) - //new PredictionContext[a.returnStates.length + b.returnStates.length]; + var mergedParents = [PredictionContext?](repeating: nil, count: mergedReturnStatesLength) // walk and merge to yield mergedParents, mergedReturnStates let aReturnStates = a.returnStates let bReturnStates = b.returnStates @@ -418,35 +420,27 @@ public class PredictionContext: Hashable, CustomStringConvertible { let bParents = b.parents while i < aReturnStatesLength && j < bReturnStatesLength { - let a_parent: PredictionContext? = aParents[i] - let b_parent: PredictionContext? = bParents[j] - if (aReturnStates[i] == bReturnStates[j]) { + let a_parent = aParents[i] + let b_parent = bParents[j] + if aReturnStates[i] == bReturnStates[j] { // same payload (stack tops are equal), must yield merged singleton - let payload: Int = aReturnStates[i] + let payload = aReturnStates[i] // $+$ = $ - var both$: Bool = (payload == EMPTY_RETURN_STATE) - both$ = both$ && a_parent == nil - both$ = both$ && b_parent == nil -// let both$: Bool = ((payload == EMPTY_RETURN_STATE) && -// a_parent == nil && b_parent == nil) - var ax_ax: Bool = (a_parent != nil && b_parent != nil) - ax_ax = ax_ax && a_parent! == b_parent! -// let ax_ax: Bool = (a_parent != nil && b_parent != nil) && a_parent! == b_parent! // ax+ax -> ax + let both$ = ((payload == EMPTY_RETURN_STATE) && a_parent == nil && b_parent == nil) + let ax_ax = (a_parent != nil && b_parent != nil && a_parent! == b_parent!) 
- - if (both$ || ax_ax) { + if both$ || ax_ax { mergedParents[k] = a_parent // choose left mergedReturnStates[k] = payload } else { // ax+ay -> a'[x,y] - let mergedParent: PredictionContext = - merge(a_parent!, b_parent!, rootIsWildcard, &mergeCache) + let mergedParent = merge(a_parent!, b_parent!, rootIsWildcard, &mergeCache) mergedParents[k] = mergedParent mergedReturnStates[k] = payload } i += 1 // hop over left one as usual j += 1 // but also skip one in right side since we merge - } else if (aReturnStates[i] < bReturnStates[j]) { + } else if aReturnStates[i] < bReturnStates[j] { // copy a[i] to M mergedParents[k] = a_parent mergedReturnStates[k] = aReturnStates[i] @@ -461,7 +455,7 @@ public class PredictionContext: Hashable, CustomStringConvertible { } // copy over any payloads remaining in either array - if (i < aReturnStatesLength) { + if i < aReturnStatesLength { for p in i..M {@code parents}; merge any {@code equals()} - /// ones. -// internal static func combineCommonParents(inout parents: [PredictionContext?]) { -// var uniqueParents: Dictionary = -// Dictionary() -// let length = parents.count -// for p in 0.. String { - if (context == nil) { + if context == nil { return "" } - let buf: StringBuilder = StringBuilder() + let buf = StringBuilder() buf.append("digraph G {\n") buf.append("rankdir=LR;\n") - var nodes: Array = getAllContextNodes(context!) + var nodes = getAllContextNodes(context!) 
- nodes.sort(by: { $0.id > $1.id }) + nodes.sort { $0.id > $1.id } - for current: PredictionContext in nodes { - if (current is SingletonPredictionContext) { - let s: String = String(current.id) + for current in nodes { + if current is SingletonPredictionContext { + let s = String(current.id) buf.append(" s").append(s) - var returnState: String = String(current.getReturnState(0)) - if (current is EmptyPredictionContext) { + var returnState = String(current.getReturnState(0)) + if current is EmptyPredictionContext { returnState = "$" } buf.append(" [label=\"") @@ -572,17 +540,17 @@ public class PredictionContext: Hashable, CustomStringConvertible { buf.append("\"];\n") continue } - let arr: ArrayPredictionContext = current as! ArrayPredictionContext + let arr = current as! ArrayPredictionContext buf.append(" s").append(arr.id) buf.append(" [shape=box, label=\"") buf.append("[") - var first: Bool = true + var first = true let returnStates = arr.returnStates - for inv: Int in returnStates { - if (!first) { + for inv in returnStates { + if !first { buf.append(", ") } - if (inv == EMPTY_RETURN_STATE) { + if inv == EMPTY_RETURN_STATE { buf.append("$") } else { buf.append(inv) @@ -593,8 +561,8 @@ public class PredictionContext: Hashable, CustomStringConvertible { buf.append("\"];\n") } - for current: PredictionContext in nodes { - if (current === EMPTY) { + for current in nodes { + if current === EMPTY { continue } let length = current.size() @@ -602,13 +570,13 @@ public class PredictionContext: Hashable, CustomStringConvertible { guard let currentParent = current.getParent(i) else { continue } - let s: String = String(current.id) + let s = String(current.id) buf.append(" s").append(s) buf.append("->") buf.append("s") buf.append(currentParent.id) - if (current.size() > 1) { - buf.append(" [label=\"parent[\(i)]\"];\n"); + if current.size() > 1 { + buf.append(" [label=\"parent[\(i)]\"];\n") } else { buf.append(";\n") } @@ -624,23 +592,23 @@ public class PredictionContext: 
Hashable, CustomStringConvertible { _ context: PredictionContext, _ contextCache: PredictionContextCache, _ visited: HashMap) -> PredictionContext { - if (context.isEmpty()) { + if context.isEmpty() { return context } - var existing: PredictionContext? = visited[context] - if (existing != nil) { + var existing = visited[context] + if existing != nil { return existing! } existing = contextCache.get(context) - if (existing != nil) { + if existing != nil { visited[context] = existing! return existing! } - var changed: Bool = false - var parents: [PredictionContext?] = [PredictionContext?](repeating: nil, count: context.size()) + var changed = false + var parents = [PredictionContext?](repeating: nil, count: context.size()) let length = parents.count for i in 0.. Array { - var nodes: Array = Array() - let visited: HashMap = - HashMap() + public static func getAllContextNodes(_ context: PredictionContext) -> [PredictionContext] { + var nodes = [PredictionContext]() + let visited = HashMap() getAllContextNodes_(context, &nodes, visited) return nodes } public static func getAllContextNodes_(_ context: PredictionContext?, - _ nodes: inout Array, + _ nodes: inout [PredictionContext], _ visited: HashMap) { - //if (context == nil || visited.keys.contains(context!)) { - - guard let context = context , visited[context] == nil else { + guard let context = context, visited[context] == nil else { return } visited[context] = context @@ -717,67 +682,66 @@ public class PredictionContext: Hashable, CustomStringConvertible { } } - public func toString(_ recog: Recognizer) -> String { + public func toString(_ recog: Recognizer) -> String { return NSStringFromClass(PredictionContext.self) // return toString(recog, ParserRuleContext.EMPTY); } - public func toStrings(_ recognizer: Recognizer, _ currentState: Int) -> [String] { + public func toStrings(_ recognizer: Recognizer, _ currentState: Int) -> [String] { return toStrings(recognizer, PredictionContext.EMPTY, currentState) } // FROM 
SAM - public func toStrings(_ recognizer: Recognizer?, _ stop: PredictionContext, _ currentState: Int) -> [String] { - var result: Array = Array() - var perm: Int = 0 + public func toStrings(_ recognizer: Recognizer?, _ stop: PredictionContext, _ currentState: Int) -> [String] { + var result = [String]() + var perm = 0 outer: while true { - var offset: Int = 0 - var last: Bool = true - var p: PredictionContext = self - var stateNumber: Int = currentState - let localBuffer: StringBuilder = StringBuilder() + var offset = 0 + var last = true + var p = self + var stateNumber = currentState + let localBuffer = StringBuilder() localBuffer.append("[") while !p.isEmpty() && p !== stop { - var index: Int = 0 - if (p.size() > 0) { - var bits: Int = 1 + var index = 0 + if p.size() > 0 { + var bits = 1 while (1 << bits) < p.size() { bits += 1 } - let mask: Int = (1 << bits) - 1 + let mask = (1 << bits) - 1 index = (perm >> offset) & mask //last &= index >= p.size() - 1; //last = Bool(Int(last) & (index >= p.size() - 1)); last = last && (index >= p.size() - 1) - if (index >= p.size()) { + if index >= p.size() { continue outer } offset += bits } if let recognizer = recognizer { - if (localBuffer.length > 1) { + if localBuffer.length > 1 { // first char is '[', if more than that this isn't the first rule localBuffer.append(" ") } - let atn: ATN = recognizer.getATN() - let s: ATNState = atn.states[stateNumber]! - let ruleName: String = recognizer.getRuleNames()[s.ruleIndex!] + let atn = recognizer.getATN() + let s = atn.states[stateNumber]! + let ruleName = recognizer.getRuleNames()[s.ruleIndex!] 
localBuffer.append(ruleName) - } else { - if (p.getReturnState(index) != PredictionContext.EMPTY_RETURN_STATE) { - if (!p.isEmpty()) { - if (localBuffer.length > 1) { - // first char is '[', if more than that this isn't the first rule - localBuffer.append(" ") - } - - localBuffer.append(p.getReturnState(index)) + } + else if p.getReturnState(index) != PredictionContext.EMPTY_RETURN_STATE { + if !p.isEmpty() { + if localBuffer.length > 1 { + // first char is '[', if more than that this isn't the first rule + localBuffer.append(" ") } + + localBuffer.append(p.getReturnState(index)) } } stateNumber = p.getReturnState(index) @@ -786,7 +750,7 @@ public class PredictionContext: Hashable, CustomStringConvertible { localBuffer.append("]") result.append(localBuffer.toString()) - if (last) { + if last { break } @@ -797,17 +761,18 @@ public class PredictionContext: Hashable, CustomStringConvertible { } public var description: String { - return String(describing: PredictionContext.self) + "@" + String(Unmanaged.passUnretained(self).toOpaque().hashValue) } } public func ==(lhs: RuleContext, rhs: ParserRuleContext) -> Bool { - if !(lhs is ParserRuleContext) { + if let lhs = lhs as? ParserRuleContext { + return lhs === rhs + } + else { return false } - return (lhs as! ParserRuleContext) === rhs } public func ==(lhs: PredictionContext, rhs: PredictionContext) -> Bool { @@ -815,16 +780,16 @@ public func ==(lhs: PredictionContext, rhs: PredictionContext) -> Bool { if lhs === rhs { return true } - if (lhs is EmptyPredictionContext) { + if lhs is EmptyPredictionContext { return lhs === rhs } - if (lhs is SingletonPredictionContext) && (rhs is SingletonPredictionContext) { - return (lhs as! SingletonPredictionContext) == (rhs as! SingletonPredictionContext) + if let lhs = lhs as? SingletonPredictionContext, let rhs = rhs as? SingletonPredictionContext { + return lhs == rhs } - if (lhs is ArrayPredictionContext) && (rhs is ArrayPredictionContext) { - return (lhs as! 
ArrayPredictionContext) == (rhs as! ArrayPredictionContext) + if let lhs = lhs as? ArrayPredictionContext, let rhs = rhs as? ArrayPredictionContext { + return lhs == rhs } return false diff --git a/runtime/Swift/Sources/Antlr4/atn/PredictionContextCache.swift b/runtime/Swift/Sources/Antlr4/atn/PredictionContextCache.swift index 5daac2eb1..8a5c5479f 100644 --- a/runtime/Swift/Sources/Antlr4/atn/PredictionContextCache.swift +++ b/runtime/Swift/Sources/Antlr4/atn/PredictionContextCache.swift @@ -1,11 +1,15 @@ +/// /// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. /// Use of this file is governed by the BSD 3-clause license that /// can be found in the LICENSE.txt file in the project root. +/// -/// Used to cache {@link org.antlr.v4.runtime.atn.PredictionContext} objects. Its used for the shared +/// +/// Used to cache _org.antlr.v4.runtime.atn.PredictionContext_ objects. Its used for the shared /// context cash associated with contexts in DFA states. This cache /// can be used for both lexers and parsers. +/// public final class PredictionContextCache { //internal final var @@ -13,9 +17,11 @@ public final class PredictionContextCache { HashMap() public init() { } + /// /// Add a context to the cache and return it. If the context already exists, /// return that one instead and do not add a new context to the cache. /// Protect shared cache from unsafe thread access. + /// @discardableResult public func add(_ ctx: PredictionContext) -> PredictionContext { if ctx === PredictionContext.EMPTY { diff --git a/runtime/Swift/Sources/Antlr4/atn/PredictionMode.swift b/runtime/Swift/Sources/Antlr4/atn/PredictionMode.swift index 2180ddca7..2bbd71cd2 100644 --- a/runtime/Swift/Sources/Antlr4/atn/PredictionMode.swift +++ b/runtime/Swift/Sources/Antlr4/atn/PredictionMode.swift @@ -1,171 +1,177 @@ +/// /// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. 
/// Use of this file is governed by the BSD 3-clause license that /// can be found in the LICENSE.txt file in the project root. +/// +/// /// This enumeration defines the prediction modes available in ANTLR 4 along with /// utility methods for analyzing configuration sets for conflicts and/or /// ambiguities. +/// public enum PredictionMode { - /** - * The SLL(*) prediction mode. This prediction mode ignores the current - * parser context when making predictions. This is the fastest prediction - * mode, and provides correct results for many grammars. This prediction - * mode is more powerful than the prediction mode provided by ANTLR 3, but - * may result in syntax errors for grammar and input combinations which are - * not SLL. - * - *

      - * When using this prediction mode, the parser will either return a correct - * parse tree (i.e. the same parse tree that would be returned with the - * {@link #LL} prediction mode), or it will report a syntax error. If a - * syntax error is encountered when using the {@link #SLL} prediction mode, - * it may be due to either an actual syntax error in the input or indicate - * that the particular combination of grammar and input requires the more - * powerful {@link #LL} prediction abilities to complete successfully.

      - * - *

      - * This prediction mode does not provide any guarantees for prediction - * behavior for syntactically-incorrect inputs.

      - */ + /// + /// The SLL(*) prediction mode. This prediction mode ignores the current + /// parser context when making predictions. This is the fastest prediction + /// mode, and provides correct results for many grammars. This prediction + /// mode is more powerful than the prediction mode provided by ANTLR 3, but + /// may result in syntax errors for grammar and input combinations which are + /// not SLL. + /// + /// + /// When using this prediction mode, the parser will either return a correct + /// parse tree (i.e. the same parse tree that would be returned with the + /// _#LL_ prediction mode), or it will report a syntax error. If a + /// syntax error is encountered when using the _#SLL_ prediction mode, + /// it may be due to either an actual syntax error in the input or indicate + /// that the particular combination of grammar and input requires the more + /// powerful _#LL_ prediction abilities to complete successfully. + /// + /// + /// This prediction mode does not provide any guarantees for prediction + /// behavior for syntactically-incorrect inputs. + /// case SLL - /** - * The LL(*) prediction mode. This prediction mode allows the current parser - * context to be used for resolving SLL conflicts that occur during - * prediction. This is the fastest prediction mode that guarantees correct - * parse results for all combinations of grammars with syntactically correct - * inputs. - * - *

      - * When using this prediction mode, the parser will make correct decisions - * for all syntactically-correct grammar and input combinations. However, in - * cases where the grammar is truly ambiguous this prediction mode might not - * report a precise answer for exactly which alternatives are - * ambiguous.

      - * - *

      - * This prediction mode does not provide any guarantees for prediction - * behavior for syntactically-incorrect inputs.

      - */ + /// + /// The LL(*) prediction mode. This prediction mode allows the current parser + /// context to be used for resolving SLL conflicts that occur during + /// prediction. This is the fastest prediction mode that guarantees correct + /// parse results for all combinations of grammars with syntactically correct + /// inputs. + /// + /// + /// When using this prediction mode, the parser will make correct decisions + /// for all syntactically-correct grammar and input combinations. However, in + /// cases where the grammar is truly ambiguous this prediction mode might not + /// report a precise answer for __exactly which__ alternatives are + /// ambiguous. + /// + /// + /// This prediction mode does not provide any guarantees for prediction + /// behavior for syntactically-incorrect inputs. + /// case LL - /** - * The LL(*) prediction mode with exact ambiguity detection. In addition to - * the correctness guarantees provided by the {@link #LL} prediction mode, - * this prediction mode instructs the prediction algorithm to determine the - * complete and exact set of ambiguous alternatives for every ambiguous - * decision encountered while parsing. - * - *

      - * This prediction mode may be used for diagnosing ambiguities during - * grammar development. Due to the performance overhead of calculating sets - * of ambiguous alternatives, this prediction mode should be avoided when - * the exact results are not necessary.

      - * - *

      - * This prediction mode does not provide any guarantees for prediction - * behavior for syntactically-incorrect inputs.

      - */ + /// + /// The LL(*) prediction mode with exact ambiguity detection. In addition to + /// the correctness guarantees provided by the _#LL_ prediction mode, + /// this prediction mode instructs the prediction algorithm to determine the + /// complete and exact set of ambiguous alternatives for every ambiguous + /// decision encountered while parsing. + /// + /// + /// This prediction mode may be used for diagnosing ambiguities during + /// grammar development. Due to the performance overhead of calculating sets + /// of ambiguous alternatives, this prediction mode should be avoided when + /// the exact results are not necessary. + /// + /// + /// This prediction mode does not provide any guarantees for prediction + /// behavior for syntactically-incorrect inputs. + /// case LL_EXACT_AMBIG_DETECTION + /// /// Computes the SLL prediction termination condition. - /// - ///

      + /// + /// /// This method computes the SLL prediction termination condition for both of - /// the following cases.

      - /// - ///
        - ///
      • The usual SLL+LL fallback upon SLL conflict
      • - ///
      • Pure SLL without LL fallback
      • - ///
      - /// - ///

      COMBINED SLL+LL PARSING

      - /// - ///

      When LL-fallback is enabled upon SLL conflict, correct predictions are + /// the following cases. + /// + /// * The usual SLL+LL fallback upon SLL conflict + /// * Pure SLL without LL fallback + /// + /// __COMBINED SLL+LL PARSING__ + /// + /// When LL-fallback is enabled upon SLL conflict, correct predictions are /// ensured regardless of how the termination condition is computed by this /// method. Due to the substantially higher cost of LL prediction, the /// prediction should only fall back to LL when the additional lookahead - /// cannot lead to a unique SLL prediction.

      - /// - ///

      Assuming combined SLL+LL parsing, an SLL configuration set with only + /// cannot lead to a unique SLL prediction. + /// + /// Assuming combined SLL+LL parsing, an SLL configuration set with only /// conflicting subsets should fall back to full LL, even if the /// configuration sets don't resolve to the same alternative (e.g. - /// {@code {1,2}} and {@code {3,4}}. If there is at least one non-conflicting + /// `{1,2`} and `{3,4`}. If there is at least one non-conflicting /// configuration, SLL could continue with the hopes that more lookahead will - /// resolve via one of those non-conflicting configurations.

      - /// - ///

      Here's the prediction termination rule them: SLL (for SLL+LL parsing) + /// resolve via one of those non-conflicting configurations. + /// + /// Here's the prediction termination rule them: SLL (for SLL+LL parsing) /// stops when it sees only conflicting configuration subsets. In contrast, - /// full LL keeps going when there is uncertainty.

      - /// - ///

      HEURISTIC

      - /// - ///

      As a heuristic, we stop prediction when we see any conflicting subset + /// full LL keeps going when there is uncertainty. + /// + /// __HEURISTIC__ + /// + /// As a heuristic, we stop prediction when we see any conflicting subset /// unless we see a state that only has one alternative associated with it. /// The single-alt-state thing lets prediction continue upon rules like - /// (otherwise, it would admit defeat too soon):

      - /// - ///

      {@code [12|1|[], 6|2|[], 12|2|[]]. s : (ID | ID ID?) ';' ;}

      - /// - ///

      When the ATN simulation reaches the state before {@code ';'}, it has a - /// DFA state that looks like: {@code [12|1|[], 6|2|[], 12|2|[]]}. Naturally - /// {@code 12|1|[]} and {@code 12|2|[]} conflict, but we cannot stop + /// (otherwise, it would admit defeat too soon): + /// + /// `[12|1|[], 6|2|[], 12|2|[]]. s : (ID | ID ID?) ';' ;` + /// + /// When the ATN simulation reaches the state before `';'`, it has a + /// DFA state that looks like: `[12|1|[], 6|2|[], 12|2|[]]`. Naturally + /// `12|1|[]` and `12|2|[]` conflict, but we cannot stop /// processing this node because alternative to has another way to continue, - /// via {@code [6|2|[]]}.

      - /// - ///

      It also let's us continue for this rule:

      - /// - ///

      {@code [1|1|[], 1|2|[], 8|3|[]] a : A | A | A B ;}

      - /// - ///

      After matching input A, we reach the stop state for rule A, state 1. + /// via `[6|2|[]]`. + /// + /// It also let's us continue for this rule: + /// + /// `[1|1|[], 1|2|[], 8|3|[]] a : A | A | A B ;` + /// + /// After matching input A, we reach the stop state for rule A, state 1. /// State 8 is the state right before B. Clearly alternatives 1 and 2 /// conflict and no amount of further lookahead will separate the two. /// However, alternative 3 will be able to continue and so we do not stop /// working on this state. In the previous example, we're concerned with /// states associated with the conflicting alternatives. Here alt 3 is not /// associated with the conflicting configs, but since we can continue - /// looking for input reasonably, don't declare the state done.

      - /// - ///

      PURE SLL PARSING

      - /// - ///

      To handle pure SLL parsing, all we have to do is make sure that we + /// looking for input reasonably, don't declare the state done. + /// + /// __PURE SLL PARSING__ + /// + /// To handle pure SLL parsing, all we have to do is make sure that we /// combine stack contexts for configurations that differ only by semantic - /// predicate. From there, we can do the usual SLL termination heuristic.

      - /// - ///

      PREDICATES IN SLL+LL PARSING

      - /// - ///

      SLL decisions don't evaluate predicates until after they reach DFA stop + /// predicate. From there, we can do the usual SLL termination heuristic. + /// + /// __PREDICATES IN SLL+LL PARSING__ + /// + /// SLL decisions don't evaluate predicates until after they reach DFA stop /// states because they need to create the DFA cache that works in all /// semantic situations. In contrast, full LL evaluates predicates collected /// during start state computation so it can ignore predicates thereafter. /// This means that SLL termination detection can totally ignore semantic - /// predicates.

      - /// - ///

      Implementation-wise, {@link org.antlr.v4.runtime.atn.ATNConfigSet} combines stack contexts but not + /// predicates. + /// + /// Implementation-wise, _org.antlr.v4.runtime.atn.ATNConfigSet_ combines stack contexts but not /// semantic predicate contexts so we might see two configurations like the - /// following.

      - /// - ///

      {@code (s, 1, x, {}), (s, 1, x', {p})}

      - /// - ///

      Before testing these configurations against others, we have to merge - /// {@code x} and {@code x'} (without modifying the existing configurations). - /// For example, we test {@code (x+x')==x''} when looking for conflicts in - /// the following configurations.

      - /// - ///

      {@code (s, 1, x, {}), (s, 1, x', {p}), (s, 2, x'', {})}

      - /// - ///

      If the configuration set has predicates (as indicated by - /// {@link org.antlr.v4.runtime.atn.ATNConfigSet#hasSemanticContext}), this algorithm makes a copy of + /// following. + /// + /// `(s, 1, x, {`), (s, 1, x', {p})} + /// + /// Before testing these configurations against others, we have to merge + /// `x` and `x'` (without modifying the existing configurations). + /// For example, we test `(x+x')==x''` when looking for conflicts in + /// the following configurations. + /// + /// `(s, 1, x, {`), (s, 1, x', {p}), (s, 2, x'', {})} + /// + /// If the configuration set has predicates (as indicated by + /// _org.antlr.v4.runtime.atn.ATNConfigSet#hasSemanticContext_), this algorithm makes a copy of /// the configurations to strip out all of the predicates so that a standard - /// {@link org.antlr.v4.runtime.atn.ATNConfigSet} will merge everything ignoring predicates.

      - public static func hasSLLConflictTerminatingPrediction(_ mode: PredictionMode,_ configs: ATNConfigSet) throws -> Bool { + /// _org.antlr.v4.runtime.atn.ATNConfigSet_ will merge everything ignoring predicates. + /// + public static func hasSLLConflictTerminatingPrediction(_ mode: PredictionMode,_ configs: ATNConfigSet) -> Bool { var configs = configs + /// /// Configs in rule stop states indicate reaching the end of the decision /// rule (local context) or end of start rule (full context). If all /// configs meet this condition, then none of the configurations is able /// to match additional input so we terminate prediction. + /// if allConfigsInRuleStopStates(configs) { return true } @@ -177,206 +183,211 @@ public enum PredictionMode { // since we'll often fail over anyway. if configs.hasSemanticContext { // dup configs, tossing out semantic predicates - configs = try configs.dupConfigsWithoutSemanticPredicates() + configs = configs.dupConfigsWithoutSemanticPredicates() } // now we have combined contexts for configs with dissimilar preds } // pure SLL or combined SLL+LL mode parsing - let altsets: Array = try getConflictingAltSubsets(configs) + let altsets = getConflictingAltSubsets(configs) - let heuristic: Bool = - try hasConflictingAltSet(altsets) && !hasStateAssociatedWithOneAlt(configs) + let heuristic = hasConflictingAltSet(altsets) && !hasStateAssociatedWithOneAlt(configs) return heuristic } - /// Checks if any configuration in {@code configs} is in a - /// {@link org.antlr.v4.runtime.atn.RuleStopState}. Configurations meeting this condition have reached + /// + /// Checks if any configuration in `configs` is in a + /// _org.antlr.v4.runtime.atn.RuleStopState_. Configurations meeting this condition have reached /// the end of the decision rule (local context) or end of start rule (full /// context). 
- /// + /// /// - parameter configs: the configuration set to test - /// - returns: {@code true} if any configuration in {@code configs} is in a - /// {@link org.antlr.v4.runtime.atn.RuleStopState}, otherwise {@code false} + /// - returns: `true` if any configuration in `configs` is in a + /// _org.antlr.v4.runtime.atn.RuleStopState_, otherwise `false` + /// public static func hasConfigInRuleStopState(_ configs: ATNConfigSet) -> Bool { return configs.hasConfigInRuleStopState } - /// Checks if all configurations in {@code configs} are in a - /// {@link org.antlr.v4.runtime.atn.RuleStopState}. Configurations meeting this condition have reached + /// + /// Checks if all configurations in `configs` are in a + /// _org.antlr.v4.runtime.atn.RuleStopState_. Configurations meeting this condition have reached /// the end of the decision rule (local context) or end of start rule (full /// context). - /// + /// /// - parameter configs: the configuration set to test - /// - returns: {@code true} if all configurations in {@code configs} are in a - /// {@link org.antlr.v4.runtime.atn.RuleStopState}, otherwise {@code false} + /// - returns: `true` if all configurations in `configs` are in a + /// _org.antlr.v4.runtime.atn.RuleStopState_, otherwise `false` + /// public static func allConfigsInRuleStopStates(_ configs: ATNConfigSet) -> Bool { return configs.allConfigsInRuleStopStates } + /// /// Full LL prediction termination. - /// - ///

      Can we stop looking ahead during ATN simulation or is there some + /// + /// Can we stop looking ahead during ATN simulation or is there some /// uncertainty as to which alternative we will ultimately pick, after /// consuming more input? Even if there are partial conflicts, we might know /// that everything is going to resolve to the same minimum alternative. That /// means we can stop since no more lookahead will change that fact. On the /// other hand, there might be multiple conflicts that resolve to different /// minimums. That means we need more look ahead to decide which of those - /// alternatives we should predict.

      - /// - ///

      The basic idea is to split the set of configurations {@code C}, into - /// conflicting subsets {@code (s, _, ctx, _)} and singleton subsets with + /// alternatives we should predict. + /// + /// The basic idea is to split the set of configurations `C`, into + /// conflicting subsets `(s, _, ctx, _)` and singleton subsets with /// non-conflicting configurations. Two configurations conflict if they have - /// identical {@link org.antlr.v4.runtime.atn.ATNConfig#state} and {@link org.antlr.v4.runtime.atn.ATNConfig#context} values - /// but different {@link org.antlr.v4.runtime.atn.ATNConfig#alt} value, e.g. {@code (s, i, ctx, _)} - /// and {@code (s, j, ctx, _)} for {@code i!=j}.

      - /// - ///

      Reduce these configuration subsets to the set of possible alternatives. - /// You can compute the alternative subsets in one pass as follows:

      - /// - ///

      {@code A_s,ctx = {i | (s, i, ctx, _)}} for each configuration in - /// {@code C} holding {@code s} and {@code ctx} fixed.

      - /// - ///

      Or in pseudo-code, for each configuration {@code c} in {@code C}:

      - /// - ///
      -    /// map[c] U= c.{@link org.antlr.v4.runtime.atn.ATNConfig#alt alt} # map hash/equals uses s and x, not
      +    /// identical _org.antlr.v4.runtime.atn.ATNConfig#state_ and _org.antlr.v4.runtime.atn.ATNConfig#context_ values
      +    /// but different _org.antlr.v4.runtime.atn.ATNConfig#alt_ value, e.g. `(s, i, ctx, _)`
      +    /// and `(s, j, ctx, _)` for `i!=j`.
      +    /// 
      +    /// Reduce these configuration subsets to the set of possible alternatives.
      +    /// You can compute the alternative subsets in one pass as follows:
      +    /// 
      +    /// `A_s,ctx = {i | (s, i, ctx, _)`} for each configuration in
      +    /// `C` holding `s` and `ctx` fixed.
      +    /// 
      +    /// Or in pseudo-code, for each configuration `c` in `C`:
      +    /// 
      +    /// 
      +    /// map[c] U= c._org.antlr.v4.runtime.atn.ATNConfig#alt alt_ # map hash/equals uses s and x, not
           /// alt and not pred
      -    /// 
      - /// - ///

      The values in {@code map} are the set of {@code A_s,ctx} sets.

      - /// - ///

      If {@code |A_s,ctx|=1} then there is no conflict associated with - /// {@code s} and {@code ctx}.

      - /// - ///

      Reduce the subsets to singletons by choosing a minimum of each subset. If + /// + /// + /// The values in `map` are the set of `A_s,ctx` sets. + /// + /// If `|A_s,ctx|=1` then there is no conflict associated with + /// `s` and `ctx`. + /// + /// Reduce the subsets to singletons by choosing a minimum of each subset. If /// the union of these alternative subsets is a singleton, then no amount of /// more lookahead will help us. We will always pick that alternative. If, /// however, there is more than one alternative, then we are uncertain which /// alternative to predict and must continue looking for resolution. We may /// or may not discover an ambiguity in the future, even if there are no - /// conflicting subsets this round.

      - /// - ///

      The biggest sin is to terminate early because it means we've made a + /// conflicting subsets this round. + /// + /// The biggest sin is to terminate early because it means we've made a /// decision but were uncertain as to the eventual outcome. We haven't used /// enough lookahead. On the other hand, announcing a conflict too late is no /// big deal; you will still have the conflict. It's just inefficient. It - /// might even look until the end of file.

      - /// - ///

      No special consideration for semantic predicates is required because + /// might even look until the end of file. + /// + /// No special consideration for semantic predicates is required because /// predicates are evaluated on-the-fly for full LL prediction, ensuring that /// no configuration contains a semantic context during the termination - /// check.

      - /// - ///

      CONFLICTING CONFIGS

      - /// - ///

      Two configurations {@code (s, i, x)} and {@code (s, j, x')}, conflict - /// when {@code i!=j} but {@code x=x'}. Because we merge all - /// {@code (s, i, _)} configurations together, that means that there are at - /// most {@code n} configurations associated with state {@code s} for - /// {@code n} possible alternatives in the decision. The merged stacks - /// complicate the comparison of configuration contexts {@code x} and - /// {@code x'}. Sam checks to see if one is a subset of the other by calling - /// merge and checking to see if the merged result is either {@code x} or - /// {@code x'}. If the {@code x} associated with lowest alternative {@code i} - /// is the superset, then {@code i} is the only possible prediction since the - /// others resolve to {@code min(i)} as well. However, if {@code x} is - /// associated with {@code j>i} then at least one stack configuration for - /// {@code j} is not in conflict with alternative {@code i}. The algorithm - /// should keep going, looking for more lookahead due to the uncertainty.

      - /// - ///

      For simplicity, I'm doing a equality check between {@code x} and - /// {@code x'} that lets the algorithm continue to consume lookahead longer + /// check. + /// + /// __CONFLICTING CONFIGS__ + /// + /// Two configurations `(s, i, x)` and `(s, j, x')`, conflict + /// when `i!=j` but `x=x'`. Because we merge all + /// `(s, i, _)` configurations together, that means that there are at + /// most `n` configurations associated with state `s` for + /// `n` possible alternatives in the decision. The merged stacks + /// complicate the comparison of configuration contexts `x` and + /// `x'`. Sam checks to see if one is a subset of the other by calling + /// merge and checking to see if the merged result is either `x` or + /// `x'`. If the `x` associated with lowest alternative `i` + /// is the superset, then `i` is the only possible prediction since the + /// others resolve to `min(i)` as well. However, if `x` is + /// associated with `j>i` then at least one stack configuration for + /// `j` is not in conflict with alternative `i`. The algorithm + /// should keep going, looking for more lookahead due to the uncertainty. + /// + /// For simplicity, I'm doing a equality check between `x` and + /// `x'` that lets the algorithm continue to consume lookahead longer /// than necessary. The reason I like the equality is of course the /// simplicity but also because that is the test you need to detect the - /// alternatives that are actually in conflict.

      - /// - ///

      CONTINUE/STOP RULE

      - /// - ///

      Continue if union of resolved alternative sets from non-conflicting and + /// alternatives that are actually in conflict. + /// + /// __CONTINUE/STOP RULE__ + /// + /// Continue if union of resolved alternative sets from non-conflicting and /// conflicting alternative subsets has more than one alternative. We are - /// uncertain about which alternative to predict.

      - /// - ///

      The complete set of alternatives, {@code [i for (_,i,_)]}, tells us which + /// uncertain about which alternative to predict. + /// + /// The complete set of alternatives, `[i for (_,i,_)]`, tells us which /// alternatives are still in the running for the amount of input we've /// consumed at this point. The conflicting sets let us to strip away /// configurations that won't lead to more states because we resolve /// conflicts to the configuration with a minimum alternate for the - /// conflicting set.

      - /// - ///

      CASES

      - /// - ///
        - /// - ///
      • no conflicts and more than 1 alternative in set => continue
      • - /// - ///
      • {@code (s, 1, x)}, {@code (s, 2, x)}, {@code (s, 3, z)}, - /// {@code (s', 1, y)}, {@code (s', 2, y)} yields non-conflicting set - /// {@code {3}} U conflicting sets {@code min({1,2})} U {@code min({1,2})} = - /// {@code {1,3}} => continue - ///
      • - /// - ///
      • {@code (s, 1, x)}, {@code (s, 2, x)}, {@code (s', 1, y)}, - /// {@code (s', 2, y)}, {@code (s'', 1, z)} yields non-conflicting set - /// {@code {1}} U conflicting sets {@code min({1,2})} U {@code min({1,2})} = - /// {@code {1}} => stop and predict 1
      • - /// - ///
      • {@code (s, 1, x)}, {@code (s, 2, x)}, {@code (s', 1, y)}, - /// {@code (s', 2, y)} yields conflicting, reduced sets {@code {1}} U - /// {@code {1}} = {@code {1}} => stop and predict 1, can announce - /// ambiguity {@code {1,2}}
      • - /// - ///
      • {@code (s, 1, x)}, {@code (s, 2, x)}, {@code (s', 2, y)}, - /// {@code (s', 3, y)} yields conflicting, reduced sets {@code {1}} U - /// {@code {2}} = {@code {1,2}} => continue
      • - /// - ///
      • {@code (s, 1, x)}, {@code (s, 2, x)}, {@code (s', 3, y)}, - /// {@code (s', 4, y)} yields conflicting, reduced sets {@code {1}} U - /// {@code {3}} = {@code {1,3}} => continue
      • - /// - ///
      - /// - ///

      EXACT AMBIGUITY DETECTION

      - /// - ///

      If all states report the same conflicting set of alternatives, then we - /// know we have the exact ambiguity set.

      - /// - ///

      |A_i|>1 and - /// A_i = A_j for all i, j.

      - /// - ///

      In other words, we continue examining lookahead until all {@code A_i} - /// have more than one alternative and all {@code A_i} are the same. If - /// {@code A={{1,2}, {1,3}}}, then regular LL prediction would terminate - /// because the resolved set is {@code {1}}. To determine what the real + /// conflicting set. + /// + /// __CASES__ + /// + /// * no conflicts and more than 1 alternative in set => continue + /// + /// * `(s, 1, x)`, `(s, 2, x)`, `(s, 3, z)`, + /// `(s', 1, y)`, `(s', 2, y)` yields non-conflicting set + /// `{3`} U conflicting sets `min({1,2`)} U `min({1,2`)} = + /// `{1,3`} => continue + /// + /// * `(s, 1, x)`, `(s, 2, x)`, `(s', 1, y)`, + /// `(s', 2, y)`, `(s'', 1, z)` yields non-conflicting set + /// `{1`} U conflicting sets `min({1,2`)} U `min({1,2`)} = + /// `{1`} => stop and predict 1 + /// + /// * `(s, 1, x)`, `(s, 2, x)`, `(s', 1, y)`, + /// `(s', 2, y)` yields conflicting, reduced sets `{1`} U + /// `{1`} = `{1`} => stop and predict 1, can announce + /// ambiguity `{1,2`} + /// + /// * `(s, 1, x)`, `(s, 2, x)`, `(s', 2, y)`, + /// `(s', 3, y)` yields conflicting, reduced sets `{1`} U + /// `{2`} = `{1,2`} => continue + /// + /// * `(s, 1, x)`, `(s, 2, x)`, `(s', 3, y)`, + /// `(s', 4, y)` yields conflicting, reduced sets `{1`} U + /// `{3`} = `{1,3`} => continue + /// + /// + /// __EXACT AMBIGUITY DETECTION__ + /// + /// If all states report the same conflicting set of alternatives, then we + /// know we have the exact ambiguity set. + /// + /// `|A_i__|>1` and + /// `A_i = A_j` for all i, j. + /// + /// In other words, we continue examining lookahead until all `A_i` + /// have more than one alternative and all `A_i` are the same. If + /// `A={{1,2`, {1,3}}}, then regular LL prediction would terminate + /// because the resolved set is `{1`}. To determine what the real /// ambiguity is, we have to know whether the ambiguity is between one and /// two or one and three so we keep going. 
We can only stop prediction when /// we need exact ambiguity detection when the sets look like - /// {@code A={{1,2}}} or {@code {{1,2},{1,2}}}, etc...

      - public static func resolvesToJustOneViableAlt(_ altsets: Array) throws -> Int { - return try getSingleViableAlt(altsets) + /// `A={{1,2`}} or `{{1,2`,{1,2}}}, etc... + /// + public static func resolvesToJustOneViableAlt(_ altsets: [BitSet]) -> Int { + return getSingleViableAlt(altsets) } - /// Determines if every alternative subset in {@code altsets} contains more + /// + /// Determines if every alternative subset in `altsets` contains more /// than one alternative. - /// + /// /// - parameter altsets: a collection of alternative subsets - /// - returns: {@code true} if every {@link java.util.BitSet} in {@code altsets} has - /// {@link java.util.BitSet#cardinality cardinality} > 1, otherwise {@code false} - public static func allSubsetsConflict(_ altsets: Array) -> Bool { + /// - returns: `true` if every _java.util.BitSet_ in `altsets` has + /// _java.util.BitSet#cardinality cardinality_ > 1, otherwise `false` + /// + public static func allSubsetsConflict(_ altsets: [BitSet]) -> Bool { return !hasNonConflictingAltSet(altsets) } - /// Determines if any single alternative subset in {@code altsets} contains + /// + /// Determines if any single alternative subset in `altsets` contains /// exactly one alternative. 
- /// + /// /// - parameter altsets: a collection of alternative subsets - /// - returns: {@code true} if {@code altsets} contains a {@link java.util.BitSet} with - /// {@link java.util.BitSet#cardinality cardinality} 1, otherwise {@code false} - public static func hasNonConflictingAltSet(_ altsets: Array) -> Bool { + /// - returns: `true` if `altsets` contains a _java.util.BitSet_ with + /// _java.util.BitSet#cardinality cardinality_ 1, otherwise `false` + /// + public static func hasNonConflictingAltSet(_ altsets: [BitSet]) -> Bool { for alts: BitSet in altsets { if alts.cardinality() == 1 { return true @@ -385,13 +396,15 @@ public enum PredictionMode { return false } - /// Determines if any single alternative subset in {@code altsets} contains + /// + /// Determines if any single alternative subset in `altsets` contains /// more than one alternative. - /// + /// /// - parameter altsets: a collection of alternative subsets - /// - returns: {@code true} if {@code altsets} contains a {@link java.util.BitSet} with - /// {@link java.util.BitSet#cardinality cardinality} > 1, otherwise {@code false} - public static func hasConflictingAltSet(_ altsets: Array) -> Bool { + /// - returns: `true` if `altsets` contains a _java.util.BitSet_ with + /// _java.util.BitSet#cardinality cardinality_ > 1, otherwise `false` + /// + public static func hasConflictingAltSet(_ altsets: [BitSet]) -> Bool { for alts: BitSet in altsets { if alts.cardinality() > 1 { return true @@ -400,12 +413,14 @@ public enum PredictionMode { return false } - /// Determines if every alternative subset in {@code altsets} is equivalent. - /// + /// + /// Determines if every alternative subset in `altsets` is equivalent. 
+ /// /// - parameter altsets: a collection of alternative subsets - /// - returns: {@code true} if every member of {@code altsets} is equal to the - /// others, otherwise {@code false} - public static func allSubsetsEqual(_ altsets: Array) -> Bool { + /// - returns: `true` if every member of `altsets` is equal to the + /// others, otherwise `false` + /// + public static func allSubsetsEqual(_ altsets: [BitSet]) -> Bool { let first: BitSet = altsets[0] for it in altsets { @@ -417,25 +432,29 @@ public enum PredictionMode { return true } + /// /// Returns the unique alternative predicted by all alternative subsets in - /// {@code altsets}. If no such alternative exists, this method returns - /// {@link org.antlr.v4.runtime.atn.ATN#INVALID_ALT_NUMBER}. - /// + /// `altsets`. If no such alternative exists, this method returns + /// _org.antlr.v4.runtime.atn.ATN#INVALID_ALT_NUMBER_. + /// /// - parameter altsets: a collection of alternative subsets - public static func getUniqueAlt(_ altsets: Array) throws -> Int { + /// + public static func getUniqueAlt(_ altsets: [BitSet]) -> Int { let all: BitSet = getAlts(altsets) if all.cardinality() == 1 { - return try all.nextSetBit(0) + return all.firstSetBit() } return ATN.INVALID_ALT_NUMBER } + /// /// Gets the complete set of represented alternatives for a collection of - /// alternative subsets. This method returns the union of each {@link java.util.BitSet} - /// in {@code altsets}. - /// + /// alternative subsets. This method returns the union of each _java.util.BitSet_ + /// in `altsets`. + /// /// - parameter altsets: a collection of alternative subsets - /// - returns: the set of represented alternatives in {@code altsets} + /// - returns: the set of represented alternatives in `altsets` + /// public static func getAlts(_ altsets: Array) -> BitSet { let all: BitSet = BitSet() for alts: BitSet in altsets { @@ -444,42 +463,43 @@ public enum PredictionMode { return all } - /// Get union of all alts from configs. 
@since 4.5.1 - public static func getAlts(_ configs: ATNConfigSet) throws -> BitSet { - - return try configs.getAltBitSet() + /// + /// Get union of all alts from configs. - Since: 4.5.1 + /// + public static func getAlts(_ configs: ATNConfigSet) -> BitSet { + return configs.getAltBitSet() } + /// /// This function gets the conflicting alt subsets from a configuration set. - /// For each configuration {@code c} in {@code configs}: - /// - ///
      -    /// map[c] U= c.{@link org.antlr.v4.runtime.atn.ATNConfig#alt alt} # map hash/equals uses s and x, not
      +    /// For each configuration `c` in `configs`:
      +    /// 
      +    /// 
      +    /// map[c] U= c._org.antlr.v4.runtime.atn.ATNConfig#alt alt_ # map hash/equals uses s and x, not
           /// alt and not pred
      -    /// 
      + /// + /// - public static func getConflictingAltSubsets(_ configs: ATNConfigSet) throws -> Array { - - return try configs.getConflictingAltSubsets() + public static func getConflictingAltSubsets(_ configs: ATNConfigSet) -> [BitSet] { + return configs.getConflictingAltSubsets() } + /// /// Get a map from state to alt subset from a configuration set. For each - /// configuration {@code c} in {@code configs}: - /// - ///
      -    /// map[c.{@link org.antlr.v4.runtime.atn.ATNConfig#state state}] U= c.{@link org.antlr.v4.runtime.atn.ATNConfig#alt alt}
      -    /// 
      - public static func getStateToAltMap(_ configs: ATNConfigSet) throws -> HashMap { - - return try configs.getStateToAltMap() + /// configuration `c` in `configs`: + /// + /// + /// map[c._org.antlr.v4.runtime.atn.ATNConfig#state state_] U= c._org.antlr.v4.runtime.atn.ATNConfig#alt alt_ + /// + /// + public static func getStateToAltMap(_ configs: ATNConfigSet) -> HashMap { + return configs.getStateToAltMap() } - public static func hasStateAssociatedWithOneAlt(_ configs: ATNConfigSet) throws -> Bool { - let x: HashMap = try getStateToAltMap(configs) - let values = x.values - for alts: BitSet in values { - + public static func hasStateAssociatedWithOneAlt(_ configs: ATNConfigSet) -> Bool { + let x = getStateToAltMap(configs) + for alts in x.values { if alts.cardinality() == 1 { return true } @@ -487,17 +507,17 @@ public enum PredictionMode { return false } - public static func getSingleViableAlt(_ altsets: Array) throws -> Int { - let viableAlts: BitSet = BitSet() - for alts: BitSet in altsets { - let minAlt: Int = try alts.nextSetBit(0) - try viableAlts.set(minAlt) + public static func getSingleViableAlt(_ altsets: [BitSet]) -> Int { + let viableAlts = BitSet() + for alts in altsets { + let minAlt = alts.firstSetBit() + try! viableAlts.set(minAlt) if viableAlts.cardinality() > 1 { // more than 1 viable alt return ATN.INVALID_ALT_NUMBER } } - return try viableAlts.nextSetBit(0) + return viableAlts.firstSetBit() } } diff --git a/runtime/Swift/Sources/Antlr4/atn/ProfilingATNSimulator.swift b/runtime/Swift/Sources/Antlr4/atn/ProfilingATNSimulator.swift index f43015547..59e082d94 100644 --- a/runtime/Swift/Sources/Antlr4/atn/ProfilingATNSimulator.swift +++ b/runtime/Swift/Sources/Antlr4/atn/ProfilingATNSimulator.swift @@ -1,9 +1,13 @@ +/// /// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. /// Use of this file is governed by the BSD 3-clause license that /// can be found in the LICENSE.txt file in the project root. 
+/// +/// /// - 4.3 +/// import Foundation @@ -17,6 +21,7 @@ public class ProfilingATNSimulator: ParserATNSimulator { internal var currentDecision: Int = 0 internal var currentState: DFAState? + /// /// At the point of LL failover, we record how SLL would resolve the conflict so that /// we can determine whether or not a decision / input pair is context-sensitive. /// If LL gives a different result than SLL's predicted alternative, we have a @@ -27,6 +32,7 @@ public class ProfilingATNSimulator: ParserATNSimulator { /// was not required in order to produce a correct prediction for this decision and input sequence. /// It may in fact still be a context sensitivity but we don't know by looking at the /// minimum alternatives for the current input. + /// internal var conflictingAltResolvedBySLL: Int = 0 public init(_ parser: Parser) { @@ -107,7 +113,7 @@ public class ProfilingATNSimulator: ParserATNSimulator { override internal func computeTargetState(_ dfa: DFA, _ previousD: DFAState, _ t: Int) throws -> DFAState { - let state: DFAState = try super.computeTargetState(dfa, previousD, t) + let state = try super.computeTargetState(dfa, previousD, t) currentState = state return state } @@ -120,7 +126,7 @@ public class ProfilingATNSimulator: ParserATNSimulator { _llStopIndex = _input.index() } - let reachConfigs: ATNConfigSet? 
= try super.computeReachSet(closure, t, fullCtx) + let reachConfigs = try super.computeReachSet(closure, t, fullCtx) if fullCtx { decisions[currentDecision].LL_ATNTransitions += 1 // count computation even if error if reachConfigs != nil { @@ -146,12 +152,12 @@ public class ProfilingATNSimulator: ParserATNSimulator { override internal func evalSemanticContext(_ pred: SemanticContext, _ parserCallStack: ParserRuleContext, _ alt: Int, _ fullCtx: Bool) throws -> Bool { - let result: Bool = try super.evalSemanticContext(pred, parserCallStack, alt, fullCtx) + let result = try super.evalSemanticContext(pred, parserCallStack, alt, fullCtx) if !(pred is SemanticContext.PrecedencePredicate) { - let fullContext: Bool = _llStopIndex >= 0 - let stopIndex: Int = fullContext ? _llStopIndex : _sllStopIndex + let fullContext = _llStopIndex >= 0 + let stopIndex = fullContext ? _llStopIndex : _sllStopIndex decisions[currentDecision].predicateEvals.append( - PredicateEvalInfo(currentDecision, _input, _startIndex, stopIndex, pred, result, alt, fullCtx) + PredicateEvalInfo(currentDecision, _input, _startIndex, stopIndex, pred, result, alt, fullCtx) ) } @@ -159,34 +165,36 @@ public class ProfilingATNSimulator: ParserATNSimulator { } override - internal func reportAttemptingFullContext(_ dfa: DFA, _ conflictingAlts: BitSet?, _ configs: ATNConfigSet, _ startIndex: Int, _ stopIndex: Int) throws { + internal func reportAttemptingFullContext(_ dfa: DFA, _ conflictingAlts: BitSet?, _ configs: ATNConfigSet, _ startIndex: Int, _ stopIndex: Int) { if let conflictingAlts = conflictingAlts { - conflictingAltResolvedBySLL = try conflictingAlts.nextSetBit(0) + conflictingAltResolvedBySLL = conflictingAlts.firstSetBit() } else { - conflictingAltResolvedBySLL = try configs.getAlts().nextSetBit(0) + let configAlts = configs.getAlts() + conflictingAltResolvedBySLL = configAlts.firstSetBit() } decisions[currentDecision].LL_Fallback += 1 - try super.reportAttemptingFullContext(dfa, conflictingAlts, 
configs, startIndex, stopIndex) + super.reportAttemptingFullContext(dfa, conflictingAlts, configs, startIndex, stopIndex) } override - internal func reportContextSensitivity(_ dfa: DFA, _ prediction: Int, _ configs: ATNConfigSet, _ startIndex: Int, _ stopIndex: Int) throws { + internal func reportContextSensitivity(_ dfa: DFA, _ prediction: Int, _ configs: ATNConfigSet, _ startIndex: Int, _ stopIndex: Int) { if prediction != conflictingAltResolvedBySLL { decisions[currentDecision].contextSensitivities.append( ContextSensitivityInfo(currentDecision, configs, _input, startIndex, stopIndex) ) } - try super.reportContextSensitivity(dfa, prediction, configs, startIndex, stopIndex) + super.reportContextSensitivity(dfa, prediction, configs, startIndex, stopIndex) } override internal func reportAmbiguity(_ dfa: DFA, _ D: DFAState, _ startIndex: Int, _ stopIndex: Int, _ exact: Bool, - _ ambigAlts: BitSet?, _ configs: ATNConfigSet) throws { + _ ambigAlts: BitSet?, _ configs: ATNConfigSet) { var prediction: Int if let ambigAlts = ambigAlts { - prediction = try ambigAlts.nextSetBit(0) + prediction = ambigAlts.firstSetBit() } else { - prediction = try configs.getAlts().nextSetBit(0) + let configAlts = configs.getAlts() + prediction = configAlts.firstSetBit() } if configs.fullCtx && prediction != conflictingAltResolvedBySLL { // Even though this is an ambiguity we are reporting, we can @@ -202,7 +210,7 @@ public class ProfilingATNSimulator: ParserATNSimulator { AmbiguityInfo(currentDecision, configs, ambigAlts!, _input, startIndex, stopIndex, configs.fullCtx) ) - try super.reportAmbiguity(dfa, D, startIndex, stopIndex, exact, ambigAlts!, configs) + super.reportAmbiguity(dfa, D, startIndex, stopIndex, exact, ambigAlts!, configs) } diff --git a/runtime/Swift/Sources/Antlr4/atn/RangeTransition.swift b/runtime/Swift/Sources/Antlr4/atn/RangeTransition.swift index 7949a18d0..ddb2c4e75 100644 --- a/runtime/Swift/Sources/Antlr4/atn/RangeTransition.swift +++ 
b/runtime/Swift/Sources/Antlr4/atn/RangeTransition.swift @@ -1,6 +1,8 @@ +/// /// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. /// Use of this file is governed by the BSD 3-clause license that /// can be found in the LICENSE.txt file in the project root. +/// public final class RangeTransition: Transition, CustomStringConvertible { @@ -20,9 +22,8 @@ public final class RangeTransition: Transition, CustomStringConvertible { } override - //old label() - public func labelIntervalSet() throws -> IntervalSet { - return try IntervalSet.of(from, to) + public func labelIntervalSet() -> IntervalSet? { + return IntervalSet.of(from, to) } override diff --git a/runtime/Swift/Sources/Antlr4/atn/RuleStartState.swift b/runtime/Swift/Sources/Antlr4/atn/RuleStartState.swift index dd13745eb..aeddb7753 100644 --- a/runtime/Swift/Sources/Antlr4/atn/RuleStartState.swift +++ b/runtime/Swift/Sources/Antlr4/atn/RuleStartState.swift @@ -1,6 +1,8 @@ +/// /// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. /// Use of this file is governed by the BSD 3-clause license that /// can be found in the LICENSE.txt file in the project root. +/// public final class RuleStartState: ATNState { diff --git a/runtime/Swift/Sources/Antlr4/atn/RuleStopState.swift b/runtime/Swift/Sources/Antlr4/atn/RuleStopState.swift index 6a45bf352..be281afb5 100644 --- a/runtime/Swift/Sources/Antlr4/atn/RuleStopState.swift +++ b/runtime/Swift/Sources/Antlr4/atn/RuleStopState.swift @@ -1,13 +1,17 @@ +/// /// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. /// Use of this file is governed by the BSD 3-clause license that /// can be found in the LICENSE.txt file in the project root. +/// +/// /// The last node in the ATN for a rule, unless that rule is the start symbol. /// In that case, there is one transition to EOF. Later, we might encode /// references to all calls to this rule to compute FOLLOW sets for /// error handling. 
+/// public final class RuleStopState: ATNState { diff --git a/runtime/Swift/Sources/Antlr4/atn/RuleTransition.swift b/runtime/Swift/Sources/Antlr4/atn/RuleTransition.swift index 07578d1a2..7e6a82444 100644 --- a/runtime/Swift/Sources/Antlr4/atn/RuleTransition.swift +++ b/runtime/Swift/Sources/Antlr4/atn/RuleTransition.swift @@ -1,23 +1,31 @@ +/// /// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. /// Use of this file is governed by the BSD 3-clause license that /// can be found in the LICENSE.txt file in the project root. +/// public final class RuleTransition: Transition { + /// /// Ptr to the rule definition object for this rule ref + /// public final var ruleIndex: Int // no Rule object at runtime public final var precedence: Int + /// /// What node to begin computations following ref to rule + /// public final var followState: ATNState + /// /// - Use - /// {@link #RuleTransition(org.antlr.v4.runtime.atn.RuleStartState, int, int, org.antlr.v4.runtime.atn.ATNState)} instead. + /// _#RuleTransition(org.antlr.v4.runtime.atn.RuleStartState, int, int, org.antlr.v4.runtime.atn.ATNState)_ instead. + /// //@Deprecated public convenience init(_ ruleStart: RuleStartState, _ ruleIndex: Int, diff --git a/runtime/Swift/Sources/Antlr4/atn/SemanticContext.swift b/runtime/Swift/Sources/Antlr4/atn/SemanticContext.swift index bed87c128..261a3734e 100644 --- a/runtime/Swift/Sources/Antlr4/atn/SemanticContext.swift +++ b/runtime/Swift/Sources/Antlr4/atn/SemanticContext.swift @@ -1,65 +1,72 @@ +/// /// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. /// Use of this file is governed by the BSD 3-clause license that /// can be found in the LICENSE.txt file in the project root. +/// +/// /// A tree structure used to record the semantic context in which /// an ATN configuration is valid. It's either a single predicate, -/// a conjunction {@code p1&&p2}, or a sum of products {@code p1||p2}. -/// -///

      I have scoped the {@link org.antlr.v4.runtime.atn.SemanticContext.AND}, {@link org.antlr.v4.runtime.atn.SemanticContext.OR}, and {@link org.antlr.v4.runtime.atn.SemanticContext.Predicate} subclasses of -/// {@link org.antlr.v4.runtime.atn.SemanticContext} within the scope of this outer class.

      +/// a conjunction `p1&&p2`, or a sum of products `p1||p2`. +/// +/// I have scoped the _org.antlr.v4.runtime.atn.SemanticContext.AND_, _org.antlr.v4.runtime.atn.SemanticContext.OR_, and _org.antlr.v4.runtime.atn.SemanticContext.Predicate_ subclasses of +/// _org.antlr.v4.runtime.atn.SemanticContext_ within the scope of this outer class. +/// import Foundation public class SemanticContext: Hashable, CustomStringConvertible { - /// The default {@link org.antlr.v4.runtime.atn.SemanticContext}, which is semantically equivalent to - /// a predicate of the form {@code {true}?}. + /// + /// The default _org.antlr.v4.runtime.atn.SemanticContext_, which is semantically equivalent to + /// a predicate of the form `{true`?}. + /// public static let NONE: SemanticContext = Predicate() + /// /// For context independent predicates, we evaluate them without a local /// context (i.e., null context). That way, we can evaluate them without /// having to create proper rule-specific context during prediction (as /// opposed to the parser, which creates them naturally). In a practical /// sense, this avoids a cast exception from RuleContext to myruleContext. - /// - ///

      For context dependent predicates, we must pass in a local context so that + /// + /// For context dependent predicates, we must pass in a local context so that /// references such as $arg evaluate properly as _localctx.arg. We only /// capture context dependent predicates in the context in which we begin /// prediction, so we passed in the outer context here in case of context - /// dependent predicate evaluation.

      - public func eval(_ parser: Recognizer, _ parserCallStack: RuleContext) throws -> Bool { - RuntimeException(#function + " must be overridden") - return false + /// dependent predicate evaluation. + /// + public func eval(_ parser: Recognizer, _ parserCallStack: RuleContext) throws -> Bool { + fatalError(#function + " must be overridden") } + /// /// Evaluate the precedence predicates for the context and reduce the result. - /// + /// /// - parameter parser: The parser instance. /// - parameter parserCallStack: /// - returns: The simplified semantic context after precedence predicates are /// evaluated, which will be one of the following values. - ///
        - ///
      • {@link #NONE}: if the predicate simplifies to {@code true} after - /// precedence predicates are evaluated.
      • - ///
      • {@code null}: if the predicate simplifies to {@code false} after - /// precedence predicates are evaluated.
      • - ///
      • {@code this}: if the semantic context is not changed as a result of - /// precedence predicate evaluation.
      • - ///
      • A non-{@code null} {@link org.antlr.v4.runtime.atn.SemanticContext}: the new simplified - /// semantic context after precedence predicates are evaluated.
      • - ///
      - public func evalPrecedence(_ parser: Recognizer, _ parserCallStack: RuleContext) throws -> SemanticContext? { + /// * _#NONE_: if the predicate simplifies to `true` after + /// precedence predicates are evaluated. + /// * `null`: if the predicate simplifies to `false` after + /// precedence predicates are evaluated. + /// * `this`: if the semantic context is not changed as a result of + /// precedence predicate evaluation. + /// * A non-`null` _org.antlr.v4.runtime.atn.SemanticContext_: the new simplified + /// semantic context after precedence predicates are evaluated. + /// + public func evalPrecedence(_ parser: Recognizer, _ parserCallStack: RuleContext) throws -> SemanticContext? { return self } + public var hashValue: Int { - RuntimeException(#function + " must be overridden") - return 0 + fatalError(#function + " must be overridden") } + public var description: String { - RuntimeException(#function + " must be overridden") - return "" + fatalError(#function + " must be overridden") } public class Predicate: SemanticContext { @@ -82,19 +89,18 @@ public class SemanticContext: Hashable, CustomStringConvertible { } override - public func eval(_ parser: Recognizer, _ parserCallStack: RuleContext) throws -> Bool { - let localctx: RuleContext? = isCtxDependent ? parserCallStack : nil + public func eval(_ parser: Recognizer, _ parserCallStack: RuleContext) throws -> Bool { + let localctx = isCtxDependent ? parserCallStack : nil return try parser.sempred(localctx, ruleIndex, predIndex) } override public var hashValue: Int { - var hashCode: Int = MurmurHash.initialize() + var hashCode = MurmurHash.initialize() hashCode = MurmurHash.update(hashCode, ruleIndex) hashCode = MurmurHash.update(hashCode, predIndex) hashCode = MurmurHash.update(hashCode, isCtxDependent ? 
1 : 0) - hashCode = MurmurHash.finish(hashCode, 3) - return hashCode + return MurmurHash.finish(hashCode, 3) } @@ -118,13 +124,13 @@ public class SemanticContext: Hashable, CustomStringConvertible { } override - public func eval(_ parser: Recognizer, _ parserCallStack: RuleContext) throws -> Bool { - return try parser.precpred(parserCallStack, precedence) + public func eval(_ parser: Recognizer, _ parserCallStack: RuleContext) throws -> Bool { + return parser.precpred(parserCallStack, precedence) } override - public func evalPrecedence(_ parser: Recognizer, _ parserCallStack: RuleContext) throws -> SemanticContext? { - if try parser.precpred(parserCallStack, precedence) { + public func evalPrecedence(_ parser: Recognizer, _ parserCallStack: RuleContext) throws -> SemanticContext? { + if parser.precpred(parserCallStack, precedence) { return SemanticContext.NONE } else { return nil @@ -146,62 +152,64 @@ public class SemanticContext: Hashable, CustomStringConvertible { } } + /// /// This is the base class for semantic context "operators", which operate on /// a collection of semantic context "operands". - /// + /// /// - 4.3 + /// public class Operator: SemanticContext { + /// /// Gets the operands for the semantic context operator. - /// - /// - returns: a collection of {@link org.antlr.v4.runtime.atn.SemanticContext} operands for the + /// + /// - returns: a collection of _org.antlr.v4.runtime.atn.SemanticContext_ operands for the /// operator. - /// + /// /// - 4.3 + /// public func getOperands() -> Array { - RuntimeException(" must overriden ") - return Array() + fatalError(#function + " must be overridden") } } + /// /// A semantic context which is true whenever none of the contained contexts /// is false. + /// public class AND: Operator { public let opnds: [SemanticContext] public init(_ a: SemanticContext, _ b: SemanticContext) { - var operands: Set = Set() - if a is AND { - operands.formUnion((a as! 
AND).opnds) - //operands.addAll(Arrays.asList((a as AND).opnds)); + var operands = Set() + if let aAnd = a as? AND { + operands.formUnion(aAnd.opnds) } else { operands.insert(a) } - if b is AND { - operands.formUnion((b as! AND).opnds) - //operands.addAll(Arrays.asList((b as AND).opnds)); + if let bAnd = b as? AND { + operands.formUnion(bAnd.opnds) } else { operands.insert(b) } - let precedencePredicates: Array = - SemanticContext.filterPrecedencePredicates(&operands) + let precedencePredicates = SemanticContext.filterPrecedencePredicates(&operands) if !precedencePredicates.isEmpty { // interested in the transition with the lowest precedence - let reduced: PrecedencePredicate = precedencePredicates.sorted { + let reduced = precedencePredicates.sorted { $0.precedence < $1.precedence - }.first! //Collections.min(precedencePredicates); - operands.insert(reduced) + } + operands.insert(reduced[0]) } - opnds = Array(operands) //.toArray(new, SemanticContext[operands.size()]); + opnds = Array(operands) } override - public func getOperands() -> Array { + public func getOperands() -> [SemanticContext] { return opnds } @@ -214,14 +222,16 @@ public class SemanticContext: Hashable, CustomStringConvertible { return MurmurHash.hashCode(opnds, seed) } - /// {@inheritDoc} - /// - ///

      + /// + /// + /// + /// /// The evaluation of predicates by this context is short-circuiting, but - /// unordered.

      + /// unordered. + /// override - public func eval(_ parser: Recognizer, _ parserCallStack: RuleContext) throws -> Bool { - for opnd: SemanticContext in opnds { + public func eval(_ parser: Recognizer, _ parserCallStack: RuleContext) throws -> Bool { + for opnd in opnds { if try !opnd.eval(parser, parserCallStack) { return false } @@ -230,11 +240,11 @@ public class SemanticContext: Hashable, CustomStringConvertible { } override - public func evalPrecedence(_ parser: Recognizer, _ parserCallStack: RuleContext) throws -> SemanticContext? { - var differs: Bool = false - var operands: Array = Array() - for context: SemanticContext in opnds { - let evaluated: SemanticContext? = try context.evalPrecedence(parser, parserCallStack) + public func evalPrecedence(_ parser: Recognizer, _ parserCallStack: RuleContext) throws -> SemanticContext? { + var differs = false + var operands = [SemanticContext]() + for context in opnds { + let evaluated = try context.evalPrecedence(parser, parserCallStack) //TODO differs |= (evaluated != context) //differs |= (evaluated != context); differs = differs || (evaluated != context) @@ -242,11 +252,10 @@ public class SemanticContext: Hashable, CustomStringConvertible { if evaluated == nil { // The AND context is false if any element is false return nil - } else { - if evaluated != SemanticContext.NONE { - // Reduce the result by skipping true elements - operands.append(evaluated!) - } + } + else if evaluated != SemanticContext.NONE { + // Reduce the result by skipping true elements + operands.append(evaluated!) } } @@ -259,7 +268,7 @@ public class SemanticContext: Hashable, CustomStringConvertible { return SemanticContext.NONE } - var result: SemanticContext = operands[0] + var result = operands[0] let length = operands.count for i in 1.. = Set() - if a is OR { - operands.formUnion((a as! OR).opnds) - // operands.addAll(Arrays.asList((a as OR).opnds)); + if let aOr = a as? 
OR { + operands.formUnion(aOr.opnds) } else { operands.insert(a) } - if b is OR { - operands.formUnion((b as! OR).opnds) - //operands.addAll(Arrays.asList((b as OR).opnds)); + if let bOr = b as? OR { + operands.formUnion(bOr.opnds) } else { operands.insert(b) } - let precedencePredicates: Array = SemanticContext.filterPrecedencePredicates(&operands) + let precedencePredicates = SemanticContext.filterPrecedencePredicates(&operands) if !precedencePredicates.isEmpty { // interested in the transition with the highest precedence - let reduced: PrecedencePredicate = precedencePredicates.sorted { + + let reduced = precedencePredicates.sorted { $0.precedence > $1.precedence - }.first! - //var reduced : PrecedencePredicate = Collections.max(precedencePredicates); - operands.insert(reduced) + } + operands.insert(reduced[0]) } - self.opnds = Array(operands) //operands.toArray(new, SemanticContext[operands.size()]); + self.opnds = Array(operands) } override - public func getOperands() -> Array { - return opnds //Arrays.asList(opnds); + public func getOperands() -> [SemanticContext] { + return opnds } @@ -325,14 +334,16 @@ public class SemanticContext: Hashable, CustomStringConvertible { return MurmurHash.hashCode(opnds, NSStringFromClass(OR.self).hashValue) } - /// {@inheritDoc} - /// - ///

      + /// + /// + /// + /// /// The evaluation of predicates by this context is short-circuiting, but - /// unordered.

      + /// unordered. + /// override - public func eval(_ parser: Recognizer, _ parserCallStack: RuleContext) throws -> Bool { - for opnd: SemanticContext in opnds { + public func eval(_ parser: Recognizer, _ parserCallStack: RuleContext) throws -> Bool { + for opnd in opnds { if try opnd.eval(parser, parserCallStack) { return true } @@ -341,22 +352,19 @@ public class SemanticContext: Hashable, CustomStringConvertible { } override - public func evalPrecedence(_ parser: Recognizer, _ parserCallStack: RuleContext) throws -> SemanticContext? { - var differs: Bool = false - var operands: Array = Array() - for context: SemanticContext in opnds { - let evaluated: SemanticContext? = try context.evalPrecedence(parser, parserCallStack) - //differs |= (evaluated != context); + public func evalPrecedence(_ parser: Recognizer, _ parserCallStack: RuleContext) throws -> SemanticContext? { + var differs = false + var operands = [SemanticContext]() + for context in opnds { + let evaluated = try context.evalPrecedence(parser, parserCallStack) differs = differs || (evaluated != context) if evaluated == SemanticContext.NONE { // The OR context is true if any element is true return SemanticContext.NONE - } else { - if evaluated != nil { - // Reduce the result by skipping false elements - operands.append(evaluated!) - //operands.add(evaluated); - } + } + else if let evaluated = evaluated { + // Reduce the result by skipping false elements + operands.append(evaluated) } } @@ -369,7 +377,7 @@ public class SemanticContext: Hashable, CustomStringConvertible { return nil } - var result: SemanticContext = operands[0] + var result = operands[0] let length = operands.count for i in 1.. SemanticContext { if a == nil { return b! 
@@ -425,21 +435,14 @@ public class SemanticContext: Hashable, CustomStringConvertible { return result } - private static func filterPrecedencePredicates( - _ collection: inout Set) -> - Array { - - let result = collection.filter { - $0 is PrecedencePredicate + private static func filterPrecedencePredicates(_ collection: inout Set) -> [PrecedencePredicate] { + let result = collection.flatMap { + $0 as? PrecedencePredicate } collection = Set(collection.filter { !($0 is PrecedencePredicate) }) - //if (result == nil) { - //return Array(); - //} - - return (result as! Array) + return result } } diff --git a/runtime/Swift/Sources/Antlr4/atn/SetTransition.swift b/runtime/Swift/Sources/Antlr4/atn/SetTransition.swift index 569b8f305..a32bb8b0b 100644 --- a/runtime/Swift/Sources/Antlr4/atn/SetTransition.swift +++ b/runtime/Swift/Sources/Antlr4/atn/SetTransition.swift @@ -1,10 +1,14 @@ +/// /// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. /// Use of this file is governed by the BSD 3-clause license that /// can be found in the LICENSE.txt file in the project root. +/// +/// /// A transition containing a set of values. +/// public class SetTransition: Transition, CustomStringConvertible { public final var set: IntervalSet @@ -22,8 +26,7 @@ public class SetTransition: Transition, CustomStringConvertible { } override - ////old label() - public func labelIntervalSet() -> IntervalSet { + public func labelIntervalSet() -> IntervalSet? { return set } diff --git a/runtime/Swift/Sources/Antlr4/atn/SingletonPredictionContext.swift b/runtime/Swift/Sources/Antlr4/atn/SingletonPredictionContext.swift index a0bc1de88..01a49fc7c 100644 --- a/runtime/Swift/Sources/Antlr4/atn/SingletonPredictionContext.swift +++ b/runtime/Swift/Sources/Antlr4/atn/SingletonPredictionContext.swift @@ -1,6 +1,8 @@ +/// /// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. 
/// Use of this file is governed by the BSD 3-clause license that /// can be found in the LICENSE.txt file in the project root. +/// diff --git a/runtime/Swift/Sources/Antlr4/atn/StarBlockStartState.swift b/runtime/Swift/Sources/Antlr4/atn/StarBlockStartState.swift index c9a5fc6b6..386578105 100644 --- a/runtime/Swift/Sources/Antlr4/atn/StarBlockStartState.swift +++ b/runtime/Swift/Sources/Antlr4/atn/StarBlockStartState.swift @@ -1,10 +1,14 @@ +/// /// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. /// Use of this file is governed by the BSD 3-clause license that /// can be found in the LICENSE.txt file in the project root. +/// +/// /// The block that begins a closure loop. +/// public final class StarBlockStartState: BlockStartState { diff --git a/runtime/Swift/Sources/Antlr4/atn/StarLoopEntryState.swift b/runtime/Swift/Sources/Antlr4/atn/StarLoopEntryState.swift index acb8b38a2..6e316f881 100644 --- a/runtime/Swift/Sources/Antlr4/atn/StarLoopEntryState.swift +++ b/runtime/Swift/Sources/Antlr4/atn/StarLoopEntryState.swift @@ -1,19 +1,23 @@ +/// /// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. /// Use of this file is governed by the BSD 3-clause license that /// can be found in the LICENSE.txt file in the project root. +/// public final class StarLoopEntryState: DecisionState { public var loopBackState: StarLoopbackState? + /// /// Indicates whether this state can benefit from a precedence DFA during SLL /// decision making. - /// - ///

      This is a computed property that is calculated during ATN deserialization - /// and stored for use in {@link org.antlr.v4.runtime.atn.ParserATNSimulator} and - /// {@link org.antlr.v4.runtime.ParserInterpreter}.

      - /// + /// + /// This is a computed property that is calculated during ATN deserialization + /// and stored for use in _org.antlr.v4.runtime.atn.ParserATNSimulator_ and + /// _org.antlr.v4.runtime.ParserInterpreter_. + /// /// - seealso: org.antlr.v4.runtime.dfa.DFA#isPrecedenceDfa() + /// public var precedenceRuleDecision: Bool = false override diff --git a/runtime/Swift/Sources/Antlr4/atn/StarLoopbackState.swift b/runtime/Swift/Sources/Antlr4/atn/StarLoopbackState.swift index 78671b667..dbed894fd 100644 --- a/runtime/Swift/Sources/Antlr4/atn/StarLoopbackState.swift +++ b/runtime/Swift/Sources/Antlr4/atn/StarLoopbackState.swift @@ -1,6 +1,8 @@ +/// /// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. /// Use of this file is governed by the BSD 3-clause license that /// can be found in the LICENSE.txt file in the project root. +/// public final class StarLoopbackState: ATNState { diff --git a/runtime/Swift/Sources/Antlr4/atn/TokensStartState.swift b/runtime/Swift/Sources/Antlr4/atn/TokensStartState.swift index 4c52c9b0f..6d4dcca05 100644 --- a/runtime/Swift/Sources/Antlr4/atn/TokensStartState.swift +++ b/runtime/Swift/Sources/Antlr4/atn/TokensStartState.swift @@ -1,9 +1,13 @@ +/// /// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. /// Use of this file is governed by the BSD 3-clause license that /// can be found in the LICENSE.txt file in the project root. +/// +/// /// The Tokens rule start state linking to each lexer rule start state +/// public final class TokensStartState: DecisionState { diff --git a/runtime/Swift/Sources/Antlr4/atn/Transition.swift b/runtime/Swift/Sources/Antlr4/atn/Transition.swift index a320149ae..75cc5cefa 100644 --- a/runtime/Swift/Sources/Antlr4/atn/Transition.swift +++ b/runtime/Swift/Sources/Antlr4/atn/Transition.swift @@ -1,19 +1,23 @@ +/// /// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. 
/// Use of this file is governed by the BSD 3-clause license that /// can be found in the LICENSE.txt file in the project root. +/// +/// /// An ATN transition between any two ATN states. Subclasses define /// atom, set, epsilon, action, predicate, rule transitions. -/// -///

      This is a one way link. It emanates from a state (usually via a list of -/// transitions) and has a target state.

      -/// -///

      Since we never have to change the ATN transitions once we construct it, +/// +/// This is a one way link. It emanates from a state (usually via a list of +/// transitions) and has a target state. +/// +/// Since we never have to change the ATN transitions once we construct it, /// we can fix these transitions as specific classes. The DFA transitions /// on the other hand need to update the labels as it adds transitions to /// the states. We'll use the term Edge for the DFA to distinguish them from -/// ATN transitions.

      +/// ATN transitions. +/// import Foundation @@ -65,7 +69,9 @@ public class Transition { ] + /// /// The target of this transition. + /// public final var target: ATNState @@ -76,28 +82,28 @@ public class Transition { } public func getSerializationType() -> Int { - RuntimeException(#function + " must be overridden") - fatalError() + fatalError(#function + " must be overridden") } + /// /// Determines if the transition is an "epsilon" transition. - /// - ///

      The default implementation returns {@code false}.

      - /// - /// - returns: {@code true} if traversing this transition in the ATN does not - /// consume an input symbol; otherwise, {@code false} if traversing this + /// + /// The default implementation returns `false`. + /// + /// - returns: `true` if traversing this transition in the ATN does not + /// consume an input symbol; otherwise, `false` if traversing this /// transition consumes (matches) an input symbol. + /// public func isEpsilon() -> Bool { return false } - public func labelIntervalSet() throws -> IntervalSet? { + public func labelIntervalSet() -> IntervalSet? { return nil } public func matches(_ symbol: Int, _ minVocabSymbol: Int, _ maxVocabSymbol: Int) -> Bool { - RuntimeException(#function + " must be overridden") - fatalError() + fatalError(#function + " must be overridden") } } diff --git a/runtime/Swift/Sources/Antlr4/atn/WildcardTransition.swift b/runtime/Swift/Sources/Antlr4/atn/WildcardTransition.swift index 31becc2ea..4e52e0458 100644 --- a/runtime/Swift/Sources/Antlr4/atn/WildcardTransition.swift +++ b/runtime/Swift/Sources/Antlr4/atn/WildcardTransition.swift @@ -1,6 +1,8 @@ +/// /// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. /// Use of this file is governed by the BSD 3-clause license that /// can be found in the LICENSE.txt file in the project root. +/// final public class WildcardTransition: Transition, CustomStringConvertible { diff --git a/runtime/Swift/Sources/Antlr4/dfa/DFA.swift b/runtime/Swift/Sources/Antlr4/dfa/DFA.swift index a966fe25b..3f8096ca1 100644 --- a/runtime/Swift/Sources/Antlr4/dfa/DFA.swift +++ b/runtime/Swift/Sources/Antlr4/dfa/DFA.swift @@ -1,11 +1,15 @@ +/// /// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. /// Use of this file is governed by the BSD 3-clause license that /// can be found in the LICENSE.txt file in the project root. +/// public class DFA: CustomStringConvertible { - /// A set of all DFA states. 
Use {@link java.util.Map} so we can get old state back - /// ({@link java.util.Set} only allows you to see if it's there). + /// + /// A set of all DFA states. Use _java.util.Map_ so we can get old state back + /// (_java.util.Set_ only allows you to see if it's there). + /// public final var states: HashMap = HashMap() @@ -13,15 +17,21 @@ public class DFA: CustomStringConvertible { public final var decision: Int + /// /// From which ATN state did we create this DFA? + /// public let atnStartState: DecisionState - /// {@code true} if this DFA is for a precedence decision; otherwise, - /// {@code false}. This is the backing field for {@link #isPrecedenceDfa}. + /// + /// `true` if this DFA is for a precedence decision; otherwise, + /// `false`. This is the backing field for _#isPrecedenceDfa_. + /// private final var precedenceDfa: Bool + /// /// mutex for DFAState changes. + /// private var dfaStateMutex = Mutex() public convenience init(_ atnStartState: DecisionState) { @@ -47,28 +57,31 @@ public class DFA: CustomStringConvertible { self.precedenceDfa = precedenceDfa } + /// /// Gets whether this DFA is a precedence DFA. Precedence DFAs use a special - /// start state {@link #s0} which is not stored in {@link #states}. The - /// {@link org.antlr.v4.runtime.dfa.DFAState#edges} array for this start state contains outgoing edges + /// start state _#s0_ which is not stored in _#states_. The + /// _org.antlr.v4.runtime.dfa.DFAState#edges_ array for this start state contains outgoing edges /// supplying individual start states corresponding to specific precedence /// values. - /// - /// - returns: {@code true} if this is a precedence DFA; otherwise, - /// {@code false}. + /// + /// - returns: `true` if this is a precedence DFA; otherwise, + /// `false`. /// - seealso: org.antlr.v4.runtime.Parser#getPrecedence() + /// public final func isPrecedenceDfa() -> Bool { return precedenceDfa } + /// /// Get the start state for a specific precedence value. 
- /// + /// /// - parameter precedence: The current precedence. /// - returns: The start state corresponding to the specified precedence, or - /// {@code null} if no start state exists for the specified precedence. - /// - /// - IllegalStateException if this is not a precedence DFA. + /// `null` if no start state exists for the specified precedence. + /// + /// - throws: _ANTLRError.illegalState_ if this is not a precedence DFA. /// - seealso: #isPrecedenceDfa() - ////@SuppressWarnings("null") + /// public final func getPrecedenceStartState(_ precedence: Int) throws -> DFAState? { if !isPrecedenceDfa() { throw ANTLRError.illegalState(msg: "Only precedence DFAs may contain a precedence start state.") @@ -85,15 +98,16 @@ public class DFA: CustomStringConvertible { return s0!.edges![precedence] } + /// /// Set the start state for a specific precedence value. - /// + /// /// - parameter precedence: The current precedence. /// - parameter startState: The start state corresponding to the specified /// precedence. - /// - /// - IllegalStateException if this is not a precedence DFA. + /// + /// - throws: _ANTLRError.illegalState_ if this is not a precedence DFA. /// - seealso: #isPrecedenceDfa() - ////@SuppressWarnings({"SynchronizeOnNonFinalField", "null"}) + /// public final func setPrecedenceStartState(_ precedence: Int, _ startState: DFAState) throws { if !isPrecedenceDfa() { throw ANTLRError.illegalState(msg: "Only precedence DFAs may contain a precedence start state.") @@ -116,16 +130,17 @@ public class DFA: CustomStringConvertible { } } + /// /// Sets whether this is a precedence DFA. - /// - /// - parameter precedenceDfa: {@code true} if this is a precedence DFA; otherwise, - /// {@code false} - /// - /// - UnsupportedOperationException if {@code precedenceDfa} does not - /// match the value of {@link #isPrecedenceDfa} for the current DFA. - /// - /// - This method no longer performs any action. 
- ////@Deprecated + /// + /// - parameter precedenceDfa: `true` if this is a precedence DFA; otherwise, + /// `false` + /// + /// - throws: ANTLRError.unsupportedOperation if `precedenceDfa` does not + /// match the value of _#isPrecedenceDfa_ for the current DFA. + /// + /// - note: This method no longer performs any action. + /// public final func setPrecedenceDfa(_ precedenceDfa: Bool) throws { if precedenceDfa != isPrecedenceDfa() { throw ANTLRError.unsupportedOperation(msg: "The precedenceDfa field cannot change after a DFA is constructed.") @@ -133,8 +148,9 @@ public class DFA: CustomStringConvertible { } } + /// /// Return a list of all states in this DFA, ordered by state number. - + /// public func getStates() -> Array { var result: Array = Array(states.keys) @@ -154,22 +170,12 @@ public class DFA: CustomStringConvertible { return description } - /// - Use {@link #toString(org.antlr.v4.runtime.Vocabulary)} instead. - ////@Deprecated - public func toString(_ tokenNames: [String?]?) -> String { - if s0 == nil { - return "" - } - let serializer: DFASerializer = DFASerializer(self, tokenNames) - return serializer.toString() - } - public func toString(_ vocabulary: Vocabulary) -> String { if s0 == nil { return "" } - let serializer: DFASerializer = DFASerializer(self, vocabulary) + let serializer = DFASerializer(self, vocabulary) return serializer.toString() } @@ -177,7 +183,7 @@ public class DFA: CustomStringConvertible { if s0 == nil { return "" } - let serializer: DFASerializer = LexerDFASerializer(self) + let serializer = LexerDFASerializer(self) return serializer.toString() } diff --git a/runtime/Swift/Sources/Antlr4/dfa/DFASerializer.swift b/runtime/Swift/Sources/Antlr4/dfa/DFASerializer.swift index 950de5f0c..3e19850de 100644 --- a/runtime/Swift/Sources/Antlr4/dfa/DFASerializer.swift +++ b/runtime/Swift/Sources/Antlr4/dfa/DFASerializer.swift @@ -1,22 +1,18 @@ +/// /// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. 
/// Use of this file is governed by the BSD 3-clause license that /// can be found in the LICENSE.txt file in the project root. +/// +/// /// A DFA walker that knows how to dump them to serialized strings. +/// public class DFASerializer: CustomStringConvertible { - private let dfa: DFA - private let vocabulary: Vocabulary - /// - Use {@link #DFASerializer(org.antlr.v4.runtime.dfa.DFA, org.antlr.v4.runtime.Vocabulary)} instead. - //@Deprecated - public convenience init(_ dfa: DFA, _ tokenNames: [String?]?) { - self.init(dfa, Vocabulary.fromTokenNames(tokenNames)) - } - public init(_ dfa: DFA, _ vocabulary: Vocabulary) { self.dfa = dfa self.vocabulary = vocabulary @@ -26,18 +22,17 @@ public class DFASerializer: CustomStringConvertible { if dfa.s0 == nil { return "" } - let buf: StringBuilder = StringBuilder() - let states: Array = dfa.getStates() - for s: DFAState in states { - var n: Int = 0 - if let sEdges = s.edges { - n = sEdges.count + let buf = StringBuilder() + let states = dfa.getStates() + for s in states { + guard let edges = s.edges else { + continue } + let n = edges.count for i in 0..") @@ -47,7 +42,7 @@ public class DFASerializer: CustomStringConvertible { } } - let output: String = buf.toString() + let output = buf.toString() if output.length == 0 { return "" } @@ -66,16 +61,16 @@ public class DFASerializer: CustomStringConvertible { internal func getStateString(_ s: DFAState) -> String { - let n: Int = s.stateNumber + let n = s.stateNumber let s1 = s.isAcceptState ? ":" : "" let s2 = s.requiresFullContext ? 
"^" : "" - let baseStateStr: String = s1 + "s" + String(n) + s2 + let baseStateStr = s1 + "s" + String(n) + s2 if s.isAcceptState { if let predicates = s.predicates { return baseStateStr + "=>\(predicates)" } else { - return baseStateStr + "=>\(s.prediction!)" + return baseStateStr + "=>\(s.prediction)" } } else { return baseStateStr diff --git a/runtime/Swift/Sources/Antlr4/dfa/DFAState.swift b/runtime/Swift/Sources/Antlr4/dfa/DFAState.swift index ad5413f56..f0a09c4d4 100644 --- a/runtime/Swift/Sources/Antlr4/dfa/DFAState.swift +++ b/runtime/Swift/Sources/Antlr4/dfa/DFAState.swift @@ -1,8 +1,11 @@ +/// /// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. /// Use of this file is governed by the BSD 3-clause license that /// can be found in the LICENSE.txt file in the project root. +/// +/// /// A DFA state represents a set of possible ATN configurations. /// As Aho, Sethi, Ullman p. 117 says "The DFA uses its state /// to keep track of all possible states the ATN can be in after @@ -18,72 +21,78 @@ /// jump from rule to rule, emulating rule invocations (method calls). /// I have to add a stack to simulate the proper lookahead sequences for /// the underlying LL grammar from which the ATN was derived. -/// -///

      I use a set of ATNConfig objects not simple states. An ATNConfig +/// +/// I use a set of ATNConfig objects not simple states. An ATNConfig /// is both a state (ala normal conversion) and a RuleContext describing -/// the chain of rules (if any) followed to arrive at that state.

      -/// -///

      A DFA state may have multiple references to a particular state, +/// the chain of rules (if any) followed to arrive at that state. +/// +/// A DFA state may have multiple references to a particular state, /// but with different ATN contexts (with same or different alts) -/// meaning that state was reached via a different set of rule invocations.

      +/// meaning that state was reached via a different set of rule invocations. +/// public class DFAState: Hashable, CustomStringConvertible { - public var stateNumber: Int = -1 + public var stateNumber = -1 + public var configs = ATNConfigSet() - public var configs: ATNConfigSet = ATNConfigSet() - - /// {@code edges[symbol]} points to target of symbol. Shift up by 1 so (-1) - /// {@link org.antlr.v4.runtime.Token#EOF} maps to {@code edges[0]}. - + /// + /// `edges[symbol]` points to target of symbol. Shift up by 1 so (-1) + /// _org.antlr.v4.runtime.Token#EOF_ maps to `edges[0]`. + /// public var edges: [DFAState?]! - public var isAcceptState: Bool = false + public var isAcceptState = false + /// /// if accept state, what ttype do we match or alt do we predict? - /// This is set to {@link org.antlr.v4.runtime.atn.ATN#INVALID_ALT_NUMBER} when {@link #predicates}{@code !=null} or - /// {@link #requiresFullContext}. - public var prediction: Int! = 0 + /// This is set to _org.antlr.v4.runtime.atn.ATN#INVALID_ALT_NUMBER_ when _#predicates_`!=null` or + /// _#requiresFullContext_. + /// + public var prediction = 0 public var lexerActionExecutor: LexerActionExecutor! + /// /// Indicates that this state was created during SLL prediction that /// discovered a conflict between the configurations in the state. Future - /// {@link org.antlr.v4.runtime.atn.ParserATNSimulator#execATN} invocations immediately jumped doing + /// _org.antlr.v4.runtime.atn.ParserATNSimulator#execATN_ invocations immediately jumped doing /// full context prediction if this field is true. - public var requiresFullContext: Bool = false + /// + public var requiresFullContext = false + /// /// During SLL parsing, this is a list of predicates associated with the /// ATN configurations of the DFA state. When we have predicates, - /// {@link #requiresFullContext} is {@code false} since full context prediction evaluates predicates - /// on-the-fly. 
If this is not null, then {@link #prediction} is - /// {@link org.antlr.v4.runtime.atn.ATN#INVALID_ALT_NUMBER}. - /// - ///

      We only use these for non-{@link #requiresFullContext} but conflicting states. That + /// _#requiresFullContext_ is `false` since full context prediction evaluates predicates + /// on-the-fly. If this is not null, then _#prediction_ is + /// _org.antlr.v4.runtime.atn.ATN#INVALID_ALT_NUMBER_. + /// + /// We only use these for non-_#requiresFullContext_ but conflicting states. That /// means we know from the context (it's $ or we don't dip into outer - /// context) that it's an ambiguity not a conflict.

      - /// - ///

      This list is computed by {@link org.antlr.v4.runtime.atn.ParserATNSimulator#predicateDFAState}.

      + /// context) that it's an ambiguity not a conflict. + /// + /// This list is computed by _org.antlr.v4.runtime.atn.ParserATNSimulator#predicateDFAState_. + /// public var predicates: [PredPrediction]? + /// /// Map a predicate to a predicted alternative. + /// public class PredPrediction: CustomStringConvertible { - public final var pred: SemanticContext // never null; at least SemanticContext.NONE public final var alt: Int + public init(_ pred: SemanticContext, _ alt: Int) { self.alt = alt self.pred = pred } - public var description: String { - return "(\(pred),\(alt))" - } } @@ -98,37 +107,36 @@ public class DFAState: Hashable, CustomStringConvertible { self.configs = configs } + /// /// Get the set of all alts mentioned by all ATN configurations in this /// DFA state. + /// public func getAltSet() -> Set? { - - let alts = configs.getAltSet() - - return alts + return configs.getAltSet() } public var hashValue: Int { - var hash: Int = MurmurHash.initialize(7) + var hash = MurmurHash.initialize(7) hash = MurmurHash.update(hash, configs.hashValue) - hash = MurmurHash.finish(hash, 1) - return hash + return MurmurHash.finish(hash, 1) } - /// Two {@link org.antlr.v4.runtime.dfa.DFAState} instances are equal if their ATN configuration sets + /// + /// Two _org.antlr.v4.runtime.dfa.DFAState_ instances are equal if their ATN configuration sets /// are the same. This method is used to see if a state already exists. - /// - ///

      Because the number of alternatives and number of ATN configurations are + /// + /// Because the number of alternatives and number of ATN configurations are /// finite, there is a finite number of DFA states that can be processed. - /// This is necessary to show that the algorithm terminates.

      - /// - ///

      Cannot test the DFA state numbers here because in - /// {@link org.antlr.v4.runtime.atn.ParserATNSimulator#addDFAState} we need to know if any other state + /// This is necessary to show that the algorithm terminates. + /// + /// Cannot test the DFA state numbers here because in + /// _org.antlr.v4.runtime.atn.ParserATNSimulator#addDFAState_ we need to know if any other state /// exists that has this exact set of ATN configurations. The - /// {@link #stateNumber} is irrelevant.

      - + /// _#stateNumber_ is irrelevant. + /// public var description: String { - let buf: StringBuilder = StringBuilder() + let buf = StringBuilder() buf.append(stateNumber).append(":").append(configs) if isAcceptState { buf.append("=>") @@ -140,15 +148,11 @@ public class DFAState: Hashable, CustomStringConvertible { } return buf.toString() } - - } public func ==(lhs: DFAState, rhs: DFAState) -> Bool { - if lhs === rhs { return true } - let sameSet: Bool = lhs.configs == rhs.configs - return sameSet + return (lhs.configs == rhs.configs) } diff --git a/runtime/Swift/Sources/Antlr4/dfa/LexerDFASerializer.swift b/runtime/Swift/Sources/Antlr4/dfa/LexerDFASerializer.swift index e68205bef..82a81810f 100644 --- a/runtime/Swift/Sources/Antlr4/dfa/LexerDFASerializer.swift +++ b/runtime/Swift/Sources/Antlr4/dfa/LexerDFASerializer.swift @@ -1,6 +1,8 @@ +/// /// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. /// Use of this file is governed by the BSD 3-clause license that /// can be found in the LICENSE.txt file in the project root. +/// public class LexerDFASerializer: DFASerializer { diff --git a/runtime/Swift/Sources/Antlr4/misc/ArrayList.swift b/runtime/Swift/Sources/Antlr4/misc/ArrayList.swift index 40239f3e9..cb23f0bc0 100644 --- a/runtime/Swift/Sources/Antlr4/misc/ArrayList.swift +++ b/runtime/Swift/Sources/Antlr4/misc/ArrayList.swift @@ -1,6 +1,8 @@ +/// /// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. /// Use of this file is governed by the BSD 3-clause license that /// can be found in the LICENSE.txt file in the project root. +/// // // ArrayList.swift diff --git a/runtime/Swift/Sources/Antlr4/misc/ArrayWrapper.swift b/runtime/Swift/Sources/Antlr4/misc/ArrayWrapper.swift index b4f19c05b..5d5a68b88 100644 --- a/runtime/Swift/Sources/Antlr4/misc/ArrayWrapper.swift +++ b/runtime/Swift/Sources/Antlr4/misc/ArrayWrapper.swift @@ -1,6 +1,8 @@ +/// /// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. 
/// Use of this file is governed by the BSD 3-clause license that /// can be found in the LICENSE.txt file in the project root. +/// // // ArrayWrapper.swift @@ -67,7 +69,7 @@ public final class ArrayWrapper: ExpressibleByArrayLiteral, Hashabl } -public func == (lhs: ArrayWrapper, rhs: ArrayWrapper) -> Bool { +public func == (lhs: ArrayWrapper, rhs: ArrayWrapper) -> Bool { if lhs === rhs { return true } diff --git a/runtime/Swift/Sources/Antlr4/misc/BitSet.swift b/runtime/Swift/Sources/Antlr4/misc/BitSet.swift index fcc41e96f..4f9c5f899 100644 --- a/runtime/Swift/Sources/Antlr4/misc/BitSet.swift +++ b/runtime/Swift/Sources/Antlr4/misc/BitSet.swift @@ -1,6 +1,8 @@ +/// /// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. /// Use of this file is governed by the BSD 3-clause license that /// can be found in the LICENSE.txt file in the project root. +/// // // BitSet.swift @@ -12,77 +14,93 @@ import Foundation +/// /// This class implements a vector of bits that grows as needed. Each -/// component of the bit set has a {@code boolean} value. The -/// bits of a {@code BitSet} are indexed by nonnegative integers. +/// component of the bit set has a `boolean` value. The +/// bits of a `BitSet` are indexed by nonnegative integers. /// Individual indexed bits can be examined, set, or cleared. One -/// {@code BitSet} may be used to modify the contents of another -/// {@code BitSet} through logical AND, logical inclusive OR, and +/// `BitSet` may be used to modify the contents of another +/// `BitSet` through logical AND, logical inclusive OR, and /// logical exclusive OR operations. -/// -///

      By default, all bits in the set initially have the value -/// {@code false}. -/// -///

      Every bit set has a current size, which is the number of bits +/// +/// By default, all bits in the set initially have the value +/// `false`. +/// +/// Every bit set has a current size, which is the number of bits /// of space currently in use by the bit set. Note that the size is /// related to the implementation of a bit set, so it may change with /// implementation. The length of a bit set relates to logical length /// of a bit set and is defined independently of implementation. -/// -///

      Unless otherwise noted, passing a null parameter to any of the -/// methods in a {@code BitSet} will result in a -/// {@code NullPointerException}. -/// -///

      A {@code BitSet} is not safe for multithreaded use without +/// +/// A `BitSet` is not safe for multithreaded use without /// external synchronization. -/// -/// - Arthur van Hoff -/// - Michael McCloskey -/// - Martin Buchholz -/// - JDK1.0 +/// +/// - note: Arthur van Hoff +/// - note: Michael McCloskey +/// - note: Martin Buchholz +/// - note: JDK1.0 +/// public class BitSet: Hashable, CustomStringConvertible { + /// /// BitSets are packed into arrays of "words." Currently a word is /// a long, which consists of 64 bits, requiring 6 address bits. /// The choice of word size is determined purely by performance concerns. + /// private static let ADDRESS_BITS_PER_WORD: Int = 6 private static let BITS_PER_WORD: Int = 1 << ADDRESS_BITS_PER_WORD private static let BIT_INDEX_MASK: Int = BITS_PER_WORD - 1 + /// /// Used to shift left or right for a partial word mask + /// private static let WORD_MASK: Int64 = Int64.max //0xfffffffffffffff//-1 // 0xffffffffffffffffL; + /// /// - bits long[] - /// + /// /// The bits in this BitSet. The ith bit is stored in bits[i/64] at /// bit position i % 64 (where bit position 0 refers to the least /// significant bit and 63 refers to the most significant bit). + /// + /// /// The internal field corresponding to the serialField "bits". + /// fileprivate var words: [Int64] + /// /// The number of words in the logical size of this BitSet. + /// fileprivate var wordsInUse: Int = 0 //transient + /// /// Whether the size of "words" is user-specified. If so, we assume /// the user knows what he's doing and try harder to preserve it. + /// private var sizeIsSticky: Bool = false //transient + /// /// use serialVersionUID from JDK 1.0.2 for interoperability + /// private let serialVersionUID: Int64 = 7997698588986878753 //L; + /// /// Given a bit index, return word index containing it. 
+ /// private static func wordIndex(_ bitIndex: Int) -> Int { return bitIndex >> ADDRESS_BITS_PER_WORD } + /// /// Every public method must preserve these invariants. + /// fileprivate func checkInvariants() { assert((wordsInUse == 0 || words[wordsInUse - 1] != 0), "Expected: (wordsInUse==0||words[wordsInUse-1]!=0)") assert((wordsInUse >= 0 && wordsInUse <= words.count), "Expected: (wordsInUse>=0&&wordsInUse<=words.length)") @@ -90,9 +108,11 @@ public class BitSet: Hashable, CustomStringConvertible { assert((wordsInUse == words.count || words[wordsInUse] == 0), "Expected: (wordsInUse==words.count||words[wordsInUse]==0)") } + /// /// Sets the field wordsInUse to the logical size in words of the bit set. /// WARNING:This method assumes that the number of words actually in use is /// less than or equal to the current value of wordsInUse! + /// private func recalculateWordsInUse() { // Traverse the bitset until a used word is found var i: Int = wordsInUse - 1 @@ -106,7 +126,9 @@ public class BitSet: Hashable, CustomStringConvertible { wordsInUse = i + 1 // The new logical size } - /// Creates a new bit set. All bits are initially {@code false}. + /// + /// Creates a new bit set. All bits are initially `false`. + /// public init() { sizeIsSticky = false words = [Int64](repeating: Int64(0), count: BitSet.wordIndex(BitSet.BITS_PER_WORD - 1) + 1) @@ -114,13 +136,15 @@ public class BitSet: Hashable, CustomStringConvertible { } + /// /// Creates a bit set whose initial size is large enough to explicitly - /// represent bits with indices in the range {@code 0} through - /// {@code nbits-1}. All bits are initially {@code false}. - /// + /// represent bits with indices in the range `0` through + /// `nbits-1`. All bits are initially `false`. 
+ /// /// - parameter nbits: the initial size of the bit set - /// - NegativeArraySizeException if the specified initial size + /// - throws: _ANTLRError.negativeArraySize_ if the specified initial size /// is negative + /// public init(_ nbits: Int) throws { // nbits can't be negative; size 0 is OK @@ -139,8 +163,10 @@ public class BitSet: Hashable, CustomStringConvertible { // words = [BitSet.wordIndex(nbits-1) + 1]; } + /// /// Creates a bit set using words as the internal representation. /// The last word (if there is one) must be non-zero. + /// private init(_ words: [Int64]) { self.words = words self.wordsInUse = words.count @@ -148,17 +174,18 @@ public class BitSet: Hashable, CustomStringConvertible { } + /// /// Returns a new long array containing all the bits in this bit set. - /// - ///

      More precisely, if - ///
      {@code long[] longs = s.toLongArray();} - ///
      then {@code longs.length == (s.length()+63)/64} and - ///
      {@code s.get(n) == ((longs[n/64] & (1L<<(n%64))) != 0)} - ///
      for all {@code n < 64 * longs.length}. - /// + /// + /// More precisely, if + /// `long[] longs = s.toLongArray();` + /// then `longs.length == (s.length()+63)/64` and + /// `s.get(n) == ((longs[n/64] & (1L<<(n%64))) != 0)` + /// for all `n < 64 * longs.length`. + /// /// - returns: a long array containing a little-endian representation /// of all the bits in this bit set - /// - 1.7 + /// public func toLongArray() -> [Int64] { return copyOf(words, wordsInUse) } @@ -169,8 +196,10 @@ public class BitSet: Hashable, CustomStringConvertible { newWords[0 ..< length] = words[0 ..< length] return newWords } + /// /// Ensures that the BitSet can hold enough words. /// - parameter wordsRequired: the minimum acceptable number of words. + /// private func ensureCapacity(_ wordsRequired: Int) { if words.count < wordsRequired { // Allocate larger of doubled size or required size @@ -180,11 +209,13 @@ public class BitSet: Hashable, CustomStringConvertible { } } + /// /// Ensures that the BitSet can accommodate a given wordIndex, /// temporarily violating the invariants. The caller must /// restore the invariants before returning to the user, /// possibly using recalculateWordsInUse(). /// - parameter wordIndex: the index to be accommodated. + /// private func expandTo(_ wordIndex: Int) { let wordsRequired: Int = wordIndex + 1 if wordsInUse < wordsRequired { @@ -193,7 +224,9 @@ public class BitSet: Hashable, CustomStringConvertible { } } + /// /// Checks that fromIndex ... toIndex is a valid range of bit indices. + /// private static func checkRange(_ fromIndex: Int, _ toIndex: Int) throws { if fromIndex < 0 { throw ANTLRError.indexOutOfBounds(msg: "fromIndex < 0: \(fromIndex)") @@ -210,12 +243,13 @@ public class BitSet: Hashable, CustomStringConvertible { } } + /// /// Sets the bit at the specified index to the complement of its /// current value. 
- /// + /// /// - parameter bitIndex: the index of the bit to flip - /// - IndexOutOfBoundsException if the specified index is negative - /// - 1.4 + /// - throws: _ANTLRError.IndexOutOfBounds_ if the specified index is negative + /// public func flip(_ bitIndex: Int) throws { if bitIndex < 0 { throw ANTLRError.indexOutOfBounds(msg: "bitIndex < 0: \(bitIndex)") @@ -231,16 +265,17 @@ public class BitSet: Hashable, CustomStringConvertible { checkInvariants() } - /// Sets each bit from the specified {@code fromIndex} (inclusive) to the - /// specified {@code toIndex} (exclusive) to the complement of its current + /// + /// Sets each bit from the specified `fromIndex` (inclusive) to the + /// specified `toIndex` (exclusive) to the complement of its current /// value. - /// + /// /// - parameter fromIndex: index of the first bit to flip /// - parameter toIndex: index after the last bit to flip - /// - IndexOutOfBoundsException if {@code fromIndex} is negative, - /// or {@code toIndex} is negative, or {@code fromIndex} is - /// larger than {@code toIndex} - /// - 1.4 + /// - throws: _ANTLRError.IndexOutOfBounds_ if `fromIndex` is negative, + /// or `toIndex` is negative, or `fromIndex` is + /// larger than `toIndex` + /// public func flip(_ fromIndex: Int, _ toIndex: Int) throws { try BitSet.checkRange(fromIndex, toIndex) @@ -277,11 +312,12 @@ public class BitSet: Hashable, CustomStringConvertible { checkInvariants() } - /// Sets the bit at the specified index to {@code true}. - /// + /// + /// Sets the bit at the specified index to `true`. 
+ /// /// - parameter bitIndex: a bit index - /// - IndexOutOfBoundsException if the specified index is negative - /// - JDK1.0 + /// - throws: _ANTLRError.IndexOutOfBounds_ if the specified index is negative + /// public func set(_ bitIndex: Int) throws { if bitIndex < 0 { throw ANTLRError.indexOutOfBounds(msg: "bitIndex < 0: \(bitIndex)") @@ -296,12 +332,13 @@ public class BitSet: Hashable, CustomStringConvertible { checkInvariants() } + /// /// Sets the bit at the specified index to the specified value. - /// + /// /// - parameter bitIndex: a bit index /// - parameter value: a boolean value to set - /// - IndexOutOfBoundsException if the specified index is negative - /// - 1.4 + /// - throws: _ANTLRError.IndexOutOfBounds_ if the specified index is negative + /// public func set(_ bitIndex: Int, _ value: Bool) throws { if value { try set(bitIndex) @@ -310,15 +347,16 @@ public class BitSet: Hashable, CustomStringConvertible { } } - /// Sets the bits from the specified {@code fromIndex} (inclusive) to the - /// specified {@code toIndex} (exclusive) to {@code true}. - /// + /// + /// Sets the bits from the specified `fromIndex` (inclusive) to the + /// specified `toIndex` (exclusive) to `true`. 
+ /// /// - parameter fromIndex: index of the first bit to be set /// - parameter toIndex: index after the last bit to be set - /// - IndexOutOfBoundsException if {@code fromIndex} is negative, - /// or {@code toIndex} is negative, or {@code fromIndex} is - /// larger than {@code toIndex} - /// - 1.4 + /// - throws: _ANTLRError.IndexOutOfBounds_ if `fromIndex` is negative, + /// or `toIndex` is negative, or `fromIndex` is + /// larger than `toIndex` + /// public func set(_ fromIndex: Int, _ toIndex: Int) throws { try BitSet.checkRange(fromIndex, toIndex) @@ -355,16 +393,17 @@ public class BitSet: Hashable, CustomStringConvertible { checkInvariants() } - /// Sets the bits from the specified {@code fromIndex} (inclusive) to the - /// specified {@code toIndex} (exclusive) to the specified value. - /// + /// + /// Sets the bits from the specified `fromIndex` (inclusive) to the + /// specified `toIndex` (exclusive) to the specified value. + /// /// - parameter fromIndex: index of the first bit to be set /// - parameter toIndex: index after the last bit to be set /// - parameter value: value to set the selected bits to - /// - IndexOutOfBoundsException if {@code fromIndex} is negative, - /// or {@code toIndex} is negative, or {@code fromIndex} is - /// larger than {@code toIndex} - /// - 1.4 + /// - throws: _ANTLRError.IndexOutOfBounds_ if `fromIndex` is negative, + /// or `toIndex` is negative, or `fromIndex` is + /// larger than `toIndex` + /// public func set(_ fromIndex: Int, _ toIndex: Int, _ value: Bool) throws { if value { try set(fromIndex, toIndex) @@ -373,11 +412,13 @@ public class BitSet: Hashable, CustomStringConvertible { } } - /// Sets the bit specified by the index to {@code false}. - /// + /// + /// Sets the bit specified by the index to `false`. 
+ /// /// - parameter bitIndex: the index of the bit to be cleared - /// - IndexOutOfBoundsException if the specified index is negative + /// - throws: _ANTLRError.IndexOutOfBounds_ if the specified index is negative /// - JDK1.0 + /// public func clear(_ bitIndex: Int) throws { if bitIndex < 0 { throw ANTLRError.indexOutOfBounds(msg: "bitIndex < 0: \(bitIndex)") @@ -393,15 +434,16 @@ public class BitSet: Hashable, CustomStringConvertible { checkInvariants() } - /// Sets the bits from the specified {@code fromIndex} (inclusive) to the - /// specified {@code toIndex} (exclusive) to {@code false}. - /// + /// + /// Sets the bits from the specified `fromIndex` (inclusive) to the + /// specified `toIndex` (exclusive) to `false`. + /// /// - parameter fromIndex: index of the first bit to be cleared /// - parameter toIndex: index after the last bit to be cleared - /// - IndexOutOfBoundsException if {@code fromIndex} is negative, - /// or {@code toIndex} is negative, or {@code fromIndex} is - /// larger than {@code toIndex} - /// - 1.4 + /// - throws: _ANTLRError.IndexOutOfBounds_ if `fromIndex` is negative, + /// or `toIndex` is negative, or `fromIndex` is + /// larger than `toIndex` + /// public func clear(_ fromIndex: Int, _ toIndex: Int) throws { var toIndex = toIndex try BitSet.checkRange(fromIndex, toIndex) @@ -446,9 +488,9 @@ public class BitSet: Hashable, CustomStringConvertible { checkInvariants() } - /// Sets all of the bits in this BitSet to {@code false}. - /// - /// - 1.4 + /// + /// Sets all of the bits in this BitSet to `false`. + /// public func clear() { while wordsInUse > 0 { wordsInUse -= 1 @@ -456,14 +498,16 @@ public class BitSet: Hashable, CustomStringConvertible { } } + /// /// Returns the value of the bit with the specified index. The value - /// is {@code true} if the bit with the index {@code bitIndex} - /// is currently set in this {@code BitSet}; otherwise, the result - /// is {@code false}. 
- /// + /// is `true` if the bit with the index `bitIndex` + /// is currently set in this `BitSet`; otherwise, the result + /// is `false`. + /// /// - parameter bitIndex: the bit index /// - returns: the value of the bit with the specified index - /// - IndexOutOfBoundsException if the specified index is negative + /// - throws: _ANTLRError.IndexOutOfBounds_ if the specified index is negative + /// public func get(_ bitIndex: Int) throws -> Bool { if bitIndex < 0 { throw ANTLRError.indexOutOfBounds(msg: "bitIndex < 0: \(bitIndex)") @@ -477,16 +521,17 @@ public class BitSet: Hashable, CustomStringConvertible { && ((words[index] & ((Int64(1) << Int64(bitIndex % 64)))) != 0) } - /// Returns a new {@code BitSet} composed of bits from this {@code BitSet} - /// from {@code fromIndex} (inclusive) to {@code toIndex} (exclusive). - /// + /// + /// Returns a new `BitSet` composed of bits from this `BitSet` + /// from `fromIndex` (inclusive) to `toIndex` (exclusive). + /// /// - parameter fromIndex: index of the first bit to include /// - parameter toIndex: index after the last bit to include - /// - returns: a new {@code BitSet} from a range of this {@code BitSet} - /// - IndexOutOfBoundsException if {@code fromIndex} is negative, - /// or {@code toIndex} is negative, or {@code fromIndex} is - /// larger than {@code toIndex} - /// - 1.4 + /// - returns: a new `BitSet` from a range of this `BitSet` + /// - throws: _ANTLRError.IndexOutOfBounds_ if `fromIndex` is negative, + /// or `toIndex` is negative, or `fromIndex` is + /// larger than `toIndex` + /// public func get(_ fromIndex: Int, _ toIndex: Int) throws -> BitSet { var toIndex = toIndex try BitSet.checkRange(fromIndex, toIndex) @@ -547,23 +592,31 @@ public class BitSet: Hashable, CustomStringConvertible { return result } - /// Returns the index of the first bit that is set to {@code true} + /// + /// Equivalent to nextSetBit(0), but guaranteed not to throw an exception. 
+ /// + public func firstSetBit() -> Int { + return try! nextSetBit(0) + } + + /// + /// Returns the index of the first bit that is set to `true` /// that occurs on or after the specified starting index. If no such - /// bit exists then {@code -1} is returned. - /// - ///

      To iterate over the {@code true} bits in a {@code BitSet}, + /// bit exists then `-1` is returned. + /// + /// To iterate over the `true` bits in a `BitSet`, /// use the following loop: - /// - ///

       {@code
      -    /// for (int i = bs.nextSetBit(0); i >= 0; i = bs.nextSetBit(i+1)) {
      +    /// 
      +    /// `
      +    /// for (int i = bs.firstSetBit(); i >= 0; i = bs.nextSetBit(i+1)) {
           /// // operate on index i here
      -    /// }}
      - /// + /// `} + /// /// - parameter fromIndex: the index to start checking from (inclusive) - /// - returns: the index of the next set bit, or {@code -1} if there + /// - returns: the index of the next set bit, or `-1` if there /// is no such bit - /// - IndexOutOfBoundsException if the specified index is negative - /// - 1.4 + /// - throws: _ANTLRError.IndexOutOfBounds_ if the specified index is negative + /// public func nextSetBit(_ fromIndex: Int) throws -> Int { if fromIndex < 0 { throw ANTLRError.indexOutOfBounds(msg: "fromIndex < 0: \(fromIndex)") @@ -598,12 +651,12 @@ public class BitSet: Hashable, CustomStringConvertible { return 64 } var n: Int32 = 63 - y = Int32(truncatingBitPattern: i) + y = Int32(truncatingIfNeeded: i) if y != 0 { n = n - 32 x = y } else { - x = Int32(truncatingBitPattern: i >>> 32) + x = Int32(truncatingIfNeeded: i >>> 32) } y = x << 16 @@ -629,13 +682,14 @@ public class BitSet: Hashable, CustomStringConvertible { return Int(n - ((x << 1) >>> 31)) } - /// Returns the index of the first bit that is set to {@code false} + /// + /// Returns the index of the first bit that is set to `false` /// that occurs on or after the specified starting index. - /// + /// /// - parameter fromIndex: the index to start checking from (inclusive) /// - returns: the index of the next clear bit - /// - IndexOutOfBoundsException if the specified index is negative - /// - 1.4 + /// - throws: _ANTLRError.IndexOutOfBounds if the specified index is negative + /// public func nextClearBit(_ fromIndex: Int) throws -> Int { // Neither spec nor implementation handle bitsets of maximal length. // See 4816253. @@ -665,25 +719,27 @@ public class BitSet: Hashable, CustomStringConvertible { } } - /// Returns the index of the nearest bit that is set to {@code true} + /// + /// Returns the index of the nearest bit that is set to `true` /// that occurs on or before the specified starting index. 
- /// If no such bit exists, or if {@code -1} is given as the - /// starting index, then {@code -1} is returned. - /// - ///

      To iterate over the {@code true} bits in a {@code BitSet}, + /// If no such bit exists, or if `-1` is given as the + /// starting index, then `-1` is returned. + /// + /// To iterate over the `true` bits in a `BitSet`, /// use the following loop: - /// - ///

       {@code
      +    /// 
      +    /// `
           /// for (int i = bs.length(); (i = bs.previousSetBit(i-1)) >= 0; ) {
           /// // operate on index i here
      -    /// }}
      - /// + /// `} + /// /// - parameter fromIndex: the index to start checking from (inclusive) - /// - returns: the index of the previous set bit, or {@code -1} if there + /// - returns: the index of the previous set bit, or `-1` if there /// is no such bit - /// - IndexOutOfBoundsException if the specified index is less - /// than {@code -1} - /// - 1.7 + /// - throws: _ANTLRError.IndexOutOfBounds if the specified index is less + /// than `-1` + /// - note: 1.7 + /// public func previousSetBit(_ fromIndex: Int) throws -> Int { if fromIndex < 0 { if fromIndex == -1 { @@ -713,17 +769,19 @@ public class BitSet: Hashable, CustomStringConvertible { } } - /// Returns the index of the nearest bit that is set to {@code false} + /// + /// Returns the index of the nearest bit that is set to `false` /// that occurs on or before the specified starting index. - /// If no such bit exists, or if {@code -1} is given as the - /// starting index, then {@code -1} is returned. - /// + /// If no such bit exists, or if `-1` is given as the + /// starting index, then `-1` is returned. + /// /// - parameter fromIndex: the index to start checking from (inclusive) - /// - returns: the index of the previous clear bit, or {@code -1} if there + /// - returns: the index of the previous clear bit, or `-1` if there /// is no such bit - /// - IndexOutOfBoundsException if the specified index is less - /// than {@code -1} - /// - 1.7 + /// - throws: _ANTLRError.IndexOutOfBounds if the specified index is less + /// than `-1` + /// - note: 1.7 + /// public func previousClearBit(_ fromIndex: Int) throws -> Int { if fromIndex < 0 { if fromIndex == -1 { @@ -786,12 +844,13 @@ public class BitSet: Hashable, CustomStringConvertible { return Int(n) } - /// Returns the "logical size" of this {@code BitSet}: the index of - /// the highest set bit in the {@code BitSet} plus one. Returns zero - /// if the {@code BitSet} contains no set bits. 
- /// - /// - returns: the logical size of this {@code BitSet} - /// - 1.2 + /// + /// Returns the "logical size" of this `BitSet`: the index of + /// the highest set bit in the `BitSet` plus one. Returns zero + /// if the `BitSet` contains no set bits. + /// + /// - returns: the logical size of this `BitSet` + /// public func length() -> Int { if wordsInUse == 0 { return 0 @@ -801,22 +860,24 @@ public class BitSet: Hashable, CustomStringConvertible { (BitSet.BITS_PER_WORD - BitSet.numberOfLeadingZeros(words[wordsInUse - 1])) } - /// Returns true if this {@code BitSet} contains no bits that are set - /// to {@code true}. - /// - /// - returns: boolean indicating whether this {@code BitSet} is empty - /// - 1.4 + /// + /// Returns true if this `BitSet` contains no bits that are set + /// to `true`. + /// + /// - returns: boolean indicating whether this `BitSet` is empty + /// public func isEmpty() -> Bool { return wordsInUse == 0 } - /// Returns true if the specified {@code BitSet} has any bits set to - /// {@code true} that are also set to {@code true} in this {@code BitSet}. - /// - /// - parameter set: {@code BitSet} to intersect with - /// - returns: boolean indicating whether this {@code BitSet} intersects - /// the specified {@code BitSet} - /// - 1.4 + /// + /// Returns true if the specified `BitSet` has any bits set to + /// `true` that are also set to `true` in this `BitSet`. + /// + /// - parameter set: `BitSet` to intersect with + /// - returns: boolean indicating whether this `BitSet` intersects + /// the specified `BitSet` + /// public func intersects(_ set: BitSet) -> Bool { var i: Int = min(wordsInUse, set.wordsInUse) - 1 while i >= 0 { @@ -828,10 +889,11 @@ public class BitSet: Hashable, CustomStringConvertible { return false } - /// Returns the number of bits set to {@code true} in this {@code BitSet}. 
- /// - /// - returns: the number of bits set to {@code true} in this {@code BitSet} - /// - 1.4 + /// + /// Returns the number of bits set to `true` in this `BitSet`. + /// + /// - returns: the number of bits set to `true` in this `BitSet` + /// public func cardinality() -> Int { var sum: Int = 0 for i in 0..AND of this target bit set with the + /// + /// Performs a logical __AND__ of this target bit set with the /// argument bit set. This bit set is modified so that each bit in it - /// has the value {@code true} if and only if it both initially - /// had the value {@code true} and the corresponding bit in the - /// bit set argument also had the value {@code true}. - /// + /// has the value `true` if and only if it both initially + /// had the value `true` and the corresponding bit in the + /// bit set argument also had the value `true`. + /// /// - parameter set: a bit set + /// public func and(_ set: BitSet) { if self == set { return @@ -879,13 +943,15 @@ public class BitSet: Hashable, CustomStringConvertible { checkInvariants() } - /// Performs a logical OR of this bit set with the bit set + /// + /// Performs a logical __OR__ of this bit set with the bit set /// argument. This bit set is modified so that a bit in it has the - /// value {@code true} if and only if it either already had the - /// value {@code true} or the corresponding bit in the bit set - /// argument has the value {@code true}. - /// + /// value `true` if and only if it either already had the + /// value `true` or the corresponding bit in the bit set + /// argument has the value `true`. + /// /// - parameter set: a bit set + /// public func or(_ set: BitSet) { if self == set { return @@ -913,18 +979,19 @@ public class BitSet: Hashable, CustomStringConvertible { checkInvariants() } - /// Performs a logical XOR of this bit set with the bit set + /// + /// Performs a logical __XOR__ of this bit set with the bit set /// argument. 
This bit set is modified so that a bit in it has the - /// value {@code true} if and only if one of the following + /// value `true` if and only if one of the following /// statements holds: - ///
        - ///
      • The bit initially has the value {@code true}, and the - /// corresponding bit in the argument has the value {@code false}. - ///
      • The bit initially has the value {@code false}, and the - /// corresponding bit in the argument has the value {@code true}. - ///
      - /// + /// + /// * The bit initially has the value `true`, and the + /// corresponding bit in the argument has the value `false`. + /// * The bit initially has the value `false`, and the + /// corresponding bit in the argument has the value `true`. + /// /// - parameter set: a bit set + /// public func xor(_ set: BitSet) { let wordsInCommon: Int = min(wordsInUse, set.wordsInUse) @@ -949,12 +1016,13 @@ public class BitSet: Hashable, CustomStringConvertible { checkInvariants() } - /// Clears all of the bits in this {@code BitSet} whose corresponding - /// bit is set in the specified {@code BitSet}. - /// - /// - parameter set: the {@code BitSet} with which to mask this - /// {@code BitSet} - /// - 1.2 + /// + /// Clears all of the bits in this `BitSet` whose corresponding + /// bit is set in the specified `BitSet`. + /// + /// - parameter set: the `BitSet` with which to mask this + /// `BitSet` + /// public func andNot(_ set: BitSet) { // Perform logical (a & !b) on words in common var i: Int = min(wordsInUse, set.wordsInUse) - 1 @@ -967,22 +1035,24 @@ public class BitSet: Hashable, CustomStringConvertible { checkInvariants() } + /// /// Returns the hash code value for this bit set. The hash code depends - /// only on which bits are set within this {@code BitSet}. - /// - ///

      The hash code is defined to be the result of the following + /// only on which bits are set within this `BitSet`. + /// + /// The hash code is defined to be the result of the following /// calculation: - ///

       {@code
      +    /// `
           /// public int hashCode() {
           /// long h = 1234;
           /// long[] words = toLongArray();
           /// for (int i = words.length; --i >= 0; )
           /// h ^= words[i] * (i + 1);
           /// return (int)((h >> 32) ^ h);
      -    /// }}
      + /// `} /// Note that the hash code changes if the set of bits is altered. - /// + /// /// - returns: the hash code value for this bit set + /// public var hashValue: Int { var h: Int64 = 1234 var i: Int = wordsInUse @@ -995,11 +1065,13 @@ public class BitSet: Hashable, CustomStringConvertible { return Int(Int32((h >> 32) ^ h)) } + /// /// Returns the number of bits of space actually in use by this - /// {@code BitSet} to represent bit values. + /// `BitSet` to represent bit values. /// The maximum element in the set is the size - 1st element. - /// + /// /// - returns: the number of bits currently in this bit set + /// public func size() -> Int { return words.count * BitSet.BITS_PER_WORD } @@ -1008,9 +1080,11 @@ public class BitSet: Hashable, CustomStringConvertible { + /// /// Attempts to reduce internal storage used for the bits in this bit set. /// Calling this method may, but is not required to, affect the value - /// returned by a subsequent call to the {@link #size()} method. + /// returned by a subsequent call to the _#size()_ method. + /// private func trimToSize() { if wordsInUse != words.count { words = copyOf(words, wordsInUse) @@ -1019,56 +1093,48 @@ public class BitSet: Hashable, CustomStringConvertible { } + /// /// Returns a string representation of this bit set. For every index - /// for which this {@code BitSet} contains a bit in the set + /// for which this `BitSet` contains a bit in the set /// state, the decimal representation of that index is included in /// the result. Such indices are listed in order from lowest to /// highest, separated by ", " (a comma and a space) and /// surrounded by braces, resulting in the usual mathematical /// notation for a set of integers. - /// - ///

      Example: - ///

      -    /// BitSet drPepper = new BitSet();
      - /// Now {@code drPepper.toString()} returns "{@code {}}". - ///
      -    /// drPepper.set(2);
      - /// Now {@code drPepper.toString()} returns "{@code {2}}". - ///
      +    /// 
      +    /// Example:
      +    /// 
      +    /// BitSet drPepper = new BitSet();
      +    /// Now `drPepper.toString()` returns "`{`}".
      +    /// 
      +    /// drPepper.set(2);
      +    /// Now `drPepper.toString()` returns "`{2`}".
      +    /// 
           /// drPepper.set(4);
      -    /// drPepper.set(10);
      - /// Now {@code drPepper.toString()} returns "{@code {2, 4, 10}}". - /// + /// drPepper.set(10); + /// Now `drPepper.toString()` returns "`{2, 4, 10`}". + /// /// - returns: a string representation of this bit set + /// public var description: String { checkInvariants() //let numBits: Int = (wordsInUse > 128) ? // cardinality() : wordsInUse * BitSet.BITS_PER_WORD - let b: StringBuilder = StringBuilder() + let b = StringBuilder() b.append("{") - do { - var i: Int = try nextSetBit(0) - if i != -1 { - b.append(i) - i = try nextSetBit(i + 1) - while i >= 0 { - let endOfRun: Int = try nextClearBit(i) - repeat { - b.append(", ").append(i) - i += 1 - } while i < endOfRun - i = try nextSetBit(i + 1) - } -// for ; i >= 0; i = try nextSetBit(i + 1) { -// let endOfRun: Int = try nextClearBit(i) -// repeat { -// b.append(", ").append(i) -// } while ++i < endOfRun -// } + var i = firstSetBit() + if i != -1 { + b.append(i) + i = try! nextSetBit(i + 1) + while i >= 0 { + let endOfRun = try! nextClearBit(i) + repeat { + b.append(", ").append(i) + i += 1 + } while i < endOfRun + i = try! nextSetBit(i + 1) } - } catch { - print("BitSet description error") } b.append("}") return b.toString() diff --git a/runtime/Swift/Sources/Antlr4/misc/DoubleKeyMap.swift b/runtime/Swift/Sources/Antlr4/misc/DoubleKeyMap.swift index 6f04cc31d..3f23787c9 100644 --- a/runtime/Swift/Sources/Antlr4/misc/DoubleKeyMap.swift +++ b/runtime/Swift/Sources/Antlr4/misc/DoubleKeyMap.swift @@ -1,13 +1,16 @@ +/// /// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. /// Use of this file is governed by the BSD 3-clause license that /// can be found in the LICENSE.txt file in the project root. +/// +/// /// Sometimes we need to map a key to a value but key is two pieces of data. /// This nested hash table saves creating a single key each time we access /// map; avoids mem creation. 
- +/// public struct DoubleKeyMap { private var data: HashMap> = HashMap>() @discardableResult @@ -38,27 +41,4 @@ public struct DoubleKeyMap { public func get(_ k1: Key1) -> HashMap? { return data[k1] } - -// /** Get all values associated with primary key */ -// public func values(k1: Key1) -> LazyMapCollection<[Key2:Value], Value>? { -// let data2: Dictionary? = data[k1] -// if data2 == nil { -// return nil -// } -// return data2!.values -// } -// -// /** get all primary keys */ -// public func keySet() -> LazyMapCollection>, Key1> { -// return data.keys -// } -// -// /** get all secondary keys associated with a primary key */ -// public func keySet(k1: Key1) -> LazyMapCollection<[Key2:Value], Key2>? { -// let data2: Dictionary? = data[k1] -// if data2 == nil { -// return nil -// } -// return data2!.keys -// } } diff --git a/runtime/Swift/Sources/Antlr4/misc/HashMap.swift b/runtime/Swift/Sources/Antlr4/misc/HashMap.swift index 48bd3d427..a7cba83f7 100644 --- a/runtime/Swift/Sources/Antlr4/misc/HashMap.swift +++ b/runtime/Swift/Sources/Antlr4/misc/HashMap.swift @@ -1,6 +1,8 @@ +/// /// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. /// Use of this file is governed by the BSD 3-clause license that /// can be found in the LICENSE.txt file in the project root. +/// final class Entry: CustomStringConvertible { final var key: K @@ -8,7 +10,9 @@ final class Entry: CustomStringConvertible { final var next: Entry! final var hash: Int + /// /// Creates new entry. + /// init(_ h: Int, _ k: K, _ v: V, _ n: Entry!) 
{ value = v next = n @@ -37,7 +41,7 @@ final class Entry: CustomStringConvertible { var description: String { return "\(getKey())=\(getValue())" } } -func == (lhs: Entry, rhs: Entry) -> Bool { +func == (lhs: Entry, rhs: Entry) -> Bool { if lhs === rhs { return true } @@ -48,7 +52,7 @@ func == (lhs: Entry, rhs: Entry) -> Bool { } return false } -func == (lhs: Entry, rhs: Entry) -> Bool { +func == (lhs: Entry, rhs: Entry) -> Bool { if lhs === rhs { return true } @@ -67,37 +71,53 @@ func == (lhs: Entry, rhs: Entry) -> Bool public final class HashMap: Sequence { + /// /// The default initial capacity - MUST be a power of two. - let DEFAULT_INITIAL_CAPACITY: Int = 16 + /// + private let DEFAULT_INITIAL_CAPACITY: Int = 16 + /// /// The maximum capacity, used if a higher value is implicitly specified /// by either of the constructors with arguments. /// MUST be a power of two <= 1<<30. - let MAXIMUM_CAPACITY: Int = 1 << 30 + /// + private let MAXIMUM_CAPACITY: Int = 1 << 30 + /// /// The load factor used when none specified in constructor. - let DEFAULT_LOAD_FACTOR: Float = 0.75 + /// + private let DEFAULT_LOAD_FACTOR: Float = 0.75 + /// /// The table, resized as necessary. Length MUST Always be a power of two. + /// var table: [Entry?] + /// /// The number of key-value mappings contained in this map. + /// var size: Int = 0 + /// /// The next size value at which to resize (capacity * load factor). /// - + /// var threshold: Int = 0 + /// /// The load factor for the hash table. - /// + /// /// - + /// var loadFactor: Float = 0 + /// /// The number of times this HashMap has been structurally modified /// Structural modifications are those that change the number of mappings in /// the HashMap or otherwise modify its internal structure (e.g., /// rehash). This field is used to make iterators on Collection-views of /// the HashMap fail-fast. (See ConcurrentModificationException). 
+ /// var modCount: Int = 0 public init(count: Int) { @@ -137,14 +157,18 @@ public final class HashMap: Sequence return h ^ (h >>> 7) ^ (h >>> 4) } + /// /// Returns index for hash code h. + /// static func indexFor(_ h: Int, _ length: Int) -> Int { return h & (length-1) } + /// /// Returns true if this map contains no key-value mappings. - /// + /// /// - returns: true if this map contains no key-value mappings + /// public final var isEmpty: Bool { return size == 0 } @@ -164,21 +188,23 @@ public final class HashMap: Sequence public final var count: Int { return size } + /// /// Returns the value to which the specified key is mapped, - /// or {@code null} if this map contains no mapping for the key. - /// - ///

      More formally, if this map contains a mapping from a key - /// {@code k} to a value {@code v} such that {@code (key==null ? k==null : - /// key.equals(k))}, then this method returns {@code v}; otherwise - /// it returns {@code null}. (There can be at most one such mapping.) - /// - ///

      A return value of {@code null} does not necessarily + /// or `null` if this map contains no mapping for the key. + /// + /// More formally, if this map contains a mapping from a key + /// `k` to a value `v` such that `(key==null ? k==null : + /// key.equals(k))`, then this method returns `v`; otherwise + /// it returns `null`. (There can be at most one such mapping.) + /// + /// A return value of `null` does not necessarily /// indicate that the map contains no mapping for the key; it's also - /// possible that the map explicitly maps the key to {@code null}. - /// The {@link #containsKey containsKey} operation may be used to + /// possible that the map explicitly maps the key to `null`. + /// The _#containsKey containsKey_ operation may be used to /// distinguish these two cases. - /// + /// /// - seealso: #put(Object, Object) + /// public final func get(_ key: K) -> V? { let hash: Int = HashMap.hash(key.hashValue) var e = table[HashMap.indexFor(hash, table.count)] @@ -192,19 +218,23 @@ public final class HashMap: Sequence return nil } + /// /// Returns true if this map contains a mapping for the /// specified key. - /// + /// /// - parameter key: The key whose presence in this map is to be tested /// - returns: true if this map contains a mapping for the specified /// key. + /// public final func containsKey(_ key: K) -> Bool { return getEntry(key) != nil } + /// /// Returns the entry associated with the specified key in the /// HashMap. Returns null if the HashMap contains no mapping /// for the key. + /// final func getEntry(_ key: K) -> Entry! { let hash: Int = HashMap.hash(key.hashValue) var e = table[HashMap.indexFor(hash, table.count)] @@ -220,16 +250,18 @@ public final class HashMap: Sequence } + /// /// Associates the specified value with the specified key in this map. /// If the map previously contained a mapping for the key, the old /// value is replaced. 
- /// + /// /// - parameter key: key with which the specified value is to be associated /// - parameter value: value to be associated with the specified key /// - returns: the previous value associated with key, or /// null if there was no mapping for key. /// (A null return can also indicate that the map /// previously associated null with key.) + /// @discardableResult public final func put(_ key: K, _ value: V) -> V? { @@ -251,11 +283,13 @@ public final class HashMap: Sequence return nil } + /// /// Adds a new entry with the specified key, value and hash code to /// the specified bucket. It is the responsibility of this /// method to resize the table if appropriate. - /// + /// /// Subclass overrides this to alter the behavior of put method. + /// final func addEntry(_ hash: Int, _ key: K, _ value: V, _ bucketIndex: Int) { let e = table[bucketIndex] table[bucketIndex] = Entry(hash, key, value, e) @@ -265,18 +299,20 @@ public final class HashMap: Sequence resize(2 * table.count) } } + /// /// Rehashes the contents of this map into a new array with a /// larger capacity. This method is called automatically when the /// number of keys in this map reaches its threshold. - /// + /// /// If current capacity is MAXIMUM_CAPACITY, this method does not /// resize the map, but sets threshold to Integer.MAX_VALUE. /// This has the effect of preventing future calls. - /// + /// /// - parameter newCapacity: the new capacity, MUST be a power of two; /// must be greater than current capacity unless current /// capacity is MAXIMUM_CAPACITY (in which case value /// is irrelevant). + /// final func resize(_ newCapacity: Int) { let oldCapacity: Int = table.count if oldCapacity == MAXIMUM_CAPACITY { @@ -290,7 +326,9 @@ public final class HashMap: Sequence threshold = Int(Float(newCapacity) * loadFactor) } + /// /// Transfers all entries from current table to newTable. 
+ /// final func transfer(_ newTable: inout [Entry?]) { let newCapacity: Int = newTable.count @@ -309,8 +347,10 @@ public final class HashMap: Sequence } } } + /// /// Removes all of the mappings from this map. /// The map will be empty after this call returns. + /// public final func clear() { modCount += 1 let length = table.count diff --git a/runtime/Swift/Sources/Antlr4/misc/IntSet.swift b/runtime/Swift/Sources/Antlr4/misc/IntSet.swift index 157ab991c..cc448593f 100644 --- a/runtime/Swift/Sources/Antlr4/misc/IntSet.swift +++ b/runtime/Swift/Sources/Antlr4/misc/IntSet.swift @@ -1,139 +1,151 @@ +/// /// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. /// Use of this file is governed by the BSD 3-clause license that /// can be found in the LICENSE.txt file in the project root. +/// +/// /// A generic set of integers. -/// +/// /// - seealso: org.antlr.v4.runtime.misc.IntervalSet +/// public protocol IntSet { + /// /// Adds the specified value to the current set. - /// + /// /// - parameter el: the value to add - /// - /// - IllegalStateException if the current set is read-only + /// + /// - throws: _ANTLRError.illegalState_ if the current set is read-only + /// func add(_ el: Int) throws - /// Modify the current {@link org.antlr.v4.runtime.misc.IntSet} object to contain all elements that are - /// present in itself, the specified {@code set}, or both. - /// - /// - parameter set: The set to add to the current set. A {@code null} argument is + /// + /// Modify the current _org.antlr.v4.runtime.misc.IntSet_ object to contain all elements that are + /// present in itself, the specified `set`, or both. + /// + /// - parameter set: The set to add to the current set. A `null` argument is /// treated as though it were an empty set. 
- /// - returns: {@code this} (to support chained calls) - /// - /// - IllegalStateException if the current set is read-only - + /// - returns: `this` (to support chained calls) + /// + /// - throws: _ANTLRError.illegalState_ if the current set is read-only + /// func addAll(_ set: IntSet?) throws -> IntSet - /// Return a new {@link org.antlr.v4.runtime.misc.IntSet} object containing all elements that are - /// present in both the current set and the specified set {@code a}. - /// - /// - parameter a: The set to intersect with the current set. A {@code null} + /// + /// Return a new _org.antlr.v4.runtime.misc.IntSet_ object containing all elements that are + /// present in both the current set and the specified set `a`. + /// + /// - parameter a: The set to intersect with the current set. A `null` /// argument is treated as though it were an empty set. - /// - returns: A new {@link org.antlr.v4.runtime.misc.IntSet} instance containing the intersection of the - /// current set and {@code a}. The value {@code null} may be returned in + /// - returns: A new _org.antlr.v4.runtime.misc.IntSet_ instance containing the intersection of the + /// current set and `a`. The value `null` may be returned in /// place of an empty result set. + /// + func and(_ a: IntSet?) -> IntSet? - func and(_ a: IntSet?) throws -> IntSet? - - /// Return a new {@link org.antlr.v4.runtime.misc.IntSet} object containing all elements that are - /// present in {@code elements} but not present in the current set. The - /// following expressions are equivalent for input non-null {@link org.antlr.v4.runtime.misc.IntSet} - /// instances {@code x} and {@code y}. - /// - ///

        - ///
      • {@code x.complement(y)}
      • - ///
      • {@code y.subtract(x)}
      • - ///
      - /// - /// - parameter elements: The set to compare with the current set. A {@code null} + /// + /// Return a new _org.antlr.v4.runtime.misc.IntSet_ object containing all elements that are + /// present in `elements` but not present in the current set. The + /// following expressions are equivalent for input non-null _org.antlr.v4.runtime.misc.IntSet_ + /// instances `x` and `y`. + /// + /// * `x.complement(y)` + /// *`y.subtract(x)` + /// + /// - parameter elements: The set to compare with the current set. A `null` /// argument is treated as though it were an empty set. - /// - returns: A new {@link org.antlr.v4.runtime.misc.IntSet} instance containing the elements present in - /// {@code elements} but not present in the current set. The value - /// {@code null} may be returned in place of an empty result set. + /// - returns: A new _org.antlr.v4.runtime.misc.IntSet_ instance containing the elements present in + /// `elements` but not present in the current set. The value + /// `null` may be returned in place of an empty result set. + /// + func complement(_ elements: IntSet?) -> IntSet? - func complement(_ elements: IntSet?) throws -> IntSet? - - /// Return a new {@link org.antlr.v4.runtime.misc.IntSet} object containing all elements that are - /// present in the current set, the specified set {@code a}, or both. - /// - ///

      - /// This method is similar to {@link #addAll(org.antlr.v4.runtime.misc.IntSet)}, but returns a new - /// {@link org.antlr.v4.runtime.misc.IntSet} instance instead of modifying the current set.

      - /// - /// - parameter a: The set to union with the current set. A {@code null} argument + /// + /// Return a new _org.antlr.v4.runtime.misc.IntSet_ object containing all elements that are + /// present in the current set, the specified set `a`, or both. + /// + /// + /// This method is similar to _#addAll(org.antlr.v4.runtime.misc.IntSet)_, but returns a new + /// _org.antlr.v4.runtime.misc.IntSet_ instance instead of modifying the current set. + /// + /// - parameter a: The set to union with the current set. A `null` argument /// is treated as though it were an empty set. - /// - returns: A new {@link org.antlr.v4.runtime.misc.IntSet} instance containing the union of the current - /// set and {@code a}. The value {@code null} may be returned in place of an + /// - returns: A new _org.antlr.v4.runtime.misc.IntSet_ instance containing the union of the current + /// set and `a`. The value `null` may be returned in place of an /// empty result set. + /// + func or(_ a: IntSet) -> IntSet - func or(_ a: IntSet) throws -> IntSet - - /// Return a new {@link org.antlr.v4.runtime.misc.IntSet} object containing all elements that are - /// present in the current set but not present in the input set {@code a}. + /// + /// Return a new _org.antlr.v4.runtime.misc.IntSet_ object containing all elements that are + /// present in the current set but not present in the input set `a`. /// The following expressions are equivalent for input non-null - /// {@link org.antlr.v4.runtime.misc.IntSet} instances {@code x} and {@code y}. - /// - ///
        - ///
      • {@code y.subtract(x)}
      • - ///
      • {@code x.complement(y)}
      • - ///
      - /// - /// - parameter a: The set to compare with the current set. A {@code null} + /// _org.antlr.v4.runtime.misc.IntSet_ instances `x` and `y`. + /// + /// * `y.subtract(x)` + /// * `x.complement(y)` + /// + /// - parameter a: The set to compare with the current set. A `null` /// argument is treated as though it were an empty set. - /// - returns: A new {@link org.antlr.v4.runtime.misc.IntSet} instance containing the elements present in - /// {@code elements} but not present in the current set. The value - /// {@code null} may be returned in place of an empty result set. - - func subtract(_ a: IntSet?) throws -> IntSet + /// - returns: A new _org.antlr.v4.runtime.misc.IntSet_ instance containing the elements present in + /// `elements` but not present in the current set. The value + /// `null` may be returned in place of an empty result set. + /// + func subtract(_ a: IntSet?) -> IntSet + /// /// Return the total number of elements represented by the current set. - /// + /// /// - returns: the total number of elements represented by the current set, /// regardless of the manner in which the elements are stored. + /// func size() -> Int - /// Returns {@code true} if this set contains no elements. - /// - /// - returns: {@code true} if the current set contains no elements; otherwise, - /// {@code false}. + /// + /// Returns `true` if this set contains no elements. + /// + /// - returns: `true` if the current set contains no elements; otherwise, + /// `false`. + /// func isNil() -> Bool - /// {@inheritDoc} - - //func equals(obj : AnyObject) -> Bool; - - /// Returns the single value contained in the set, if {@link #size} is 1; - /// otherwise, returns {@link org.antlr.v4.runtime.Token#INVALID_TYPE}. - /// - /// - returns: the single value contained in the set, if {@link #size} is 1; - /// otherwise, returns {@link org.antlr.v4.runtime.Token#INVALID_TYPE}. 
+ /// + /// Returns the single value contained in the set, if _#size_ is 1; + /// otherwise, returns _org.antlr.v4.runtime.Token#INVALID_TYPE_. + /// + /// - returns: the single value contained in the set, if _#size_ is 1; + /// otherwise, returns _org.antlr.v4.runtime.Token#INVALID_TYPE_. + /// func getSingleElement() -> Int - /// Returns {@code true} if the set contains the specified element. - /// + /// + /// Returns `true` if the set contains the specified element. + /// /// - parameter el: The element to check for. - /// - returns: {@code true} if the set contains {@code el}; otherwise {@code false}. + /// - returns: `true` if the set contains `el`; otherwise `false`. + /// func contains(_ el: Int) -> Bool + /// /// Removes the specified value from the current set. If the current set does /// not contain the element, no changes are made. - /// + /// /// - parameter el: the value to remove - /// - /// - IllegalStateException if the current set is read-only + /// + /// - throws: _ANTLRError.illegalState_ if the current set is read-only + /// func remove(_ el: Int) throws + /// /// Return a list containing the elements represented by the current set. The /// list is returned in ascending numerical order. - /// + /// /// - returns: A list containing all element present in the current set, sorted /// in ascending numerical order. - - func toList() -> Array - - /// {@inheritDoc} + /// + func toList() -> [Int] func toString() -> String } diff --git a/runtime/Swift/Sources/Antlr4/misc/Interval.swift b/runtime/Swift/Sources/Antlr4/misc/Interval.swift index 56050b241..8e926a6ee 100644 --- a/runtime/Swift/Sources/Antlr4/misc/Interval.swift +++ b/runtime/Swift/Sources/Antlr4/misc/Interval.swift @@ -1,9 +1,13 @@ +/// /// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. /// Use of this file is governed by the BSD 3-clause license that /// can be found in the LICENSE.txt file in the project root. 
+/// +/// /// An immutable inclusive interval a..b +/// public class Interval: Hashable { public static let INTERVAL_POOL_MAX_VALUE: Int = 1000 @@ -27,11 +31,13 @@ public class Interval: Hashable { self.b = b } + /// /// Interval objects are used readonly so share all with the /// same single value a==b up to some max size. Use an array as a perfect hash. /// Return shared object for 0..INTERVAL_POOL_MAX_VALUE or a new /// Interval object with a..a in it. On Java.g4, 218623 IntervalSets /// have a..a (set with 1 element). + /// public static func of(_ a: Int, _ b: Int) -> Interval { // cache just a..a if a != b || a < 0 || a > INTERVAL_POOL_MAX_VALUE { @@ -44,8 +50,10 @@ public class Interval: Hashable { return cache[a]! } + /// /// return number of elements between a and b inclusively. x..x is length 1. /// if b < a, then length is 0. 9..10 has length 2. + /// public func length() -> Int { if b < a { return 0 @@ -60,37 +68,51 @@ public class Interval: Hashable { hash = hash * 31 + b return hash } + /// /// Does this start completely before other? Disjoint + /// public func startsBeforeDisjoint(_ other: Interval) -> Bool { return self.a < other.a && self.b < other.a } + /// /// Does this start at or before other? Nondisjoint + /// public func startsBeforeNonDisjoint(_ other: Interval) -> Bool { return self.a <= other.a && self.b >= other.a } + /// /// Does this.a start after other.b? May or may not be disjoint + /// public func startsAfter(_ other: Interval) -> Bool { return self.a > other.a } + /// /// Does this start completely after other? Disjoint + /// public func startsAfterDisjoint(_ other: Interval) -> Bool { return self.a > other.b } + /// /// Does this start after other? NonDisjoint + /// public func startsAfterNonDisjoint(_ other: Interval) -> Bool { return self.a > other.a && self.a <= other.b // this.b>=other.b implied } + /// /// Are both ranges disjoint? I.e., no overlap? 
+ /// public func disjoint(_ other: Interval) -> Bool { return startsBeforeDisjoint(other) || startsAfterDisjoint(other) } + /// /// Are two intervals adjacent such as 0..41 and 42..42? + /// public func adjacent(_ other: Interval) -> Bool { return self.a == other.b + 1 || self.b == other.a - 1 } @@ -99,20 +121,26 @@ public class Interval: Hashable { return other.a >= self.a && other.b <= self.b } + /// /// Return the interval computed from combining this and other + /// public func union(_ other: Interval) -> Interval { return Interval.of(min(a, other.a), max(b, other.b)) } + /// /// Return the interval in common between this and o + /// public func intersection(_ other: Interval) -> Interval { return Interval.of(max(a, other.a), min(b, other.b)) } + /// /// Return the interval with elements from this not in other; /// other must not be totally enclosed (properly contained) /// within this, which would result in two disjoint intervals /// instead of the single one returned by this method. + /// public func differenceNotProperlyContained(_ other: Interval) -> Interval? { var diff: Interval? = nil // other.a to left of this.a (or same) diff --git a/runtime/Swift/Sources/Antlr4/misc/IntervalSet.swift b/runtime/Swift/Sources/Antlr4/misc/IntervalSet.swift index ab765da61..345f829cc 100644 --- a/runtime/Swift/Sources/Antlr4/misc/IntervalSet.swift +++ b/runtime/Swift/Sources/Antlr4/misc/IntervalSet.swift @@ -1,72 +1,71 @@ +/// /// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. /// Use of this file is governed by the BSD 3-clause license that /// can be found in the LICENSE.txt file in the project root. +/// -/// This class implements the {@link org.antlr.v4.runtime.misc.IntSet} backed by a sorted array of +/// +/// This class implements the _org.antlr.v4.runtime.misc.IntSet_ backed by a sorted array of /// non-overlapping intervals. 
It is particularly efficient for representing /// large collections of numbers, where the majority of elements appear as part /// of a sequential range of numbers that are all part of the set. For example, /// the set { 1, 2, 3, 4, 7, 8 } may be represented as { [1, 4], [7, 8] }. -/// -///

      +/// +/// /// This class is able to represent sets containing any combination of values in -/// the range {@link Integer#MIN_VALUE} to {@link Integer#MAX_VALUE} -/// (inclusive).

      +/// the range _Integer#MIN_VALUE_ to _Integer#MAX_VALUE_ +/// (inclusive). +/// public class IntervalSet: IntSet, Hashable, CustomStringConvertible { public static let COMPLETE_CHAR_SET: IntervalSet = { - let set = try! IntervalSet.of(Lexer.MIN_CHAR_VALUE, Lexer.MAX_CHAR_VALUE) + let set = IntervalSet.of(Lexer.MIN_CHAR_VALUE, Lexer.MAX_CHAR_VALUE) try! set.setReadonly(true) return set }() public static let EMPTY_SET: IntervalSet = { - let set = try! IntervalSet() + let set = IntervalSet() try! set.setReadonly(true) return set }() + /// /// The list of sorted, disjoint intervals. - internal var intervals: Array + /// + internal var intervals: [Interval] - internal var readonly: Bool = false - - public init(_ intervals: Array) { + internal var readonly = false + public init(_ intervals: [Interval]) { self.intervals = intervals } - public convenience init(_ set: IntervalSet) throws { - try self.init() - try addAll(set) + public convenience init(_ set: IntervalSet) { + self.init() + try! addAll(set) } - public init(_ els: Int...) throws { - if els.count == 0 { - intervals = Array() // most sets are 1 or 2 elements + public init(_ els: Int...) { + if els.isEmpty { + intervals = [Interval]() // most sets are 1 or 2 elements } else { - intervals = Array() - for e: Int in els { - try add(e) + intervals = [Interval]() + for e in els { + try! add(e) } } } - /// Create a set with a single element, el. - - public static func of(_ a: Int) throws -> IntervalSet { - let s: IntervalSet = try IntervalSet() - try s.add(a) - return s - } - + /// /// Create a set with all ints within range [a..b] (inclusive) - public static func of(_ a: Int, _ b: Int) throws -> IntervalSet { - let s: IntervalSet = try IntervalSet() - try s.add(a, b) + /// + public static func of(_ a: Int, _ b: Int) -> IntervalSet { + let s = IntervalSet() + try! 
s.add(a, b) return s } @@ -77,22 +76,26 @@ public class IntervalSet: IntSet, Hashable, CustomStringConvertible { intervals.removeAll() } + /// /// Add a single element to the set. An isolated element is stored /// as a range el..el. + /// public func add(_ el: Int) throws { if readonly { throw ANTLRError.illegalState(msg: "can't alter readonly IntervalSet") } - try add(el, el) + try! add(el, el) } + /// /// Add interval; i.e., add all integers from a to b to set. /// If b<a, do nothing. /// Keep list in sorted order (by left range value). /// If overlap, combine ranges. For example, /// If this is {1..5, 10..20}, adding 6..7 yields /// {1..5, 6..7, 10..20}. Adding 4..8 yields {1..8, 10..20}. + /// public func add(_ a: Int, _ b: Int) throws { try add(Interval.of(a, b)) } @@ -112,41 +115,39 @@ public class IntervalSet: IntSet, Hashable, CustomStringConvertible { while i < intervals.count { - let r: Interval = intervals[i] + let r = intervals[i] if addition == r { return } if addition.adjacent(r) || !addition.disjoint(r) { // next to each other, make a single larger interval - let bigger: Interval = addition.union(r) + let bigger = addition.union(r) //iter.set(bigger); intervals[i] = bigger // make sure we didn't just create an interval that // should be merged with next interval in list - //while iter.hasNext() { while i < intervals.count - 1 { i += 1 - let next: Interval = intervals[i] //iter.next(); + let next = intervals[i] if !bigger.adjacent(next) && bigger.disjoint(next) { break } // if we bump up against or overlap next, merge + /// /// iter.remove(); // remove this one /// iter.previous(); // move backwards to what we just set /// iter.set(bigger.union(next)); // set to 3 merged ones /// iter.next(); // first call to next after previous duplicates the resul + /// intervals.remove(at: i) i -= 1 intervals[i] = bigger.union(next) - } return } if addition.startsBeforeDisjoint(r) { // insert before r - //iter.previous(); - //iter.add(addition); 
intervals.insert(addition, at: i) return } @@ -159,11 +160,13 @@ public class IntervalSet: IntSet, Hashable, CustomStringConvertible { intervals.append(addition) } + /// /// combine all sets in the array returned the or'd value - public func or(_ sets: [IntervalSet]) throws -> IntSet { - let r: IntervalSet = try IntervalSet() - for s: IntervalSet in sets { - try r.addAll(s) + /// + public func or(_ sets: [IntervalSet]) -> IntSet { + let r = IntervalSet() + for s in sets { + try! r.addAll(s) } return r } @@ -176,14 +179,12 @@ public class IntervalSet: IntSet, Hashable, CustomStringConvertible { } if let other = set as? IntervalSet { // walk set and add each interval - let n: Int = other.intervals.count - for i in 0.. IntSet? { - return try self.complement(IntervalSet.of(minElement, maxElement)) + public func complement(_ minElement: Int, _ maxElement: Int) -> IntSet? { + return complement(IntervalSet.of(minElement, maxElement)) } - /// {@inheritDoc} + /// + /// + /// - public func complement(_ vocabulary: IntSet?) throws -> IntSet? { - guard let vocabulary = vocabulary , !vocabulary.isNil() else { + public func complement(_ vocabulary: IntSet?) -> IntSet? { + guard let vocabulary = vocabulary, !vocabulary.isNil() else { return nil // nothing in common with null set } var vocabularyIS: IntervalSet if let vocabulary = vocabulary as? IntervalSet { vocabularyIS = vocabulary } else { - vocabularyIS = try IntervalSet() - try vocabularyIS.addAll(vocabulary) + vocabularyIS = IntervalSet() + try! vocabularyIS.addAll(vocabulary) } - return try vocabularyIS.subtract(self) + return vocabularyIS.subtract(self) } - public func subtract(_ a: IntSet?) throws -> IntSet { - guard let a = a , !a.isNil() else { - return try IntervalSet(self) + public func subtract(_ a: IntSet?) -> IntSet { + guard let a = a, !a.isNil() else { + return IntervalSet(self) } if let a = a as? 
IntervalSet { - return try subtract(self, a) + return subtract(self, a) } - let other: IntervalSet = try IntervalSet() - try other.addAll(a) - return try subtract(self, other) + let other = IntervalSet() + try! other.addAll(a) + return subtract(self, other) } + /// /// Compute the set difference between two interval sets. The specific - /// operation is {@code left - right}. If either of the input sets is - /// {@code null}, it is treated as though it was an empty set. + /// operation is `left - right`. If either of the input sets is + /// `null`, it is treated as though it was an empty set. + /// - public func subtract(_ left: IntervalSet?, _ right: IntervalSet?) throws -> IntervalSet { + public func subtract(_ left: IntervalSet?, _ right: IntervalSet?) -> IntervalSet { - guard let left = left , !left.isNil() else { - return try IntervalSet() + guard let left = left, !left.isNil() else { + return IntervalSet() } - let result: IntervalSet = try IntervalSet(left) + let result = IntervalSet(left) - guard let right = right , !right.isNil() else { + guard let right = right, !right.isNil() else { // right set has no elements; just return the copy of the current set return result } - var resultI: Int = 0 - var rightI: Int = 0 + var resultI = 0 + var rightI = 0 while resultI < result.intervals.count && rightI < right.intervals.count { - let resultInterval: Interval = result.intervals[resultI] - let rightInterval: Interval = right.intervals[rightI] + let resultInterval = result.intervals[resultI] + let rightInterval = right.intervals[rightI] // operation: (resultInterval - rightInterval) and update indexes @@ -274,9 +279,7 @@ public class IntervalSet: IntSet, Hashable, CustomStringConvertible { if let afterCurrent = afterCurrent { // split the current interval into two result.intervals[resultI] = beforeCurrent - //result.intervals.set(beforeCurrent,resultI); result.intervals.insert(afterCurrent, at: resultI + 1) - //result.intervals.add(, afterCurrent); resultI += 1 rightI 
+= 1 continue @@ -308,33 +311,34 @@ public class IntervalSet: IntSet, Hashable, CustomStringConvertible { } - public func or(_ a: IntSet) throws -> IntSet { - let o: IntervalSet = try IntervalSet() - try o.addAll(self) - try o.addAll(a) + public func or(_ a: IntSet) -> IntSet { + let o = IntervalSet() + try! o.addAll(self) + try! o.addAll(a) return o } - /// {@inheritDoc} + /// + /// + /// - public func and(_ other: IntSet?) throws -> IntSet? { + public func and(_ other: IntSet?) -> IntSet? { if other == nil { - //|| !(other instanceof IntervalSet) ) { return nil // nothing in common with null set } - var myIntervals: Array = self.intervals - var theirIntervals: Array = (other as! IntervalSet).intervals + var myIntervals = self.intervals + var theirIntervals = (other as! IntervalSet).intervals var intersection: IntervalSet? = nil - let mySize: Int = myIntervals.count - let theirSize: Int = theirIntervals.count - var i: Int = 0 - var j: Int = 0 + let mySize = myIntervals.count + let theirSize = theirIntervals.count + var i = 0 + var j = 0 // iterate down both interval lists looking for nondisjoint intervals while i < mySize && j < theirSize { - let mine: Interval = myIntervals[i] - let theirs: Interval = theirIntervals[j] - //System.out.println("mine="+mine+" and theirs="+theirs); + let mine = myIntervals[i] + let theirs = theirIntervals[j] + if mine.startsBeforeDisjoint(theirs) { // move this iterator looking for interval that might overlap i += 1 @@ -346,26 +350,26 @@ public class IntervalSet: IntSet, Hashable, CustomStringConvertible { if mine.properlyContains(theirs) { // overlap, add intersection, get next theirs if intersection == nil { - intersection = try IntervalSet() + intersection = IntervalSet() } - try intersection!.add(mine.intersection(theirs)) + try! 
intersection!.add(mine.intersection(theirs)) j += 1 } else { if theirs.properlyContains(mine) { // overlap, add intersection, get next mine if intersection == nil { - intersection = try IntervalSet() + intersection = IntervalSet() } - try intersection!.add(mine.intersection(theirs)) + try! intersection!.add(mine.intersection(theirs)) i += 1 } else { if !mine.disjoint(theirs) { // overlap, add intersection if intersection == nil { - intersection = try IntervalSet() + intersection = IntervalSet() } - try intersection!.add(mine.intersection(theirs)) + try! intersection!.add(mine.intersection(theirs)) // Move the iterator of lower range [a..b], but not // the upper range as it may contain elements that will collide // with the next iterator. So, if mine=[0..115] and @@ -387,19 +391,19 @@ public class IntervalSet: IntSet, Hashable, CustomStringConvertible { } } if intersection == nil { - return try IntervalSet() + return IntervalSet() } return intersection } - /// {@inheritDoc} + /// + /// + /// public func contains(_ el: Int) -> Bool { - let n: Int = intervals.count - for i in 0..=I.a && el<=I.b ) { -/// return true; // found in this interval -/// } -/// } -/// return false; } - /// {@inheritDoc} + /// + /// + /// public func isNil() -> Bool { return intervals.isEmpty } - /// {@inheritDoc} + /// + /// + /// public func getSingleElement() -> Int { - //intervals=nil && intervals.count==1 ) if intervals.count == 1 { - let I: Interval = intervals[0] - if I.a == I.b { - return I.a + let interval = intervals[0] + if interval.a == interval.b { + return interval.a } } return CommonToken.INVALID_TYPE } + /// /// Returns the maximum value contained in the set. - /// + /// /// - returns: the maximum value contained in the set. If the set is empty, this - /// method returns {@link org.antlr.v4.runtime.Token#INVALID_TYPE}. + /// method returns _org.antlr.v4.runtime.Token#INVALID_TYPE_. 
+ /// public func getMaxElement() -> Int { if isNil() { return CommonToken.INVALID_TYPE } - let last: Interval = intervals[intervals.count - 1] + let last = intervals[intervals.count - 1] return last.b } + /// /// Returns the minimum value contained in the set. - /// + /// /// - returns: the minimum value contained in the set. If the set is empty, this - /// method returns {@link org.antlr.v4.runtime.Token#INVALID_TYPE}. + /// method returns _org.antlr.v4.runtime.Token#INVALID_TYPE_. + /// public func getMinElement() -> Int { if isNil() { return CommonToken.INVALID_TYPE @@ -463,43 +464,47 @@ public class IntervalSet: IntSet, Hashable, CustomStringConvertible { return intervals[0].a } + /// /// Return a list of Interval objects. - public func getIntervals() -> Array { + /// + public func getIntervals() -> [Interval] { return intervals } public func hashCode() -> Int { - var hash: Int = MurmurHash.initialize() + var hash = MurmurHash.initialize() for I: Interval in intervals { hash = MurmurHash.update(hash, I.a) hash = MurmurHash.update(hash, I.b) } - hash = MurmurHash.finish(hash, intervals.count * 2) - return hash + return MurmurHash.finish(hash, intervals.count * 2) } public var hashValue: Int { - var hash: Int = MurmurHash.initialize() + var hash = MurmurHash.initialize() for I: Interval in intervals { hash = MurmurHash.update(hash, I.a) hash = MurmurHash.update(hash, I.b) } - hash = MurmurHash.finish(hash, intervals.count * 2) - return hash + return MurmurHash.finish(hash, intervals.count * 2) } + /// /// Are two IntervalSets equal? Because all intervals are sorted /// and disjoint, equals is a simple linear walk over both lists /// to make sure they are the same. Interval.equals() is used /// by the List.equals() method to check the ranges. + /// + /// /// public func equals(obj : AnyObject) -> Bool { /// if ( obj==nil || !(obj is IntervalSet) ) { /// return false; /// } /// var other : IntervalSet = obj as! 
IntervalSet; /// return self.intervals.equals(other.intervals); + /// public var description: String { return toString(false) @@ -509,25 +514,21 @@ public class IntervalSet: IntSet, Hashable, CustomStringConvertible { } public func toString(_ elemAreChar: Bool) -> String { - let buf: StringBuilder = StringBuilder() - //if ( self.intervals==nil || self.intervals.isEmpty() ) { + let buf = StringBuilder() if self.intervals.isEmpty { return "{}" } if self.size() > 1 { buf.append("{") } - //var iter : Iterator = self.intervals.iterator(); - //while iter.hasNext() { var first = true - for I: Interval in intervals { + for interval in intervals { if !first { buf.append(", ") } first = false - //var I : Interval = iter.next(); - let a: Int = I.a - let b: Int = I.b + let a = interval.a + let b = interval.b if a == b { if a == CommonToken.EOF { buf.append("") @@ -545,9 +546,6 @@ public class IntervalSet: IntSet, Hashable, CustomStringConvertible { buf.append(a).append("..").append(b) } } - //if ( iter.hasNext() ) { - // buf.append(", "); - //} } if self.size() > 1 { buf.append("}") @@ -555,14 +553,8 @@ public class IntervalSet: IntSet, Hashable, CustomStringConvertible { return buf.toString() } - /// - Use {@link #toString(org.antlr.v4.runtime.Vocabulary)} instead. - ////@Deprecated - public func toString(_ tokenNames: [String?]?) 
-> String { - return toString(Vocabulary.fromTokenNames(tokenNames)) - } - public func toString(_ vocabulary: Vocabulary) -> String { - let buf: StringBuilder = StringBuilder() + let buf = StringBuilder() if self.intervals.isEmpty { return "{}" @@ -572,14 +564,14 @@ public class IntervalSet: IntSet, Hashable, CustomStringConvertible { } var first = true - for I: Interval in intervals { + for interval in intervals { if !first { buf.append(", ") } first = false - //var I : Interval = iter.next(); - let a: Int = I.a - let b: Int = I.b + + let a = interval.a + let b = interval.b if a == b { buf.append(elementName(vocabulary, a)) } else { @@ -598,13 +590,6 @@ public class IntervalSet: IntSet, Hashable, CustomStringConvertible { return buf.toString() } - /// - Use {@link #elementName(org.antlr.v4.runtime.Vocabulary, int)} instead. - ////@Deprecated - internal func elementName(_ tokenNames: [String?]?, _ a: Int) -> String { - return elementName(Vocabulary.fromTokenNames(tokenNames), a) - } - - internal func elementName(_ vocabulary: Vocabulary, _ a: Int) -> String { if a == CommonToken.EOF { return "" @@ -619,43 +604,25 @@ public class IntervalSet: IntSet, Hashable, CustomStringConvertible { public func size() -> Int { - var n: Int = 0 - let numIntervals: Int = intervals.count + var n = 0 + let numIntervals = intervals.count if numIntervals == 1 { - let firstInterval: Interval = self.intervals[0] + let firstInterval = intervals[0] return firstInterval.b - firstInterval.a + 1 } for i in 0.. Array { - var values: Array = Array() - let n: Int = intervals.count - for i in 0.. Array { - var values: Array = Array() - let n: Int = intervals.count - for i in 0.. 
[Int] { + var values = [Int]() + for interval in intervals { + let a = interval.a + let b = interval.b for v in a...b { values.append(v) @@ -665,28 +632,27 @@ public class IntervalSet: IntSet, Hashable, CustomStringConvertible { } public func toSet() -> Set { - var s: Set = Set() - for I: Interval in intervals { - let a: Int = I.a - let b: Int = I.b + var s = Set() + for interval in intervals { + let a = interval.a + let b = interval.b for v in a...b { s.insert(v) - //s.add(v); } } return s } + /// /// Get the ith element of ordered set. Used only by RandomPhrase so /// don't bother to implement if you're not doing that for a new /// ANTLR code gen target. + /// public func get(_ i: Int) -> Int { - let n: Int = intervals.count - var index: Int = 0 - for j in 0.. [Int] { - return toIntegerList() - } - - public func remove(_ el: Int) throws { if readonly { throw ANTLRError.illegalState(msg: "can't alter readonly IntervalSet") } - let n: Int = intervals.count - for i in 0.. a && el < b { // found in this interval - let oldb: Int = I.b - I.b = el - 1 // [a..x-1] + let oldb = interval.b + interval.b = el - 1 // [a..x-1] try add(el + 1, oldb) // add [x+1..b] } } diff --git a/runtime/Swift/Sources/Antlr4/misc/MultiMap.swift b/runtime/Swift/Sources/Antlr4/misc/MultiMap.swift index 14765fb60..e46f6542d 100644 --- a/runtime/Swift/Sources/Antlr4/misc/MultiMap.swift +++ b/runtime/Swift/Sources/Antlr4/misc/MultiMap.swift @@ -1,8 +1,8 @@ +/// /// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. /// Use of this file is governed by the BSD 3-clause license that /// can be found in the LICENSE.txt file in the project root. 
- - +/// public class MultiMap { private var mapping = [K: Array < V>]() public func map(_ key: K, _ value: V) { diff --git a/runtime/Swift/Sources/Antlr4/misc/MurmurHash.swift b/runtime/Swift/Sources/Antlr4/misc/MurmurHash.swift index 898c40b4b..1386e8f09 100644 --- a/runtime/Swift/Sources/Antlr4/misc/MurmurHash.swift +++ b/runtime/Swift/Sources/Antlr4/misc/MurmurHash.swift @@ -5,120 +5,162 @@ -/** - * - * @author Sam Harwell - */ +/// +/// https://en.wikipedia.org/wiki/MurmurHash +/// +/// - Author: Sam Harwell +/// public final class MurmurHash { - private static let DEFAULT_SEED: Int = 0 + private static let DEFAULT_SEED: UInt32 = 0 - /** - * Initialize the hash using the default seed value. - * - * @return the intermediate hash value - */ - public static func initialize() -> Int { + private static let c1 = UInt32(0xCC9E2D51) + private static let c2 = UInt32(0x1B873593) + private static let r1 = UInt32(15) + private static let r2 = UInt32(13) + private static let m = UInt32(5) + private static let n = UInt32(0xE6546B64) + + /// + /// Initialize the hash using the default seed value. + /// + /// - Returns: the intermediate hash value + /// + public static func initialize() -> UInt32 { return initialize(DEFAULT_SEED) } - /** - * Initialize the hash using the specified {@code seed}. - * - * @param seed the seed - * @return the intermediate hash value - */ - public static func initialize(_ seed: Int) -> Int { + /// + /// Initialize the hash using the specified `seed`. + /// + /// - Parameter seed: the seed + /// - Returns: the intermediate hash value + /// + public static func initialize(_ seed: UInt32) -> UInt32 { return seed } - /** - * Update the intermediate hash value for the next input {@code value}. 
- * - * @param hash the intermediate hash value - * @param value the value to add to the current hash - * @return the updated intermediate hash value - */ - public static func update2(_ hashIn: Int, _ value: Int) -> Int { + private static func calcK(_ value: UInt32) -> UInt32 { + var k = value + k = k &* c1 + k = (k << r1) | (k >> (32 - r1)) + k = k &* c2 + return k + } - let c1: Int32 = -862048943//0xCC9E2D51; - let c2: Int32 = 0x1B873593 - let r1: Int32 = 15 - let r2: Int32 = 13 - let m: Int32 = 5 - let n: Int32 = -430675100//0xE6546B64; - - var k: Int32 = Int32(truncatingBitPattern: value) - k = Int32.multiplyWithOverflow(k, c1).0 - // (k,_) = UInt32.multiplyWithOverflow(k, c1) ;//( k * c1); - //TODO: CHECKE >>> - k = (k << r1) | (k >>> (Int32(32) - r1)) //k = (k << r1) | (k >>> (32 - r1)); - //k = UInt32 (truncatingBitPattern:Int64(Int64(k) * Int64(c2)));//( k * c2); - //(k,_) = UInt32.multiplyWithOverflow(k, c2) - k = Int32.multiplyWithOverflow(k, c2).0 - var hash = Int32(hashIn) + /// + /// Update the intermediate hash value for the next input `value`. + /// + /// - Parameter hash: the intermediate hash value + /// - Parameter value: the value to add to the current hash + /// - Returns: the updated intermediate hash value + /// + public static func update2(_ hashIn: UInt32, _ value: Int) -> UInt32 { + let k = calcK(UInt32(truncatingIfNeeded: value)) + var hash = hashIn hash = hash ^ k - hash = (hash << r2) | (hash >>> (Int32(32) - r2))//hash = (hash << r2) | (hash >>> (32 - r2)); - (hash, _) = Int32.multiplyWithOverflow(hash, m) - (hash, _) = Int32.addWithOverflow(hash, n) - //hash = hash * m + n; + hash = (hash << r2) | (hash >> (32 - r2)) + hash = hash &* m &+ n // print("murmur update2 : \(hash)") - return Int(hash) + return hash } - /** - * Update the intermediate hash value for the next input {@code value}. 
- * - * @param hash the intermediate hash value - * @param value the value to add to the current hash - * @return the updated intermediate hash value - */ - public static func update(_ hash: Int, _ value: T?) -> Int { + /// + /// Update the intermediate hash value for the next input `value`. + /// + /// - Parameter hash: the intermediate hash value + /// - Parameter value: the value to add to the current hash + /// - Returns: the updated intermediate hash value + /// + public static func update(_ hash: UInt32, _ value: T?) -> UInt32 { return update2(hash, value != nil ? value!.hashValue : 0) - // return update2(hash, value); } - /** - * Apply the final computation steps to the intermediate value {@code hash} - * to form the final result of the MurmurHash 3 hash function. - * - * @param hash the intermediate hash value - * @param numberOfWords the number of integer values added to the hash - * @return the final hash result - */ - public static func finish(_ hashin: Int, _ numberOfWordsIn: Int) -> Int { - var hash = Int32(hashin) - let numberOfWords = Int32(numberOfWordsIn) - hash = hash ^ Int32.multiplyWithOverflow(numberOfWords, Int32(4)).0 //(numberOfWords * UInt32(4)); - hash = hash ^ (hash >>> Int32(16)) //hash = hash ^ (hash >>> 16); - (hash, _) = Int32.multiplyWithOverflow(hash, Int32(-2048144789))//hash * UInt32(0x85EBCA6B); - hash = hash ^ (hash >>> Int32(13))//hash = hash ^ (hash >>> 13); - //hash = UInt32(truncatingBitPattern: UInt64(hash) * UInt64(0xC2B2AE35)) ; - (hash, _) = Int32.multiplyWithOverflow(hash, Int32(-1028477387)) - hash = hash ^ (hash >>> Int32(16))// hash = hash ^ (hash >>> 16); + /// + /// Apply the final computation steps to the intermediate value `hash` + /// to form the final result of the MurmurHash 3 hash function. 
+ /// + /// - Parameter hash: the intermediate hash value + /// - Parameter numberOfWords: the number of UInt32 values added to the hash + /// - Returns: the final hash result + /// + public static func finish(_ hashin: UInt32, _ numberOfWords: Int) -> Int { + return Int(finish(hashin, byteCount: (numberOfWords &* 4))) + } + + private static func finish(_ hashin: UInt32, byteCount byteCountInt: Int) -> UInt32 { + let byteCount = UInt32(truncatingIfNeeded: byteCountInt) + var hash = hashin + hash ^= byteCount + hash ^= (hash >> 16) + hash = hash &* 0x85EBCA6B + hash ^= (hash >> 13) + hash = hash &* 0xC2B2AE35 + hash ^= (hash >> 16) //print("murmur finish : \(hash)") - return Int(hash) + return hash } - /** - * Utility function to compute the hash code of an array using the - * MurmurHash algorithm. - * - * @param the array element type - * @param data the array data - * @param seed the seed for the MurmurHash algorithm - * @return the hash code of the data - */ + /// + /// Utility function to compute the hash code of an array using the + /// MurmurHash algorithm. + /// + /// - Parameter : the array element type + /// - Parameter data: the array data + /// - Parameter seed: the seed for the MurmurHash algorithm + /// - Returns: the hash code of the data + /// public static func hashCode(_ data: [T], _ seed: Int) -> Int { - var hash: Int = initialize(seed) - for value: T in data { - //var hashValue = value != nil ? value.hashValue : 0 - hash = update(hash, value.hashValue) + var hash = initialize(UInt32(truncatingIfNeeded: seed)) + for value in data { + hash = update(hash, value) } - hash = finish(hash, data.count) - return hash + return finish(hash, data.count) + } + + /// + /// Compute a hash for the given String and seed. The String is encoded + /// using UTF-8, then the bytes are interpreted as unsigned 32-bit + /// little-endian values, giving UInt32 values for the update call. 
+ /// + /// If the bytes do not evenly divide by 4, the final bytes are treated + /// slightly differently (not doing the final rotate / multiply / add). + /// + /// This matches the treatment of byte sequences in publicly available + /// test patterns (see MurmurHashTests.swift) and the example code on + /// Wikipedia. + /// + public static func hashString(_ s: String, _ seed: UInt32) -> UInt32 { + let bytes = Array(s.utf8) + return hashBytesLittleEndian(bytes, seed) + } + + private static func hashBytesLittleEndian(_ bytes: [UInt8], _ seed: UInt32) -> UInt32 { + let byteCount = bytes.count + + var hash = seed + for i in stride(from: 0, to: byteCount - 3, by: 4) { + var word = UInt32(bytes[i]) + word |= UInt32(bytes[i + 1]) << 8 + word |= UInt32(bytes[i + 2]) << 16 + word |= UInt32(bytes[i + 3]) << 24 + + hash = update(hash, word) + } + let remaining = byteCount & 3 + if remaining != 0 { + var lastWord = UInt32(0) + for r in 0 ..< remaining { + lastWord |= UInt32(bytes[byteCount - 1 - r]) << (8 * (remaining - 1 - r)) + } + + let k = calcK(lastWord) + hash ^= k + } + + return finish(hash, byteCount: byteCount) } private init() { diff --git a/runtime/Swift/Sources/Antlr4/misc/Triple.swift b/runtime/Swift/Sources/Antlr4/misc/Triple.swift index 9864d9bed..a05366038 100644 --- a/runtime/Swift/Sources/Antlr4/misc/Triple.swift +++ b/runtime/Swift/Sources/Antlr4/misc/Triple.swift @@ -1,7 +1,8 @@ -/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. - */ +/// +/// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +/// Use of this file is governed by the BSD 3-clause license that +/// can be found in the LICENSE.txt file in the project root. 
+/// public class Triple: Hashable, CustomStringConvertible { @@ -15,7 +16,7 @@ public class Triple: Hashable, CustomStringC self.c = c } public var hashValue: Int { - var hash: Int = MurmurHash.initialize() + var hash = MurmurHash.initialize() hash = MurmurHash.update(hash, a) hash = MurmurHash.update(hash, b) hash = MurmurHash.update(hash, c) diff --git a/runtime/Swift/Sources/Antlr4/misc/Utils.swift b/runtime/Swift/Sources/Antlr4/misc/Utils.swift index d8ad9bfd7..c025becdc 100644 --- a/runtime/Swift/Sources/Antlr4/misc/Utils.swift +++ b/runtime/Swift/Sources/Antlr4/misc/Utils.swift @@ -1,7 +1,8 @@ -/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. - */ +/// +/// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +/// Use of this file is governed by the BSD 3-clause license that +/// can be found in the LICENSE.txt file in the project root. +/// import Foundation @@ -33,19 +34,6 @@ public class Utils { } - public static func writeFile(_ fileName: String, _ content: String, _ encoding: String.Encoding = String.Encoding.utf8) { - - //writing - do { - try content.write(toFile: fileName, atomically: false, encoding: encoding) - } catch { - /* error handling here */ - RuntimeException(" write file fail \(error)") - } - - } - - public static func readFile(_ path: String, _ encoding: String.Encoding = String.Encoding.utf8) -> [Character] { var fileContents: String @@ -59,35 +47,6 @@ public class Utils { return Array(fileContents.characters) } - public static func readFile2String(_ fileName: String, _ encoding: String.Encoding = String.Encoding.utf8) -> String { - let path = Bundle.main.path(forResource: fileName, ofType: nil) - if path == nil { - return "" - } - - var fileContents: String? 
= nil - do { - fileContents = try String(contentsOfFile: path!, encoding: encoding) - } catch { - return "" - } - - return fileContents ?? "" - } - - public static func readFile2StringByPath(_ path: String, _ encoding: String.Encoding = String.Encoding.utf8) -> String { - - var fileContents: String? = nil - - do { - fileContents = try String(contentsOfFile: path, encoding: String.Encoding.utf8) - } catch { - return "" - } - - return fileContents ?? "" - } - public static func toMap(_ keys: [String]) -> Dictionary { var m = Dictionary() for (index,v) in keys.enumerated() { diff --git a/runtime/Swift/Sources/Antlr4/misc/exception/ANTLRError.swift b/runtime/Swift/Sources/Antlr4/misc/exception/ANTLRError.swift index 2b013958e..c9df3022e 100644 --- a/runtime/Swift/Sources/Antlr4/misc/exception/ANTLRError.swift +++ b/runtime/Swift/Sources/Antlr4/misc/exception/ANTLRError.swift @@ -1,6 +1,8 @@ +/// /// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. /// Use of this file is governed by the BSD 3-clause license that /// can be found in the LICENSE.txt file in the project root. +/// // // ANTLRError.swift @@ -12,7 +14,6 @@ import Foundation public enum ANTLRError: Error { - case nullPointer(msg:String) case unsupportedOperation(msg:String) case indexOutOfBounds(msg:String) case illegalState(msg:String) diff --git a/runtime/Swift/Sources/Antlr4/misc/exception/ANTLRException.swift b/runtime/Swift/Sources/Antlr4/misc/exception/ANTLRException.swift index 3f7d65f73..c4453384d 100644 --- a/runtime/Swift/Sources/Antlr4/misc/exception/ANTLRException.swift +++ b/runtime/Swift/Sources/Antlr4/misc/exception/ANTLRException.swift @@ -1,6 +1,8 @@ +/// /// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. /// Use of this file is governed by the BSD 3-clause license that /// can be found in the LICENSE.txt file in the project root. 
+/// // // ANTLRException.swift @@ -14,5 +16,5 @@ import Foundation public enum ANTLRException: Error { case cannotInvokeStartRule - case recognition(e:AnyObject) + case recognition(e: RecognitionException) } diff --git a/runtime/Swift/Sources/Antlr4/misc/extension/ArrayExtension.swift b/runtime/Swift/Sources/Antlr4/misc/extension/ArrayExtension.swift index 310de7b6f..294b1ba1c 100644 --- a/runtime/Swift/Sources/Antlr4/misc/extension/ArrayExtension.swift +++ b/runtime/Swift/Sources/Antlr4/misc/extension/ArrayExtension.swift @@ -1,6 +1,8 @@ +/// /// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. /// Use of this file is governed by the BSD 3-clause license that /// can be found in the LICENSE.txt file in the project root. +/// import Foundation @@ -29,15 +31,19 @@ extension Array { } + /// /// Removes the last element from self and returns it. - /// + /// /// :returns: The removed element + /// mutating func pop() -> Element { return removeLast() } + /// /// Same as append. - /// + /// /// :param: newElement Element to append + /// mutating func push(_ newElement: Element) { return append(newElement) } @@ -53,10 +59,12 @@ extension Array { } + /// /// Checks if test returns true for all the elements in self - /// + /// /// :param: test Function to call for each element /// :returns: True if test returns true for all the elements in self + /// func every(_ test: (Element) -> Bool) -> Bool { for item in self { if !test(item) { @@ -67,10 +75,12 @@ extension Array { return true } + /// /// Checks if test returns true for any element of self. 
- /// + /// /// :param: test Function to call for each element /// :returns: true if test returns true for any element of self + /// func any(_ test: (Element) -> Bool) -> Bool { for item in self { if test(item) { @@ -83,11 +93,13 @@ extension Array { + /// /// slice array /// :param: index slice index /// :param: isClose is close array /// :param: first First array /// :param: second Second array + /// //func slice(startIndex startIndex:Int, endIndex:Int) -> Slice { func slice(startIndex: Int, endIndex: Int) -> ArraySlice { diff --git a/runtime/Swift/Sources/Antlr4/misc/extension/CharacterExtension.swift b/runtime/Swift/Sources/Antlr4/misc/extension/CharacterExtension.swift index e2ad6f2a7..154d23434 100644 --- a/runtime/Swift/Sources/Antlr4/misc/extension/CharacterExtension.swift +++ b/runtime/Swift/Sources/Antlr4/misc/extension/CharacterExtension.swift @@ -1,6 +1,8 @@ +/// /// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. /// Use of this file is governed by the BSD 3-clause license that /// can be found in the LICENSE.txt file in the project root. +/// // // CharacterEextension.swift diff --git a/runtime/Swift/Sources/Antlr4/misc/extension/IntStreamExtension.swift b/runtime/Swift/Sources/Antlr4/misc/extension/IntStreamExtension.swift index a0709f1c4..b77037d94 100644 --- a/runtime/Swift/Sources/Antlr4/misc/extension/IntStreamExtension.swift +++ b/runtime/Swift/Sources/Antlr4/misc/extension/IntStreamExtension.swift @@ -1,6 +1,8 @@ +/// /// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. /// Use of this file is governed by the BSD 3-clause license that /// can be found in the LICENSE.txt file in the project root. +/// // // IntStreamExtension.swift @@ -13,14 +15,18 @@ import Foundation extension IntStream { - /// The value returned by {@link #LA LA()} when the end of the stream is + /// + /// The value returned by _#LA LA()_ when the end of the stream is /// reached. 
+ /// public static var EOF: Int { return -1 } - /// The value returned by {@link #getSourceName} when the actual name of the + /// + /// The value returned by _#getSourceName_ when the actual name of the /// underlying source is not known. + /// public static var UNKNOWN_SOURCE_NAME: String { return "" } diff --git a/runtime/Swift/Sources/Antlr4/misc/extension/NSUUIDExtension.swift b/runtime/Swift/Sources/Antlr4/misc/extension/NSUUIDExtension.swift index 460790e84..3f8a4d5d2 100644 --- a/runtime/Swift/Sources/Antlr4/misc/extension/NSUUIDExtension.swift +++ b/runtime/Swift/Sources/Antlr4/misc/extension/NSUUIDExtension.swift @@ -1,6 +1,8 @@ +/// /// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. /// Use of this file is governed by the BSD 3-clause license that /// can be found in the LICENSE.txt file in the project root. +/// // // NSUUIDExtension.swift @@ -33,7 +35,6 @@ extension UUID { let intLiteral = hi | (val & (hi - 1)) let s: String = String(Character(UnicodeScalar(UInt32(intLiteral))!)) return s[1 ..< s.length] - // return s.substringFromIndex(1) } diff --git a/runtime/Swift/Sources/Antlr4/misc/extension/StringExtension.swift b/runtime/Swift/Sources/Antlr4/misc/extension/StringExtension.swift index a5f8ebea3..1928be7f9 100644 --- a/runtime/Swift/Sources/Antlr4/misc/extension/StringExtension.swift +++ b/runtime/Swift/Sources/Antlr4/misc/extension/StringExtension.swift @@ -1,6 +1,8 @@ +/// /// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. /// Use of this file is governed by the BSD 3-clause license that /// can be found in the LICENSE.txt file in the project root. 
+/// import Foundation @@ -9,14 +11,6 @@ import Foundation extension String { - func split(_ separator: String) -> [String] { - return self.components(separatedBy: separator) - } - - func containsIgnoreCase(_ find: String) -> Bool { - return self.lowercased().range(of: find.lowercased()) != nil - } - var length: Int { return self.characters.count } @@ -58,32 +52,6 @@ extension String { return index } - func substringAfter(_ string: String) -> String { - if let range = self.range(of: string) { - let intIndex: Int = self.characters.distance(from: self.startIndex, to: range.upperBound) - return self.substring(from: self.characters.index(self.startIndex, offsetBy: intIndex)) - } - return self - - } - - var lowercaseFirstChar: String { - var result = self - if self.length > 0 { - let startIndex = self.startIndex - result.replaceSubrange(startIndex ... startIndex, with: String(self[startIndex]).lowercased()) - } - return result - } - func substringWithRange(_ range: Range) -> String { - - - let start = self.characters.index(self.startIndex, offsetBy: range.lowerBound) - - let end = self.characters.index(self.startIndex, offsetBy: range.upperBound) - return self.substring(with: start ..< end) - } - subscript(integerIndex: Int) -> Character { let index = characters.index(startIndex, offsetBy: integerIndex) return self[index] @@ -93,116 +61,7 @@ extension String { let start = characters.index(startIndex, offsetBy: integerRange.lowerBound) let end = characters.index(startIndex, offsetBy: integerRange.upperBound) let range = start ..< end - return self[range] - } - - func charAt(_ index: Int) -> Character { - return self[self.characters.index(self.startIndex, offsetBy: index)] - } - -} - -// Mapping from XML/HTML character entity reference to character -// From http://en.wikipedia.org/wiki/List_of_XML_and_HTML_character_entity_references -private let characterEntities: [String:Character] = [ - // XML predefined entities: - """: "\"", - "&": "&", - "'": "'", - "<": "<", - ">": 
">", - - // HTML character entity references: - " ": "\u{00a0}", - // ... - "♦": "♦", -] - -extension String { - - /// Returns a new string made by replacing in the `String` - /// all HTML character entity references with the corresponding - /// character. - var stringByDecodingHTMLEntities: String { - - - // Convert the number in the string to the corresponding - // Unicode character, e.g. - // decodeNumeric("64", 10) --> "@" - // decodeNumeric("20ac", 16) --> "€" - func decodeNumeric(_ string: String, base: Int32) -> Character? { - let code = UInt32(strtoul(string, nil, base)) - return Character(UnicodeScalar(code)!) - } - - // Decode the HTML character entity to the corresponding - // Unicode character, return `nil` for invalid input. - // decode("@") --> "@" - // decode("€") --> "€" - // decode("<") --> "<" - // decode("&foo;") --> nil - func decode(_ entity: String) -> Character? { - - if entity.hasPrefix("&#x") || entity.hasPrefix("&#X") { - return decodeNumeric(entity.substring(from: entity.characters.index(entity.startIndex, offsetBy: 3)), base: 16) - } else if entity.hasPrefix("&#") { - return decodeNumeric(entity.substring(from: entity.characters.index(entity.startIndex, offsetBy: 2)), base: 10) - } else { - return characterEntities[entity] - } - } - - var result = "" - var position = startIndex - - // Find the next '&' and copy the characters preceding it to `result`: - while let ampRange = self.range(of: "&", range: position ..< endIndex) { - result.append(self[position ..< ampRange.lowerBound]) - position = ampRange.lowerBound - - // Find the next ';' and copy everything from '&' to ';' into `entity` - if let semiRange = self.range(of: ";", range: position ..< endIndex) { - let entity = self[position ..< semiRange.upperBound] - position = semiRange.upperBound - - if let decoded = decode(entity) { - // Replace by decoded character: - result.append(decoded) - } else { - // Invalid entity, copy verbatim: - result.append(entity) - } - } else { - // No 
matching ';'. - break - } - } - // Copy remaining characters to `result`: - result.append(self[position ..< endIndex]) - return result + return String(self[range]) } } -extension String { - - static let htmlEscapedDictionary = [ - "&": "&", - """: "\"", - "'": "'", - "9": "'", - "’": "'", - "–": "'", - ">": ">", - "<": "<" - ] - - public var escapedHtmlString: String { - var newString = "\(self)" - - for (key, value) in String.htmlEscapedDictionary { - newString = newString.replacingOccurrences(of: value, with: key) - } - return newString - } - -} diff --git a/runtime/Swift/Sources/Antlr4/misc/extension/TokenExtension.swift b/runtime/Swift/Sources/Antlr4/misc/extension/TokenExtension.swift index b25a77358..aa634227a 100644 --- a/runtime/Swift/Sources/Antlr4/misc/extension/TokenExtension.swift +++ b/runtime/Swift/Sources/Antlr4/misc/extension/TokenExtension.swift @@ -1,6 +1,8 @@ +/// /// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. /// Use of this file is governed by the BSD 3-clause license that /// can be found in the LICENSE.txt file in the project root. +/// // // TokenExtension.swift @@ -17,9 +19,10 @@ extension Token { return 0 } + /// /// During lookahead operations, this "token" signifies we hit rule end ATN state /// and did not follow it despite needing to. - + /// static public var EPSILON: Int { return -2 } @@ -32,30 +35,35 @@ extension Token { static public var EOF: Int { return -1 } - //IntStream.EOF + + /// /// All tokens go to the parser (unless skip() is called in that rule) /// on a particular "channel". The parser tunes to a particular channel /// so that whitespace etc... can go to the parser on a "hidden" channel. - + /// static public var DEFAULT_CHANNEL: Int { return 0 } + + /// /// Anything on different channel than DEFAULT_CHANNEL is not parsed /// by parser. - + /// static public var HIDDEN_CHANNEL: Int { return 1 } + + /// /// This is the minimum constant value which can be assigned to a /// user-defined token channel. 
- /// - ///

      - /// The non-negative numbers less than {@link #MIN_USER_CHANNEL_VALUE} are - /// assigned to the predefined channels {@link #DEFAULT_CHANNEL} and - /// {@link #HIDDEN_CHANNEL}.

      - /// + /// + /// + /// The non-negative numbers less than _#MIN_USER_CHANNEL_VALUE_ are + /// assigned to the predefined channels _#DEFAULT_CHANNEL_ and + /// _#HIDDEN_CHANNEL_. + /// /// - seealso: org.antlr.v4.runtime.Token#getChannel() - + /// static public var MIN_USER_CHANNEL_VALUE: Int { return 2 } diff --git a/runtime/Swift/Sources/Antlr4/misc/utils/CommonUtil.swift b/runtime/Swift/Sources/Antlr4/misc/utils/CommonUtil.swift index f1126c589..ccb819a37 100644 --- a/runtime/Swift/Sources/Antlr4/misc/utils/CommonUtil.swift +++ b/runtime/Swift/Sources/Antlr4/misc/utils/CommonUtil.swift @@ -70,17 +70,6 @@ func log(_ message: String = "", file: String = #file, function: String = #funct // #endif } -func RuntimeException(_ message: String = "", file: String = #file, function: String = #function, lineNum: Int = #line) { - // #if DEBUG - let info = "FILE: \(URL(fileURLWithPath: file).pathComponents.last!),FUNC: \(function), LINE: \(lineNum) MESSAGE: \(message)" - // #else - // let info = "FILE: \(NSURL(fileURLWithPath: file).pathComponents!.last!),FUNC: \(function), LINE: \(lineNum) MESSAGE: \(message)" - // #endif - - fatalError(info) - -} - func toInt(_ c: Character) -> Int { return c.unicodeValue } diff --git a/runtime/Swift/Sources/Antlr4/misc/utils/Mutex.swift b/runtime/Swift/Sources/Antlr4/misc/utils/Mutex.swift index 84bdafe28..70c3ab9df 100644 --- a/runtime/Swift/Sources/Antlr4/misc/utils/Mutex.swift +++ b/runtime/Swift/Sources/Antlr4/misc/utils/Mutex.swift @@ -1,23 +1,31 @@ import Foundation +/// /// Using class so it can be shared even if /// it appears to be a field in a class. +/// class Mutex { + /// /// The mutex instance. + /// private var mutex = pthread_mutex_t() + /// /// Initialization + /// init() { pthread_mutex_init(&mutex, nil) } + /// /// Running the supplied closure synchronously. 
- /// + /// /// - Parameter closure: the closure to run /// - Returns: the value returned by the closure /// - Throws: the exception populated by the closure run + /// @discardableResult func synchronized(closure: () throws -> R) rethrows -> R { pthread_mutex_lock(&mutex) diff --git a/runtime/Swift/Sources/Antlr4/tree/AbstractParseTreeVisitor.swift b/runtime/Swift/Sources/Antlr4/tree/AbstractParseTreeVisitor.swift index 651ce5547..689e2d7db 100644 --- a/runtime/Swift/Sources/Antlr4/tree/AbstractParseTreeVisitor.swift +++ b/runtime/Swift/Sources/Antlr4/tree/AbstractParseTreeVisitor.swift @@ -10,31 +10,31 @@ open class AbstractParseTreeVisitor: ParseTreeVisitor { } - /** - * {@inheritDoc} - * - *

      The default implementation calls {@link org.antlr.v4.runtime.tree.ParseTree#accept} on the - * specified tree.

      - */ + /// + /// + /// + /// The default implementation calls _org.antlr.v4.runtime.tree.ParseTree#accept_ on the + /// specified tree. + /// open override func visit(_ tree: ParseTree) -> T? { return tree.accept(self) } - /** - * {@inheritDoc} - * - *

      The default implementation initializes the aggregate result to - * {@link #defaultResult defaultResult()}. Before visiting each child, it - * calls {@link #shouldVisitNextChild shouldVisitNextChild}; if the result - * is {@code false} no more children are visited and the current aggregate - * result is returned. After visiting a child, the aggregate result is - * updated by calling {@link #aggregateResult aggregateResult} with the - * previous aggregate result and the result of visiting the child.

      - * - *

      The default implementation is not safe for use in visitors that modify - * the tree structure. Visitors that modify the tree should override this - * method to behave properly in respect to the specific algorithm in use.

      - */ + /// + /// + /// + /// The default implementation initializes the aggregate result to + /// _#defaultResult defaultResult()_. Before visiting each child, it + /// calls _#shouldVisitNextChild shouldVisitNextChild_; if the result + /// is `false` no more children are visited and the current aggregate + /// result is returned. After visiting a child, the aggregate result is + /// updated by calling _#aggregateResult aggregateResult_ with the + /// previous aggregate result and the result of visiting the child. + /// + /// The default implementation is not safe for use in visitors that modify + /// the tree structure. Visitors that modify the tree should override this + /// method to behave properly in respect to the specific algorithm in use. + /// open override func visitChildren(_ node: RuleNode) -> T? { var result: T? = defaultResult() let n: Int = node.getChildCount() @@ -52,89 +52,89 @@ open class AbstractParseTreeVisitor: ParseTreeVisitor { return result } - /** - * {@inheritDoc} - * - *

      The default implementation returns the result of - * {@link #defaultResult defaultResult}.

      - */ + /// + /// + /// + /// The default implementation returns the result of + /// _#defaultResult defaultResult_. + /// open override func visitTerminal(_ node: TerminalNode) -> T? { return defaultResult() } - /** - * {@inheritDoc} - * - *

      The default implementation returns the result of - * {@link #defaultResult defaultResult}.

      - */ + /// + /// + /// + /// The default implementation returns the result of + /// _#defaultResult defaultResult_. + /// override open func visitErrorNode(_ node: ErrorNode) -> T? { return defaultResult() } - /** - * Gets the default value returned by visitor methods. This value is - * returned by the default implementations of - * {@link #visitTerminal visitTerminal}, {@link #visitErrorNode visitErrorNode}. - * The default implementation of {@link #visitChildren visitChildren} - * initializes its aggregate result to this value. - * - *

      The base implementation returns {@code null}.

      - * - * @return The default value returned by visitor methods. - */ + /// + /// Gets the default value returned by visitor methods. This value is + /// returned by the default implementations of + /// _#visitTerminal visitTerminal_, _#visitErrorNode visitErrorNode_. + /// The default implementation of _#visitChildren visitChildren_ + /// initializes its aggregate result to this value. + /// + /// The base implementation returns `null`. + /// + /// - Returns: The default value returned by visitor methods. + /// open func defaultResult() -> T? { return nil } - /** - * Aggregates the results of visiting multiple children of a node. After - * either all children are visited or {@link #shouldVisitNextChild} returns - * {@code false}, the aggregate value is returned as the result of - * {@link #visitChildren}. - * - *

      The default implementation returns {@code nextResult}, meaning - * {@link #visitChildren} will return the result of the last child visited - * (or return the initial value if the node has no children).

      - * - * @param aggregate The previous aggregate value. In the default - * implementation, the aggregate value is initialized to - * {@link #defaultResult}, which is passed as the {@code aggregate} argument - * to this method after the first child node is visited. - * @param nextResult The result of the immediately preceeding call to visit - * a child node. - * - * @return The updated aggregate result. - */ + /// + /// Aggregates the results of visiting multiple children of a node. After + /// either all children are visited or _#shouldVisitNextChild_ returns + /// `false`, the aggregate value is returned as the result of + /// _#visitChildren_. + /// + /// The default implementation returns `nextResult`, meaning + /// _#visitChildren_ will return the result of the last child visited + /// (or return the initial value if the node has no children). + /// + /// - Parameter aggregate: The previous aggregate value. In the default + /// implementation, the aggregate value is initialized to + /// _#defaultResult_, which is passed as the `aggregate` argument + /// to this method after the first child node is visited. + /// - Parameter nextResult: The result of the immediately preceeding call to visit + /// a child node. + /// + /// - Returns: The updated aggregate result. + /// open func aggregateResult(_ aggregate: T?, _ nextResult: T?) -> T? { return nextResult } - /** - * This method is called after visiting each child in - * {@link #visitChildren}. This method is first called before the first - * child is visited; at that point {@code currentResult} will be the initial - * value (in the default implementation, the initial value is returned by a - * call to {@link #defaultResult}. This method is not called after the last - * child is visited. - * - *

      The default implementation always returns {@code true}, indicating that - * {@code visitChildren} should only return after all children are visited. - * One reason to override this method is to provide a "short circuit" - * evaluation option for situations where the result of visiting a single - * child has the potential to determine the result of the visit operation as - * a whole.

      - * - * @param node The {@link org.antlr.v4.runtime.tree.RuleNode} whose children are currently being - * visited. - * @param currentResult The current aggregate result of the children visited - * to the current point. - * - * @return {@code true} to continue visiting children. Otherwise return - * {@code false} to stop visiting children and immediately return the - * current aggregate result from {@link #visitChildren}. - */ + /// + /// This method is called after visiting each child in + /// _#visitChildren_. This method is first called before the first + /// child is visited; at that point `currentResult` will be the initial + /// value (in the default implementation, the initial value is returned by a + /// call to _#defaultResult_. This method is not called after the last + /// child is visited. + /// + /// The default implementation always returns `true`, indicating that + /// `visitChildren` should only return after all children are visited. + /// One reason to override this method is to provide a "short circuit" + /// evaluation option for situations where the result of visiting a single + /// child has the potential to determine the result of the visit operation as + /// a whole. + /// + /// - Parameter node: The _org.antlr.v4.runtime.tree.RuleNode_ whose children are currently being + /// visited. + /// - Parameter currentResult: The current aggregate result of the children visited + /// to the current point. + /// + /// - Returns: `true` to continue visiting children. Otherwise return + /// `false` to stop visiting children and immediately return the + /// current aggregate result from _#visitChildren_. + /// open func shouldVisitNextChild(_ node: RuleNode, _ currentResult: T?) 
-> Bool { return true } diff --git a/runtime/Swift/Sources/Antlr4/tree/ErrorNode.swift b/runtime/Swift/Sources/Antlr4/tree/ErrorNode.swift index 9b896fd74..e9075b006 100644 --- a/runtime/Swift/Sources/Antlr4/tree/ErrorNode.swift +++ b/runtime/Swift/Sources/Antlr4/tree/ErrorNode.swift @@ -4,14 +4,12 @@ */ -/** Represents a token that was consumed during resynchronization - * rather than during a valid match operation. For example, - * we will create this kind of a node during single token insertion - * and deletion as well as during "consume until error recovery set" - * upon no viable alternative exceptions. - */ -//public class ErrorNodeImpl : TerminalNodeImpl,ErrorNode{ - +/// Represents a token that was consumed during resynchronization +/// rather than during a valid match operation. For example, +/// we will create this kind of a node during single token insertion +/// and deletion as well as during "consume until error recovery set" +/// upon no viable alternative exceptions. +/// public class ErrorNode: TerminalNodeImpl { public override init(_ token: Token) { super.init(token) diff --git a/runtime/Swift/Sources/Antlr4/tree/ParseTree.swift b/runtime/Swift/Sources/Antlr4/tree/ParseTree.swift index 7ef4d7a2b..dfddc4c67 100644 --- a/runtime/Swift/Sources/Antlr4/tree/ParseTree.swift +++ b/runtime/Swift/Sources/Antlr4/tree/ParseTree.swift @@ -4,88 +4,65 @@ */ -/** An interface to access the tree of {@link org.antlr.v4.runtime.RuleContext} objects created - * during a parse that makes the data structure look like a simple parse tree. - * This node represents both internal nodes, rule invocations, - * and leaf nodes, token matches. - * - *

      The payload is either a {@link org.antlr.v4.runtime.Token} or a {@link org.antlr.v4.runtime.RuleContext} object.

      - */ -//public protocol ParseTree : SyntaxTree { +/// An interface to access the tree of _org.antlr.v4.runtime.RuleContext_ objects created +/// during a parse that makes the data structure look like a simple parse tree. +/// This node represents both internal nodes, rule invocations, +/// and leaf nodes, token matches. +/// +/// The payload is either a _org.antlr.v4.runtime.Token_ or a _org.antlr.v4.runtime.RuleContext_ object. +/// +open class ParseTree: SyntaxTree, CustomStringConvertible, CustomDebugStringConvertible { -open class ParseTree: SyntaxTree, CustomStringConvertible , CustomDebugStringConvertible { - - // the following methods narrow the return type; they are not additional methods - - //func getParent() -> ParseTree? - - //func getChild(i : Int) -> ParseTree? - - /** The {@link org.antlr.v4.runtime.tree.ParseTreeVisitor} needs a double dispatch method. */ + /// The _org.antlr.v4.runtime.tree.ParseTreeVisitor_ needs a double dispatch method. open func accept(_ visitor: ParseTreeVisitor) -> T? { - RuntimeException(" must overriden !") - fatalError() + fatalError(#function + " must be overridden") } - /** Return the combined text of all leaf nodes. Does not get any - * off-channel tokens (if any) so won't return whitespace and - * comments if they are sent to parser on hidden channel. - */ + /// Return the combined text of all leaf nodes. Does not get any + /// off-channel tokens (if any) so won't return whitespace and + /// comments if they are sent to parser on hidden channel. + /// open func getText() -> String { - RuntimeException(" must overriden !") - return "" + fatalError(#function + " must be overridden") } - /** Specialize toStringTree so that it can print out more information - * based upon the parser. - */ + + /// Specialize toStringTree so that it can print out more information + /// based upon the parser. 
+ /// open func toStringTree(_ parser: Parser) -> String { - RuntimeException(" must overriden !") - return "" - + fatalError(#function + " must be overridden") } - open func getSourceInterval() -> Interval { - RuntimeException(" must overriden !") - fatalError() + fatalError(#function + " must be overridden") } - open func getParent() -> Tree? { - RuntimeException(" must overriden !") - fatalError() + fatalError(#function + " must be overridden") } open func getPayload() -> AnyObject { - RuntimeException(" must overriden !") - fatalError() + fatalError(#function + " must be overridden") } open func getChild(_ i: Int) -> Tree? { - RuntimeException(" must overriden !") - fatalError() + fatalError(#function + " must be overridden") } - open func getChildCount() -> Int { - RuntimeException(" must overriden !") - fatalError() + fatalError(#function + " must be overridden") } open func toStringTree() -> String { - RuntimeException(" must overriden !") - fatalError() + fatalError(#function + " must be overridden") } - open var description: String { - RuntimeException(" must overriden !") - fatalError() + fatalError(#function + " must be overridden") } open var debugDescription: String { - RuntimeException(" must overriden !") - fatalError() + fatalError(#function + " must be overridden") } } diff --git a/runtime/Swift/Sources/Antlr4/tree/ParseTreeListener.swift b/runtime/Swift/Sources/Antlr4/tree/ParseTreeListener.swift index e427352c2..c08cfa1fb 100644 --- a/runtime/Swift/Sources/Antlr4/tree/ParseTreeListener.swift +++ b/runtime/Swift/Sources/Antlr4/tree/ParseTreeListener.swift @@ -5,17 +5,17 @@ -/** This interface describes the minimal core of methods triggered - * by {@link org.antlr.v4.runtime.tree.ParseTreeWalker}. 
E.g., - * - * ParseTreeWalker walker = new ParseTreeWalker(); - * walker.walk(myParseTreeListener, myParseTree); <-- triggers events in your listener - * - * If you want to trigger events in multiple listeners during a single - * tree walk, you can use the ParseTreeDispatcher object available at - * - * https://github.com/antlr/antlr4/issues/841 - */ +/// This interface describes the minimal core of methods triggered +/// by _org.antlr.v4.runtime.tree.ParseTreeWalker_. E.g., +/// +/// ParseTreeWalker walker = new ParseTreeWalker(); +/// walker.walk(myParseTreeListener, myParseTree); <-- triggers events in your listener +/// +/// If you want to trigger events in multiple listeners during a single +/// tree walk, you can use the ParseTreeDispatcher object available at +/// +/// https://github.com/antlr/antlr4/issues/841 +/// public protocol ParseTreeListener: class { func visitTerminal(_ node: TerminalNode) diff --git a/runtime/Swift/Sources/Antlr4/tree/ParseTreeVisitor.swift b/runtime/Swift/Sources/Antlr4/tree/ParseTreeVisitor.swift index 3d02d85a3..5d1a48525 100644 --- a/runtime/Swift/Sources/Antlr4/tree/ParseTreeVisitor.swift +++ b/runtime/Swift/Sources/Antlr4/tree/ParseTreeVisitor.swift @@ -5,14 +5,14 @@ -/** - * This interface defines the basic notion of a parse tree visitor. Generated - * visitors implement this interface and the {@code XVisitor} interface for - * grammar {@code X}. - * - * @param The return type of the visit operation. Use {@link Void} for - * operations with no return type. - */ +/// +/// This interface defines the basic notion of a parse tree visitor. Generated +/// visitors implement this interface and the `XVisitor` interface for +/// grammar `X`. +/// +/// - Parameter : The return type of the visit operation. Use _Void_ for +/// operations with no return type. 
+/// open class ParseTreeVisitor { @@ -20,51 +20,44 @@ open class ParseTreeVisitor { } // typealias T - /** - * Visit a parse tree, and return a user-defined result of the operation. - * - * @param tree The {@link org.antlr.v4.runtime.tree.ParseTree} to visit. - * @return The result of visiting the parse tree. - */ + /// + /// Visit a parse tree, and return a user-defined result of the operation. + /// + /// - Parameter tree: The _org.antlr.v4.runtime.tree.ParseTree_ to visit. + /// - Returns: The result of visiting the parse tree. + /// open func visit(_ tree: ParseTree) -> T? { - RuntimeException(" must overriden !") - return nil - + fatalError(#function + " must be overridden") } - /** - * Visit the children of a node, and return a user-defined result of the - * operation. - * - * @param node The {@link org.antlr.v4.runtime.tree.RuleNode} whose children should be visited. - * @return The result of visiting the children of the node. - */ + /// + /// Visit the children of a node, and return a user-defined result of the + /// operation. + /// + /// - Parameter node: The _org.antlr.v4.runtime.tree.RuleNode_ whose children should be visited. + /// - Returns: The result of visiting the children of the node. + /// open func visitChildren(_ node: RuleNode) -> T? { - RuntimeException(" must overriden !") - return nil - + fatalError(#function + " must be overridden") } - /** - * Visit a terminal node, and return a user-defined result of the operation. - * - * @param node The {@link org.antlr.v4.runtime.tree.TerminalNode} to visit. - * @return The result of visiting the node. - */ + /// + /// Visit a terminal node, and return a user-defined result of the operation. + /// + /// - Parameter node: The _org.antlr.v4.runtime.tree.TerminalNode_ to visit. + /// - Returns: The result of visiting the node. + /// open func visitTerminal(_ node: TerminalNode) -> T? 
{ - RuntimeException(" must overriden !") - return nil - + fatalError(#function + " must be overridden") } - /** - * Visit an error node, and return a user-defined result of the operation. - * - * @param node The {@link org.antlr.v4.runtime.tree.ErrorNode} to visit. - * @return The result of visiting the node. - */ + /// + /// Visit an error node, and return a user-defined result of the operation. + /// + /// - Parameter node: The _org.antlr.v4.runtime.tree.ErrorNode_ to visit. + /// - Returns: The result of visiting the node. + /// open func visitErrorNode(_ node: ErrorNode) -> T? { - RuntimeException(" must overriden !") - return nil + fatalError(#function + " must be overridden") } } diff --git a/runtime/Swift/Sources/Antlr4/tree/ParseTreeWalker.swift b/runtime/Swift/Sources/Antlr4/tree/ParseTreeWalker.swift index 35c71ba1f..c93874561 100644 --- a/runtime/Swift/Sources/Antlr4/tree/ParseTreeWalker.swift +++ b/runtime/Swift/Sources/Antlr4/tree/ParseTreeWalker.swift @@ -29,12 +29,12 @@ public class ParseTreeWalker { try exitRule(listener, r) } - /** - * The discovery of a rule node, involves sending two events: the generic - * {@link org.antlr.v4.runtime.tree.ParseTreeListener#enterEveryRule} and a - * {@link org.antlr.v4.runtime.RuleContext}-specific event. First we trigger the generic and then - * the rule specific. We to them in reverse order upon finishing the node. - */ + /// + /// The discovery of a rule node, involves sending two events: the generic + /// _org.antlr.v4.runtime.tree.ParseTreeListener#enterEveryRule_ and a + /// _org.antlr.v4.runtime.RuleContext_-specific event. First we trigger the generic and then + /// the rule specific. We to them in reverse order upon finishing the node. + /// internal func enterRule(_ listener: ParseTreeListener, _ r: RuleNode) throws { let ctx: ParserRuleContext = r.getRuleContext() as! 
ParserRuleContext try listener.enterEveryRule(ctx) diff --git a/runtime/Swift/Sources/Antlr4/tree/RuleNode.swift b/runtime/Swift/Sources/Antlr4/tree/RuleNode.swift index e85dd8342..35af9c779 100644 --- a/runtime/Swift/Sources/Antlr4/tree/RuleNode.swift +++ b/runtime/Swift/Sources/Antlr4/tree/RuleNode.swift @@ -6,7 +6,6 @@ open class RuleNode: ParseTree { open func getRuleContext() -> RuleContext { - RuntimeException(" must overriden !") - fatalError() + fatalError(#function + " must be overridden") } } diff --git a/runtime/Swift/Sources/Antlr4/tree/SyntaxTree.swift b/runtime/Swift/Sources/Antlr4/tree/SyntaxTree.swift index 34c42202b..623124548 100644 --- a/runtime/Swift/Sources/Antlr4/tree/SyntaxTree.swift +++ b/runtime/Swift/Sources/Antlr4/tree/SyntaxTree.swift @@ -4,20 +4,20 @@ */ -/** A tree that knows about an interval in a token stream - * is some kind of syntax tree. Subinterfaces distinguish - * between parse trees and other kinds of syntax trees we might want to create. - */ +/// A tree that knows about an interval in a token stream +/// is some kind of syntax tree. Subinterfaces distinguish +/// between parse trees and other kinds of syntax trees we might want to create. +/// public protocol SyntaxTree: Tree { - /** - * Return an {@link org.antlr.v4.runtime.misc.Interval} indicating the index in the - * {@link org.antlr.v4.runtime.TokenStream} of the first and last token associated with this - * subtree. If this node is a leaf, then the interval represents a single - * token. - * - *

      If source interval is unknown, this returns {@link org.antlr.v4.runtime.misc.Interval#INVALID}.

      - */ + /// + /// Return an _org.antlr.v4.runtime.misc.Interval_ indicating the index in the + /// _org.antlr.v4.runtime.TokenStream_ of the first and last token associated with this + /// subtree. If this node is a leaf, then the interval represents a single + /// token. + /// + /// If source interval is unknown, this returns _org.antlr.v4.runtime.misc.Interval#INVALID_. + /// func getSourceInterval() -> Interval } diff --git a/runtime/Swift/Sources/Antlr4/tree/TerminalNode.swift b/runtime/Swift/Sources/Antlr4/tree/TerminalNode.swift index 26b96c64d..d2d9d9d5f 100644 --- a/runtime/Swift/Sources/Antlr4/tree/TerminalNode.swift +++ b/runtime/Swift/Sources/Antlr4/tree/TerminalNode.swift @@ -5,22 +5,19 @@ public class TerminalNode: ParseTree { public func getSymbol() -> Token? { - RuntimeException(" must overriden !") - fatalError() - + fatalError(#function + " must be overridden") } - /** Set the parent for this leaf node. - * - * Technically, this is not backward compatible as it changes - * the interface but no one was able to create custom - * TerminalNodes anyway so I'm adding as it improves internal - * code quality. - * - * @since 4.7 - */ + /// Set the parent for this leaf node. + /// + /// Technically, this is not backward compatible as it changes + /// the interface but no one was able to create custom + /// TerminalNodes anyway so I'm adding as it improves internal + /// code quality. 
+ /// + /// - Since: 4.7 + /// public func setParent(_ parent: RuleContext) { - RuntimeException(" must overriden !") - fatalError() + fatalError(#function + " must be overridden") } } diff --git a/runtime/Swift/Sources/Antlr4/tree/TerminalNodeImpl.swift b/runtime/Swift/Sources/Antlr4/tree/TerminalNodeImpl.swift index 8f5b0020d..4eab9bafc 100644 --- a/runtime/Swift/Sources/Antlr4/tree/TerminalNodeImpl.swift +++ b/runtime/Swift/Sources/Antlr4/tree/TerminalNodeImpl.swift @@ -6,7 +6,7 @@ public class TerminalNodeImpl: TerminalNode { public var symbol: Token - public var parent: ParseTree? + public weak var parent: ParseTree? public init(_ symbol: Token) { self.symbol = symbol diff --git a/runtime/Swift/Sources/Antlr4/tree/Tree.swift b/runtime/Swift/Sources/Antlr4/tree/Tree.swift index 124116c11..68bb5628b 100644 --- a/runtime/Swift/Sources/Antlr4/tree/Tree.swift +++ b/runtime/Swift/Sources/Antlr4/tree/Tree.swift @@ -4,36 +4,36 @@ */ -/** The basic notion of a tree has a parent, a payload, and a list of children. - * It is the most abstract interface for all the trees used by ANTLR. - */ +/// The basic notion of a tree has a parent, a payload, and a list of children. +/// It is the most abstract interface for all the trees used by ANTLR. +/// public protocol Tree: class { - /** The parent of this node. If the return value is null, then this - * node is the root of the tree. - */ + /// The parent of this node. If the return value is null, then this + /// node is the root of the tree. + /// func getParent() -> Tree? - /** - * This method returns whatever object represents the data at this note. For - * example, for parse trees, the payload can be a {@link org.antlr.v4.runtime.Token} representing - * a leaf node or a {@link org.antlr.v4.runtime.RuleContext} object representing a rule - * invocation. For abstract syntax trees (ASTs), this is a {@link org.antlr.v4.runtime.Token} - * object. 
- */ + /// + /// This method returns whatever object represents the data at this note. For + /// example, for parse trees, the payload can be a _org.antlr.v4.runtime.Token_ representing + /// a leaf node or a _org.antlr.v4.runtime.RuleContext_ object representing a rule + /// invocation. For abstract syntax trees (ASTs), this is a _org.antlr.v4.runtime.Token_ + /// object. + /// func getPayload() -> AnyObject - /** If there are children, get the {@code i}th value indexed from 0. */ + /// If there are children, get the `i`th value indexed from 0. func getChild(_ i: Int) -> Tree? - /** How many children are there? If there is none, then this - * node represents a leaf node. - */ + /// How many children are there? If there is none, then this + /// node represents a leaf node. + /// func getChildCount() -> Int - /** Print out a whole tree, not just a node, in LISP format - * {@code (root child1 .. childN)}. Print just a node if this is a leaf. - */ + /// Print out a whole tree, not just a node, in LISP format + /// `(root child1 .. childN)`. Print just a node if this is a leaf. + /// func toStringTree() -> String } diff --git a/runtime/Swift/Sources/Antlr4/tree/Trees.swift b/runtime/Swift/Sources/Antlr4/tree/Trees.swift index 659669f73..09f724e8b 100644 --- a/runtime/Swift/Sources/Antlr4/tree/Trees.swift +++ b/runtime/Swift/Sources/Antlr4/tree/Trees.swift @@ -4,7 +4,7 @@ */ -/** A set of utility routines useful for all kinds of ANTLR trees. */ +/// A set of utility routines useful for all kinds of ANTLR trees. public class Trees { /* @@ -40,29 +40,29 @@ public class Trees { writePS(t, ruleNames, fileName, "Helvetica", 11) } */ - /** Print out a whole tree in LISP form. {@link #getNodeText} is used on the - * node payloads to get the text for the nodes. Detect - * parse trees and extract data appropriately. - */ + /// Print out a whole tree in LISP form. _#getNodeText_ is used on the + /// node payloads to get the text for the nodes. 
Detect + /// parse trees and extract data appropriately. + /// public static func toStringTree(_ t: Tree) -> String { let rulsName: Array? = nil return toStringTree(t, rulsName) } - /** Print out a whole tree in LISP form. {@link #getNodeText} is used on the - * node payloads to get the text for the nodes. Detect - * parse trees and extract data appropriately. - */ + /// Print out a whole tree in LISP form. _#getNodeText_ is used on the + /// node payloads to get the text for the nodes. Detect + /// parse trees and extract data appropriately. + /// public static func toStringTree(_ t: Tree, _ recog: Parser?) -> String { let ruleNames: [String]? = recog != nil ? recog!.getRuleNames() : nil let ruleNamesList: Array? = ruleNames ?? nil return toStringTree(t, ruleNamesList) } - /** Print out a whole tree in LISP form. {@link #getNodeText} is used on the - * node payloads to get the text for the nodes. Detect - * parse trees and extract data appropriately. - */ + /// Print out a whole tree in LISP form. _#getNodeText_ is used on the + /// node payloads to get the text for the nodes. Detect + /// parse trees and extract data appropriately. + /// public static func toStringTree(_ t: Tree, _ ruleNames: Array?) -> String { var s: String = Utils.escapeWhitespace(getNodeText(t, ruleNames), false) if t.getChildCount() == 0 { @@ -121,7 +121,7 @@ public class Trees { } - /** Return ordered list of all children of this node */ + /// Return ordered list of all children of this node public static func getChildren(_ t: Tree) -> Array { var kids: Array = Array() let length = t.getChildCount() @@ -131,9 +131,9 @@ public class Trees { return kids } - /** Return a list of all ancestors of this node. The first node of - * list is the root and the last is the parent of this node. - */ + /// Return a list of all ancestors of this node. The first node of + /// list is the root and the last is the parent of this node. 
+ /// public static func getAncestors(_ t: Tree) -> Array { var ancestors: Array = Array() @@ -204,11 +204,11 @@ public class Trees { return nodes } - /** Find smallest subtree of t enclosing range startTokenIndex..stopTokenIndex - * inclusively using postorder traversal. Recursive depth-first-search. - * - * @since 4.5.1 - */ + /// Find smallest subtree of t enclosing range startTokenIndex..stopTokenIndex + /// inclusively using postorder traversal. Recursive depth-first-search. + /// + /// - Since: 4.5.1 + /// public static func getRootOfSubtreeEnclosingRegion(_ t: ParseTree, _ startTokenIndex: Int, _ stopTokenIndex: Int) -> ParserRuleContext? { diff --git a/runtime/Swift/Sources/Antlr4/tree/pattern/Chunk.swift b/runtime/Swift/Sources/Antlr4/tree/pattern/Chunk.swift index c41ab8fe9..badb6820f 100644 --- a/runtime/Swift/Sources/Antlr4/tree/pattern/Chunk.swift +++ b/runtime/Swift/Sources/Antlr4/tree/pattern/Chunk.swift @@ -5,17 +5,17 @@ -/** - * A chunk is either a token tag, a rule tag, or a span of literal text within a - * tree pattern. - * - *

      The method {@link org.antlr.v4.runtime.tree.pattern.ParseTreePatternMatcher#split(String)} returns a list of - * chunks in preparation for creating a token stream by - * {@link org.antlr.v4.runtime.tree.pattern.ParseTreePatternMatcher#tokenize(String)}. From there, we get a parse - * tree from with {@link org.antlr.v4.runtime.tree.pattern.ParseTreePatternMatcher#compile(String, int)}. These - * chunks are converted to {@link org.antlr.v4.runtime.tree.pattern.RuleTagToken}, {@link org.antlr.v4.runtime.tree.pattern.TokenTagToken}, or the - * regular tokens of the text surrounding the tags.

      - */ +/// +/// A chunk is either a token tag, a rule tag, or a span of literal text within a +/// tree pattern. +/// +/// The method _org.antlr.v4.runtime.tree.pattern.ParseTreePatternMatcher#split(String)_ returns a list of +/// chunks in preparation for creating a token stream by +/// _org.antlr.v4.runtime.tree.pattern.ParseTreePatternMatcher#tokenize(String)_. From there, we get a parse +/// tree from with _org.antlr.v4.runtime.tree.pattern.ParseTreePatternMatcher#compile(String, int)_. These +/// chunks are converted to _org.antlr.v4.runtime.tree.pattern.RuleTagToken_, _org.antlr.v4.runtime.tree.pattern.TokenTagToken_, or the +/// regular tokens of the text surrounding the tags. +/// public class Chunk { } diff --git a/runtime/Swift/Sources/Antlr4/tree/pattern/ParseTreeMatch.swift b/runtime/Swift/Sources/Antlr4/tree/pattern/ParseTreeMatch.swift index 3a93ab155..55b362a3c 100644 --- a/runtime/Swift/Sources/Antlr4/tree/pattern/ParseTreeMatch.swift +++ b/runtime/Swift/Sources/Antlr4/tree/pattern/ParseTreeMatch.swift @@ -4,46 +4,46 @@ */ -/** - * Represents the result of matching a {@link org.antlr.v4.runtime.tree.ParseTree} against a tree pattern. - */ +/// +/// Represents the result of matching a _org.antlr.v4.runtime.tree.ParseTree_ against a tree pattern. +/// public class ParseTreeMatch: CustomStringConvertible { - /** - * This is the backing field for {@link #getTree()}. - */ + /// + /// This is the backing field for _#getTree()_. + /// private let tree: ParseTree - /** - * This is the backing field for {@link #getPattern()}. - */ + /// + /// This is the backing field for _#getPattern()_. + /// private let pattern: ParseTreePattern - /** - * This is the backing field for {@link #getLabels()}. - */ + /// + /// This is the backing field for _#getLabels()_. + /// private let labels: MultiMap - /** - * This is the backing field for {@link #getMismatchedNode()}. - */ + /// + /// This is the backing field for _#getMismatchedNode()_. 
+ /// private let mismatchedNode: ParseTree? - /** - * Constructs a new instance of {@link org.antlr.v4.runtime.tree.pattern.ParseTreeMatch} from the specified - * parse tree and pattern. - * - * @param tree The parse tree to match against the pattern. - * @param pattern The parse tree pattern. - * @param labels A mapping from label names to collections of - * {@link org.antlr.v4.runtime.tree.ParseTree} objects located by the tree pattern matching process. - * @param mismatchedNode The first node which failed to match the tree - * pattern during the matching process. - * - * @exception IllegalArgumentException if {@code tree} is {@code null} - * @exception IllegalArgumentException if {@code pattern} is {@code null} - * @exception IllegalArgumentException if {@code labels} is {@code null} - */ + /// + /// Constructs a new instance of _org.antlr.v4.runtime.tree.pattern.ParseTreeMatch_ from the specified + /// parse tree and pattern. + /// + /// - Parameter tree: The parse tree to match against the pattern. + /// - Parameter pattern: The parse tree pattern. + /// - Parameter labels: A mapping from label names to collections of + /// _org.antlr.v4.runtime.tree.ParseTree_ objects located by the tree pattern matching process. + /// - Parameter mismatchedNode: The first node which failed to match the tree + /// pattern during the matching process. + /// + /// - Throws: ANTLRError.illegalArgument if `tree` is `null` + /// - Throws: ANTLRError.illegalArgument if `pattern` is `null` + /// - Throws: ANTLRError.illegalArgument if `labels` is `null` + /// public init(_ tree: ParseTree, _ pattern: ParseTreePattern, _ labels: MultiMap, _ mismatchedNode: ParseTree?) { self.tree = tree @@ -52,22 +52,22 @@ public class ParseTreeMatch: CustomStringConvertible { self.mismatchedNode = mismatchedNode } - /** - * Get the last node associated with a specific {@code label}. - * - *

      For example, for pattern {@code }, {@code get("id")} returns the - * node matched for that {@code ID}. If more than one node - * matched the specified label, only the last is returned. If there is - * no node associated with the label, this returns {@code null}.

      - * - *

      Pattern tags like {@code } and {@code } without labels are - * considered to be labeled with {@code ID} and {@code expr}, respectively.

      - * - * @param label The label to check. - * - * @return The last {@link org.antlr.v4.runtime.tree.ParseTree} to match a tag with the specified - * label, or {@code null} if no parse tree matched a tag with the label. - */ + /// + /// Get the last node associated with a specific `label`. + /// + /// For example, for pattern ``, `get("id")` returns the + /// node matched for that `ID`. If more than one node + /// matched the specified label, only the last is returned. If there is + /// no node associated with the label, this returns `null`. + /// + /// Pattern tags like `` and `` without labels are + /// considered to be labeled with `ID` and `expr`, respectively. + /// + /// - Parameter label: The label to check. + /// + /// - Returns: The last _org.antlr.v4.runtime.tree.ParseTree_ to match a tag with the specified + /// label, or `null` if no parse tree matched a tag with the label. + /// public func get(_ label: String) -> ParseTree? { if let parseTrees = labels.get(label) , parseTrees.count > 0 { @@ -78,30 +78,27 @@ public class ParseTreeMatch: CustomStringConvertible { } - /** - * Return all nodes matching a rule or token tag with the specified label. - * - *

      If the {@code label} is the name of a parser rule or token in the - * grammar, the resulting list will contain both the parse trees matching - * rule or tags explicitly labeled with the label and the complete set of - * parse trees matching the labeled and unlabeled tags in the pattern for - * the parser rule or token. For example, if {@code label} is {@code "foo"}, - * the result will contain all of the following.

      - * - *
        - *
      • Parse tree nodes matching tags of the form {@code } and - * {@code }.
      • - *
      • Parse tree nodes matching tags of the form {@code }.
      • - *
      • Parse tree nodes matching tags of the form {@code }.
      • - *
      - * - * @param label The label. - * - * @return A collection of all {@link org.antlr.v4.runtime.tree.ParseTree} nodes matching tags with - * the specified {@code label}. If no nodes matched the label, an empty list - * is returned. - */ - + /// + /// Return all nodes matching a rule or token tag with the specified label. + /// + /// If the `label` is the name of a parser rule or token in the + /// grammar, the resulting list will contain both the parse trees matching + /// rule or tags explicitly labeled with the label and the complete set of + /// parse trees matching the labeled and unlabeled tags in the pattern for + /// the parser rule or token. For example, if `label` is `"foo"`, + /// the result will contain __all__ of the following. + /// + /// * Parse tree nodes matching tags of the form `` and + /// ``. + /// * Parse tree nodes matching tags of the form ``. + /// * Parse tree nodes matching tags of the form ``. + /// + /// - Parameter label: The label. + /// + /// - Returns: A collection of all _org.antlr.v4.runtime.tree.ParseTree_ nodes matching tags with + /// the specified `label`. If no nodes matched the label, an empty list + /// is returned. + /// public func getAll(_ label: String) -> Array { let nodes: Array? = labels.get(label) if nodes == nil { @@ -111,66 +108,58 @@ public class ParseTreeMatch: CustomStringConvertible { return nodes! } - /** - * Return a mapping from label → [list of nodes]. - * - *

      The map includes special entries corresponding to the names of rules and - * tokens referenced in tags in the original pattern. For additional - * information, see the description of {@link #getAll(String)}.

      - * - * @return A mapping from labels to parse tree nodes. If the parse tree - * pattern did not contain any rule or token tags, this map will be empty. - */ - + /// + /// Return a mapping from label → [list of nodes]. + /// + /// The map includes special entries corresponding to the names of rules and + /// tokens referenced in tags in the original pattern. For additional + /// information, see the description of _#getAll(String)_. + /// + /// - Returns: A mapping from labels to parse tree nodes. If the parse tree + /// pattern did not contain any rule or token tags, this map will be empty. + /// public func getLabels() -> MultiMap { return labels } - /** - * Get the node at which we first detected a mismatch. - * - * @return the node at which we first detected a mismatch, or {@code null} - * if the match was successful. - */ - + /// + /// Get the node at which we first detected a mismatch. + /// + /// - Returns: the node at which we first detected a mismatch, or `null` + /// if the match was successful. + /// public func getMismatchedNode() -> ParseTree? { return mismatchedNode } - /** - * Gets a value indicating whether the match operation succeeded. - * - * @return {@code true} if the match operation succeeded; otherwise, - * {@code false}. - */ + /// + /// Gets a value indicating whether the match operation succeeded. + /// + /// - Returns: `true` if the match operation succeeded; otherwise, + /// `false`. + /// public func succeeded() -> Bool { return mismatchedNode == nil } - /** - * Get the tree pattern we are matching against. - * - * @return The tree pattern we are matching against. - */ - + /// + /// Get the tree pattern we are matching against. + /// + /// - Returns: The tree pattern we are matching against. + /// public func getPattern() -> ParseTreePattern { return pattern } - /** - * Get the parse tree we are trying to match to a pattern. - * - * @return The {@link org.antlr.v4.runtime.tree.ParseTree} we are trying to match to a pattern. 
- */ - + /// + /// Get the parse tree we are trying to match to a pattern. + /// + /// - Returns: The _org.antlr.v4.runtime.tree.ParseTree_ we are trying to match to a pattern. + /// public func getTree() -> ParseTree { return tree } - /** - * {@inheritDoc} - */ - public func toString() -> String { return description } diff --git a/runtime/Swift/Sources/Antlr4/tree/pattern/ParseTreePattern.swift b/runtime/Swift/Sources/Antlr4/tree/pattern/ParseTreePattern.swift index 3cce16576..d8a682257 100644 --- a/runtime/Swift/Sources/Antlr4/tree/pattern/ParseTreePattern.swift +++ b/runtime/Swift/Sources/Antlr4/tree/pattern/ParseTreePattern.swift @@ -4,45 +4,45 @@ */ -/** - * A pattern like {@code = ;} converted to a {@link org.antlr.v4.runtime.tree.ParseTree} by - * {@link org.antlr.v4.runtime.tree.pattern.ParseTreePatternMatcher#compile(String, int)}. - */ +/// +/// A pattern like ` = ;` converted to a _org.antlr.v4.runtime.tree.ParseTree_ by +/// _org.antlr.v4.runtime.tree.pattern.ParseTreePatternMatcher#compile(String, int)_. +/// public class ParseTreePattern { - /** - * This is the backing field for {@link #getPatternRuleIndex()}. - */ + /// + /// This is the backing field for _#getPatternRuleIndex()_. + /// private let patternRuleIndex: Int - /** - * This is the backing field for {@link #getPattern()}. - */ + /// + /// This is the backing field for _#getPattern()_. + /// private let pattern: String - /** - * This is the backing field for {@link #getPatternTree()}. - */ + /// + /// This is the backing field for _#getPatternTree()_. + /// private let patternTree: ParseTree - /** - * This is the backing field for {@link #getMatcher()}. - */ + /// + /// This is the backing field for _#getMatcher()_. + /// private let matcher: ParseTreePatternMatcher - /** - * Construct a new instance of the {@link org.antlr.v4.runtime.tree.pattern.ParseTreePattern} class. 
- * - * @param matcher The {@link org.antlr.v4.runtime.tree.pattern.ParseTreePatternMatcher} which created this - * tree pattern. - * @param pattern The tree pattern in concrete syntax form. - * @param patternRuleIndex The parser rule which serves as the root of the - * tree pattern. - * @param patternTree The tree pattern in {@link org.antlr.v4.runtime.tree.ParseTree} form. - */ + /// + /// Construct a new instance of the _org.antlr.v4.runtime.tree.pattern.ParseTreePattern_ class. + /// + /// - Parameter matcher: The _org.antlr.v4.runtime.tree.pattern.ParseTreePatternMatcher_ which created this + /// tree pattern. + /// - Parameter pattern: The tree pattern in concrete syntax form. + /// - Parameter patternRuleIndex: The parser rule which serves as the root of the + /// tree pattern. + /// - Parameter patternTree: The tree pattern in _org.antlr.v4.runtime.tree.ParseTree_ form. + /// public init(_ matcher: ParseTreePatternMatcher, _ pattern: String, _ patternRuleIndex: Int, _ patternTree: ParseTree) { self.matcher = matcher @@ -51,41 +51,41 @@ public class ParseTreePattern { self.patternTree = patternTree } - /** - * Match a specific parse tree against this tree pattern. - * - * @param tree The parse tree to match against this tree pattern. - * @return A {@link org.antlr.v4.runtime.tree.pattern.ParseTreeMatch} object describing the result of the - * match operation. The {@link org.antlr.v4.runtime.tree.pattern.ParseTreeMatch#succeeded()} method can be - * used to determine whether or not the match was successful. - */ + /// + /// Match a specific parse tree against this tree pattern. + /// + /// - Parameter tree: The parse tree to match against this tree pattern. + /// - Returns: A _org.antlr.v4.runtime.tree.pattern.ParseTreeMatch_ object describing the result of the + /// match operation. The _org.antlr.v4.runtime.tree.pattern.ParseTreeMatch#succeeded()_ method can be + /// used to determine whether or not the match was successful. 
+ /// public func match(_ tree: ParseTree) throws -> ParseTreeMatch { return try matcher.match(tree, self) } - /** - * Determine whether or not a parse tree matches this tree pattern. - * - * @param tree The parse tree to match against this tree pattern. - * @return {@code true} if {@code tree} is a match for the current tree - * pattern; otherwise, {@code false}. - */ + /// + /// Determine whether or not a parse tree matches this tree pattern. + /// + /// - Parameter tree: The parse tree to match against this tree pattern. + /// - Returns: `true` if `tree` is a match for the current tree + /// pattern; otherwise, `false`. + /// public func matches(_ tree: ParseTree) throws -> Bool { return try matcher.match(tree, self).succeeded() } - /** - * Find all nodes using XPath and then try to match those subtrees against - * this tree pattern. - * - * @param tree The {@link org.antlr.v4.runtime.tree.ParseTree} to match against this pattern. - * @param xpath An expression matching the nodes - * - * @return A collection of {@link org.antlr.v4.runtime.tree.pattern.ParseTreeMatch} objects describing the - * successful matches. Unsuccessful matches are omitted from the result, - * regardless of the reason for the failure. - */ + /// + /// Find all nodes using XPath and then try to match those subtrees against + /// this tree pattern. + /// + /// - Parameter tree: The _org.antlr.v4.runtime.tree.ParseTree_ to match against this pattern. + /// - Parameter xpath: An expression matching the nodes + /// + /// - Returns: A collection of _org.antlr.v4.runtime.tree.pattern.ParseTreeMatch_ objects describing the + /// successful matches. Unsuccessful matches are omitted from the result, + /// regardless of the reason for the failure. 
+ /// /*public func findAll(tree : ParseTree, _ xpath : String) -> Array { var subtrees : Array = XPath.findAll(tree, xpath, matcher.getParser()); @@ -99,45 +99,45 @@ public class ParseTreePattern { return matches; }*/ - /** - * Get the {@link org.antlr.v4.runtime.tree.pattern.ParseTreePatternMatcher} which created this tree pattern. - * - * @return The {@link org.antlr.v4.runtime.tree.pattern.ParseTreePatternMatcher} which created this tree - * pattern. - */ + /// + /// Get the _org.antlr.v4.runtime.tree.pattern.ParseTreePatternMatcher_ which created this tree pattern. + /// + /// - Returns: The _org.antlr.v4.runtime.tree.pattern.ParseTreePatternMatcher_ which created this tree + /// pattern. + /// public func getMatcher() -> ParseTreePatternMatcher { return matcher } - /** - * Get the tree pattern in concrete syntax form. - * - * @return The tree pattern in concrete syntax form. - */ + /// + /// Get the tree pattern in concrete syntax form. + /// + /// - Returns: The tree pattern in concrete syntax form. + /// public func getPattern() -> String { return pattern } - /** - * Get the parser rule which serves as the outermost rule for the tree - * pattern. - * - * @return The parser rule which serves as the outermost rule for the tree - * pattern. - */ + /// + /// Get the parser rule which serves as the outermost rule for the tree + /// pattern. + /// + /// - Returns: The parser rule which serves as the outermost rule for the tree + /// pattern. + /// public func getPatternRuleIndex() -> Int { return patternRuleIndex } - /** - * Get the tree pattern as a {@link org.antlr.v4.runtime.tree.ParseTree}. The rule and token tags from - * the pattern are present in the parse tree as terminal nodes with a symbol - * of type {@link org.antlr.v4.runtime.tree.pattern.RuleTagToken} or {@link org.antlr.v4.runtime.tree.pattern.TokenTagToken}. - * - * @return The tree pattern as a {@link org.antlr.v4.runtime.tree.ParseTree}. 
- */ + /// + /// Get the tree pattern as a _org.antlr.v4.runtime.tree.ParseTree_. The rule and token tags from + /// the pattern are present in the parse tree as terminal nodes with a symbol + /// of type _org.antlr.v4.runtime.tree.pattern.RuleTagToken_ or _org.antlr.v4.runtime.tree.pattern.TokenTagToken_. + /// + /// - Returns: The tree pattern as a _org.antlr.v4.runtime.tree.ParseTree_. + /// public func getPatternTree() -> ParseTree { return patternTree diff --git a/runtime/Swift/Sources/Antlr4/tree/pattern/ParseTreePatternMatcher.swift b/runtime/Swift/Sources/Antlr4/tree/pattern/ParseTreePatternMatcher.swift index cb42b5c52..3b78b7fe8 100644 --- a/runtime/Swift/Sources/Antlr4/tree/pattern/ParseTreePatternMatcher.swift +++ b/runtime/Swift/Sources/Antlr4/tree/pattern/ParseTreePatternMatcher.swift @@ -5,126 +5,108 @@ -/** - * A tree pattern matching mechanism for ANTLR {@link org.antlr.v4.runtime.tree.ParseTree}s. - * - *

      Patterns are strings of source input text with special tags representing - * token or rule references such as:

      - * - *

      {@code = ;}

      - * - *

      Given a pattern start rule such as {@code statement}, this object constructs - * a {@link org.antlr.v4.runtime.tree.ParseTree} with placeholders for the {@code ID} and {@code expr} - * subtree. Then the {@link #match} routines can compare an actual - * {@link org.antlr.v4.runtime.tree.ParseTree} from a parse with this pattern. Tag {@code } matches - * any {@code ID} token and tag {@code } references the result of the - * {@code expr} rule (generally an instance of {@code ExprContext}.

      - * - *

      Pattern {@code x = 0;} is a similar pattern that matches the same pattern - * except that it requires the identifier to be {@code x} and the expression to - * be {@code 0}.

      - * - *

      The {@link #matches} routines return {@code true} or {@code false} based - * upon a match for the tree rooted at the parameter sent in. The - * {@link #match} routines return a {@link org.antlr.v4.runtime.tree.pattern.ParseTreeMatch} object that - * contains the parse tree, the parse tree pattern, and a map from tag name to - * matched nodes (more below). A subtree that fails to match, returns with - * {@link org.antlr.v4.runtime.tree.pattern.ParseTreeMatch#mismatchedNode} set to the first tree node that did not - * match.

      - * - *

      For efficiency, you can compile a tree pattern in string form to a - * {@link org.antlr.v4.runtime.tree.pattern.ParseTreePattern} object.

      - * - *

      See {@code TestParseTreeMatcher} for lots of examples. - * {@link org.antlr.v4.runtime.tree.pattern.ParseTreePattern} has two static helper methods: - * {@link org.antlr.v4.runtime.tree.pattern.ParseTreePattern#findAll} and {@link org.antlr.v4.runtime.tree.pattern.ParseTreePattern#match} that - * are easy to use but not super efficient because they create new - * {@link org.antlr.v4.runtime.tree.pattern.ParseTreePatternMatcher} objects each time and have to compile the - * pattern in string form before using it.

      - * - *

      The lexer and parser that you pass into the {@link org.antlr.v4.runtime.tree.pattern.ParseTreePatternMatcher} - * constructor are used to parse the pattern in string form. The lexer converts - * the {@code = ;} into a sequence of four tokens (assuming lexer - * throws out whitespace or puts it on a hidden channel). Be aware that the - * input stream is reset for the lexer (but not the parser; a - * {@link org.antlr.v4.runtime.ParserInterpreter} is created to parse the input.). Any user-defined - * fields you have put into the lexer might get changed when this mechanism asks - * it to scan the pattern string.

      - * - *

      Normally a parser does not accept token {@code } as a valid - * {@code expr} but, from the parser passed in, we create a special version of - * the underlying grammar representation (an {@link org.antlr.v4.runtime.atn.ATN}) that allows imaginary - * tokens representing rules ({@code }) to match entire rules. We call - * these bypass alternatives.

      - * - *

      Delimiters are {@code <} and {@code >}, with {@code \} as the escape string - * by default, but you can set them to whatever you want using - * {@link #setDelimiters}. You must escape both start and stop strings - * {@code \<} and {@code \>}.

      - */ +/// +/// A tree pattern matching mechanism for ANTLR _org.antlr.v4.runtime.tree.ParseTree_s. +/// +/// Patterns are strings of source input text with special tags representing +/// token or rule references such as: +/// +/// ` = ;` +/// +/// Given a pattern start rule such as `statement`, this object constructs +/// a _org.antlr.v4.runtime.tree.ParseTree_ with placeholders for the `ID` and `expr` +/// subtree. Then the _#match_ routines can compare an actual +/// _org.antlr.v4.runtime.tree.ParseTree_ from a parse with this pattern. Tag `` matches +/// any `ID` token and tag `` references the result of the +/// `expr` rule (generally an instance of `ExprContext`. +/// +/// Pattern `x = 0;` is a similar pattern that matches the same pattern +/// except that it requires the identifier to be `x` and the expression to +/// be `0`. +/// +/// The _#matches_ routines return `true` or `false` based +/// upon a match for the tree rooted at the parameter sent in. The +/// _#match_ routines return a _org.antlr.v4.runtime.tree.pattern.ParseTreeMatch_ object that +/// contains the parse tree, the parse tree pattern, and a map from tag name to +/// matched nodes (more below). A subtree that fails to match, returns with +/// _org.antlr.v4.runtime.tree.pattern.ParseTreeMatch#mismatchedNode_ set to the first tree node that did not +/// match. +/// +/// For efficiency, you can compile a tree pattern in string form to a +/// _org.antlr.v4.runtime.tree.pattern.ParseTreePattern_ object. +/// +/// See `TestParseTreeMatcher` for lots of examples. 
+/// _org.antlr.v4.runtime.tree.pattern.ParseTreePattern_ has two static helper methods: +/// _org.antlr.v4.runtime.tree.pattern.ParseTreePattern#findAll_ and _org.antlr.v4.runtime.tree.pattern.ParseTreePattern#match_ that +/// are easy to use but not super efficient because they create new +/// _org.antlr.v4.runtime.tree.pattern.ParseTreePatternMatcher_ objects each time and have to compile the +/// pattern in string form before using it. +/// +/// The lexer and parser that you pass into the _org.antlr.v4.runtime.tree.pattern.ParseTreePatternMatcher_ +/// constructor are used to parse the pattern in string form. The lexer converts +/// the ` = ;` into a sequence of four tokens (assuming lexer +/// throws out whitespace or puts it on a hidden channel). Be aware that the +/// input stream is reset for the lexer (but not the parser; a +/// _org.antlr.v4.runtime.ParserInterpreter_ is created to parse the input.). Any user-defined +/// fields you have put into the lexer might get changed when this mechanism asks +/// it to scan the pattern string. +/// +/// Normally a parser does not accept token `` as a valid +/// `expr` but, from the parser passed in, we create a special version of +/// the underlying grammar representation (an _org.antlr.v4.runtime.atn.ATN_) that allows imaginary +/// tokens representing rules (``) to match entire rules. We call +/// these __bypass alternatives__. +/// +/// Delimiters are `<` and `>`, with `\` as the escape string +/// by default, but you can set them to whatever you want using +/// _#setDelimiters_. You must escape both start and stop strings +/// `\<` and `\>`. 
+/// public class ParseTreePatternMatcher { -// public class CannotInvokeStartRule : RuntimeException { -// public convenience init(_ e : Throwable) { -// super.init(e); -// } -// } -// -// // Fixes https://github.com/antlr/antlr4/issues/413 -// // "Tree pattern compilation doesn't check for a complete parse" -// public class StartRuleDoesNotConsumeFullPattern : RuntimeException { -// } - /** - * This is the backing field for {@link #getLexer()}. - */ - private final var lexer: Lexer + /// + /// This is the backing field for _#getLexer()_. + /// + private final let lexer: Lexer - /** - * This is the backing field for {@link #getParser()}. - */ - private final var parser: Parser + /// + /// This is the backing field for _#getParser()_. + /// + private final let parser: Parser internal var start: String = "<" internal var stop: String = ">" internal var escape: String = "\\" - // e.g., \< and \> must escape BOTH! - /** - * Constructs a {@link org.antlr.v4.runtime.tree.pattern.ParseTreePatternMatcher} or from a {@link org.antlr.v4.runtime.Lexer} and - * {@link org.antlr.v4.runtime.Parser} object. The lexer input stream is altered for tokenizing - * the tree patterns. The parser is used as a convenient mechanism to get - * the grammar name, plus token, rule names. - */ + /// + /// Constructs a _org.antlr.v4.runtime.tree.pattern.ParseTreePatternMatcher_ or from a _org.antlr.v4.runtime.Lexer_ and + /// _org.antlr.v4.runtime.Parser_ object. The lexer input stream is altered for tokenizing + /// the tree patterns. The parser is used as a convenient mechanism to get + /// the grammar name, plus token, rule names. + /// public init(_ lexer: Lexer, _ parser: Parser) { self.lexer = lexer self.parser = parser } - /** - * Set the delimiters used for marking rule and token tags within concrete - * syntax used by the tree pattern parser. - * - * @param start The start delimiter. - * @param stop The stop delimiter. 
- * @param escapeLeft The escape sequence to use for escaping a start or stop delimiter. - * - * @exception IllegalArgumentException if {@code start} is {@code null} or empty. - * @exception IllegalArgumentException if {@code stop} is {@code null} or empty. - */ + /// + /// Set the delimiters used for marking rule and token tags within concrete + /// syntax used by the tree pattern parser. + /// + /// - Parameter start: The start delimiter. + /// - Parameter stop: The stop delimiter. + /// - Parameter escapeLeft: The escape sequence to use for escaping a start or stop delimiter. + /// + /// - Throws: ANTLRError.illegalArgument if `start` is `null` or empty. + /// - Throws: ANTLRError.illegalArgument if `stop` is `null` or empty. + /// public func setDelimiters(_ start: String, _ stop: String, _ escapeLeft: String) throws { - //start == nil || if start.isEmpty { throw ANTLRError.illegalArgument(msg: "start cannot be null or empty") - // RuntimeException("start cannot be null or empty") - //throwException() /* throw IllegalArgumentException("start cannot be null or empty"); */ } - //stop == nil || if stop.isEmpty { throw ANTLRError.illegalArgument(msg: "stop cannot be null or empty") - //RuntimeException("stop cannot be null or empty") - - //throwException() /* throw IllegalArgumentException("stop cannot be null or empty"); */ } self.start = start
- */ + /// + /// Does `pattern` matched as rule patternRuleIndex match tree? Pass in a + /// compiled pattern instead of a string representation of a tree pattern. + /// public func matches(_ tree: ParseTree, _ pattern: ParseTreePattern) throws -> Bool { let labels: MultiMap = MultiMap() let mismatchedNode: ParseTree? = try matchImpl(tree, pattern.getPatternTree(), labels) return mismatchedNode == nil } - /** - * Compare {@code pattern} matched as rule {@code patternRuleIndex} against - * {@code tree} and return a {@link org.antlr.v4.runtime.tree.pattern.ParseTreeMatch} object that contains the - * matched elements, or the node at which the match failed. - */ + /// + /// Compare `pattern` matched as rule `patternRuleIndex` against + /// `tree` and return a _org.antlr.v4.runtime.tree.pattern.ParseTreeMatch_ object that contains the + /// matched elements, or the node at which the match failed. + /// public func match(_ tree: ParseTree, _ pattern: String, _ patternRuleIndex: Int) throws -> ParseTreeMatch { let p: ParseTreePattern = try compile(pattern, patternRuleIndex) return try match(tree, p) } - /** - * Compare {@code pattern} matched against {@code tree} and return a - * {@link org.antlr.v4.runtime.tree.pattern.ParseTreeMatch} object that contains the matched elements, or the - * node at which the match failed. Pass in a compiled pattern instead of a - * string representation of a tree pattern. - */ - + /// + /// Compare `pattern` matched against `tree` and return a + /// _org.antlr.v4.runtime.tree.pattern.ParseTreeMatch_ object that contains the matched elements, or the + /// node at which the match failed. Pass in a compiled pattern instead of a + /// string representation of a tree pattern. + /// public func match(_ tree: ParseTree, _ pattern: ParseTreePattern) throws -> ParseTreeMatch { let labels: MultiMap = MultiMap() let mismatchedNode: ParseTree? 
= try matchImpl(tree, pattern.getPatternTree(), labels) return ParseTreeMatch(tree, pattern, labels, mismatchedNode) } - /** - * For repeated use of a tree pattern, compile it to a - * {@link org.antlr.v4.runtime.tree.pattern.ParseTreePattern} using this method. - */ + /// + /// For repeated use of a tree pattern, compile it to a + /// _org.antlr.v4.runtime.tree.pattern.ParseTreePattern_ using this method. + /// public func compile(_ pattern: String, _ patternRuleIndex: Int) throws -> ParseTreePattern { - let tokenList: Array = try tokenize(pattern) - let tokenSrc: ListTokenSource = ListTokenSource(tokenList) - let tokens: CommonTokenStream = CommonTokenStream(tokenSrc) + let tokenList = try tokenize(pattern) + let tokenSrc = ListTokenSource(tokenList) + let tokens = CommonTokenStream(tokenSrc) - let parserInterp: ParserInterpreter = try ParserInterpreter(parser.getGrammarFileName(), + let parserInterp = try ParserInterpreter(parser.getGrammarFileName(), parser.getVocabulary(), parser.getRuleNames(), parser.getATNWithBypassAlts(), tokens) - var tree: ParseTree //= nil; - //TODO: exception handler - //try { parserInterp.setErrorHandler(BailErrorStrategy()) - tree = try parserInterp.parse(patternRuleIndex) -// print("pattern tree = "+tree.toStringTree(parserInterp)); -// } -// catch (ParseCancellationException e) { -// throwException() /* throw e.getCause() as RecognitionException; */ -// } -// catch (RecognitionException re) { -// throwException() /* throw re; */ -// } -// catch (Exception e) { -// throwException() /* throw CannotInvokeStartRule(e); */ -// } + let tree = try parserInterp.parse(patternRuleIndex) // Make sure tree pattern compilation checks for a complete parse if try tokens.LA(1) != CommonToken.EOF { throw ANTLRError.illegalState(msg: "Tree pattern compilation doesn't check for a complete parse") - // RuntimeException("Tree pattern compilation doesn't check for a complete parse") - //throw ANTLRException.StartRuleDoesNotConsumeFullPattern - 
//throwException() /* throw StartRuleDoesNotConsumeFullPattern(); */ } return ParseTreePattern(self, pattern, patternRuleIndex, tree) } - /** - * Used to convert the tree pattern string into a series of tokens. The - * input stream is reset. - */ - + /// + /// Used to convert the tree pattern string into a series of tokens. The + /// input stream is reset. + /// public func getLexer() -> Lexer { return lexer } - /** - * Used to collect to the grammar file name, token names, rule names for - * used to parse the pattern into a parse tree. - */ - + /// + /// Used to collect to the grammar file name, token names, rule names for + /// used to parse the pattern into a parse tree. + /// public func getParser() -> Parser { return parser } // ---- SUPPORT CODE ---- - /** - * Recursively walk {@code tree} against {@code patternTree}, filling - * {@code match.}{@link org.antlr.v4.runtime.tree.pattern.ParseTreeMatch#labels labels}. - * - * @return the first node encountered in {@code tree} which does not match - * a corresponding node in {@code patternTree}, or {@code null} if the match - * was successful. The specific node returned depends on the matching - * algorithm used by the implementation, and may be overridden. - */ - + /// + /// Recursively walk `tree` against `patternTree`, filling + /// `match.`_org.antlr.v4.runtime.tree.pattern.ParseTreeMatch#labels labels_. + /// + /// - Returns: the first node encountered in `tree` which does not match + /// a corresponding node in `patternTree`, or `null` if the match + /// was successful. The specific node returned depends on the matching + /// algorithm used by the implementation, and may be overridden. + /// internal func matchImpl(_ tree: ParseTree, _ patternTree: ParseTree, _ labels: MultiMap) throws -> ParseTree? { // x and , x and y, or x and x; or could be mismatched types if tree is TerminalNode && patternTree is TerminalNode { - let t1: TerminalNode = tree as! TerminalNode - let t2: TerminalNode = patternTree as! 
TerminalNode + let t1 = tree as! TerminalNode + let t2 = patternTree as! TerminalNode var mismatchedNode: ParseTree? = nil // both are tokens and they have same type if t1.getSymbol()!.getType() == t2.getSymbol()!.getType() { @@ -287,7 +251,6 @@ public class ParseTreePatternMatcher { var mismatchedNode: ParseTree? = nil // (expr ...) and if let ruleTagToken = getRuleTagToken(r2) { - //var m : ParseTreeMatch? = nil; if r1.getRuleContext().getRuleIndex() == r2.getRuleContext().getRuleIndex() { // track label->list-of-nodes for both rule name and label (if any) labels.map(ruleTagToken.getRuleName(), tree) @@ -312,11 +275,8 @@ public class ParseTreePatternMatcher { return mismatchedNode } - let n: Int = r1.getChildCount() - for i in 0..)} subtree? */ + /// Is `t` `(expr )` subtree? internal func getRuleTagToken(_ t: ParseTree) -> RuleTagToken? { - if t is RuleNode { - let r: RuleNode = t as! RuleNode + if let r = t as? RuleNode { if r.getChildCount() == 1 && r.getChild(0) is TerminalNode { - let c: TerminalNode = r.getChild(0) as! TerminalNode + let c = r.getChild(0) as! TerminalNode if c.getSymbol() is RuleTagToken { // print("rule tag subtree "+t.toStringTree(parser)); return c.getSymbol() as? RuleTagToken @@ -345,26 +304,23 @@ public class ParseTreePatternMatcher { public func tokenize(_ pattern: String) throws -> Array { // split pattern into chunks: sea (raw input) and islands (, ) - let chunks: Array = try split(pattern) + let chunks = try split(pattern) // create token stream from text and tags - var tokens: Array = Array() - for chunk: Chunk in chunks { - if chunk is TagChunk { - let tagChunk: TagChunk = chunk as! TagChunk + var tokens = [Token]() + for chunk in chunks { + if let tagChunk = chunk as? 
TagChunk { // add special rule token or conjure up new token from name let firstStr = String(tagChunk.getTag()[0]) if firstStr.lowercased() != firstStr { - //if ( Character.isUpperCase(tagChunk.getTag().charAt(0)) ) { - let ttype: Int = parser.getTokenType(tagChunk.getTag()) + let ttype = parser.getTokenType(tagChunk.getTag()) if ttype == CommonToken.INVALID_TYPE { throw ANTLRError.illegalArgument(msg: "Unknown token " + tagChunk.getTag() + " in pattern: " + pattern) } - let t: TokenTagToken = TokenTagToken(tagChunk.getTag(), ttype, tagChunk.getLabel()) + let t = TokenTagToken(tagChunk.getTag(), ttype, tagChunk.getLabel()) tokens.append(t) } else { if firstStr.uppercased() != firstStr { - // if ( Character.isLowerCase(tagChunk.getTag().charAt(0)) ) { let ruleIndex: Int = parser.getRuleIndex(tagChunk.getTag()) if ruleIndex == -1 { throw ANTLRError.illegalArgument(msg: "Unknown rule " + tagChunk.getTag() + " in pattern: " + pattern) @@ -376,10 +332,10 @@ public class ParseTreePatternMatcher { } } } else { - let textChunk: TextChunk = chunk as! TextChunk - let inputStream: ANTLRInputStream = ANTLRInputStream(textChunk.getText()) + let textChunk = chunk as! TextChunk + let inputStream = ANTLRInputStream(textChunk.getText()) try lexer.setInputStream(inputStream) - var t: Token = try lexer.nextToken() + var t = try lexer.nextToken() while t.getType() != CommonToken.EOF { tokens.append(t) t = try lexer.nextToken() @@ -391,34 +347,33 @@ public class ParseTreePatternMatcher { return tokens } - /** Split {@code = ;} into 4 chunks for tokenizing by {@link #tokenize}. */ - public func split(_ pattern: String) throws -> Array { - var p: Int = 0 - let n: Int = pattern.length - var chunks: Array = Array() - //var buf : StringBuilder = StringBuilder(); + /// + /// Split ` = ;` into 4 chunks for tokenizing by _#tokenize_. 
+ /// + public func split(_ pattern: String) throws -> [Chunk] { + var p = 0 + let n = pattern.length + var chunks = [Chunk]() // find all start and stop indexes first, then collect - var starts: Array = Array() - var stops: Array = Array() + var starts = [Int]() + var stops = [Int]() while p < n { if p == pattern.indexOf(escape + start, startIndex: p) { p += escape.length + start.length - } else { - if p == pattern.indexOf(escape + stop, startIndex: p) { - p += escape.length + stop.length - } else { - if p == pattern.indexOf(start, startIndex: p) { - starts.append(p) - p += start.length - } else { - if p == pattern.indexOf(stop, startIndex: p) { - stops.append(p) - p += stop.length - } else { - p += 1 - } - } - } + } + else if p == pattern.indexOf(escape + stop, startIndex: p) { + p += escape.length + stop.length + } + else if p == pattern.indexOf(start, startIndex: p) { + starts.append(p) + p += start.length + } + else if p == pattern.indexOf(stop, startIndex: p) { + stops.append(p) + p += stop.length + } + else { + p += 1 } } @@ -430,58 +385,55 @@ public class ParseTreePatternMatcher { throw ANTLRError.illegalArgument(msg: "missing start tag in pattern: " + pattern) } - let ntags: Int = starts.count + let ntags = starts.count for i in 0.. 0 && starts[0] > 0 { // copy text up to first tag into chunks - let text: String = pattern[0 ..< starts[0]] //; substring(0, starts.get(0)); + let text = pattern[0 ..< starts[0]] chunks.append(TextChunk(text)) } - for i in 0.. 
- let tag: String = pattern[starts[i] + start.length ..< stops[i]] // pattern.substring(starts.get(i) + start.length(), stops.get(i)); - var ruleOrToken: String = tag - var label: String = "" - let colon: Int = tag.indexOf(":") + let tag = pattern[starts[i] + start.length ..< stops[i]] + var ruleOrToken = tag + var label = "" + let colon = tag.indexOf(":") if colon >= 0 { - label = tag[0 ..< colon] //(0,colon); - ruleOrToken = tag[colon + 1 ..< tag.length] //(colon+1, tag.length()); + label = tag[0 ..< colon] + ruleOrToken = tag[colon + 1 ..< tag.length] } chunks.append(try TagChunk(label, ruleOrToken)) if i + 1 < ntags { // copy from end of to start of next - let text: String = pattern[stops[i] + stop.length ..< starts[i] + 1] //.substring(stops.get(i) + stop.length(), starts.get(i + 1)); + let text = pattern[stops[i] + stop.length ..< starts[i] + 1] chunks.append(TextChunk(text)) } } if ntags > 0 { - let afterLastTag: Int = stops[ntags - 1] + stop.length + let afterLastTag = stops[ntags - 1] + stop.length if afterLastTag < n { // copy text from end of last tag to end - let text: String = pattern[afterLastTag ..< n] //.substring(afterLastTag, n); + let text = pattern[afterLastTag ..< n] chunks.append(TextChunk(text)) } } // strip out the escape sequences from text chunks but not tags - let length = chunks.count - for i in 0..}. These tokens are created for {@link org.antlr.v4.runtime.tree.pattern.TagChunk} - * chunks where the tag corresponds to a parser rule. - */ +/// +/// A _org.antlr.v4.runtime.Token_ object representing an entire subtree matched by a parser +/// rule; e.g., ``. These tokens are created for _org.antlr.v4.runtime.tree.pattern.TagChunk_ +/// chunks where the tag corresponds to a parser rule. +/// public class RuleTagToken: Token, CustomStringConvertible { - /** - * This is the backing field for {@link #getRuleName}. - */ + /// + /// This is the backing field for _#getRuleName_. 
+ /// private final var ruleName: String - /** - * The token type for the current token. This is the token type assigned to - * the bypass alternative for the rule during ATN deserialization. - */ + /// + /// The token type for the current token. This is the token type assigned to + /// the bypass alternative for the rule during ATN deserialization. + /// private final var bypassTokenType: Int - /** - * This is the backing field for {@link #getLabel}. - */ + /// + /// This is the backing field for _#getLabel_. + /// private final var label: String? - public var visited: Bool = false + public var visited = false - /** - * Constructs a new instance of {@link org.antlr.v4.runtime.tree.pattern.RuleTagToken} with the specified rule - * name and bypass token type and no label. - * - * @param ruleName The name of the parser rule this rule tag matches. - * @param bypassTokenType The bypass token type assigned to the parser rule. - * - * @exception IllegalArgumentException if {@code ruleName} is {@code null} - * or empty. - */ + /// + /// Constructs a new instance of _org.antlr.v4.runtime.tree.pattern.RuleTagToken_ with the specified rule + /// name and bypass token type and no label. + /// + /// - Parameter ruleName: The name of the parser rule this rule tag matches. + /// - Parameter bypassTokenType: The bypass token type assigned to the parser rule. + /// + /// - Throws: ANTLRError.illegalArgument if `ruleName` is `null` + /// or empty. + /// public convenience init(_ ruleName: String, _ bypassTokenType: Int) { self.init(ruleName, bypassTokenType, nil) } - /** - * Constructs a new instance of {@link org.antlr.v4.runtime.tree.pattern.RuleTagToken} with the specified rule - * name, bypass token type, and label. - * - * @param ruleName The name of the parser rule this rule tag matches. - * @param bypassTokenType The bypass token type assigned to the parser rule. - * @param label The label associated with the rule tag, or {@code null} if - * the rule tag is unlabeled. 
- * - * @exception IllegalArgumentException if {@code ruleName} is {@code null} - * or empty. - */ + /// + /// Constructs a new instance of _org.antlr.v4.runtime.tree.pattern.RuleTagToken_ with the specified rule + /// name, bypass token type, and label. + /// + /// - Parameter ruleName: The name of the parser rule this rule tag matches. + /// - Parameter bypassTokenType: The bypass token type assigned to the parser rule. + /// - Parameter label: The label associated with the rule tag, or `null` if + /// the rule tag is unlabeled. + /// + /// - Throws: ANTLRError.illegalArgument if `ruleName` is `null` + /// or empty. + /// public init(_ ruleName: String, _ bypassTokenType: Int, _ label: String?) { - - self.ruleName = ruleName self.bypassTokenType = bypassTokenType self.label = label } - /** - * Gets the name of the rule associated with this rule tag. - * - * @return The name of the parser rule associated with this rule tag. - */ - + /// + /// Gets the name of the rule associated with this rule tag. + /// + /// - Returns: The name of the parser rule associated with this rule tag. + /// public final func getRuleName() -> String { return ruleName } - /** - * Gets the label associated with the rule tag. - * - * @return The name of the label associated with the rule tag, or - * {@code null} if this is an unlabeled rule tag. - */ - + /// + /// Gets the label associated with the rule tag. + /// + /// - Returns: The name of the label associated with the rule tag, or + /// `null` if this is an unlabeled rule tag. + /// public final func getLabel() -> String? { return label } - /** - * {@inheritDoc} - * - *

      Rule tag tokens are always placed on the {@link #DEFAULT_CHANNEL}.

      - */ - + /// + /// Rule tag tokens are always placed on the _#DEFAULT_CHANNEL_. + /// public func getChannel() -> Int { return RuleTagToken.DEFAULT_CHANNEL } - /** - * {@inheritDoc} - * - *

      This method returns the rule tag formatted with {@code <} and {@code >} - * delimiters.

      - */ - + /// + /// This method returns the rule tag formatted with `<` and `>` + /// delimiters. + /// public func getText() -> String? { - if label != nil { - return "<" + label! + ":" + ruleName + ">" + if let label = label { + return "<\(label):\(ruleName)>" } - - return "<" + ruleName + ">" + return "<\(ruleName)>" } - /** - * {@inheritDoc} - * - *

      Rule tag tokens have types assigned according to the rule bypass - * transitions created during ATN deserialization.

      - */ - + /// + /// Rule tag tokens have types assigned according to the rule bypass + /// transitions created during ATN deserialization. + /// public func getType() -> Int { return bypassTokenType } - /** - * {@inheritDoc} - * - *

      The implementation for {@link org.antlr.v4.runtime.tree.pattern.RuleTagToken} always returns 0.

      - */ - + /// + /// The implementation for _org.antlr.v4.runtime.tree.pattern.RuleTagToken_ always returns 0. + /// public func getLine() -> Int { return 0 } - /** - * {@inheritDoc} - * - *

      The implementation for {@link org.antlr.v4.runtime.tree.pattern.RuleTagToken} always returns -1.

      - */ - + /// + /// The implementation for _org.antlr.v4.runtime.tree.pattern.RuleTagToken_ always returns -1. + /// public func getCharPositionInLine() -> Int { return -1 } - /** - * {@inheritDoc} - * - *

      The implementation for {@link org.antlr.v4.runtime.tree.pattern.RuleTagToken} always returns -1.

      - */ - + /// + /// + /// + /// The implementation for _org.antlr.v4.runtime.tree.pattern.RuleTagToken_ always returns -1. + /// public func getTokenIndex() -> Int { return -1 } - /** - * {@inheritDoc} - * - *

      The implementation for {@link org.antlr.v4.runtime.tree.pattern.RuleTagToken} always returns -1.

      - */ - + /// + /// The implementation for _org.antlr.v4.runtime.tree.pattern.RuleTagToken_ always returns -1. + /// public func getStartIndex() -> Int { return -1 } - /** - * {@inheritDoc} - * - *

      The implementation for {@link org.antlr.v4.runtime.tree.pattern.RuleTagToken} always returns -1.

      - */ - + /// + /// The implementation for _org.antlr.v4.runtime.tree.pattern.RuleTagToken_ always returns -1. + /// public func getStopIndex() -> Int { return -1 } - /** - * {@inheritDoc} - * - *

      The implementation for {@link org.antlr.v4.runtime.tree.pattern.RuleTagToken} always returns {@code null}.

      - */ - + /// + /// The implementation for _org.antlr.v4.runtime.tree.pattern.RuleTagToken_ always returns `null`. + /// public func getTokenSource() -> TokenSource? { return nil } - /** - * {@inheritDoc} - * - *

      The implementation for {@link org.antlr.v4.runtime.tree.pattern.RuleTagToken} always returns {@code null}.

      - */ - + /// + /// The implementation for _org.antlr.v4.runtime.tree.pattern.RuleTagToken_ always returns `null`. + /// public func getInputStream() -> CharStream? { return nil } - /** - * {@inheritDoc} - * - *

      The implementation for {@link org.antlr.v4.runtime.tree.pattern.RuleTagToken} returns a string of the form - * {@code ruleName:bypassTokenType}.

      - */ - + public func getTokenSourceAndStream() -> TokenSourceAndStream { + return TokenSourceAndStream.EMPTY + } + /// + /// The implementation for _org.antlr.v4.runtime.tree.pattern.RuleTagToken_ returns a string of the form + /// `ruleName:bypassTokenType`. + /// public var description: String { return ruleName + ":" + String(bypassTokenType) } diff --git a/runtime/Swift/Sources/Antlr4/tree/pattern/TagChunk.swift b/runtime/Swift/Sources/Antlr4/tree/pattern/TagChunk.swift index 0538f6dc6..f4765712c 100644 --- a/runtime/Swift/Sources/Antlr4/tree/pattern/TagChunk.swift +++ b/runtime/Swift/Sources/Antlr4/tree/pattern/TagChunk.swift @@ -4,57 +4,54 @@ */ -/** - * Represents a placeholder tag in a tree pattern. A tag can have any of the - * following forms. - * - *
        - *
      • {@code expr}: An unlabeled placeholder for a parser rule {@code expr}.
      • - *
      • {@code ID}: An unlabeled placeholder for a token of type {@code ID}.
      • - *
      • {@code e:expr}: A labeled placeholder for a parser rule {@code expr}.
      • - *
      • {@code id:ID}: A labeled placeholder for a token of type {@code ID}.
      • - *
      - * - * This class does not perform any validation on the tag or label names aside - * from ensuring that the tag is a non-null, non-empty string. - */ - +/// +/// Represents a placeholder tag in a tree pattern. A tag can have any of the +/// following forms. +/// +/// * `expr`: An unlabeled placeholder for a parser rule `expr`. +/// * `ID`: An unlabeled placeholder for a token of type `ID`. +/// * `e:expr`: A labeled placeholder for a parser rule `expr`. +/// * `id:ID`: A labeled placeholder for a token of type `ID`. +/// +/// This class does not perform any validation on the tag or label names aside +/// from ensuring that the tag is a non-null, non-empty string. +/// public class TagChunk: Chunk, CustomStringConvertible { - /** - * This is the backing field for {@link #getTag}. - */ + /// + /// This is the backing field for _#getTag_. + /// private let tag: String - /** - * This is the backing field for {@link #getLabel}. - */ + /// + /// This is the backing field for _#getLabel_. + /// private let label: String? - /** - * Construct a new instance of {@link org.antlr.v4.runtime.tree.pattern.TagChunk} using the specified tag and - * no label. - * - * @param tag The tag, which should be the name of a parser rule or token - * type. - * - * @exception IllegalArgumentException if {@code tag} is {@code null} or - * empty. - */ + /// + /// Construct a new instance of _org.antlr.v4.runtime.tree.pattern.TagChunk_ using the specified tag and + /// no label. + /// + /// - Parameter tag: The tag, which should be the name of a parser rule or token + /// type. + /// + /// - Throws: ANTLRError.illegalArgument if `tag` is `null` or + /// empty. + /// public convenience init(_ tag: String) throws { try self.init(nil, tag) } - /** - * Construct a new instance of {@link org.antlr.v4.runtime.tree.pattern.TagChunk} using the specified label - * and tag. - * - * @param label The label for the tag. 
If this is {@code null}, the - * {@link org.antlr.v4.runtime.tree.pattern.TagChunk} represents an unlabeled tag. - * @param tag The tag, which should be the name of a parser rule or token - * type. - * - * @exception IllegalArgumentException if {@code tag} is {@code null} or - * empty. - */ + /// + /// Construct a new instance of _org.antlr.v4.runtime.tree.pattern.TagChunk_ using the specified label + /// and tag. + /// + /// - Parameter label: The label for the tag. If this is `null`, the + /// _org.antlr.v4.runtime.tree.pattern.TagChunk_ represents an unlabeled tag. + /// - Parameter tag: The tag, which should be the name of a parser rule or token + /// type. + /// + /// - Throws: ANTLRError.illegalArgument if `tag` is `null` or + /// empty. + /// public init(_ label: String?, _ tag: String) throws { self.label = label @@ -65,34 +62,30 @@ public class TagChunk: Chunk, CustomStringConvertible { } } - /** - * Get the tag for this chunk. - * - * @return The tag for the chunk. - */ - + /// + /// Get the tag for this chunk. + /// + /// - Returns: The tag for the chunk. + /// public final func getTag() -> String { return tag } - /** - * Get the label, if any, assigned to this chunk. - * - * @return The label assigned to this chunk, or {@code null} if no label is - * assigned to the chunk. - */ - + /// + /// Get the label, if any, assigned to this chunk. + /// + /// - Returns: The label assigned to this chunk, or `null` if no label is + /// assigned to the chunk. + /// public final func getLabel() -> String? { return label } - /** - * This method returns a text representation of the tag chunk. Labeled tags - * are returned in the form {@code label:tag}, and unlabeled tags are - * returned as just the tag name. - */ - - + /// + /// This method returns a text representation of the tag chunk. Labeled tags + /// are returned in the form `label:tag`, and unlabeled tags are + /// returned as just the tag name. 
+ /// public var description: String { if label != nil { return label! + ":" + tag diff --git a/runtime/Swift/Sources/Antlr4/tree/pattern/TextChunk.swift b/runtime/Swift/Sources/Antlr4/tree/pattern/TextChunk.swift index 6d4ed317e..d3072b38c 100644 --- a/runtime/Swift/Sources/Antlr4/tree/pattern/TextChunk.swift +++ b/runtime/Swift/Sources/Antlr4/tree/pattern/TextChunk.swift @@ -4,44 +4,44 @@ */ -/** - * Represents a span of raw text (concrete syntax) between tags in a tree - * pattern string. - */ +/// +/// Represents a span of raw text (concrete syntax) between tags in a tree +/// pattern string. +/// public class TextChunk: Chunk, CustomStringConvertible { - /** - * This is the backing field for {@link #getText}. - */ + /// + /// This is the backing field for _#getText_. + /// private let text: String - /** - * Constructs a new instance of {@link org.antlr.v4.runtime.tree.pattern.TextChunk} with the specified text. - * - * @param text The text of this chunk. - * @exception IllegalArgumentException if {@code text} is {@code null}. - */ + /// + /// Constructs a new instance of _org.antlr.v4.runtime.tree.pattern.TextChunk_ with the specified text. + /// + /// - Parameter text: The text of this chunk. + /// - Throws: ANTLRError.illegalArgument if `text` is `null`. + /// public init(_ text: String) { self.text = text } - /** - * Gets the raw text of this chunk. - * - * @return The text of the chunk. - */ + /// + /// Gets the raw text of this chunk. + /// + /// - Returns: The text of the chunk. + /// public final func getText() -> String { return text } - /** - * {@inheritDoc} - * - *

      The implementation for {@link org.antlr.v4.runtime.tree.pattern.TextChunk} returns the result of - * {@link #getText()} in single quotes.

      - */ + /// + /// + /// + /// The implementation for _org.antlr.v4.runtime.tree.pattern.TextChunk_ returns the result of + /// _#getText()_ in single quotes. + /// public var description: String { diff --git a/runtime/Swift/Sources/Antlr4/tree/pattern/TokenTagToken.swift b/runtime/Swift/Sources/Antlr4/tree/pattern/TokenTagToken.swift index 490b8e386..2b2dee261 100644 --- a/runtime/Swift/Sources/Antlr4/tree/pattern/TokenTagToken.swift +++ b/runtime/Swift/Sources/Antlr4/tree/pattern/TokenTagToken.swift @@ -5,44 +5,44 @@ -/** - * A {@link org.antlr.v4.runtime.Token} object representing a token of a particular type; e.g., - * {@code }. These tokens are created for {@link org.antlr.v4.runtime.tree.pattern.TagChunk} chunks where the - * tag corresponds to a lexer rule or token type. - */ +/// +/// A _org.antlr.v4.runtime.Token_ object representing a token of a particular type; e.g., +/// ``. These tokens are created for _org.antlr.v4.runtime.tree.pattern.TagChunk_ chunks where the +/// tag corresponds to a lexer rule or token type. +/// public class TokenTagToken: CommonToken { - /** - * This is the backing field for {@link #getTokenName}. - */ + /// + /// This is the backing field for _#getTokenName_. + /// private let tokenName: String - /** - * This is the backing field for {@link #getLabel}. - */ + /// + /// This is the backing field for _#getLabel_. + /// private let label: String? - /** - * Constructs a new instance of {@link org.antlr.v4.runtime.tree.pattern.TokenTagToken} for an unlabeled tag - * with the specified token name and type. - * - * @param tokenName The token name. - * @param type The token type. - */ + /// + /// Constructs a new instance of _org.antlr.v4.runtime.tree.pattern.TokenTagToken_ for an unlabeled tag + /// with the specified token name and type. + /// + /// - Parameter tokenName: The token name. + /// - Parameter type: The token type. 
+ /// public convenience init(_ tokenName: String, _ type: Int) { self.init(tokenName, type, nil) } - /** - * Constructs a new instance of {@link org.antlr.v4.runtime.tree.pattern.TokenTagToken} with the specified - * token name, type, and label. - * - * @param tokenName The token name. - * @param type The token type. - * @param label The label associated with the token tag, or {@code null} if - * the token tag is unlabeled. - */ + /// + /// Constructs a new instance of _org.antlr.v4.runtime.tree.pattern.TokenTagToken_ with the specified + /// token name, type, and label. + /// + /// - Parameter tokenName: The token name. + /// - Parameter type: The token type. + /// - Parameter label: The label associated with the token tag, or `null` if + /// the token tag is unlabeled. + /// public init(_ tokenName: String, _ type: Int, _ label: String?) { self.tokenName = tokenName @@ -50,32 +50,32 @@ public class TokenTagToken: CommonToken { super.init(type) } - /** - * Gets the token name. - * @return The token name. - */ + /// + /// Gets the token name. + /// - Returns: The token name. + /// public final func getTokenName() -> String { return tokenName } - /** - * Gets the label associated with the rule tag. - * - * @return The name of the label associated with the rule tag, or - * {@code null} if this is an unlabeled rule tag. - */ + /// + /// Gets the label associated with the rule tag. + /// + /// - Returns: The name of the label associated with the rule tag, or + /// `null` if this is an unlabeled rule tag. + /// public final func getLabel() -> String? { return label } - /** - * {@inheritDoc} - * - *

      The implementation for {@link org.antlr.v4.runtime.tree.pattern.TokenTagToken} returns the token tag - * formatted with {@code <} and {@code >} delimiters.

      - */ + /// + /// + /// + /// The implementation for _org.antlr.v4.runtime.tree.pattern.TokenTagToken_ returns the token tag + /// formatted with `<` and `>` delimiters. + /// override public func getText() -> String { if label != nil { @@ -85,12 +85,12 @@ public class TokenTagToken: CommonToken { return "<" + tokenName + ">" } - /** - * {@inheritDoc} - * - *

      The implementation for {@link org.antlr.v4.runtime.tree.pattern.TokenTagToken} returns a string of the form - * {@code tokenName:type}.

      - */ + /// + /// + /// + /// The implementation for _org.antlr.v4.runtime.tree.pattern.TokenTagToken_ returns a string of the form + /// `tokenName:type`. + /// override public var description: String { diff --git a/runtime/Swift/Tests/Antlr4Tests/MurmurHashTests.swift b/runtime/Swift/Tests/Antlr4Tests/MurmurHashTests.swift new file mode 100644 index 000000000..dad5853de --- /dev/null +++ b/runtime/Swift/Tests/Antlr4Tests/MurmurHashTests.swift @@ -0,0 +1,47 @@ +/// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +/// Use of this file is governed by the BSD 3-clause license that +/// can be found in the LICENSE.txt file in the project root. + +/// The test patterns below are by Ian Boyd and have been released into the +/// public domain. +/// https://stackoverflow.com/questions/14747343/murmurhash3-test-vectors + +import XCTest +import Antlr4 + +class MurmurHashTests: XCTestCase { + + func testMurmurHash() { + doMurmurHashTest("", 0, 0) //empty string with zero seed should give zero + doMurmurHashTest("", 1, 0x514E28B7) + doMurmurHashTest("", 0xffffffff, 0x81F16F39) //make sure seed value is handled unsigned + doMurmurHashTest("\0\0\0\0", 0, 0x2362F9DE) //make sure we handle embedded nulls + + doMurmurHashTest("aaaa", 0x9747b28c, 0x5A97808A) //one full chunk + doMurmurHashTest("aaa", 0x9747b28c, 0x283E0130) //three characters + doMurmurHashTest("aa", 0x9747b28c, 0x5D211726) //two characters + doMurmurHashTest("a", 0x9747b28c, 0x7FA09EA6) //one character + + //Endian order within the chunks + doMurmurHashTest("abcd", 0x9747b28c, 0xF0478627) //one full chunk + doMurmurHashTest("abc", 0x9747b28c, 0xC84A62DD) + doMurmurHashTest("ab", 0x9747b28c, 0x74875592) + doMurmurHashTest("a", 0x9747b28c, 0x7FA09EA6) + + doMurmurHashTest("Hello, world!", 0x9747b28c, 0x24884CBA) + + //Make sure you handle UTF-8 high characters. 
A bcrypt implementation messed this up + doMurmurHashTest("ππππππππ", 0x9747b28c, 0xD58063C1) //U+03C0: Greek Small Letter Pi + + //String of 256 characters. + doMurmurHashTest(String(repeating: "a", count: 256), 0x9747b28c, 0x37405BDC) + + doMurmurHashTest("abc", 0, 0xB3DD93FA) + doMurmurHashTest("abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq", 0, 0xEE925B90) + doMurmurHashTest("The quick brown fox jumps over the lazy dog", 0x9747b28c, 0x2FA826CD) + } +} + +private func doMurmurHashTest(_ input: String, _ seed: UInt32, _ expected: UInt32) { + XCTAssertEqual(MurmurHash.hashString(input, seed), expected) +} diff --git a/runtime/Swift/Tests/Antlr4Tests/VisitorTests.swift b/runtime/Swift/Tests/Antlr4Tests/VisitorTests.swift index e22488797..d7adb1cb6 100644 --- a/runtime/Swift/Tests/Antlr4Tests/VisitorTests.swift +++ b/runtime/Swift/Tests/Antlr4Tests/VisitorTests.swift @@ -62,10 +62,10 @@ class VisitorTests: XCTestCase { var errors = [String]() - override func syntaxError(_ recognizer: Recognizer, - _ offendingSymbol: AnyObject?, - _ line: Int, _ charPositionInLine: Int, - _ msg: String, _ e: AnyObject?) { + override func syntaxError(_ recognizer: Recognizer, + _ offendingSymbol: AnyObject?, + _ line: Int, _ charPositionInLine: Int, + _ msg: String, _ e: AnyObject?) { errors.append("line \(line):\(charPositionInLine) \(msg)") } } diff --git a/runtime/Swift/boot.py b/runtime/Swift/boot.py index b362224de..ff82c9e00 100755 --- a/runtime/Swift/boot.py +++ b/runtime/Swift/boot.py @@ -110,7 +110,7 @@ def get_argument_parser(): "Use this command if you want to include ANTLR4 as SPM dependency.", ) p.add_argument("--gen-xcodeproj", action="store_true", - help=" Generates an Xcode project for ANTLR4 Swift runtime. " + help=" Generates an Xcode project for ANTLR4 Swift runtime. " "This directive will generate all the required parsers for the project. 
" "Feel free to re-run whenever you updated the test grammar files.") p.add_argument("--test", @@ -147,7 +147,11 @@ def generate_spm_module(in_folder=TMP_FOLDER): call(["git", "tag", "{}.0.0".format(MAJOR_VERSION)]) antlr_says("Created local repository.") - antlr_says("Put .Package(url: \"{}\", majorVersion: {}) in Package.swift.".format(os.getcwd(), MAJOR_VERSION)) + antlr_says("(swift-tools-version:3.0) " + "Put .Package(url: \"{}\", majorVersion: {}) in Package.swift.".format(os.getcwd(), MAJOR_VERSION)) + antlr_says("(swift-tools-wersion:4.0) " + "Put .package(url: \"{}\", from: \"{}.0.0\") in Package.swift " + "and add \"Antlr4\" to target dependencies. ".format(os.getcwd(), MAJOR_VERSION)) def generate_xcodeproj(): diff --git a/tool-testsuite/test/org/antlr/v4/test/tool/TestToolSyntaxErrors.java b/tool-testsuite/test/org/antlr/v4/test/tool/TestToolSyntaxErrors.java index 3219c0bad..b19d6c865 100644 --- a/tool-testsuite/test/org/antlr/v4/test/tool/TestToolSyntaxErrors.java +++ b/tool-testsuite/test/org/antlr/v4/test/tool/TestToolSyntaxErrors.java @@ -8,6 +8,7 @@ package org.antlr.v4.test.tool; import org.antlr.v4.Tool; import org.antlr.v4.tool.ErrorType; +import org.junit.Assert; import org.junit.Before; import org.junit.Test; @@ -60,6 +61,16 @@ public class TestToolSyntaxErrors extends BaseJavaToolTest { super.testSetUp(); } + @Test + public void AllErrorCodesDistinct() { + ErrorType[] errorTypes = ErrorType.class.getEnumConstants(); + for (int i = 0; i < errorTypes.length; i++) { + for (int j = i + 1; j < errorTypes.length; j++) { + Assert.assertNotEquals(errorTypes[i].code, errorTypes[j].code); + } + } + } + @Test public void testA() { super.testErrors(A, true); } @Test public void testExtraColon() { diff --git a/tool/pom.xml b/tool/pom.xml index 1952a25ec..cb172c534 100644 --- a/tool/pom.xml +++ b/tool/pom.xml @@ -58,6 +58,7 @@ org.apache.maven.plugins maven-source-plugin + 3.0.1 @@ -86,6 +87,7 @@ org.codehaus.mojo build-helper-maven-plugin + 3.0.0 
generate-sources diff --git a/tool/resources/org/antlr/v4/tool/templates/codegen/Go/Go.stg b/tool/resources/org/antlr/v4/tool/templates/codegen/Go/Go.stg index b1835e4e7..81d00484e 100644 --- a/tool/resources/org/antlr/v4/tool/templates/codegen/Go/Go.stg +++ b/tool/resources/org/antlr/v4/tool/templates/codegen/Go/Go.stg @@ -1,5 +1,5 @@ fileHeader(grammarFileName, ANTLRVersion) ::= << -// Generated from by ANTLR . +// Code generated from by ANTLR . DO NOT EDIT. >> ParserFile(file, parser, namedActions, contextSuperClass) ::= << @@ -777,29 +777,31 @@ MatchSet(m, expr, capture) ::= "" MatchNotSet(m, expr, capture) ::= "" CommonSetStuff(m, expr, capture, invert) ::= << -p.SetState() - - -var _lt = p.GetTokenStream().LT(1) - - = _lt}; separator="\n"> - - - - - - -if \<= 0 || if !() { +{ + p.SetState() - var _ri = p.GetErrorHandler().RecoverInline(p) - = _ri}; separator="\n"> - - p.GetErrorHandler().RecoverInline(p) + var _lt = p.GetTokenStream().LT(1) + + = _lt}; separator="\n"> + -} else { - p.GetErrorHandler().ReportMatch(p) - p.Consume() + + + + + if \<= 0 || if !() { + + var _ri = p.GetErrorHandler().RecoverInline(p) + + = _ri}; separator="\n"> + + p.GetErrorHandler().RecoverInline(p) + + } else { + p.GetErrorHandler().ReportMatch(p) + p.Consume() + } } >> diff --git a/tool/resources/org/antlr/v4/tool/templates/codegen/Java/Java.stg b/tool/resources/org/antlr/v4/tool/templates/codegen/Java/Java.stg index 0ebd212ec..492c56644 100644 --- a/tool/resources/org/antlr/v4/tool/templates/codegen/Java/Java.stg +++ b/tool/resources/org/antlr/v4/tool/templates/codegen/Java/Java.stg @@ -894,8 +894,10 @@ public class extends { protected static final DFA[] _decisionToDFA; protected static final PredictionContextCache _sharedContextCache = new PredictionContextCache(); + public static final int =}; separator=", ", wrap, anchor>; + public static final int =}; separator=", ", wrap, anchor>; diff --git a/tool/resources/org/antlr/v4/tool/templates/codegen/JavaScript/JavaScript.stg 
b/tool/resources/org/antlr/v4/tool/templates/codegen/JavaScript/JavaScript.stg index fb5883c02..728db6307 100644 --- a/tool/resources/org/antlr/v4/tool/templates/codegen/JavaScript/JavaScript.stg +++ b/tool/resources/org/antlr/v4/tool/templates/codegen/JavaScript/JavaScript.stg @@ -36,12 +36,12 @@ * REQUIRED. */ -pythonTypeInitMap ::= [ - "bool":"False", +javascriptTypeInitMap ::= [ + "bool":"false", "int":"0", "float":"0.0", "str":"", - default:"None" // anything other than a primitive type is an object + default:"{}" // anything other than a primitive type is an object ] // args must be , @@ -802,6 +802,9 @@ var antlr4 = require('antlr4/index'); >> Lexer(lexer, atn, actionFuncs, sempredFuncs, superClass) ::= << + +var = require('./').; + @@ -860,7 +863,7 @@ var serializedATN = [" "}>"].join(""); * must be an object, default value is "null". */ initValue(typeName) ::= << - + >> codeFileExtension() ::= ".js" diff --git a/tool/resources/org/antlr/v4/tool/templates/codegen/Python2/Python2.stg b/tool/resources/org/antlr/v4/tool/templates/codegen/Python2/Python2.stg index b01a76fc4..570f1659f 100644 --- a/tool/resources/org/antlr/v4/tool/templates/codegen/Python2/Python2.stg +++ b/tool/resources/org/antlr/v4/tool/templates/codegen/Python2/Python2.stg @@ -809,7 +809,7 @@ def serializedATN(): * must be an object, default value is "null". */ initValue(typeName) ::= << - + >> codeFileExtension() ::= ".py" diff --git a/tool/resources/org/antlr/v4/tool/templates/codegen/Python3/Python3.stg b/tool/resources/org/antlr/v4/tool/templates/codegen/Python3/Python3.stg index 081e3f3f1..34e525b85 100644 --- a/tool/resources/org/antlr/v4/tool/templates/codegen/Python3/Python3.stg +++ b/tool/resources/org/antlr/v4/tool/templates/codegen/Python3/Python3.stg @@ -816,7 +816,7 @@ def serializedATN(): * must be an object, default value is "null". 
*/ initValue(typeName) ::= << - + >> codeFileExtension() ::= ".py" diff --git a/tool/resources/org/antlr/v4/tool/templates/codegen/Swift/Swift.stg b/tool/resources/org/antlr/v4/tool/templates/codegen/Swift/Swift.stg index 2367a441f..2cae8daf2 100755 --- a/tool/resources/org/antlr/v4/tool/templates/codegen/Swift/Swift.stg +++ b/tool/resources/org/antlr/v4/tool/templates/codegen/Swift/Swift.stg @@ -241,12 +241,14 @@ open class : { } return decisionToDFA }() - internal static let _sharedContextCache: PredictionContextCache = PredictionContextCache() + internal static let _sharedContextCache = PredictionContextCache() + public enum Tokens: Int { case EOF = -1, = }; separator=", ", wrap, anchor> } + public static let = }; separator=", ", wrap, anchor> public static let ruleNames: [String] = [ "}; separator=", ", wrap, anchor> @@ -285,8 +287,8 @@ case : - public static let _serializedATN : String = ATN().jsonString - public static let _ATN: ATN = ATNDeserializer().deserializeFromJson(_serializedATN) + public static let _serializedATN = ATN().jsonString + public static let _ATN = ATNDeserializer().deserializeFromJson(_serializedATN) } >> @@ -297,33 +299,7 @@ private static let _LITERAL_NAMES: [String?] = [ private static let _SYMBOLIC_NAMES: [String?] = [ }; null="nil", separator=", ", wrap, anchor> ] -public static let VOCABULARY: Vocabulary = Vocabulary(_LITERAL_NAMES, _SYMBOLIC_NAMES) - -/** - * @deprecated Use {@link #VOCABULARY} instead. - */ -//@Deprecated -public let tokenNames: [String?]? = { - let length = _SYMBOLIC_NAMES.count - var tokenNames = [String?](repeating: nil, count: length) - for i in 0..\ -open func getTokenNames() -> [String?]? 
{ - return tokenNames -} +public static let VOCABULARY = Vocabulary(_LITERAL_NAMES, _SYMBOLIC_NAMES) >> dumpActions(recog, argFuncs, actionFuncs, sempredFuncs) ::= << @@ -361,11 +337,12 @@ open override func getVocabulary() -> Vocabulary { return .VOCABULARY } -public override init(_ input:TokenStream)throws { +public override init(_ input:TokenStream) throws { RuntimeMetaData.checkVersion("4.7", RuntimeMetaData.VERSION) try super.init(input) _interp = ParserATNSimulator(self,._ATN,._decisionToDFA, ._sharedContextCache) } + >> /* This generates a private method since the actionIndex is generated, making an @@ -612,7 +589,7 @@ case +1: Sync(s) ::= "sync();" -ThrowNoViableAlt(t) ::= "throw try ANTLRException.recognition(e: NoViableAltException(self))" +ThrowNoViableAlt(t) ::= "throw ANTLRException.recognition(e: NoViableAltException(self))" TestSetInline(s) ::= << }; separator=" || ">!> @@ -717,7 +694,7 @@ ArgAction(a, chunks) ::= "" SemPred(p, chunks, failChunks) ::= << setState() if (!()) { - throw try ANTLRException.recognition(e:FailedPredicateException(self, , , )) + throw ANTLRException.recognition(e:FailedPredicateException(self, , , )) } >> @@ -796,7 +773,7 @@ open func (_ i:Int) -> TerminalNode?{ >> ContextRuleGetterDecl(r) ::= << open func () -> ? { - return getRuleContext(.self,0) + return getRuleContext(.self, 0) } >> ContextRuleListGetterDecl(r) ::= << @@ -806,7 +783,7 @@ open func () -> Array\<\> { >> ContextRuleListIndexedGetterDecl(r) ::= << open func (_ i: Int) -> ? 
{ - return getRuleContext(.self,i) + return getRuleContext(.self, i) } >> @@ -827,7 +804,7 @@ CaptureNextTokenType(d) ::= " = try _input.LA(1)" StructDecl(struct,ctorAttrs,attrs,getters,dispatchMethods,interfaces,extensionMembers, superClass={ParserRuleContext}) ::= << -open class :ParserRuleContext, { +open class : ParserRuleContext, { }; separator="\n"> }; separator="\n"> public init(_ parent: ParserRuleContext,_ invokingState: Int) { super.init(parent, invokingState) } !> @@ -865,8 +842,8 @@ public final class : Context { ListenerDispatchMethod(method) ::= << override open func enterexitRule(_ listener: ParseTreeListener) { - if listener is Listener { - (listener as! Listener).enterexit(self) + if let listener = listener as? Listener { + listener.enterexit(self) } } >> @@ -874,11 +851,12 @@ open func enterexitRule(_ listener: ParseTreeLi VisitorDispatchMethod(method) ::= << override open func accept\(_ visitor: ParseTreeVisitor\) -> T? { - if visitor is Visitor { - return (visitor as! Visitor\).visit(self) - }else if visitor is BaseVisitor { - return (visitor as! BaseVisitor\).visit(self) - } + if let visitor = visitor as? Visitor { + return visitor.visit(self) + } + else if let visitor = visitor as? 
BaseVisitor { + return visitor.visit(self) + } else { return visitor.visitChildren(self) } @@ -959,7 +937,7 @@ open class : { return decisionToDFA }() - internal static let _sharedContextCache:PredictionContextCache = PredictionContextCache() + internal static let _sharedContextCache = PredictionContextCache() public static let =}; separator=", ", wrap, anchor> public static let =}; separator=", ", wrap, anchor> diff --git a/tool/src/org/antlr/v4/gui/TreeViewer.java b/tool/src/org/antlr/v4/gui/TreeViewer.java index f20f3d8ea..2c000aa90 100644 --- a/tool/src/org/antlr/v4/gui/TreeViewer.java +++ b/tool/src/org/antlr/v4/gui/TreeViewer.java @@ -37,7 +37,10 @@ import java.awt.geom.CubicCurve2D; import java.awt.geom.Rectangle2D; import java.awt.image.BufferedImage; import java.io.File; +import java.io.FileWriter; +import java.io.BufferedWriter; import java.io.IOException; +import java.io.Writer; import java.util.ArrayList; import java.util.Collection; import java.util.List; @@ -245,6 +248,69 @@ public class TreeViewer extends JComponent { } } + protected void generateEdges(Writer writer, Tree parent) throws IOException { + if (!getTree().isLeaf(parent)) { + Rectangle2D.Double b1 = getBoundsOfNode(parent); + double x1 = b1.getCenterX(); + double y1 = b1.getCenterY(); + + for (Tree child : getTree().getChildren(parent)) { + Rectangle2D.Double childBounds = getBoundsOfNode(child); + double x2 = childBounds.getCenterX(); + double y2 = childBounds.getMinY(); + writer.write(line(""+x1, ""+y1, ""+x2, ""+y2, + "stroke:black; stroke-width:1px;")); + generateEdges(writer, child); + } + } + } + + protected void generateBox(Writer writer, Tree parent) throws IOException { + + // draw the box in the background + Rectangle2D.Double box = getBoundsOfNode(parent); + writer.write(rect(""+box.x, ""+box.y, ""+box.width, ""+box.height, + "fill:orange; stroke:rgb(0,0,0);", "rx=\"1\"")); + + // draw the text on top of the box (possibly multiple lines) + String line = 
getText(parent).replace("<","<").replace(">",">"); + int fontSize = 10; + int x = (int) box.x + 2; + int y = (int) box.y + fontSize - 1; + String style = String.format("font-family:sans-serif;font-size:%dpx;", + fontSize); + writer.write(text(""+x, ""+y, style, line)); + } + + private static String line(String x1, String y1, String x2, String y2, + String style) { + return String + .format("\n", + x1, y1, x2, y2, style); + } + + private static String rect(String x, String y, String width, String height, + String style, String extraAttributes) { + return String + .format("\n", + x, y, width, height, style, extraAttributes); + } + + private static String text(String x, String y, String style, String text) { + return String.format( + "\n%s\n\n", x, y, + style, text); + } + + private void paintSVG(Writer writer) throws IOException { + + generateEdges(writer, getTree().getRoot()); + + for (Tree tree : treeLayout.getNodeBounds().keySet()) { + generateBox(writer, tree); + } + } + @Override protected Graphics getComponentGraphics(Graphics g) { Graphics2D g2d=(Graphics2D)g; @@ -307,6 +373,18 @@ public class TreeViewer extends JComponent { ); wrapper.add(png); + // Add an export-to-png button right of the "OK" button + JButton svg = new JButton("Export as SVG"); + svg.addActionListener( + new ActionListener() { + @Override + public void actionPerformed(ActionEvent e) { + generateSVGFile(viewer, dialog); + } + } + ); + wrapper.add(svg); + bottomPanel.add(wrapper, BorderLayout.SOUTH); // Add scale slider @@ -418,29 +496,7 @@ public class TreeViewer extends JComponent { g.dispose(); try { - File suggestedFile = generateNonExistingPngFile(); - JFileChooser fileChooser = new JFileChooserConfirmOverwrite(); - fileChooser.setCurrentDirectory(suggestedFile.getParentFile()); - fileChooser.setSelectedFile(suggestedFile); - FileFilter pngFilter = new FileFilter() { - - @Override - public boolean accept(File pathname) { - if (pathname.isFile()) { - return 
pathname.getName().toLowerCase().endsWith(".png"); - } - - return true; - } - - @Override - public String getDescription() { - return "PNG Files (*.png)"; - } - }; - - fileChooser.addChoosableFileFilter(pngFilter); - fileChooser.setFileFilter(pngFilter); + JFileChooser fileChooser = getFileChooser(".png", "PNG files"); int returnValue = fileChooser.showSaveDialog(dialog); if (returnValue == JFileChooser.APPROVE_OPTION) { @@ -469,23 +525,85 @@ public class TreeViewer extends JComponent { } } - private static File generateNonExistingPngFile() { + private static JFileChooser getFileChooser(final String fileEnding, + final String description) { + File suggestedFile = generateNonExistingFile(fileEnding); + JFileChooser fileChooser = new JFileChooserConfirmOverwrite(); + fileChooser.setCurrentDirectory(suggestedFile.getParentFile()); + fileChooser.setSelectedFile(suggestedFile); + FileFilter filter = new FileFilter() { + + @Override + public boolean accept(File pathname) { + if (pathname.isFile()) { + return pathname.getName().toLowerCase().endsWith(fileEnding); + } + + return true; + } + + @Override + public String getDescription() { + return description+" (*"+fileEnding+")"; + } + }; + fileChooser.addChoosableFileFilter(filter); + fileChooser.setFileFilter(filter); + return fileChooser; + } + + private static void generateSVGFile(TreeViewer viewer, JFrame dialog) { + + try { + JFileChooser fileChooser = getFileChooser(".svg", "SVG files"); + + int returnValue = fileChooser.showSaveDialog(dialog); + if (returnValue == JFileChooser.APPROVE_OPTION) { + File svgFile = fileChooser.getSelectedFile(); + // save the new svg file here! + BufferedWriter writer = new BufferedWriter(new FileWriter(svgFile)); + // HACK: multiplying with 1.1 should be replaced wit an accurate number + writer.write(""); + viewer.paintSVG(writer); + writer.write(""); + writer.flush(); + writer.close(); + try { + // Try to open the parent folder using the OS' native file manager. 
+ Desktop.getDesktop().open(svgFile.getParentFile()); + } catch (Exception ex) { + // We could not launch the file manager: just show a popup that we + // succeeded in saving the PNG file. + JOptionPane.showMessageDialog(dialog, "Saved SVG to: " + + svgFile.getAbsolutePath()); + ex.printStackTrace(); + } + } + } catch (Exception ex) { + JOptionPane.showMessageDialog(dialog, + "Could not export to SVG: " + ex.getMessage(), + "Error", + JOptionPane.ERROR_MESSAGE); + ex.printStackTrace(); + } + } + + private static File generateNonExistingFile(String extension) { final String parent = "."; final String name = "antlr4_parse_tree"; - final String extension = ".png"; - File pngFile = new File(parent, name + extension); + File file = new File(parent, name + extension); int counter = 1; // Keep looping until we create a File that does not yet exist. - while (pngFile.exists()) { - pngFile = new File(parent, name + "_"+ counter + extension); + while (file.exists()) { + file = new File(parent, name + "_" + counter + extension); counter++; } - return pngFile; + return file; } private static void fillTree(TreeNodeWrapper node, Tree tree, TreeViewer viewer) { diff --git a/tool/src/org/antlr/v4/tool/ErrorType.java b/tool/src/org/antlr/v4/tool/ErrorType.java index 700269fb9..d7ad7ab40 100644 --- a/tool/src/org/antlr/v4/tool/ErrorType.java +++ b/tool/src/org/antlr/v4/tool/ErrorType.java @@ -394,11 +394,11 @@ public enum ErrorType { */ IMPORT_NAME_CLASH(113, " grammar and imported grammar both generate ", ErrorSeverity.ERROR), /** - * Compiler Error 160. + * Compiler Error 114. * *

      cannot find tokens file filename

      */ - CANNOT_FIND_TOKENS_FILE_REFD_IN_GRAMMAR(160, "cannot find tokens file ", ErrorSeverity.ERROR), + CANNOT_FIND_TOKENS_FILE_REFD_IN_GRAMMAR(114, "cannot find tokens file ", ErrorSeverity.ERROR), /** * Compiler Warning 118. * @@ -522,7 +522,7 @@ public enum ErrorType { */ USE_OF_BAD_WORD(134, "symbol conflicts with generated code in target language or runtime", ErrorSeverity.ERROR), /** - * Compiler Error 134. + * Compiler Error 183. * *

      rule reference rule is not currently supported in a set

      * @@ -530,7 +530,7 @@ public enum ErrorType { * Note: This error has the same number as the unrelated error * {@link #USE_OF_BAD_WORD}.

      */ - UNSUPPORTED_REFERENCE_IN_LEXER_SET(134, "rule reference is not currently supported in a set", ErrorSeverity.ERROR), + UNSUPPORTED_REFERENCE_IN_LEXER_SET(183, "rule reference is not currently supported in a set", ErrorSeverity.ERROR), /** * Compiler Error 135. *