diff --git a/.editorconfig b/.editorconfig
index 53b65e9f3..daa6da0fb 100644
--- a/.editorconfig
+++ b/.editorconfig
@@ -1,5 +1,8 @@
root = true
+[*]
+tab_width = 4
+
[*.{java,stg}]
charset = utf-8
insert_final_newline = true
diff --git a/.travis.yml b/.travis.yml
index d27ee56b3..d9969b6eb 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -2,30 +2,26 @@ sudo: true
language: java
+cache:
+ directories:
+ - $HOME/.m2
+ - $HOME/Library/Caches/Homebrew
+
+stages:
+ - smoke-test
+ - main-test
+ - extended-test
+
matrix:
include:
- os: linux
compiler: clang
- jdk: oraclejdk7
+ jdk: openjdk7
env:
- TARGET=cpp
- CXX=g++-5
- - GROUP=ALL
- addons:
- apt:
- sources:
- - ubuntu-toolchain-r-test
- - llvm-toolchain-precise-3.7
- packages:
- - g++-5
- - uuid-dev
- - clang-3.7
- - os: osx
- compiler: clang
- osx_image: xcode8.1
- env:
- - TARGET=cpp
- GROUP=LEXER
+ stage: main-test
addons:
apt:
sources:
@@ -35,106 +31,150 @@ matrix:
- g++-5
- uuid-dev
- clang-3.7
- - os: osx
- compiler: clang
- osx_image: xcode8.1
- env:
- - TARGET=cpp
- - GROUP=PARSER
- addons:
- apt:
- sources:
- - ubuntu-toolchain-r-test
- - llvm-toolchain-precise-3.7
- packages:
- - g++-5
- - uuid-dev
- - clang-3.7
- - os: osx
- compiler: clang
- osx_image: xcode8.1
- env:
- - TARGET=cpp
- - GROUP=RECURSION
- addons:
- apt:
- sources:
- - ubuntu-toolchain-r-test
- - llvm-toolchain-precise-3.7
- packages:
- - g++-5
- - uuid-dev
- - clang-3.7
- - os: osx
- compiler: clang
- osx_image: xcode8.1
- env:
- - TARGET=swift
- - GROUP=LEXER
- - os: osx
- compiler: clang
- osx_image: xcode8.1
- env:
- - TARGET=swift
- - GROUP=PARSER
- - os: osx
- compiler: clang
- osx_image: xcode8.1
- env:
- - TARGET=swift
- - GROUP=RECURSION
- os: linux
+ compiler: clang
+ jdk: openjdk7
+ env:
+ - TARGET=cpp
+ - CXX=g++-5
+ - GROUP=PARSER
+ stage: main-test
+ addons:
+ apt:
+ sources:
+ - ubuntu-toolchain-r-test
+ - llvm-toolchain-precise-3.7
+ packages:
+ - g++-5
+ - uuid-dev
+ - clang-3.7
+ - os: linux
+ compiler: clang
+ jdk: openjdk7
+ env:
+ - TARGET=cpp
+ - CXX=g++-5
+ - GROUP=RECURSION
+ stage: main-test
+ addons:
+ apt:
+ sources:
+ - ubuntu-toolchain-r-test
+ - llvm-toolchain-precise-3.7
+ packages:
+ - g++-5
+ - uuid-dev
+ - clang-3.7
+ - os: osx
+ compiler: clang
+ osx_image: xcode9
+ env:
+ - TARGET=cpp
+ - GROUP=LEXER
+ stage: extended-test
+ - os: osx
+ compiler: clang
+ osx_image: xcode9
+ env:
+ - TARGET=cpp
+ - GROUP=PARSER
+ stage: extended-test
+ - os: osx
+ compiler: clang
+ osx_image: xcode9
+ env:
+ - TARGET=cpp
+ - GROUP=RECURSION
+ stage: extended-test
+ - os: osx
+ compiler: clang
+ osx_image: xcode9
+ env:
+ - TARGET=swift
+ - GROUP=LEXER
+ stage: main-test
+ - os: osx
+ compiler: clang
+ osx_image: xcode9
+ env:
+ - TARGET=swift
+ - GROUP=PARSER
+ stage: main-test
+ - os: osx
+ compiler: clang
+ osx_image: xcode9
+ env:
+ - TARGET=swift
+ - GROUP=RECURSION
+ stage: main-test
+ - os: linux
+ dist: trusty
compiler: clang
env:
- TARGET=swift
- GROUP=ALL
+ stage: extended-test
- os: osx
- osx_image: xcode8.2
+ osx_image: xcode9
env:
- TARGET=dotnet
- GROUP=LEXER
+ stage: extended-test
- os: osx
- osx_image: xcode8.2
+ osx_image: xcode9
env:
- TARGET=dotnet
- GROUP=PARSER
+ stage: extended-test
- os: osx
- osx_image: xcode8.2
+ osx_image: xcode9
env:
- TARGET=dotnet
- GROUP=RECURSION
+ stage: extended-test
- os: linux
- jdk: oraclejdk7
+ jdk: openjdk7
env: TARGET=java
+ stage: extended-test
+ - os: linux
+ jdk: openjdk8
+ env: TARGET=java
+ stage: extended-test
- os: linux
jdk: oraclejdk8
env: TARGET=java
+ stage: smoke-test
- os: linux
- jdk: oraclejdk7
+ jdk: openjdk7
env: TARGET=csharp
+ stage: extended-test
- os: linux
jdk: oraclejdk8
dist: trusty
env:
- TARGET=dotnet
- GROUP=LEXER
+ stage: main-test
- os: linux
- jdk: oraclejdk8
+ jdk: openjdk8
dist: trusty
env:
- TARGET=dotnet
- GROUP=PARSER
+ stage: main-test
- os: linux
jdk: oraclejdk8
dist: trusty
env:
- TARGET=dotnet
- GROUP=RECURSION
+ stage: main-test
- os: linux
- jdk: oraclejdk7
+ jdk: openjdk7
env: TARGET=python2
+ stage: extended-test
- os: linux
- jdk: oraclejdk7
+ jdk: openjdk7
env: TARGET=python3
addons:
apt:
@@ -142,16 +182,20 @@ matrix:
- deadsnakes # source required so it finds the package definition below
packages:
- python3.5
+ stage: main-test
- os: linux
- jdk: oraclejdk7
+ dist: trusty
+ jdk: openjdk8
env: TARGET=javascript
+ stage: main-test
- os: linux
- jdk: oraclejdk7
+ dist: trusty
+ jdk: openjdk8
env: TARGET=go
+ stage: main-test
before_install:
- - ./.travis/before-install-$TRAVIS_OS_NAME-$TARGET.sh
+ - f="./.travis/before-install-$TRAVIS_OS_NAME-$TARGET.sh"; ! [ -x "$f" ] || "$f"
script:
- - cd runtime-testsuite; ../.travis/run-tests-$TARGET.sh
-
+ - cd runtime-testsuite; travis_wait 40 ../.travis/run-tests-$TARGET.sh
diff --git a/.travis/before-install-linux-swift.sh b/.travis/before-install-linux-swift.sh
index 607f04449..1a2b2a555 100755
--- a/.travis/before-install-linux-swift.sh
+++ b/.travis/before-install-linux-swift.sh
@@ -1,14 +1,12 @@
set -euo pipefail
-# make sure we use trusty repositories (travis by default uses precise)
-curl https://repogen.simplylinux.ch/txt/trusty/sources_c4aa56bd26c0f54f391d8fae3e687ef5f6e97c26.txt | sudo tee /etc/apt/sources.list
-
# install dependencies
# some packages below will be update, swift assumes newer versions
# of, for example, sqlite3 and libicu, without the update some
# tools will not work
sudo apt-get update
-sudo apt-get install clang libicu-dev libxml2 sqlite3
+sudo apt-get install clang-3.6 libxml2
+sudo update-alternatives --install /usr/bin/clang clang /usr/bin/clang-3.6 100
# This would fix a know linker issue mentioned in:
# https://bugs.swift.org/browse/SR-2299
diff --git a/.travis/before-install-osx-cpp.sh b/.travis/before-install-osx-cpp.sh
deleted file mode 100755
index 48152d221..000000000
--- a/.travis/before-install-osx-cpp.sh
+++ /dev/null
@@ -1,13 +0,0 @@
-#!/bin/bash
-
-set -euo pipefail
-
-thisdir=$(dirname "$0")
-
-brew update
-brew install cmake
-
-# Work around apparent rvm bug that is in Travis's Xcode image.
-# https://github.com/direnv/direnv/issues/210
-# https://github.com/travis-ci/travis-ci/issues/6307
-shell_session_update() { :; }
diff --git a/.travis/before-install-osx-dotnet.sh b/.travis/before-install-osx-dotnet.sh
index 428016fa6..c784ba091 100755
--- a/.travis/before-install-osx-dotnet.sh
+++ b/.travis/before-install-osx-dotnet.sh
@@ -4,9 +4,7 @@ set -euo pipefail
thisdir=$(dirname "$0")
-# pre-requisites for dotnet core
-brew update
-brew install openssl
+# OpenSSL setup for dotnet core
mkdir -p /usr/local/lib
ln -s /usr/local/opt/openssl/lib/libcrypto.1.0.0.dylib /usr/local/lib/
ln -s /usr/local/opt/openssl/lib/libssl.1.0.0.dylib /usr/local/lib/
@@ -19,9 +17,3 @@ sudo installer -pkg /tmp/dotnet-dev-osx-x64.1.0.4.pkg -target /
# make the link
ln -s /usr/local/share/dotnet/dotnet /usr/local/bin/
-
-# Work around apparent rvm bug that is in Travis's Xcode image.
-# https://github.com/direnv/direnv/issues/210
-# https://github.com/travis-ci/travis-ci/issues/6307
-shell_session_update() { :; }
-
diff --git a/.travis/before-install-osx-swift.sh b/.travis/before-install-osx-swift.sh
deleted file mode 100755
index 145a505c6..000000000
--- a/.travis/before-install-osx-swift.sh
+++ /dev/null
@@ -1,12 +0,0 @@
-#!/bin/bash
-
-set -euo pipefail
-
-thisdir=$(dirname "$0")
-
-brew update
-
-# Work around apparent rvm bug that is in Travis's Xcode image.
-# https://github.com/direnv/direnv/issues/210
-# https://github.com/travis-ci/travis-ci/issues/6307
-shell_session_update() { :; }
diff --git a/.travis/run-tests-swift.sh b/.travis/run-tests-swift.sh
index 56d2cec65..8c63070aa 100755
--- a/.travis/run-tests-swift.sh
+++ b/.travis/run-tests-swift.sh
@@ -4,7 +4,7 @@
# here since environment variables doesn't pass
# across scripts
if [ $TRAVIS_OS_NAME == "linux" ]; then
- export SWIFT_VERSION=swift-3.1.1
+ export SWIFT_VERSION=swift-4.0
export SWIFT_HOME=$(pwd)/swift/$SWIFT_VERSION-RELEASE-ubuntu14.04/usr/bin/
export PATH=$SWIFT_HOME:$PATH
diff --git a/antlr4-maven-plugin/src/main/java/org/antlr/mojo/antlr4/Antlr4Mojo.java b/antlr4-maven-plugin/src/main/java/org/antlr/mojo/antlr4/Antlr4Mojo.java
index dcdc0a29d..c0926fe6c 100644
--- a/antlr4-maven-plugin/src/main/java/org/antlr/mojo/antlr4/Antlr4Mojo.java
+++ b/antlr4-maven-plugin/src/main/java/org/antlr/mojo/antlr4/Antlr4Mojo.java
@@ -395,7 +395,7 @@ public class Antlr4Mojo extends AbstractMojo {
String tokensFileName = grammarFile.getName().split("\\.")[0] + ".tokens";
File outputFile = new File(outputDirectory, tokensFileName);
if ( (! outputFile.exists()) ||
- outputFile.lastModified() < grammarFile.lastModified() ||
+ outputFile.lastModified() <= grammarFile.lastModified() ||
dependencies.isDependencyChanged(grammarFile)) {
grammarFilesToProcess.add(grammarFile);
}
@@ -412,10 +412,7 @@ public class Antlr4Mojo extends AbstractMojo {
// Iterate each grammar file we were given and add it into the tool's list of
// grammars to process.
for (File grammarFile : grammarFiles) {
- if (!buildContext.hasDelta(grammarFile)) {
- continue;
- }
-
+ buildContext.refresh(grammarFile);
buildContext.removeMessages(grammarFile);
getLog().debug("Grammar file '" + grammarFile.getPath() + "' detected.");
diff --git a/antlr4-maven-plugin/src/main/java/org/antlr/mojo/antlr4/GrammarDependencies.java b/antlr4-maven-plugin/src/main/java/org/antlr/mojo/antlr4/GrammarDependencies.java
index 2e9e2472c..d21d1ab7f 100644
--- a/antlr4-maven-plugin/src/main/java/org/antlr/mojo/antlr4/GrammarDependencies.java
+++ b/antlr4-maven-plugin/src/main/java/org/antlr/mojo/antlr4/GrammarDependencies.java
@@ -216,14 +216,14 @@ class GrammarDependencies {
return;
for (GrammarAST importDecl : grammar.getAllChildrenWithType(ANTLRParser.IMPORT)) {
- Tree id = importDecl.getFirstChildWithType(ANTLRParser.ID);
+ for (Tree id: importDecl.getAllChildrenWithType(ANTLRParser.ID)) {
+ // missing id is not valid, but we don't want to prevent the root cause from
+ // being reported by the ANTLR tool
+ if (id != null) {
+ String grammarPath = getRelativePath(grammarFile);
- // missing id is not valid, but we don't want to prevent the root cause from
- // being reported by the ANTLR tool
- if (id != null) {
- String grammarPath = getRelativePath(grammarFile);
-
- graph.addEdge(id.getText() + ".g4", grammarPath);
+ graph.addEdge(id.getText() + ".g4", grammarPath);
+ }
}
}
diff --git a/antlr4-maven-plugin/src/test/java/org/antlr/mojo/antlr4/Antlr4MojoTest.java b/antlr4-maven-plugin/src/test/java/org/antlr/mojo/antlr4/Antlr4MojoTest.java
index d90728922..da38c582a 100644
--- a/antlr4-maven-plugin/src/test/java/org/antlr/mojo/antlr4/Antlr4MojoTest.java
+++ b/antlr4-maven-plugin/src/test/java/org/antlr/mojo/antlr4/Antlr4MojoTest.java
@@ -202,6 +202,7 @@ public class Antlr4MojoTest {
Path genHello = generatedSources.resolve("test/HelloParser.java");
Path baseGrammar = antlrDir.resolve("imports/TestBaseLexer.g4");
+ Path baseGrammar2 = antlrDir.resolve("imports/TestBaseLexer2.g4");
Path lexerGrammar = antlrDir.resolve("test/TestLexer.g4");
Path parserGrammar = antlrDir.resolve("test/TestParser.g4");
@@ -222,21 +223,20 @@ public class Antlr4MojoTest {
assertTrue(Files.exists(genHello));
assertTrue(Files.exists(genTestParser));
assertTrue(Files.exists(genTestLexer));
+ byte[] origTestLexerSum = checksum(genTestLexer);
+ byte[] origTestParserSum = checksum(genTestParser);
+ byte[] origHelloSum = checksum(genHello);
////////////////////////////////////////////////////////////////////////
// 2nd - nothing has been modified, no grammars have to be processed
////////////////////////////////////////////////////////////////////////
{
- byte[] testLexerSum = checksum(genTestLexer);
- byte[] testParserSum = checksum(genTestParser);
- byte[] helloSum = checksum(genHello);
-
maven.executeMojo(session, project, exec);
- assertTrue(Arrays.equals(testLexerSum, checksum(genTestLexer)));
- assertTrue(Arrays.equals(testParserSum, checksum(genTestParser)));
- assertTrue(Arrays.equals(helloSum, checksum(genHello)));
+ assertTrue(Arrays.equals(origTestLexerSum, checksum(genTestLexer)));
+ assertTrue(Arrays.equals(origTestParserSum, checksum(genTestParser)));
+ assertTrue(Arrays.equals(origHelloSum, checksum(genHello)));
}
////////////////////////////////////////////////////////////////////////
@@ -245,50 +245,71 @@ public class Antlr4MojoTest {
// modify the grammar to make checksum comparison detect a change
try(Change change = Change.of(baseGrammar, "DOT: '.' ;")) {
- byte[] testLexerSum = checksum(genTestLexer);
- byte[] testParserSum = checksum(genTestParser);
- byte[] helloSum = checksum(genHello);
-
maven.executeMojo(session, project, exec);
- assertFalse(Arrays.equals(testLexerSum, checksum(genTestLexer)));
- assertFalse(Arrays.equals(testParserSum, checksum(genTestParser)));
- assertTrue(Arrays.equals(helloSum, checksum(genHello)));
+ assertFalse(Arrays.equals(origTestLexerSum, checksum(genTestLexer)));
+ assertFalse(Arrays.equals(origTestParserSum, checksum(genTestParser)));
+ assertTrue(Arrays.equals(origHelloSum, checksum(genHello)));
}
+ // Restore file and confirm it was restored.
+ maven.executeMojo(session, project, exec);
+ assertTrue(Arrays.equals(origTestLexerSum, checksum(genTestLexer)));
+ assertTrue(Arrays.equals(origTestParserSum, checksum(genTestParser)));
+ assertTrue(Arrays.equals(origHelloSum, checksum(genHello)));
////////////////////////////////////////////////////////////////////////
- // 4th - the lexer grammar changed, the parser grammar has to be processed as well
+ // 4th - the second imported grammar changed, every dependency has to be processed
////////////////////////////////////////////////////////////////////////
// modify the grammar to make checksum comparison detect a change
- try(Change change = Change.of(lexerGrammar)) {
- byte[] testLexerSum = checksum(genTestLexer);
- byte[] testParserSum = checksum(genTestParser);
- byte[] helloSum = checksum(genHello);
-
+ try(Change change = Change.of(baseGrammar2, "BANG: '!' ;")) {
maven.executeMojo(session, project, exec);
- assertFalse(Arrays.equals(testLexerSum, checksum(genTestLexer)));
- assertFalse(Arrays.equals(testParserSum, checksum(genTestParser)));
- assertTrue(Arrays.equals(helloSum, checksum(genHello)));
+ assertFalse(Arrays.equals(origTestLexerSum, checksum(genTestLexer)));
+ assertFalse(Arrays.equals(origTestParserSum, checksum(genTestParser)));
+ assertTrue(Arrays.equals(origHelloSum, checksum(genHello)));
}
+ // Restore file and confirm it was restored.
+ maven.executeMojo(session, project, exec);
+ assertTrue(Arrays.equals(origTestLexerSum, checksum(genTestLexer)));
+ assertTrue(Arrays.equals(origTestParserSum, checksum(genTestParser)));
+ assertTrue(Arrays.equals(origHelloSum, checksum(genHello)));
////////////////////////////////////////////////////////////////////////
- // 5th - the parser grammar changed, no other grammars have to be processed
+ // 5th - the lexer grammar changed, the parser grammar has to be processed as well
+ ////////////////////////////////////////////////////////////////////////
+
+ // modify the grammar to make checksum comparison detect a change
+ try(Change change = Change.of(lexerGrammar, "FOO: 'foo' ;")) {
+ maven.executeMojo(session, project, exec);
+
+ assertFalse(Arrays.equals(origTestLexerSum, checksum(genTestLexer)));
+ assertFalse(Arrays.equals(origTestParserSum, checksum(genTestParser)));
+ assertTrue(Arrays.equals(origHelloSum, checksum(genHello)));
+ }
+ // Restore file and confirm it was restored.
+ maven.executeMojo(session, project, exec);
+ assertTrue(Arrays.equals(origTestLexerSum, checksum(genTestLexer)));
+ assertTrue(Arrays.equals(origTestParserSum, checksum(genTestParser)));
+ assertTrue(Arrays.equals(origHelloSum, checksum(genHello)));
+
+ ////////////////////////////////////////////////////////////////////////
+ // 6th - the parser grammar changed, no other grammars have to be processed
////////////////////////////////////////////////////////////////////////
// modify the grammar to make checksum comparison detect a change
try(Change change = Change.of(parserGrammar, " t : WS* ;")) {
- byte[] testLexerSum = checksum(genTestLexer);
- byte[] testParserSum = checksum(genTestParser);
- byte[] helloSum = checksum(genHello);
-
maven.executeMojo(session, project, exec);
- assertTrue(Arrays.equals(testLexerSum, checksum(genTestLexer)));
- assertFalse(Arrays.equals(testParserSum, checksum(genTestParser)));
- assertTrue(Arrays.equals(helloSum, checksum(genHello)));
+ assertTrue(Arrays.equals(origTestLexerSum, checksum(genTestLexer)));
+ assertFalse(Arrays.equals(origTestParserSum, checksum(genTestParser)));
+ assertTrue(Arrays.equals(origHelloSum, checksum(genHello)));
}
+ // Restore file and confirm it was restored.
+ maven.executeMojo(session, project, exec);
+ assertTrue(Arrays.equals(origTestLexerSum, checksum(genTestLexer)));
+ assertTrue(Arrays.equals(origTestParserSum, checksum(genTestParser)));
+ assertTrue(Arrays.equals(origHelloSum, checksum(genHello)));
}
@Test
diff --git a/antlr4-maven-plugin/src/test/projects/importsStandard/src/main/antlr4/imports/TestBaseLexer.g4 b/antlr4-maven-plugin/src/test/projects/importsStandard/src/main/antlr4/imports/TestBaseLexer.g4
index 5fcc6d353..6c3164de3 100644
--- a/antlr4-maven-plugin/src/test/projects/importsStandard/src/main/antlr4/imports/TestBaseLexer.g4
+++ b/antlr4-maven-plugin/src/test/projects/importsStandard/src/main/antlr4/imports/TestBaseLexer.g4
@@ -10,7 +10,4 @@ fragment
Whitespace : ' ' | '\n' | '\t' | '\r' ;
fragment
-Hexdigit : [a-fA-F0-9] ;
-
-fragment
-Digit : [0-9] ;
+Hexdigit : [a-fA-F0-9] ;
\ No newline at end of file
diff --git a/antlr4-maven-plugin/src/test/projects/importsStandard/src/main/antlr4/imports/TestBaseLexer2.g4 b/antlr4-maven-plugin/src/test/projects/importsStandard/src/main/antlr4/imports/TestBaseLexer2.g4
new file mode 100644
index 000000000..18aa0c4f3
--- /dev/null
+++ b/antlr4-maven-plugin/src/test/projects/importsStandard/src/main/antlr4/imports/TestBaseLexer2.g4
@@ -0,0 +1,4 @@
+lexer grammar TestBaseLexer2;
+
+fragment
+Digit : [0-9] ;
diff --git a/antlr4-maven-plugin/src/test/projects/importsStandard/src/main/antlr4/test/TestLexer.g4 b/antlr4-maven-plugin/src/test/projects/importsStandard/src/main/antlr4/test/TestLexer.g4
index 668b76496..b9c07b3df 100644
--- a/antlr4-maven-plugin/src/test/projects/importsStandard/src/main/antlr4/test/TestLexer.g4
+++ b/antlr4-maven-plugin/src/test/projects/importsStandard/src/main/antlr4/test/TestLexer.g4
@@ -1,6 +1,6 @@
lexer grammar TestLexer;
-import TestBaseLexer;
+import TestBaseLexer, TestBaseLexer2;
WS : Whitespace+ -> skip;
-TEXT : ~[<&]+ ; // match any 16 bit char other than < and &
\ No newline at end of file
+TEXT : ~[<&]+ ; // match any 16 bit char other than < and &
diff --git a/appveyor.yml b/appveyor.yml
index 57184557b..bf850aac9 100644
--- a/appveyor.yml
+++ b/appveyor.yml
@@ -1,8 +1,8 @@
-version: '4.6-SNAPSHOT+AppVeyor.{build}'
-os: Windows Server 2012
+version: '4.7.1-SNAPSHOT+AppVeyor.{build}'
+build: off
build_script:
- - mvn -DskipTests install -q --batch-mode
+ - mvn -DskipTests install --batch-mode
+ - msbuild runtime/CSharp/runtime/CSharp/Antlr4.vs2013.sln /logger:"C:\Program Files\AppVeyor\BuildAgent\Appveyor.MSBuildLogger.dll" /verbosity:detailed
+ - msbuild ./runtime-testsuite/target/classes/CSharp/runtime/CSharp/Antlr4.vs2013.sln /logger:"C:\Program Files\AppVeyor\BuildAgent\Appveyor.MSBuildLogger.dll" /verbosity:detailed
test_script:
- - mvn install -q -Dantlr-python2-python="C:\Python27\python.exe" -Dantlr-python3-python="C:\Python35\python.exe" -Dantlr-javascript-nodejs="C:\Program Files (x86)\nodejs\node.exe" --batch-mode
-build:
- verbosity: minimal
+ - mvn install -Dantlr-python2-python="C:\Python27\python.exe" -Dantlr-python3-python="C:\Python35\python.exe" -Dantlr-javascript-nodejs="C:\Program Files (x86)\nodejs\node.exe" --batch-mode
diff --git a/contributors.txt b/contributors.txt
index 21a3c0cdb..caf196135 100644
--- a/contributors.txt
+++ b/contributors.txt
@@ -151,4 +151,20 @@ YYYY/MM/DD, github id, Full name, email
2017/06/11, erikbra, Erik A. Brandstadmoen, erik@brandstadmoen.net
2017/06/10, jm-mikkelsen, Jan Martin Mikkelsen, janm@transactionware.com
2017/06/25, alimg, Alim Gökkaya, alim.gokkaya@gmail.com
+2017/06/28, jBugman, Sergey Parshukov, codedby@bugman.me
+2017/07/09, neatnerd, Mike Arshinskiy, neatnerd@users.noreply.github.com
+2017/07/11, dhalperi, Daniel Halperin, daniel@halper.in
+2017/07/17, vaibhavaingankar09, Vaibhav Vaingankar, vbhvvaingankar9@gmail.com
+2017/07/23, venkatperi, Venkat Peri, venkatperi@gmail.com
+2017/07/27, shirou, WAKAYAMA Shirou, shirou.faw@gmail.com
+2017/07/09, neatnerd, Mike Arshinskiy, neatnerd@users.noreply.github.com
+2017/07/27, matthauck, Matt Hauck, matthauck@gmail.com
+2017/07/27, shirou, WAKAYAMA Shirou, shirou.faw@gmail.com
+2017/08/20, tiagomazzutti, Tiago Mazzutti, tiagomzt@gmail.com
+2017/08/29, Eddy Reyes, eddy@mindsight.io
+2017/09/09, brauliobz, Bráulio Bezerra, brauliobezerra@gmail.com
+2017/09/11, sachinjain024, Sachin Jain, sachinjain024@gmail.com
+2017/10/06, bramp, Andrew Brampton, brampton@gmail.com
+2017/10/15, simkimsia, Sim Kim Sia, kimcity@gmail.com
2017/10/27, Griffon26, Maurice van der Pot, griffon26@kfk4ever.com
+2017/05/29, rlfnb, Ralf Neeb, rlfnb@rlfnb.de
\ No newline at end of file
diff --git a/doc/getting-started.md b/doc/getting-started.md
index eaf2141fb..5c57119f7 100644
--- a/doc/getting-started.md
+++ b/doc/getting-started.md
@@ -6,7 +6,7 @@ Hi and welcome to the version 4 release of ANTLR! It's named after the fearless
ANTLR is really two things: a tool that translates your grammar to a parser/lexer in Java (or other target language) and the runtime needed by the generated parsers/lexers. Even if you are using the ANTLR Intellij plug-in or ANTLRWorks to run the ANTLR tool, the generated code will still need the runtime library.
-The first thing you should do is probably download and install a development tool plug-in. Even if you only use such tools for editing, they are great. Then, follow the instructions below to get the runtime environment available to your system to run generated parsers/lexers. In what follows, I talk about antlr-4.5.3-complete.jar, which has the tool and the runtime and any other support libraries (e.g., ANTLR v4 is written in v3).
+The first thing you should do is probably download and install a development tool plug-in. Even if you only use such tools for editing, they are great. Then, follow the instructions below to get the runtime environment available to your system to run generated parsers/lexers. In what follows, I talk about antlr-4.7-complete.jar, which has the tool and the runtime and any other support libraries (e.g., ANTLR v4 is written in v3).
If you are going to integrate ANTLR into your existing build system using mvn, ant, or want to get ANTLR into your IDE such as eclipse or intellij, see Integrating ANTLR into Development Systems.
@@ -16,19 +16,21 @@ If you are going to integrate ANTLR into your existing build system using mvn, a
1. Download
```
$ cd /usr/local/lib
-$ curl -O http://www.antlr.org/download/antlr-4.5.3-complete.jar
+$ curl -O http://www.antlr.org/download/antlr-4.7-complete.jar
```
Or just download in browser from website:
[http://www.antlr.org/download.html](http://www.antlr.org/download.html)
and put it somewhere rational like `/usr/local/lib`.
-2. Add `antlr-4.5.3-complete.jar` to your `CLASSPATH`:
+
+2. Add `antlr-4.7-complete.jar` to your `CLASSPATH`:
```
-$ export CLASSPATH=".:/usr/local/lib/antlr-4.5.3-complete.jar:$CLASSPATH"
+$ export CLASSPATH=".:/usr/local/lib/antlr-4.7-complete.jar:$CLASSPATH"
```
It's also a good idea to put this in your `.bash_profile` or whatever your startup script is.
+
3. Create aliases for the ANTLR Tool, and `TestRig`.
```
-$ alias antlr4='java -Xmx500M -cp "/usr/local/lib/antlr-4.5.3-complete.jar:$CLASSPATH" org.antlr.v4.Tool'
+$ alias antlr4='java -Xmx500M -cp "/usr/local/lib/antlr-4.7-complete.jar:$CLASSPATH" org.antlr.v4.Tool'
$ alias grun='java org.antlr.v4.gui.TestRig'
```
@@ -39,11 +41,11 @@ $ alias grun='java org.antlr.v4.gui.TestRig'
0. Install Java (version 1.6 or higher)
1. Download antlr-4.5.3-complete.jar (or whatever version) from [http://www.antlr.org/download/](http://www.antlr.org/download/)
Save to your directory for 3rd party Java libraries, say `C:\Javalib`
-2. Add `antlr-4.5-complete.jar` to CLASSPATH, either:
+2. Add `antlr-4.5.3-complete.jar` to CLASSPATH, either:
* Permanently: Using System Properties dialog > Environment variables > Create or append to `CLASSPATH` variable
* Temporarily, at command line:
```
-SET CLASSPATH=.;C:\Javalib\antlr-4.5.3-complete.jar;%CLASSPATH%
+SET CLASSPATH=.;C:\Javalib\antlr-4.7-complete.jar;%CLASSPATH%
```
3. Create short convenient commands for the ANTLR Tool, and TestRig, using batch files or doskey commands:
* Batch files (in directory in system PATH) antlr4.bat and grun.bat
@@ -65,7 +67,7 @@ Either launch org.antlr.v4.Tool directly:
```
$ java org.antlr.v4.Tool
-ANTLR Parser Generator Version 4.5.3
+ANTLR Parser Generator Version 4.7
-o ___ specify output directory where all output is generated
-lib ___ specify location of .tokens files
...
@@ -74,8 +76,8 @@ ANTLR Parser Generator Version 4.5.3
or use -jar option on java:
```
-$ java -jar /usr/local/lib/antlr-4.5.3-complete.jar
-ANTLR Parser Generator Version 4.5.3
+$ java -jar /usr/local/lib/antlr-4.7-complete.jar
+ANTLR Parser Generator Version 4.7
-o ___ specify output directory where all output is generated
-lib ___ specify location of .tokens files
...
diff --git a/doc/images/gen_spm_module.png b/doc/images/gen_spm_module.png
new file mode 100644
index 000000000..0798c37b6
Binary files /dev/null and b/doc/images/gen_spm_module.png differ
diff --git a/doc/swift-target.md b/doc/swift-target.md
index 69eb88e4d..4f4e6e7c1 100644
--- a/doc/swift-target.md
+++ b/doc/swift-target.md
@@ -1,9 +1,15 @@
# ANTLR4 Language Target, Runtime for Swift
+## Performance Note
+
+To use ANTLR4 Swift target in production environment, make sure to turn on compiler optimizations by following [these instructions](https://github.com/apple/swift-package-manager/blob/master/Documentation/Usage.md#build-configurations) if you use SwiftPM to build your project. If you are using Xcode to build your project, make sure to use the `release` build configuration for your production build.
+
+Conclusion is, you need to turn on `release` mode (which will have all the optimization pre configured for you) so the ANTLR4 Swift target can have reasonable parsing speed.
+
## Install ANTLR4
Make sure you have the ANTLR
-installed.[The getting started guide](getting-started.md) should get
+installed. [The getting started guide](getting-started.md) should get
you started.
## Create a Swift lexer or parser
@@ -18,82 +24,120 @@ For a full list of antlr4 tool options, please visit the
## Build your Swift project with ANTLR runtime
-The following instructions are assuming Xcode as the IDE:
+### Note
-* __Add parser/lexer to project__. Make sure the parsers/lexers
+We use __boot.py__ script located at the root of the Swift runtime folder
+`antlr4/runtime/Swift` to provide additional support for both Xcode-based
+projects and SPM-based projects. The sections below are organized for both
+flavors. If you want to quickly get started, try:
+
+```
+python boot.py --help
+```
+
+for information about this script.
+
+### Xcode Projects
+
+Note that even if you are otherwise using ANTLR from a binary distribution,
+you should compile the ANTLR Swift runtime from source, because the Swift
+language does not yet have a stable ABI.
+
+ANTLR uses Swift Package Manager to generate Xcode project files. Note that
+Swift Package Manager does not currently support iOS, watchOS, or tvOS, so
+if you wish to use those platforms, you will need to alter the project build
+settings manually as appropriate.
+
+#### Download source code for ANTLR
+
+```
+git clone https://github.com/antlr/antlr4
+```
+
+#### Generate Xcode project for ANTLR runtime
+
+The `boot.py` script includes a wrapper around `swift package
+generate-xcodeproj`. Use this to generate `Antlr4.xcodeproj` for the ANTLR
+Swift runtime. (Using _swift package generate-xcodeproj_ directly is not
+recommended, since the project depends on parser files generated by _boot.py_.)
+
+```
+cd antlr4/runtime/Swift
+python boot.py --gen-xcodeproj
+```
+
+#### Import ANTLR Swift runtime into your project
+
+Open your own project in Xcode.
+
+Open Finder in the `runtime/Swift` directory:
+
+```
+# From antlr4/runtime/Swift
+open .
+```
+
+Drag `Antlr4.xcodeproj` into your project.
+
+After this is done, your Xcode project navigator will be something like the
+screenshot below. In this example, your own project is "Smalltalk", and you
+will be able to see `Antlr4.xcodeproj` shown as a contained project.
+
+
+
+#### Edit the build settings if necessary
+
+Swift Package Manager currently does not support iOS, watchOS, or tvOS. If
+you wish to build for those platforms, you will need to alter the project
+build settings manually.
+
+#### Add generated parser and lexer to project
+
+Make sure the parsers/lexers
generated in __step 2__ are added to the project. To do this, you can
drag the generated files from Finder to the Xcode IDE. Remember to
check __Copy items if needed__ to make sure the files are actually
moved into the project folder instead of symbolic links (see the
screenshot below). After moving you will be able to see your files in
-the project navigator. But when you open one of the files, you will
-see Xcode complaining the module "Antlr4" could not be found at the
-import statement. This is expected, since we still need the ANTLR
-Swift runtime for those missing symbols.
+the project navigator. Make sure that the Target Membership settings
+are correct for your project.
-* __Download ANTLR runtime__. Due to unstable ABI of Swift language,
-there will not be a single "library" for the Swift ANTLR runtime for
-now. To get Swift ANTLR runtime, clone the ANTLR repository. Open it
-in finder. From the root directory of the repo, go to runtime/Swift
-folder. You will see the Xcode project manifest file:
-__Antlr4.xcodeproj__.
+#### Add the ANTLR Swift runtime as a dependency
-* __Import ANTLR Swift runtime into project__. Drag Antlr4.xcodeproj
-into your project, after this is done, your Xcode project navigator
-will be something like the screenshot below. In this case, your own
-project is "Smalltalk", and you will be able to see the
-Antlr4.xcodeproj shown as a contained project. The error message will
-still be there, that's because we still need to tell Xcode how to find
-the runtime.
-
-
-
-* __Build ANTLR runtime__. By expanding the "Products" folder in the
-inner project (Antlr4.xcodeproj), you will see two Antlr4.framework
-files. ".framework" file is the swift version of ".jar", ".a" as in
-JAVA, C/C++ Initially those two files should be red, that's because
-they are not built. To build, click the "target selection" button
-right next to your Xcode run button. And in the drop down select the
-target you want to build. And you will see the two Antlr4.framework
-files are for iOS and OSX, as shown below. After target selection,
-press "CMD+B", and Xcode will build the framework for you. Then you
-will see one of the frameworks become black.
-
-
-
-* __Add dependencies__. Simply adding ANTLR Swift runtime and build
-the artifact is not enough. You still need to specify
-dependencies. Click your own project (Smalltalk), and you will see
-project setting page. Go to "Build Phase", and inside it make sure
-your ANTLR Swift runtime framework is added to both "__Target
-Dependencies__" and "__Link Binary With Libraries__" sections, as
-shown below. After correctly added dependencies, the error message for
-importing library will be gone.
+Select your own project in Xcode and go to the Build Phases settings panel.
+Add the ANTLR runtime under __Target Dependencies__ and __Link Binary With
+Libraries__.
-## Example playground
+#### Build your project
-The Swift runtime includes an Xcode playground to get started with.
+The runtime and generated grammar should now build correctly.
-First go to the ANTLR4 repository, and open
-`runtime/Swift/Antlr4.xcworkspace` in Xcode. Select "Antlr4 OSX > My
-Mac" as the build target, and build the project as normal. The
-playground should then be active.
+### Swift Package Manager Projects
-The playground includes a simple grammar called "Hello", and an
-example for walking the parse tree. You should see in the playground
-output that it is printing messages for each node in the parse tree as
-it walks.
+Since we cannot have a separate repository for the Swift target (see issue [#1774](https://github.com/antlr/antlr4/issues/1774)),
+and Swift is currently not ABI stable, we support SPM-based
+projects by creating a temporary local repository.
-The grammar is defined in the playground's `Resources/Hello.g4`. The
-parser was generated from the grammar using ANTLR like this:
+For people using [Swift Package Manager](https://swift.org/package-manager/),
+the __boot.py__ script supports generating local repository that can be used
+as a dependency to your project. Simply run:
-```
-antlr4 -Dlanguage=Swift -visitor -o ../Sources/Autogen Hello.g4
+```
+python boot.py --gen-spm-module
```
-The example tree walker is in Sources/HelloWalker.swift.
-
+The prompt will show something like below:
+
+
+
+Put the SPM directive that contains the url to temporary repository to your
+project's Package.swift. And run `swift build` in your project.
+
+The project is generated in your system's `/tmp/` directory. If you find this
+inconvenient, consider copying the generated ANTLR repository to a location
+that won't be cleaned automatically, and update the `url` parameter in your
+`Package.swift` file.
diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/templates/Swift.test.stg b/runtime-testsuite/resources/org/antlr/v4/test/runtime/templates/Swift.test.stg
index 2a203e969..64d560df5 100755
--- a/runtime-testsuite/resources/org/antlr/v4/test/runtime/templates/Swift.test.stg
+++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/templates/Swift.test.stg
@@ -72,7 +72,7 @@ TokenStartColumnEquals(i) ::= <%self._tokenStartCharPositionInLine == %>
ImportListener(X) ::= ""
-GetExpectedTokenNames() ::= "try self.getExpectedTokens().toString(self.tokenNames)"
+GetExpectedTokenNames() ::= "try self.getExpectedTokens().toString(self.getVocabulary())"
RuleInvocationStack() ::= "getRuleInvocationStack().description.replacingOccurrences(of: \"\\\"\", with: \"\")"
diff --git a/runtime-testsuite/test/org/antlr/v4/runtime/TestCodePointCharStream.java b/runtime-testsuite/test/org/antlr/v4/runtime/TestCodePointCharStream.java
index 25c4c0919..c40c4048c 100644
--- a/runtime-testsuite/test/org/antlr/v4/runtime/TestCodePointCharStream.java
+++ b/runtime-testsuite/test/org/antlr/v4/runtime/TestCodePointCharStream.java
@@ -23,6 +23,7 @@ public class TestCodePointCharStream {
CodePointCharStream s = CharStreams.fromString("");
assertEquals(0, s.size());
assertEquals(0, s.index());
+ assertEquals("", s.toString());
}
@Test
diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/descriptors/ParserErrorsDescriptors.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/descriptors/ParserErrorsDescriptors.java
index 0b53e994e..26352d317 100644
--- a/runtime-testsuite/test/org/antlr/v4/test/runtime/descriptors/ParserErrorsDescriptors.java
+++ b/runtime-testsuite/test/org/antlr/v4/test/runtime/descriptors/ParserErrorsDescriptors.java
@@ -618,4 +618,28 @@ public class ParserErrorsDescriptors {
public String grammar;
}
+
+ public static class ExtraneousInput extends BaseParserTestDescriptor {
+ public String input = "baa";
+ public String output = null;
+ public String errors = "line 1:0 mismatched input 'b' expecting {, 'a'}\n";
+ public String startRule = "file";
+ public String grammarName = "T";
+
+ /**
+ grammar T;
+
+ member : 'a';
+ body : member*;
+ file : body EOF;
+ B : 'b';
+ */
+ @CommentHasStringValue
+ public String grammar;
+
+ @Override
+ public boolean ignore(String targetName) {
+ return !"Java".equals(targetName);
+ }
+ }
}
diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/descriptors/SemPredEvalParserDescriptors.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/descriptors/SemPredEvalParserDescriptors.java
index fbf6cfbfc..218bdb789 100644
--- a/runtime-testsuite/test/org/antlr/v4/test/runtime/descriptors/SemPredEvalParserDescriptors.java
+++ b/runtime-testsuite/test/org/antlr/v4/test/runtime/descriptors/SemPredEvalParserDescriptors.java
@@ -283,11 +283,16 @@ public class SemPredEvalParserDescriptors {
public String input = "s\n\n\nx\n";
public String output = "(file_ (para (paraContent s) \\n \\n) (para (paraContent \\n x \\n)) )\n";
/**
- line 5:0 mismatched input '' expecting '
- '
+ line 5:0 mismatched input '' expecting {'s', '
+ ', 'x'}
*/
@CommentHasStringValue
public String errors;
+
+ @Override
+ public boolean ignore(String targetName) {
+ return !"Java".equals(targetName);
+ }
}
public static class PredFromAltTestedInLoopBack_2 extends PredFromAltTestedInLoopBack {
diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/swift/BaseSwiftTest.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/swift/BaseSwiftTest.java
index f6b890931..90dc05245 100644
--- a/runtime-testsuite/test/org/antlr/v4/test/runtime/swift/BaseSwiftTest.java
+++ b/runtime-testsuite/test/org/antlr/v4/test/runtime/swift/BaseSwiftTest.java
@@ -145,7 +145,7 @@ public class BaseSwiftTest implements RuntimeTestSupport {
String projectName = "testcase-" + System.currentTimeMillis();
String projectDir = getTmpDir() + "/" + projectName;
- buildProject(projectDir);
+ buildProject(projectDir, projectName);
return execTest(projectDir, projectName);
}
@@ -183,12 +183,12 @@ public class BaseSwiftTest implements RuntimeTestSupport {
Collections.addAll(this.sourceFiles, files);
}
- private void buildProject(String projectDir) {
+ private void buildProject(String projectDir, String projectName) {
mkdir(projectDir);
fastFailRunProcess(projectDir, SWIFT_CMD, "package", "init", "--type", "executable");
for (String sourceFile: sourceFiles) {
String absPath = getTmpDir() + "/" + sourceFile;
- fastFailRunProcess(getTmpDir(), "mv", "-f", absPath, projectDir + "/Sources/");
+ fastFailRunProcess(getTmpDir(), "mv", "-f", absPath, projectDir + "/Sources/" + projectName);
}
fastFailRunProcess(getTmpDir(), "mv", "-f", "input", projectDir);
@@ -201,7 +201,7 @@ public class BaseSwiftTest implements RuntimeTestSupport {
"-Xlinker", "-rpath",
"-Xlinker", dylibPath);
if (buildResult.b.length() > 0) {
- throw new RuntimeException("unit test build failed: " + buildResult.b);
+ throw new RuntimeException("unit test build failed: " + buildResult.a + "\n" + buildResult.b);
}
} catch (IOException | InterruptedException e) {
e.printStackTrace();
@@ -251,7 +251,7 @@ public class BaseSwiftTest implements RuntimeTestSupport {
addSourceFiles("main.swift");
String projectName = "testcase-" + System.currentTimeMillis();
String projectDir = getTmpDir() + "/" + projectName;
- buildProject(projectDir);
+ buildProject(projectDir, projectName);
return execTest(projectDir, projectName);
}
diff --git a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Atn/ATNDeserializer.cs b/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Atn/ATNDeserializer.cs
index 9009b9f43..3ce2e87d2 100644
--- a/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Atn/ATNDeserializer.cs
+++ b/runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Atn/ATNDeserializer.cs
@@ -1092,7 +1092,10 @@ nextTransition_continue: ;
protected internal Guid ReadUUID()
{
byte[] d = BitConverter.GetBytes (ReadLong ());
- Array.Reverse(d);
+ if(BitConverter.IsLittleEndian)
+ {
+ Array.Reverse(d);
+ }
short c = (short)ReadInt();
short b = (short)ReadInt();
int a = ReadInt32();
diff --git a/runtime/Cpp/CMakeLists.txt b/runtime/Cpp/CMakeLists.txt
index 65e704516..c91e38e38 100644
--- a/runtime/Cpp/CMakeLists.txt
+++ b/runtime/Cpp/CMakeLists.txt
@@ -33,6 +33,7 @@ endif()
if(CMAKE_VERSION VERSION_EQUAL "3.3.0" OR
CMAKE_VERSION VERSION_GREATER "3.3.0")
CMAKE_POLICY(SET CMP0059 OLD)
+ CMAKE_POLICY(SET CMP0054 OLD)
endif()
if(CMAKE_SYSTEM_NAME MATCHES "Linux")
@@ -61,7 +62,11 @@ if (WITH_DEMO)
endif()
endif(WITH_DEMO)
-set(MY_CXX_WARNING_FLAGS " -Wall -pedantic -W")
+if (MSVC_VERSION)
+ set(MY_CXX_WARNING_FLAGS " /W4")
+else()
+ set(MY_CXX_WARNING_FLAGS " -Wall -pedantic -W")
+endif()
# Initialize CXXFLAGS.
if("${CMAKE_VERSION}" VERSION_GREATER 3.1.0)
@@ -75,11 +80,18 @@ else()
set(CMAKE_CXX_FLAGS_RELWITHDEBINFO "${CMAKE_CXX_FLAGS_RELWITHDEBINFO} -std=c++11")
endif()
-set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wall ${MY_CXX_WARNING_FLAGS}")
-set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -O0 -g ${MY_CXX_WARNING_FLAGS}")
-set(CMAKE_CXX_FLAGS_MINSIZEREL "${CMAKE_CXX_FLAGS_MINSIZEREL} -Os -DNDEBUG ${MY_CXX_WARNING_FLAGS}")
-set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} -O3 -DNDEBUG ${MY_CXX_WARNING_FLGAS}")
-set(CMAKE_CXX_FLAGS_RELWITHDEBINFO "${CMAKE_CXX_FLAGS_RELWITHDEBINFO} -O2 -g ${MY_CXX_WARNING_FLAGS}")
+set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${MY_CXX_WARNING_FLAGS}")
+if (MSVC_VERSION)
+ set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} /Od /Zi /MP ${MY_CXX_WARNING_FLAGS}")
+ set(CMAKE_CXX_FLAGS_MINSIZEREL "${CMAKE_CXX_FLAGS_MINSIZEREL} /O1 /Oi /Ob2 /Gy /MP /DNDEBUG ${MY_CXX_WARNING_FLAGS}")
+ set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} /O2 /Oi /Ob2 /Gy /MP /DNDEBUG ${MY_CXX_WARNING_FLAGS}")
+ set(CMAKE_CXX_FLAGS_RELWITHDEBINFO "${CMAKE_CXX_FLAGS_RELWITHDEBINFO} /O2 /Oi /Ob2 /Gy /MP /Zi ${MY_CXX_WARNING_FLAGS}")
+else()
+ set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -O0 -g ${MY_CXX_WARNING_FLAGS}")
+ set(CMAKE_CXX_FLAGS_MINSIZEREL "${CMAKE_CXX_FLAGS_MINSIZEREL} -Os -DNDEBUG ${MY_CXX_WARNING_FLAGS}")
+ set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} -O3 -DNDEBUG ${MY_CXX_WARNING_FLAGS}")
+ set(CMAKE_CXX_FLAGS_RELWITHDEBINFO "${CMAKE_CXX_FLAGS_RELWITHDEBINFO} -O2 -g ${MY_CXX_WARNING_FLAGS}")
+endif()
# Compiler-specific C++11 activation.
if ("${CMAKE_CXX_COMPILER_ID}" MATCHES "GNU")
@@ -101,6 +113,8 @@ elseif ("${CMAKE_CXX_COMPILER_ID}" MATCHES "Clang" AND CMAKE_SYSTEM_NAME MATCHES
if (WITH_LIBCXX)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -stdlib=libc++")
endif()
+elseif ( MSVC_VERSION GREATER 1800 OR MSVC_VERSION EQUAL 1800 )
+ # Visual Studio 2013+ (MSVC_VERSION >= 1800) supports the C++11 features we need
else ()
message(FATAL_ERROR "Your C++ compiler does not support C++11.")
endif ()
diff --git a/runtime/Cpp/demo/Mac/antlrcpp Tests/InputHandlingTests.mm b/runtime/Cpp/demo/Mac/antlrcpp Tests/InputHandlingTests.mm
index 7b13ae83d..647f73fed 100644
--- a/runtime/Cpp/demo/Mac/antlrcpp Tests/InputHandlingTests.mm
+++ b/runtime/Cpp/demo/Mac/antlrcpp Tests/InputHandlingTests.mm
@@ -91,7 +91,7 @@ using namespace antlr4::misc;
- (void)testANTLRInputStreamUse {
std::string text(u8"🚧Lorem ipsum dolor sit amet🕶");
- std::u32string wtext = utfConverter.from_bytes(text); // Convert to UTF-32.
+ std::u32string wtext = utf8_to_utf32(text.c_str(), text.c_str() + text.size()); // Convert to UTF-32.
ANTLRInputStream stream(text);
XCTAssertEqual(stream.index(), 0U);
XCTAssertEqual(stream.size(), wtext.size());
@@ -116,8 +116,8 @@ using namespace antlr4::misc;
XCTAssertEqual(stream.LA(0), 0ULL);
for (size_t i = 1; i < wtext.size(); ++i) {
- XCTAssertEqual(stream.LA((ssize_t)i), wtext[i - 1]); // LA(1) means: current char.
- XCTAssertEqual(stream.LT((ssize_t)i), wtext[i - 1]); // LT is mapped to LA.
+ XCTAssertEqual(stream.LA(static_cast(i)), wtext[i - 1]); // LA(1) means: current char.
+ XCTAssertEqual(stream.LT(static_cast(i)), wtext[i - 1]); // LT is mapped to LA.
XCTAssertEqual(stream.index(), 0U); // No consumption when looking ahead.
}
@@ -128,7 +128,7 @@ using namespace antlr4::misc;
XCTAssertEqual(stream.index(), wtext.size() / 2);
stream.seek(wtext.size() - 1);
- for (ssize_t i = 1; i < (ssize_t)wtext.size() - 1; ++i) {
+ for (ssize_t i = 1; i < static_cast(wtext.size()) - 1; ++i) {
XCTAssertEqual(stream.LA(-i), wtext[wtext.size() - i - 1]); // LA(-1) means: previous char.
XCTAssertEqual(stream.LT(-i), wtext[wtext.size() - i - 1]); // LT is mapped to LA.
XCTAssertEqual(stream.index(), wtext.size() - 1); // No consumption when looking ahead.
@@ -150,7 +150,7 @@ using namespace antlr4::misc;
misc::Interval interval1(2, 10UL); // From - to, inclusive.
std::string output = stream.getText(interval1);
- std::string sub = utfConverter.to_bytes(wtext.substr(2, 9));
+ std::string sub = utf32_to_utf8(wtext.substr(2, 9));
XCTAssertEqual(output, sub);
misc::Interval interval2(200, 10UL); // Start beyond bounds.
diff --git a/runtime/Cpp/demo/Mac/antlrcpp Tests/MiscClassTests.mm b/runtime/Cpp/demo/Mac/antlrcpp Tests/MiscClassTests.mm
index 063616a1d..58cac4be4 100644
--- a/runtime/Cpp/demo/Mac/antlrcpp Tests/MiscClassTests.mm
+++ b/runtime/Cpp/demo/Mac/antlrcpp Tests/MiscClassTests.mm
@@ -92,7 +92,7 @@ using namespace antlrcpp;
// in a deterministic and a random sequence of 100K values each.
std::set hashs;
for (size_t i = 0; i < 100000; ++i) {
- std::vector data = { i, (size_t)(i * M_PI), arc4random()};
+ std::vector data = { i, static_cast(i * M_PI), arc4random() };
size_t hash = 0;
for (auto value : data)
hash = MurmurHash::update(hash, value);
@@ -103,7 +103,7 @@ using namespace antlrcpp;
hashs.clear();
for (size_t i = 0; i < 100000; ++i) {
- std::vector data = { i, (size_t)(i * M_PI)};
+ std::vector data = { i, static_cast(i * M_PI) };
size_t hash = 0;
for (auto value : data)
hash = MurmurHash::update(hash, value);
@@ -232,19 +232,25 @@ using namespace antlrcpp;
{ 78, Interval(1000, 1000UL), Interval(20, 100UL), { false, false, true, true, false, true, false, false } },
// It's possible to add more tests with borders that touch each other (e.g. first starts before/on/after second
- // and first ends directly before/after second. However, such cases are not handled differently in the Interval class
+ // and first ends directly before/after second. However, such cases are not handled differently in the Interval
+ // class
// (only adjacent intervals, where first ends directly before second starts and vice versa. So I ommitted them here.
};
for (auto &entry : testData) {
- XCTAssert(entry.interval1.startsBeforeDisjoint(entry.interval2) == entry.results[0], @"entry: %zu", entry.runningNumber);
- XCTAssert(entry.interval1.startsBeforeNonDisjoint(entry.interval2) == entry.results[1], @"entry: %zu", entry.runningNumber);
+ XCTAssert(entry.interval1.startsBeforeDisjoint(entry.interval2) == entry.results[0], @"entry: %zu",
+ entry.runningNumber);
+ XCTAssert(entry.interval1.startsBeforeNonDisjoint(entry.interval2) == entry.results[1], @"entry: %zu",
+ entry.runningNumber);
XCTAssert(entry.interval1.startsAfter(entry.interval2) == entry.results[2], @"entry: %zu", entry.runningNumber);
- XCTAssert(entry.interval1.startsAfterDisjoint(entry.interval2) == entry.results[3], @"entry: %zu", entry.runningNumber);
- XCTAssert(entry.interval1.startsAfterNonDisjoint(entry.interval2) == entry.results[4], @"entry: %zu", entry.runningNumber);
+ XCTAssert(entry.interval1.startsAfterDisjoint(entry.interval2) == entry.results[3], @"entry: %zu",
+ entry.runningNumber);
+ XCTAssert(entry.interval1.startsAfterNonDisjoint(entry.interval2) == entry.results[4], @"entry: %zu",
+ entry.runningNumber);
XCTAssert(entry.interval1.disjoint(entry.interval2) == entry.results[5], @"entry: %zu", entry.runningNumber);
XCTAssert(entry.interval1.adjacent(entry.interval2) == entry.results[6], @"entry: %zu", entry.runningNumber);
- XCTAssert(entry.interval1.properlyContains(entry.interval2) == entry.results[7], @"entry: %zu", entry.runningNumber);
+ XCTAssert(entry.interval1.properlyContains(entry.interval2) == entry.results[7], @"entry: %zu",
+ entry.runningNumber);
}
XCTAssert(Interval().Union(Interval(10, 100UL)) == Interval(-1L, 100));
@@ -327,30 +333,34 @@ using namespace antlrcpp;
try {
set4.clear();
XCTFail(@"Expected exception");
- }
- catch (IllegalStateException &e) {
+ } catch (IllegalStateException &e) {
}
try {
set4.setReadOnly(false);
XCTFail(@"Expected exception");
+ } catch (IllegalStateException &e) {
}
- catch (IllegalStateException &e) {
- }
-
- set4 = IntervalSet::of(12345);
- XCTAssertEqual(set4.getSingleElement(), 12345);
- XCTAssertEqual(set4.getMinElement(), 12345);
- XCTAssertEqual(set4.getMaxElement(), 12345);
- IntervalSet set5(10, 5, 10, 15, 20, 25, 30, 35, 40, 45, 50);
- XCTAssertEqual(set5.getMinElement(), 5);
- XCTAssertEqual(set5.getMaxElement(), 50);
- XCTAssertEqual(set5.size(), 10U);
- set5.add(12, 18);
- XCTAssertEqual(set5.size(), 16U); // (15, 15) replaced by (12, 18)
- set5.add(9, 33);
- XCTAssertEqual(set5.size(), 30U); // (10, 10), (12, 18), (20, 20), (25, 25) and (30, 30) replaced by (9, 33)
+ try {
+ set4 = IntervalSet::of(12345);
+ XCTFail(@"Expected exception");
+ } catch (IllegalStateException &e) {
+ }
+
+ IntervalSet set5 = IntervalSet::of(12345);
+ XCTAssertEqual(set5.getSingleElement(), 12345);
+ XCTAssertEqual(set5.getMinElement(), 12345);
+ XCTAssertEqual(set5.getMaxElement(), 12345);
+
+ IntervalSet set6(10, 5, 10, 15, 20, 25, 30, 35, 40, 45, 50);
+ XCTAssertEqual(set6.getMinElement(), 5);
+ XCTAssertEqual(set6.getMaxElement(), 50);
+ XCTAssertEqual(set6.size(), 10U);
+ set6.add(12, 18);
+ XCTAssertEqual(set6.size(), 16U); // (15, 15) replaced by (12, 18)
+ set6.add(9, 33);
+ XCTAssertEqual(set6.size(), 30U); // (10, 10), (12, 18), (20, 20), (25, 25) and (30, 30) replaced by (9, 33)
XCTAssert(IntervalSet(3, 1, 2, 10).Or(IntervalSet(3, 1, 2, 5)) == IntervalSet(4, 1, 2, 5, 10));
XCTAssert(IntervalSet({ Interval(2, 10UL) }).Or(IntervalSet({ Interval(5, 8UL) })) == IntervalSet({ Interval(2, 10UL) }));
@@ -358,8 +368,10 @@ using namespace antlrcpp;
XCTAssert(IntervalSet::of(1, 10).complement(IntervalSet::of(7, 55)) == IntervalSet::of(11, 55));
XCTAssert(IntervalSet::of(1, 10).complement(IntervalSet::of(20, 55)) == IntervalSet::of(20, 55));
XCTAssert(IntervalSet::of(1, 10).complement(IntervalSet::of(5, 6)) == IntervalSet::EMPTY_SET);
- XCTAssert(IntervalSet::of(15, 20).complement(IntervalSet::of(7, 55)) == IntervalSet({ Interval(7, 14UL), Interval(21, 55UL) }));
- XCTAssert(IntervalSet({ Interval(1, 10UL), Interval(30, 35UL) }).complement(IntervalSet::of(7, 55)) == IntervalSet({ Interval(11, 29UL), Interval(36, 55UL) }));
+ XCTAssert(IntervalSet::of(15, 20).complement(IntervalSet::of(7, 55)) ==
+ IntervalSet({ Interval(7, 14UL), Interval(21, 55UL) }));
+ XCTAssert(IntervalSet({ Interval(1, 10UL), Interval(30, 35UL) }).complement(IntervalSet::of(7, 55)) ==
+ IntervalSet({ Interval(11, 29UL), Interval(36, 55UL) }));
XCTAssert(IntervalSet::of(1, 10).And(IntervalSet::of(7, 55)) == IntervalSet::of(7, 10));
XCTAssert(IntervalSet::of(1, 10).And(IntervalSet::of(20, 55)) == IntervalSet::EMPTY_SET);
@@ -368,7 +380,8 @@ using namespace antlrcpp;
XCTAssert(IntervalSet::of(1, 10).subtract(IntervalSet::of(7, 55)) == IntervalSet::of(1, 6));
XCTAssert(IntervalSet::of(1, 10).subtract(IntervalSet::of(20, 55)) == IntervalSet::of(1, 10));
- XCTAssert(IntervalSet::of(1, 10).subtract(IntervalSet::of(5, 6)) == IntervalSet({ Interval(1, 4UL), Interval(7, 10UL) }));
+ XCTAssert(IntervalSet::of(1, 10).subtract(IntervalSet::of(5, 6)) ==
+ IntervalSet({ Interval(1, 4UL), Interval(7, 10UL) }));
XCTAssert(IntervalSet::of(15, 20).subtract(IntervalSet::of(7, 55)) == IntervalSet::EMPTY_SET);
}
diff --git a/runtime/Cpp/deploy-windows.cmd b/runtime/Cpp/deploy-windows.cmd
index ec81b5940..5660f26a2 100644
--- a/runtime/Cpp/deploy-windows.cmd
+++ b/runtime/Cpp/deploy-windows.cmd
@@ -12,7 +12,8 @@ rem Headers
xcopy runtime\src\*.h antlr4-runtime\ /s
rem Binaries
-if exist "C:\Program Files (x86)\Microsoft Visual Studio 12.0\Common7\Tools\VsDevCmd.bat" (
+rem VS 2013 disabled by default. Change the X to a C to enable it.
+if exist "X:\Program Files (x86)\Microsoft Visual Studio 12.0\Common7\Tools\VsDevCmd.bat" (
call "C:\Program Files (x86)\Microsoft Visual Studio 12.0\Common7\Tools\VsDevCmd.bat"
pushd runtime
diff --git a/runtime/Cpp/runtime/CMakeLists.txt b/runtime/Cpp/runtime/CMakeLists.txt
index b2a4fbd02..dcd21b8b0 100644
--- a/runtime/Cpp/runtime/CMakeLists.txt
+++ b/runtime/Cpp/runtime/CMakeLists.txt
@@ -44,7 +44,11 @@ elseif(APPLE)
target_link_libraries(antlr4_static ${COREFOUNDATION_LIBRARY})
endif()
-set(disabled_compile_warnings "-Wno-overloaded-virtual")
+if (MSVC_VERSION)
+ set(disabled_compile_warnings "/wd4251")
+else()
+ set(disabled_compile_warnings "-Wno-overloaded-virtual")
+endif()
if ("${CMAKE_CXX_COMPILER_ID}" MATCHES "Clang")
set(disabled_compile_warnings "${disabled_compile_warnings} -Wno-dollar-in-identifier-extension -Wno-four-char-constants")
elseif("${CMAKE_CXX_COMPILER_ID}" MATCHES "GNU")
@@ -57,6 +61,15 @@ if (WIN32)
set(extra_share_compile_flags "-DANTLR4CPP_EXPORTS")
set(extra_static_compile_flags "-DANTLR4CPP_STATIC")
endif(WIN32)
+if (MSVC_VERSION)
+ target_compile_options(antlr4_shared PRIVATE "/MD$<$:d>")
+ target_compile_options(antlr4_static PRIVATE "/MT$<$:d>")
+endif()
+
+set(static_lib_suffix "")
+if (MSVC_VERSION)
+ set(static_lib_suffix "-static")
+endif()
set_target_properties(antlr4_shared
PROPERTIES VERSION ${ANTLR_VERSION}
@@ -72,7 +85,7 @@ set_target_properties(antlr4_shared
set_target_properties(antlr4_static
PROPERTIES VERSION ${ANTLR_VERSION}
SOVERSION ${ANTLR_VERSION}
- OUTPUT_NAME antlr4-runtime
+ OUTPUT_NAME "antlr4-runtime${static_lib_suffix}"
ARCHIVE_OUTPUT_DIRECTORY ${LIB_OUTPUT_DIR}
COMPILE_FLAGS "${disabled_compile_warnings} ${extra_static_compile_flags}")
diff --git a/runtime/Cpp/runtime/antlr4cpp-vs2013.vcxproj b/runtime/Cpp/runtime/antlr4cpp-vs2013.vcxproj
index 50ab20c8b..80f9ebf77 100644
--- a/runtime/Cpp/runtime/antlr4cpp-vs2013.vcxproj
+++ b/runtime/Cpp/runtime/antlr4cpp-vs2013.vcxproj
@@ -321,6 +321,8 @@
+
+
@@ -339,6 +341,7 @@
+
@@ -346,6 +349,7 @@
+
@@ -412,6 +416,7 @@
+
@@ -422,16 +427,23 @@
+
+
+
+
+
+
+
@@ -439,6 +451,7 @@
+
@@ -454,6 +467,7 @@
+
@@ -620,4 +634,4 @@
-
+
\ No newline at end of file
diff --git a/runtime/Cpp/runtime/antlr4cpp-vs2013.vcxproj.filters b/runtime/Cpp/runtime/antlr4cpp-vs2013.vcxproj.filters
index d3b301654..499a82ed4 100644
--- a/runtime/Cpp/runtime/antlr4cpp-vs2013.vcxproj.filters
+++ b/runtime/Cpp/runtime/antlr4cpp-vs2013.vcxproj.filters
@@ -938,5 +938,47 @@
Source Files\tree
+
+ Source Files
+
+
+ Source Files
+
+
+ Source Files
+
+
+ Source Files
+
+
+ Source Files
+
+
+ Source Files\tree
+
+
+ Source Files\tree
+
+
+ Source Files\tree
+
+
+ Source Files\tree
+
+
+ Source Files\support
+
+
+ Source Files\atn
+
+
+ Source Files\atn
+
+
+ Source Files\tree\pattern
+
+
+ Source Files\misc
+
-
+
\ No newline at end of file
diff --git a/runtime/Cpp/runtime/antlr4cpp-vs2015.vcxproj b/runtime/Cpp/runtime/antlr4cpp-vs2015.vcxproj
index e549a78b6..f9bebf6fe 100644
--- a/runtime/Cpp/runtime/antlr4cpp-vs2015.vcxproj
+++ b/runtime/Cpp/runtime/antlr4cpp-vs2015.vcxproj
@@ -334,6 +334,8 @@
+
+
@@ -352,6 +354,7 @@
+
@@ -359,6 +362,7 @@
+
@@ -425,6 +429,7 @@
+
@@ -435,16 +440,23 @@
+
+
+
+
+
+
+
@@ -452,6 +464,7 @@
+
@@ -467,6 +480,7 @@
+
@@ -633,4 +647,4 @@
-
+
\ No newline at end of file
diff --git a/runtime/Cpp/runtime/antlr4cpp-vs2015.vcxproj.filters b/runtime/Cpp/runtime/antlr4cpp-vs2015.vcxproj.filters
index 21eaaf722..26db5b9c4 100644
--- a/runtime/Cpp/runtime/antlr4cpp-vs2015.vcxproj.filters
+++ b/runtime/Cpp/runtime/antlr4cpp-vs2015.vcxproj.filters
@@ -938,5 +938,47 @@
Source Files\tree
+
+ Source Files
+
+
+ Source Files
+
+
+ Source Files\atn
+
+
+ Source Files\atn
+
+
+ Source Files\misc
+
+
+ Source Files
+
+
+ Source Files
+
+
+ Source Files
+
+
+ Source Files\support
+
+
+ Source Files\tree
+
+
+ Source Files\tree
+
+
+ Source Files\tree
+
+
+ Source Files\tree
+
+
+ Source Files\tree\pattern
+
-
+
\ No newline at end of file
diff --git a/runtime/Cpp/runtime/antlrcpp.xcodeproj/project.pbxproj b/runtime/Cpp/runtime/antlrcpp.xcodeproj/project.pbxproj
index 643c05885..ced55cf90 100644
--- a/runtime/Cpp/runtime/antlrcpp.xcodeproj/project.pbxproj
+++ b/runtime/Cpp/runtime/antlrcpp.xcodeproj/project.pbxproj
@@ -534,9 +534,6 @@
276E5F411CDB57AA003FF4B4 /* IntStream.h in Headers */ = {isa = PBXBuildFile; fileRef = 276E5CBF1CDB57AA003FF4B4 /* IntStream.h */; };
276E5F421CDB57AA003FF4B4 /* IntStream.h in Headers */ = {isa = PBXBuildFile; fileRef = 276E5CBF1CDB57AA003FF4B4 /* IntStream.h */; };
276E5F431CDB57AA003FF4B4 /* IntStream.h in Headers */ = {isa = PBXBuildFile; fileRef = 276E5CBF1CDB57AA003FF4B4 /* IntStream.h */; settings = {ATTRIBUTES = (Public, ); }; };
- 276E5F441CDB57AA003FF4B4 /* IRecognizer.h in Headers */ = {isa = PBXBuildFile; fileRef = 276E5CC01CDB57AA003FF4B4 /* IRecognizer.h */; };
- 276E5F451CDB57AA003FF4B4 /* IRecognizer.h in Headers */ = {isa = PBXBuildFile; fileRef = 276E5CC01CDB57AA003FF4B4 /* IRecognizer.h */; };
- 276E5F461CDB57AA003FF4B4 /* IRecognizer.h in Headers */ = {isa = PBXBuildFile; fileRef = 276E5CC01CDB57AA003FF4B4 /* IRecognizer.h */; settings = {ATTRIBUTES = (Public, ); }; };
276E5F471CDB57AA003FF4B4 /* Lexer.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 276E5CC11CDB57AA003FF4B4 /* Lexer.cpp */; };
276E5F481CDB57AA003FF4B4 /* Lexer.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 276E5CC11CDB57AA003FF4B4 /* Lexer.cpp */; };
276E5F491CDB57AA003FF4B4 /* Lexer.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 276E5CC11CDB57AA003FF4B4 /* Lexer.cpp */; };
@@ -800,6 +797,45 @@
27745F081CE49C000067C6A3 /* RuntimeMetaData.h in Headers */ = {isa = PBXBuildFile; fileRef = 27745EFC1CE49C000067C6A3 /* RuntimeMetaData.h */; };
27874F1E1CCB7A0700AF1C53 /* CoreFoundation.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 27874F1D1CCB7A0700AF1C53 /* CoreFoundation.framework */; };
27874F211CCB7B1700AF1C53 /* CoreFoundation.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 27874F1D1CCB7A0700AF1C53 /* CoreFoundation.framework */; };
+ 2793DC851F08083F00A84290 /* TokenSource.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 2793DC841F08083F00A84290 /* TokenSource.cpp */; };
+ 2793DC861F08083F00A84290 /* TokenSource.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 2793DC841F08083F00A84290 /* TokenSource.cpp */; };
+ 2793DC871F08083F00A84290 /* TokenSource.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 2793DC841F08083F00A84290 /* TokenSource.cpp */; };
+ 2793DC891F08087500A84290 /* Chunk.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 2793DC881F08087500A84290 /* Chunk.cpp */; };
+ 2793DC8A1F08087500A84290 /* Chunk.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 2793DC881F08087500A84290 /* Chunk.cpp */; };
+ 2793DC8B1F08087500A84290 /* Chunk.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 2793DC881F08087500A84290 /* Chunk.cpp */; };
+ 2793DC8D1F08088F00A84290 /* ParseTreeListener.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 2793DC8C1F08088F00A84290 /* ParseTreeListener.cpp */; };
+ 2793DC8E1F08088F00A84290 /* ParseTreeListener.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 2793DC8C1F08088F00A84290 /* ParseTreeListener.cpp */; };
+ 2793DC8F1F08088F00A84290 /* ParseTreeListener.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 2793DC8C1F08088F00A84290 /* ParseTreeListener.cpp */; };
+ 2793DC911F0808A200A84290 /* TerminalNode.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 2793DC901F0808A200A84290 /* TerminalNode.cpp */; };
+ 2793DC921F0808A200A84290 /* TerminalNode.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 2793DC901F0808A200A84290 /* TerminalNode.cpp */; };
+ 2793DC931F0808A200A84290 /* TerminalNode.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 2793DC901F0808A200A84290 /* TerminalNode.cpp */; };
+ 2793DC961F0808E100A84290 /* ErrorNode.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 2793DC941F0808E100A84290 /* ErrorNode.cpp */; };
+ 2793DC971F0808E100A84290 /* ErrorNode.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 2793DC941F0808E100A84290 /* ErrorNode.cpp */; };
+ 2793DC981F0808E100A84290 /* ErrorNode.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 2793DC941F0808E100A84290 /* ErrorNode.cpp */; };
+ 2793DC991F0808E100A84290 /* ParseTreeVisitor.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 2793DC951F0808E100A84290 /* ParseTreeVisitor.cpp */; };
+ 2793DC9A1F0808E100A84290 /* ParseTreeVisitor.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 2793DC951F0808E100A84290 /* ParseTreeVisitor.cpp */; };
+ 2793DC9B1F0808E100A84290 /* ParseTreeVisitor.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 2793DC951F0808E100A84290 /* ParseTreeVisitor.cpp */; };
+ 2793DC9D1F08090D00A84290 /* Any.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 2793DC9C1F08090D00A84290 /* Any.cpp */; };
+ 2793DC9E1F08090D00A84290 /* Any.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 2793DC9C1F08090D00A84290 /* Any.cpp */; };
+ 2793DC9F1F08090D00A84290 /* Any.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 2793DC9C1F08090D00A84290 /* Any.cpp */; };
+ 2793DCA41F08095F00A84290 /* ANTLRErrorListener.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 2793DCA01F08095F00A84290 /* ANTLRErrorListener.cpp */; };
+ 2793DCA51F08095F00A84290 /* ANTLRErrorListener.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 2793DCA01F08095F00A84290 /* ANTLRErrorListener.cpp */; };
+ 2793DCA61F08095F00A84290 /* ANTLRErrorListener.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 2793DCA01F08095F00A84290 /* ANTLRErrorListener.cpp */; };
+ 2793DCA71F08095F00A84290 /* ANTLRErrorStrategy.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 2793DCA11F08095F00A84290 /* ANTLRErrorStrategy.cpp */; };
+ 2793DCA81F08095F00A84290 /* ANTLRErrorStrategy.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 2793DCA11F08095F00A84290 /* ANTLRErrorStrategy.cpp */; };
+ 2793DCA91F08095F00A84290 /* ANTLRErrorStrategy.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 2793DCA11F08095F00A84290 /* ANTLRErrorStrategy.cpp */; };
+ 2793DCAA1F08095F00A84290 /* Token.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 2793DCA21F08095F00A84290 /* Token.cpp */; };
+ 2793DCAB1F08095F00A84290 /* Token.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 2793DCA21F08095F00A84290 /* Token.cpp */; };
+ 2793DCAC1F08095F00A84290 /* Token.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 2793DCA21F08095F00A84290 /* Token.cpp */; };
+ 2793DCAD1F08095F00A84290 /* WritableToken.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 2793DCA31F08095F00A84290 /* WritableToken.cpp */; };
+ 2793DCAE1F08095F00A84290 /* WritableToken.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 2793DCA31F08095F00A84290 /* WritableToken.cpp */; };
+ 2793DCAF1F08095F00A84290 /* WritableToken.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 2793DCA31F08095F00A84290 /* WritableToken.cpp */; };
+ 2793DCB31F08099C00A84290 /* BlockStartState.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 2793DCB01F08099C00A84290 /* BlockStartState.cpp */; };
+ 2793DCB41F08099C00A84290 /* BlockStartState.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 2793DCB01F08099C00A84290 /* BlockStartState.cpp */; };
+ 2793DCB51F08099C00A84290 /* BlockStartState.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 2793DCB01F08099C00A84290 /* BlockStartState.cpp */; };
+ 2793DCB61F08099C00A84290 /* LexerAction.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 2793DCB11F08099C00A84290 /* LexerAction.cpp */; };
+ 2793DCB71F08099C00A84290 /* LexerAction.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 2793DCB11F08099C00A84290 /* LexerAction.cpp */; };
+ 2793DCB81F08099C00A84290 /* LexerAction.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 2793DCB11F08099C00A84290 /* LexerAction.cpp */; };
2794D8561CE7821B00FADD0F /* antlr4-common.h in Headers */ = {isa = PBXBuildFile; fileRef = 2794D8551CE7821B00FADD0F /* antlr4-common.h */; };
2794D8571CE7821B00FADD0F /* antlr4-common.h in Headers */ = {isa = PBXBuildFile; fileRef = 2794D8551CE7821B00FADD0F /* antlr4-common.h */; };
2794D8581CE7821B00FADD0F /* antlr4-common.h in Headers */ = {isa = PBXBuildFile; fileRef = 2794D8551CE7821B00FADD0F /* antlr4-common.h */; };
@@ -1061,7 +1097,6 @@
276E5CBD1CDB57AA003FF4B4 /* InterpreterRuleContext.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = InterpreterRuleContext.h; sourceTree = ""; wrapsLines = 0; };
276E5CBE1CDB57AA003FF4B4 /* IntStream.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = IntStream.cpp; sourceTree = ""; };
276E5CBF1CDB57AA003FF4B4 /* IntStream.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = IntStream.h; sourceTree = ""; };
- 276E5CC01CDB57AA003FF4B4 /* IRecognizer.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = IRecognizer.h; sourceTree = ""; };
276E5CC11CDB57AA003FF4B4 /* Lexer.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = Lexer.cpp; sourceTree = ""; wrapsLines = 0; };
276E5CC21CDB57AA003FF4B4 /* Lexer.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = Lexer.h; sourceTree = ""; };
276E5CC31CDB57AA003FF4B4 /* LexerInterpreter.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = LexerInterpreter.cpp; sourceTree = ""; wrapsLines = 0; };
@@ -1152,6 +1187,19 @@
27874F1D1CCB7A0700AF1C53 /* CoreFoundation.framework */ = {isa = PBXFileReference; lastKnownFileType = wrapper.framework; name = CoreFoundation.framework; path = System/Library/Frameworks/CoreFoundation.framework; sourceTree = SDKROOT; };
278E313E1D9D6534001C28F9 /* Tests.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; path = Tests.m; sourceTree = ""; };
278E31401D9D6534001C28F9 /* Info.plist */ = {isa = PBXFileReference; lastKnownFileType = text.plist.xml; path = Info.plist; sourceTree = ""; };
+ 2793DC841F08083F00A84290 /* TokenSource.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = TokenSource.cpp; sourceTree = ""; };
+ 2793DC881F08087500A84290 /* Chunk.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = Chunk.cpp; sourceTree = ""; };
+ 2793DC8C1F08088F00A84290 /* ParseTreeListener.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ParseTreeListener.cpp; sourceTree = ""; };
+ 2793DC901F0808A200A84290 /* TerminalNode.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = TerminalNode.cpp; sourceTree = ""; };
+ 2793DC941F0808E100A84290 /* ErrorNode.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ErrorNode.cpp; sourceTree = ""; };
+ 2793DC951F0808E100A84290 /* ParseTreeVisitor.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ParseTreeVisitor.cpp; sourceTree = ""; };
+ 2793DC9C1F08090D00A84290 /* Any.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = Any.cpp; sourceTree = ""; };
+ 2793DCA01F08095F00A84290 /* ANTLRErrorListener.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ANTLRErrorListener.cpp; sourceTree = ""; };
+ 2793DCA11F08095F00A84290 /* ANTLRErrorStrategy.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ANTLRErrorStrategy.cpp; sourceTree = ""; };
+ 2793DCA21F08095F00A84290 /* Token.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = Token.cpp; sourceTree = ""; };
+ 2793DCA31F08095F00A84290 /* WritableToken.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = WritableToken.cpp; sourceTree = ""; };
+ 2793DCB01F08099C00A84290 /* BlockStartState.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = BlockStartState.cpp; sourceTree = ""; };
+ 2793DCB11F08099C00A84290 /* LexerAction.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = LexerAction.cpp; sourceTree = ""; };
2794D8551CE7821B00FADD0F /* antlr4-common.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "antlr4-common.h"; sourceTree = ""; };
27AC52CF1CE773A80093AAAB /* antlr4-runtime.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "antlr4-runtime.h"; sourceTree = ""; };
27B36AC41DACE7AF0069C868 /* RuleContextWithAltNum.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = RuleContextWithAltNum.cpp; sourceTree = ""; };
@@ -1230,7 +1278,9 @@
276E5CF91CDB57AA003FF4B4 /* tree */,
2794D8551CE7821B00FADD0F /* antlr4-common.h */,
27AC52CF1CE773A80093AAAB /* antlr4-runtime.h */,
+ 2793DCA01F08095F00A84290 /* ANTLRErrorListener.cpp */,
276E5C0C1CDB57AA003FF4B4 /* ANTLRErrorListener.h */,
+ 2793DCA11F08095F00A84290 /* ANTLRErrorStrategy.cpp */,
276E5C0D1CDB57AA003FF4B4 /* ANTLRErrorStrategy.h */,
276E5C0E1CDB57AA003FF4B4 /* ANTLRFileStream.cpp */,
276E5C0F1CDB57AA003FF4B4 /* ANTLRFileStream.h */,
@@ -1266,7 +1316,6 @@
276E5CBD1CDB57AA003FF4B4 /* InterpreterRuleContext.h */,
276E5CBE1CDB57AA003FF4B4 /* IntStream.cpp */,
276E5CBF1CDB57AA003FF4B4 /* IntStream.h */,
- 276E5CC01CDB57AA003FF4B4 /* IRecognizer.h */,
276E5CC11CDB57AA003FF4B4 /* Lexer.cpp */,
276E5CC21CDB57AA003FF4B4 /* Lexer.h */,
276E5CC31CDB57AA003FF4B4 /* LexerInterpreter.cpp */,
@@ -1295,8 +1344,10 @@
27B36AC51DACE7AF0069C868 /* RuleContextWithAltNum.h */,
27745EFB1CE49C000067C6A3 /* RuntimeMetaData.cpp */,
27745EFC1CE49C000067C6A3 /* RuntimeMetaData.h */,
+ 2793DCA21F08095F00A84290 /* Token.cpp */,
276E5CF01CDB57AA003FF4B4 /* Token.h */,
276E5CF21CDB57AA003FF4B4 /* TokenFactory.h */,
+ 2793DC841F08083F00A84290 /* TokenSource.cpp */,
276E5CF41CDB57AA003FF4B4 /* TokenSource.h */,
276E5CF51CDB57AA003FF4B4 /* TokenStream.cpp */,
276E5CF61CDB57AA003FF4B4 /* TokenStream.h */,
@@ -1308,6 +1359,7 @@
276E5D251CDB57AA003FF4B4 /* UnbufferedTokenStream.h */,
276E5D271CDB57AA003FF4B4 /* Vocabulary.cpp */,
276E5D281CDB57AA003FF4B4 /* Vocabulary.h */,
+ 2793DCA31F08095F00A84290 /* WritableToken.cpp */,
276E5D2A1CDB57AA003FF4B4 /* WritableToken.h */,
);
name = runtime;
@@ -1350,6 +1402,7 @@
276E5C321CDB57AA003FF4B4 /* BasicState.h */,
276E5C331CDB57AA003FF4B4 /* BlockEndState.cpp */,
276E5C341CDB57AA003FF4B4 /* BlockEndState.h */,
+ 2793DCB01F08099C00A84290 /* BlockStartState.cpp */,
276E5C351CDB57AA003FF4B4 /* BlockStartState.h */,
276E5C371CDB57AA003FF4B4 /* ContextSensitivityInfo.cpp */,
276E5C381CDB57AA003FF4B4 /* ContextSensitivityInfo.h */,
@@ -1365,6 +1418,7 @@
276E5C421CDB57AA003FF4B4 /* EpsilonTransition.h */,
276E5C431CDB57AA003FF4B4 /* ErrorInfo.cpp */,
276E5C441CDB57AA003FF4B4 /* ErrorInfo.h */,
+ 2793DCB11F08099C00A84290 /* LexerAction.cpp */,
276E5C451CDB57AA003FF4B4 /* LexerAction.h */,
276E5C461CDB57AA003FF4B4 /* LexerActionExecutor.cpp */,
276E5C471CDB57AA003FF4B4 /* LexerActionExecutor.h */,
@@ -1483,6 +1537,7 @@
276E5CE41CDB57AA003FF4B4 /* support */ = {
isa = PBXGroup;
children = (
+ 2793DC9C1F08090D00A84290 /* Any.cpp */,
27F4A8551D4CEB2A00E067EE /* Any.h */,
276E5CE51CDB57AA003FF4B4 /* Arrays.cpp */,
276E5CE61CDB57AA003FF4B4 /* Arrays.h */,
@@ -1504,6 +1559,7 @@
276E5D061CDB57AA003FF4B4 /* pattern */,
27DB448A1D045537007E790B /* xpath */,
276E5CFA1CDB57AA003FF4B4 /* AbstractParseTreeVisitor.h */,
+ 2793DC941F0808E100A84290 /* ErrorNode.cpp */,
276E5CFB1CDB57AA003FF4B4 /* ErrorNode.h */,
276E5CFC1CDB57AA003FF4B4 /* ErrorNodeImpl.cpp */,
276E5CFD1CDB57AA003FF4B4 /* ErrorNodeImpl.h */,
@@ -1511,11 +1567,14 @@
27D414511DEB0D3D00D0F3F9 /* IterativeParseTreeWalker.h */,
276566DF1DA93BFB000869BE /* ParseTree.cpp */,
276E5CFE1CDB57AA003FF4B4 /* ParseTree.h */,
+ 2793DC8C1F08088F00A84290 /* ParseTreeListener.cpp */,
276E5D001CDB57AA003FF4B4 /* ParseTreeListener.h */,
276E5D021CDB57AA003FF4B4 /* ParseTreeProperty.h */,
+ 2793DC951F0808E100A84290 /* ParseTreeVisitor.cpp */,
276E5D031CDB57AA003FF4B4 /* ParseTreeVisitor.h */,
276E5D041CDB57AA003FF4B4 /* ParseTreeWalker.cpp */,
276E5D051CDB57AA003FF4B4 /* ParseTreeWalker.h */,
+ 2793DC901F0808A200A84290 /* TerminalNode.cpp */,
276E5D181CDB57AA003FF4B4 /* TerminalNode.h */,
276E5D191CDB57AA003FF4B4 /* TerminalNodeImpl.cpp */,
276E5D1A1CDB57AA003FF4B4 /* TerminalNodeImpl.h */,
@@ -1529,6 +1588,7 @@
isa = PBXGroup;
children = (
276E5D071CDB57AA003FF4B4 /* Chunk.h */,
+ 2793DC881F08087500A84290 /* Chunk.cpp */,
276E5D081CDB57AA003FF4B4 /* ParseTreeMatch.cpp */,
276E5D091CDB57AA003FF4B4 /* ParseTreeMatch.h */,
276E5D0A1CDB57AA003FF4B4 /* ParseTreePattern.cpp */,
@@ -1707,7 +1767,6 @@
27DB44CC1D0463DB007E790B /* XPathElement.h in Headers */,
276E5F581CDB57AA003FF4B4 /* LexerNoViableAltException.h in Headers */,
276E5D811CDB57AA003FF4B4 /* ATNSimulator.h in Headers */,
- 276E5F461CDB57AA003FF4B4 /* IRecognizer.h in Headers */,
27DB44B61D0463CC007E790B /* XPathLexer.h in Headers */,
276E5FC41CDB57AA003FF4B4 /* guid.h in Headers */,
276E602D1CDB57AA003FF4B4 /* TagChunk.h in Headers */,
@@ -1875,7 +1934,6 @@
276E60141CDB57AA003FF4B4 /* ParseTreeMatch.h in Headers */,
276E5F571CDB57AA003FF4B4 /* LexerNoViableAltException.h in Headers */,
276E5D801CDB57AA003FF4B4 /* ATNSimulator.h in Headers */,
- 276E5F451CDB57AA003FF4B4 /* IRecognizer.h in Headers */,
276E5FC31CDB57AA003FF4B4 /* guid.h in Headers */,
276E602C1CDB57AA003FF4B4 /* TagChunk.h in Headers */,
276E5E941CDB57AA003FF4B4 /* RuleStopState.h in Headers */,
@@ -2033,7 +2091,6 @@
276E60131CDB57AA003FF4B4 /* ParseTreeMatch.h in Headers */,
276E5F561CDB57AA003FF4B4 /* LexerNoViableAltException.h in Headers */,
276E5D7F1CDB57AA003FF4B4 /* ATNSimulator.h in Headers */,
- 276E5F441CDB57AA003FF4B4 /* IRecognizer.h in Headers */,
276E5FC21CDB57AA003FF4B4 /* guid.h in Headers */,
276E602B1CDB57AA003FF4B4 /* TagChunk.h in Headers */,
276E5E931CDB57AA003FF4B4 /* RuleStopState.h in Headers */,
@@ -2225,10 +2282,12 @@
276E60451CDB57AA003FF4B4 /* TerminalNodeImpl.cpp in Sources */,
276E5DD21CDB57AA003FF4B4 /* ErrorInfo.cpp in Sources */,
276E5F551CDB57AA003FF4B4 /* LexerNoViableAltException.cpp in Sources */,
+ 2793DCB81F08099C00A84290 /* LexerAction.cpp in Sources */,
276E5E561CDB57AA003FF4B4 /* PlusBlockStartState.cpp in Sources */,
276E5E1D1CDB57AA003FF4B4 /* LexerSkipAction.cpp in Sources */,
276E5EBC1CDB57AA003FF4B4 /* StarLoopEntryState.cpp in Sources */,
276E5D721CDB57AA003FF4B4 /* ATNDeserializer.cpp in Sources */,
+ 2793DC8B1F08087500A84290 /* Chunk.cpp in Sources */,
276E5E2F1CDB57AA003FF4B4 /* LookaheadEventInfo.cpp in Sources */,
276E5DFF1CDB57AA003FF4B4 /* LexerIndexedCustomAction.cpp in Sources */,
276E60511CDB57AA003FF4B4 /* Trees.cpp in Sources */,
@@ -2256,6 +2315,8 @@
276E5E921CDB57AA003FF4B4 /* RuleStopState.cpp in Sources */,
276E60631CDB57AA003FF4B4 /* UnbufferedTokenStream.cpp in Sources */,
276E5DDB1CDB57AA003FF4B4 /* LexerActionExecutor.cpp in Sources */,
+ 2793DC981F0808E100A84290 /* ErrorNode.cpp in Sources */,
+ 2793DCAF1F08095F00A84290 /* WritableToken.cpp in Sources */,
276E5E9E1CDB57AA003FF4B4 /* SemanticContext.cpp in Sources */,
276E5EC81CDB57AA003FF4B4 /* Transition.cpp in Sources */,
276E601E1CDB57AA003FF4B4 /* ParseTreePatternMatcher.cpp in Sources */,
@@ -2263,12 +2324,15 @@
276E5D481CDB57AA003FF4B4 /* ActionTransition.cpp in Sources */,
276E5DC61CDB57AA003FF4B4 /* EmptyPredictionContext.cpp in Sources */,
276E5ED41CDB57AA003FF4B4 /* BailErrorStrategy.cpp in Sources */,
+ 2793DC9B1F0808E100A84290 /* ParseTreeVisitor.cpp in Sources */,
+ 2793DCAC1F08095F00A84290 /* Token.cpp in Sources */,
276E5FA31CDB57AA003FF4B4 /* Recognizer.cpp in Sources */,
276E5D6C1CDB57AA003FF4B4 /* ATNDeserializationOptions.cpp in Sources */,
276E60361CDB57AA003FF4B4 /* TokenTagToken.cpp in Sources */,
27DB44D51D0463DB007E790B /* XPathTokenElement.cpp in Sources */,
27DB44D11D0463DB007E790B /* XPathRuleElement.cpp in Sources */,
276E5DED1CDB57AA003FF4B4 /* LexerATNSimulator.cpp in Sources */,
+ 2793DCB51F08099C00A84290 /* BlockStartState.cpp in Sources */,
276E606C1CDB57AA003FF4B4 /* Vocabulary.cpp in Sources */,
276E5F1C1CDB57AA003FF4B4 /* LexerDFASerializer.cpp in Sources */,
276E60181CDB57AA003FF4B4 /* ParseTreePattern.cpp in Sources */,
@@ -2293,7 +2357,9 @@
276E5D781CDB57AA003FF4B4 /* ATNSerializer.cpp in Sources */,
27745F051CE49C000067C6A3 /* RuntimeMetaData.cpp in Sources */,
276E5DAE1CDB57AA003FF4B4 /* ContextSensitivityInfo.cpp in Sources */,
+ 2793DCA61F08095F00A84290 /* ANTLRErrorListener.cpp in Sources */,
276E5D661CDB57AA003FF4B4 /* ATNConfigSet.cpp in Sources */,
+ 2793DC9F1F08090D00A84290 /* Any.cpp in Sources */,
276E5FAF1CDB57AA003FF4B4 /* Arrays.cpp in Sources */,
276E5ECE1CDB57AA003FF4B4 /* WildcardTransition.cpp in Sources */,
276E5E861CDB57AA003FF4B4 /* RangeTransition.cpp in Sources */,
@@ -2301,6 +2367,7 @@
276E5D9C1CDB57AA003FF4B4 /* BasicState.cpp in Sources */,
276E5FC11CDB57AA003FF4B4 /* guid.cpp in Sources */,
276E5E801CDB57AA003FF4B4 /* ProfilingATNSimulator.cpp in Sources */,
+ 2793DCA91F08095F00A84290 /* ANTLRErrorStrategy.cpp in Sources */,
276E5F401CDB57AA003FF4B4 /* IntStream.cpp in Sources */,
276E5F5B1CDB57AA003FF4B4 /* ListTokenSource.cpp in Sources */,
276E5F6D1CDB57AA003FF4B4 /* MurmurHash.cpp in Sources */,
@@ -2315,6 +2382,7 @@
27DB44CF1D0463DB007E790B /* XPathRuleAnywhereElement.cpp in Sources */,
276E5E441CDB57AA003FF4B4 /* OrderedATNConfigSet.cpp in Sources */,
276E5DCC1CDB57AA003FF4B4 /* EpsilonTransition.cpp in Sources */,
+ 2793DC8F1F08088F00A84290 /* ParseTreeListener.cpp in Sources */,
276E5D5A1CDB57AA003FF4B4 /* ATN.cpp in Sources */,
276E5EE61CDB57AA003FF4B4 /* CharStream.cpp in Sources */,
276E5EE01CDB57AA003FF4B4 /* BufferedTokenStream.cpp in Sources */,
@@ -2333,6 +2401,8 @@
276E5DC01CDB57AA003FF4B4 /* DecisionState.cpp in Sources */,
276E5E981CDB57AA003FF4B4 /* RuleTransition.cpp in Sources */,
276E5EF81CDB57AA003FF4B4 /* CommonTokenStream.cpp in Sources */,
+ 2793DC871F08083F00A84290 /* TokenSource.cpp in Sources */,
+ 2793DC931F0808A200A84290 /* TerminalNode.cpp in Sources */,
276E60121CDB57AA003FF4B4 /* ParseTreeMatch.cpp in Sources */,
276566E21DA93BFB000869BE /* ParseTree.cpp in Sources */,
276E5EEC1CDB57AA003FF4B4 /* CommonToken.cpp in Sources */,
@@ -2365,10 +2435,12 @@
276E60441CDB57AA003FF4B4 /* TerminalNodeImpl.cpp in Sources */,
276E5DD11CDB57AA003FF4B4 /* ErrorInfo.cpp in Sources */,
276E5F541CDB57AA003FF4B4 /* LexerNoViableAltException.cpp in Sources */,
+ 2793DCB71F08099C00A84290 /* LexerAction.cpp in Sources */,
276E5E551CDB57AA003FF4B4 /* PlusBlockStartState.cpp in Sources */,
276E5E1C1CDB57AA003FF4B4 /* LexerSkipAction.cpp in Sources */,
276E5EBB1CDB57AA003FF4B4 /* StarLoopEntryState.cpp in Sources */,
276E5D711CDB57AA003FF4B4 /* ATNDeserializer.cpp in Sources */,
+ 2793DC8A1F08087500A84290 /* Chunk.cpp in Sources */,
276E5E2E1CDB57AA003FF4B4 /* LookaheadEventInfo.cpp in Sources */,
276E5DFE1CDB57AA003FF4B4 /* LexerIndexedCustomAction.cpp in Sources */,
276E60501CDB57AA003FF4B4 /* Trees.cpp in Sources */,
@@ -2396,6 +2468,8 @@
276E5E911CDB57AA003FF4B4 /* RuleStopState.cpp in Sources */,
276E60621CDB57AA003FF4B4 /* UnbufferedTokenStream.cpp in Sources */,
276E5DDA1CDB57AA003FF4B4 /* LexerActionExecutor.cpp in Sources */,
+ 2793DC971F0808E100A84290 /* ErrorNode.cpp in Sources */,
+ 2793DCAE1F08095F00A84290 /* WritableToken.cpp in Sources */,
276E5E9D1CDB57AA003FF4B4 /* SemanticContext.cpp in Sources */,
276E5EC71CDB57AA003FF4B4 /* Transition.cpp in Sources */,
276E601D1CDB57AA003FF4B4 /* ParseTreePatternMatcher.cpp in Sources */,
@@ -2403,12 +2477,15 @@
276E5D471CDB57AA003FF4B4 /* ActionTransition.cpp in Sources */,
276E5DC51CDB57AA003FF4B4 /* EmptyPredictionContext.cpp in Sources */,
276E5ED31CDB57AA003FF4B4 /* BailErrorStrategy.cpp in Sources */,
+ 2793DC9A1F0808E100A84290 /* ParseTreeVisitor.cpp in Sources */,
+ 2793DCAB1F08095F00A84290 /* Token.cpp in Sources */,
276E5FA21CDB57AA003FF4B4 /* Recognizer.cpp in Sources */,
276E5D6B1CDB57AA003FF4B4 /* ATNDeserializationOptions.cpp in Sources */,
276E60351CDB57AA003FF4B4 /* TokenTagToken.cpp in Sources */,
27DB44C31D0463DA007E790B /* XPathTokenElement.cpp in Sources */,
27DB44BF1D0463DA007E790B /* XPathRuleElement.cpp in Sources */,
276E5DEC1CDB57AA003FF4B4 /* LexerATNSimulator.cpp in Sources */,
+ 2793DCB41F08099C00A84290 /* BlockStartState.cpp in Sources */,
276E606B1CDB57AA003FF4B4 /* Vocabulary.cpp in Sources */,
276E5F1B1CDB57AA003FF4B4 /* LexerDFASerializer.cpp in Sources */,
276E60171CDB57AA003FF4B4 /* ParseTreePattern.cpp in Sources */,
@@ -2433,7 +2510,9 @@
276E5D771CDB57AA003FF4B4 /* ATNSerializer.cpp in Sources */,
27745F041CE49C000067C6A3 /* RuntimeMetaData.cpp in Sources */,
276E5DAD1CDB57AA003FF4B4 /* ContextSensitivityInfo.cpp in Sources */,
+ 2793DCA51F08095F00A84290 /* ANTLRErrorListener.cpp in Sources */,
276E5D651CDB57AA003FF4B4 /* ATNConfigSet.cpp in Sources */,
+ 2793DC9E1F08090D00A84290 /* Any.cpp in Sources */,
276E5FAE1CDB57AA003FF4B4 /* Arrays.cpp in Sources */,
276E5ECD1CDB57AA003FF4B4 /* WildcardTransition.cpp in Sources */,
276E5E851CDB57AA003FF4B4 /* RangeTransition.cpp in Sources */,
@@ -2441,6 +2520,7 @@
276E5D9B1CDB57AA003FF4B4 /* BasicState.cpp in Sources */,
276E5FC01CDB57AA003FF4B4 /* guid.cpp in Sources */,
276E5E7F1CDB57AA003FF4B4 /* ProfilingATNSimulator.cpp in Sources */,
+ 2793DCA81F08095F00A84290 /* ANTLRErrorStrategy.cpp in Sources */,
276E5F3F1CDB57AA003FF4B4 /* IntStream.cpp in Sources */,
276E5F5A1CDB57AA003FF4B4 /* ListTokenSource.cpp in Sources */,
276E5F6C1CDB57AA003FF4B4 /* MurmurHash.cpp in Sources */,
@@ -2455,6 +2535,7 @@
27DB44BD1D0463DA007E790B /* XPathRuleAnywhereElement.cpp in Sources */,
276E5E431CDB57AA003FF4B4 /* OrderedATNConfigSet.cpp in Sources */,
276E5DCB1CDB57AA003FF4B4 /* EpsilonTransition.cpp in Sources */,
+ 2793DC8E1F08088F00A84290 /* ParseTreeListener.cpp in Sources */,
276E5D591CDB57AA003FF4B4 /* ATN.cpp in Sources */,
276E5EE51CDB57AA003FF4B4 /* CharStream.cpp in Sources */,
276E5EDF1CDB57AA003FF4B4 /* BufferedTokenStream.cpp in Sources */,
@@ -2473,6 +2554,8 @@
276E5DBF1CDB57AA003FF4B4 /* DecisionState.cpp in Sources */,
276E5E971CDB57AA003FF4B4 /* RuleTransition.cpp in Sources */,
276E5EF71CDB57AA003FF4B4 /* CommonTokenStream.cpp in Sources */,
+ 2793DC861F08083F00A84290 /* TokenSource.cpp in Sources */,
+ 2793DC921F0808A200A84290 /* TerminalNode.cpp in Sources */,
276E60111CDB57AA003FF4B4 /* ParseTreeMatch.cpp in Sources */,
276566E11DA93BFB000869BE /* ParseTree.cpp in Sources */,
276E5EEB1CDB57AA003FF4B4 /* CommonToken.cpp in Sources */,
@@ -2505,10 +2588,12 @@
276E5DB21CDB57AA003FF4B4 /* DecisionEventInfo.cpp in Sources */,
276E60431CDB57AA003FF4B4 /* TerminalNodeImpl.cpp in Sources */,
276E5DD01CDB57AA003FF4B4 /* ErrorInfo.cpp in Sources */,
+ 2793DCB61F08099C00A84290 /* LexerAction.cpp in Sources */,
276E5F531CDB57AA003FF4B4 /* LexerNoViableAltException.cpp in Sources */,
276E5E541CDB57AA003FF4B4 /* PlusBlockStartState.cpp in Sources */,
276E5E1B1CDB57AA003FF4B4 /* LexerSkipAction.cpp in Sources */,
276E5EBA1CDB57AA003FF4B4 /* StarLoopEntryState.cpp in Sources */,
+ 2793DC891F08087500A84290 /* Chunk.cpp in Sources */,
276E5D701CDB57AA003FF4B4 /* ATNDeserializer.cpp in Sources */,
276E5E2D1CDB57AA003FF4B4 /* LookaheadEventInfo.cpp in Sources */,
276E5DFD1CDB57AA003FF4B4 /* LexerIndexedCustomAction.cpp in Sources */,
@@ -2536,6 +2621,8 @@
276E60611CDB57AA003FF4B4 /* UnbufferedTokenStream.cpp in Sources */,
276E5DD91CDB57AA003FF4B4 /* LexerActionExecutor.cpp in Sources */,
27DB449D1D045537007E790B /* XPath.cpp in Sources */,
+ 2793DC961F0808E100A84290 /* ErrorNode.cpp in Sources */,
+ 2793DCAD1F08095F00A84290 /* WritableToken.cpp in Sources */,
276E5E9C1CDB57AA003FF4B4 /* SemanticContext.cpp in Sources */,
27DB44AD1D045537007E790B /* XPathWildcardElement.cpp in Sources */,
276E5EC61CDB57AA003FF4B4 /* Transition.cpp in Sources */,
@@ -2543,12 +2630,15 @@
27DB44A51D045537007E790B /* XPathRuleElement.cpp in Sources */,
276E5F201CDB57AA003FF4B4 /* DiagnosticErrorListener.cpp in Sources */,
276E5D461CDB57AA003FF4B4 /* ActionTransition.cpp in Sources */,
+ 2793DC991F0808E100A84290 /* ParseTreeVisitor.cpp in Sources */,
+ 2793DCAA1F08095F00A84290 /* Token.cpp in Sources */,
276E5DC41CDB57AA003FF4B4 /* EmptyPredictionContext.cpp in Sources */,
276E5ED21CDB57AA003FF4B4 /* BailErrorStrategy.cpp in Sources */,
276E5FA11CDB57AA003FF4B4 /* Recognizer.cpp in Sources */,
276E5D6A1CDB57AA003FF4B4 /* ATNDeserializationOptions.cpp in Sources */,
276E60341CDB57AA003FF4B4 /* TokenTagToken.cpp in Sources */,
276E5DEB1CDB57AA003FF4B4 /* LexerATNSimulator.cpp in Sources */,
+ 2793DCB31F08099C00A84290 /* BlockStartState.cpp in Sources */,
276E606A1CDB57AA003FF4B4 /* Vocabulary.cpp in Sources */,
276E5F1A1CDB57AA003FF4B4 /* LexerDFASerializer.cpp in Sources */,
276E60161CDB57AA003FF4B4 /* ParseTreePattern.cpp in Sources */,
@@ -2573,7 +2663,9 @@
276E5D761CDB57AA003FF4B4 /* ATNSerializer.cpp in Sources */,
27745F031CE49C000067C6A3 /* RuntimeMetaData.cpp in Sources */,
276E5DAC1CDB57AA003FF4B4 /* ContextSensitivityInfo.cpp in Sources */,
+ 2793DCA41F08095F00A84290 /* ANTLRErrorListener.cpp in Sources */,
276E5D641CDB57AA003FF4B4 /* ATNConfigSet.cpp in Sources */,
+ 2793DC9D1F08090D00A84290 /* Any.cpp in Sources */,
276E5FAD1CDB57AA003FF4B4 /* Arrays.cpp in Sources */,
276E5ECC1CDB57AA003FF4B4 /* WildcardTransition.cpp in Sources */,
276E5E841CDB57AA003FF4B4 /* RangeTransition.cpp in Sources */,
@@ -2581,6 +2673,7 @@
276E5D9A1CDB57AA003FF4B4 /* BasicState.cpp in Sources */,
276E5FBF1CDB57AA003FF4B4 /* guid.cpp in Sources */,
276E5E7E1CDB57AA003FF4B4 /* ProfilingATNSimulator.cpp in Sources */,
+ 2793DCA71F08095F00A84290 /* ANTLRErrorStrategy.cpp in Sources */,
276E5F3E1CDB57AA003FF4B4 /* IntStream.cpp in Sources */,
276E5F591CDB57AA003FF4B4 /* ListTokenSource.cpp in Sources */,
276E5F6B1CDB57AA003FF4B4 /* MurmurHash.cpp in Sources */,
@@ -2595,6 +2688,7 @@
276E5D581CDB57AA003FF4B4 /* ATN.cpp in Sources */,
276E5EE41CDB57AA003FF4B4 /* CharStream.cpp in Sources */,
27DB44AB1D045537007E790B /* XPathWildcardAnywhereElement.cpp in Sources */,
+ 2793DC8D1F08088F00A84290 /* ParseTreeListener.cpp in Sources */,
276E5EDE1CDB57AA003FF4B4 /* BufferedTokenStream.cpp in Sources */,
276E5F021CDB57AA003FF4B4 /* DefaultErrorStrategy.cpp in Sources */,
276E5D401CDB57AA003FF4B4 /* AbstractPredicateTransition.cpp in Sources */,
@@ -2613,6 +2707,8 @@
276E5DBE1CDB57AA003FF4B4 /* DecisionState.cpp in Sources */,
276E5E961CDB57AA003FF4B4 /* RuleTransition.cpp in Sources */,
276E5EF61CDB57AA003FF4B4 /* CommonTokenStream.cpp in Sources */,
+ 2793DC851F08083F00A84290 /* TokenSource.cpp in Sources */,
+ 2793DC911F0808A200A84290 /* TerminalNode.cpp in Sources */,
276E60101CDB57AA003FF4B4 /* ParseTreeMatch.cpp in Sources */,
276566E01DA93BFB000869BE /* ParseTree.cpp in Sources */,
276E5EEA1CDB57AA003FF4B4 /* CommonToken.cpp in Sources */,
diff --git a/runtime/Cpp/runtime/src/ANTLRErrorListener.cpp b/runtime/Cpp/runtime/src/ANTLRErrorListener.cpp
index ab0d40328..6ceadb87f 100644
--- a/runtime/Cpp/runtime/src/ANTLRErrorListener.cpp
+++ b/runtime/Cpp/runtime/src/ANTLRErrorListener.cpp
@@ -1,3 +1,8 @@
+/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+ * Use of this file is governed by the BSD 3-clause license that
+ * can be found in the LICENSE.txt file in the project root.
+ */
+
#include "ANTLRErrorListener.h"
antlr4::ANTLRErrorListener::~ANTLRErrorListener()
diff --git a/runtime/Cpp/runtime/src/ANTLRErrorStrategy.cpp b/runtime/Cpp/runtime/src/ANTLRErrorStrategy.cpp
index 04af575c3..1655a5731 100644
--- a/runtime/Cpp/runtime/src/ANTLRErrorStrategy.cpp
+++ b/runtime/Cpp/runtime/src/ANTLRErrorStrategy.cpp
@@ -1,3 +1,8 @@
+/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+ * Use of this file is governed by the BSD 3-clause license that
+ * can be found in the LICENSE.txt file in the project root.
+ */
+
#include "ANTLRErrorStrategy.h"
antlr4::ANTLRErrorStrategy::~ANTLRErrorStrategy()
diff --git a/runtime/Cpp/runtime/src/IntStream.h b/runtime/Cpp/runtime/src/IntStream.h
index 7c7401074..9932a9722 100755
--- a/runtime/Cpp/runtime/src/IntStream.h
+++ b/runtime/Cpp/runtime/src/IntStream.h
@@ -27,7 +27,7 @@ namespace antlr4 {
///
class ANTLR4CPP_PUBLIC IntStream {
public:
- static const size_t EOF = std::numeric_limits::max();
+ static const size_t EOF = static_cast(-1); // std::numeric_limits::max(); doesn't work in VS 2013
/// The value returned by when the end of the stream is
/// reached.
diff --git a/runtime/Cpp/runtime/src/Recognizer.h b/runtime/Cpp/runtime/src/Recognizer.h
index dbffde2e7..8c0bcb0ba 100755
--- a/runtime/Cpp/runtime/src/Recognizer.h
+++ b/runtime/Cpp/runtime/src/Recognizer.h
@@ -11,7 +11,7 @@ namespace antlr4 {
class ANTLR4CPP_PUBLIC Recognizer {
public:
- static const size_t EOF = std::numeric_limits::max();
+ static const size_t EOF = static_cast(-1); // std::numeric_limits::max(); doesn't work in VS 2013.
Recognizer();
Recognizer(Recognizer const&) = delete;
diff --git a/runtime/Cpp/runtime/src/Token.cpp b/runtime/Cpp/runtime/src/Token.cpp
index 06047867a..31266b42d 100644
--- a/runtime/Cpp/runtime/src/Token.cpp
+++ b/runtime/Cpp/runtime/src/Token.cpp
@@ -1,3 +1,8 @@
+/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+ * Use of this file is governed by the BSD 3-clause license that
+ * can be found in the LICENSE.txt file in the project root.
+ */
+
#include "Token.h"
antlr4::Token::~Token() {
diff --git a/runtime/Cpp/runtime/src/Token.h b/runtime/Cpp/runtime/src/Token.h
index 2560c7f1b..a7c1594ff 100755
--- a/runtime/Cpp/runtime/src/Token.h
+++ b/runtime/Cpp/runtime/src/Token.h
@@ -18,7 +18,7 @@ namespace antlr4 {
/// During lookahead operations, this "token" signifies we hit rule end ATN state
/// and did not follow it despite needing to.
- static const size_t EPSILON = std::numeric_limits::max() - 1;
+ static const size_t EPSILON = static_cast(-2);
static const size_t MIN_USER_TOKEN_TYPE = 1;
static const size_t EOF = IntStream::EOF;
diff --git a/runtime/Cpp/runtime/src/TokenSource.cpp b/runtime/Cpp/runtime/src/TokenSource.cpp
index 50b9684ec..6b9d7af2f 100644
--- a/runtime/Cpp/runtime/src/TokenSource.cpp
+++ b/runtime/Cpp/runtime/src/TokenSource.cpp
@@ -1,3 +1,8 @@
+/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+ * Use of this file is governed by the BSD 3-clause license that
+ * can be found in the LICENSE.txt file in the project root.
+ */
+
#include "TokenSource.h"
antlr4::TokenSource::~TokenSource() {
diff --git a/runtime/Cpp/runtime/src/UnbufferedCharStream.cpp b/runtime/Cpp/runtime/src/UnbufferedCharStream.cpp
index 6a9152b50..1f18d3843 100755
--- a/runtime/Cpp/runtime/src/UnbufferedCharStream.cpp
+++ b/runtime/Cpp/runtime/src/UnbufferedCharStream.cpp
@@ -52,7 +52,7 @@ void UnbufferedCharStream::sync(size_t want) {
size_t UnbufferedCharStream::fill(size_t n) {
for (size_t i = 0; i < n; i++) {
- if (_data.size() > 0 && _data.back() == (uint32_t)EOF) {
+ if (_data.size() > 0 && _data.back() == 0xFFFF) {
return i;
}
@@ -89,23 +89,23 @@ size_t UnbufferedCharStream::LA(ssize_t i) {
}
// We can look back only as many chars as we have buffered.
- ssize_t index = (ssize_t)_p + i - 1;
+ ssize_t index = static_cast(_p) + i - 1;
if (index < 0) {
throw IndexOutOfBoundsException();
}
if (i > 0) {
- sync((size_t)i); // No need to sync if we look back.
+ sync(static_cast(i)); // No need to sync if we look back.
}
- if ((size_t)index >= _data.size()) {
+ if (static_cast(index) >= _data.size()) {
return EOF;
}
- if (_data[(size_t)index] == (uint32_t)EOF) {
+ if (_data[static_cast(index)] == 0xFFFF) {
return EOF;
}
- return _data[(size_t)index];
+ return _data[static_cast(index)];
}
ssize_t UnbufferedCharStream::mark() {
@@ -113,13 +113,13 @@ ssize_t UnbufferedCharStream::mark() {
_lastCharBufferStart = _lastChar;
}
- ssize_t mark = -(ssize_t)_numMarkers - 1;
+ ssize_t mark = -static_cast(_numMarkers) - 1;
_numMarkers++;
return mark;
}
void UnbufferedCharStream::release(ssize_t marker) {
- ssize_t expectedMark = -(ssize_t)_numMarkers;
+ ssize_t expectedMark = -static_cast(_numMarkers);
if (marker != expectedMark) {
throw IllegalStateException("release() called with an invalid marker.");
}
@@ -147,16 +147,16 @@ void UnbufferedCharStream::seek(size_t index) {
}
// index == to bufferStartIndex should set p to 0
- ssize_t i = (ssize_t)index - (ssize_t)getBufferStartIndex();
+ ssize_t i = static_cast(index) - static_cast(getBufferStartIndex());
if (i < 0) {
throw IllegalArgumentException(std::string("cannot seek to negative index ") + std::to_string(index));
- } else if (i >= (ssize_t)_data.size()) {
+ } else if (i >= static_cast(_data.size())) {
throw UnsupportedOperationException("Seek to index outside buffer: " + std::to_string(index) +
" not in " + std::to_string(getBufferStartIndex()) + ".." +
std::to_string(getBufferStartIndex() + _data.size()));
}
- _p = (size_t)i;
+ _p = static_cast(i);
_currentCharIndex = index;
if (_p == 0) {
_lastChar = _lastCharBufferStart;
@@ -189,7 +189,7 @@ std::string UnbufferedCharStream::getText(const misc::Interval &interval) {
}
}
- if (interval.a < (ssize_t)bufferStartIndex || interval.b >= ssize_t(bufferStartIndex + _data.size())) {
+ if (interval.a < static_cast(bufferStartIndex) || interval.b >= ssize_t(bufferStartIndex + _data.size())) {
throw UnsupportedOperationException("interval " + interval.toString() + " outside buffer: " +
std::to_string(bufferStartIndex) + ".." + std::to_string(bufferStartIndex + _data.size() - 1));
}
diff --git a/runtime/Cpp/runtime/src/UnbufferedTokenStream.cpp b/runtime/Cpp/runtime/src/UnbufferedTokenStream.cpp
index fb9a59f35..98e952a0a 100755
--- a/runtime/Cpp/runtime/src/UnbufferedTokenStream.cpp
+++ b/runtime/Cpp/runtime/src/UnbufferedTokenStream.cpp
@@ -46,17 +46,17 @@ Token* UnbufferedTokenStream::LT(ssize_t i)
}
sync(i);
- ssize_t index = (ssize_t)_p + i - 1;
+ ssize_t index = static_cast(_p) + i - 1;
if (index < 0) {
throw IndexOutOfBoundsException(std::string("LT(") + std::to_string(i) + std::string(") gives negative index"));
}
- if (index >= (ssize_t)_tokens.size()) {
+ if (index >= static_cast(_tokens.size())) {
assert(_tokens.size() > 0 && _tokens.back()->getType() == EOF);
return _tokens.back().get();
}
- return _tokens[(size_t)index].get();
+ return _tokens[static_cast(index)].get();
}
size_t UnbufferedTokenStream::LA(ssize_t i)
@@ -113,9 +113,9 @@ void UnbufferedTokenStream::consume()
///
void UnbufferedTokenStream::sync(ssize_t want)
{
- ssize_t need = ((ssize_t)_p + want - 1) - (ssize_t)_tokens.size() + 1; // how many more elements we need?
+ ssize_t need = (static_cast(_p) + want - 1) - static_cast(_tokens.size()) + 1; // how many more elements we need?
if (need > 0) {
- fill((size_t)need);
+ fill(static_cast(need));
}
}
@@ -177,7 +177,7 @@ void UnbufferedTokenStream::release(ssize_t marker)
if (_p > 0) {
// Copy tokens[p]..tokens[n-1] to tokens[0]..tokens[(n-1)-p], reset ptrs
// p is last valid token; move nothing if p==n as we have no valid char
- _tokens.erase(_tokens.begin(), _tokens.begin() + (ssize_t)_p);
+ _tokens.erase(_tokens.begin(), _tokens.begin() + static_cast(_p));
_p = 0;
}
diff --git a/runtime/Cpp/runtime/src/WritableToken.cpp b/runtime/Cpp/runtime/src/WritableToken.cpp
index 2e3b01241..a30cd96f1 100644
--- a/runtime/Cpp/runtime/src/WritableToken.cpp
+++ b/runtime/Cpp/runtime/src/WritableToken.cpp
@@ -1,3 +1,8 @@
+/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+ * Use of this file is governed by the BSD 3-clause license that
+ * can be found in the LICENSE.txt file in the project root.
+ */
+
#include "WritableToken.h"
antlr4::WritableToken::~WritableToken() {
diff --git a/runtime/Cpp/runtime/src/antlr4-common.h b/runtime/Cpp/runtime/src/antlr4-common.h
index dc0596f1d..316256276 100644
--- a/runtime/Cpp/runtime/src/antlr4-common.h
+++ b/runtime/Cpp/runtime/src/antlr4-common.h
@@ -63,6 +63,8 @@
typedef std::basic_string<__int32> i32string;
typedef i32string UTF32String;
+ #else
+ typedef std::u32string UTF32String;
#endif
#ifdef ANTLR4CPP_EXPORTS
diff --git a/runtime/Cpp/runtime/src/atn/ATNDeserializer.cpp b/runtime/Cpp/runtime/src/atn/ATNDeserializer.cpp
index ea2e79266..c6cceda13 100755
--- a/runtime/Cpp/runtime/src/atn/ATNDeserializer.cpp
+++ b/runtime/Cpp/runtime/src/atn/ATNDeserializer.cpp
@@ -752,6 +752,7 @@ Ref ATNDeserializer::lexerActionFactory(LexerActionType type, int d
return std::make_shared(data1);
default:
- throw IllegalArgumentException("The specified lexer action type " + std::to_string((size_t)type) + " is not valid.");
+ throw IllegalArgumentException("The specified lexer action type " + std::to_string(static_cast(type)) +
+ " is not valid.");
}
}
diff --git a/runtime/Cpp/runtime/src/atn/ATNSerializer.cpp b/runtime/Cpp/runtime/src/atn/ATNSerializer.cpp
index 6eec3ed7f..206c74281 100755
--- a/runtime/Cpp/runtime/src/atn/ATNSerializer.cpp
+++ b/runtime/Cpp/runtime/src/atn/ATNSerializer.cpp
@@ -58,7 +58,7 @@ std::vector ATNSerializer::serialize() {
serializeUUID(data, ATNDeserializer::SERIALIZED_UUID());
// convert grammar type to ATN const to avoid dependence on ANTLRParser
- data.push_back((size_t)atn->grammarType);
+ data.push_back(static_cast(atn->grammarType));
data.push_back(atn->maxTokenType);
size_t nedges = 0;
@@ -288,7 +288,7 @@ std::vector ATNSerializer::serialize() {
if (atn->grammarType == ATNType::LEXER) {
data.push_back(atn->lexerActions.size());
for (Ref &action : atn->lexerActions) {
- data.push_back((size_t)action->getActionType());
+ data.push_back(static_cast(action->getActionType()));
switch (action->getActionType()) {
case LexerActionType::CHANNEL:
{
@@ -348,7 +348,8 @@ std::vector ATNSerializer::serialize() {
default:
throw IllegalArgumentException("The specified lexer action type " +
- std::to_string((size_t)action->getActionType()) + " is not valid.");
+ std::to_string(static_cast(action->getActionType())) +
+ " is not valid.");
}
}
}
diff --git a/runtime/Cpp/runtime/src/atn/ATNState.h b/runtime/Cpp/runtime/src/atn/ATNState.h
index a6035b4c6..96e8fedb7 100755
--- a/runtime/Cpp/runtime/src/atn/ATNState.h
+++ b/runtime/Cpp/runtime/src/atn/ATNState.h
@@ -77,7 +77,7 @@ namespace atn {
virtual ~ATNState();
static const size_t INITIAL_NUM_TRANSITIONS = 4;
- static const size_t INVALID_STATE_NUMBER = std::numeric_limits::max();
+ static const size_t INVALID_STATE_NUMBER = static_cast(-1); // std::numeric_limits::max();
enum {
ATN_INVALID_TYPE = 0,
diff --git a/runtime/Cpp/runtime/src/atn/BlockStartState.cpp b/runtime/Cpp/runtime/src/atn/BlockStartState.cpp
index b8ec09440..44cca8f77 100644
--- a/runtime/Cpp/runtime/src/atn/BlockStartState.cpp
+++ b/runtime/Cpp/runtime/src/atn/BlockStartState.cpp
@@ -1,3 +1,8 @@
+/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+ * Use of this file is governed by the BSD 3-clause license that
+ * can be found in the LICENSE.txt file in the project root.
+ */
+
#include "BlockStartState.h"
antlr4::atn::BlockStartState::~BlockStartState() {
diff --git a/runtime/Cpp/runtime/src/atn/LL1Analyzer.cpp b/runtime/Cpp/runtime/src/atn/LL1Analyzer.cpp
index 6f39129e5..d7949cd1e 100755
--- a/runtime/Cpp/runtime/src/atn/LL1Analyzer.cpp
+++ b/runtime/Cpp/runtime/src/atn/LL1Analyzer.cpp
@@ -144,12 +144,12 @@ void LL1Analyzer::_LOOK(ATNState *s, ATNState *stopState, Ref
} else if (t->isEpsilon()) {
_LOOK(t->target, stopState, ctx, look, lookBusy, calledRuleStack, seeThruPreds, addEOF);
} else if (t->getSerializationType() == Transition::WILDCARD) {
- look.addAll(misc::IntervalSet::of(Token::MIN_USER_TOKEN_TYPE, (ssize_t)_atn.maxTokenType));
+ look.addAll(misc::IntervalSet::of(Token::MIN_USER_TOKEN_TYPE, static_cast(_atn.maxTokenType)));
} else {
misc::IntervalSet set = t->label();
if (!set.isEmpty()) {
if (is(t)) {
- set = set.complement(misc::IntervalSet::of(Token::MIN_USER_TOKEN_TYPE, (ssize_t)_atn.maxTokenType));
+ set = set.complement(misc::IntervalSet::of(Token::MIN_USER_TOKEN_TYPE, static_cast(_atn.maxTokenType)));
}
look.addAll(set);
}
diff --git a/runtime/Cpp/runtime/src/atn/LexerAction.cpp b/runtime/Cpp/runtime/src/atn/LexerAction.cpp
index 5c98cfe43..983ba6d52 100644
--- a/runtime/Cpp/runtime/src/atn/LexerAction.cpp
+++ b/runtime/Cpp/runtime/src/atn/LexerAction.cpp
@@ -1,3 +1,8 @@
+/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+ * Use of this file is governed by the BSD 3-clause license that
+ * can be found in the LICENSE.txt file in the project root.
+ */
+
#include "LexerAction.h"
antlr4::atn::LexerAction::~LexerAction() {
diff --git a/runtime/Cpp/runtime/src/atn/LexerChannelAction.cpp b/runtime/Cpp/runtime/src/atn/LexerChannelAction.cpp
index dac78fe0c..959beab3d 100755
--- a/runtime/Cpp/runtime/src/atn/LexerChannelAction.cpp
+++ b/runtime/Cpp/runtime/src/atn/LexerChannelAction.cpp
@@ -32,7 +32,7 @@ void LexerChannelAction::execute(Lexer *lexer) {
size_t LexerChannelAction::hashCode() const {
size_t hash = MurmurHash::initialize();
- hash = MurmurHash::update(hash, (size_t)getActionType());
+ hash = MurmurHash::update(hash, static_cast(getActionType()));
hash = MurmurHash::update(hash, _channel);
return MurmurHash::finish(hash, 2);
}
diff --git a/runtime/Cpp/runtime/src/atn/LexerCustomAction.cpp b/runtime/Cpp/runtime/src/atn/LexerCustomAction.cpp
index 00df7df76..1e977a310 100755
--- a/runtime/Cpp/runtime/src/atn/LexerCustomAction.cpp
+++ b/runtime/Cpp/runtime/src/atn/LexerCustomAction.cpp
@@ -38,7 +38,7 @@ void LexerCustomAction::execute(Lexer *lexer) {
size_t LexerCustomAction::hashCode() const {
size_t hash = MurmurHash::initialize();
- hash = MurmurHash::update(hash, (size_t)getActionType());
+ hash = MurmurHash::update(hash, static_cast(getActionType()));
hash = MurmurHash::update(hash, _ruleIndex);
hash = MurmurHash::update(hash, _actionIndex);
return MurmurHash::finish(hash, 3);
diff --git a/runtime/Cpp/runtime/src/atn/LexerModeAction.cpp b/runtime/Cpp/runtime/src/atn/LexerModeAction.cpp
index bfd6ea9b3..0bda8b7af 100755
--- a/runtime/Cpp/runtime/src/atn/LexerModeAction.cpp
+++ b/runtime/Cpp/runtime/src/atn/LexerModeAction.cpp
@@ -33,7 +33,7 @@ void LexerModeAction::execute(Lexer *lexer) {
size_t LexerModeAction::hashCode() const {
size_t hash = MurmurHash::initialize();
- hash = MurmurHash::update(hash, (size_t)getActionType());
+ hash = MurmurHash::update(hash, static_cast(getActionType()));
hash = MurmurHash::update(hash, _mode);
return MurmurHash::finish(hash, 2);
}
diff --git a/runtime/Cpp/runtime/src/atn/LexerMoreAction.cpp b/runtime/Cpp/runtime/src/atn/LexerMoreAction.cpp
index e7b01e078..99b2dd99b 100755
--- a/runtime/Cpp/runtime/src/atn/LexerMoreAction.cpp
+++ b/runtime/Cpp/runtime/src/atn/LexerMoreAction.cpp
@@ -34,7 +34,7 @@ void LexerMoreAction::execute(Lexer *lexer) {
size_t LexerMoreAction::hashCode() const {
size_t hash = MurmurHash::initialize();
- hash = MurmurHash::update(hash, (size_t)getActionType());
+ hash = MurmurHash::update(hash, static_cast(getActionType()));
return MurmurHash::finish(hash, 1);
}
diff --git a/runtime/Cpp/runtime/src/atn/LexerPopModeAction.cpp b/runtime/Cpp/runtime/src/atn/LexerPopModeAction.cpp
index 3d584a3d1..cac0996f4 100755
--- a/runtime/Cpp/runtime/src/atn/LexerPopModeAction.cpp
+++ b/runtime/Cpp/runtime/src/atn/LexerPopModeAction.cpp
@@ -34,7 +34,7 @@ void LexerPopModeAction::execute(Lexer *lexer) {
size_t LexerPopModeAction::hashCode() const {
size_t hash = MurmurHash::initialize();
- hash = MurmurHash::update(hash, (size_t)getActionType());
+ hash = MurmurHash::update(hash, static_cast(getActionType()));
return MurmurHash::finish(hash, 1);
}
diff --git a/runtime/Cpp/runtime/src/atn/LexerPushModeAction.cpp b/runtime/Cpp/runtime/src/atn/LexerPushModeAction.cpp
index 641537a1b..017abed04 100755
--- a/runtime/Cpp/runtime/src/atn/LexerPushModeAction.cpp
+++ b/runtime/Cpp/runtime/src/atn/LexerPushModeAction.cpp
@@ -33,7 +33,7 @@ void LexerPushModeAction::execute(Lexer *lexer) {
size_t LexerPushModeAction::hashCode() const {
size_t hash = MurmurHash::initialize();
- hash = MurmurHash::update(hash, (size_t)getActionType());
+ hash = MurmurHash::update(hash, static_cast(getActionType()));
hash = MurmurHash::update(hash, _mode);
return MurmurHash::finish(hash, 2);
}
diff --git a/runtime/Cpp/runtime/src/atn/LexerSkipAction.cpp b/runtime/Cpp/runtime/src/atn/LexerSkipAction.cpp
index 28cda7cc3..01947ce78 100755
--- a/runtime/Cpp/runtime/src/atn/LexerSkipAction.cpp
+++ b/runtime/Cpp/runtime/src/atn/LexerSkipAction.cpp
@@ -34,7 +34,7 @@ void LexerSkipAction::execute(Lexer *lexer) {
size_t LexerSkipAction::hashCode() const {
size_t hash = MurmurHash::initialize();
- hash = MurmurHash::update(hash, (size_t)getActionType());
+ hash = MurmurHash::update(hash, static_cast(getActionType()));
return MurmurHash::finish(hash, 1);
}
diff --git a/runtime/Cpp/runtime/src/atn/LexerTypeAction.cpp b/runtime/Cpp/runtime/src/atn/LexerTypeAction.cpp
index c1e054b68..006778adc 100755
--- a/runtime/Cpp/runtime/src/atn/LexerTypeAction.cpp
+++ b/runtime/Cpp/runtime/src/atn/LexerTypeAction.cpp
@@ -33,7 +33,7 @@ void LexerTypeAction::execute(Lexer *lexer) {
size_t LexerTypeAction::hashCode() const {
size_t hash = MurmurHash::initialize();
- hash = MurmurHash::update(hash, (size_t)getActionType());
+ hash = MurmurHash::update(hash, static_cast(getActionType()));
hash = MurmurHash::update(hash, _type);
return MurmurHash::finish(hash, 2);
}
diff --git a/runtime/Cpp/runtime/src/atn/ParserATNSimulator.cpp b/runtime/Cpp/runtime/src/atn/ParserATNSimulator.cpp
index 0d7d9c54d..5e82bbaff 100755
--- a/runtime/Cpp/runtime/src/atn/ParserATNSimulator.cpp
+++ b/runtime/Cpp/runtime/src/atn/ParserATNSimulator.cpp
@@ -184,7 +184,7 @@ size_t ParserATNSimulator::execATN(dfa::DFA &dfa, dfa::DFAState *s0, TokenStream
throw e;
}
- if (D->requiresFullContext && mode != PredictionMode::SLL) {
+ if (D->requiresFullContext && _mode != PredictionMode::SLL) {
// IF PREDS, MIGHT RESOLVE TO SINGLE ALT => SLL (or syntax error)
BitSet conflictingAlts;
if (D->predicates.size() != 0) {
@@ -283,7 +283,7 @@ dfa::DFAState *ParserATNSimulator::computeTargetState(dfa::DFA &dfa, dfa::DFASta
D->isAcceptState = true;
D->configs->uniqueAlt = predictedAlt;
D->prediction = predictedAlt;
- } else if (PredictionModeClass::hasSLLConflictTerminatingPrediction(mode, D->configs.get())) {
+ } else if (PredictionModeClass::hasSLLConflictTerminatingPrediction(_mode, D->configs.get())) {
// MORE THAN ONE VIABLE ALTERNATIVE
D->configs->conflictingAlts = getConflictingAlts(D->configs.get());
D->requiresFullContext = true;
@@ -370,7 +370,7 @@ size_t ParserATNSimulator::execATNWithFullContext(dfa::DFA &dfa, dfa::DFAState *
predictedAlt = reach->uniqueAlt;
break;
}
- if (mode != PredictionMode::LL_EXACT_AMBIG_DETECTION) {
+ if (_mode != PredictionMode::LL_EXACT_AMBIG_DETECTION) {
predictedAlt = PredictionModeClass::resolvesToJustOneViableAlt(altSubSets);
if (predictedAlt != ATN::INVALID_ALT_NUMBER) {
break;
@@ -1332,11 +1332,11 @@ void ParserATNSimulator::reportAmbiguity(dfa::DFA &dfa, dfa::DFAState * /*D*/, s
}
void ParserATNSimulator::setPredictionMode(PredictionMode newMode) {
- mode = newMode;
+ _mode = newMode;
}
atn::PredictionMode ParserATNSimulator::getPredictionMode() {
- return mode;
+ return _mode;
}
Parser* ParserATNSimulator::getParser() {
@@ -1352,6 +1352,6 @@ bool ParserATNSimulator::getLrLoopSetting() {
}
void ParserATNSimulator::InitializeInstanceFields() {
- mode = PredictionMode::LL;
+ _mode = PredictionMode::LL;
_startIndex = 0;
}
diff --git a/runtime/Cpp/runtime/src/atn/ParserATNSimulator.h b/runtime/Cpp/runtime/src/atn/ParserATNSimulator.h
index b5c6d98a9..e2a406324 100755
--- a/runtime/Cpp/runtime/src/atn/ParserATNSimulator.h
+++ b/runtime/Cpp/runtime/src/atn/ParserATNSimulator.h
@@ -243,20 +243,133 @@ namespace atn {
* the input.
*/
class ANTLR4CPP_PUBLIC ParserATNSimulator : public ATNSimulator {
- protected:
- Parser *const parser;
-
public:
+ /// Testing only!
+ ParserATNSimulator(const ATN &atn, std::vector &decisionToDFA,
+ PredictionContextCache &sharedContextCache);
+
+ ParserATNSimulator(Parser *parser, const ATN &atn, std::vector &decisionToDFA,
+ PredictionContextCache &sharedContextCache);
+
+ virtual void reset() override;
+ virtual void clearDFA() override;
+ virtual size_t adaptivePredict(TokenStream *input, size_t decision, ParserRuleContext *outerContext);
+
static const bool TURN_OFF_LR_LOOP_ENTRY_BRANCH_OPT;
std::vector &decisionToDFA;
+
+ /** Implements first-edge (loop entry) elimination as an optimization
+ * during closure operations. See antlr/antlr4#1398.
+ *
+ * The optimization is to avoid adding the loop entry config when
+ * the exit path can only lead back to the same
+ * StarLoopEntryState after popping context at the rule end state
+ * (traversing only epsilon edges, so we're still in closure, in
+ * this same rule).
+ *
+ * We need to detect any state that can reach loop entry on
+ * epsilon w/o exiting rule. We don't have to look at FOLLOW
+ * links, just ensure that all stack tops for config refer to key
+ * states in LR rule.
+ *
+ * To verify we are in the right situation we must first check
+ * closure is at a StarLoopEntryState generated during LR removal.
+ * Then we check that each stack top of context is a return state
+ * from one of these cases:
+ *
+ * 1. 'not' expr, '(' type ')' expr. The return state points at loop entry state
+ * 2. expr op expr. The return state is the block end of internal block of (...)*
+ * 3. 'between' expr 'and' expr. The return state of 2nd expr reference.
+ * That state points at block end of internal block of (...)*.
+ * 4. expr '?' expr ':' expr. The return state points at block end,
+ * which points at loop entry state.
+ *
+ * If any is true for each stack top, then closure does not add a
+ * config to the current config set for edge[0], the loop entry branch.
+ *
+ * Conditions fail if any context for the current config is:
+ *
+ * a. empty (we'd fall out of expr to do a global FOLLOW which could
+ * even be to some weird spot in expr) or,
+ * b. lies outside of expr or,
+ * c. lies within expr but at a state not the BlockEndState
+ * generated during LR removal
+ *
+ * Do we need to evaluate predicates ever in closure for this case?
+ *
+ * No. Predicates, including precedence predicates, are only
+ * evaluated when computing a DFA start state. I.e., only before
+ * the lookahead (but not parser) consumes a token.
+ *
+ * There are no epsilon edges allowed in LR rule alt blocks or in
+ * the "primary" part (ID here). If closure is in
+ * StarLoopEntryState any lookahead operation will have consumed a
+ * token as there are no epsilon-paths that lead to
+ * StarLoopEntryState. We do not have to evaluate predicates
+ * therefore if we are in the generated StarLoopEntryState of a LR
+ * rule. Note that when making a prediction starting at that
+ * decision point, decision d=2, compute-start-state performs
+ * closure starting at edges[0], edges[1] emanating from
+ * StarLoopEntryState. That means it is not performing closure on
+ * StarLoopEntryState during compute-start-state.
+ *
+ * How do we know this always gives same prediction answer?
+ *
+ * Without predicates, loop entry and exit paths are ambiguous
+ * upon remaining input +b (in, say, a+b). Either paths lead to
+ * valid parses. Closure can lead to consuming + immediately or by
+ * falling out of this call to expr back into expr and loop back
+ * again to StarLoopEntryState to match +b. In this special case,
+ * we choose the more efficient path, which is to take the bypass
+ * path.
+ *
+ * The lookahead language has not changed because closure chooses
+ * one path over the other. Both paths lead to consuming the same
+ * remaining input during a lookahead operation. If the next token
+ * is an operator, lookahead will enter the choice block with
+ * operators. If it is not, lookahead will exit expr. Same as if
+ * closure had chosen to enter the choice block immediately.
+ *
+ * Closure is examining one config (some loopentrystate, some alt,
+ * context) which means it is considering exactly one alt. Closure
+ * always copies the same alt to any derived configs.
+ *
+ * How do we know this optimization doesn't mess up precedence in
+ * our parse trees?
+ *
+ * Looking through expr from left edge of stat only has to confirm
+ * that an input, say, a+b+c; begins with any valid interpretation
+ * of an expression. The precedence actually doesn't matter when
+ * making a decision in stat seeing through expr. It is only when
+ * parsing rule expr that we must use the precedence to get the
+ * right interpretation and, hence, parse tree.
+ */
+ bool canDropLoopEntryEdgeInLeftRecursiveRule(ATNConfig *config) const;
+ virtual std::string getRuleName(size_t index);
+
+ virtual Ref precedenceTransition(Ref const& config, PrecedencePredicateTransition *pt,
+ bool collectPredicates, bool inContext, bool fullCtx);
+
+ void setPredictionMode(PredictionMode newMode);
+ PredictionMode getPredictionMode();
+
+ Parser* getParser();
+
+ virtual std::string getTokenName(size_t t);
+
+ virtual std::string getLookaheadName(TokenStream *input);
- private:
///
- /// SLL, LL, or LL + exact ambig detection?
- PredictionMode mode;
-
+ /// Used for debugging in adaptivePredict around execATN but I cut
+ /// it out for clarity now that alg. works well. We can leave this
+ /// "dead" code for a bit.
+ ///
+ virtual void dumpDeadEndConfigs(NoViableAltException &nvae);
+
protected:
+ Parser *const parser;
+
///
/// Each prediction operation uses a cache for merge of prediction contexts.
/// Don't keep around as it wastes huge amounts of memory. The merge cache
@@ -273,20 +386,7 @@ namespace atn {
size_t _startIndex;
ParserRuleContext *_outerContext;
dfa::DFA *_dfa; // Reference into the decisionToDFA vector.
-
- public:
- /// Testing only!
- ParserATNSimulator(const ATN &atn, std::vector &decisionToDFA,
- PredictionContextCache &sharedContextCache);
-
- ParserATNSimulator(Parser *parser, const ATN &atn, std::vector &decisionToDFA,
- PredictionContextCache &sharedContextCache);
-
- virtual void reset() override;
- virtual void clearDFA() override;
- virtual size_t adaptivePredict(TokenStream *input, size_t decision, ParserRuleContext *outerContext);
-
- protected:
+
///
/// Performs ATN simulation to compute a predicted alternative based
/// upon the remaining input, but also updates the DFA cache to avoid
@@ -350,7 +450,7 @@ namespace atn {
// comes back with reach.uniqueAlt set to a valid alt
virtual size_t execATNWithFullContext(dfa::DFA &dfa, dfa::DFAState *D, ATNConfigSet *s0,
- TokenStream *input, size_t startIndex, ParserRuleContext *outerContext); // how far we got before failing over
+ TokenStream *input, size_t startIndex, ParserRuleContext *outerContext); // how far we got before failing over
virtual std::unique_ptr computeReachSet(ATNConfigSet *closure, size_t t, bool fullCtx);
@@ -549,10 +649,10 @@ namespace atn {
virtual ATNState *getReachableTarget(Transition *trans, size_t ttype);
virtual std::vector[> getPredsForAmbigAlts(const antlrcpp::BitSet &ambigAlts,
- ATNConfigSet *configs, size_t nalts);
+ ATNConfigSet *configs, size_t nalts);
virtual std::vector getPredicatePredictions(const antlrcpp::BitSet &ambigAlts,
- std::vector][> altToPred);
+ std::vector][> altToPred);
/**
* This method is used to improve the localization of error messages by
@@ -601,7 +701,7 @@ namespace atn {
* identified and {@link #adaptivePredict} should report an error instead.
*/
size_t getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule(ATNConfigSet *configs,
- ParserRuleContext *outerContext);
+ ParserRuleContext *outerContext);
virtual size_t getAltThatFinishedDecisionEntryRule(ATNConfigSet *configs);
@@ -615,7 +715,7 @@ namespace atn {
* prediction, which is where predicates need to evaluate.
*/
std::pair splitAccordingToSemanticValidity(ATNConfigSet *configs,
- ParserRuleContext *outerContext);
+ ParserRuleContext *outerContext);
///
/// Look through a list of predicate/alt pairs, returning alts for the
@@ -627,7 +727,6 @@ namespace atn {
virtual antlrcpp::BitSet evalSemanticContext(std::vector predPredictions,
ParserRuleContext *outerContext, bool complete);
-
/**
* Evaluate a semantic context within a specific parser context.
*
@@ -672,111 +771,15 @@ namespace atn {
virtual void closureCheckingStopState(Ref const& config, ATNConfigSet *configs, ATNConfig::Set &closureBusy,
bool collectPredicates, bool fullCtx, int depth, bool treatEofAsEpsilon);
-
+
/// Do the actual work of walking epsilon edges.
virtual void closure_(Ref const& config, ATNConfigSet *configs, ATNConfig::Set &closureBusy,
bool collectPredicates, bool fullCtx, int depth, bool treatEofAsEpsilon);
-
- public:
- /** Implements first-edge (loop entry) elimination as an optimization
- * during closure operations. See antlr/antlr4#1398.
- *
- * The optimization is to avoid adding the loop entry config when
- * the exit path can only lead back to the same
- * StarLoopEntryState after popping context at the rule end state
- * (traversing only epsilon edges, so we're still in closure, in
- * this same rule).
- *
- * We need to detect any state that can reach loop entry on
- * epsilon w/o exiting rule. We don't have to look at FOLLOW
- * links, just ensure that all stack tops for config refer to key
- * states in LR rule.
- *
- * To verify we are in the right situation we must first check
- * closure is at a StarLoopEntryState generated during LR removal.
- * Then we check that each stack top of context is a return state
- * from one of these cases:
- *
- * 1. 'not' expr, '(' type ')' expr. The return state points at loop entry state
- * 2. expr op expr. The return state is the block end of internal block of (...)*
- * 3. 'between' expr 'and' expr. The return state of 2nd expr reference.
- * That state points at block end of internal block of (...)*.
- * 4. expr '?' expr ':' expr. The return state points at block end,
- * which points at loop entry state.
- *
- * If any is true for each stack top, then closure does not add a
- * config to the current config set for edge[0], the loop entry branch.
- *
- * Conditions fail if any context for the current config is:
- *
- * a. empty (we'd fall out of expr to do a global FOLLOW which could
- * even be to some weird spot in expr) or,
- * b. lies outside of expr or,
- * c. lies within expr but at a state not the BlockEndState
- * generated during LR removal
- *
- * Do we need to evaluate predicates ever in closure for this case?
- *
- * No. Predicates, including precedence predicates, are only
- * evaluated when computing a DFA start state. I.e., only before
- * the lookahead (but not parser) consumes a token.
- *
- * There are no epsilon edges allowed in LR rule alt blocks or in
- * the "primary" part (ID here). If closure is in
- * StarLoopEntryState any lookahead operation will have consumed a
- * token as there are no epsilon-paths that lead to
- * StarLoopEntryState. We do not have to evaluate predicates
- * therefore if we are in the generated StarLoopEntryState of a LR
- * rule. Note that when making a prediction starting at that
- * decision point, decision d=2, compute-start-state performs
- * closure starting at edges[0], edges[1] emanating from
- * StarLoopEntryState. That means it is not performing closure on
- * StarLoopEntryState during compute-start-state.
- *
- * How do we know this always gives same prediction answer?
- *
- * Without predicates, loop entry and exit paths are ambiguous
- * upon remaining input +b (in, say, a+b). Either paths lead to
- * valid parses. Closure can lead to consuming + immediately or by
- * falling out of this call to expr back into expr and loop back
- * again to StarLoopEntryState to match +b. In this special case,
- * we choose the more efficient path, which is to take the bypass
- * path.
- *
- * The lookahead language has not changed because closure chooses
- * one path over the other. Both paths lead to consuming the same
- * remaining input during a lookahead operation. If the next token
- * is an operator, lookahead will enter the choice block with
- * operators. If it is not, lookahead will exit expr. Same as if
- * closure had chosen to enter the choice block immediately.
- *
- * Closure is examining one config (some loopentrystate, some alt,
- * context) which means it is considering exactly one alt. Closure
- * always copies the same alt to any derived configs.
- *
- * How do we know this optimization doesn't mess up precedence in
- * our parse trees?
- *
- * Looking through expr from left edge of stat only has to confirm
- * that an input, say, a+b+c; begins with any valid interpretation
- * of an expression. The precedence actually doesn't matter when
- * making a decision in stat seeing through expr. It is only when
- * parsing rule expr that we must use the precedence to get the
- * right interpretation and, hence, parse tree.
- */
- bool canDropLoopEntryEdgeInLeftRecursiveRule(ATNConfig *config) const;
- virtual std::string getRuleName(size_t index);
-
- protected:
+
virtual Ref getEpsilonTarget(Ref const& config, Transition *t, bool collectPredicates,
bool inContext, bool fullCtx, bool treatEofAsEpsilon);
virtual Ref actionTransition(Ref const& config, ActionTransition *t);
- public:
- virtual Ref precedenceTransition(Ref const& config, PrecedencePredicateTransition *pt,
- bool collectPredicates, bool inContext, bool fullCtx);
-
- protected:
virtual Ref predTransition(Ref const& config, PredicateTransition *pt, bool collectPredicates,
bool inContext, bool fullCtx);
@@ -832,19 +835,6 @@ namespace atn {
virtual antlrcpp::BitSet getConflictingAltsOrUniqueAlt(ATNConfigSet *configs);
- public:
- virtual std::string getTokenName(size_t t);
-
- virtual std::string getLookaheadName(TokenStream *input);
-
- ///
- /// Used for debugging in adaptivePredict around execATN but I cut
- /// it out for clarity now that alg. works well. We can leave this
- /// "dead" code for a bit.
- ///
- virtual void dumpDeadEndConfigs(NoViableAltException &nvae);
-
- protected:
virtual NoViableAltException noViableAlt(TokenStream *input, ParserRuleContext *outerContext,
ATNConfigSet *configs, size_t startIndex);
@@ -901,13 +891,10 @@ namespace atn {
const antlrcpp::BitSet &ambigAlts,
ATNConfigSet *configs); // configs that LL not SLL considered conflicting
- public:
- void setPredictionMode(PredictionMode newMode);
- PredictionMode getPredictionMode();
-
- Parser* getParser();
-
private:
+ // SLL, LL, or LL + exact ambig detection?
+ PredictionMode _mode;
+
static bool getLrLoopSetting();
void InitializeInstanceFields();
};
diff --git a/runtime/Cpp/runtime/src/atn/PredictionContext.h b/runtime/Cpp/runtime/src/atn/PredictionContext.h
index fb053f14a..9a52e00e5 100755
--- a/runtime/Cpp/runtime/src/atn/PredictionContext.h
+++ b/runtime/Cpp/runtime/src/atn/PredictionContext.h
@@ -17,7 +17,6 @@ namespace atn {
class PredictionContextMergeCache;
typedef std::unordered_set][, PredictionContextHasher, PredictionContextComparer> PredictionContextCache;
- //typedef std::map, Ref>, Ref> PredictionContextMergeCache;
class ANTLR4CPP_PUBLIC PredictionContext {
public:
@@ -28,10 +27,10 @@ namespace atn {
/// Represents $ in an array in full context mode, when $
/// doesn't mean wildcard: $ + x = [$,x]. Here,
/// $ = EMPTY_RETURN_STATE.
- // ml: originally Integer.MAX_VALUE, which would be (size_t)-1 for us, but this is already used in places where
+ // ml: originally Integer.MAX_VALUE, which would be -1 for us, but this is already used in places where
// -1 is converted to unsigned, so we use a different value here. Any value does the job provided it doesn't
// conflict with real return states.
- static const size_t EMPTY_RETURN_STATE = std::numeric_limits::max() - 9;
+ static const size_t EMPTY_RETURN_STATE = static_cast(-10); // std::numeric_limits::max() - 9;
private:
static const size_t INITIAL_HASH = 1;
diff --git a/runtime/Cpp/runtime/src/atn/PredictionMode.h b/runtime/Cpp/runtime/src/atn/PredictionMode.h
index d3de2e952..726f4cf40 100755
--- a/runtime/Cpp/runtime/src/atn/PredictionMode.h
+++ b/runtime/Cpp/runtime/src/atn/PredictionMode.h
@@ -15,7 +15,7 @@ namespace atn {
* utility methods for analyzing configuration sets for conflicts and/or
* ambiguities.
*/
- enum class ANTLR4CPP_PUBLIC PredictionMode {
+ enum class PredictionMode {
/**
* The SLL(*) prediction mode. This prediction mode ignores the current
* parser context when making predictions. This is the fastest prediction
diff --git a/runtime/Cpp/runtime/src/atn/SemanticContext.cpp b/runtime/Cpp/runtime/src/atn/SemanticContext.cpp
index fdc272f84..0531e37f8 100755
--- a/runtime/Cpp/runtime/src/atn/SemanticContext.cpp
+++ b/runtime/Cpp/runtime/src/atn/SemanticContext.cpp
@@ -82,7 +82,7 @@ int SemanticContext::PrecedencePredicate::compareTo(PrecedencePredicate *o) {
size_t SemanticContext::PrecedencePredicate::hashCode() const {
size_t hashCode = 1;
- hashCode = 31 * hashCode + (size_t)precedence;
+ hashCode = 31 * hashCode + static_cast(precedence);
return hashCode;
}
diff --git a/runtime/Cpp/runtime/src/misc/Interval.cpp b/runtime/Cpp/runtime/src/misc/Interval.cpp
index 325b8621f..97486bf3f 100755
--- a/runtime/Cpp/runtime/src/misc/Interval.cpp
+++ b/runtime/Cpp/runtime/src/misc/Interval.cpp
@@ -10,16 +10,16 @@ using namespace antlr4::misc;
Interval::~Interval() = default;
size_t antlr4::misc::numericToSymbol(ssize_t v) {
- return (size_t)v;
+ return static_cast(v);
}
ssize_t antlr4::misc::symbolToNumeric(size_t v) {
- return (ssize_t)v;
+ return static_cast(v);
}
Interval const Interval::INVALID;
-Interval::Interval() : Interval((ssize_t)-1, -2) { // Need an explicit cast here for VS.
+Interval::Interval() : Interval(static_cast(-1), -2) { // Need an explicit cast here for VS.
}
Interval::Interval(size_t a_, size_t b_) : Interval(symbolToNumeric(a_), symbolToNumeric(b_)) {
@@ -41,8 +41,8 @@ bool Interval::operator == (const Interval &other) const {
size_t Interval::hashCode() const {
size_t hash = 23;
- hash = hash * 31 + (size_t)a;
- hash = hash * 31 + (size_t)b;
+ hash = hash * 31 + static_cast(a);
+ hash = hash * 31 + static_cast(b);
return hash;
}
diff --git a/runtime/Cpp/runtime/src/support/Any.cpp b/runtime/Cpp/runtime/src/support/Any.cpp
index 1404343d3..3dd1a94bf 100644
--- a/runtime/Cpp/runtime/src/support/Any.cpp
+++ b/runtime/Cpp/runtime/src/support/Any.cpp
@@ -1,9 +1,16 @@
+/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+ * Use of this file is governed by the BSD 3-clause license that
+ * can be found in the LICENSE.txt file in the project root.
+ */
+
#include "Any.h"
-antlrcpp::Any::~Any()
+using namespace antlrcpp;
+
+Any::~Any()
{
delete _ptr;
}
-antlrcpp::Any::Base::~Base() {
+Any::Base::~Base() {
}
diff --git a/runtime/Cpp/runtime/src/support/Any.h b/runtime/Cpp/runtime/src/support/Any.h
index f9559b30d..3d8845c70 100644
--- a/runtime/Cpp/runtime/src/support/Any.h
+++ b/runtime/Cpp/runtime/src/support/Any.h
@@ -19,7 +19,7 @@ namespace antlrcpp {
template
using StorageType = typename std::decay::type;
-struct Any
+struct ANTLR4CPP_PUBLIC Any
{
bool isNull() const { return _ptr == nullptr; }
bool isNotNull() const { return _ptr != nullptr; }
diff --git a/runtime/Cpp/runtime/src/tree/ErrorNode.cpp b/runtime/Cpp/runtime/src/tree/ErrorNode.cpp
index 685047d20..ade2539af 100644
--- a/runtime/Cpp/runtime/src/tree/ErrorNode.cpp
+++ b/runtime/Cpp/runtime/src/tree/ErrorNode.cpp
@@ -1,3 +1,8 @@
+/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+ * Use of this file is governed by the BSD 3-clause license that
+ * can be found in the LICENSE.txt file in the project root.
+ */
+
#include "tree/ErrorNode.h"
antlr4::tree::ErrorNode::~ErrorNode() {
diff --git a/runtime/Cpp/runtime/src/tree/IterativeParseTreeWalker.cpp b/runtime/Cpp/runtime/src/tree/IterativeParseTreeWalker.cpp
index 5ce30d3a7..a4b3efd73 100644
--- a/runtime/Cpp/runtime/src/tree/IterativeParseTreeWalker.cpp
+++ b/runtime/Cpp/runtime/src/tree/IterativeParseTreeWalker.cpp
@@ -1,31 +1,6 @@
-/*
- * [The "BSD license"]
- * Copyright (c) 2012 Terence Parr
- * Copyright (c) 2012 Sam Harwell
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. The name of the author may not be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+ * Use of this file is governed by the BSD 3-clause license that
+ * can be found in the LICENSE.txt file in the project root.
*/
#include "support/CPPUtils.h"
diff --git a/runtime/Cpp/runtime/src/tree/ParseTreeListener.cpp b/runtime/Cpp/runtime/src/tree/ParseTreeListener.cpp
index 820962118..ce1229758 100644
--- a/runtime/Cpp/runtime/src/tree/ParseTreeListener.cpp
+++ b/runtime/Cpp/runtime/src/tree/ParseTreeListener.cpp
@@ -1,3 +1,8 @@
+/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+ * Use of this file is governed by the BSD 3-clause license that
+ * can be found in the LICENSE.txt file in the project root.
+ */
+
#include "ParseTreeListener.h"
antlr4::tree::ParseTreeListener::~ParseTreeListener() {
diff --git a/runtime/Cpp/runtime/src/tree/ParseTreeVisitor.cpp b/runtime/Cpp/runtime/src/tree/ParseTreeVisitor.cpp
index 5298eee09..a329919c1 100644
--- a/runtime/Cpp/runtime/src/tree/ParseTreeVisitor.cpp
+++ b/runtime/Cpp/runtime/src/tree/ParseTreeVisitor.cpp
@@ -1,3 +1,8 @@
+/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+ * Use of this file is governed by the BSD 3-clause license that
+ * can be found in the LICENSE.txt file in the project root.
+ */
+
#include "ParseTreeVisitor.h"
antlr4::tree::ParseTreeVisitor::~ParseTreeVisitor() {
diff --git a/runtime/Cpp/runtime/src/tree/TerminalNode.cpp b/runtime/Cpp/runtime/src/tree/TerminalNode.cpp
index e41ff7e9d..d630469c7 100644
--- a/runtime/Cpp/runtime/src/tree/TerminalNode.cpp
+++ b/runtime/Cpp/runtime/src/tree/TerminalNode.cpp
@@ -1,3 +1,8 @@
+/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+ * Use of this file is governed by the BSD 3-clause license that
+ * can be found in the LICENSE.txt file in the project root.
+ */
+
#include "tree/TerminalNode.h"
antlr4::tree::TerminalNode::~TerminalNode() {
diff --git a/runtime/Cpp/runtime/src/tree/pattern/Chunk.cpp b/runtime/Cpp/runtime/src/tree/pattern/Chunk.cpp
index 7997ce867..5320f910b 100644
--- a/runtime/Cpp/runtime/src/tree/pattern/Chunk.cpp
+++ b/runtime/Cpp/runtime/src/tree/pattern/Chunk.cpp
@@ -1,3 +1,8 @@
+/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+ * Use of this file is governed by the BSD 3-clause license that
+ * can be found in the LICENSE.txt file in the project root.
+ */
+
#include "tree/pattern/Chunk.h"
antlr4::tree::pattern::Chunk::~Chunk() {
diff --git a/runtime/Go/antlr/common_token_stream.go b/runtime/Go/antlr/common_token_stream.go
index 0121fe8e4..3154e00ac 100644
--- a/runtime/Go/antlr/common_token_stream.go
+++ b/runtime/Go/antlr/common_token_stream.go
@@ -337,8 +337,8 @@ func (c *CommonTokenStream) GetTextFromInterval(interval *Interval) string {
interval = NewInterval(0, len(c.tokens)-1)
}
- start := interval.start
- stop := interval.stop
+ start := interval.Start
+ stop := interval.Stop
if start < 0 || stop < 0 {
return ""
diff --git a/runtime/Go/antlr/common_token_stream_test.go b/runtime/Go/antlr/common_token_stream_test.go
new file mode 100644
index 000000000..27cf42111
--- /dev/null
+++ b/runtime/Go/antlr/common_token_stream_test.go
@@ -0,0 +1,154 @@
+// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "testing"
+)
+
+type commonTokenStreamTestLexer struct {
+ *BaseLexer
+
+ tokens []Token
+ i int
+}
+
+func (l *commonTokenStreamTestLexer) NextToken() Token {
+ tmp := l.tokens[l.i]
+ l.i++
+ return tmp
+}
+
+func TestCommonTokenStreamOffChannel(t *testing.T) {
+ assert := assertNew(t)
+ lexEngine := &commonTokenStreamTestLexer{
+ tokens: []Token{
+ newTestCommonToken(1, " ", LexerHidden), // 0
+ newTestCommonToken(1, "x", LexerDefaultTokenChannel), // 1
+ newTestCommonToken(1, " ", LexerHidden), // 2
+ newTestCommonToken(1, "=", LexerDefaultTokenChannel), // 3
+ newTestCommonToken(1, "34", LexerDefaultTokenChannel), // 4
+ newTestCommonToken(1, " ", LexerHidden), // 5
+ newTestCommonToken(1, " ", LexerHidden), // 6
+ newTestCommonToken(1, ";", LexerDefaultTokenChannel), // 7
+ newTestCommonToken(1, "\n", LexerHidden), // 9
+ newTestCommonToken(TokenEOF, "", LexerDefaultTokenChannel), // 10
+ },
+ }
+ tokens := NewCommonTokenStream(lexEngine, TokenDefaultChannel)
+
+ assert.Equal("x", tokens.LT(1).GetText()) // must skip first off channel token
+ tokens.Consume()
+ assert.Equal("=", tokens.LT(1).GetText())
+ assert.Equal("x", tokens.LT(-1).GetText())
+
+ tokens.Consume()
+ assert.Equal("34", tokens.LT(1).GetText())
+ assert.Equal("=", tokens.LT(-1).GetText())
+
+ tokens.Consume()
+ assert.Equal(";", tokens.LT(1).GetText())
+ assert.Equal("34", tokens.LT(-1).GetText())
+
+ tokens.Consume()
+ assert.Equal(TokenEOF, tokens.LT(1).GetTokenType())
+ assert.Equal(";", tokens.LT(-1).GetText())
+
+ assert.Equal("34", tokens.LT(-2).GetText())
+ assert.Equal("=", tokens.LT(-3).GetText())
+ assert.Equal("x", tokens.LT(-4).GetText())
+}
+
+func TestCommonTokenStreamFetchOffChannel(t *testing.T) {
+ assert := assertNew(t)
+ lexEngine := &commonTokenStreamTestLexer{
+ tokens: []Token{
+ newTestCommonToken(1, " ", LexerHidden), // 0
+ newTestCommonToken(1, "x", LexerDefaultTokenChannel), // 1
+ newTestCommonToken(1, " ", LexerHidden), // 2
+ newTestCommonToken(1, "=", LexerDefaultTokenChannel), // 3
+ newTestCommonToken(1, "34", LexerDefaultTokenChannel), // 4
+ newTestCommonToken(1, " ", LexerHidden), // 5
+ newTestCommonToken(1, " ", LexerHidden), // 6
+ newTestCommonToken(1, ";", LexerDefaultTokenChannel), // 7
+ newTestCommonToken(1, " ", LexerHidden), // 8
+ newTestCommonToken(1, "\n", LexerHidden), // 9
+ newTestCommonToken(TokenEOF, "", LexerDefaultTokenChannel), // 10
+ },
+ }
+ tokens := NewCommonTokenStream(lexEngine, TokenDefaultChannel)
+ tokens.Fill()
+
+ assert.Nil(tokens.getHiddenTokensToLeft(0, -1))
+ assert.Nil(tokens.getHiddenTokensToRight(0, -1))
+
+ assert.Equal("[[@0,0:0=' ',<1>,channel=1,0:-1]]", tokensToString(tokens.getHiddenTokensToLeft(1, -1)))
+ assert.Equal("[[@2,0:0=' ',<1>,channel=1,0:-1]]", tokensToString(tokens.getHiddenTokensToRight(1, -1)))
+
+ assert.Nil(tokens.getHiddenTokensToLeft(2, -1))
+ assert.Nil(tokens.getHiddenTokensToRight(2, -1))
+
+ assert.Equal("[[@2,0:0=' ',<1>,channel=1,0:-1]]", tokensToString(tokens.getHiddenTokensToLeft(3, -1)))
+ assert.Nil(tokens.getHiddenTokensToRight(3, -1))
+
+ assert.Nil(tokens.getHiddenTokensToLeft(4, -1))
+ assert.Equal("[[@5,0:0=' ',<1>,channel=1,0:-1], [@6,0:0=' ',<1>,channel=1,0:-1]]",
+ tokensToString(tokens.getHiddenTokensToRight(4, -1)))
+
+ assert.Nil(tokens.getHiddenTokensToLeft(5, -1))
+ assert.Equal("[[@6,0:0=' ',<1>,channel=1,0:-1]]",
+ tokensToString(tokens.getHiddenTokensToRight(5, -1)))
+
+ assert.Equal("[[@5,0:0=' ',<1>,channel=1,0:-1]]",
+ tokensToString(tokens.getHiddenTokensToLeft(6, -1)))
+ assert.Nil(tokens.getHiddenTokensToRight(6, -1))
+
+ assert.Equal("[[@5,0:0=' ',<1>,channel=1,0:-1], [@6,0:0=' ',<1>,channel=1,0:-1]]",
+ tokensToString(tokens.getHiddenTokensToLeft(7, -1)))
+ assert.Equal("[[@8,0:0=' ',<1>,channel=1,0:-1], [@9,0:0='\\n',<1>,channel=1,0:-1]]",
+ tokensToString(tokens.getHiddenTokensToRight(7, -1)))
+
+ assert.Nil(tokens.getHiddenTokensToLeft(8, -1))
+ assert.Equal("[[@9,0:0='\\n',<1>,channel=1,0:-1]]",
+ tokensToString(tokens.getHiddenTokensToRight(8, -1)))
+
+ assert.Equal("[[@8,0:0=' ',<1>,channel=1,0:-1]]",
+ tokensToString(tokens.getHiddenTokensToLeft(9, -1)))
+ assert.Nil(tokens.getHiddenTokensToRight(9, -1))
+
+}
+
+type commonTokenStreamTestLexerSingleEOF struct {
+ *BaseLexer
+
+ tokens []Token
+ i int
+}
+
+func (l *commonTokenStreamTestLexerSingleEOF) NextToken() Token {
+ return newTestCommonToken(TokenEOF, "", LexerDefaultTokenChannel)
+}
+
+func TestCommonTokenStreamSingleEOF(t *testing.T) {
+ assert := assertNew(t)
+ lexEngine := &commonTokenStreamTestLexerSingleEOF{}
+ tokens := NewCommonTokenStream(lexEngine, TokenDefaultChannel)
+ tokens.Fill()
+
+ assert.Equal(TokenEOF, tokens.LA(1))
+ assert.Equal(0, tokens.index)
+ assert.Equal(1, tokens.Size())
+}
+
+func TestCommonTokenStreamCannotConsumeEOF(t *testing.T) {
+ assert := assertNew(t)
+ lexEngine := &commonTokenStreamTestLexerSingleEOF{}
+ tokens := NewCommonTokenStream(lexEngine, TokenDefaultChannel)
+ tokens.Fill()
+ assert.Equal(TokenEOF, tokens.LA(1))
+ assert.Equal(0, tokens.index)
+ assert.Equal(1, tokens.Size())
+ assert.Panics(tokens.Consume)
+}
diff --git a/runtime/Go/antlr/input_stream.go b/runtime/Go/antlr/input_stream.go
index da9d2f7f4..5ff270f53 100644
--- a/runtime/Go/antlr/input_stream.go
+++ b/runtime/Go/antlr/input_stream.go
@@ -101,7 +101,7 @@ func (is *InputStream) GetTextFromTokens(start, stop Token) string {
}
func (is *InputStream) GetTextFromInterval(i *Interval) string {
- return is.GetText(i.start, i.stop)
+ return is.GetText(i.Start, i.Stop)
}
func (*InputStream) GetSourceName() string {
diff --git a/runtime/Go/antlr/interval_set.go b/runtime/Go/antlr/interval_set.go
index 749ec1cb3..510d90911 100644
--- a/runtime/Go/antlr/interval_set.go
+++ b/runtime/Go/antlr/interval_set.go
@@ -10,33 +10,33 @@ import (
)
type Interval struct {
- start int
- stop int
+ Start int
+ Stop int
}
/* stop is not included! */
func NewInterval(start, stop int) *Interval {
i := new(Interval)
- i.start = start
- i.stop = stop
+ i.Start = start
+ i.Stop = stop
return i
}
-func (i *Interval) contains(item int) bool {
- return item >= i.start && item < i.stop
+func (i *Interval) Contains(item int) bool {
+ return item >= i.Start && item < i.Stop
}
func (i *Interval) String() string {
- if i.start == i.stop-1 {
- return strconv.Itoa(i.start)
+ if i.Start == i.Stop-1 {
+ return strconv.Itoa(i.Start)
}
- return strconv.Itoa(i.start) + ".." + strconv.Itoa(i.stop-1)
+ return strconv.Itoa(i.Start) + ".." + strconv.Itoa(i.Stop-1)
}
func (i *Interval) length() int {
- return i.stop - i.start
+ return i.Stop - i.Start
}
type IntervalSet struct {
@@ -59,7 +59,7 @@ func (i *IntervalSet) first() int {
return TokenInvalidType
}
- return i.intervals[0].start
+ return i.intervals[0].Start
}
func (i *IntervalSet) addOne(v int) {
@@ -78,24 +78,24 @@ func (i *IntervalSet) addInterval(v *Interval) {
// find insert pos
for k, interval := range i.intervals {
// distinct range -> insert
- if v.stop < interval.start {
+ if v.Stop < interval.Start {
i.intervals = append(i.intervals[0:k], append([]*Interval{v}, i.intervals[k:]...)...)
return
- } else if v.stop == interval.start {
- i.intervals[k].start = v.start
+ } else if v.Stop == interval.Start {
+ i.intervals[k].Start = v.Start
return
- } else if v.start <= interval.stop {
- i.intervals[k] = NewInterval(intMin(interval.start, v.start), intMax(interval.stop, v.stop))
+ } else if v.Start <= interval.Stop {
+ i.intervals[k] = NewInterval(intMin(interval.Start, v.Start), intMax(interval.Stop, v.Stop))
// if not applying to end, merge potential overlaps
if k < len(i.intervals)-1 {
l := i.intervals[k]
r := i.intervals[k+1]
// if r contained in l
- if l.stop >= r.stop {
+ if l.Stop >= r.Stop {
i.intervals = append(i.intervals[0:k+1], i.intervals[k+2:]...)
- } else if l.stop >= r.start { // partial overlap
- i.intervals[k] = NewInterval(l.start, r.stop)
+ } else if l.Stop >= r.Start { // partial overlap
+ i.intervals[k] = NewInterval(l.Start, r.Stop)
i.intervals = append(i.intervals[0:k+1], i.intervals[k+2:]...)
}
}
@@ -111,7 +111,7 @@ func (i *IntervalSet) addSet(other *IntervalSet) *IntervalSet {
if other.intervals != nil {
for k := 0; k < len(other.intervals); k++ {
i2 := other.intervals[k]
- i.addInterval(NewInterval(i2.start, i2.stop))
+ i.addInterval(NewInterval(i2.Start, i2.Stop))
}
}
return i
@@ -131,7 +131,7 @@ func (i *IntervalSet) contains(item int) bool {
return false
}
for k := 0; k < len(i.intervals); k++ {
- if i.intervals[k].contains(item) {
+ if i.intervals[k].Contains(item) {
return true
}
}
@@ -149,29 +149,29 @@ func (i *IntervalSet) length() int {
}
func (i *IntervalSet) removeRange(v *Interval) {
- if v.start == v.stop-1 {
- i.removeOne(v.start)
+ if v.Start == v.Stop-1 {
+ i.removeOne(v.Start)
} else if i.intervals != nil {
k := 0
for n := 0; n < len(i.intervals); n++ {
ni := i.intervals[k]
// intervals are ordered
- if v.stop <= ni.start {
+ if v.Stop <= ni.Start {
return
- } else if v.start > ni.start && v.stop < ni.stop {
- i.intervals[k] = NewInterval(ni.start, v.start)
- x := NewInterval(v.stop, ni.stop)
+ } else if v.Start > ni.Start && v.Stop < ni.Stop {
+ i.intervals[k] = NewInterval(ni.Start, v.Start)
+ x := NewInterval(v.Stop, ni.Stop)
// i.intervals.splice(k, 0, x)
i.intervals = append(i.intervals[0:k], append([]*Interval{x}, i.intervals[k:]...)...)
return
- } else if v.start <= ni.start && v.stop >= ni.stop {
+ } else if v.Start <= ni.Start && v.Stop >= ni.Stop {
// i.intervals.splice(k, 1)
i.intervals = append(i.intervals[0:k], i.intervals[k+1:]...)
k = k - 1 // need another pass
- } else if v.start < ni.stop {
- i.intervals[k] = NewInterval(ni.start, v.start)
- } else if v.stop < ni.stop {
- i.intervals[k] = NewInterval(v.stop, ni.stop)
+ } else if v.Start < ni.Stop {
+ i.intervals[k] = NewInterval(ni.Start, v.Start)
+ } else if v.Stop < ni.Stop {
+ i.intervals[k] = NewInterval(v.Stop, ni.Stop)
}
k++
}
@@ -183,21 +183,21 @@ func (i *IntervalSet) removeOne(v int) {
for k := 0; k < len(i.intervals); k++ {
ki := i.intervals[k]
// intervals i ordered
- if v < ki.start {
+ if v < ki.Start {
return
- } else if v == ki.start && v == ki.stop-1 {
+ } else if v == ki.Start && v == ki.Stop-1 {
// i.intervals.splice(k, 1)
i.intervals = append(i.intervals[0:k], i.intervals[k+1:]...)
return
- } else if v == ki.start {
- i.intervals[k] = NewInterval(ki.start+1, ki.stop)
+ } else if v == ki.Start {
+ i.intervals[k] = NewInterval(ki.Start+1, ki.Stop)
return
- } else if v == ki.stop-1 {
- i.intervals[k] = NewInterval(ki.start, ki.stop-1)
+ } else if v == ki.Stop-1 {
+ i.intervals[k] = NewInterval(ki.Start, ki.Stop-1)
return
- } else if v < ki.stop-1 {
- x := NewInterval(ki.start, v)
- ki.start = v + 1
+ } else if v < ki.Stop-1 {
+ x := NewInterval(ki.Start, v)
+ ki.Start = v + 1
// i.intervals.splice(k, 0, x)
i.intervals = append(i.intervals[0:k], append([]*Interval{x}, i.intervals[k:]...)...)
return
@@ -228,14 +228,14 @@ func (i *IntervalSet) toCharString() string {
for j := 0; j < len(i.intervals); j++ {
v := i.intervals[j]
- if v.stop == v.start+1 {
- if v.start == TokenEOF {
+ if v.Stop == v.Start+1 {
+ if v.Start == TokenEOF {
names = append(names, "")
} else {
- names = append(names, ("'" + string(v.start) + "'"))
+ names = append(names, ("'" + string(v.Start) + "'"))
}
} else {
- names = append(names, "'"+string(v.start)+"'..'"+string(v.stop-1)+"'")
+ names = append(names, "'"+string(v.Start)+"'..'"+string(v.Stop-1)+"'")
}
}
if len(names) > 1 {
@@ -250,14 +250,14 @@ func (i *IntervalSet) toIndexString() string {
names := make([]string, 0)
for j := 0; j < len(i.intervals); j++ {
v := i.intervals[j]
- if v.stop == v.start+1 {
- if v.start == TokenEOF {
+ if v.Stop == v.Start+1 {
+ if v.Start == TokenEOF {
names = append(names, "")
} else {
- names = append(names, strconv.Itoa(v.start))
+ names = append(names, strconv.Itoa(v.Start))
}
} else {
- names = append(names, strconv.Itoa(v.start)+".."+strconv.Itoa(v.stop-1))
+ names = append(names, strconv.Itoa(v.Start)+".."+strconv.Itoa(v.Stop-1))
}
}
if len(names) > 1 {
@@ -270,7 +270,7 @@ func (i *IntervalSet) toIndexString() string {
func (i *IntervalSet) toTokenString(literalNames []string, symbolicNames []string) string {
names := make([]string, 0)
for _, v := range i.intervals {
- for j := v.start; j < v.stop; j++ {
+ for j := v.Start; j < v.Stop; j++ {
names = append(names, i.elementName(literalNames, symbolicNames, j))
}
}
diff --git a/runtime/Go/antlr/lexer.go b/runtime/Go/antlr/lexer.go
index ec0e27945..02deaf99c 100644
--- a/runtime/Go/antlr/lexer.go
+++ b/runtime/Go/antlr/lexer.go
@@ -21,11 +21,11 @@ type Lexer interface {
Emit() Token
- setChannel(int)
- pushMode(int)
- popMode() int
- setType(int)
- setMode(int)
+ SetChannel(int)
+ PushMode(int)
+ PopMode() int
+ SetType(int)
+ SetMode(int)
}
type BaseLexer struct {
@@ -150,7 +150,7 @@ func (b *BaseLexer) GetSourceName() string {
return b.GrammarFileName
}
-func (b *BaseLexer) setChannel(v int) {
+func (b *BaseLexer) SetChannel(v int) {
b.channel = v
}
@@ -250,11 +250,11 @@ func (b *BaseLexer) More() {
b.thetype = LexerMore
}
-func (b *BaseLexer) setMode(m int) {
+func (b *BaseLexer) SetMode(m int) {
b.mode = m
}
-func (b *BaseLexer) pushMode(m int) {
+func (b *BaseLexer) PushMode(m int) {
if LexerATNSimulatorDebug {
fmt.Println("pushMode " + strconv.Itoa(m))
}
@@ -262,7 +262,7 @@ func (b *BaseLexer) pushMode(m int) {
b.mode = m
}
-func (b *BaseLexer) popMode() int {
+func (b *BaseLexer) PopMode() int {
if len(b.modeStack) == 0 {
panic("Empty Stack")
}
@@ -331,7 +331,7 @@ func (b *BaseLexer) GetType() int {
return b.thetype
}
-func (b *BaseLexer) setType(t int) {
+func (b *BaseLexer) SetType(t int) {
b.thetype = t
}
@@ -361,7 +361,7 @@ func (b *BaseLexer) GetATN() *ATN {
// Return a list of all Token objects in input char stream.
// Forces load of all tokens. Does not include EOF token.
// /
-func (b *BaseLexer) getAllTokens() []Token {
+func (b *BaseLexer) GetAllTokens() []Token {
vl := b.Virt
tokens := make([]Token, 0)
t := vl.NextToken()
diff --git a/runtime/Go/antlr/lexer_action.go b/runtime/Go/antlr/lexer_action.go
index 3ca5e9ff3..20df84f94 100644
--- a/runtime/Go/antlr/lexer_action.go
+++ b/runtime/Go/antlr/lexer_action.go
@@ -101,7 +101,7 @@ func NewLexerTypeAction(thetype int) *LexerTypeAction {
}
func (l *LexerTypeAction) execute(lexer Lexer) {
- lexer.setType(l.thetype)
+ lexer.SetType(l.thetype)
}
func (l *LexerTypeAction) hash() int {
@@ -145,7 +145,7 @@ func NewLexerPushModeAction(mode int) *LexerPushModeAction {
// ]This action is implemented by calling {@link Lexer//pushMode} with the
// value provided by {@link //getMode}.
func (l *LexerPushModeAction) execute(lexer Lexer) {
- lexer.pushMode(l.mode)
+ lexer.PushMode(l.mode)
}
func (l *LexerPushModeAction) hash() int {
@@ -190,7 +190,7 @@ var LexerPopModeActionINSTANCE = NewLexerPopModeAction()
// This action is implemented by calling {@link Lexer//popMode}.
func (l *LexerPopModeAction) execute(lexer Lexer) {
- lexer.popMode()
+ lexer.PopMode()
}
func (l *LexerPopModeAction) String() string {
@@ -242,7 +242,7 @@ func NewLexerModeAction(mode int) *LexerModeAction {
// This action is implemented by calling {@link Lexer//mode} with the
// value provided by {@link //getMode}.
func (l *LexerModeAction) execute(lexer Lexer) {
- lexer.setMode(l.mode)
+ lexer.SetMode(l.mode)
}
func (l *LexerModeAction) hash() int {
@@ -341,7 +341,7 @@ func NewLexerChannelAction(channel int) *LexerChannelAction {
// This action is implemented by calling {@link Lexer//setChannel} with the
// value provided by {@link //getChannel}.
func (l *LexerChannelAction) execute(lexer Lexer) {
- lexer.setChannel(l.channel)
+ lexer.SetChannel(l.channel)
}
func (l *LexerChannelAction) hash() int {
diff --git a/runtime/Go/antlr/testing_assert_test.go b/runtime/Go/antlr/testing_assert_test.go
new file mode 100644
index 000000000..f3ca0d341
--- /dev/null
+++ b/runtime/Go/antlr/testing_assert_test.go
@@ -0,0 +1,98 @@
+// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+// These assert functions are borrowed from https://github.com/stretchr/testify/ (MIT License)
+
+package antlr
+
+import (
+ "fmt"
+ "reflect"
+ "testing"
+)
+
+type assert struct {
+ t *testing.T
+}
+
+func assertNew(t *testing.T) *assert {
+ return &assert{
+ t: t,
+ }
+}
+
+func (a *assert) Equal(expected, actual interface{}) bool {
+ if !objectsAreEqual(expected, actual) {
+ return a.Fail(fmt.Sprintf("Not equal:\n"+
+ "expected: %#v\n"+
+ " actual: %#v\n", expected, actual))
+ }
+ return true
+}
+
+func objectsAreEqual(expected, actual interface{}) bool {
+ if expected == nil || actual == nil {
+ return expected == actual
+ }
+ return reflect.DeepEqual(expected, actual)
+}
+
+func (a *assert) Nil(object interface{}) bool {
+ if isNil(object) {
+ return true
+ }
+ return a.Fail(fmt.Sprintf("Expected nil, but got: %#v", object))
+}
+
+func (a *assert) NotNil(object interface{}) bool {
+ if !isNil(object) {
+ return true
+ }
+ return a.Fail("Expected value not to be nil.")
+}
+
+// isNil checks if a specified object is nil or not, without Failing.
+func isNil(object interface{}) bool {
+ if object == nil {
+ return true
+ }
+
+ value := reflect.ValueOf(object)
+ kind := value.Kind()
+ if kind >= reflect.Chan && kind <= reflect.Slice && value.IsNil() {
+ return true
+ }
+
+ return false
+}
+
+func (a *assert) Panics(f func()) bool {
+ if funcDidPanic, panicValue := didPanic(f); !funcDidPanic {
+ return a.Fail(fmt.Sprintf("func %#v should panic\n\r\tPanic value:\t%v", f, panicValue))
+ }
+
+ return true
+}
+
+// Fail reports a failure through
+func (a *assert) Fail(failureMessage string) bool {
+ a.t.Errorf("%s", failureMessage)
+ return false
+}
+
+// didPanic returns true if the function passed to it panics. Otherwise, it returns false.
+func didPanic(f func()) (bool, interface{}) {
+ didPanic := false
+ var message interface{}
+ func() {
+ defer func() {
+ if message = recover(); message != nil {
+ didPanic = true
+ }
+ }()
+ // call the target function
+ f()
+ }()
+ return didPanic, message
+}
diff --git a/runtime/Go/antlr/testing_lexer_b_test.go b/runtime/Go/antlr/testing_lexer_b_test.go
new file mode 100644
index 000000000..4ab9b340d
--- /dev/null
+++ b/runtime/Go/antlr/testing_lexer_b_test.go
@@ -0,0 +1,107 @@
+// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+/*
+LexerB is a lexer for testing purpose.
+
+This file is generated from this grammar.
+
+lexer grammar LexerB;
+
+ID : 'a'..'z'+;
+INT : '0'..'9'+;
+SEMI : ';';
+ASSIGN : '=';
+PLUS : '+';
+MULT : '*';
+WS : ' '+;
+*/
+
+var lexerB_serializedLexerAtn = []uint16{
+ 3, 24715, 42794, 33075, 47597, 16764, 15335, 30598, 22884, 2, 9, 40, 8,
+ 1, 4, 2, 9, 2, 4, 3, 9, 3, 4, 4, 9, 4, 4, 5, 9, 5, 4, 6, 9, 6, 4, 7, 9,
+ 7, 4, 8, 9, 8, 3, 2, 6, 2, 19, 10, 2, 13, 2, 14, 2, 20, 3, 3, 6, 3, 24,
+ 10, 3, 13, 3, 14, 3, 25, 3, 4, 3, 4, 3, 5, 3, 5, 3, 6, 3, 6, 3, 7, 3, 7,
+ 3, 8, 6, 8, 37, 10, 8, 13, 8, 14, 8, 38, 2, 2, 9, 3, 3, 5, 4, 7, 5, 9,
+ 6, 11, 7, 13, 8, 15, 9, 3, 2, 2, 2, 42, 2, 3, 3, 2, 2, 2, 2, 5, 3, 2, 2,
+ 2, 2, 7, 3, 2, 2, 2, 2, 9, 3, 2, 2, 2, 2, 11, 3, 2, 2, 2, 2, 13, 3, 2,
+ 2, 2, 2, 15, 3, 2, 2, 2, 3, 18, 3, 2, 2, 2, 5, 23, 3, 2, 2, 2, 7, 27, 3,
+ 2, 2, 2, 9, 29, 3, 2, 2, 2, 11, 31, 3, 2, 2, 2, 13, 33, 3, 2, 2, 2, 15,
+ 36, 3, 2, 2, 2, 17, 19, 4, 99, 124, 2, 18, 17, 3, 2, 2, 2, 19, 20, 3, 2,
+ 2, 2, 20, 18, 3, 2, 2, 2, 20, 21, 3, 2, 2, 2, 21, 4, 3, 2, 2, 2, 22, 24,
+ 4, 50, 59, 2, 23, 22, 3, 2, 2, 2, 24, 25, 3, 2, 2, 2, 25, 23, 3, 2, 2,
+ 2, 25, 26, 3, 2, 2, 2, 26, 6, 3, 2, 2, 2, 27, 28, 7, 61, 2, 2, 28, 8, 3,
+ 2, 2, 2, 29, 30, 7, 63, 2, 2, 30, 10, 3, 2, 2, 2, 31, 32, 7, 45, 2, 2,
+ 32, 12, 3, 2, 2, 2, 33, 34, 7, 44, 2, 2, 34, 14, 3, 2, 2, 2, 35, 37, 7,
+ 34, 2, 2, 36, 35, 3, 2, 2, 2, 37, 38, 3, 2, 2, 2, 38, 36, 3, 2, 2, 2, 38,
+ 39, 3, 2, 2, 2, 39, 16, 3, 2, 2, 2, 6, 2, 20, 25, 38, 2,
+}
+
+var lexerB_lexerDeserializer = NewATNDeserializer(nil)
+var lexerB_lexerAtn = lexerB_lexerDeserializer.DeserializeFromUInt16(lexerB_serializedLexerAtn)
+
+var lexerB_lexerChannelNames = []string{
+ "DEFAULT_TOKEN_CHANNEL", "HIDDEN",
+}
+
+var lexerB_lexerModeNames = []string{
+ "DEFAULT_MODE",
+}
+
+var lexerB_lexerLiteralNames = []string{
+ "", "", "", "';'", "'='", "'+'", "'*'",
+}
+
+var lexerB_lexerSymbolicNames = []string{
+ "", "ID", "INT", "SEMI", "ASSIGN", "PLUS", "MULT", "WS",
+}
+
+var lexerB_lexerRuleNames = []string{
+ "ID", "INT", "SEMI", "ASSIGN", "PLUS", "MULT", "WS",
+}
+
+type LexerB struct {
+ *BaseLexer
+ channelNames []string
+ modeNames []string
+ // TODO: EOF string
+}
+
+var lexerB_lexerDecisionToDFA = make([]*DFA, len(lexerB_lexerAtn.DecisionToState))
+
+func init() {
+ for index, ds := range lexerB_lexerAtn.DecisionToState {
+ lexerB_lexerDecisionToDFA[index] = NewDFA(ds, index)
+ }
+}
+
+func NewLexerB(input CharStream) *LexerB {
+ l := new(LexerB)
+
+ l.BaseLexer = NewBaseLexer(input)
+ l.Interpreter = NewLexerATNSimulator(l, lexerB_lexerAtn, lexerB_lexerDecisionToDFA, NewPredictionContextCache())
+
+ l.channelNames = lexerB_lexerChannelNames
+ l.modeNames = lexerB_lexerModeNames
+ l.RuleNames = lexerB_lexerRuleNames
+ l.LiteralNames = lexerB_lexerLiteralNames
+ l.SymbolicNames = lexerB_lexerSymbolicNames
+ l.GrammarFileName = "LexerB.g4"
+ // TODO: l.EOF = TokenEOF
+
+ return l
+}
+
+// LexerB tokens.
+const (
+ LexerBID = 1
+ LexerBINT = 2
+ LexerBSEMI = 3
+ LexerBASSIGN = 4
+ LexerBPLUS = 5
+ LexerBMULT = 6
+ LexerBWS = 7
+)
diff --git a/runtime/Go/antlr/testing_util_test.go b/runtime/Go/antlr/testing_util_test.go
new file mode 100644
index 000000000..20428831b
--- /dev/null
+++ b/runtime/Go/antlr/testing_util_test.go
@@ -0,0 +1,30 @@
+package antlr
+
+import (
+ "fmt"
+ "strings"
+)
+
+// newTestCommonToken creates a common token with token type, text and channel
+// notice: test purpose only
+func newTestCommonToken(tokenType int, text string, channel int) *CommonToken {
+ t := new(CommonToken)
+ t.BaseToken = new(BaseToken)
+ t.tokenType = tokenType
+ t.channel = channel
+ t.text = text
+ t.line = 0
+ t.column = -1
+ return t
+}
+
+// tokensToString returns a string representation of a []Token
+// notice: test purpose only
+func tokensToString(tokens []Token) string {
+ buf := make([]string, len(tokens))
+ for i, token := range tokens {
+ buf[i] = fmt.Sprintf("%v", token)
+ }
+
+ return "[" + strings.Join(buf, ", ") + "]"
+}
diff --git a/runtime/Go/antlr/tokenstream_rewriter.go b/runtime/Go/antlr/tokenstream_rewriter.go
new file mode 100644
index 000000000..96a03f02a
--- /dev/null
+++ b/runtime/Go/antlr/tokenstream_rewriter.go
@@ -0,0 +1,649 @@
+// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+package antlr
+
+import (
+"bytes"
+"fmt"
+)
+
+
+//
+// Useful for rewriting out a buffered input token stream after doing some
+// augmentation or other manipulations on it.
+
+//
+// You can insert stuff, replace, and delete chunks. Note that the operations
+// are done lazily--only if you convert the buffer to a {@link String} with
+// {@link TokenStream#getText()}. This is very efficient because you are not
+// moving data around all the time. As the buffer of tokens is converted to
+// strings, the {@link #getText()} method(s) scan the input token stream and
+// check to see if there is an operation at the current index. If so, the
+// operation is done and then normal {@link String} rendering continues on the
+// buffer. This is like having multiple Turing machine instruction streams
+// (programs) operating on a single input tape. :)
+//
+
+// This rewriter makes no modifications to the token stream. It does not ask the
+// stream to fill itself up nor does it advance the input cursor. The token
+// stream {@link TokenStream#index()} will return the same value before and
+// after any {@link #getText()} call.
+
+//
+// The rewriter only works on tokens that you have in the buffer and ignores the
+// current input cursor. If you are buffering tokens on-demand, calling
+// {@link #getText()} halfway through the input will only do rewrites for those
+// tokens in the first half of the file.
+
+//
+// Since the operations are done lazily at {@link #getText}-time, operations do
+// not screw up the token index values. That is, an insert operation at token
+// index {@code i} does not change the index values for tokens
+// {@code i}+1..n-1.
+
+//
+// Because operations never actually alter the buffer, you may always get the
+// original token stream back without undoing anything. Since the instructions
+// are queued up, you can easily simulate transactions and roll back any changes
+// if there is an error just by removing instructions. For example,
+
+//
+// CharStream input = new ANTLRFileStream("input");
+// TLexer lex = new TLexer(input);
+// CommonTokenStream tokens = new CommonTokenStream(lex);
+// T parser = new T(tokens);
+// TokenStreamRewriter rewriter = new TokenStreamRewriter(tokens);
+// parser.startRule();
+//
+
+//
+// Then in the rules, you can execute (assuming rewriter is visible):
+
+//
+// Token t,u;
+// ...
+// rewriter.insertAfter(t, "text to put after t");}
+// rewriter.insertAfter(u, "text after u");}
+// System.out.println(rewriter.getText());
+//
+
+//
+// You can also have multiple "instruction streams" and get multiple rewrites
+// from a single pass over the input. Just name the instruction streams and use
+// that name again when printing the buffer. This could be useful for generating
+// a C file and also its header file--all from the same buffer:
+
+//
+// rewriter.insertAfter("pass1", t, "text to put after t");}
+// rewriter.insertAfter("pass2", u, "text after u");}
+// System.out.println(rewriter.getText("pass1"));
+// System.out.println(rewriter.getText("pass2"));
+//
+
+//
+// If you don't use named rewrite streams, a "default" stream is used as the
+// first example shows.
+
+
+
+const(
+ Default_Program_Name = "default"
+ Program_Init_Size = 100
+ Min_Token_Index = 0
+)
+
+// Define the rewrite operation hierarchy
+
+type RewriteOperation interface {
+ // Execute the rewrite operation by possibly adding to the buffer.
+ // Return the index of the next token to operate on.
+ Execute(buffer *bytes.Buffer) int
+ String() string
+ GetInstructionIndex() int
+ GetIndex() int
+ GetText() string
+ GetOpName() string
+ GetTokens() TokenStream
+ SetInstructionIndex(val int)
+ SetIndex(int)
+ SetText(string)
+ SetOpName(string)
+ SetTokens(TokenStream)
+}
+
+type BaseRewriteOperation struct {
+ //Current index of rewrites list
+ instruction_index int
+ //Token buffer index
+ index int
+ //Substitution text
+ text string
+ //Actual operation name
+ op_name string
+ //Pointer to token steam
+ tokens TokenStream
+}
+
+func (op *BaseRewriteOperation)GetInstructionIndex() int{
+ return op.instruction_index
+}
+
+func (op *BaseRewriteOperation)GetIndex() int{
+ return op.index
+}
+
+func (op *BaseRewriteOperation)GetText() string{
+ return op.text
+}
+
+func (op *BaseRewriteOperation)GetOpName() string{
+ return op.op_name
+}
+
+func (op *BaseRewriteOperation)GetTokens() TokenStream{
+ return op.tokens
+}
+
+func (op *BaseRewriteOperation)SetInstructionIndex(val int){
+ op.instruction_index = val
+}
+
+func (op *BaseRewriteOperation)SetIndex(val int) {
+ op.index = val
+}
+
+func (op *BaseRewriteOperation)SetText(val string){
+ op.text = val
+}
+
+func (op *BaseRewriteOperation)SetOpName(val string){
+ op.op_name = val
+}
+
+func (op *BaseRewriteOperation)SetTokens(val TokenStream) {
+ op.tokens = val
+}
+
+
+func (op *BaseRewriteOperation) Execute(buffer *bytes.Buffer) int{
+ return op.index
+}
+
+func (op *BaseRewriteOperation) String() string {
+ return fmt.Sprintf("<%s@%d:\"%s\">",
+ op.op_name,
+ op.tokens.Get(op.GetIndex()),
+ op.text,
+ )
+
+}
+
+
+type InsertBeforeOp struct {
+ BaseRewriteOperation
+}
+
+func NewInsertBeforeOp(index int, text string, stream TokenStream) *InsertBeforeOp{
+ return &InsertBeforeOp{BaseRewriteOperation:BaseRewriteOperation{
+ index:index,
+ text:text,
+ op_name:"InsertBeforeOp",
+ tokens:stream,
+ }}
+}
+
+func (op *InsertBeforeOp) Execute(buffer *bytes.Buffer) int{
+ buffer.WriteString(op.text)
+ if op.tokens.Get(op.index).GetTokenType() != TokenEOF{
+ buffer.WriteString(op.tokens.Get(op.index).GetText())
+ }
+ return op.index+1
+}
+
+func (op *InsertBeforeOp) String() string {
+ return op.BaseRewriteOperation.String()
+}
+
+// Distinguish between insert after/before to do the "insert afters"
+// first and then the "insert befores" at same index. Implementation
+// of "insert after" is "insert before index+1".
+
+type InsertAfterOp struct {
+ BaseRewriteOperation
+}
+
+func NewInsertAfterOp(index int, text string, stream TokenStream) *InsertAfterOp{
+ return &InsertAfterOp{BaseRewriteOperation:BaseRewriteOperation{
+ index:index+1,
+ text:text,
+ tokens:stream,
+ }}
+}
+
+func (op *InsertAfterOp) Execute(buffer *bytes.Buffer) int {
+ buffer.WriteString(op.text)
+ if op.tokens.Get(op.index).GetTokenType() != TokenEOF{
+ buffer.WriteString(op.tokens.Get(op.index).GetText())
+ }
+ return op.index+1
+}
+
+func (op *InsertAfterOp) String() string {
+ return op.BaseRewriteOperation.String()
+}
+
+// I'm going to try replacing range from x..y with (y-x)+1 ReplaceOp
+// instructions.
+type ReplaceOp struct{
+ BaseRewriteOperation
+ LastIndex int
+}
+
+func NewReplaceOp(from, to int, text string, stream TokenStream)*ReplaceOp {
+ return &ReplaceOp{
+ BaseRewriteOperation:BaseRewriteOperation{
+ index:from,
+ text:text,
+ op_name:"ReplaceOp",
+ tokens:stream,
+ },
+ LastIndex:to,
+ }
+}
+
+func (op *ReplaceOp)Execute(buffer *bytes.Buffer) int{
+ if op.text != ""{
+ buffer.WriteString(op.text)
+ }
+ return op.LastIndex +1
+}
+
+func (op *ReplaceOp) String() string {
+ if op.text == "" {
+ return fmt.Sprintf("<DeleteOp@%d..%d>",
+ op.tokens.Get(op.index), op.tokens.Get(op.LastIndex))
+ }
+ return fmt.Sprintf("<ReplaceOp@%d..%d:\"%s\">",
+ op.tokens.Get(op.index), op.tokens.Get(op.LastIndex), op.text)
+}
+
+
+type TokenStreamRewriter struct {
+ //Our source stream
+ tokens TokenStream
+ // You may have multiple, named streams of rewrite operations.
+ // I'm calling these things "programs."
+ // Maps String (name) → rewrite (List)
+ programs map[string][]RewriteOperation
+ last_rewrite_token_indexes map[string]int
+}
+
+func NewTokenStreamRewriter(tokens TokenStream) *TokenStreamRewriter{
+ return &TokenStreamRewriter{
+ tokens: tokens,
+ programs: map[string][]RewriteOperation{
+ Default_Program_Name:make([]RewriteOperation,0, Program_Init_Size),
+ },
+ last_rewrite_token_indexes: map[string]int{},
+ }
+}
+
+func (tsr *TokenStreamRewriter) GetTokenStream() TokenStream{
+ return tsr.tokens
+}
+
+// Rollback the instruction stream for a program so that
+// the indicated instruction (via instructionIndex) is no
+// longer in the stream. UNTESTED!
+func (tsr *TokenStreamRewriter) Rollback(program_name string, instruction_index int){
+ is, ok := tsr.programs[program_name]
+ if ok{
+ tsr.programs[program_name] = is[Min_Token_Index:instruction_index]
+ }
+}
+
+func (tsr *TokenStreamRewriter) RollbackDefault(instruction_index int){
+ tsr.Rollback(Default_Program_Name, instruction_index)
+}
+//Reset the program so that no instructions exist
+func (tsr *TokenStreamRewriter) DeleteProgram(program_name string){
+ tsr.Rollback(program_name, Min_Token_Index) //TODO: double test on that cause lower bound is not included
+}
+
+func (tsr *TokenStreamRewriter) DeleteProgramDefault(){
+ tsr.DeleteProgram(Default_Program_Name)
+}
+
+func (tsr *TokenStreamRewriter) InsertAfter(program_name string, index int, text string){
+ // to insert after, just insert before next index (even if past end)
+ var op RewriteOperation = NewInsertAfterOp(index, text, tsr.tokens)
+ rewrites := tsr.GetProgram(program_name)
+ op.SetInstructionIndex(len(rewrites))
+ tsr.AddToProgram(program_name, op)
+}
+
+func (tsr *TokenStreamRewriter) InsertAfterDefault(index int, text string){
+ tsr.InsertAfter(Default_Program_Name, index, text)
+}
+
+func (tsr *TokenStreamRewriter) InsertAfterToken(program_name string, token Token, text string){
+ tsr.InsertAfter(program_name, token.GetTokenIndex(), text)
+}
+
+func (tsr* TokenStreamRewriter) InsertBefore(program_name string, index int, text string){
+ var op RewriteOperation = NewInsertBeforeOp(index, text, tsr.tokens)
+ rewrites := tsr.GetProgram(program_name)
+ op.SetInstructionIndex(len(rewrites))
+ tsr.AddToProgram(program_name, op)
+}
+
+func (tsr *TokenStreamRewriter) InsertBeforeDefault(index int, text string){
+ tsr.InsertBefore(Default_Program_Name, index, text)
+}
+
+func (tsr *TokenStreamRewriter) InsertBeforeToken(program_name string,token Token, text string){
+ tsr.InsertBefore(program_name, token.GetTokenIndex(), text)
+}
+
+func (tsr *TokenStreamRewriter) Replace(program_name string, from, to int, text string){
+ if from > to || from < 0 || to < 0 || to >= tsr.tokens.Size(){
+ panic(fmt.Sprintf("replace: range invalid: %d..%d(size=%d)",
+ from, to, tsr.tokens.Size()))
+ }
+ var op RewriteOperation = NewReplaceOp(from, to, text, tsr.tokens)
+ rewrites := tsr.GetProgram(program_name)
+ op.SetInstructionIndex(len(rewrites))
+ tsr.AddToProgram(program_name, op)
+}
+
+func (tsr *TokenStreamRewriter)ReplaceDefault(from, to int, text string) {
+ tsr.Replace(Default_Program_Name, from, to, text)
+}
+
+func (tsr *TokenStreamRewriter)ReplaceDefaultPos(index int, text string){
+ tsr.ReplaceDefault(index, index, text)
+}
+
+func (tsr *TokenStreamRewriter)ReplaceToken(program_name string, from, to Token, text string){
+ tsr.Replace(program_name, from.GetTokenIndex(), to.GetTokenIndex(), text)
+}
+
+func (tsr *TokenStreamRewriter)ReplaceTokenDefault(from, to Token, text string){
+ tsr.ReplaceToken(Default_Program_Name, from, to, text)
+}
+
+func (tsr *TokenStreamRewriter)ReplaceTokenDefaultPos(index Token, text string){
+ tsr.ReplaceTokenDefault(index, index, text)
+}
+
+func (tsr *TokenStreamRewriter)Delete(program_name string, from, to int){
+ tsr.Replace(program_name, from, to, "" )
+}
+
+func (tsr *TokenStreamRewriter)DeleteDefault(from, to int){
+ tsr.Delete(Default_Program_Name, from, to)
+}
+
+func (tsr *TokenStreamRewriter)DeleteDefaultPos(index int){
+ tsr.DeleteDefault(index,index)
+}
+
+func (tsr *TokenStreamRewriter)DeleteToken(program_name string, from, to Token) {
+ tsr.ReplaceToken(program_name, from, to, "")
+}
+
+func (tsr *TokenStreamRewriter)DeleteTokenDefault(from,to Token){
+ tsr.DeleteToken(Default_Program_Name, from, to)
+}
+
+func (tsr *TokenStreamRewriter)GetLastRewriteTokenIndex(program_name string)int {
+ i, ok := tsr.last_rewrite_token_indexes[program_name]
+ if !ok{
+ return -1
+ }
+ return i
+}
+
+func (tsr *TokenStreamRewriter)GetLastRewriteTokenIndexDefault()int{
+ return tsr.GetLastRewriteTokenIndex(Default_Program_Name)
+}
+
+func (tsr *TokenStreamRewriter)SetLastRewriteTokenIndex(program_name string, i int){
+ tsr.last_rewrite_token_indexes[program_name] = i
+}
+
+func (tsr *TokenStreamRewriter)InitializeProgram(name string)[]RewriteOperation{
+ is := make([]RewriteOperation, 0, Program_Init_Size)
+ tsr.programs[name] = is
+ return is
+}
+
+func (tsr *TokenStreamRewriter)AddToProgram(name string, op RewriteOperation){
+ is := tsr.GetProgram(name)
+ is = append(is, op)
+ tsr.programs[name] = is
+}
+
+func (tsr *TokenStreamRewriter)GetProgram(name string) []RewriteOperation {
+ is, ok := tsr.programs[name]
+ if !ok{
+ is = tsr.InitializeProgram(name)
+ }
+ return is
+}
+// Return the text from the original tokens altered per the
+// instructions given to this rewriter.
+func (tsr *TokenStreamRewriter)GetTextDefault() string{
+ return tsr.GetText(
+ Default_Program_Name,
+ NewInterval(0, tsr.tokens.Size()-1))
+}
+// Return the text from the original tokens altered per the
+// instructions given to this rewriter.
+func (tsr *TokenStreamRewriter)GetText(program_name string, interval *Interval) string {
+ rewrites := tsr.programs[program_name]
+ start := interval.Start
+ stop := interval.Stop
+ // ensure start/end are in range
+ stop = min(stop, tsr.tokens.Size()-1)
+ start = max(start,0)
+ if rewrites == nil || len(rewrites) == 0{
+ return tsr.tokens.GetTextFromInterval(interval) // no instructions to execute
+ }
+ buf := bytes.Buffer{}
+ // First, optimize instruction stream
+ indexToOp := reduceToSingleOperationPerIndex(rewrites)
+ // Walk buffer, executing instructions and emitting tokens
+ for i:=start; i<=stop && i<tsr.tokens.Size();{
+ op := indexToOp[i]
+ delete(indexToOp, i)// remove so any left have index size-1
+ t := tsr.tokens.Get(i)
+ if op == nil{
+ // no operation at that index, just dump token
+ if t.GetTokenType() != TokenEOF {buf.WriteString(t.GetText())}
+ i++ // move to next token
+ }else {
+ i = op.Execute(&buf)// execute operation and skip
+ }
+ }
+ // include stuff after end if it's last index in buffer
+ // So, if they did an insertAfter(lastValidIndex, "foo"), include
+ // foo if end==lastValidIndex.
+ if stop == tsr.tokens.Size()-1{
+ // Scan any remaining operations after last token
+ // should be included (they will be inserts).
+ for _, op := range indexToOp{
+ if op.GetIndex() >= tsr.tokens.Size()-1 {buf.WriteString(op.GetText())}
+ }
+ }
+ return buf.String()
+}
+
+// We need to combine operations and report invalid operations (like
+// overlapping replaces that are not completed nested). Inserts to
+// same index need to be combined etc... Here are the cases:
+//
+// I.i.u I.j.v leave alone, nonoverlapping
+// I.i.u I.i.v combine: Iivu
+//
+// R.i-j.u R.x-y.v | i-j in x-y delete first R
+// R.i-j.u R.i-j.v delete first R
+// R.i-j.u R.x-y.v | x-y in i-j ERROR
+// R.i-j.u R.x-y.v | boundaries overlap ERROR
+//
+// Delete special case of replace (text==null):
+// D.i-j.u D.x-y.v | boundaries overlap combine to max(min)..max(right)
+//
+// I.i.u R.x-y.v | i in (x+1)-y delete I (since insert before
+// we're not deleting i)
+// I.i.u R.x-y.v | i not in (x+1)-y leave alone, nonoverlapping
+// R.x-y.v I.i.u | i in x-y ERROR
+// R.x-y.v I.x.u R.x-y.uv (combine, delete I)
+// R.x-y.v I.i.u | i not in x-y leave alone, nonoverlapping
+//
+// I.i.u = insert u before op @ index i
+// R.x-y.u = replace x-y indexed tokens with u
+//
+// First we need to examine replaces. For any replace op:
+//
+// 1. wipe out any insertions before op within that range.
+// 2. Drop any replace op before that is contained completely within
+// that range.
+// 3. Throw exception upon boundary overlap with any previous replace.
+//
+// Then we can deal with inserts:
+//
+// 1. for any inserts to same index, combine even if not adjacent.
+// 2. for any prior replace with same left boundary, combine this
+// insert with replace and delete this replace.
+// 3. throw exception if index in same range as previous replace
+//
+// Don't actually delete; make op null in list. Easier to walk list.
+// Later we can throw as we add to index → op map.
+//
+// Note that I.2 R.2-2 will wipe out I.2 even though, technically, the
+// inserted stuff would be before the replace range. But, if you
+// add tokens in front of a method body '{' and then delete the method
+// body, I think the stuff before the '{' you added should disappear too.
+//
+// Return a map from token index to operation.
+//
+func reduceToSingleOperationPerIndex(rewrites []RewriteOperation) map[int]RewriteOperation{
+ // WALK REPLACES
+ for i:=0; i < len(rewrites); i++{
+ op := rewrites[i]
+ if op == nil{continue}
+ rop, ok := op.(*ReplaceOp)
+ if !ok{continue}
+ // Wipe prior inserts within range
+ for j:=0; j<i && j < len(rewrites); j++{
+ if iop, ok := rewrites[j].(*InsertBeforeOp);ok{
+ if iop.index == rop.index{
+ // E.g., insert before 2, delete 2..2; update replace
+ // text to include insert before, kill insert
+ rewrites[iop.instruction_index] = nil
+ if rop.text != ""{
+ rop.text = iop.text + rop.text
+ }else{
+ rop.text = iop.text
+ }
+ }else if iop.index > rop.index && iop.index <=rop.LastIndex{
+ // delete insert as it's a no-op.
+ rewrites[iop.instruction_index] = nil
+ }
+ }
+ }
+ // Drop any prior replaces contained within
+ for j:=0; j<i && j < len(rewrites); j++{
+ if prevop, ok := rewrites[j].(*ReplaceOp); ok{
+ if prevop.index>=rop.index && prevop.LastIndex <= rop.LastIndex{
+ // delete replace as it's a no-op.
+ rewrites[prevop.instruction_index] = nil
+ continue
+ }
+ // throw exception unless disjoint or identical
+ disjoint := prevop.LastIndex < rop.index || prevop.index > rop.LastIndex
+ // Delete special case of replace (text==null):
+ // D.i-j.u D.x-y.v | boundaries overlap combine to max(min)..max(right)
+ if prevop.text == "" && rop.text == "" && !disjoint{
+ rewrites[prevop.instruction_index] = nil
+ rop.index = min(prevop.index, rop.index)
+ rop.LastIndex = max(prevop.LastIndex, rop.LastIndex)
+ println("new rop" + rop.String()) //TODO: remove console write, taken from Java version
+ }else if !disjoint{
+ panic("replace op boundaries of " + rop.String() + " overlap with previous " + prevop.String())
+ }
+ }
+ }
+ }
+ // WALK INSERTS
+ for i:=0; i < len(rewrites); i++ {
+ op := rewrites[i]
+ if op == nil{continue}
+ //hack to replicate inheritance in composition
+ _, iok := rewrites[i].(*InsertBeforeOp)
+ _, aok := rewrites[i].(*InsertAfterOp)
+ if !iok && !aok{continue}
+ iop := rewrites[i]
+ // combine current insert with prior if any at same index
+ // deviating a bit from TokenStreamRewriter.java - hard to incorporate inheritance logic
+ for j:=0; j<i && j < len(rewrites); j++{
+ if nextIop, ok := rewrites[j].(*InsertAfterOp); ok{
+ if nextIop.index == iop.GetIndex(){
+ iop.SetText(nextIop.text + iop.GetText())
+ rewrites[j] = nil
+ }
+ }
+ if prevIop, ok := rewrites[j].(*InsertBeforeOp);ok{
+ if prevIop.index == iop.GetIndex(){
+ iop.SetText(iop.GetText() + prevIop.text)
+ rewrites[prevIop.instruction_index] = nil
+ }
+ }
+ }
+ // look for replaces where iop.index is in range; error
+ for j:=0; j<i && j < len(rewrites); j++{
+ if rop, ok := rewrites[j].(*ReplaceOp); ok{
+ if iop.GetIndex() == rop.index{
+ rop.text = iop.GetText() + rop.text
+ rewrites[i] = nil
+ continue
+ }
+ if iop.GetIndex() >= rop.index && iop.GetIndex() <= rop.LastIndex{
+ panic("insert op "+iop.String()+" within boundaries of previous "+rop.String())
+ }
+ }
+ }
+ }
+ m := map[int]RewriteOperation{}
+ for i:=0; i < len(rewrites); i++{
+ op := rewrites[i]
+ if op == nil {continue}
+ if _, ok := m[op.GetIndex()]; ok{
+ panic("should only be one op per index")
+ }
+ m[op.GetIndex()] = op
+ }
+ return m
+}
+
+
+/*
+ Quick fixing Go lack of overloads
+ */
+
+func max(a,b int)int{
+ if a>b{
+ return a
+ }else {
+ return b
+ }
+}
+func min(a,b int)int{
+ if aaa", "DistinguishBetweenInsertAfterAndInsertBeforeToPreserverOrder",
+ func(r *TokenStreamRewriter){
+ r.InsertBeforeDefault(0, "<b>")
+ r.InsertAfterDefault(0, "<a>")
+ r.InsertBeforeDefault(1, "<b>")
+ r.InsertAfterDefault(1,"<a>")
+ }),
+ NewLexerTest("aa", "a
a", "DistinguishBetweenInsertAfterAndInsertBeforeToPreserverOrder2",
+ func(r *TokenStreamRewriter){
+ r.InsertBeforeDefault(0, "")
+ r.InsertBeforeDefault(0, "")
+ r.InsertAfterDefault(0, "
")
+ r.InsertAfterDefault(0, "")
+ r.InsertBeforeDefault(1, "")
+ r.InsertAfterDefault(1,"")
+ }),
+ NewLexerTest("ab", "!b", "DistinguishBetweenInsertAfterAndInsertBeforeToPreserverOrder2",
+ func(r *TokenStreamRewriter){
+ r.InsertBeforeDefault(0, "")
+ r.InsertBeforeDefault(0, "")
+ r.InsertBeforeDefault(0, "
")
+ r.InsertAfterDefault(0, "")
+ r.InsertAfterDefault(0, "")
+ r.InsertAfterDefault(0, "
")
+ r.InsertBeforeDefault(1, "!")
+ }),
+ }
+
+
+ for _,c := range tests{
+ t.Run(c.description,func(t *testing.T) {
+ rewriter := prepare_rewriter(c.input)
+ c.ops(rewriter)
+ if len(c.expected_exception)>0{
+ panic_tester(t, c.expected_exception, rewriter)
+ }else{
+ result := rewriter.GetTextDefault()
+ if result!=c.expected{
+ t.Errorf("Expected:%s | Result: %s", c.expected, result)
+ }
+ }
+ } )
+ }
+}
+
+
+// Suppress unused import error
+var _ = fmt.Printf
+var _ = unicode.IsLetter
+
+var serializedLexerAtn = []uint16{
+ 3, 24715, 42794, 33075, 47597, 16764, 15335, 30598, 22884, 2, 5, 15, 8,
+ 1, 4, 2, 9, 2, 4, 3, 9, 3, 4, 4, 9, 4, 3, 2, 3, 2, 3, 3, 3, 3, 3, 4, 3,
+ 4, 2, 2, 5, 3, 3, 5, 4, 7, 5, 3, 2, 2, 2, 14, 2, 3, 3, 2, 2, 2, 2, 5, 3,
+ 2, 2, 2, 2, 7, 3, 2, 2, 2, 3, 9, 3, 2, 2, 2, 5, 11, 3, 2, 2, 2, 7, 13,
+ 3, 2, 2, 2, 9, 10, 7, 99, 2, 2, 10, 4, 3, 2, 2, 2, 11, 12, 7, 100, 2, 2,
+ 12, 6, 3, 2, 2, 2, 13, 14, 7, 101, 2, 2, 14, 8, 3, 2, 2, 2, 3, 2, 2,
+}
+
+var lexerDeserializer = NewATNDeserializer(nil)
+var lexerAtn = lexerDeserializer.DeserializeFromUInt16(serializedLexerAtn)
+
+var lexerChannelNames = []string{
+ "DEFAULT_TOKEN_CHANNEL", "HIDDEN",
+}
+
+var lexerModeNames = []string{
+ "DEFAULT_MODE",
+}
+
+var lexerLiteralNames = []string{
+ "", "'a'", "'b'", "'c'",
+}
+
+var lexerSymbolicNames = []string{
+ "", "A", "B", "C",
+}
+
+var lexerRuleNames = []string{
+ "A", "B", "C",
+}
+
+type LexerA struct {
+ *BaseLexer
+ channelNames []string
+ modeNames []string
+ // TODO: EOF string
+}
+
+var lexerDecisionToDFA = make([]*DFA, len(lexerAtn.DecisionToState))
+
+func init() {
+ for index, ds := range lexerAtn.DecisionToState {
+ lexerDecisionToDFA[index] = NewDFA(ds, index)
+ }
+}
+
+func NewLexerA(input CharStream) *LexerA {
+
+ l := new(LexerA)
+
+ l.BaseLexer = NewBaseLexer(input)
+ l.Interpreter = NewLexerATNSimulator(l, lexerAtn, lexerDecisionToDFA, NewPredictionContextCache())
+
+ l.channelNames = lexerChannelNames
+ l.modeNames = lexerModeNames
+ l.RuleNames = lexerRuleNames
+ l.LiteralNames = lexerLiteralNames
+ l.SymbolicNames = lexerSymbolicNames
+ l.GrammarFileName = "LexerA.g4"
+ // TODO: l.EOF = antlr.TokenEOF
+
+ return l
+}
+
+// LexerA tokens.
+const (
+ LexerAA = 1
+ LexerAB = 2
+ LexerAC = 3
+)
+
diff --git a/runtime/Java/pom.xml b/runtime/Java/pom.xml
index c42015629..3eb60b2df 100644
--- a/runtime/Java/pom.xml
+++ b/runtime/Java/pom.xml
@@ -27,6 +27,7 @@
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-source-plugin</artifactId>
+ <version>3.0.1</version>
diff --git a/runtime/Java/src/org/antlr/v4/runtime/CodePointCharStream.java b/runtime/Java/src/org/antlr/v4/runtime/CodePointCharStream.java
index 491ef6918..107faa7b1 100644
--- a/runtime/Java/src/org/antlr/v4/runtime/CodePointCharStream.java
+++ b/runtime/Java/src/org/antlr/v4/runtime/CodePointCharStream.java
@@ -151,8 +151,8 @@ public abstract class CodePointCharStream implements CharStream {
/** Return the UTF-16 encoded string for the given interval */
@Override
public String getText(Interval interval) {
- int startIdx = Math.min(interval.a, size - 1);
- int len = Math.min(interval.b - interval.a + 1, size);
+ int startIdx = Math.min(interval.a, size);
+ int len = Math.min(interval.b - interval.a + 1, size - startIdx);
// We know the maximum code point in byteArray is U+00FF,
// so we can treat this as if it were ISO-8859-1, aka Latin-1,
diff --git a/runtime/Java/src/org/antlr/v4/runtime/DefaultErrorStrategy.java b/runtime/Java/src/org/antlr/v4/runtime/DefaultErrorStrategy.java
index 819538539..02b5ee510 100644
--- a/runtime/Java/src/org/antlr/v4/runtime/DefaultErrorStrategy.java
+++ b/runtime/Java/src/org/antlr/v4/runtime/DefaultErrorStrategy.java
@@ -36,6 +36,21 @@ public class DefaultErrorStrategy implements ANTLRErrorStrategy {
protected IntervalSet lastErrorStates;
+ /**
+ * This field is used to propagate information about the lookahead following
+ * the previous match. Since prediction prefers completing the current rule
+ * to error recovery efforts, error reporting may occur later than the
+ * original point where it was discoverable. The original context is used to
+ * compute the true expected sets as though the reporting occurred as early
+ * as possible.
+ */
+ protected ParserRuleContext nextTokensContext;
+
+ /**
+ * @see #nextTokensContext
+ */
+ protected int nextTokensState;
+
/**
* {@inheritDoc}
*
@@ -225,7 +240,20 @@ public class DefaultErrorStrategy implements ANTLRErrorStrategy {
// try cheaper subset first; might get lucky. seems to shave a wee bit off
IntervalSet nextTokens = recognizer.getATN().nextTokens(s);
- if (nextTokens.contains(Token.EPSILON) || nextTokens.contains(la)) {
+ if (nextTokens.contains(la)) {
+ // We are sure the token matches
+ nextTokensContext = null;
+ nextTokensState = ATNState.INVALID_STATE_NUMBER;
+ return;
+ }
+
+ if (nextTokens.contains(Token.EPSILON)) {
+ if (nextTokensContext == null) {
+ // It's possible the next token won't match; information tracked
+ // by sync is restricted for performance.
+ nextTokensContext = recognizer.getContext();
+ nextTokensState = recognizer.getState();
+ }
return;
}
@@ -450,7 +478,14 @@ public class DefaultErrorStrategy implements ANTLRErrorStrategy {
}
// even that didn't work; must throw the exception
- throw new InputMismatchException(recognizer);
+ InputMismatchException e;
+ if (nextTokensContext == null) {
+ e = new InputMismatchException(recognizer);
+ } else {
+ e = new InputMismatchException(recognizer, nextTokensState, nextTokensContext);
+ }
+
+ throw e;
}
/**
diff --git a/runtime/Java/src/org/antlr/v4/runtime/InputMismatchException.java b/runtime/Java/src/org/antlr/v4/runtime/InputMismatchException.java
index fc4261558..08ef67c58 100644
--- a/runtime/Java/src/org/antlr/v4/runtime/InputMismatchException.java
+++ b/runtime/Java/src/org/antlr/v4/runtime/InputMismatchException.java
@@ -13,4 +13,10 @@ public class InputMismatchException extends RecognitionException {
super(recognizer, recognizer.getInputStream(), recognizer._ctx);
this.setOffendingToken(recognizer.getCurrentToken());
}
+
+ public InputMismatchException(Parser recognizer, int state, ParserRuleContext ctx) {
+ super(recognizer, recognizer.getInputStream(), ctx);
+ this.setOffendingState(state);
+ this.setOffendingToken(recognizer.getCurrentToken());
+ }
}
diff --git a/runtime/Java/src/org/antlr/v4/runtime/atn/ParserATNSimulator.java b/runtime/Java/src/org/antlr/v4/runtime/atn/ParserATNSimulator.java
index 46840ab7c..76524ebd8 100755
--- a/runtime/Java/src/org/antlr/v4/runtime/atn/ParserATNSimulator.java
+++ b/runtime/Java/src/org/antlr/v4/runtime/atn/ParserATNSimulator.java
@@ -270,7 +270,7 @@ public class ParserATNSimulator extends ATNSimulator {
public static final boolean retry_debug = false;
/** Just in case this optimization is bad, add an ENV variable to turn it off */
- public static final boolean TURN_OFF_LR_LOOP_ENTRY_BRANCH_OPT = Boolean.parseBoolean(System.getenv("TURN_OFF_LR_LOOP_ENTRY_BRANCH_OPT"));
+ public static final boolean TURN_OFF_LR_LOOP_ENTRY_BRANCH_OPT = Boolean.parseBoolean(getSafeEnv("TURN_OFF_LR_LOOP_ENTRY_BRANCH_OPT"));
protected final Parser parser;
@@ -1541,11 +1541,6 @@ public class ParserATNSimulator extends ATNSimulator {
ATNConfig c = getEpsilonTarget(config, t, continueCollecting,
depth == 0, fullCtx, treatEofAsEpsilon);
if ( c!=null ) {
- if (!t.isEpsilon() && !closureBusy.add(c)) {
- // avoid infinite recursion for EOF* and EOF+
- continue;
- }
-
int newDepth = depth;
if ( config.state instanceof RuleStopState) {
assert !fullCtx;
@@ -1555,11 +1550,6 @@ public class ParserATNSimulator extends ATNSimulator {
// come in handy and we avoid evaluating context dependent
// preds if this is > 0.
- if (!closureBusy.add(c)) {
- // avoid infinite recursion for right-recursive rules
- continue;
- }
-
if (_dfa != null && _dfa.isPrecedenceDfa()) {
int outermostPrecedenceReturn = ((EpsilonTransition)t).outermostPrecedenceReturn();
if (outermostPrecedenceReturn == _dfa.atnStartState.ruleIndex) {
@@ -1568,15 +1558,28 @@ public class ParserATNSimulator extends ATNSimulator {
}
c.reachesIntoOuterContext++;
+
+ if (!closureBusy.add(c)) {
+ // avoid infinite recursion for right-recursive rules
+ continue;
+ }
+
configs.dipsIntoOuterContext = true; // TODO: can remove? only care when we add to set per middle of this method
assert newDepth > Integer.MIN_VALUE;
newDepth--;
if ( debug ) System.out.println("dips into outer ctx: "+c);
}
- else if (t instanceof RuleTransition) {
- // latch when newDepth goes negative - once we step out of the entry context we can't return
- if (newDepth >= 0) {
- newDepth++;
+ else {
+ if (!t.isEpsilon() && !closureBusy.add(c)) {
+ // avoid infinite recursion for EOF* and EOF+
+ continue;
+ }
+
+ if (t instanceof RuleTransition) {
+ // latch when newDepth goes negative - once we step out of the entry context we can't return
+ if (newDepth >= 0) {
+ newDepth++;
+ }
}
}
@@ -2178,4 +2181,14 @@ public class ParserATNSimulator extends ATNSimulator {
public Parser getParser() {
return parser;
}
+
+ public static String getSafeEnv(String envName) {
+ try {
+ return System.getenv(envName);
+ }
+ catch(SecurityException e) {
+ // use the default value
+ }
+ return null;
+ }
}
diff --git a/runtime/JavaScript/src/antlr4/Utils.js b/runtime/JavaScript/src/antlr4/Utils.js
index d7627be60..2cb939a66 100644
--- a/runtime/JavaScript/src/antlr4/Utils.js
+++ b/runtime/JavaScript/src/antlr4/Utils.js
@@ -401,11 +401,11 @@ DoubleDict.prototype.set = function (a, b, o) {
function escapeWhitespace(s, escapeSpaces) {
- s = s.replace("\t", "\\t");
- s = s.replace("\n", "\\n");
- s = s.replace("\r", "\\r");
+ s = s.replace(/\t/g, "\\t")
+ .replace(/\n/g, "\\n")
+ .replace(/\r/g, "\\r");
if (escapeSpaces) {
- s = s.replace(" ", "\u00B7");
+ s = s.replace(/ /g, "\u00B7");
}
return s;
}
@@ -443,4 +443,4 @@ exports.hashStuff = hashStuff;
exports.escapeWhitespace = escapeWhitespace;
exports.arrayToString = arrayToString;
exports.titleCase = titleCase;
-exports.equalArrays = equalArrays;
\ No newline at end of file
+exports.equalArrays = equalArrays;
diff --git a/runtime/Python2/src/antlr4/Parser.py b/runtime/Python2/src/antlr4/Parser.py
index d88f77918..69abe739b 100644
--- a/runtime/Python2/src/antlr4/Parser.py
+++ b/runtime/Python2/src/antlr4/Parser.py
@@ -218,6 +218,13 @@ class Parser (Recognizer):
self._ctx.exitRule(listener)
listener.exitEveryRule(self._ctx)
+ # Gets the number of syntax errors reported during parsing. This value is
+ # incremented each time {@link #notifyErrorListeners} is called.
+ #
+ # @see #notifyErrorListeners
+ #
+ def getNumberOfSyntaxErrors(self):
+ return self._syntaxErrors
def getTokenFactory(self):
return self._input.tokenSource._factory
diff --git a/runtime/Python2/src/antlr4/tree/RuleTagToken.py b/runtime/Python2/src/antlr4/tree/RuleTagToken.py
index 2043c1625..d63a3a53b 100644
--- a/runtime/Python2/src/antlr4/tree/RuleTagToken.py
+++ b/runtime/Python2/src/antlr4/tree/RuleTagToken.py
@@ -36,14 +36,13 @@ class RuleTagToken(Token):
self.tokenIndex = -1 # from 0..n-1 of the token object in the input stream
self.line = 0 # line=1..n of the 1st character
self.column = -1 # beginning of the line at which it occurs, 0..n-1
- self.label = label
+ self.label = unicode(label)
self._text = self.getText() # text of the token.
-
- self.ruleName = ruleName
+ self.ruleName = unicode(ruleName)
def getText(self):
if self.label is None:
- return "<" + self.ruleName + ">"
+ return u"<" + self.ruleName + u">"
else:
- return "<" + self.label + ":" + self.ruleName + ">"
+ return u"<" + self.label + ":" + self.ruleName + u">"
diff --git a/runtime/Python2/src/antlr4/tree/TokenTagToken.py b/runtime/Python2/src/antlr4/tree/TokenTagToken.py
index 2ffc79f6f..dba41f785 100644
--- a/runtime/Python2/src/antlr4/tree/TokenTagToken.py
+++ b/runtime/Python2/src/antlr4/tree/TokenTagToken.py
@@ -24,8 +24,8 @@ class TokenTagToken(CommonToken):
#
def __init__(self, tokenName, type, label=None):
super(TokenTagToken, self).__init__(type=type)
- self.tokenName = tokenName
- self.label = label
+ self.tokenName = unicode(tokenName)
+ self.label = unicode(label)
self._text = self.getText()
#
@@ -36,9 +36,9 @@ class TokenTagToken(CommonToken):
#
def getText(self):
if self.label is None:
- return "<" + self.tokenName + ">"
+ return u"<" + self.tokenName + u">"
else:
- return "<" + self.label + ":" + self.tokenName + ">"
+ return u"<" + self.label + u":" + self.tokenName + u">"
# The implementation for {@link TokenTagToken} returns a string of the form
# {@code tokenName:type}.
diff --git a/runtime/Python2/src/antlr4/tree/Tree.py b/runtime/Python2/src/antlr4/tree/Tree.py
index 26e959612..14b5f29ec 100644
--- a/runtime/Python2/src/antlr4/tree/Tree.py
+++ b/runtime/Python2/src/antlr4/tree/Tree.py
@@ -108,13 +108,13 @@ class TerminalNodeImpl(TerminalNode):
return visitor.visitTerminal(self)
def getText(self):
- return self.symbol.text
+ return unicode(self.symbol.text)
def __unicode__(self):
if self.symbol.type == Token.EOF:
- return ""
+ return u""
else:
- return self.symbol.text
+ return unicode(self.symbol.text)
# Represents a token that was consumed during resynchronization
# rather than during a valid match operation. For example,
diff --git a/runtime/Python3/src/antlr4/Parser.py b/runtime/Python3/src/antlr4/Parser.py
index 03f10a438..c461bbdc0 100644
--- a/runtime/Python3/src/antlr4/Parser.py
+++ b/runtime/Python3/src/antlr4/Parser.py
@@ -227,6 +227,14 @@ class Parser (Recognizer):
listener.exitEveryRule(self._ctx)
+ # Gets the number of syntax errors reported during parsing. This value is
+ # incremented each time {@link #notifyErrorListeners} is called.
+ #
+ # @see #notifyErrorListeners
+ #
+ def getNumberOfSyntaxErrors(self):
+ return self._syntaxErrors
+
def getTokenFactory(self):
return self._input.tokenSource._factory
diff --git a/runtime/Python3/src/antlr4/__init__.py b/runtime/Python3/src/antlr4/__init__.py
index 4eac6c579..37c834202 100644
--- a/runtime/Python3/src/antlr4/__init__.py
+++ b/runtime/Python3/src/antlr4/__init__.py
@@ -12,7 +12,7 @@ from antlr4.atn.LexerATNSimulator import LexerATNSimulator
from antlr4.atn.ParserATNSimulator import ParserATNSimulator
from antlr4.atn.PredictionMode import PredictionMode
from antlr4.PredictionContext import PredictionContextCache
-from antlr4.ParserRuleContext import ParserRuleContext
+from antlr4.ParserRuleContext import RuleContext, ParserRuleContext
from antlr4.tree.Tree import ParseTreeListener, ParseTreeVisitor, ParseTreeWalker, TerminalNode, ErrorNode, RuleNode
from antlr4.error.Errors import RecognitionException, IllegalStateException, NoViableAltException
from antlr4.error.ErrorStrategy import BailErrorStrategy
diff --git a/runtime/Swift/.gitignore b/runtime/Swift/.gitignore
index c54511205..e4a84b226 100644
--- a/runtime/Swift/.gitignore
+++ b/runtime/Swift/.gitignore
@@ -1 +1,4 @@
+.build/
+Antlr4.xcodeproj/
+Tests/Antlr4Tests/gen/
xcuserdata/
diff --git a/runtime/Swift/Package.swift b/runtime/Swift/Package.swift
index 5c2e28b12..0d72a47dd 100644
--- a/runtime/Swift/Package.swift
+++ b/runtime/Swift/Package.swift
@@ -1,3 +1,4 @@
+// swift-tools-version:4.0
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.
@@ -5,15 +6,19 @@
import PackageDescription
let package = Package(
- name: "Antlr4"
-)
-
-products.append(
- Product(
- name: "Antlr4",
- type: .Library(.Dynamic),
- modules: [
- "Antlr4"
- ]
- )
+ name: "Antlr4",
+ products: [
+ .library(
+ name: "Antlr4",
+ type: .dynamic,
+ targets: ["Antlr4"]),
+ ],
+ targets: [
+ .target(
+ name: "Antlr4",
+ dependencies: []),
+ .testTarget(
+ name: "Antlr4Tests",
+ dependencies: ["Antlr4"]),
+ ]
)
diff --git a/runtime/Swift/Sources/Antlr4/ANTLRErrorListener.swift b/runtime/Swift/Sources/Antlr4/ANTLRErrorListener.swift
index 942986b5e..486ae0f2c 100644
--- a/runtime/Swift/Sources/Antlr4/ANTLRErrorListener.swift
+++ b/runtime/Swift/Sources/Antlr4/ANTLRErrorListener.swift
@@ -1,20 +1,23 @@
+///
/// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
/// Use of this file is governed by the BSD 3-clause license that
/// can be found in the LICENSE.txt file in the project root.
/// How to emit recognition errors.
+///
public protocol ANTLRErrorListener: class {
+ ///
/// Upon syntax error, notify any interested parties. This is not how to
- /// recover from errors or compute error messages. {@link org.antlr.v4.runtime.ANTLRErrorStrategy}
+ /// recover from errors or compute error messages. _org.antlr.v4.runtime.ANTLRErrorStrategy_
/// specifies how to recover from syntax errors and how to compute error
/// messages. This listener's job is simply to emit a computed message,
/// though it has enough information to create its own message in many cases.
- ///
- /// The {@link org.antlr.v4.runtime.RecognitionException} is non-null for all syntax errors except
+ ///
+ /// The _RecognitionException_ is non-null for all syntax errors except
/// when we discover mismatched token errors that we can recover from
/// in-line, without returning from the surrounding rule (via the single
- /// token insertion and deletion mechanism).
- ///
+ /// token insertion and deletion mechanism).
+ ///
/// - parameter recognizer:
/// What parser got the error. From this
/// object, you can access the context as well
@@ -22,7 +25,7 @@ public protocol ANTLRErrorListener: class {
/// - parameter offendingSymbol:
/// The offending token in the input token
/// stream, unless recognizer is a lexer (then it's null). If
- /// no viable alternative error, {@code e} has token at which we
+ /// no viable alternative error, `e` has token at which we
/// started production for the decision.
/// - parameter line:
/// The line number in the input where the error occurred.
@@ -35,116 +38,122 @@ public protocol ANTLRErrorListener: class {
/// the reporting of an error. It is null in the case where
/// the parser was able to recover in line without exiting the
/// surrounding rule.
- func syntaxError(_ recognizer: Recognizer,
- _ offendingSymbol: AnyObject?,
- _ line: Int,
- _ charPositionInLine: Int,
- _ msg: String,
- _ e: AnyObject?// RecognitionException?
+ ///
+ func syntaxError(_ recognizer: Recognizer,
+ _ offendingSymbol: AnyObject?,
+ _ line: Int,
+ _ charPositionInLine: Int,
+ _ msg: String,
+ _ e: AnyObject?
)
+ ///
/// This method is called by the parser when a full-context prediction
/// results in an ambiguity.
- ///
- /// Each full-context prediction which does not result in a syntax error
- /// will call either {@link #reportContextSensitivity} or
- /// {@link #reportAmbiguity}.
- ///
- /// When {@code ambigAlts} is not null, it contains the set of potentially
+ ///
+ /// Each full-context prediction which does not result in a syntax error
+ /// will call either _#reportContextSensitivity_ or
+ /// _#reportAmbiguity_.
+ ///
+ /// When `ambigAlts` is not null, it contains the set of potentially
/// viable alternatives identified by the prediction algorithm. When
- /// {@code ambigAlts} is null, use {@link org.antlr.v4.runtime.atn.ATNConfigSet#getAlts} to obtain the
- /// represented alternatives from the {@code configs} argument.
- ///
- /// When {@code exact} is {@code true}, all of the potentially
+ /// `ambigAlts` is null, use _org.antlr.v4.runtime.atn.ATNConfigSet#getAlts_ to obtain the
+ /// represented alternatives from the `configs` argument.
+ ///
+ /// When `exact` is `true`, __all__ of the potentially
/// viable alternatives are truly viable, i.e. this is reporting an exact
- /// ambiguity. When {@code exact} is {@code false}, at least two of
+ /// ambiguity. When `exact` is `false`, __at least two__ of
/// the potentially viable alternatives are viable for the current input, but
/// the prediction algorithm terminated as soon as it determined that at
- /// least the minimum potentially viable alternative is truly
- /// viable.
- ///
- /// When the {@link org.antlr.v4.runtime.atn.PredictionMode#LL_EXACT_AMBIG_DETECTION} prediction
+ /// least the __minimum__ potentially viable alternative is truly
+ /// viable.
+ ///
+ /// When the _org.antlr.v4.runtime.atn.PredictionMode#LL_EXACT_AMBIG_DETECTION_ prediction
/// mode is used, the parser is required to identify exact ambiguities so
- /// {@code exact} will always be {@code true}.
- ///
- /// This method is not used by lexers.
- ///
+ /// `exact` will always be `true`.
+ ///
+ /// This method is not used by lexers.
+ ///
/// - parameter recognizer: the parser instance
/// - parameter dfa: the DFA for the current decision
/// - parameter startIndex: the input index where the decision started
/// - parameter stopIndex: the input input where the ambiguity was identified
- /// - parameter exact: {@code true} if the ambiguity is exactly known, otherwise
- /// {@code false}. This is always {@code true} when
- /// {@link org.antlr.v4.runtime.atn.PredictionMode#LL_EXACT_AMBIG_DETECTION} is used.
- /// - parameter ambigAlts: the potentially ambiguous alternatives, or {@code null}
+ /// - parameter exact: `true` if the ambiguity is exactly known, otherwise
+ /// `false`. This is always `true` when
+ /// _org.antlr.v4.runtime.atn.PredictionMode#LL_EXACT_AMBIG_DETECTION_ is used.
+ /// - parameter ambigAlts: the potentially ambiguous alternatives, or `null`
/// to indicate that the potentially ambiguous alternatives are the complete
- /// set of represented alternatives in {@code configs}
+ /// set of represented alternatives in `configs`
/// - parameter configs: the ATN configuration set where the ambiguity was
/// identified
+ ///
func reportAmbiguity(_ recognizer: Parser,
_ dfa: DFA,
_ startIndex: Int,
_ stopIndex: Int,
_ exact: Bool,
_ ambigAlts: BitSet,
- _ configs: ATNConfigSet) throws
+ _ configs: ATNConfigSet)
+ ///
/// This method is called when an SLL conflict occurs and the parser is about
/// to use the full context information to make an LL decision.
- ///
- /// If one or more configurations in {@code configs} contains a semantic
+ ///
+ /// If one or more configurations in `configs` contains a semantic
/// predicate, the predicates are evaluated before this method is called. The
/// subset of alternatives which are still viable after predicates are
- /// evaluated is reported in {@code conflictingAlts}.
- ///
- /// This method is not used by lexers.
- ///
+ /// evaluated is reported in `conflictingAlts`.
+ ///
+ /// This method is not used by lexers.
+ ///
/// - parameter recognizer: the parser instance
/// - parameter dfa: the DFA for the current decision
/// - parameter startIndex: the input index where the decision started
/// - parameter stopIndex: the input index where the SLL conflict occurred
/// - parameter conflictingAlts: The specific conflicting alternatives. If this is
- /// {@code null}, the conflicting alternatives are all alternatives
- /// represented in {@code configs}. At the moment, conflictingAlts is non-null
+ /// `null`, the conflicting alternatives are all alternatives
+ /// represented in `configs`. At the moment, conflictingAlts is non-null
/// (for the reference implementation, but Sam's optimized version can see this
/// as null).
/// - parameter configs: the ATN configuration set where the SLL conflict was
/// detected
+ ///
func reportAttemptingFullContext(_ recognizer: Parser,
_ dfa: DFA,
_ startIndex: Int,
_ stopIndex: Int,
_ conflictingAlts: BitSet?,
- _ configs: ATNConfigSet) throws
+ _ configs: ATNConfigSet)
+ ///
/// This method is called by the parser when a full-context prediction has a
/// unique result.
- ///
- /// Each full-context prediction which does not result in a syntax error
- /// will call either {@link #reportContextSensitivity} or
- /// {@link #reportAmbiguity}.
- ///
- /// For prediction implementations that only evaluate full-context
+ ///
+ /// Each full-context prediction which does not result in a syntax error
+ /// will call either _#reportContextSensitivity_ or
+ /// _#reportAmbiguity_.
+ ///
+ /// For prediction implementations that only evaluate full-context
/// predictions when an SLL conflict is found (including the default
- /// {@link org.antlr.v4.runtime.atn.ParserATNSimulator} implementation), this method reports cases
+ /// _org.antlr.v4.runtime.atn.ParserATNSimulator_ implementation), this method reports cases
/// where SLL conflicts were resolved to unique full-context predictions,
/// i.e. the decision was context-sensitive. This report does not necessarily
/// indicate a problem, and it may appear even in completely unambiguous
- /// grammars.
- ///
- /// {@code configs} may have more than one represented alternative if the
+ /// grammars.
+ ///
+ /// `configs` may have more than one represented alternative if the
/// full-context prediction algorithm does not evaluate predicates before
/// beginning the full-context prediction. In all cases, the final prediction
- /// is passed as the {@code prediction} argument.
- ///
- /// Note that the definition of "context sensitivity" in this method
- /// differs from the concept in {@link org.antlr.v4.runtime.atn.DecisionInfo#contextSensitivities}.
+ /// is passed as the `prediction` argument.
+ ///
+ /// Note that the definition of "context sensitivity" in this method
+ /// differs from the concept in _org.antlr.v4.runtime.atn.DecisionInfo#contextSensitivities_.
/// This method reports all instances where an SLL conflict occurred but LL
/// parsing produced a unique result, whether or not that unique result
- /// matches the minimum alternative in the SLL conflicting set.
- ///
- /// This method is not used by lexers.
- ///
+ /// matches the minimum alternative in the SLL conflicting set.
+ ///
+ /// This method is not used by lexers.
+ ///
/// - parameter recognizer: the parser instance
/// - parameter dfa: the DFA for the current decision
/// - parameter startIndex: the input index where the decision started
@@ -153,10 +162,11 @@ public protocol ANTLRErrorListener: class {
/// - parameter prediction: the unambiguous result of the full-context prediction
/// - parameter configs: the ATN configuration set where the unambiguous prediction
/// was determined
+ ///
func reportContextSensitivity(_ recognizer: Parser,
_ dfa: DFA,
_ startIndex: Int,
_ stopIndex: Int,
_ prediction: Int,
- _ configs: ATNConfigSet) throws
+ _ configs: ATNConfigSet)
}
diff --git a/runtime/Swift/Sources/Antlr4/ANTLRErrorStrategy.swift b/runtime/Swift/Sources/Antlr4/ANTLRErrorStrategy.swift
index 541a2a3b4..7ca228ff8 100644
--- a/runtime/Swift/Sources/Antlr4/ANTLRErrorStrategy.swift
+++ b/runtime/Swift/Sources/Antlr4/ANTLRErrorStrategy.swift
@@ -1,99 +1,119 @@
+///
+///
/// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
/// Use of this file is governed by the BSD 3-clause license that
/// can be found in the LICENSE.txt file in the project root.
-/// The interface for defining strategies to deal with syntax errors encountered
-/// during a parse by ANTLR-generated parsers. We distinguish between three
-/// different kinds of errors:
-///
-///
-/// - The parser could not figure out which path to take in the ATN (none of
-/// the available alternatives could possibly match)
-/// - The current input does not match what we were looking for
-/// - A predicate evaluated to false
-///
-///
-/// Implementations of this interface report syntax errors by calling
-/// {@link org.antlr.v4.runtime.Parser#notifyErrorListeners}.
-///
-/// TODO: what to do about lexers
+///
+///
+
+///
+///
+/// The interface for defining strategies to deal with syntax errors
+/// encountered during a parse by ANTLR-generated parsers. We distinguish between three
+/// different kinds of errors:
+///
+/// * The parser could not figure out which path to take in the ATN (none of
+/// the available alternatives could possibly match)
+/// * The current input does not match what we were looking for
+/// * A predicate evaluated to false
+///
+/// Implementations of this interface report syntax errors by calling
+/// _org.antlr.v4.runtime.Parser#notifyErrorListeners_.
+///
+/// TODO: what to do about lexers
+///
public protocol ANTLRErrorStrategy {
- /// Reset the error handler state for the specified {@code recognizer}.
+ ///
+ /// Reset the error handler state for the specified `recognizer`.
/// - parameter recognizer: the parser instance
+ ///
func reset(_ recognizer: Parser)
+ ///
/// This method is called when an unexpected symbol is encountered during an
- /// inline match operation, such as {@link org.antlr.v4.runtime.Parser#match}. If the error
+ /// inline match operation, such as _org.antlr.v4.runtime.Parser#match_. If the error
/// strategy successfully recovers from the match failure, this method
- /// returns the {@link org.antlr.v4.runtime.Token} instance which should be treated as the
+ /// returns the _org.antlr.v4.runtime.Token_ instance which should be treated as the
/// successful result of the match.
- ///
- /// This method handles the consumption of any tokens - the caller should
- /// not call {@link org.antlr.v4.runtime.Parser#consume} after a successful recovery.
- ///
- /// Note that the calling code will not report an error if this method
+ ///
+ /// This method handles the consumption of any tokens - the caller should
+ /// __not__ call _org.antlr.v4.runtime.Parser#consume_ after a successful recovery.
+ ///
+ /// Note that the calling code will not report an error if this method
/// returns successfully. The error strategy implementation is responsible
- /// for calling {@link org.antlr.v4.runtime.Parser#notifyErrorListeners} as appropriate.
- ///
+ /// for calling _org.antlr.v4.runtime.Parser#notifyErrorListeners_ as appropriate.
+ ///
/// - parameter recognizer: the parser instance
- /// - org.antlr.v4.runtime.RecognitionException if the error strategy was not able to
+ /// - throws: _RecognitionException_ if the error strategy was not able to
/// recover from the unexpected input symbol
+ ///
@discardableResult
- func recoverInline(_ recognizer: Parser) throws -> Token // RecognitionException;
+ func recoverInline(_ recognizer: Parser) throws -> Token
- /// This method is called to recover from exception {@code e}. This method is
- /// called after {@link #reportError} by the default exception handler
+ ///
+ /// This method is called to recover from exception `e`. This method is
+ /// called after _#reportError_ by the default exception handler
/// generated for a rule method.
- ///
+ ///
/// - seealso: #reportError
- ///
+ ///
/// - parameter recognizer: the parser instance
/// - parameter e: the recognition exception to recover from
- /// - org.antlr.v4.runtime.RecognitionException if the error strategy could not recover from
+ /// - throws: _RecognitionException_ if the error strategy could not recover from
/// the recognition exception
- func recover(_ recognizer: Parser, _ e: AnyObject) throws // RecognitionException;
+ ///
+ func recover(_ recognizer: Parser, _ e: RecognitionException) throws
+ ///
/// This method provides the error handler with an opportunity to handle
/// syntactic or semantic errors in the input stream before they result in a
- /// {@link org.antlr.v4.runtime.RecognitionException}.
- ///
- /// The generated code currently contains calls to {@link #sync} after
- /// entering the decision state of a closure block ({@code (...)*} or
- /// {@code (...)+}).
- ///
- /// For an implementation based on Jim Idle's "magic sync" mechanism, see
- /// {@link org.antlr.v4.runtime.DefaultErrorStrategy#sync}.
- ///
+ /// _org.antlr.v4.runtime.RecognitionException_.
+ ///
+ /// The generated code currently contains calls to _#sync_ after
+ /// entering the decision state of a closure block (`(...)*` or
+ /// `(...)+`).
+ ///
+ /// For an implementation based on Jim Idle's "magic sync" mechanism, see
+ /// _org.antlr.v4.runtime.DefaultErrorStrategy#sync_.
+ ///
/// - seealso: org.antlr.v4.runtime.DefaultErrorStrategy#sync
- ///
+ ///
/// - parameter recognizer: the parser instance
- /// - org.antlr.v4.runtime.RecognitionException if an error is detected by the error
+ /// - throws: _RecognitionException_ if an error is detected by the error
/// strategy but cannot be automatically recovered at the current state in
/// the parsing process
- func sync(_ recognizer: Parser) throws // RecognitionException;
+ ///
+ func sync(_ recognizer: Parser) throws
+ ///
/// Tests whether or not recognizer} is in the process of recovering
- /// from an error. In error recovery mode, {@link org.antlr.v4.runtime.Parser#consume} adds
+ /// from an error. In error recovery mode, _org.antlr.v4.runtime.Parser#consume_ adds
/// symbols to the parse tree by calling
- /// {@link Parser#createErrorNode(ParserRuleContext, Token)} then
- /// {@link ParserRuleContext#addErrorNode(ErrorNode)} instead of
- /// {@link Parser#createTerminalNode(ParserRuleContext, Token)}.
- ///
+ /// _Parser#createErrorNode(ParserRuleContext, Token)_ then
+ /// _ParserRuleContext#addErrorNode(ErrorNode)_ instead of
+ /// _Parser#createTerminalNode(ParserRuleContext, Token)_.
+ ///
/// - parameter recognizer: the parser instance
- /// - returns: {@code true} if the parser is currently recovering from a parse
- /// error, otherwise {@code false}
+ /// - returns: `true` if the parser is currently recovering from a parse
+ /// error, otherwise `false`
+ ///
func inErrorRecoveryMode(_ recognizer: Parser) -> Bool
+ ///
/// This method is called by when the parser successfully matches an input
/// symbol.
- ///
+ ///
/// - parameter recognizer: the parser instance
+ ///
func reportMatch(_ recognizer: Parser)
- /// Report any kind of {@link org.antlr.v4.runtime.RecognitionException}. This method is called by
+ ///
+ /// Report any kind of _org.antlr.v4.runtime.RecognitionException_. This method is called by
/// the default exception handler generated for a rule method.
- ///
+ ///
/// - parameter recognizer: the parser instance
/// - parameter e: the recognition exception to report
- func reportError(_ recognizer: Parser, _ e: AnyObject)
+ ///
+ func reportError(_ recognizer: Parser, _ e: RecognitionException)
}
diff --git a/runtime/Swift/Sources/Antlr4/ANTLRFileStream.swift b/runtime/Swift/Sources/Antlr4/ANTLRFileStream.swift
index 053b6348d..9ed7ac9ef 100644
--- a/runtime/Swift/Sources/Antlr4/ANTLRFileStream.swift
+++ b/runtime/Swift/Sources/Antlr4/ANTLRFileStream.swift
@@ -1,8 +1,10 @@
+///
/// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
/// Use of this file is governed by the BSD 3-clause license that
/// can be found in the LICENSE.txt file in the project root.
-/// This is an {@link org.antlr.v4.runtime.ANTLRInputStream} that is loaded from a file all at once
+/// This is an _org.antlr.v4.runtime.ANTLRInputStream_ that is loaded from a file all at once
/// when you construct the object.
+///
import Foundation
@@ -10,7 +12,6 @@ public class ANTLRFileStream: ANTLRInputStream {
internal var fileName: String
public convenience override init(_ fileName: String) {
- // throws; IOException
self.init(fileName, nil)
}
diff --git a/runtime/Swift/Sources/Antlr4/ANTLRInputStream.swift b/runtime/Swift/Sources/Antlr4/ANTLRInputStream.swift
index 3b73981fd..17bbd8096 100644
--- a/runtime/Swift/Sources/Antlr4/ANTLRInputStream.swift
+++ b/runtime/Swift/Sources/Antlr4/ANTLRInputStream.swift
@@ -1,26 +1,36 @@
+///
/// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
/// Use of this file is governed by the BSD 3-clause license that
/// can be found in the LICENSE.txt file in the project root.
-/// Vacuum all input from a {@link java.io.Reader}/{@link java.io.InputStream} and then treat it
-/// like a {@code char[]} buffer. Can also pass in a {@link String} or
-/// {@code char[]} to use.
-///
-/// If you need encoding, pass in stream/reader with correct encoding.
+/// Vacuum all input from a _java.io.Reader_/_java.io.InputStream_ and then treat it
+/// like a `char[]` buffer. Can also pass in a _String_ or
+/// `char[]` to use.
+///
+/// If you need encoding, pass in stream/reader with correct encoding.
+///
public class ANTLRInputStream: CharStream {
public static let READ_BUFFER_SIZE: Int = 1024
public static let INITIAL_BUFFER_SIZE: Int = 1024
+ ///
/// The data being scanned
+ ///
internal var data: [Character]
+ ///
/// How many characters are actually in the buffer
+ ///
internal var n: Int
+ ///
/// 0..n-1 index into string of next char
+ ///
internal var p: Int = 0
+ ///
/// What is name or source of this char stream?
+ ///
public var name: String?
public init() {
@@ -28,87 +38,26 @@ public class ANTLRInputStream: CharStream {
data = [Character]()
}
+ ///
/// Copy data in string to a local char array
+ ///
public init(_ input: String) {
self.data = Array(input.characters) // input.toCharArray();
self.n = input.length
}
+ ///
/// This is the preferred constructor for strings as no data is copied
+ ///
public init(_ data: [Character], _ numberOfActualCharsInArray: Int) {
self.data = data
self.n = numberOfActualCharsInArray
}
- /// public convenience init(_ r : Reader) throws; IOException {
- /// self.init(r, INITIAL_BUFFER_SIZE, READ_BUFFER_SIZE);
- /// }
- ///
- /// public convenience init(_ r : Reader, _ initialSize : Int) throws; IOException {
- /// self.init(r, initialSize, READ_BUFFER_SIZE);
- /// }
- ///
- /// public init(_ r : Reader, _ initialSize : Int, _ readChunkSize : Int) throws; IOException {
- /// load(r, initialSize, readChunkSize);
- /// }
- ///
- /// public convenience init(_ input : InputStream) throws; IOException {
- /// self.init(InputStreamReader(input), INITIAL_BUFFER_SIZE);
- /// }
- ///
- /// public convenience init(_ input : InputStream, _ initialSize : Int) throws; IOException {
- /// self.init(InputStreamReader(input), initialSize);
- /// }
- ///
- /// public convenience init(_ input : InputStream, _ initialSize : Int, _ readChunkSize : Int) throws; IOException {
- /// self.init(InputStreamReader(input), initialSize, readChunkSize);
- /// }
- ///
- /// public func load(r : Reader, _ size : Int, _ readChunkSize : Int)
- /// throws; IOException
- /// {
- /// if ( r==nil ) {
- /// return;
- /// }
- /// if ( size<=0 ) {
- /// size = INITIAL_BUFFER_SIZE;
- /// }
- /// if ( readChunkSize<=0 ) {
- /// readChunkSize = READ_BUFFER_SIZE;
- /// }
- /// // print("load "+size+" in chunks of "+readChunkSize);
- /// try {
- /// // alloc initial buffer size.
- /// data = new char[size];
- /// // read all the data in chunks of readChunkSize
- /// var numRead : Int=0;
- /// var p : Int = 0;
- /// do {
- /// if ( p+readChunkSize > data.length ) { // overflow?
- /// // print("### overflow p="+p+", data.length="+data.length);
- /// data = Arrays.copyOf(data, data.length * 2);
- /// }
- /// numRead = r.read(data, p, readChunkSize);
- /// // print("read "+numRead+" chars; p was "+p+" is now "+(p+numRead));
- /// p += numRead;
- /// } while (numRead!=-1); // while not EOF
- /// // set the actual size of the data available;
- /// // EOF subtracted one above in p+=numRead; add one back
- /// n = p+1;
- /// //print("n="+n);
- /// }
- /// finally {
- /// r.close();
- /// }
- /// }
- /// Reset the stream so that it's in the same state it was
- /// when the object was created *except* the data array is not
- /// touched.
public func reset() {
p = 0
}
-
public func consume() throws {
if p >= n {
assert(LA(1) == ANTLRInputStream.EOF, "Expected: LA(1)==IntStream.EOF")
@@ -124,7 +73,6 @@ public class ANTLRInputStream: CharStream {
}
}
-
public func LA(_ i: Int) -> Int {
var i = i
if i == 0 {
@@ -150,9 +98,11 @@ public class ANTLRInputStream: CharStream {
return LA(i)
}
+ ///
/// Return the current input symbol index 0..n where n indicates the
/// last symbol has been read. The index is the index of char to
/// be returned from LA(1).
+ ///
public func index() -> Int {
return p
}
@@ -161,7 +111,9 @@ public class ANTLRInputStream: CharStream {
return n
}
+ ///
/// mark/release do nothing; we have entire buffer
+ ///
public func mark() -> Int {
return -1
@@ -170,8 +122,10 @@ public class ANTLRInputStream: CharStream {
public func release(_ marker: Int) {
}
+ ///
/// consume() ahead until p==index; can't just set p=index as we must
/// update line and charPositionInLine. If we seek backwards, just set p
+ ///
public func seek(_ index: Int) throws {
var index = index
@@ -186,7 +140,6 @@ public class ANTLRInputStream: CharStream {
}
}
-
public func getText(_ interval: Interval) -> String {
let start: Int = interval.a
var stop: Int = interval.b
@@ -201,7 +154,6 @@ public class ANTLRInputStream: CharStream {
return String(data[start ..< (start + count)])
}
-
public func getSourceName() -> String {
guard let name = name , !name.isEmpty else {
return ANTLRInputStream.UNKNOWN_SOURCE_NAME
@@ -209,7 +161,6 @@ public class ANTLRInputStream: CharStream {
return name
}
-
public func toString() -> String {
return String(data)
}
diff --git a/runtime/Swift/Sources/Antlr4/BailErrorStrategy.swift b/runtime/Swift/Sources/Antlr4/BailErrorStrategy.swift
index eca1de705..d1e81140c 100644
--- a/runtime/Swift/Sources/Antlr4/BailErrorStrategy.swift
+++ b/runtime/Swift/Sources/Antlr4/BailErrorStrategy.swift
@@ -1,69 +1,74 @@
+///
/// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
/// Use of this file is governed by the BSD 3-clause license that
/// can be found in the LICENSE.txt file in the project root.
+///
-
-/// This implementation of {@link org.antlr.v4.runtime.ANTLRErrorStrategy} responds to syntax errors
+///
+///
+/// This implementation of _org.antlr.v4.runtime.ANTLRErrorStrategy_ responds to syntax errors
/// by immediately canceling the parse operation with a
-/// {@link org.antlr.v4.runtime.misc.ParseCancellationException}. The implementation ensures that the
-/// {@link org.antlr.v4.runtime.ParserRuleContext#exception} field is set for all parse tree nodes
+/// _org.antlr.v4.runtime.misc.ParseCancellationException_. The implementation ensures that the
+/// _org.antlr.v4.runtime.ParserRuleContext#exception_ field is set for all parse tree nodes
/// that were not completed prior to encountering the error.
-///
-///
-/// This error strategy is useful in the following scenarios.
-///
-///
-/// - Two-stage parsing: This error strategy allows the first
+///
+/// This error strategy is useful in the following scenarios.
+///
+/// * __Two-stage parsing:__ This error strategy allows the first
/// stage of two-stage parsing to immediately terminate if an error is
/// encountered, and immediately fall back to the second stage. In addition to
/// avoiding wasted work by attempting to recover from errors here, the empty
-/// implementation of {@link org.antlr.v4.runtime.BailErrorStrategy#sync} improves the performance of
-/// the first stage.
-/// - Silent validation: When syntax errors are not being
+/// implementation of _org.antlr.v4.runtime.BailErrorStrategy#sync_ improves the performance of
+/// the first stage.
+///
+/// * __Silent validation:__ When syntax errors are not being
/// reported or logged, and the parse result is simply ignored if errors occur,
-/// the {@link org.antlr.v4.runtime.BailErrorStrategy} avoids wasting work on recovering from errors
-/// when the result will be ignored either way.
-///
-///
-///
-/// {@code myparser.setErrorHandler(new BailErrorStrategy());}
-///
+/// the _org.antlr.v4.runtime.BailErrorStrategy_ avoids wasting work on recovering from errors
+/// when the result will be ignored either way.
+///
+/// `myparser.setErrorHandler(new BailErrorStrategy());`
+///
/// - seealso: org.antlr.v4.runtime.Parser#setErrorHandler(org.antlr.v4.runtime.ANTLRErrorStrategy)
-
+///
+///
public class BailErrorStrategy: DefaultErrorStrategy {
public override init(){}
- /// Instead of recovering from exception {@code e}, re-throw it wrapped
- /// in a {@link org.antlr.v4.runtime.misc.ParseCancellationException} so it is not caught by the
- /// rule function catches. Use {@link Exception#getCause()} to get the
- /// original {@link org.antlr.v4.runtime.RecognitionException}.
- override
- public func recover(_ recognizer: Parser, _ e: AnyObject) throws {
- var context: ParserRuleContext? = recognizer.getContext()
- while let contextWrap = context{
+ ///
+ /// Instead of recovering from exception `e`, re-throw it wrapped
+ /// in a _org.antlr.v4.runtime.misc.ParseCancellationException_ so it is not caught by the
+ /// rule function catches. Use _Exception#getCause()_ to get the
+ /// original _org.antlr.v4.runtime.RecognitionException_.
+ ///
+ override public func recover(_ recognizer: Parser, _ e: RecognitionException) throws {
+ var context = recognizer.getContext()
+ while let contextWrap = context {
contextWrap.exception = e
context = (contextWrap.getParent() as? ParserRuleContext)
}
- throw ANTLRException.recognition(e: e)
+ throw ANTLRException.recognition(e: e)
}
+ ///
/// Make sure we don't attempt to recover inline; if the parser
/// successfully recovers, it won't throw an exception.
+ ///
override
public func recoverInline(_ recognizer: Parser) throws -> Token {
- let e: InputMismatchException = try InputMismatchException(recognizer)
- var context: ParserRuleContext? = recognizer.getContext()
+ let e = InputMismatchException(recognizer)
+ var context = recognizer.getContext()
while let contextWrap = context {
contextWrap.exception = e
context = (contextWrap.getParent() as? ParserRuleContext)
}
- throw ANTLRException.recognition(e: e)
-
+ throw ANTLRException.recognition(e: e)
}
+ ///
/// Make sure we don't attempt to recover from problems in subrules.
+ ///
override
public func sync(_ recognizer: Parser) {
}
diff --git a/runtime/Swift/Sources/Antlr4/BaseErrorListener.swift b/runtime/Swift/Sources/Antlr4/BaseErrorListener.swift
index 5a4292c4e..8db84a00b 100644
--- a/runtime/Swift/Sources/Antlr4/BaseErrorListener.swift
+++ b/runtime/Swift/Sources/Antlr4/BaseErrorListener.swift
@@ -1,24 +1,28 @@
+///
/// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
/// Use of this file is governed by the BSD 3-clause license that
/// can be found in the LICENSE.txt file in the project root.
+///
-/// Provides an empty default implementation of {@link org.antlr.v4.runtime.ANTLRErrorListener}. The
+///
+/// Provides an empty default implementation of _org.antlr.v4.runtime.ANTLRErrorListener_. The
/// default implementation of each method does nothing, but can be overridden as
/// necessary.
-///
+///
/// - Sam Harwell
+///
open class BaseErrorListener: ANTLRErrorListener {
public init() {
}
- open func syntaxError(_ recognizer: Recognizer,
- _ offendingSymbol: AnyObject?,
- _ line: Int,
- _ charPositionInLine: Int,
- _ msg: String,
- _ e: AnyObject?//RecognitionException
+ open func syntaxError(_ recognizer: Recognizer,
+ _ offendingSymbol: AnyObject?,
+ _ line: Int,
+ _ charPositionInLine: Int,
+ _ msg: String,
+ _ e: AnyObject?
) {
}
@@ -29,7 +33,7 @@ open class BaseErrorListener: ANTLRErrorListener {
_ stopIndex: Int,
_ exact: Bool,
_ ambigAlts: BitSet,
- _ configs: ATNConfigSet) throws {
+ _ configs: ATNConfigSet) {
}
@@ -38,7 +42,7 @@ open class BaseErrorListener: ANTLRErrorListener {
_ startIndex: Int,
_ stopIndex: Int,
_ conflictingAlts: BitSet?,
- _ configs: ATNConfigSet) throws {
+ _ configs: ATNConfigSet) {
}
@@ -47,6 +51,6 @@ open class BaseErrorListener: ANTLRErrorListener {
_ startIndex: Int,
_ stopIndex: Int,
_ prediction: Int,
- _ configs: ATNConfigSet) throws {
+ _ configs: ATNConfigSet) {
}
}
diff --git a/runtime/Swift/Sources/Antlr4/BufferedTokenStream.swift b/runtime/Swift/Sources/Antlr4/BufferedTokenStream.swift
index 8ea114821..2805f8855 100644
--- a/runtime/Swift/Sources/Antlr4/BufferedTokenStream.swift
+++ b/runtime/Swift/Sources/Antlr4/BufferedTokenStream.swift
@@ -1,52 +1,63 @@
+///
/// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
/// Use of this file is governed by the BSD 3-clause license that
/// can be found in the LICENSE.txt file in the project root.
+///
-/// This implementation of {@link org.antlr.v4.runtime.TokenStream} loads tokens from a
-/// {@link org.antlr.v4.runtime.TokenSource} on-demand, and places the tokens in a buffer to provide
+///
+/// This implementation of _org.antlr.v4.runtime.TokenStream_ loads tokens from a
+/// _org.antlr.v4.runtime.TokenSource_ on-demand, and places the tokens in a buffer to provide
/// access to any previous token by index.
-///
-///
-/// This token stream ignores the value of {@link org.antlr.v4.runtime.Token#getChannel}. If your
+///
+///
+/// This token stream ignores the value of _org.antlr.v4.runtime.Token#getChannel_. If your
/// parser requires the token stream filter tokens to only those on a particular
-/// channel, such as {@link org.antlr.v4.runtime.Token#DEFAULT_CHANNEL} or
-/// {@link org.antlr.v4.runtime.Token#HIDDEN_CHANNEL}, use a filtering token stream such a
-/// {@link org.antlr.v4.runtime.CommonTokenStream}.
+/// channel, such as _org.antlr.v4.runtime.Token#DEFAULT_CHANNEL_ or
+/// _org.antlr.v4.runtime.Token#HIDDEN_CHANNEL_, use a filtering token stream such a
+/// _org.antlr.v4.runtime.CommonTokenStream_.
+///
public class BufferedTokenStream: TokenStream {
- /// The {@link org.antlr.v4.runtime.TokenSource} from which tokens for this stream are fetched.
+ ///
+ /// The _org.antlr.v4.runtime.TokenSource_ from which tokens for this stream are fetched.
+ ///
internal var tokenSource: TokenSource
+ ///
/// A collection of all tokens fetched from the token source. The list is
- /// considered a complete view of the input once {@link #fetchedEOF} is set
- /// to {@code true}.
+ /// considered a complete view of the input once _#fetchedEOF_ is set
+ /// to `true`.
+ ///
internal var tokens: Array = Array()
// Array(100
- /// The index into {@link #tokens} of the current token (next token to
- /// {@link #consume}). {@link #tokens}{@code [}{@link #p}{@code ]} should be
- /// {@link #LT LT(1)}.
- ///
- /// This field is set to -1 when the stream is first constructed or when
- /// {@link #setTokenSource} is called, indicating that the first token has
+ ///
+ /// The index into _#tokens_ of the current token (next token to
+ /// _#consume_). _#tokens_`[`_#p_`]` should be
+ /// _#LT LT(1)_.
+ ///
+ /// This field is set to -1 when the stream is first constructed or when
+ /// _#setTokenSource_ is called, indicating that the first token has
/// not yet been fetched from the token source. For additional information,
- /// see the documentation of {@link org.antlr.v4.runtime.IntStream} for a description of
- /// Initializing Methods.
+ /// see the documentation of _org.antlr.v4.runtime.IntStream_ for a description of
+ /// Initializing Methods.
+ ///
internal var p: Int = -1
- /// Indicates whether the {@link org.antlr.v4.runtime.Token#EOF} token has been fetched from
- /// {@link #tokenSource} and added to {@link #tokens}. This field improves
+ ///
+ /// Indicates whether the _org.antlr.v4.runtime.Token#EOF_ token has been fetched from
+ /// _#tokenSource_ and added to _#tokens_. This field improves
/// performance for the following cases:
- ///
- ///
- /// - {@link #consume}: The lookahead check in {@link #consume} to prevent
+ ///
+ /// * _#consume_: The lookahead check in _#consume_ to prevent
/// consuming the EOF symbol is optimized by checking the values of
- /// {@link #fetchedEOF} and {@link #p} instead of calling {@link #LA}.
- /// - {@link #fetch}: The check to prevent adding multiple EOF symbols into
- /// {@link #tokens} is trivial with this field.
- ///
- ///
- ///
- /// line line:charPositionInLine msg
- ///
- override
- public func syntaxError(_ recognizer: Recognizer,
- _ offendingSymbol: AnyObject?,
- _ line: Int,
- _ charPositionInLine: Int,
- _ msg: String,
- _ e: AnyObject?
+ ///
+ ///
+ /// This implementation prints messages to _System#err_ containing the
+ /// values of `line`, `charPositionInLine`, and `msg` using
+ /// the following format.
+ ///
+ /// line __line__:__charPositionInLine__ __msg__
+ ///
+ ///
+ override public func syntaxError(_ recognizer: Recognizer,
+ _ offendingSymbol: AnyObject?,
+ _ line: Int,
+ _ charPositionInLine: Int,
+ _ msg: String,
+ _ e: AnyObject?
) {
if Parser.ConsoleError {
errPrint("line \(line):\(charPositionInLine) \(msg)")
diff --git a/runtime/Swift/Sources/Antlr4/DefaultErrorStrategy.swift b/runtime/Swift/Sources/Antlr4/DefaultErrorStrategy.swift
index a7221b8dc..d6ac65767 100644
--- a/runtime/Swift/Sources/Antlr4/DefaultErrorStrategy.swift
+++ b/runtime/Swift/Sources/Antlr4/DefaultErrorStrategy.swift
@@ -1,92 +1,96 @@
+///
/// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
/// Use of this file is governed by the BSD 3-clause license that
/// can be found in the LICENSE.txt file in the project root.
+///
-
-
-/// This is the default implementation of {@link org.antlr.v4.runtime.ANTLRErrorStrategy} used for
+///
+/// This is the default implementation of _org.antlr.v4.runtime.ANTLRErrorStrategy_ used for
/// error reporting and recovery in ANTLR parsers.
+///
import Foundation
public class DefaultErrorStrategy: ANTLRErrorStrategy {
+ ///
/// Indicates whether the error strategy is currently "recovering from an
/// error". This is used to suppress reporting multiple error messages while
/// attempting to recover from a detected syntax error.
- ///
+ ///
/// - seealso: #inErrorRecoveryMode
+ ///
internal var errorRecoveryMode: Bool = false
+ ///
/// The index into the input stream where the last error occurred.
/// This is used to prevent infinite loops where an error is found
/// but no token is consumed during recovery...another error is found,
/// ad nauseum. This is a failsafe mechanism to guarantee that at least
/// one token/tree node is consumed for two errors.
+ ///
internal var lastErrorIndex: Int = -1
internal var lastErrorStates: IntervalSet?
- /// {@inheritDoc}
- ///
- /// The default implementation simply calls {@link #endErrorCondition} to
- /// ensure that the handler is not in error recovery mode.
-
+ ///
+ /// The default implementation simply calls _#endErrorCondition_ to
+ /// ensure that the handler is not in error recovery mode.
+ ///
public func reset(_ recognizer: Parser) {
endErrorCondition(recognizer)
}
+ ///
/// This method is called to enter error recovery mode when a recognition
/// exception is reported.
- ///
+ ///
/// - parameter recognizer: the parser instance
+ ///
internal func beginErrorCondition(_ recognizer: Parser) {
errorRecoveryMode = true
}
- /// {@inheritDoc}
-
public func inErrorRecoveryMode(_ recognizer: Parser) -> Bool {
return errorRecoveryMode
}
+ ///
/// This method is called to leave error recovery mode after recovering from
/// a recognition exception.
- ///
+ ///
/// - parameter recognizer:
+ ///
internal func endErrorCondition(_ recognizer: Parser) {
errorRecoveryMode = false
lastErrorStates = nil
lastErrorIndex = -1
}
- /// {@inheritDoc}
- ///
- /// The default implementation simply calls {@link #endErrorCondition}.
-
+ ///
+ /// The default implementation simply calls _#endErrorCondition_.
+ ///
public func reportMatch(_ recognizer: Parser) {
endErrorCondition(recognizer)
}
- /// {@inheritDoc}
- ///
- /// The default implementation returns immediately if the handler is already
- /// in error recovery mode. Otherwise, it calls {@link #beginErrorCondition}
- /// and dispatches the reporting task based on the runtime type of {@code e}
- /// according to the following table.
- ///
- ///
- /// - {@link org.antlr.v4.runtime.NoViableAltException}: Dispatches the call to
- /// {@link #reportNoViableAlternative}
- /// - {@link org.antlr.v4.runtime.InputMismatchException}: Dispatches the call to
- /// {@link #reportInputMismatch}
- /// - {@link org.antlr.v4.runtime.FailedPredicateException}: Dispatches the call to
- /// {@link #reportFailedPredicate}
- /// - All other types: calls {@link org.antlr.v4.runtime.Parser#notifyErrorListeners} to report
- /// the exception
- ///
-
+ ///
+ ///
+ /// The default implementation returns immediately if the handler is already
+ /// in error recovery mode. Otherwise, it calls _#beginErrorCondition_
+ /// and dispatches the reporting task based on the runtime type of `e`
+ /// according to the following table.
+ ///
+ /// * _org.antlr.v4.runtime.NoViableAltException_: Dispatches the call to
+ /// _#reportNoViableAlternative_
+ /// * _org.antlr.v4.runtime.InputMismatchException_: Dispatches the call to
+ /// _#reportInputMismatch_
+ /// * _org.antlr.v4.runtime.FailedPredicateException_: Dispatches the call to
+ /// _#reportFailedPredicate_
+ /// * All other types: calls _org.antlr.v4.runtime.Parser#notifyErrorListeners_ to report
+ /// the exception
+ ///
public func reportError(_ recognizer: Parser,
- _ e: AnyObject) {
+ _ e: RecognitionException) {
// if we've already reported an error and have not matched a token
// yet successfully, don't report any errors.
if inErrorRecoveryMode(recognizer) {
@@ -94,31 +98,27 @@ public class DefaultErrorStrategy: ANTLRErrorStrategy {
return // don't report spurious errors
}
beginErrorCondition(recognizer)
- //TODO: exception handler
- if (e is NoViableAltException) {
- try! reportNoViableAlternative(recognizer, e as! NoViableAltException);
- } else {
- if (e is InputMismatchException) {
- reportInputMismatch(recognizer, e as! InputMismatchException);
- } else {
- if (e is FailedPredicateException) {
- reportFailedPredicate(recognizer, e as! FailedPredicateException);
- } else {
- errPrint("unknown recognition error type: " + String(describing: type(of: e)));
- let re = (e as! RecognitionException)
- recognizer.notifyErrorListeners(re.getOffendingToken(), re.message ?? "", e);
- }
- }
+ if let nvae = e as? NoViableAltException {
+ reportNoViableAlternative(recognizer, nvae)
+ }
+ else if let ime = e as? InputMismatchException {
+ reportInputMismatch(recognizer, ime)
+ }
+ else if let fpe = e as? FailedPredicateException {
+ reportFailedPredicate(recognizer, fpe)
+ }
+ else {
+ errPrint("unknown recognition error type: " + String(describing: type(of: e)))
+ recognizer.notifyErrorListeners(e.getOffendingToken(), e.message ?? "", e)
}
}
- /// {@inheritDoc}
- ///
- /// The default implementation resynchronizes the parser by consuming tokens
+ ///
+ /// The default implementation resynchronizes the parser by consuming tokens
/// until we find one in the resynchronization set--loosely the set of tokens
- /// that can follow the current rule.
-
- public func recover(_ recognizer: Parser, _ e: AnyObject) throws {
+ /// that can follow the current rule.
+ ///
+ public func recover(_ recognizer: Parser, _ e: RecognitionException) throws {
// print("recover in "+recognizer.getRuleInvocationStack()+
// " index="+getTokenStream(recognizer).index()+
// ", lastErrorIndex="+
@@ -138,77 +138,74 @@ public class DefaultErrorStrategy: ANTLRErrorStrategy {
}
lastErrorIndex = getTokenStream(recognizer).index()
if lastErrorStates == nil {
- lastErrorStates = try IntervalSet()
+ lastErrorStates = IntervalSet()
}
try lastErrorStates!.add(recognizer.getState())
- let followSet: IntervalSet = try getErrorRecoverySet(recognizer)
+ let followSet = getErrorRecoverySet(recognizer)
try consumeUntil(recognizer, followSet)
}
- /// The default implementation of {@link org.antlr.v4.runtime.ANTLRErrorStrategy#sync} makes sure
+ ///
+ /// The default implementation of _org.antlr.v4.runtime.ANTLRErrorStrategy#sync_ makes sure
/// that the current lookahead symbol is consistent with what were expecting
/// at this point in the ATN. You can call this anytime but ANTLR only
/// generates code to check before subrules/loops and each iteration.
- ///
- /// Implements Jim Idle's magic sync mechanism in closures and optional
- /// subrules. E.g.,
- ///
- ///
+ ///
+ /// Implements Jim Idle's magic sync mechanism in closures and optional
+ /// subrules. E.g.,
+ ///
+ ///
/// a : sync ( stuff sync )* ;
/// sync : {consume to what can follow sync} ;
- ///
- ///
- /// At the start of a sub rule upon error, {@link #sync} performs single
+ ///
+ ///
+ /// At the start of a sub rule upon error, _#sync_ performs single
/// token deletion, if possible. If it can't do that, it bails on the current
/// rule and uses the default error recovery, which consumes until the
/// resynchronization set of the current rule.
- ///
- /// If the sub rule is optional ({@code (...)?}, {@code (...)*}, or block
+ ///
+ /// If the sub rule is optional (`(...)?`, `(...)*`, or block
/// with an empty alternative), then the expected set includes what follows
- /// the subrule.
- ///
- /// During loop iteration, it consumes until it sees a token that can start a
+ /// the subrule.
+ ///
+ /// During loop iteration, it consumes until it sees a token that can start a
/// sub rule or what follows loop. Yes, that is pretty aggressive. We opt to
- /// stay in the loop as long as possible.
- ///
- /// ORIGINS
- ///
- /// Previous versions of ANTLR did a poor job of their recovery within loops.
+ /// stay in the loop as long as possible.
+ ///
+ /// __ORIGINS__
+ ///
+ /// Previous versions of ANTLR did a poor job of their recovery within loops.
/// A single mismatch token or missing token would force the parser to bail
- /// out of the entire rules surrounding the loop. So, for rule
- ///
- ///
+ /// out of the entire rules surrounding the loop. So, for rule
+ ///
+ ///
/// classDef : 'class' ID '{' member* '}'
- ///
- ///
+ ///
+ ///
/// input with an extra token between members would force the parser to
/// consume until it found the next class definition rather than the next
/// member definition of the current class.
- ///
- /// This functionality cost a little bit of effort because the parser has to
+ ///
+ /// This functionality cost a little bit of effort because the parser has to
/// compare token set at the start of the loop and at each iteration. If for
/// some reason speed is suffering for you, you can turn off this
- /// functionality by simply overriding this method as a blank { }.
+ /// functionality by simply overriding this method as a blank { }.
+ ///
public func sync(_ recognizer: Parser) throws {
- let s: ATNState = recognizer.getInterpreter().atn.states[recognizer.getState()]!
+ let s = recognizer.getInterpreter().atn.states[recognizer.getState()]!
// errPrint("sync @ "+s.stateNumber+"="+s.getClass().getSimpleName());
// If already recovering, don't try to sync
if inErrorRecoveryMode(recognizer) {
return
}
- let tokens: TokenStream = getTokenStream(recognizer)
- let la: Int = try tokens.LA(1)
+ let tokens = getTokenStream(recognizer)
+ let la = try tokens.LA(1)
// try cheaper subset first; might get lucky. seems to shave a wee bit off
- //let set : IntervalSet = recognizer.getATN().nextTokens(s)
-
- if try recognizer.getATN().nextTokens(s).contains(CommonToken.EPSILON) {
- return
- }
-
- if try recognizer.getATN().nextTokens(s).contains(la) {
+ let nextToks = recognizer.getATN().nextTokens(s)
+ if nextToks.contains(CommonToken.EPSILON) || nextToks.contains(la) {
return
}
@@ -221,15 +218,14 @@ public class DefaultErrorStrategy: ANTLRErrorStrategy {
if try singleTokenDeletion(recognizer) != nil {
return
}
- throw try ANTLRException.recognition(e: InputMismatchException(recognizer))
+ throw ANTLRException.recognition(e: InputMismatchException(recognizer))
case ATNState.PLUS_LOOP_BACK: fallthrough
case ATNState.STAR_LOOP_BACK:
// errPrint("at loop back: "+s.getClass().getSimpleName());
- try reportUnwantedToken(recognizer)
- let expecting: IntervalSet = try recognizer.getExpectedTokens()
- let whatFollowsLoopIterationOrRule: IntervalSet =
- try expecting.or(try getErrorRecoverySet(recognizer)) as! IntervalSet
+ reportUnwantedToken(recognizer)
+ let expecting = try recognizer.getExpectedTokens()
+ let whatFollowsLoopIterationOrRule = expecting.or(getErrorRecoverySet(recognizer)) as! IntervalSet
try consumeUntil(recognizer, whatFollowsLoopIterationOrRule)
break
@@ -239,258 +235,274 @@ public class DefaultErrorStrategy: ANTLRErrorStrategy {
}
}
- /// This is called by {@link #reportError} when the exception is a
- /// {@link org.antlr.v4.runtime.NoViableAltException}.
- ///
+ ///
+ /// This is called by _#reportError_ when the exception is a
+ /// _org.antlr.v4.runtime.NoViableAltException_.
+ ///
/// - seealso: #reportError
- ///
+ ///
/// - parameter recognizer: the parser instance
/// - parameter e: the recognition exception
+ ///
internal func reportNoViableAlternative(_ recognizer: Parser,
- _ e: NoViableAltException) throws {
- let tokens: TokenStream? = getTokenStream(recognizer)
+ _ e: NoViableAltException) {
+ let tokens = getTokenStream(recognizer)
var input: String
- if let tokens = tokens {
- if e.getStartToken().getType() == CommonToken.EOF {
- input = ""
- } else {
+ if e.getStartToken().getType() == CommonToken.EOF {
+ input = ""
+ }
+ else {
+ do {
input = try tokens.getText(e.getStartToken(), e.getOffendingToken())
}
- } else {
- input = ""
+ catch {
+ input = ""
+ }
}
- let msg: String = "no viable alternative at input " + escapeWSAndQuote(input)
+ let msg = "no viable alternative at input " + escapeWSAndQuote(input)
recognizer.notifyErrorListeners(e.getOffendingToken(), msg, e)
}
- /// This is called by {@link #reportError} when the exception is an
- /// {@link org.antlr.v4.runtime.InputMismatchException}.
- ///
+ ///
+ /// This is called by _#reportError_ when the exception is an
+ /// _org.antlr.v4.runtime.InputMismatchException_.
+ ///
/// - seealso: #reportError
- ///
+ ///
/// - parameter recognizer: the parser instance
/// - parameter e: the recognition exception
+ ///
internal func reportInputMismatch(_ recognizer: Parser,
_ e: InputMismatchException) {
- let msg: String = "mismatched input " + getTokenErrorDisplay(e.getOffendingToken()) +
+ let msg = "mismatched input " + getTokenErrorDisplay(e.getOffendingToken()) +
" expecting " + e.getExpectedTokens()!.toString(recognizer.getVocabulary())
recognizer.notifyErrorListeners(e.getOffendingToken(), msg, e)
}
- /// This is called by {@link #reportError} when the exception is a
- /// {@link org.antlr.v4.runtime.FailedPredicateException}.
- ///
+ ///
+ /// This is called by _#reportError_ when the exception is a
+ /// _org.antlr.v4.runtime.FailedPredicateException_.
+ ///
/// - seealso: #reportError
- ///
+ ///
/// - parameter recognizer: the parser instance
/// - parameter e: the recognition exception
+ ///
internal func reportFailedPredicate(_ recognizer: Parser,
_ e: FailedPredicateException) {
- let ruleName: String = recognizer.getRuleNames()[recognizer._ctx!.getRuleIndex()]
- let msg: String = "rule " + ruleName + " " + e.message! // e.getMessage()
+ let ruleName = recognizer.getRuleNames()[recognizer._ctx!.getRuleIndex()]
+ let msg = "rule \(ruleName) \(e.message!)"
recognizer.notifyErrorListeners(e.getOffendingToken(), msg, e)
}
+ ///
/// This method is called to report a syntax error which requires the removal
/// of a token from the input stream. At the time this method is called, the
- /// erroneous symbol is current {@code LT(1)} symbol and has not yet been
+ /// erroneous symbol is current `LT(1)` symbol and has not yet been
/// removed from the input stream. When this method returns,
- /// {@code recognizer} is in error recovery mode.
- ///
- /// This method is called when {@link #singleTokenDeletion} identifies
+ /// `recognizer` is in error recovery mode.
+ ///
+ /// This method is called when _#singleTokenDeletion_ identifies
/// single-token deletion as a viable recovery strategy for a mismatched
- /// input error.
- ///
- /// The default implementation simply returns if the handler is already in
- /// error recovery mode. Otherwise, it calls {@link #beginErrorCondition} to
+ /// input error.
+ ///
+ /// The default implementation simply returns if the handler is already in
+ /// error recovery mode. Otherwise, it calls _#beginErrorCondition_ to
/// enter error recovery mode, followed by calling
- /// {@link org.antlr.v4.runtime.Parser#notifyErrorListeners}.
- ///
+ /// _org.antlr.v4.runtime.Parser#notifyErrorListeners_.
+ ///
/// - parameter recognizer: the parser instance
- internal func reportUnwantedToken(_ recognizer: Parser) throws {
+ ///
+ internal func reportUnwantedToken(_ recognizer: Parser) {
if inErrorRecoveryMode(recognizer) {
return
}
beginErrorCondition(recognizer)
- let t: Token = try recognizer.getCurrentToken()
- let tokenName: String = getTokenErrorDisplay(t)
- let expecting: IntervalSet = try getExpectedTokens(recognizer)
- let msg: String = "extraneous input " + tokenName + " expecting " +
- expecting.toString(recognizer.getVocabulary())
+ let t = try? recognizer.getCurrentToken()
+ let tokenName = getTokenErrorDisplay(t)
+ let expecting = (try? getExpectedTokens(recognizer)) ?? IntervalSet.EMPTY_SET
+ let msg = "extraneous input \(tokenName) expecting \(expecting.toString(recognizer.getVocabulary()))"
recognizer.notifyErrorListeners(t, msg, nil)
}
+ ///
/// This method is called to report a syntax error which requires the
/// insertion of a missing token into the input stream. At the time this
/// method is called, the missing token has not yet been inserted. When this
- /// method returns, {@code recognizer} is in error recovery mode.
- ///
- /// This method is called when {@link #singleTokenInsertion} identifies
+ /// method returns, `recognizer` is in error recovery mode.
+ ///
+ /// This method is called when _#singleTokenInsertion_ identifies
/// single-token insertion as a viable recovery strategy for a mismatched
- /// input error.
- ///
- /// The default implementation simply returns if the handler is already in
- /// error recovery mode. Otherwise, it calls {@link #beginErrorCondition} to
+ /// input error.
+ ///
+ /// The default implementation simply returns if the handler is already in
+ /// error recovery mode. Otherwise, it calls _#beginErrorCondition_ to
/// enter error recovery mode, followed by calling
- /// {@link org.antlr.v4.runtime.Parser#notifyErrorListeners}.
- ///
+ /// _org.antlr.v4.runtime.Parser#notifyErrorListeners_.
+ ///
/// - parameter recognizer: the parser instance
- internal func reportMissingToken(_ recognizer: Parser) throws {
+ ///
+ internal func reportMissingToken(_ recognizer: Parser) {
if inErrorRecoveryMode(recognizer) {
return
}
beginErrorCondition(recognizer)
- let t: Token = try recognizer.getCurrentToken()
- let expecting: IntervalSet = try getExpectedTokens(recognizer)
- let msg: String = "missing " + expecting.toString(recognizer.getVocabulary()) +
- " at " + getTokenErrorDisplay(t)
+ let t = try? recognizer.getCurrentToken()
+ let expecting = (try? getExpectedTokens(recognizer)) ?? IntervalSet.EMPTY_SET
+ let msg = "missing \(expecting.toString(recognizer.getVocabulary())) at \(getTokenErrorDisplay(t))"
recognizer.notifyErrorListeners(t, msg, nil)
}
- /// {@inheritDoc}
- ///
- /// The default implementation attempts to recover from the mismatched input
+ ///
+ ///
+ ///
+ /// The default implementation attempts to recover from the mismatched input
/// by using single token insertion and deletion as described below. If the
/// recovery attempt fails, this method throws an
- /// {@link org.antlr.v4.runtime.InputMismatchException}.
- ///
- /// EXTRA TOKEN (single token deletion)
- ///
- /// {@code LA(1)} is not what we are looking for. If {@code LA(2)} has the
- /// right token, however, then assume {@code LA(1)} is some extra spurious
+ /// _org.antlr.v4.runtime.InputMismatchException_.
+ ///
+ /// __EXTRA TOKEN__ (single token deletion)
+ ///
+ /// `LA(1)` is not what we are looking for. If `LA(2)` has the
+ /// right token, however, then assume `LA(1)` is some extra spurious
/// token and delete it. Then consume and return the next token (which was
- /// the {@code LA(2)} token) as the successful result of the match operation.
- ///
- /// This recovery strategy is implemented by {@link #singleTokenDeletion}.
- ///
- /// MISSING TOKEN (single token insertion)
- ///
- /// If current token (at {@code LA(1)}) is consistent with what could come
- /// after the expected {@code LA(1)} token, then assume the token is missing
- /// and use the parser's {@link org.antlr.v4.runtime.TokenFactory} to create it on the fly. The
+ /// the `LA(2)` token) as the successful result of the match operation.
+ ///
+ /// This recovery strategy is implemented by _#singleTokenDeletion_.
+ ///
+ /// __MISSING TOKEN__ (single token insertion)
+ ///
+ /// If current token (at `LA(1)`) is consistent with what could come
+ /// after the expected `LA(1)` token, then assume the token is missing
+ /// and use the parser's _org.antlr.v4.runtime.TokenFactory_ to create it on the fly. The
/// "insertion" is performed by returning the created token as the successful
- /// result of the match operation.
- ///
- /// This recovery strategy is implemented by {@link #singleTokenInsertion}.
- ///
- /// EXAMPLE
- ///
- /// For example, Input {@code i=(3;} is clearly missing the {@code ')'}. When
- /// the parser returns from the nested call to {@code expr}, it will have
- /// call chain:
- ///
- ///
+ /// result of the match operation.
+ ///
+ /// This recovery strategy is implemented by _#singleTokenInsertion_.
+ ///
+ /// __EXAMPLE__
+ ///
+ /// For example, Input `i=(3;` is clearly missing the `')'`. When
+ /// the parser returns from the nested call to `expr`, it will have
+ /// call chain:
+ ///
+ ///
/// stat → expr → atom
- ///
- ///
- /// and it will be trying to match the {@code ')'} at this point in the
+ ///
+ ///
+ /// and it will be trying to match the `')'` at this point in the
/// derivation:
- ///
- ///
+ ///
+ ///
/// => ID '=' '(' INT ')' ('+' atom)* ';'
/// ^
- ///
- ///
- /// The attempt to match {@code ')'} will fail when it sees {@code ';'} and
- /// call {@link #recoverInline}. To recover, it sees that {@code LA(1)==';'}
- /// is in the set of tokens that can follow the {@code ')'} token reference
- /// in rule {@code atom}. It can assume that you forgot the {@code ')'}.
+ ///
+ ///
+ /// The attempt to match `')'` will fail when it sees `';'` and
+ /// call _#recoverInline_. To recover, it sees that `LA(1)==';'`
+ /// is in the set of tokens that can follow the `')'` token reference
+ /// in rule `atom`. It can assume that you forgot the `')'`.
+ ///
public func recoverInline(_ recognizer: Parser) throws -> Token {
// SINGLE TOKEN DELETION
- let matchedSymbol: Token? = try singleTokenDeletion(recognizer)
- if matchedSymbol != nil {
+ let matchedSymbol = try singleTokenDeletion(recognizer)
+ if let matchedSymbol = matchedSymbol {
// we have deleted the extra token.
// now, move past ttype token as if all were ok
try recognizer.consume()
- return matchedSymbol!
+ return matchedSymbol
}
// SINGLE TOKEN INSERTION
if try singleTokenInsertion(recognizer) {
return try getMissingSymbol(recognizer)
}
- throw try ANTLRException.recognition(e: InputMismatchException(recognizer))
- // throw try ANTLRException.InputMismatch(e: InputMismatchException(recognizer) )
- //RuntimeException("InputMismatchException")
// even that didn't work; must throw the exception
- //throwException() /* throw InputMismatchException(recognizer); */
+ throw ANTLRException.recognition(e: InputMismatchException(recognizer))
}
+ ///
/// This method implements the single-token insertion inline error recovery
- /// strategy. It is called by {@link #recoverInline} if the single-token
+ /// strategy. It is called by _#recoverInline_ if the single-token
/// deletion strategy fails to recover from the mismatched input. If this
- /// method returns {@code true}, {@code recognizer} will be in error recovery
+ /// method returns `true`, `recognizer` will be in error recovery
/// mode.
- ///
- /// This method determines whether or not single-token insertion is viable by
- /// checking if the {@code LA(1)} input symbol could be successfully matched
- /// if it were instead the {@code LA(2)} symbol. If this method returns
- /// {@code true}, the caller is responsible for creating and inserting a
- /// token with the correct type to produce this behavior.
- ///
+ ///
+ /// This method determines whether or not single-token insertion is viable by
+ /// checking if the `LA(1)` input symbol could be successfully matched
+ /// if it were instead the `LA(2)` symbol. If this method returns
+ /// `true`, the caller is responsible for creating and inserting a
+ /// token with the correct type to produce this behavior.
+ ///
/// - parameter recognizer: the parser instance
- /// - returns: {@code true} if single-token insertion is a viable recovery
- /// strategy for the current mismatched input, otherwise {@code false}
+ /// - returns: `true` if single-token insertion is a viable recovery
+ /// strategy for the current mismatched input, otherwise `false`
+ ///
internal func singleTokenInsertion(_ recognizer: Parser) throws -> Bool {
- let currentSymbolType: Int = try getTokenStream(recognizer).LA(1)
+ let currentSymbolType = try getTokenStream(recognizer).LA(1)
// if current token is consistent with what could come after current
// ATN state, then we know we're missing a token; error recovery
// is free to conjure up and insert the missing token
- let currentState: ATNState = recognizer.getInterpreter().atn.states[recognizer.getState()]!
- let next: ATNState = currentState.transition(0).target
- let atn: ATN = recognizer.getInterpreter().atn
- let expectingAtLL2: IntervalSet = try atn.nextTokens(next, recognizer._ctx)
+ let currentState = recognizer.getInterpreter().atn.states[recognizer.getState()]!
+ let next = currentState.transition(0).target
+ let atn = recognizer.getInterpreter().atn
+ let expectingAtLL2 = atn.nextTokens(next, recognizer._ctx)
// print("LT(2) set="+expectingAtLL2.toString(recognizer.getTokenNames()));
if expectingAtLL2.contains(currentSymbolType) {
- try reportMissingToken(recognizer)
+ reportMissingToken(recognizer)
return true
}
return false
}
+ ///
/// This method implements the single-token deletion inline error recovery
- /// strategy. It is called by {@link #recoverInline} to attempt to recover
+ /// strategy. It is called by _#recoverInline_ to attempt to recover
/// from mismatched input. If this method returns null, the parser and error
/// handler state will not have changed. If this method returns non-null,
- /// {@code recognizer} will not be in error recovery mode since the
+ /// `recognizer` will __not__ be in error recovery mode since the
/// returned token was a successful match.
- ///
- /// If the single-token deletion is successful, this method calls
- /// {@link #reportUnwantedToken} to report the error, followed by
- /// {@link org.antlr.v4.runtime.Parser#consume} to actually "delete" the extraneous token. Then,
- /// before returning {@link #reportMatch} is called to signal a successful
- /// match.
- ///
+ ///
+ /// If the single-token deletion is successful, this method calls
+ /// _#reportUnwantedToken_ to report the error, followed by
+ /// _org.antlr.v4.runtime.Parser#consume_ to actually "delete" the extraneous token. Then,
+ /// before returning _#reportMatch_ is called to signal a successful
+ /// match.
+ ///
/// - parameter recognizer: the parser instance
- /// - returns: the successfully matched {@link org.antlr.v4.runtime.Token} instance if single-token
+ /// - returns: the successfully matched _org.antlr.v4.runtime.Token_ instance if single-token
/// deletion successfully recovers from the mismatched input, otherwise
- /// {@code null}
+ /// `null`
+ ///
internal func singleTokenDeletion(_ recognizer: Parser) throws -> Token? {
- let nextTokenType: Int = try getTokenStream(recognizer).LA(2)
- let expecting: IntervalSet = try getExpectedTokens(recognizer)
+ let nextTokenType = try getTokenStream(recognizer).LA(2)
+ let expecting = try getExpectedTokens(recognizer)
if expecting.contains(nextTokenType) {
- try reportUnwantedToken(recognizer)
+ reportUnwantedToken(recognizer)
+ ///
/// errPrint("recoverFromMismatchedToken deleting "+
/// ((TokenStream)getTokenStream(recognizer)).LT(1)+
/// " since "+((TokenStream)getTokenStream(recognizer)).LT(2)+
/// " is what we want");
+ ///
try recognizer.consume() // simply delete extra token
// we want to return the token we're actually matching
- let matchedSymbol: Token = try recognizer.getCurrentToken()
+ let matchedSymbol = try recognizer.getCurrentToken()
reportMatch(recognizer) // we know current token is correct
return matchedSymbol
}
return nil
}
+ ///
/// Conjure up a missing token during error recovery.
- ///
+ ///
/// The recognizer attempts to recover from single missing
/// symbols. But, actions might refer to that missing symbol.
/// For example, x=ID {f($x);}. The action clearly assumes
@@ -507,31 +519,33 @@ public class DefaultErrorStrategy: ANTLRErrorStrategy {
/// a CommonToken of the appropriate type. The text will be the token.
/// If you change what tokens must be created by the lexer,
/// override this method to create the appropriate tokens.
-
+ ///
internal func getTokenStream(_ recognizer: Parser) -> TokenStream {
return recognizer.getInputStream() as! TokenStream
}
internal func getMissingSymbol(_ recognizer: Parser) throws -> Token {
- let currentSymbol: Token = try recognizer.getCurrentToken()
- let expecting: IntervalSet = try getExpectedTokens(recognizer)
- let expectedTokenType: Int = expecting.getMinElement() // get any element
+ let currentSymbol = try recognizer.getCurrentToken()
+ let expecting = try getExpectedTokens(recognizer)
+ let expectedTokenType = expecting.getMinElement() // get any element
var tokenText: String
if expectedTokenType == CommonToken.EOF {
tokenText = ""
} else {
tokenText = ""
}
- var current: Token = currentSymbol
- let lookback: Token? = try getTokenStream(recognizer).LT(-1)
+ var current = currentSymbol
+ let lookback = try getTokenStream(recognizer).LT(-1)
if current.getType() == CommonToken.EOF && lookback != nil {
current = lookback!
}
- let token = recognizer.getTokenFactory().create((current.getTokenSource(), current.getTokenSource()!.getInputStream()), expectedTokenType, tokenText,
- CommonToken.DEFAULT_CHANNEL,
- -1, -1,
- current.getLine(), current.getCharPositionInLine())
+ let token = recognizer.getTokenFactory().create(
+ current.getTokenSourceAndStream(),
+ expectedTokenType, tokenText,
+ CommonToken.DEFAULT_CHANNEL,
+ -1, -1,
+ current.getLine(), current.getCharPositionInLine())
return token
}
@@ -541,6 +555,7 @@ public class DefaultErrorStrategy: ANTLRErrorStrategy {
return try recognizer.getExpectedTokens()
}
+ ///
/// How should a token be displayed in an error message? The default
/// is to display just the text, but during development you might
/// want to have a lot of information spit out. Override in that case
@@ -548,23 +563,24 @@ public class DefaultErrorStrategy: ANTLRErrorStrategy {
/// the token). This is better than forcing you to override a method in
/// your token objects because you don't have to go modify your lexer
/// so that it creates a new Java type.
+ ///
internal func getTokenErrorDisplay(_ t: Token?) -> String {
- if t == nil {
+ guard let t = t else {
return ""
}
- var s: String? = getSymbolText(t!)
+ var s = getSymbolText(t)
if s == nil {
- if getSymbolType(t!) == CommonToken.EOF {
+ if getSymbolType(t) == CommonToken.EOF {
s = ""
} else {
- s = "<\(getSymbolType(t!))>"
+ s = "<\(getSymbolType(t))>"
}
}
return escapeWSAndQuote(s!)
}
- internal func getSymbolText(_ symbol: Token) -> String {
- return symbol.getText()!
+ internal func getSymbolText(_ symbol: Token) -> String? {
+ return symbol.getText()
}
internal func getSymbolType(_ symbol: Token) -> Int {
@@ -580,6 +596,7 @@ public class DefaultErrorStrategy: ANTLRErrorStrategy {
return "'" + s + "'"
}
+ ///
/// Compute the error recovery set for the current rule. During
/// rule invocation, the parser pushes the set of tokens that can
/// follow that rule reference on the stack; this amounts to
@@ -588,9 +605,9 @@ public class DefaultErrorStrategy: ANTLRErrorStrategy {
/// This local follow set only includes tokens
/// from within the rule; i.e., the FIRST computation done by
/// ANTLR stops at the end of a rule.
- ///
+ ///
/// EXAMPLE
- ///
+ ///
/// When you find a "no viable alt exception", the input is not
/// consistent with any of the alternatives for rule r. The best
/// thing to do is to consume tokens until you see something that
@@ -598,9 +615,9 @@ public class DefaultErrorStrategy: ANTLRErrorStrategy {
/// You don't want the exact set of viable next tokens because the
/// input might just be missing a token--you might consume the
/// rest of the input looking for one of the missing tokens.
- ///
+ ///
/// Consider grammar:
- ///
+ ///
/// a : '[' b ']'
/// | '(' b ')'
/// ;
@@ -608,30 +625,30 @@ public class DefaultErrorStrategy: ANTLRErrorStrategy {
/// c : ID
/// | INT
/// ;
- ///
+ ///
/// At each rule invocation, the set of tokens that could follow
/// that rule is pushed on a stack. Here are the various
/// context-sensitive follow sets:
- ///
+ ///
/// FOLLOW(b1_in_a) = FIRST(']') = ']'
/// FOLLOW(b2_in_a) = FIRST(')') = ')'
/// FOLLOW(c_in_b) = FIRST('^') = '^'
- ///
+ ///
/// Upon erroneous input "[]", the call chain is
- ///
+ ///
/// a -> b -> c
- ///
+ ///
/// and, hence, the follow context stack is:
- ///
+ ///
/// depth follow set start of rule execution
/// 0 a (from main())
/// 1 ']' b
/// 2 '^' c
- ///
+ ///
/// Notice that ')' is not included, because b would have to have
/// been called from a different context in rule a for ')' to be
/// included.
- ///
+ ///
/// For error recovery, we cannot consider FOLLOW(c)
/// (context-sensitive or otherwise). We need the combined set of
/// all context-sensitive FOLLOW sets--the set of all tokens that
@@ -648,53 +665,55 @@ public class DefaultErrorStrategy: ANTLRErrorStrategy {
/// the same recovery set and doesn't consume anything. Rule b
/// exits normally returning to rule a. Now it finds the ']' (and
/// with the successful match exits errorRecovery mode).
- ///
+ ///
/// So, you can see that the parser walks up the call chain looking
/// for the token that was a member of the recovery set.
- ///
+ ///
/// Errors are not generated in errorRecovery mode.
- ///
+ ///
/// ANTLR's error recovery mechanism is based upon original ideas:
- ///
+ ///
/// "Algorithms + Data Structures = Programs" by Niklaus Wirth
- ///
+ ///
/// and
- ///
+ ///
/// "A note on error recovery in recursive descent parsers":
/// http://portal.acm.org/citation.cfm?id=947902.947905
- ///
+ ///
/// Later, Josef Grosch had some good ideas:
- ///
+ ///
/// "Efficient and Comfortable Error Recovery in Recursive Descent
/// Parsers":
/// ftp://www.cocolab.com/products/cocktail/doca4.ps/ell.ps.zip
- ///
+ ///
/// Like Grosch I implement context-sensitive FOLLOW sets that are combined
/// at run-time upon error to avoid overhead during parsing.
- internal func getErrorRecoverySet(_ recognizer: Parser) throws -> IntervalSet {
- let atn: ATN = recognizer.getInterpreter().atn
+ ///
+ internal func getErrorRecoverySet(_ recognizer: Parser) -> IntervalSet {
+ let atn = recognizer.getInterpreter().atn
var ctx: RuleContext? = recognizer._ctx
- let recoverSet: IntervalSet = try IntervalSet()
- while let ctxWrap = ctx , ctxWrap.invokingState >= 0 {
+ let recoverSet = IntervalSet()
+ while let ctxWrap = ctx, ctxWrap.invokingState >= 0 {
// compute what follows who invoked us
- let invokingState: ATNState = atn.states[ctxWrap.invokingState]!
- let rt: RuleTransition = invokingState.transition(0) as! RuleTransition
- let follow: IntervalSet = try atn.nextTokens(rt.followState)
- try recoverSet.addAll(follow)
+ let invokingState = atn.states[ctxWrap.invokingState]!
+ let rt = invokingState.transition(0) as! RuleTransition
+ let follow = atn.nextTokens(rt.followState)
+ try! recoverSet.addAll(follow)
ctx = ctxWrap.parent
}
- try recoverSet.remove(CommonToken.EPSILON)
+ try! recoverSet.remove(CommonToken.EPSILON)
// print("recover set "+recoverSet.toString(recognizer.getTokenNames()));
return recoverSet
}
+ ///
/// Consume tokens until one matches the given token set.
+ ///
internal func consumeUntil(_ recognizer: Parser, _ set: IntervalSet) throws {
// errPrint("consumeUntil("+set.toString(recognizer.getTokenNames())+")");
- var ttype: Int = try getTokenStream(recognizer).LA(1)
+ var ttype = try getTokenStream(recognizer).LA(1)
while ttype != CommonToken.EOF && !set.contains(ttype) {
//print("consume during recover LA(1)="+getTokenNames()[input.LA(1)]);
-// getTokenStream(recognizer).consume();
try recognizer.consume()
ttype = try getTokenStream(recognizer).LA(1)
}
diff --git a/runtime/Swift/Sources/Antlr4/DiagnosticErrorListener.swift b/runtime/Swift/Sources/Antlr4/DiagnosticErrorListener.swift
index 75681f617..4109f672b 100644
--- a/runtime/Swift/Sources/Antlr4/DiagnosticErrorListener.swift
+++ b/runtime/Swift/Sources/Antlr4/DiagnosticErrorListener.swift
@@ -1,45 +1,53 @@
+///
/// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
/// Use of this file is governed by the BSD 3-clause license that
/// can be found in the LICENSE.txt file in the project root.
+///
-/// This implementation of {@link org.antlr.v4.runtime.ANTLRErrorListener} can be used to identify
+///
+/// This implementation of _org.antlr.v4.runtime.ANTLRErrorListener_ can be used to identify
/// certain potential correctness and performance problems in grammars. "Reports"
-/// are made by calling {@link org.antlr.v4.runtime.Parser#notifyErrorListeners} with the appropriate
+/// are made by calling _org.antlr.v4.runtime.Parser#notifyErrorListeners_ with the appropriate
/// message.
-///
-///
-/// - Ambiguities: These are cases where more than one path through the
-/// grammar can match the input.
-/// - Weak context sensitivity: These are cases where full-context
+///
+/// * __Ambiguities__: These are cases where more than one path through the
+/// grammar can match the input.
+/// * __Weak context sensitivity__: These are cases where full-context
/// prediction resolved an SLL conflict to a unique alternative which equaled the
-/// minimum alternative of the SLL conflict.
-/// - Strong (forced) context sensitivity: These are cases where the
+/// minimum alternative of the SLL conflict.
+/// * __Strong (forced) context sensitivity__: These are cases where the
/// full-context prediction resolved an SLL conflict to a unique alternative,
-/// and the minimum alternative of the SLL conflict was found to not be
+/// __and__ the minimum alternative of the SLL conflict was found to not be
/// a truly viable alternative. Two-stage parsing cannot be used for inputs where
-/// this situation occurs.
-///
-///
+/// this situation occurs.
+///
/// - Sam Harwell
+///
import Foundation
public class DiagnosticErrorListener: BaseErrorListener {
- /// When {@code true}, only exactly known ambiguities are reported.
+ ///
+ /// When `true`, only exactly known ambiguities are reported.
+ ///
internal final var exactOnly: Bool
- /// Initializes a new instance of {@link org.antlr.v4.runtime.DiagnosticErrorListener} which only
+ ///
+ /// Initializes a new instance of _org.antlr.v4.runtime.DiagnosticErrorListener_ which only
/// reports exact ambiguities.
+ ///
public convenience override init() {
self.init(true)
}
- /// Initializes a new instance of {@link org.antlr.v4.runtime.DiagnosticErrorListener}, specifying
+ ///
+ /// Initializes a new instance of _org.antlr.v4.runtime.DiagnosticErrorListener_, specifying
/// whether all ambiguities or only exact ambiguities are reported.
- ///
- /// - parameter exactOnly: {@code true} to report only exact ambiguities, otherwise
- /// {@code false} to report all ambiguities.
+ ///
+ /// - parameter exactOnly: `true` to report only exact ambiguities, otherwise
+ /// `false` to report all ambiguities.
+ ///
public init(_ exactOnly: Bool) {
self.exactOnly = exactOnly
}
@@ -51,16 +59,16 @@ public class DiagnosticErrorListener: BaseErrorListener {
_ stopIndex: Int,
_ exact: Bool,
_ ambigAlts: BitSet,
- _ configs: ATNConfigSet) throws {
+ _ configs: ATNConfigSet) {
if exactOnly && !exact {
return
}
let decision = getDecisionDescription(recognizer, dfa)
- let conflictingAlts = try getConflictingAlts(ambigAlts, configs)
- let text = try recognizer.getTokenStream()!.getText(Interval.of(startIndex, stopIndex))
+ let conflictingAlts = getConflictingAlts(ambigAlts, configs)
+ let text = getTextInInterval(recognizer, startIndex, stopIndex)
let message = "reportAmbiguity d=\(decision): ambigAlts=\(conflictingAlts), input='\(text)'"
- try recognizer.notifyErrorListeners(message)
+ recognizer.notifyErrorListeners(message)
}
override
@@ -69,11 +77,11 @@ public class DiagnosticErrorListener: BaseErrorListener {
_ startIndex: Int,
_ stopIndex: Int,
_ conflictingAlts: BitSet?,
- _ configs: ATNConfigSet) throws {
+ _ configs: ATNConfigSet) {
let decision = getDecisionDescription(recognizer, dfa)
- let text = try recognizer.getTokenStream()!.getText(Interval.of(startIndex, stopIndex))
+ let text = getTextInInterval(recognizer, startIndex, stopIndex)
let message = "reportAttemptingFullContext d=\(decision), input='\(text)'"
- try recognizer.notifyErrorListeners(message)
+ recognizer.notifyErrorListeners(message)
}
override
@@ -82,11 +90,11 @@ public class DiagnosticErrorListener: BaseErrorListener {
_ startIndex: Int,
_ stopIndex: Int,
_ prediction: Int,
- _ configs: ATNConfigSet) throws {
+ _ configs: ATNConfigSet) {
let decision = getDecisionDescription(recognizer, dfa)
- let text = try recognizer.getTokenStream()!.getText(Interval.of(startIndex, stopIndex))
+ let text = getTextInInterval(recognizer, startIndex, stopIndex)
let message = "reportContextSensitivity d=\(decision), input='\(text)'"
- try recognizer.notifyErrorListeners(message)
+ recognizer.notifyErrorListeners(message)
}
internal func getDecisionDescription(_ recognizer: Parser, _ dfa: DFA) -> String {
@@ -106,21 +114,28 @@ public class DiagnosticErrorListener: BaseErrorListener {
return "\(decision) (\(ruleName))"
}
+ ///
/// Computes the set of conflicting or ambiguous alternatives from a
/// configuration set, if that information was not already provided by the
/// parser.
- ///
+ ///
/// - parameter reportedAlts: The set of conflicting or ambiguous alternatives, as
/// reported by the parser.
/// - parameter configs: The conflicting or ambiguous configuration set.
- /// - returns: Returns {@code reportedAlts} if it is not {@code null}, otherwise
- /// returns the set of alternatives represented in {@code configs}.
- internal func getConflictingAlts(_ reportedAlts: BitSet?, _ configs: ATNConfigSet) throws -> BitSet {
- if reportedAlts != nil {
- return reportedAlts!
- }
- let result = try configs.getAltBitSet()
- return result
+ /// - returns: Returns `reportedAlts` if it is not `null`, otherwise
+ /// returns the set of alternatives represented in `configs`.
+ ///
+ internal func getConflictingAlts(_ reportedAlts: BitSet?, _ configs: ATNConfigSet) -> BitSet {
+ return reportedAlts ?? configs.getAltBitSet()
+ }
+}
+
+
+fileprivate func getTextInInterval(_ recognizer: Parser, _ startIndex: Int, _ stopIndex: Int) -> String {
+ do {
+ return try recognizer.getTokenStream()?.getText(Interval.of(startIndex, stopIndex)) ?? ""
+ }
+ catch {
+ return ""
}
-
}
diff --git a/runtime/Swift/Sources/Antlr4/FailedPredicateException.swift b/runtime/Swift/Sources/Antlr4/FailedPredicateException.swift
index 529ce372f..c4e2bd310 100644
--- a/runtime/Swift/Sources/Antlr4/FailedPredicateException.swift
+++ b/runtime/Swift/Sources/Antlr4/FailedPredicateException.swift
@@ -1,36 +1,28 @@
+///
/// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
/// Use of this file is governed by the BSD 3-clause license that
/// can be found in the LICENSE.txt file in the project root.
+///
+///
/// A semantic predicate failed during validation. Validation of predicates
/// occurs when normally parsing the alternative just like matching a token.
/// Disambiguating predicate evaluation occurs when we test a predicate during
/// prediction.
-public class FailedPredicateException: RecognitionException {
+///
+public class FailedPredicateException: RecognitionException {
private final var ruleIndex: Int
private final var predicateIndex: Int
private final var predicate: String?
- public convenience init(_ recognizer: Parser) throws {
- try self.init(recognizer, nil)
- }
+ public init(_ recognizer: Parser, _ predicate: String? = nil, _ message: String? = nil) {
+ let s = recognizer.getInterpreter().atn.states[recognizer.getState()]!
- public convenience init(_ recognizer: Parser, _ predicate: String?)throws {
- try self.init(recognizer, predicate, nil)
- }
-
- public init(_ recognizer: Parser,
- _ predicate: String?,
- _ message: String?) throws
- {
-
- let s: ATNState = recognizer.getInterpreter().atn.states[recognizer.getState()]!
-
- let trans: AbstractPredicateTransition = s.transition(0) as! AbstractPredicateTransition
- if trans is PredicateTransition {
- self.ruleIndex = (trans as! PredicateTransition).ruleIndex
- self.predicateIndex = (trans as! PredicateTransition).predIndex
+ let trans = s.transition(0) as! AbstractPredicateTransition
+ if let predex = trans as? PredicateTransition {
+ self.ruleIndex = predex.ruleIndex
+ self.predicateIndex = predex.predIndex
}
else {
self.ruleIndex = 0
@@ -39,9 +31,10 @@ public class FailedPredicateException: RecognitionException
self.predicate = predicate
- super.init(FailedPredicateException.formatMessage(predicate!, message), recognizer , recognizer.getInputStream()!, recognizer._ctx)
-
- try self.setOffendingToken(recognizer.getCurrentToken())
+ super.init(recognizer, recognizer.getInputStream()!, recognizer._ctx, FailedPredicateException.formatMessage(predicate, message))
+ if let token = try? recognizer.getCurrentToken() {
+ setOffendingToken(token)
+ }
}
public func getRuleIndex() -> Int {
@@ -52,17 +45,17 @@ public class FailedPredicateException: RecognitionException
return predicateIndex
}
-
public func getPredicate() -> String? {
return predicate
}
- private static func formatMessage(_ predicate: String, _ message: String?) -> String {
+ private static func formatMessage(_ predicate: String?, _ message: String?) -> String {
if message != nil {
return message!
}
- return "failed predicate: {predicate}?" //String.format(Locale.getDefault(), "failed predicate: {%s}?", predicate);
+ let predstr = predicate ?? ""
+ return "failed predicate: {\(predstr)}?"
}
}
diff --git a/runtime/Swift/Sources/Antlr4/InputMismatchException.swift b/runtime/Swift/Sources/Antlr4/InputMismatchException.swift
index 32f9ac89e..8af781702 100644
--- a/runtime/Swift/Sources/Antlr4/InputMismatchException.swift
+++ b/runtime/Swift/Sources/Antlr4/InputMismatchException.swift
@@ -1,14 +1,20 @@
+///
/// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
/// Use of this file is governed by the BSD 3-clause license that
/// can be found in the LICENSE.txt file in the project root.
+///
+///
/// This signifies any kind of mismatched input exceptions such as
/// when the current input does not match the expected token.
+///
-public class InputMismatchException: RecognitionException {
- public init(_ recognizer: Parser) throws {
+public class InputMismatchException: RecognitionException {
+ public init(_ recognizer: Parser) {
super.init(recognizer, recognizer.getInputStream()!, recognizer._ctx)
- self.setOffendingToken(try recognizer.getCurrentToken())
+ if let token = try? recognizer.getCurrentToken() {
+ setOffendingToken(token)
+ }
}
}
diff --git a/runtime/Swift/Sources/Antlr4/IntStream.swift b/runtime/Swift/Sources/Antlr4/IntStream.swift
index 69af63969..d56a3f62c 100644
--- a/runtime/Swift/Sources/Antlr4/IntStream.swift
+++ b/runtime/Swift/Sources/Antlr4/IntStream.swift
@@ -1,117 +1,112 @@
+///
/// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
/// Use of this file is governed by the BSD 3-clause license that
/// can be found in the LICENSE.txt file in the project root.
+///
+///
/// A simple stream of symbols whose values are represented as integers. This
-/// interface provides marked ranges with support for a minimum level
+/// interface provides __marked ranges__ with support for a minimum level
/// of buffering necessary to implement arbitrary lookahead during prediction.
-/// For more information on marked ranges, see {@link #mark}.
-///
-/// Initializing Methods: Some methods in this interface have
+/// For more information on marked ranges, see _#mark_.
+///
+/// __Initializing Methods:__ Some methods in this interface have
/// unspecified behavior if no call to an initializing method has occurred after
-/// the stream was constructed. The following is a list of initializing methods:
-///
-///
-/// - {@link #LA}
-/// - {@link #consume}
-/// - {@link #size}
-///
-
+/// the stream was constructed. The following is a list of initializing methods:
+///
+/// * _#LA_
+/// * _#consume_
+/// * _#size_
+///
public protocol IntStream: class {
- /// The value returned by {@link #LA LA()} when the end of the stream is
- /// reached.
- //let EOF : Int = -1;
-
- /// The value returned by {@link #getSourceName} when the actual name of the
- /// underlying source is not known.
- //let UNKNOWN_SOURCE_NAME : String = "";
+ ///
/// Consumes the current symbol in the stream. This method has the following
/// effects:
- ///
- ///
- /// - Forward movement: The value of {@link #index index()}
- /// before calling this method is less than the value of {@code index()}
- /// after calling this method.
- /// - Ordered lookahead: The value of {@code LA(1)} before
- /// calling this method becomes the value of {@code LA(-1)} after calling
- /// this method.
- ///
- ///
- /// Note that calling this method does not guarantee that {@code index()} is
+ ///
+ /// * __Forward movement:__ The value of _#index index()_
+ /// before calling this method is less than the value of `index()`
+ /// after calling this method.
+ /// * __Ordered lookahead:__ The value of `LA(1)` before
+ /// calling this method becomes the value of `LA(-1)` after calling
+ /// this method.
+ ///
+ /// Note that calling this method does not guarantee that `index()` is
/// incremented by exactly 1, as that would preclude the ability to implement
- /// filtering streams (e.g. {@link org.antlr.v4.runtime.CommonTokenStream} which distinguishes
+ /// filtering streams (e.g. _org.antlr.v4.runtime.CommonTokenStream_ which distinguishes
/// between "on-channel" and "off-channel" tokens).
- ///
- /// - IllegalStateException if an attempt is made to consume the the
- /// end of the stream (i.e. if {@code LA(1)==}{@link #EOF EOF} before calling
- /// {@code consume}).
+ ///
+ /// - throws: _ANTLRError.illegalState_ if an attempt is made to consume the
+ /// end of the stream (i.e. if `LA(1)==`_#EOF EOF_ before calling
+ /// `consume`).
+ ///
func consume() throws
- /// Gets the value of the symbol at offset {@code i} from the current
- /// position. When {@code i==1}, this method returns the value of the current
+ ///
+ /// Gets the value of the symbol at offset `i` from the current
+ /// position. When `i==1`, this method returns the value of the current
/// symbol in the stream (which is the next symbol to be consumed). When
- /// {@code i==-1}, this method returns the value of the previously read
+ /// `i==-1`, this method returns the value of the previously read
/// symbol in the stream. It is not valid to call this method with
- /// {@code i==0}, but the specific behavior is unspecified because this
+ /// `i==0`, but the specific behavior is unspecified because this
/// method is frequently called from performance-critical code.
- ///
- /// This method is guaranteed to succeed if any of the following are true:
- ///
- ///
- /// - {@code i>0}
- /// - {@code i==-1} and {@link #index index()} returns a value greater
- /// than the value of {@code index()} after the stream was constructed
- /// and {@code LA(1)} was called in that order. Specifying the current
- /// {@code index()} relative to the index after the stream was created
+ ///
+ /// This method is guaranteed to succeed if any of the following are true:
+ ///
+ /// * `i>0`
+ /// * `i==-1` and _#index index()_ returns a value greater
+ /// than the value of `index()` after the stream was constructed
+ /// and `LA(1)` was called in that order. Specifying the current
+ /// `index()` relative to the index after the stream was created
/// allows for filtering implementations that do not return every symbol
- /// from the underlying source. Specifying the call to {@code LA(1)}
- /// allows for lazily initialized streams.
- /// - {@code LA(i)} refers to a symbol consumed within a marked region
- /// that has not yet been released.
- ///
- ///
- /// If {@code i} represents a position at or beyond the end of the stream,
- /// this method returns {@link #EOF}.
- ///
- /// The return value is unspecified if {@code i<0} and fewer than {@code -i}
- /// calls to {@link #consume consume()} have occurred from the beginning of
- /// the stream before calling this method.
- ///
- /// - UnsupportedOperationException if the stream does not support
+ /// from the underlying source. Specifying the call to `LA(1)`
+ /// allows for lazily initialized streams.
+ /// * `LA(i)` refers to a symbol consumed within a marked region
+ /// that has not yet been released.
+ ///
+ /// If `i` represents a position at or beyond the end of the stream,
+ /// this method returns _#EOF_.
+ ///
+ /// The return value is unspecified if `i<0` and fewer than `-i`
+ /// calls to _#consume consume()_ have occurred from the beginning of
+ /// the stream before calling this method.
+ ///
+ /// - throws: _ANTLRError.unsupportedOperation_ if the stream does not support
/// retrieving the value of the specified symbol
+ ///
func LA(_ i: Int) throws -> Int
- /// A mark provides a guarantee that {@link #seek seek()} operations will be
- /// valid over a "marked range" extending from the index where {@code mark()}
- /// was called to the current {@link #index index()}. This allows the use of
+ ///
+ /// A mark provides a guarantee that _#seek seek()_ operations will be
+ /// valid over a "marked range" extending from the index where `mark()`
+ /// was called to the current _#index index()_. This allows the use of
/// streaming input sources by specifying the minimum buffering requirements
/// to support arbitrary lookahead during prediction.
- ///
- /// The returned mark is an opaque handle (type {@code int}) which is passed
- /// to {@link #release release()} when the guarantees provided by the marked
+ ///
+ /// The returned mark is an opaque handle (type `int`) which is passed
+ /// to _#release release()_ when the guarantees provided by the marked
/// range are no longer necessary. When calls to
- /// {@code mark()}/{@code release()} are nested, the marks must be released
+ /// `mark()`/`release()` are nested, the marks must be released
/// in reverse order of which they were obtained. Since marked regions are
/// used during performance-critical sections of prediction, the specific
/// behavior of invalid usage is unspecified (i.e. a mark is not released, or
/// a mark is released twice, or marks are not released in reverse order from
- /// which they were created).
- ///
- /// The behavior of this method is unspecified if no call to an
- /// {@link org.antlr.v4.runtime.IntStream initializing method} has occurred after this stream was
- /// constructed.
- ///
- /// This method does not change the current position in the input stream.
- ///
- /// The following example shows the use of {@link #mark mark()},
- /// {@link #release release(mark)}, {@link #index index()}, and
- /// {@link #seek seek(index)} as part of an operation to safely work within a
+ /// which they were created).
+ ///
+ /// The behavior of this method is unspecified if no call to an
+ /// _org.antlr.v4.runtime.IntStream initializing method_ has occurred after this stream was
+ /// constructed.
+ ///
+ /// This method does not change the current position in the input stream.
+ ///
+ /// The following example shows the use of _#mark mark()_,
+ /// _#release release(mark)_, _#index index()_, and
+ /// _#seek seek(index)_ as part of an operation to safely work within a
/// marked region, then restore the stream position to its original value and
- /// release the mark.
- ///
+ /// release the mark.
+ ///
/// IntStream stream = ...;
/// int index = -1;
/// int mark = stream.mark();
@@ -124,70 +119,78 @@ public protocol IntStream: class {
/// }
/// stream.release(mark);
/// }
- ///
- ///
+ ///
+ ///
/// - returns: An opaque marker which should be passed to
- /// {@link #release release()} when the marked range is no longer required.
+ /// _#release release()_ when the marked range is no longer required.
+ ///
func mark() -> Int
+ ///
/// This method releases a marked range created by a call to
- /// {@link #mark mark()}. Calls to {@code release()} must appear in the
- /// reverse order of the corresponding calls to {@code mark()}. If a mark is
+ /// _#mark mark()_. Calls to `release()` must appear in the
+ /// reverse order of the corresponding calls to `mark()`. If a mark is
/// released twice, or if marks are not released in reverse order of the
- /// corresponding calls to {@code mark()}, the behavior is unspecified.
- ///
- /// For more information and an example, see {@link #mark}.
- ///
- /// - parameter marker: A marker returned by a call to {@code mark()}.
+ /// corresponding calls to `mark()`, the behavior is unspecified.
+ ///
+ /// For more information and an example, see _#mark_.
+ ///
+ /// - parameter marker: A marker returned by a call to `mark()`.
/// - seealso: #mark
+ ///
func release(_ marker: Int) throws
+ ///
/// Return the index into the stream of the input symbol referred to by
- /// {@code LA(1)}.
- ///
- /// The behavior of this method is unspecified if no call to an
- /// {@link org.antlr.v4.runtime.IntStream initializing method} has occurred after this stream was
- /// constructed.
+ /// `LA(1)`.
+ ///
+ /// The behavior of this method is unspecified if no call to an
+ /// _org.antlr.v4.runtime.IntStream initializing method_ has occurred after this stream was
+ /// constructed.
+ ///
func index() -> Int
- /// Set the input cursor to the position indicated by {@code index}. If the
+ ///
+ /// Set the input cursor to the position indicated by `index`. If the
/// specified index lies past the end of the stream, the operation behaves as
- /// though {@code index} was the index of the EOF symbol. After this method
+ /// though `index` was the index of the EOF symbol. After this method
/// returns without throwing an exception, then at least one of the following
/// will be true.
- ///
- ///
- /// - {@link #index index()} will return the index of the first symbol
- /// appearing at or after the specified {@code index}. Specifically,
+ ///
+ /// * _#index index()_ will return the index of the first symbol
+ /// appearing at or after the specified `index`. Specifically,
/// implementations which filter their sources should automatically
- /// adjust {@code index} forward the minimum amount required for the
- /// operation to target a non-ignored symbol.
- /// - {@code LA(1)} returns {@link #EOF}
- ///
- ///
- /// This operation is guaranteed to not throw an exception if {@code index}
+ /// adjust `index` forward the minimum amount required for the
+ /// operation to target a non-ignored symbol.
+ /// * `LA(1)` returns _#EOF_
+ ///
+ /// This operation is guaranteed to not throw an exception if `index`
/// lies within a marked region. For more information on marked regions, see
- /// {@link #mark}. The behavior of this method is unspecified if no call to
- /// an {@link org.antlr.v4.runtime.IntStream initializing method} has occurred after this stream
+ /// _#mark_. The behavior of this method is unspecified if no call to
+ /// an _org.antlr.v4.runtime.IntStream initializing method_ has occurred after this stream
/// was constructed.
- ///
+ ///
/// - parameter index: The absolute index to seek to.
- ///
- /// - IllegalArgumentException if {@code index} is less than 0
- /// - UnsupportedOperationException if the stream does not support
+ ///
+ /// - throws: _ANTLRError.illegalArgument_ if `index` is less than 0
+ /// - throws: _ANTLRError.unsupportedOperation_ if the stream does not support
/// seeking to the specified index
+ ///
func seek(_ index: Int) throws
+ ///
/// Returns the total number of symbols in the stream, including a single EOF
/// symbol.
- ///
- /// - UnsupportedOperationException if the size of the stream is
+ ///
+ /// - throws: _ANTLRError.unsupportedOperation_ if the size of the stream is
/// unknown.
+ ///
func size() -> Int
+ ///
/// Gets the name of the underlying symbol source. This method returns a
/// non-null, non-empty string. If such a name is not known, this method
- /// returns {@link #UNKNOWN_SOURCE_NAME}.
-
+ /// returns _#UNKNOWN_SOURCE_NAME_.
+ ///
func getSourceName() -> String
}
diff --git a/runtime/Swift/Sources/Antlr4/InterpreterRuleContext.swift b/runtime/Swift/Sources/Antlr4/InterpreterRuleContext.swift
index ea4789386..b881a94ea 100644
--- a/runtime/Swift/Sources/Antlr4/InterpreterRuleContext.swift
+++ b/runtime/Swift/Sources/Antlr4/InterpreterRuleContext.swift
@@ -1,33 +1,41 @@
+///
/// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
/// Use of this file is governed by the BSD 3-clause license that
/// can be found in the LICENSE.txt file in the project root.
+///
-/// This class extends {@link org.antlr.v4.runtime.ParserRuleContext} by allowing the value of
-/// {@link #getRuleIndex} to be explicitly set for the context.
-///
-///
-/// {@link org.antlr.v4.runtime.ParserRuleContext} does not include field storage for the rule index
+///
+/// This class extends _org.antlr.v4.runtime.ParserRuleContext_ by allowing the value of
+/// _#getRuleIndex_ to be explicitly set for the context.
+///
+///
+/// _org.antlr.v4.runtime.ParserRuleContext_ does not include field storage for the rule index
/// since the context classes created by the code generator override the
-/// {@link #getRuleIndex} method to return the correct value for that context.
+/// _#getRuleIndex_ method to return the correct value for that context.
/// Since the parser interpreter does not use the context classes generated for a
/// parser, this class (with slightly more memory overhead per node) is used to
-/// provide equivalent functionality.
+/// provide equivalent functionality.
+///
public class InterpreterRuleContext: ParserRuleContext {
- /// This is the backing field for {@link #getRuleIndex}.
+ ///
+ /// This is the backing field for _#getRuleIndex_.
+ ///
private var ruleIndex: Int = -1
public override init() {
super.init()
}
- /// Constructs a new {@link org.antlr.v4.runtime.InterpreterRuleContext} with the specified
+ ///
+ /// Constructs a new _org.antlr.v4.runtime.InterpreterRuleContext_ with the specified
/// parent, invoking state, and rule index.
- ///
+ ///
/// - parameter parent: The parent context.
/// - parameter invokingStateNumber: The invoking state number.
/// - parameter ruleIndex: The rule index for the current context.
+ ///
public init(_ parent: ParserRuleContext?,
_ invokingStateNumber: Int,
_ ruleIndex: Int) {
@@ -41,9 +49,11 @@ public class InterpreterRuleContext: ParserRuleContext {
return ruleIndex
}
- /// Copy a {@link org.antlr.v4.runtime.ParserRuleContext} or {@link org.antlr.v4.runtime.InterpreterRuleContext}
- /// stack to a {@link org.antlr.v4.runtime.InterpreterRuleContext} tree.
- /// Return {@link null} if {@code ctx} is null.
+ ///
+ /// Copy a _org.antlr.v4.runtime.ParserRuleContext_ or _org.antlr.v4.runtime.InterpreterRuleContext_
+ /// stack to a _org.antlr.v4.runtime.InterpreterRuleContext_ tree.
+ /// Return _null_ if `ctx` is null.
+ ///
public static func fromParserRuleContext(_ ctx: ParserRuleContext?) -> InterpreterRuleContext? {
guard let ctx = ctx else {
return nil
diff --git a/runtime/Swift/Sources/Antlr4/Lexer.swift b/runtime/Swift/Sources/Antlr4/Lexer.swift
index 9c251ba66..e47aa1a93 100644
--- a/runtime/Swift/Sources/Antlr4/Lexer.swift
+++ b/runtime/Swift/Sources/Antlr4/Lexer.swift
@@ -1,35 +1,39 @@
+///
/// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
/// Use of this file is governed by the BSD 3-clause license that
/// can be found in the LICENSE.txt file in the project root.
+///
+///
/// A lexer is recognizer that draws input symbols from a character stream.
/// lexer grammars result in a subclass of this object. A Lexer object
/// uses simplified match() and error recovery mechanisms in the interest
/// of speed.
+///
import Foundation
-//public class Lexer : Recognizer
+open class Lexer: Recognizer, TokenSource {
+ public static let EOF = -1
+ public static let DEFAULT_MODE = 0
+ public static let MORE = -2
+ public static let SKIP = -3
-open class Lexer: Recognizer
- , TokenSource {
- public static let EOF: Int = -1
- public static let DEFAULT_MODE: Int = 0
- public static let MORE: Int = -2
- public static let SKIP: Int = -3
-
- public static let DEFAULT_TOKEN_CHANNEL: Int = CommonToken.DEFAULT_CHANNEL
- public static let HIDDEN: Int = CommonToken.HIDDEN_CHANNEL
- public static let MIN_CHAR_VALUE: Int = Character.MIN_VALUE;
- public static let MAX_CHAR_VALUE: Int = Character.MAX_VALUE;
+ public static let DEFAULT_TOKEN_CHANNEL = CommonToken.DEFAULT_CHANNEL
+ public static let HIDDEN = CommonToken.HIDDEN_CHANNEL
+ public static let MIN_CHAR_VALUE = Character.MIN_VALUE;
+ public static let MAX_CHAR_VALUE = Character.MAX_VALUE;
public var _input: CharStream?
- internal var _tokenFactorySourcePair: (TokenSource?, CharStream?)
+ internal var _tokenFactorySourcePair: TokenSourceAndStream
+ ///
/// How to create token objects
- internal var _factory: TokenFactory = CommonTokenFactory.DEFAULT
+ ///
+ internal var _factory = CommonTokenFactory.DEFAULT
+ ///
/// The goal of all lexer rules/methods is to create a token object.
/// This is an instance variable as multiple rules may collaborate to
/// create a single token. nextToken will return this object after
@@ -37,44 +41,63 @@ open class Lexer: Recognizer
/// emissions, then set this to the last token to be matched or
/// something nonnull so that the auto token emit mechanism will not
/// emit another token.
+ ///
public var _token: Token?
+ ///
/// What character index in the stream did the current token start at?
/// Needed, for example, to get the text for current token. Set at
/// the start of nextToken.
- public var _tokenStartCharIndex: Int = -1
+ ///
+ public var _tokenStartCharIndex = -1
+ ///
/// The line on which the first character of the token resides
- public var _tokenStartLine: Int = 0
+ ///
+ public var _tokenStartLine = 0
+ ///
/// The character position of first character within the line
- public var _tokenStartCharPositionInLine: Int = 0
+ ///
+ public var _tokenStartCharPositionInLine = 0
+ ///
/// Once we see EOF on char stream, next token will be EOF.
/// If you have DONE : EOF ; then you see DONE EOF.
- public var _hitEOF: Bool = false
+ ///
+ public var _hitEOF = false
+ ///
/// The channel number for the current token
- public var _channel: Int = 0
+ ///
+ public var _channel = 0
+ ///
/// The token type for the current token
- public var _type: Int = 0
+ ///
+ public var _type = 0
- public final var _modeStack: Stack = Stack()
- public var _mode: Int = Lexer.DEFAULT_MODE
+ public final var _modeStack = Stack()
+ public var _mode = Lexer.DEFAULT_MODE
+ ///
/// You can set the text for the current token to override what is in
/// the input char buffer. Use setText() or can set this instance var.
+ ///
public var _text: String?
public override init() {
+ self._tokenFactorySourcePair = TokenSourceAndStream()
+ super.init()
+ self._tokenFactorySourcePair.tokenSource = self
}
public init(_ input: CharStream) {
-
- super.init()
self._input = input
- self._tokenFactorySourcePair = (self, input)
+ self._tokenFactorySourcePair = TokenSourceAndStream()
+ super.init()
+ self._tokenFactorySourcePair.tokenSource = self
+ self._tokenFactorySourcePair.stream = input
}
open func reset() throws {
@@ -97,8 +120,10 @@ open class Lexer: Recognizer
getInterpreter().reset()
}
+ ///
/// Return a token from this source; i.e., match a token on the char
/// stream.
+ ///
open func nextToken() throws -> Token {
guard let _input = _input else {
@@ -107,7 +132,7 @@ open class Lexer: Recognizer
// Mark start location in char stream so unbuffered streams are
// guaranteed at least have text of current token
- var tokenStartMarker: Int = _input.mark()
+ var tokenStartMarker = _input.mark()
defer {
// make sure we release marker after match or
// unbuffered char stream will keep buffering
@@ -158,11 +183,13 @@ open class Lexer: Recognizer
}
+ ///
/// Instruct the lexer to skip creating a token for current lexer rule
/// and look for another token. nextToken() knows to keep looking when
/// a lexer rule finishes with token set to SKIP_TOKEN. Recall that
/// if token==null at end of any token rule, it creates one for you
/// and emits it.
+ ///
open func skip() {
_type = Lexer.SKIP
}
@@ -205,14 +232,16 @@ open class Lexer: Recognizer
return _factory
}
+ ///
/// Set the char stream and reset the lexer
+ ///
open override func setInputStream(_ input: IntStream) throws {
self._input = nil
- self._tokenFactorySourcePair = (self, _input!)
+ self._tokenFactorySourcePair = makeTokenSourceAndStream()
try reset()
self._input = input as? CharStream
- self._tokenFactorySourcePair = (self, _input!)
+ self._tokenFactorySourcePair = makeTokenSourceAndStream()
}
@@ -225,40 +254,45 @@ open class Lexer: Recognizer
return _input
}
+ ///
/// By default does not support multiple emits per nextToken invocation
/// for efficiency reasons. Subclass and override this method, nextToken,
/// and getToken (to push tokens into a list and pull from that list
/// rather than a single variable as this implementation does).
+ ///
open func emit(_ token: Token) {
//System.err.println("emit "+token);
self._token = token
}
+ ///
/// The standard method called to automatically emit a token at the
/// outermost lexical rule. The token object should point into the
/// char buffer start..stop. If there is a text override in 'text',
/// use that to set the token's text. Override this method to emit
/// custom Token objects or provide a new factory.
+ ///
@discardableResult
open func emit() -> Token {
- let t: Token = _factory.create(_tokenFactorySourcePair, _type, _text, _channel, _tokenStartCharIndex, getCharIndex() - 1,
- _tokenStartLine, _tokenStartCharPositionInLine)
+ let t = _factory.create(_tokenFactorySourcePair, _type, _text, _channel, _tokenStartCharIndex, getCharIndex() - 1, _tokenStartLine, _tokenStartCharPositionInLine)
emit(t)
return t
}
+
@discardableResult
open func emitEOF() -> Token {
- let cpos: Int = getCharPositionInLine()
- let line: Int = getLine()
- let eof: Token = _factory.create(
- _tokenFactorySourcePair,
- CommonToken.EOF,
- nil,
- CommonToken.DEFAULT_CHANNEL,
- _input!.index(),
- _input!.index() - 1,
- line,
- cpos)
+ let cpos = getCharPositionInLine()
+ let line = getLine()
+ let idx = _input!.index()
+ let eof = _factory.create(
+ _tokenFactorySourcePair,
+ CommonToken.EOF,
+ nil,
+ CommonToken.DEFAULT_CHANNEL,
+ idx,
+ idx - 1,
+ line,
+ cpos)
emit(eof)
return eof
}
@@ -281,13 +315,17 @@ open class Lexer: Recognizer
getInterpreter().setCharPositionInLine(charPositionInLine)
}
+ ///
/// What is the index of the current character of lookahead?
+ ///
open func getCharIndex() -> Int {
return _input!.index()
}
+ ///
/// Return the text matched so far for the current token or any
/// text override.
+ ///
open func getText() -> String {
if _text != nil {
return _text!
@@ -295,13 +333,17 @@ open class Lexer: Recognizer
return getInterpreter().getText(_input!)
}
+ ///
/// Set the complete text of this token; it wipes any previous
/// changes to the text.
+ ///
open func setText(_ text: String) {
self._text = text
}
+ ///
/// Override if emitting multiple tokens.
+ ///
open func getToken() -> Token {
return _token!
}
@@ -334,19 +376,13 @@ open class Lexer: Recognizer
return nil
}
- /// Used to print out token names like ID during debugging and
- /// error reporting. The generated parsers implement a method
- /// that overrides this to point to their String[] tokenNames.
- override
- open func getTokenNames() -> [String?]? {
- return nil
- }
-
+ ///
/// Return a list of all Token objects in input char stream.
/// Forces load of all tokens. Does not include EOF token.
- open func getAllTokens() throws -> Array {
- var tokens: Array = Array()
- var t: Token = try nextToken()
+ ///
+ open func getAllTokens() throws -> [Token] {
+ var tokens = [Token]()
+ var t = try nextToken()
while t.getType() != CommonToken.EOF {
tokens.append(t)
t = try nextToken()
@@ -361,32 +397,35 @@ open class Lexer: Recognizer
}
}
- open func notifyListeners(_ e: LexerNoViableAltException, recognizer: Recognizer) {
+ open func notifyListeners(_ e: LexerNoViableAltException, recognizer: Recognizer) {
- let text: String = _input!.getText(Interval.of(_tokenStartCharIndex, _input!.index()))
- let msg: String = "token recognition error at: '\(getErrorDisplay(text))'"
+ let text: String
+ do {
+ text = try _input!.getText(Interval.of(_tokenStartCharIndex, _input!.index()))
+ }
+ catch {
+ text = ""
+ }
+ let msg = "token recognition error at: '\(getErrorDisplay(text))'"
- let listener: ANTLRErrorListener = getErrorListenerDispatch()
+ let listener = getErrorListenerDispatch()
listener.syntaxError(recognizer, nil, _tokenStartLine, _tokenStartCharPositionInLine, msg, e)
}
open func getErrorDisplay(_ s: String) -> String {
- let buf: StringBuilder = StringBuilder()
- for c: Character in s.characters {
+ let buf = StringBuilder()
+ for c in s.characters {
buf.append(getErrorDisplay(c))
}
return buf.toString()
}
open func getErrorDisplay(_ c: Character) -> String {
- var s: String = String(c) // String.valueOf(c as Character);
+ var s = String(c)
if c.integerValue == CommonToken.EOF {
s = ""
}
switch s {
-// case CommonToken.EOF :
-// s = "";
-// break;
case "\n":
s = "\\n"
case "\t":
@@ -404,16 +443,18 @@ open class Lexer: Recognizer
return "'\(s)'"
}
+ ///
/// Lexers can normally match any char in it's vocabulary after matching
/// a token, so do the easy thing and just kill a character and hope
/// it all works out. You can instead use the rule invocation stack
/// to do sophisticated error recovery if you are in a fragment rule.
- //public func recover(re : RecognitionException) {
-
+ ///
open func recover(_ re: AnyObject) throws {
- //System.out.println("consuming char "+(char)input.LA(1)+" during recovery");
- //re.printStackTrace();
// TODO: Do we lose character or line position information?
try _input!.consume()
}
+
+ internal func makeTokenSourceAndStream() -> TokenSourceAndStream {
+ return TokenSourceAndStream(self, _input)
+ }
}
diff --git a/runtime/Swift/Sources/Antlr4/LexerInterpreter.swift b/runtime/Swift/Sources/Antlr4/LexerInterpreter.swift
index 55c2f26e8..99d9f72d8 100644
--- a/runtime/Swift/Sources/Antlr4/LexerInterpreter.swift
+++ b/runtime/Swift/Sources/Antlr4/LexerInterpreter.swift
@@ -1,24 +1,23 @@
+///
/// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
/// Use of this file is governed by the BSD 3-clause license that
/// can be found in the LICENSE.txt file in the project root.
+///
public class LexerInterpreter: Lexer {
internal final var grammarFileName: String
internal final var atn: ATN
- ////@Deprecated
- internal final var tokenNames: [String?]?
internal final var ruleNames: [String]
internal final var channelNames: [String]
internal final var modeNames: [String]
-
private final var vocabulary: Vocabulary?
internal final var _decisionToDFA: [DFA]
- internal final var _sharedContextCache: PredictionContextCache =
- PredictionContextCache()
+ internal final var _sharedContextCache = PredictionContextCache()
+
// public override init() {
// super.init()}
@@ -36,13 +35,6 @@ public class LexerInterpreter: Lexer {
self.grammarFileName = grammarFileName
self.atn = atn
- self.tokenNames = [String?]()
- //new String[atn.maxTokenType];
- let length = tokenNames!.count
- for i in 0.. [String?]? {
- return tokenNames
- }
-
override
public func getRuleNames() -> [String] {
return ruleNames
diff --git a/runtime/Swift/Sources/Antlr4/LexerNoViableAltException.swift b/runtime/Swift/Sources/Antlr4/LexerNoViableAltException.swift
index 392400e37..9f560c6c6 100644
--- a/runtime/Swift/Sources/Antlr4/LexerNoViableAltException.swift
+++ b/runtime/Swift/Sources/Antlr4/LexerNoViableAltException.swift
@@ -1,13 +1,19 @@
+///
/// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
/// Use of this file is governed by the BSD 3-clause license that
/// can be found in the LICENSE.txt file in the project root.
+///
-public class LexerNoViableAltException: RecognitionException, CustomStringConvertible {
+public class LexerNoViableAltException: RecognitionException, CustomStringConvertible {
+ ///
/// Matching attempted at what input index?
+ ///
private final var startIndex: Int
+ ///
/// Which configurations did we try at input.index() that couldn't match input.LA(1)?
+ ///
private final var deadEndConfigs: ATNConfigSet
public init(_ lexer: Lexer?,
@@ -25,23 +31,15 @@ public class LexerNoViableAltException: RecognitionException,
return startIndex
}
-
public func getDeadEndConfigs() -> ATNConfigSet {
return deadEndConfigs
}
- //override
-// public func getInputStream() -> CharStream {
-// return super.getInputStream() as! CharStream;
-// }
-
-
public var description: String {
- var symbol: String = ""
- if startIndex >= 0 && startIndex < getInputStream().size() {
- let charStream: CharStream = getInputStream() as! CharStream
- let interval: Interval = Interval.of(startIndex, startIndex)
- symbol = charStream.getText(interval)
+ var symbol = ""
+ if let charStream = getInputStream() as? CharStream, startIndex >= 0 && startIndex < charStream.size() {
+ let interval = Interval.of(startIndex, startIndex)
+ symbol = try! charStream.getText(interval)
symbol = Utils.escapeWhitespace(symbol, false)
}
diff --git a/runtime/Swift/Sources/Antlr4/ListTokenSource.swift b/runtime/Swift/Sources/Antlr4/ListTokenSource.swift
index a553642cd..0863e16ee 100644
--- a/runtime/Swift/Sources/Antlr4/ListTokenSource.swift
+++ b/runtime/Swift/Sources/Antlr4/ListTokenSource.swift
@@ -1,90 +1,98 @@
+///
/// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
/// Use of this file is governed by the BSD 3-clause license that
/// can be found in the LICENSE.txt file in the project root.
+///
-/// Provides an implementation of {@link org.antlr.v4.runtime.TokenSource} as a wrapper around a list
-/// of {@link org.antlr.v4.runtime.Token} objects.
-///
-/// If the final token in the list is an {@link org.antlr.v4.runtime.Token#EOF} token, it will be used
-/// as the EOF token for every call to {@link #nextToken} after the end of the
-/// list is reached. Otherwise, an EOF token will be created.
+///
+/// Provides an implementation of _org.antlr.v4.runtime.TokenSource_ as a wrapper around a list
+/// of _org.antlr.v4.runtime.Token_ objects.
+///
+/// If the final token in the list is an _org.antlr.v4.runtime.Token#EOF_ token, it will be used
+/// as the EOF token for every call to _#nextToken_ after the end of the
+/// list is reached. Otherwise, an EOF token will be created.
+///
public class ListTokenSource: TokenSource {
- /// The wrapped collection of {@link org.antlr.v4.runtime.Token} objects to return.
- internal final var tokens: Array
+ ///
+ /// The wrapped collection of _org.antlr.v4.runtime.Token_ objects to return.
+ ///
+ internal final var tokens: [Token]
- /// The name of the input source. If this value is {@code null}, a call to
- /// {@link #getSourceName} should return the source name used to create the
- /// the next token in {@link #tokens} (or the previous token if the end of
+ ///
+ /// The name of the input source. If this value is `null`, a call to
+ /// _#getSourceName_ should return the source name used to create the
+ /// the next token in _#tokens_ (or the previous token if the end of
/// the input has been reached).
+ ///
private final var sourceName: String?
- /// The index into {@link #tokens} of token to return by the next call to
- /// {@link #nextToken}. The end of the input is indicated by this value
- /// being greater than or equal to the number of items in {@link #tokens}.
- internal var i: Int = 0
+ ///
+ /// The index into _#tokens_ of token to return by the next call to
+ /// _#nextToken_. The end of the input is indicated by this value
+ /// being greater than or equal to the number of items in _#tokens_.
+ ///
+ internal var i = 0
+ ///
/// This field caches the EOF token for the token source.
+ ///
internal var eofToken: Token?
- /// This is the backing field for {@link #getTokenFactory} and
- /// {@link setTokenFactory}.
- private var _factory: TokenFactory = CommonTokenFactory.DEFAULT
+ ///
+ /// This is the backing field for _#getTokenFactory_ and
+ /// _setTokenFactory_.
+ ///
+ private var _factory = CommonTokenFactory.DEFAULT
- /// Constructs a new {@link org.antlr.v4.runtime.ListTokenSource} instance from the specified
- /// collection of {@link org.antlr.v4.runtime.Token} objects.
- ///
- /// - parameter tokens: The collection of {@link org.antlr.v4.runtime.Token} objects to provide as a
- /// {@link org.antlr.v4.runtime.TokenSource}.
- /// - NullPointerException if {@code tokens} is {@code null}
- public convenience init(_ tokens: Array) {
+ ///
+ /// Constructs a new _org.antlr.v4.runtime.ListTokenSource_ instance from the specified
+ /// collection of _org.antlr.v4.runtime.Token_ objects.
+ ///
+ /// - parameter tokens: The collection of _org.antlr.v4.runtime.Token_ objects to provide as a
+ /// _org.antlr.v4.runtime.TokenSource_.
+ ///
+ public convenience init(_ tokens: [Token]) {
self.init(tokens, nil)
}
- /// Constructs a new {@link org.antlr.v4.runtime.ListTokenSource} instance from the specified
- /// collection of {@link org.antlr.v4.runtime.Token} objects and source name.
- ///
- /// - parameter tokens: The collection of {@link org.antlr.v4.runtime.Token} objects to provide as a
- /// {@link org.antlr.v4.runtime.TokenSource}.
- /// - parameter sourceName: The name of the {@link org.antlr.v4.runtime.TokenSource}. If this value is
- /// {@code null}, {@link #getSourceName} will attempt to infer the name from
- /// the next {@link org.antlr.v4.runtime.Token} (or the previous token if the end of the input has
+ ///
+ /// Constructs a new _org.antlr.v4.runtime.ListTokenSource_ instance from the specified
+ /// collection of _org.antlr.v4.runtime.Token_ objects and source name.
+ ///
+ /// - parameter tokens: The collection of _org.antlr.v4.runtime.Token_ objects to provide as a
+ /// _org.antlr.v4.runtime.TokenSource_.
+ /// - parameter sourceName: The name of the _org.antlr.v4.runtime.TokenSource_. If this value is
+ /// `null`, _#getSourceName_ will attempt to infer the name from
+ /// the next _org.antlr.v4.runtime.Token_ (or the previous token if the end of the input has
/// been reached).
- ///
- /// - NullPointerException if {@code tokens} is {@code null}
- public init(_ tokens: Array, _ sourceName: String?) {
-
+ ///
+ public init(_ tokens: [Token], _ sourceName: String?) {
self.tokens = tokens
self.sourceName = sourceName
}
- /// {@inheritDoc}
-
public func getCharPositionInLine() -> Int {
if i < tokens.count {
return tokens[i].getCharPositionInLine()
- } else {
- if let eofToken = eofToken {
- return eofToken.getCharPositionInLine()
- } else {
- if tokens.count > 0 {
- // have to calculate the result from the line/column of the previous
- // token, along with the text of the token.
- let lastToken: Token = tokens[tokens.count - 1]
+ }
+ else if let eofToken = eofToken {
+ return eofToken.getCharPositionInLine()
+ }
+ else if tokens.count > 0 {
+ // have to calculate the result from the line/column of the previous
+ // token, along with the text of the token.
+ let lastToken = tokens[tokens.count - 1]
- if let tokenText = lastToken.getText() {
- let lastNewLine: Int = tokenText.lastIndexOf("\n")
- if lastNewLine >= 0 {
- return tokenText.length - lastNewLine - 1
- }
- }
- var position = lastToken.getCharPositionInLine()
- position += lastToken.getStopIndex()
- position -= lastToken.getStartIndex()
- position += 1
- return position
+ if let tokenText = lastToken.getText() {
+ let lastNewLine = tokenText.lastIndexOf("\n")
+ if lastNewLine >= 0 {
+ return tokenText.length - lastNewLine - 1
}
}
+ return (lastToken.getCharPositionInLine() +
+ lastToken.getStopIndex() -
+ lastToken.getStartIndex() + 1)
}
// only reach this if tokens is empty, meaning EOF occurs at the first
@@ -92,27 +100,26 @@ public class ListTokenSource: TokenSource {
return 0
}
- /// {@inheritDoc}
-
public func nextToken() -> Token {
if i >= tokens.count {
if eofToken == nil {
- var start: Int = -1
+ var start = -1
if tokens.count > 0 {
- let previousStop: Int = tokens[tokens.count - 1].getStopIndex()
+ let previousStop = tokens[tokens.count - 1].getStopIndex()
if previousStop != -1 {
start = previousStop + 1
}
}
- let stop: Int = max(-1, start - 1)
- eofToken = _factory.create((self, getInputStream()!), CommonToken.EOF, "EOF", CommonToken.DEFAULT_CHANNEL, start, stop, getLine(), getCharPositionInLine())
+ let stop = max(-1, start - 1)
+ let source = TokenSourceAndStream(self, getInputStream())
+ eofToken = _factory.create(source, CommonToken.EOF, "EOF", CommonToken.DEFAULT_CHANNEL, start, stop, getLine(), getCharPositionInLine())
}
return eofToken!
}
- let t: Token = tokens[i]
+ let t = tokens[i]
if i == tokens.count - 1 && t.getType() == CommonToken.EOF {
eofToken = t
}
@@ -121,8 +128,6 @@ public class ListTokenSource: TokenSource {
return t
}
- /// {@inheritDoc}
-
public func getLine() -> Int {
if i < tokens.count {
return tokens[i].getLine()
@@ -133,8 +138,8 @@ public class ListTokenSource: TokenSource {
if tokens.count > 0 {
// have to calculate the result from the line/column of the previous
// token, along with the text of the token.
- let lastToken: Token = tokens[tokens.count - 1]
- var line: Int = lastToken.getLine()
+ let lastToken = tokens[tokens.count - 1]
+ var line = lastToken.getLine()
if let tokenText = lastToken.getText() {
let length = tokenText.length
@@ -156,30 +161,24 @@ public class ListTokenSource: TokenSource {
return 1
}
- /// {@inheritDoc}
-
public func getInputStream() -> CharStream? {
if i < tokens.count {
return tokens[i].getInputStream()
- } else {
- if let eofToken = eofToken{
- return eofToken.getInputStream()
- } else {
- if tokens.count > 0 {
- return tokens[tokens.count - 1].getInputStream()
- }
- }
+ }
+ else if let eofToken = eofToken {
+ return eofToken.getInputStream()
+ }
+ else if tokens.count > 0 {
+ return tokens[tokens.count - 1].getInputStream()
}
// no input stream information is available
return nil
}
- /// {@inheritDoc}
-
public func getSourceName() -> String {
- if sourceName != nil {
- return sourceName!
+ if let sourceName = sourceName {
+ return sourceName
}
if let inputStream = getInputStream() {
@@ -189,14 +188,10 @@ public class ListTokenSource: TokenSource {
return "List"
}
- /// {@inheritDoc}
-
public func setTokenFactory(_ factory: TokenFactory) {
self._factory = factory
}
- /// {@inheritDoc}
-
public func getTokenFactory() -> TokenFactory {
return _factory
}
diff --git a/runtime/Swift/Sources/Antlr4/NoViableAltException.swift b/runtime/Swift/Sources/Antlr4/NoViableAltException.swift
index 8a41f8cca..bf3ab415f 100644
--- a/runtime/Swift/Sources/Antlr4/NoViableAltException.swift
+++ b/runtime/Swift/Sources/Antlr4/NoViableAltException.swift
@@ -4,48 +4,49 @@
*/
-/** Indicates that the parser could not decide which of two or more paths
- * to take based upon the remaining input. It tracks the starting token
- * of the offending input and also knows where the parser was
- * in the various paths when the error. Reported by reportNoViableAlternative()
- */
+/// Indicates that the parser could not decide which of two or more paths
+/// to take based upon the remaining input. It tracks the starting token
+/// of the offending input and also knows where the parser was
+/// in the various paths when the error. Reported by reportNoViableAlternative()
+///
-public class NoViableAltException: RecognitionException {
- /** Which configurations did we try at input.index() that couldn't match input.LT(1)? */
+public class NoViableAltException: RecognitionException {
+ /// Which configurations did we try at input.index() that couldn't match input.LT(1)?
private final var deadEndConfigs: ATNConfigSet?
- /** The token object at the start index; the input stream might
- * not be buffering tokens so get a reference to it. (At the
- * time the error occurred, of course the stream needs to keep a
- * buffer all of the tokens but later we might not have access to those.)
- */
-
+ /// The token object at the start index; the input stream might
+ /// not be buffering tokens so get a reference to it. (At the
+ /// time the error occurred, of course the stream needs to keep a
+ /// buffer all of the tokens but later we might not have access to those.)
+ ///
private final var startToken: Token
- public convenience init(_ recognizer: Parser?) throws {
+ public convenience init(_ recognizer: Parser) {
// LL(1) error
+ let token = try! recognizer.getCurrentToken()
self.init(recognizer,
- recognizer!.getInputStream()!,
- try recognizer!.getCurrentToken(),
- try recognizer!.getCurrentToken(),
+ recognizer.getInputStream()!,
+ token,
+ token,
nil,
- recognizer!._ctx)
+ recognizer._ctx)
}
public init(_ recognizer: Parser?,
_ input: IntStream,
_ startToken: Token,
- _ offendingToken: Token,
+ _ offendingToken: Token?,
_ deadEndConfigs: ATNConfigSet?,
_ ctx: ParserRuleContext?) {
self.deadEndConfigs = deadEndConfigs
self.startToken = startToken
- // as? Recognizer
super.init(recognizer, input, ctx)
- self.setOffendingToken(offendingToken)
+ if let offendingToken = offendingToken {
+ setOffendingToken(offendingToken)
+ }
}
diff --git a/runtime/Swift/Sources/Antlr4/Parser.swift b/runtime/Swift/Sources/Antlr4/Parser.swift
index 81ce2ac4f..a031aaf88 100644
--- a/runtime/Swift/Sources/Antlr4/Parser.swift
+++ b/runtime/Swift/Sources/Antlr4/Parser.swift
@@ -1,14 +1,18 @@
-///
+///
+///
/// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
/// Use of this file is governed by the BSD 3-clause license that
/// can be found in the LICENSE.txt file in the project root.
-///
+///
+///
import Foundation
+///
/// This is all the parsing support code essentially; most of it is error recovery stuff.
+///
open class Parser: Recognizer {
- public static let EOF: Int = -1
+ public static let EOF = -1
public static var ConsoleError = true
public class TraceListener: ParseTreeListener {
@@ -23,16 +27,13 @@ open class Parser: Recognizer {
print("enter \(ruleName), LT(1)=\(lt1)")
}
-
public func visitTerminal(_ node: TerminalNode) {
print("consume \(String(describing: node.getSymbol())) rule \(host.getRuleNames()[host._ctx!.getRuleIndex()])")
}
-
public func visitErrorNode(_ node: ErrorNode) {
}
-
public func exitEveryRule(_ ctx: ParserRuleContext) throws {
let ruleName = host.getRuleNames()[ctx.getRuleIndex()]
let lt1 = try host._input.LT(1)!.getText()!
@@ -41,58 +42,56 @@ open class Parser: Recognizer {
}
public class TrimToSizeListener: ParseTreeListener {
-
-
- public static let INSTANCE: TrimToSizeListener = TrimToSizeListener()
-
+ public static let INSTANCE = TrimToSizeListener()
public func enterEveryRule(_ ctx: ParserRuleContext) {
}
-
public func visitTerminal(_ node: TerminalNode) {
}
-
public func visitErrorNode(_ node: ErrorNode) {
}
-
public func exitEveryRule(_ ctx: ParserRuleContext) {
// TODO: Print exit info.
}
}
+ ///
/// mutex for bypassAltsAtnCache updates
+ ///
private var bypassAltsAtnCacheMutex = Mutex()
+ ///
/// mutex for decisionToDFA updates
+ ///
private var decisionToDFAMutex = Mutex()
- /**
- * This field maps from the serialized ATN string to the deserialized {@link org.antlr.v4.runtime.atn.ATN} with
- * bypass alternatives.
- *
- * @see org.antlr.v4.runtime.atn.ATNDeserializationOptions#isGenerateRuleBypassTransitions()
- */
+ ///
+ /// This field maps from the serialized ATN string to the deserialized _org.antlr.v4.runtime.atn.ATN_ with
+ /// bypass alternatives.
+ ///
+ /// - SeeAlso: org.antlr.v4.runtime.atn.ATNDeserializationOptions#isGenerateRuleBypassTransitions()
+ ///
private let bypassAltsAtnCache: HashMap = HashMap()
- /**
- * The error handling strategy for the parser. The default value is a new
- * instance of {@link org.antlr.v4.runtime.DefaultErrorStrategy}.
- *
- * @see #getErrorHandler
- * @see #setErrorHandler
- */
+ ///
+ /// The error handling strategy for the parser. The default value is a new
+ /// instance of _org.antlr.v4.runtime.DefaultErrorStrategy_.
+ ///
+ /// - SeeAlso: #getErrorHandler
+ /// - SeeAlso: #setErrorHandler
+ ///
public var _errHandler: ANTLRErrorStrategy = DefaultErrorStrategy()
- /**
- * The input stream.
- *
- * @see #getInputStream
- * @see #setInputStream
- */
+ ///
+ /// The input stream.
+ ///
+ /// - SeeAlso: #getInputStream
+ /// - SeeAlso: #setInputStream
+ ///
public var _input: TokenStream!
internal var _precedenceStack: Stack = {
@@ -102,42 +101,42 @@ open class Parser: Recognizer {
}()
- /**
- * The {@link org.antlr.v4.runtime.ParserRuleContext} object for the currently executing rule.
- * This is always non-null during the parsing process.
- */
+ ///
+ /// The _org.antlr.v4.runtime.ParserRuleContext_ object for the currently executing rule.
+ /// This is always non-null during the parsing process.
+ ///
public var _ctx: ParserRuleContext? = nil
- /**
- * Specifies whether or not the parser should construct a parse tree during
- * the parsing process. The default value is {@code true}.
- *
- * @see #getBuildParseTree
- * @see #setBuildParseTree
- */
+ ///
+ /// Specifies whether or not the parser should construct a parse tree during
+ /// the parsing process. The default value is `true`.
+ ///
+ /// - SeeAlso: #getBuildParseTree
+ /// - SeeAlso: #setBuildParseTree
+ ///
internal var _buildParseTrees: Bool = true
- /**
- * When {@link #setTrace}{@code (true)} is called, a reference to the
- * {@link org.antlr.v4.runtime.Parser.TraceListener} is stored here so it can be easily removed in a
- * later call to {@link #setTrace}{@code (false)}. The listener itself is
- * implemented as a parser listener so this field is not directly used by
- * other parser methods.
- */
+ ///
+ /// When _#setTrace_`(true)` is called, a reference to the
+ /// _org.antlr.v4.runtime.Parser.TraceListener_ is stored here so it can be easily removed in a
+ /// later call to _#setTrace_`(false)`. The listener itself is
+ /// implemented as a parser listener so this field is not directly used by
+ /// other parser methods.
+ ///
private var _tracer: TraceListener?
- /**
- * The list of {@link org.antlr.v4.runtime.tree.ParseTreeListener} listeners registered to receive
- * events during the parse.
- *
- * @see #addParseListener
- */
+ ///
+ /// The list of _org.antlr.v4.runtime.tree.ParseTreeListener_ listeners registered to receive
+ /// events during the parse.
+ ///
+ /// - SeeAlso: #addParseListener
+ ///
public var _parseListeners: Array?
- /**
- * The number of syntax errors reported during parsing. This value is
- * incremented each time {@link #notifyErrorListeners} is called.
- */
+ ///
+ /// The number of syntax errors reported during parsing. This value is
+ /// incremented each time _#notifyErrorListeners_ is called.
+ ///
internal var _syntaxErrors: Int = 0
public init(_ input: TokenStream) throws {
@@ -146,7 +145,7 @@ open class Parser: Recognizer {
try setInputStream(input)
}
- /** reset the parser's state */
+ /// reset the parser's state
public func reset() throws {
if (getInputStream() != nil) {
try getInputStream()!.seek(0)
@@ -164,28 +163,28 @@ open class Parser: Recognizer {
}
}
- /**
- * Match current input symbol against {@code ttype}. If the symbol type
- * matches, {@link org.antlr.v4.runtime.ANTLRErrorStrategy#reportMatch} and {@link #consume} are
- * called to complete the match process.
- *
- * If the symbol type does not match,
- * {@link org.antlr.v4.runtime.ANTLRErrorStrategy#recoverInline} is called on the current error
- * strategy to attempt recovery. If {@link #getBuildParseTree} is
- * {@code true} and the token index of the symbol returned by
- * {@link org.antlr.v4.runtime.ANTLRErrorStrategy#recoverInline} is -1, the symbol is added to
- * the parse tree by calling {@link #createErrorNode(ParserRuleContext, Token)} then
- * {@link ParserRuleContext#addErrorNode(ErrorNode)}.
- *
- * @param ttype the token type to match
- * @return the matched symbol
- * @throws org.antlr.v4.runtime.RecognitionException if the current input symbol did not match
- * {@code ttype} and the error strategy could not recover from the
- * mismatched symbol
- */
+ ///
+ /// Match current input symbol against `ttype`. If the symbol type
+ /// matches, _org.antlr.v4.runtime.ANTLRErrorStrategy#reportMatch_ and _#consume_ are
+ /// called to complete the match process.
+ ///
+ /// If the symbol type does not match,
+ /// _org.antlr.v4.runtime.ANTLRErrorStrategy#recoverInline_ is called on the current error
+ /// strategy to attempt recovery. If _#getBuildParseTree_ is
+ /// `true` and the token index of the symbol returned by
+ /// _org.antlr.v4.runtime.ANTLRErrorStrategy#recoverInline_ is -1, the symbol is added to
+ /// the parse tree by calling _#createErrorNode(ParserRuleContext, Token)_ then
+ /// _ParserRuleContext#addErrorNode(ErrorNode)_.
+ ///
+ /// - Parameter ttype: the token type to match
+ /// - Throws: org.antlr.v4.runtime.RecognitionException if the current input symbol did not match
+ /// `ttype` and the error strategy could not recover from the
+ /// mismatched symbol
+ /// - Returns: the matched symbol
+ ///
@discardableResult
public func match(_ ttype: Int) throws -> Token {
- var t: Token = try getCurrentToken()
+ var t = try getCurrentToken()
if t.getType() == ttype {
_errHandler.reportMatch(self)
try consume()
@@ -200,27 +199,27 @@ open class Parser: Recognizer {
return t
}
- /**
- * Match current input symbol as a wildcard. If the symbol type matches
- * (i.e. has a value greater than 0), {@link org.antlr.v4.runtime.ANTLRErrorStrategy#reportMatch}
- * and {@link #consume} are called to complete the match process.
- *
- * If the symbol type does not match,
- * {@link org.antlr.v4.runtime.ANTLRErrorStrategy#recoverInline} is called on the current error
- * strategy to attempt recovery. If {@link #getBuildParseTree} is
- * {@code true} and the token index of the symbol returned by
- * {@link org.antlr.v4.runtime.ANTLRErrorStrategy#recoverInline} is -1, the symbol is added to
- * the parse tree by calling {@link #createErrorNode(ParserRuleContext, Token)} then
- * {@link ParserRuleContext#addErrorNode(ErrorNode)}.
- *
- * @return the matched symbol
- * @throws org.antlr.v4.runtime.RecognitionException if the current input symbol did not match
- * a wildcard and the error strategy could not recover from the mismatched
- * symbol
- *///; RecognitionException
+ ///
+ /// Match current input symbol as a wildcard. If the symbol type matches
+ /// (i.e. has a value greater than 0), _org.antlr.v4.runtime.ANTLRErrorStrategy#reportMatch_
+ /// and _#consume_ are called to complete the match process.
+ ///
+ /// If the symbol type does not match,
+ /// _org.antlr.v4.runtime.ANTLRErrorStrategy#recoverInline_ is called on the current error
+ /// strategy to attempt recovery. If _#getBuildParseTree_ is
+ /// `true` and the token index of the symbol returned by
+ /// _org.antlr.v4.runtime.ANTLRErrorStrategy#recoverInline_ is -1, the symbol is added to
+ /// the parse tree by calling _#createErrorNode(ParserRuleContext, Token)_ then
+ /// _ParserRuleContext#addErrorNode(ErrorNode)_.
+ ///
+ /// - Throws: org.antlr.v4.runtime.RecognitionException if the current input symbol did not match
+ /// a wildcard and the error strategy could not recover from the mismatched
+ /// symbol
+ /// - Returns: the matched symbol
+ ///
@discardableResult
public func matchWildcard() throws -> Token {
- var t: Token = try getCurrentToken()
+ var t = try getCurrentToken()
if t.getType() > 0 {
_errHandler.reportMatch(self)
try consume()
@@ -236,43 +235,43 @@ open class Parser: Recognizer {
return t
}
- /**
- * Track the {@link org.antlr.v4.runtime.ParserRuleContext} objects during the parse and hook
- * them up using the {@link org.antlr.v4.runtime.ParserRuleContext#children} list so that it
- * forms a parse tree. The {@link org.antlr.v4.runtime.ParserRuleContext} returned from the start
- * rule represents the root of the parse tree.
- *
- * Note that if we are not building parse trees, rule contexts only point
- * upwards. When a rule exits, it returns the context but that gets garbage
- * collected if nobody holds a reference. It points upwards but nobody
- * points at it.
- *
- * When we build parse trees, we are adding all of these contexts to
- * {@link org.antlr.v4.runtime.ParserRuleContext#children} list. Contexts are then not candidates
- * for garbage collection.
- */
+ ///
+ /// Track the _org.antlr.v4.runtime.ParserRuleContext_ objects during the parse and hook
+ /// them up using the _org.antlr.v4.runtime.ParserRuleContext#children_ list so that it
+ /// forms a parse tree. The _org.antlr.v4.runtime.ParserRuleContext_ returned from the start
+ /// rule represents the root of the parse tree.
+ ///
+ /// Note that if we are not building parse trees, rule contexts only point
+ /// upwards. When a rule exits, it returns the context but that gets garbage
+ /// collected if nobody holds a reference. It points upwards but nobody
+ /// points at it.
+ ///
+ /// When we build parse trees, we are adding all of these contexts to
+ /// _org.antlr.v4.runtime.ParserRuleContext#children_ list. Contexts are then not candidates
+ /// for garbage collection.
+ ///
public func setBuildParseTree(_ buildParseTrees: Bool) {
self._buildParseTrees = buildParseTrees
}
- /**
- * Gets whether or not a complete parse tree will be constructed while
- * parsing. This property is {@code true} for a newly constructed parser.
- *
- * @return {@code true} if a complete parse tree will be constructed while
- * parsing, otherwise {@code false}
- */
+ ///
+ /// Gets whether or not a complete parse tree will be constructed while
+ /// parsing. This property is `true` for a newly constructed parser.
+ ///
+ /// - Returns: `true` if a complete parse tree will be constructed while
+ /// parsing, otherwise `false`
+ ///
public func getBuildParseTree() -> Bool {
return _buildParseTrees
}
- /**
- * Trim the internal lists of the parse tree during parsing to conserve memory.
- * This property is set to {@code false} by default for a newly constructed parser.
- *
- * @param trimParseTrees {@code true} to trim the capacity of the {@link org.antlr.v4.runtime.ParserRuleContext#children}
- * list to its size after a rule is parsed.
- */
+ ///
+ /// Trim the internal lists of the parse tree during parsing to conserve memory.
+ /// This property is set to `false` by default for a newly constructed parser.
+ ///
+ /// - Parameter trimParseTrees: `true` to trim the capacity of the _org.antlr.v4.runtime.ParserRuleContext#children_
+ /// list to its size after a rule is parsed.
+ ///
public func setTrimParseTree(_ trimParseTrees: Bool) {
if trimParseTrees {
if getTrimParseTree() {
@@ -284,72 +283,61 @@ open class Parser: Recognizer {
}
}
- /**
- * @return {@code true} if the {@link org.antlr.v4.runtime.ParserRuleContext#children} list is trimmed
- * using the default {@link org.antlr.v4.runtime.Parser.TrimToSizeListener} during the parse process.
- */
+ ///
+ /// - Returns: `true` if the _org.antlr.v4.runtime.ParserRuleContext#children_ list is trimmed
+ /// using the default _org.antlr.v4.runtime.Parser.TrimToSizeListener_ during the parse process.
+ ///
public func getTrimParseTree() -> Bool {
-
return !getParseListeners().filter({ $0 === TrimToSizeListener.INSTANCE }).isEmpty
}
-
- public func getParseListeners() -> Array {
- let listeners: Array? = _parseListeners
- if listeners == nil {
- return Array()
- }
-
- return listeners!
+ public func getParseListeners() -> [ParseTreeListener] {
+ return _parseListeners ?? [ParseTreeListener]()
}
- /**
- * Registers {@code listener} to receive events during the parsing process.
- *
- * To support output-preserving grammar transformations (including but not
- * limited to left-recursion removal, automated left-factoring, and
- * optimized code generation), calls to listener methods during the parse
- * may differ substantially from calls made by
- * {@link org.antlr.v4.runtime.tree.ParseTreeWalker#DEFAULT} used after the parse is complete. In
- * particular, rule entry and exit events may occur in a different order
- * during the parse than after the parser. In addition, calls to certain
- * rule entry methods may be omitted.
- *
- * With the following specific exceptions, calls to listener events are
- * deterministic, i.e. for identical input the calls to listener
- * methods will be the same.
- *
- *
- * - Alterations to the grammar used to generate code may change the
- * behavior of the listener calls.
- * - Alterations to the command line options passed to ANTLR 4 when
- * generating the parser may change the behavior of the listener calls.
- * - Changing the version of the ANTLR Tool used to generate the parser
- * may change the behavior of the listener calls.
- *
- *
- * @param listener the listener to add
- *
- * @throws NullPointerException if {@code} listener is {@code null}
- */
+ ///
+ /// Registers `listener` to receive events during the parsing process.
+ ///
+ /// To support output-preserving grammar transformations (including but not
+ /// limited to left-recursion removal, automated left-factoring, and
+ /// optimized code generation), calls to listener methods during the parse
+ /// may differ substantially from calls made by
+ /// _org.antlr.v4.runtime.tree.ParseTreeWalker#DEFAULT_ used after the parse is complete. In
+ /// particular, rule entry and exit events may occur in a different order
+ /// during the parse than after the parser. In addition, calls to certain
+ /// rule entry methods may be omitted.
+ ///
+ /// With the following specific exceptions, calls to listener events are
+ /// __deterministic__, i.e. for identical input the calls to listener
+ /// methods will be the same.
+ ///
+ /// * Alterations to the grammar used to generate code may change the
+ /// behavior of the listener calls.
+ /// * Alterations to the command line options passed to ANTLR 4 when
+ /// generating the parser may change the behavior of the listener calls.
+ /// * Changing the version of the ANTLR Tool used to generate the parser
+ /// may change the behavior of the listener calls.
+ ///
+ /// - Parameter listener: the listener to add
+ ///
public func addParseListener(_ listener: ParseTreeListener) {
if _parseListeners == nil {
- _parseListeners = Array()
+ _parseListeners = [ParseTreeListener]()
}
- self._parseListeners!.append(listener)
+ _parseListeners!.append(listener)
}
- /**
- * Remove {@code listener} from the list of parse listeners.
- *
- * If {@code listener} is {@code null} or has not been added as a parse
- * listener, this method does nothing.
- *
- * @see #addParseListener
- *
- * @param listener the listener to remove
- */
+ ///
+ /// Remove `listener` from the list of parse listeners.
+ ///
+ /// If `listener` is `nil` or has not been added as a parse
+ /// listener, this method does nothing.
+ ///
+ /// - SeeAlso: #addParseListener
+ ///
+ /// - Parameter listener: the listener to remove
+ ///
public func removeParseListener(_ listener: ParseTreeListener?) {
if _parseListeners != nil {
@@ -364,20 +352,20 @@ open class Parser: Recognizer {
}
}
- /**
- * Remove all parse listeners.
- *
- * @see #addParseListener
- */
+ ///
+ /// Remove all parse listeners.
+ ///
+ /// - SeeAlso: #addParseListener
+ ///
public func removeParseListeners() {
_parseListeners = nil
}
- /**
- * Notify any parse listeners of an enter rule event.
- *
- * @see #addParseListener
- */
+ ///
+ /// Notify any parse listeners of an enter rule event.
+ ///
+ /// - SeeAlso: #addParseListener
+ ///
public func triggerEnterRuleEvent() throws {
if let _parseListeners = _parseListeners, let _ctx = _ctx {
for listener: ParseTreeListener in _parseListeners {
@@ -387,63 +375,59 @@ open class Parser: Recognizer {
}
}
- /**
- * Notify any parse listeners of an exit rule event.
- *
- * @see #addParseListener
- */
+ ///
+ /// Notify any parse listeners of an exit rule event.
+ ///
+ /// - SeeAlso: #addParseListener
+ ///
public func triggerExitRuleEvent() throws {
// reverse order walk of listeners
if let _parseListeners = _parseListeners, let _ctx = _ctx {
- var i: Int = _parseListeners.count - 1
+ var i = _parseListeners.count - 1
while i >= 0 {
- let listener: ParseTreeListener = _parseListeners[i]
+ let listener = _parseListeners[i]
_ctx.exitRule(listener)
- try listener.exitEveryRule(_ctx)
+ try listener.exitEveryRule(_ctx)
i -= 1
}
}
}
- /**
- * Gets the number of syntax errors reported during parsing. This value is
- * incremented each time {@link #notifyErrorListeners} is called.
- *
- * @see #notifyErrorListeners
- */
+ ///
+ /// Gets the number of syntax errors reported during parsing. This value is
+ /// incremented each time _#notifyErrorListeners_ is called.
+ ///
+ /// - SeeAlso: #notifyErrorListeners
+ ///
public func getNumberOfSyntaxErrors() -> Int {
return _syntaxErrors
}
override
open func getTokenFactory() -> TokenFactory {
- //
return _input.getTokenSource().getTokenFactory()
}
- /** Tell our token source and error strategy about a new way to create tokens. */
+ /// Tell our token source and error strategy about a new way to create tokens.
override
open func setTokenFactory(_ factory: TokenFactory) {
- //
_input.getTokenSource().setTokenFactory(factory)
}
- /**
- * The ATN with bypass alternatives is expensive to create so we create it
- * lazily.
- *
- * @throws UnsupportedOperationException if the current parser does not
- * implement the {@link #getSerializedATN()} method.
- */
-
+ ///
+ /// The ATN with bypass alternatives is expensive to create so we create it
+ /// lazily.
+ ///
+ /// - Throws: _ANTLRError.unsupportedOperation_ if the current parser does not
+ /// implement the _#getSerializedATN()_ method.
+ ///
public func getATNWithBypassAlts() -> ATN {
- let serializedAtn: String = getSerializedATN()
+ let serializedAtn = getSerializedATN()
- var result: ATN? = bypassAltsAtnCache[serializedAtn]
- bypassAltsAtnCacheMutex.synchronized {
- [unowned self] in
+ var result = bypassAltsAtnCache[serializedAtn]
+ bypassAltsAtnCacheMutex.synchronized { [unowned self] in
if result == nil {
- let deserializationOptions: ATNDeserializationOptions = ATNDeserializationOptions()
+ let deserializationOptions = ATNDeserializationOptions()
try! deserializationOptions.setGenerateRuleBypassTransitions(true)
result = try! ATNDeserializer(deserializationOptions).deserialize(Array(serializedAtn.characters))
self.bypassAltsAtnCache[serializedAtn] = result!
@@ -452,36 +436,34 @@ open class Parser: Recognizer {
return result!
}
- /**
- * The preferred method of getting a tree pattern. For example, here's a
- * sample use:
- *
- *
- * ParseTree t = parser.expr();
- * ParseTreePattern p = parser.compileParseTreePattern("<ID>+0", MyParser.RULE_expr);
- * ParseTreeMatch m = p.match(t);
- * String id = m.get("ID");
- *
- */
+ ///
+ /// The preferred method of getting a tree pattern. For example, here's a
+ /// sample use:
+ ///
+ ///
+ /// ParseTree t = parser.expr();
+ /// ParseTreePattern p = parser.compileParseTreePattern("<ID>+0", MyParser.RULE_expr);
+ /// ParseTreeMatch m = p.match(t);
+ /// String id = m.get("ID");
+ ///
+ ///
public func compileParseTreePattern(_ pattern: String, _ patternRuleIndex: Int) throws -> ParseTreePattern {
if let tokenStream = getTokenStream() {
- let tokenSource: TokenSource = tokenStream.getTokenSource()
- if tokenSource is Lexer {
- let lexer: Lexer = tokenSource as! Lexer
+ let tokenSource = tokenStream.getTokenSource()
+ if let lexer = tokenSource as? Lexer {
return try compileParseTreePattern(pattern, patternRuleIndex, lexer)
}
}
throw ANTLRError.unsupportedOperation(msg: "Parser can't discover a lexer to use")
-
}
- /**
- * The same as {@link #compileParseTreePattern(String, int)} but specify a
- * {@link org.antlr.v4.runtime.Lexer} rather than trying to deduce it from this parser.
- */
+ ///
+ /// The same as _#compileParseTreePattern(String, int)_ but specify a
+ /// _org.antlr.v4.runtime.Lexer_ rather than trying to deduce it from this parser.
+ ///
public func compileParseTreePattern(_ pattern: String, _ patternRuleIndex: Int,
_ lexer: Lexer) throws -> ParseTreePattern {
- let m: ParseTreePatternMatcher = ParseTreePatternMatcher(lexer, self)
+ let m = ParseTreePatternMatcher(lexer, self)
return try m.compile(pattern, patternRuleIndex)
}
@@ -508,7 +490,7 @@ open class Parser: Recognizer {
return _input
}
- /** Set the token stream and reset the parser. */
+ /// Set the token stream and reset the parser.
public func setTokenStream(_ input: TokenStream) throws {
//TODO self._input = nil;
self._input = nil;
@@ -516,74 +498,76 @@ open class Parser: Recognizer {
self._input = input
}
- /** Match needs to return the current input symbol, which gets put
- * into the label for the associated token ref; e.g., x=ID.
- */
+ /// Match needs to return the current input symbol, which gets put
+ /// into the label for the associated token ref; e.g., x=ID.
+ ///
public func getCurrentToken() throws -> Token {
return try _input.LT(1)!
}
- public final func notifyErrorListeners(_ msg: String) throws {
- try notifyErrorListeners(getCurrentToken(), msg, nil)
+ public final func notifyErrorListeners(_ msg: String) {
+ let token = try? getCurrentToken()
+ notifyErrorListeners(token, msg, nil)
}
- public func notifyErrorListeners(_ offendingToken: Token, _ msg: String,
- _ e: AnyObject?) {
+ public func notifyErrorListeners(_ offendingToken: Token?, _ msg: String, _ e: AnyObject?) {
_syntaxErrors += 1
- var line: Int = -1
- var charPositionInLine: Int = -1
- line = offendingToken.getLine()
- charPositionInLine = offendingToken.getCharPositionInLine()
+ var line = -1
+ var charPositionInLine = -1
+ if let offendingToken = offendingToken {
+ line = offendingToken.getLine()
+ charPositionInLine = offendingToken.getCharPositionInLine()
+ }
- let listener: ANTLRErrorListener = getErrorListenerDispatch()
+ let listener = getErrorListenerDispatch()
listener.syntaxError(self, offendingToken, line, charPositionInLine, msg, e)
}
- /**
- * Consume and return the {@linkplain #getCurrentToken current symbol}.
- *
- * E.g., given the following input with {@code A} being the current
- * lookahead symbol, this function moves the cursor to {@code B} and returns
- * {@code A}.
- *
- *
- * A B
- * ^
- *
- *
- * If the parser is not in error recovery mode, the consumed symbol is added
- * to the parse tree using {@link ParserRuleContext#addChild(TerminalNode)}, and
- * {@link org.antlr.v4.runtime.tree.ParseTreeListener#visitTerminal} is called on any parse listeners.
- * If the parser is in error recovery mode, the consumed symbol is
- * added to the parse tree using {@link #createErrorNode(ParserRuleContext, Token)} then
- * {@link ParserRuleContext#addErrorNode(ErrorNode)} and
- * {@link org.antlr.v4.runtime.tree.ParseTreeListener#visitErrorNode} is called on any parse
- * listeners.
- */
+ ///
+ /// Consume and return the current symbol (see _#getCurrentToken_).
+ ///
+ /// E.g., given the following input with `A` being the current
+ /// lookahead symbol, this function moves the cursor to `B` and returns
+ /// `A`.
+ ///
+ ///
+ /// A B
+ /// ^
+ ///
+ ///
+ /// If the parser is not in error recovery mode, the consumed symbol is added
+ /// to the parse tree using _ParserRuleContext#addChild(TerminalNode)_, and
+ /// _org.antlr.v4.runtime.tree.ParseTreeListener#visitTerminal_ is called on any parse listeners.
+ /// If the parser __is__ in error recovery mode, the consumed symbol is
+ /// added to the parse tree using _#createErrorNode(ParserRuleContext, Token)_ then
+ /// _ParserRuleContext#addErrorNode(ErrorNode)_ and
+ /// _org.antlr.v4.runtime.tree.ParseTreeListener#visitErrorNode_ is called on any parse
+ /// listeners.
+ ///
@discardableResult
public func consume() throws -> Token {
- let o: Token = try getCurrentToken()
+ let o = try getCurrentToken()
if o.getType() != Parser.EOF {
try getInputStream()!.consume()
}
guard let _ctx = _ctx else {
return o
}
- let hasListener: Bool = _parseListeners != nil && !_parseListeners!.isEmpty
+ let hasListener = _parseListeners != nil && !_parseListeners!.isEmpty
if _buildParseTrees || hasListener {
if _errHandler.inErrorRecoveryMode(self) {
- let node: ErrorNode = _ctx.addErrorNode(createErrorNode(parent: _ctx, t: o))
+ let node = _ctx.addErrorNode(createErrorNode(parent: _ctx, t: o))
if let _parseListeners = _parseListeners {
- for listener: ParseTreeListener in _parseListeners {
+ for listener in _parseListeners {
listener.visitErrorNode(node)
}
}
} else {
- let node: TerminalNode = _ctx.addChild(createTerminalNode(parent: _ctx, t: o))
+ let node = _ctx.addChild(createTerminalNode(parent: _ctx, t: o))
if let _parseListeners = _parseListeners {
- for listener: ParseTreeListener in _parseListeners {
+ for listener in _parseListeners {
listener.visitTerminal(node)
}
}
@@ -592,20 +576,20 @@ open class Parser: Recognizer {
return o
}
- /** How to create a token leaf node associated with a parent.
- * Typically, the terminal node to create is not a function of the parent.
- *
- * @since 4.7
- */
+ /// How to create a token leaf node associated with a parent.
+ /// Typically, the terminal node to create is not a function of the parent.
+ ///
+ /// - Since: 4.7
+ ///
public func createTerminalNode(parent: ParserRuleContext, t: Token) -> TerminalNode {
return TerminalNodeImpl(t);
}
- /** How to create an error node, given a token, associated with a parent.
- * Typically, the error node to create is not a function of the parent.
- *
- * @since 4.7
- */
+ /// How to create an error node, given a token, associated with a parent.
+ /// Typically, the error node to create is not a function of the parent.
+ ///
+ /// - Since: 4.7
+ ///
public func createErrorNode(parent: ParserRuleContext, t: Token) -> ErrorNode {
return ErrorNode(t);
}
@@ -618,10 +602,10 @@ open class Parser: Recognizer {
}
}
- /**
- * Always called by generated parsers upon entry to a rule. Access field
- * {@link #_ctx} get the current context.
- */
+ ///
+ /// Always called by generated parsers upon entry to a rule. Access field
+ /// _#_ctx_ to get the current context.
+ ///
public func enterRule(_ localctx: ParserRuleContext, _ state: Int, _ ruleIndex: Int) throws {
setState(state)
_ctx = localctx
@@ -660,12 +644,12 @@ open class Parser: Recognizer {
}
}
- /**
- * Get the precedence level for the top-most precedence rule.
- *
- * @return The precedence level for the top-most precedence rule, or -1 if
- * the parser context is not nested within a precedence rule.
- */
+ ///
+ /// Get the precedence level for the top-most precedence rule.
+ ///
+ /// - Returns: The precedence level for the top-most precedence rule, or -1 if
+ /// the parser context is not nested within a precedence rule.
+ ///
public final func getPrecedence() -> Int {
if _precedenceStack.isEmpty {
return -1
@@ -674,11 +658,13 @@ open class Parser: Recognizer {
return _precedenceStack.peek() ?? -1
}
- /**
- * @deprecated Use
- * {@link #enterRecursionRule(org.antlr.v4.runtime.ParserRuleContext, int, int, int)} instead.
- */
- ////@Deprecated
+ ///
+ /// Use
+ /// _#enterRecursionRule(org.antlr.v4.runtime.ParserRuleContext, int, int, int)_ instead.
+ ///
+ ///
+ /// - Note: Deprecated.
+ ///
public func enterRecursionRule(_ localctx: ParserRuleContext, _ ruleIndex: Int) throws {
try enterRecursionRule(localctx, getATN().ruleToStartState[ruleIndex].stateNumber, ruleIndex, 0)
}
@@ -693,11 +679,11 @@ open class Parser: Recognizer {
}
}
- /** Like {@link #enterRule} but for recursive rules.
- * Make the current context the child of the incoming localctx.
- */
+ /// Like _#enterRule_ but for recursive rules.
+ /// Make the current context the child of the incoming localctx.
+ ///
public func pushNewRecursionContext(_ localctx: ParserRuleContext, _ state: Int, _ ruleIndex: Int) throws {
- let previous: ParserRuleContext = _ctx!
+ let previous = _ctx!
previous.parent = localctx
previous.invokingState = state
previous.stop = try _input.LT(-1)
@@ -716,12 +702,12 @@ open class Parser: Recognizer {
public func unrollRecursionContexts(_ _parentctx: ParserRuleContext?) throws {
_precedenceStack.pop()
_ctx!.stop = try _input.LT(-1)
- let retctx: ParserRuleContext = _ctx! // save current ctx (return value)
+ let retctx = _ctx! // save current ctx (return value)
// unroll so _ctx is as it was before call to recursive method
if _parseListeners != nil {
- while let ctxWrap = _ctx , ctxWrap !== _parentctx {
- try triggerExitRuleEvent()
+ while let ctxWrap = _ctx, ctxWrap !== _parentctx {
+ try triggerExitRuleEvent()
_ctx = ctxWrap.parent as? ParserRuleContext
}
} else {
@@ -738,7 +724,7 @@ open class Parser: Recognizer {
}
public func getInvokingContext(_ ruleIndex: Int) -> ParserRuleContext? {
- var p: ParserRuleContext? = _ctx
+ var p = _ctx
while let pWrap = p {
if pWrap.getRuleIndex() == ruleIndex {
return pWrap
@@ -766,63 +752,63 @@ open class Parser: Recognizer {
return false
}
- /** Given an AmbiguityInfo object that contains information about an
- * ambiguous decision event, return the list of ambiguous parse trees.
- * An ambiguity occurs when a specific token sequence can be recognized
- * in more than one way by the grammar. These ambiguities are detected only
- * at decision points.
- *
- * The list of trees includes the actual interpretation (that for
- * the minimum alternative number) and all ambiguous alternatives.
- * The actual interpretation is always first.
- *
- * This method reuses the same physical input token stream used to
- * detect the ambiguity by the original parser in the first place.
- * This method resets/seeks within but does not alter originalParser.
- * The input position is restored upon exit from this method.
- * Parsers using a {@link org.antlr.v4.runtime.UnbufferedTokenStream} may not be able to
- * perform the necessary save index() / seek(saved_index) operation.
- *
- * The trees are rooted at the node whose start..stop token indices
- * include the start and stop indices of this ambiguity event. That is,
- * the trees returns will always include the complete ambiguous subphrase
- * identified by the ambiguity event.
- *
- * Be aware that this method does NOT notify error or parse listeners as
- * it would trigger duplicate or otherwise unwanted events.
- *
- * This uses a temporary ParserATNSimulator and a ParserInterpreter
- * so we don't mess up any statistics, event lists, etc...
- * The parse tree constructed while identifying/making ambiguityInfo is
- * not affected by this method as it creates a new parser interp to
- * get the ambiguous interpretations.
- *
- * Nodes in the returned ambig trees are independent of the original parse
- * tree (constructed while identifying/creating ambiguityInfo).
- *
- * @since 4.5.1
- *
- * @param originalParser The parser used to create ambiguityInfo; it
- * is not modified by this routine and can be either
- * a generated or interpreted parser. It's token
- * stream *is* reset/seek()'d.
- * @param ambiguityInfo The information about an ambiguous decision event
- * for which you want ambiguous parse trees.
- * @param startRuleIndex The start rule for the entire grammar, not
- * the ambiguous decision. We re-parse the entire input
- * and so we need the original start rule.
- *
- * @return The list of all possible interpretations of
- * the input for the decision in ambiguityInfo.
- * The actual interpretation chosen by the parser
- * is always given first because this method
- * retests the input in alternative order and
- * ANTLR always resolves ambiguities by choosing
- * the first alternative that matches the input.
- *
- * @throws org.antlr.v4.runtime.RecognitionException Throws upon syntax error while matching
- * ambig input.
- */
+ /// Given an AmbiguityInfo object that contains information about an
+ /// ambiguous decision event, return the list of ambiguous parse trees.
+ /// An ambiguity occurs when a specific token sequence can be recognized
+ /// in more than one way by the grammar. These ambiguities are detected only
+ /// at decision points.
+ ///
+ /// The list of trees includes the actual interpretation (that for
+ /// the minimum alternative number) and all ambiguous alternatives.
+ /// The actual interpretation is always first.
+ ///
+ /// This method reuses the same physical input token stream used to
+ /// detect the ambiguity by the original parser in the first place.
+ /// This method resets/seeks within but does not alter originalParser.
+ /// The input position is restored upon exit from this method.
+ /// Parsers using a _org.antlr.v4.runtime.UnbufferedTokenStream_ may not be able to
+ /// perform the necessary save index() / seek(saved_index) operation.
+ ///
+ /// The trees are rooted at the node whose start..stop token indices
+ /// include the start and stop indices of this ambiguity event. That is,
+ /// the trees returned will always include the complete ambiguous subphrase
+ /// identified by the ambiguity event.
+ ///
+ /// Be aware that this method does NOT notify error or parse listeners as
+ /// it would trigger duplicate or otherwise unwanted events.
+ ///
+ /// This uses a temporary ParserATNSimulator and a ParserInterpreter
+ /// so we don't mess up any statistics, event lists, etc...
+ /// The parse tree constructed while identifying/making ambiguityInfo is
+ /// not affected by this method as it creates a new parser interp to
+ /// get the ambiguous interpretations.
+ ///
+ /// Nodes in the returned ambig trees are independent of the original parse
+ /// tree (constructed while identifying/creating ambiguityInfo).
+ ///
+ /// - Since: 4.5.1
+ ///
+ /// - Parameter originalParser: The parser used to create ambiguityInfo; it
+ /// is not modified by this routine and can be either
+ /// a generated or interpreted parser. Its token
+ /// stream *is* reset/seek()'d.
+ /// - Parameter ambiguityInfo: The information about an ambiguous decision event
+ /// for which you want ambiguous parse trees.
+ /// - Parameter startRuleIndex: The start rule for the entire grammar, not
+ /// the ambiguous decision. We re-parse the entire input
+ /// and so we need the original start rule.
+ ///
+ /// - Throws: org.antlr.v4.runtime.RecognitionException Throws upon syntax error while matching
+ /// ambig input.
+ /// - Returns: The list of all possible interpretations of
+ /// the input for the decision in ambiguityInfo.
+ /// The actual interpretation chosen by the parser
+ /// is always given first because this method
+ /// retests the input in alternative order and
+ /// ANTLR always resolves ambiguities by choosing
+ /// the first alternative that matches the input.
+ ///
+ ///
// public class func getAmbiguousParseTrees(originalParser : Parser,
// _ ambiguityInfo : AmbiguityInfo,
// _ startRuleIndex : Int) throws -> Array //; RecognitionException
@@ -851,7 +837,7 @@ open class Parser: Recognizer {
// parser.getInterpreter()!.setPredictionMode(PredictionMode.LL_EXACT_AMBIG_DETECTION);
//
// // get ambig trees
-// var alt : Int = ambiguityInfo.ambigAlts.nextSetBit(0);
+// var alt : Int = ambiguityInfo.ambigAlts.firstSetBit();
// while alt>=0 {
// // re-parse entire input for all ambiguous alternatives
// // (don't have to do first as it's been parsed, but do again for simplicity
@@ -875,26 +861,25 @@ open class Parser: Recognizer {
// return trees;
// }
- /**
- * Checks whether or not {@code symbol} can follow the current state in the
- * ATN. The behavior of this method is equivalent to the following, but is
- * implemented such that the complete context-sensitive follow set does not
- * need to be explicitly constructed.
- *
- *
- * return getExpectedTokens().contains(symbol);
- *
- *
- * @param symbol the symbol type to check
- * @return {@code true} if {@code symbol} can follow the current state in
- * the ATN, otherwise {@code false}.
- */
- public func isExpectedToken(_ symbol: Int) throws -> Bool {
-// return getInterpreter().atn.nextTokens(_ctx);
- let atn: ATN = getInterpreter().atn
+ ///
+ /// Checks whether or not `symbol` can follow the current state in the
+ /// ATN. The behavior of this method is equivalent to the following, but is
+ /// implemented such that the complete context-sensitive follow set does not
+ /// need to be explicitly constructed.
+ ///
+ ///
+ /// return getExpectedTokens().contains(symbol);
+ ///
+ ///
+ /// - Parameter symbol: the symbol type to check
+ /// - Returns: `true` if `symbol` can follow the current state in
+ /// the ATN, otherwise `false`.
+ ///
+ public func isExpectedToken(_ symbol: Int) -> Bool {
+ let atn = getInterpreter().atn
var ctx: ParserRuleContext? = _ctx
- let s: ATNState = atn.states[getState()]!
- var following: IntervalSet = try atn.nextTokens(s)
+ let s = atn.states[getState()]!
+ var following = atn.nextTokens(s)
if following.contains(symbol) {
return true
}
@@ -903,10 +888,10 @@ open class Parser: Recognizer {
return false
}
- while let ctxWrap = ctx , ctxWrap.invokingState >= 0 && following.contains(CommonToken.EPSILON) {
- let invokingState: ATNState = atn.states[ctxWrap.invokingState]!
- let rt: RuleTransition = invokingState.transition(0) as! RuleTransition
- following = try atn.nextTokens(rt.followState)
+ while let ctxWrap = ctx, ctxWrap.invokingState >= 0 && following.contains(CommonToken.EPSILON) {
+ let invokingState = atn.states[ctxWrap.invokingState]!
+ let rt = invokingState.transition(0) as! RuleTransition
+ following = atn.nextTokens(rt.followState)
if following.contains(symbol) {
return true
}
@@ -921,55 +906,51 @@ open class Parser: Recognizer {
return false
}
- /**
- * Computes the set of input symbols which could follow the current parser
- * state and context, as given by {@link #getState} and {@link #getContext},
- * respectively.
- *
- * @see org.antlr.v4.runtime.atn.ATN#getExpectedTokens(int, org.antlr.v4.runtime.RuleContext)
- */
+ ///
+ /// Computes the set of input symbols which could follow the current parser
+ /// state and context, as given by _#getState_ and _#getContext_,
+ /// respectively.
+ ///
+ /// - SeeAlso: org.antlr.v4.runtime.atn.ATN#getExpectedTokens(int, org.antlr.v4.runtime.RuleContext)
+ ///
public func getExpectedTokens() throws -> IntervalSet {
return try getATN().getExpectedTokens(getState(), getContext()!)
}
- public func getExpectedTokensWithinCurrentRule() throws -> IntervalSet {
- let atn: ATN = getInterpreter().atn
- let s: ATNState = atn.states[getState()]!
- return try atn.nextTokens(s)
+ public func getExpectedTokensWithinCurrentRule() -> IntervalSet {
+ let atn = getInterpreter().atn
+ let s = atn.states[getState()]!
+ return atn.nextTokens(s)
}
- /** Get a rule's index (i.e., {@code RULE_ruleName} field) or -1 if not found. */
+ /// Get a rule's index (i.e., `RULE_ruleName` field) or -1 if not found.
public func getRuleIndex(_ ruleName: String) -> Int {
- let ruleIndex: Int? = getRuleIndexMap()[ruleName]
- if ruleIndex != nil {
- return ruleIndex!
- }
- return -1
+ return getRuleIndexMap()[ruleName] ?? -1
}
public func getRuleContext() -> ParserRuleContext? {
return _ctx
}
- /** Return List<String> of the rule names in your parser instance
- * leading up to a call to the current rule. You could override if
- * you want more details such as the file/line info of where
- * in the ATN a rule is invoked.
- *
- * This is very useful for error messages.
- */
- public func getRuleInvocationStack() -> Array {
+ /// Return List<String> of the rule names in your parser instance
+ /// leading up to a call to the current rule. You could override if
+ /// you want more details such as the file/line info of where
+ /// in the ATN a rule is invoked.
+ ///
+ /// This is very useful for error messages.
+ ///
+ public func getRuleInvocationStack() -> [String] {
return getRuleInvocationStack(_ctx)
}
- public func getRuleInvocationStack(_ p: RuleContext?) -> Array {
+ public func getRuleInvocationStack(_ p: RuleContext?) -> [String] {
var p = p
- var ruleNames: [String] = getRuleNames()
- var stack: Array = Array()
+ var ruleNames = getRuleNames()
+ var stack = [String]()
while let pWrap = p {
// compute what follows who invoked us
- let ruleIndex: Int = pWrap.getRuleIndex()
+ let ruleIndex = pWrap.getRuleIndex()
if ruleIndex < 0 {
stack.append("n/a")
} else {
@@ -980,17 +961,15 @@ open class Parser: Recognizer {
return stack
}
- /** For debugging and other purposes. */
- public func getDFAStrings() -> Array {
- var s: Array = Array()
+ /// For debugging and other purposes.
+ public func getDFAStrings() -> [String] {
+ var s = [String]()
guard let _interp = _interp else {
return s
}
- decisionToDFAMutex.synchronized {
- [unowned self] in
-
+ decisionToDFAMutex.synchronized { [unowned self] in
for d in 0..<_interp.decisionToDFA.count {
- let dfa: DFA = _interp.decisionToDFA[d]
+ let dfa = _interp.decisionToDFA[d]
s.append(dfa.toString(self.getVocabulary()))
}
@@ -998,17 +977,15 @@ open class Parser: Recognizer {
return s
}
- /** For debugging and other purposes. */
+ /// For debugging and other purposes.
public func dumpDFA() {
- guard let _interp = _interp else {
+ guard let _interp = _interp else {
return
}
- decisionToDFAMutex.synchronized {
- [unowned self] in
- var seenOne: Bool = false
+ decisionToDFAMutex.synchronized { [unowned self] in
+ var seenOne = false
- for d in 0..<_interp.decisionToDFA.count {
- let dfa: DFA = _interp.decisionToDFA[d]
+ for dfa in _interp.decisionToDFA {
if !dfa.states.isEmpty {
if seenOne {
print("")
@@ -1028,36 +1005,35 @@ open class Parser: Recognizer {
override
open func getParseInfo() -> ParseInfo? {
- let interp: ParserATNSimulator? = getInterpreter()
- if interp is ProfilingATNSimulator {
- return ParseInfo(interp as! ProfilingATNSimulator)
+ let interp = getInterpreter()
+ if let interp = interp as? ProfilingATNSimulator {
+ return ParseInfo(interp)
}
return nil
}
- /**
- * @since 4.3
- */
+ ///
+ /// - Since: 4.3
+ ///
public func setProfile(_ profile: Bool) {
- let interp: ParserATNSimulator = getInterpreter()
- let saveMode: PredictionMode = interp.getPredictionMode()
+ let interp = getInterpreter()
+ let saveMode = interp.getPredictionMode()
if profile {
if !(interp is ProfilingATNSimulator) {
setInterpreter(ProfilingATNSimulator(self))
}
} else {
if interp is ProfilingATNSimulator {
- let sim: ParserATNSimulator =
- ParserATNSimulator(self, getATN(), interp.decisionToDFA, interp.getSharedContextCache()!)
+ let sim = ParserATNSimulator(self, getATN(), interp.decisionToDFA, interp.getSharedContextCache()!)
setInterpreter(sim)
}
}
getInterpreter().setPredictionMode(saveMode)
}
- /** During a parse is sometimes useful to listen in on the rule entry and exit
- * events as well as token matches. This is for quick and dirty debugging.
- */
+ /// During a parse it is sometimes useful to listen in on the rule entry and exit
+ /// events as well as token matches. This is for quick and dirty debugging.
+ ///
public func setTrace(_ trace: Bool) {
if !trace {
removeParseListener(_tracer)
@@ -1072,12 +1048,12 @@ open class Parser: Recognizer {
}
}
- /**
- * Gets whether a {@link org.antlr.v4.runtime.Parser.TraceListener} is registered as a parse listener
- * for the parser.
- *
- * @see #setTrace(boolean)
- */
+ ///
+ /// Gets whether a _org.antlr.v4.runtime.Parser.TraceListener_ is registered as a parse listener
+ /// for the parser.
+ ///
+ /// - SeeAlso: #setTrace(boolean)
+ ///
public func isTrace() -> Bool {
return _tracer != nil
}
diff --git a/runtime/Swift/Sources/Antlr4/ParserInterpreter.swift b/runtime/Swift/Sources/Antlr4/ParserInterpreter.swift
index 6f136cc68..1e760fc84 100644
--- a/runtime/Swift/Sources/Antlr4/ParserInterpreter.swift
+++ b/runtime/Swift/Sources/Antlr4/ParserInterpreter.swift
@@ -5,26 +5,26 @@
-/** A parser simulator that mimics what ANTLR's generated
- * parser code does. A ParserATNSimulator is used to make
- * predictions via adaptivePredict but this class moves a pointer through the
- * ATN to simulate parsing. ParserATNSimulator just
- * makes us efficient rather than having to backtrack, for example.
- *
- * This properly creates parse trees even for left recursive rules.
- *
- * We rely on the left recursive rule invocation and special predicate
- * transitions to make left recursive rules work.
- *
- * See TestParserInterpreter for examples.
- */
+/// A parser simulator that mimics what ANTLR's generated
+/// parser code does. A ParserATNSimulator is used to make
+/// predictions via adaptivePredict but this class moves a pointer through the
+/// ATN to simulate parsing. ParserATNSimulator just
+/// makes us efficient rather than having to backtrack, for example.
+///
+/// This properly creates parse trees even for left recursive rules.
+///
+/// We rely on the left recursive rule invocation and special predicate
+/// transitions to make left recursive rules work.
+///
+/// See TestParserInterpreter for examples.
+///
public class ParserInterpreter: Parser {
internal final var grammarFileName: String
internal final var atn: ATN
- /** This identifies StarLoopEntryState's that begin the (...)*
- * precedence loops of left recursive rules.
- */
+ /// This identifies StarLoopEntryState's that begin the (...)*
+ /// precedence loops of left recursive rules.
+ ///
internal final var statesNeedingLeftRecursionContext: BitSet
internal final var decisionToDFA: [DFA]
@@ -32,37 +32,34 @@ public class ParserInterpreter: Parser {
internal final var sharedContextCache: PredictionContextCache =
PredictionContextCache()
- ////@Deprecated
- internal final var tokenNames: [String]
internal final var ruleNames: [String]
private final var vocabulary: Vocabulary
- /** Tracks LR rules for adjusting the contexts */
+ /// Tracks LR rules for adjusting the contexts
internal final var _parentContextStack: Array<(ParserRuleContext?, Int)> =
Array<(ParserRuleContext?, Int)>()
- /** We need a map from (decision,inputIndex)->forced alt for computing ambiguous
- * parse trees. For now, we allow exactly one override.
- */
+ /// We need a map from (decision,inputIndex)->forced alt for computing ambiguous
+ /// parse trees. For now, we allow exactly one override.
+ ///
internal var overrideDecision: Int = -1
internal var overrideDecisionInputIndex: Int = -1
internal var overrideDecisionAlt: Int = -1
- /** A copy constructor that creates a new parser interpreter by reusing
- * the fields of a previous interpreter.
- *
- * @since 4.5.1
- *
- * @param old The interpreter to copy
- */
+ /// A copy constructor that creates a new parser interpreter by reusing
+ /// the fields of a previous interpreter.
+ ///
+ /// - Since: 4.5.1
+ ///
+ /// - Parameter old: The interpreter to copy
+ ///
public init(_ old: ParserInterpreter) throws {
self.atn = old.atn
self.grammarFileName = old.grammarFileName
self.statesNeedingLeftRecursionContext = old.statesNeedingLeftRecursionContext
self.decisionToDFA = old.decisionToDFA
- self.tokenNames = old.tokenNames
self.ruleNames = old.ruleNames
self.vocabulary = old.vocabulary
try super.init(old.getTokenStream()!)
@@ -71,26 +68,11 @@ public class ParserInterpreter: Parser {
sharedContextCache))
}
- /**
- * @deprecated Use {@link #ParserInterpreter(String, org.antlr.v4.runtime.Vocabulary, java.util.Collection, org.antlr.v4.runtime.atn.ATN, org.antlr.v4.runtime.TokenStream)} instead.
- */
- //@Deprecated
- public convenience init(_ grammarFileName: String, _ tokenNames: Array?,
- _ ruleNames: Array, _ atn: ATN, _ input: TokenStream) throws {
- try self.init(grammarFileName, Vocabulary.fromTokenNames(tokenNames), ruleNames, atn, input)
- }
-
public init(_ grammarFileName: String, _ vocabulary: Vocabulary,
_ ruleNames: Array, _ atn: ATN, _ input: TokenStream) throws {
self.grammarFileName = grammarFileName
self.atn = atn
- self.tokenNames = [String]()// new String[atn.maxTokenType];
- let length = tokenNames.count
- for i in 0.. [String] {
- return tokenNames
- }
-
override
public func getVocabulary() -> Vocabulary {
return vocabulary
@@ -142,19 +118,19 @@ public class ParserInterpreter: Parser {
return grammarFileName
}
- /** Begin parsing at startRuleIndex */
+ /// Begin parsing at startRuleIndex
public func parse(_ startRuleIndex: Int) throws -> ParserRuleContext {
- let startRuleStartState: RuleStartState = atn.ruleToStartState[startRuleIndex]
+ let startRuleStartState = atn.ruleToStartState[startRuleIndex]
- let rootContext: InterpreterRuleContext = InterpreterRuleContext(nil, ATNState.INVALID_STATE_NUMBER, startRuleIndex)
+ let rootContext = InterpreterRuleContext(nil, ATNState.INVALID_STATE_NUMBER, startRuleIndex)
if startRuleStartState.isPrecedenceRule {
- try enterRecursionRule(rootContext, startRuleStartState.stateNumber, startRuleIndex, 0)
+ try enterRecursionRule(rootContext, startRuleStartState.stateNumber, startRuleIndex, 0)
} else {
try enterRule(rootContext, startRuleStartState.stateNumber, startRuleIndex)
}
while true {
- let p: ATNState = getATNState()!
+ let p = getATNState()!
switch p.getStateType() {
case ATNState.RULE_STOP:
// pop; return from rule
@@ -204,7 +180,7 @@ public class ParserInterpreter: Parser {
var altNum: Int
if p.getNumberOfTransitions() > 1 {
try getErrorHandler().sync(self)
- let decision: Int = (p as! DecisionState).decision
+ let decision = (p as! DecisionState).decision
if decision == overrideDecision && _input.index() == overrideDecisionInputIndex {
altNum = overrideDecisionAlt
} else {
@@ -214,7 +190,7 @@ public class ParserInterpreter: Parser {
altNum = 1
}
- let transition: Transition = p.transition(altNum - 1)
+ let transition = p.transition(altNum - 1)
switch transition.getSerializationType() {
case Transition.EPSILON:
if try statesNeedingLeftRecursionContext.get(p.stateNumber) &&
@@ -248,9 +224,9 @@ public class ParserInterpreter: Parser {
break
case Transition.RULE:
- let ruleStartState: RuleStartState = transition.target as! RuleStartState
- let ruleIndex: Int = ruleStartState.ruleIndex!
- let ctx: InterpreterRuleContext = InterpreterRuleContext(_ctx, p.stateNumber, ruleIndex)
+ let ruleStartState = transition.target as! RuleStartState
+ let ruleIndex = ruleStartState.ruleIndex!
+ let ctx = InterpreterRuleContext(_ctx, p.stateNumber, ruleIndex)
if ruleStartState.isPrecedenceRule {
try enterRecursionRule(ctx, ruleStartState.stateNumber, ruleIndex, (transition as! RuleTransition).precedence)
} else {
@@ -259,25 +235,20 @@ public class ParserInterpreter: Parser {
break
case Transition.PREDICATE:
- let predicateTransition: PredicateTransition = transition as! PredicateTransition
+ let predicateTransition = transition as! PredicateTransition
if try !sempred(_ctx!, predicateTransition.ruleIndex, predicateTransition.predIndex) {
-
- throw try ANTLRException.recognition(e: FailedPredicateException(self))
-
+ throw ANTLRException.recognition(e: FailedPredicateException(self))
}
-
break
case Transition.ACTION:
- let actionTransition: ActionTransition = transition as! ActionTransition
+ let actionTransition = transition as! ActionTransition
try action(_ctx, actionTransition.ruleIndex, actionTransition.actionIndex)
break
case Transition.PRECEDENCE:
if !precpred(_ctx!, (transition as! PrecedencePredicateTransition).precedence) {
-
- throw try ANTLRException.recognition(e: FailedPredicateException(self, "precpred(_ctx,\((transition as! PrecedencePredicateTransition).precedence))"))
-
+ throw ANTLRException.recognition(e: FailedPredicateException(self, "precpred(_ctx,\((transition as! PrecedencePredicateTransition).precedence))"))
}
break
@@ -290,59 +261,59 @@ public class ParserInterpreter: Parser {
}
internal func visitRuleStopState(_ p: ATNState) throws {
- let ruleStartState: RuleStartState = atn.ruleToStartState[p.ruleIndex!]
+ let ruleStartState = atn.ruleToStartState[p.ruleIndex!]
if ruleStartState.isPrecedenceRule {
- let parentContext: (ParserRuleContext?, Int) = _parentContextStack.pop()
- try unrollRecursionContexts(parentContext.0!)
- setState(parentContext.1)
+ let (parentContext, parentState) = _parentContextStack.pop()
+ try unrollRecursionContexts(parentContext!)
+ setState(parentState)
} else {
try exitRule()
}
- let ruleTransition: RuleTransition = atn.states[getState()]!.transition(0) as! RuleTransition
+ let ruleTransition = atn.states[getState()]!.transition(0) as! RuleTransition
setState(ruleTransition.followState.stateNumber)
}
- /** Override this parser interpreters normal decision-making process
- * at a particular decision and input token index. Instead of
- * allowing the adaptive prediction mechanism to choose the
- * first alternative within a block that leads to a successful parse,
- * force it to take the alternative, 1..n for n alternatives.
- *
- * As an implementation limitation right now, you can only specify one
- * override. This is sufficient to allow construction of different
- * parse trees for ambiguous input. It means re-parsing the entire input
- * in general because you're never sure where an ambiguous sequence would
- * live in the various parse trees. For example, in one interpretation,
- * an ambiguous input sequence would be matched completely in expression
- * but in another it could match all the way back to the root.
- *
- * s : e '!'? ;
- * e : ID
- * | ID '!'
- * ;
- *
- * Here, x! can be matched as (s (e ID) !) or (s (e ID !)). In the first
- * case, the ambiguous sequence is fully contained only by the root.
- * In the second case, the ambiguous sequences fully contained within just
- * e, as in: (e ID !).
- *
- * Rather than trying to optimize this and make
- * some intelligent decisions for optimization purposes, I settled on
- * just re-parsing the whole input and then using
- * {link Trees#getRootOfSubtreeEnclosingRegion} to find the minimal
- * subtree that contains the ambiguous sequence. I originally tried to
- * record the call stack at the point the parser detected and ambiguity but
- * left recursive rules create a parse tree stack that does not reflect
- * the actual call stack. That impedance mismatch was enough to make
- * it it challenging to restart the parser at a deeply nested rule
- * invocation.
- *
- * Only parser interpreters can override decisions so as to avoid inserting
- * override checking code in the critical ALL(*) prediction execution path.
- *
- * @since 4.5.1
- */
+ /// Override this parser interpreter's normal decision-making process
+ /// at a particular decision and input token index. Instead of
+ /// allowing the adaptive prediction mechanism to choose the
+ /// first alternative within a block that leads to a successful parse,
+ /// force it to take the alternative, 1..n for n alternatives.
+ ///
+ /// As an implementation limitation right now, you can only specify one
+ /// override. This is sufficient to allow construction of different
+ /// parse trees for ambiguous input. It means re-parsing the entire input
+ /// in general because you're never sure where an ambiguous sequence would
+ /// live in the various parse trees. For example, in one interpretation,
+ /// an ambiguous input sequence would be matched completely in expression
+ /// but in another it could match all the way back to the root.
+ ///
+ /// s : e '!'? ;
+ /// e : ID
+ /// | ID '!'
+ /// ;
+ ///
+ /// Here, x! can be matched as (s (e ID) !) or (s (e ID !)). In the first
+ /// case, the ambiguous sequence is fully contained only by the root.
+ /// In the second case, the ambiguous sequence is fully contained within just
+ /// e, as in: (e ID !).
+ ///
+ /// Rather than trying to optimize this and make
+ /// some intelligent decisions for optimization purposes, I settled on
+ /// just re-parsing the whole input and then using
+ /// _Trees#getRootOfSubtreeEnclosingRegion_ to find the minimal
+ /// subtree that contains the ambiguous sequence. I originally tried to
+ /// record the call stack at the point the parser detected an ambiguity but
+ /// left recursive rules create a parse tree stack that does not reflect
+ /// the actual call stack. That impedance mismatch was enough to make
+ /// it challenging to restart the parser at a deeply nested rule
+ /// invocation.
+ ///
+ /// Only parser interpreters can override decisions so as to avoid inserting
+ /// override checking code in the critical ALL(*) prediction execution path.
+ ///
+ /// - Since: 4.5.1
+ ///
public func addDecisionOverride(_ decision: Int, _ tokenIndex: Int, _ forcedAlt: Int) {
overrideDecision = decision
overrideDecisionInputIndex = tokenIndex
diff --git a/runtime/Swift/Sources/Antlr4/ParserRuleContext.swift b/runtime/Swift/Sources/Antlr4/ParserRuleContext.swift
index 532018058..deabbc0bd 100644
--- a/runtime/Swift/Sources/Antlr4/ParserRuleContext.swift
+++ b/runtime/Swift/Sources/Antlr4/ParserRuleContext.swift
@@ -4,83 +4,79 @@
*/
-/** A rule invocation record for parsing.
- *
- * Contains all of the information about the current rule not stored in the
- * RuleContext. It handles parse tree children list, Any ATN state
- * tracing, and the default values available for rule invocations:
- * start, stop, rule index, current alt number.
- *
- * Subclasses made for each rule and grammar track the parameters,
- * return values, locals, and labels specific to that rule. These
- * are the objects that are returned from rules.
- *
- * Note text is not an actual field of a rule return value; it is computed
- * from start and stop using the input stream's toString() method. I
- * could add a ctor to this so that we can pass in and store the input
- * stream, but I'm not sure we want to do that. It would seem to be undefined
- * to get the .text property anyway if the rule matches tokens from multiple
- * input streams.
- *
- * I do not use getters for fields of objects that are used simply to
- * group values such as this aggregate. The getters/setters are there to
- * satisfy the superclass interface.
- */
-
+/// A rule invocation record for parsing.
+///
+/// Contains all of the information about the current rule not stored in the
+/// RuleContext. It handles parse tree children list, Any ATN state
+/// tracing, and the default values available for rule invocations:
+/// start, stop, rule index, current alt number.
+///
+/// Subclasses made for each rule and grammar track the parameters,
+/// return values, locals, and labels specific to that rule. These
+/// are the objects that are returned from rules.
+///
+/// Note text is not an actual field of a rule return value; it is computed
+/// from start and stop using the input stream's toString() method. I
+/// could add a ctor to this so that we can pass in and store the input
+/// stream, but I'm not sure we want to do that. It would seem to be undefined
+/// to get the .text property anyway if the rule matches tokens from multiple
+/// input streams.
+///
+/// I do not use getters for fields of objects that are used simply to
+/// group values such as this aggregate. The getters/setters are there to
+/// satisfy the superclass interface.
+///
open class ParserRuleContext: RuleContext {
public var visited = false
- /** If we are debugging or building a parse tree for a visitor,
- * we need to track all of the tokens and rule invocations associated
- * with this rule's context. This is empty for parsing w/o tree constr.
+ /// operation because we don't need to track the details about
- * how we parse this rule.
- */
+ /// If we are debugging or building a parse tree for a visitor,
+ /// we need to track all of the tokens and rule invocations associated
+ /// with this rule's context. This is empty for parsing w/o tree constr.
+ /// operation because we don't the need to track the details about
+ /// how we parse this rule.
+ ///
public var children: Array?
- /** For debugging/tracing purposes, we want to track all of the nodes in
- * the ATN traversed by the parser for a particular rule.
- * This list indicates the sequence of ATN nodes used to match
- * the elements of the children list. This list does not include
- * ATN nodes and other rules used to match rule invocations. It
- * traces the rule invocation node itself but nothing inside that
- * other rule's ATN submachine.
- *
- * There is NOT a one-to-one correspondence between the children and
- * states list. There are typically many nodes in the ATN traversed
- * for each element in the children list. For example, for a rule
- * invocation there is the invoking state and the following state.
- *
- * The parser setState() method updates field s and adds it to this list
- * if we are debugging/tracing.
- *
- * This does not trace states visited during prediction.
- */
-// public List states;
-
+ /// For debugging/tracing purposes, we want to track all of the nodes in
+ /// the ATN traversed by the parser for a particular rule.
+ /// This list indicates the sequence of ATN nodes used to match
+ /// the elements of the children list. This list does not include
+ /// ATN nodes and other rules used to match rule invocations. It
+ /// traces the rule invocation node itself but nothing inside that
+ /// other rule's ATN submachine.
+ ///
+ /// There is NOT a one-to-one correspondence between the children and
+ /// states list. There are typically many nodes in the ATN traversed
+ /// for each element in the children list. For example, for a rule
+ /// invocation there is the invoking state and the following state.
+ ///
+ /// The parser setState() method updates field s and adds it to this list
+ /// if we are debugging/tracing.
+ ///
+ /// This does not trace states visited during prediction.
+ ///
public var start: Token?, stop: Token?
- /**
- * The exception that forced this rule to return. If the rule successfully
- * completed, this is {@code null}.
- */
+ ///
+ /// The exception that forced this rule to return. If the rule successfully
+ /// completed, this is `null`.
+ ///
public var exception: AnyObject!
- //RecognitionException!;
public override init() {
super.init()
}
- /** COPY a ctx (I'm deliberately not using copy constructor) to avoid
- * confusion with creating node with parent. Does not copy children.
- *
- * This is used in the generated parser code to flip a generic XContext
- * node for rule X to a YContext for alt label Y. In that sense, it is
- * not really a generic copy function.
- *
- * If we do an error sync() at start of a rule, we might add error nodes
- * to the generic XContext so this function must copy those nodes to
- * the YContext as well else they are lost!
- */
+ /// COPY a ctx (I'm deliberately not using copy constructor) to avoid
+ /// confusion with creating node with parent. Does not copy children.
+ ///
+ /// This is used in the generated parser code to flip a generic XContext
+ /// node for rule X to a YContext for alt label Y. In that sense, it is
+ /// not really a generic copy function.
+ ///
+ /// If we do an error sync() at start of a rule, we might add error nodes
+ /// to the generic XContext so this function must copy those nodes to
+ /// the YContext as well else they are lost!
+ ///
open func copyFrom(_ ctx: ParserRuleContext) {
self.parent = ctx.parent
self.invokingState = ctx.invokingState
@@ -112,17 +108,17 @@ open class ParserRuleContext: RuleContext {
open func exitRule(_ listener: ParseTreeListener) {
}
- /** Add a parse tree node to this as a child. Works for
- * internal and leaf nodes. Does not set parent link;
- * other add methods must do that. Other addChild methods
- * call this.
- *
- * We cannot set the parent pointer of the incoming node
- * because the existing interfaces do not have a setParent()
- * method and I don't want to break backward compatibility for this.
- *
- * @since 4.7
- */
+ /// Add a parse tree node to this as a child. Works for
+ /// internal and leaf nodes. Does not set parent link;
+ /// other add methods must do that. Other addChild methods
+ /// call this.
+ ///
+ /// We cannot set the parent pointer of the incoming node
+ /// because the existing interfaces do not have a setParent()
+ /// method and I don't want to break backward compatibility for this.
+ ///
+ /// - Since: 4.7
+ ///
@discardableResult
open func addAnyChild(_ t: T) -> T {
if children == nil {
@@ -137,28 +133,28 @@ open class ParserRuleContext: RuleContext {
return addAnyChild(ruleInvocation)
}
- /** Add a token leaf node child and force its parent to be this node. */
+ /// Add a token leaf node child and force its parent to be this node.
@discardableResult
open func addChild(_ t: TerminalNode) -> TerminalNode {
t.setParent(self)
return addAnyChild(t)
}
- /** Add an error node child and force its parent to be this node.
- *
- * @since 4.7
- */
+ /// Add an error node child and force its parent to be this node.
+ ///
+ /// - Since: 4.7
+ ///
@discardableResult
open func addErrorNode(_ errorNode: ErrorNode) -> ErrorNode {
errorNode.setParent(self)
return addAnyChild(errorNode)
}
- /** Add a child to this node based upon matchedToken. It
- * creates a TerminalNodeImpl rather than using
- * {@link Parser#createTerminalNode(ParserRuleContext, Token)}. I'm leaving this
- * in for compatibility but the parser doesn't use this anymore.
- */
+ /// Add a child to this node based upon matchedToken. It
+ /// creates a TerminalNodeImpl rather than using
+ /// _Parser#createTerminalNode(ParserRuleContext, Token)_. I'm leaving this
+ /// in for compatibility but the parser doesn't use this anymore.
+ ///
@available(*, deprecated)
open func addChild(_ matchedToken: Token) -> TerminalNode {
let t: TerminalNodeImpl = TerminalNodeImpl(matchedToken)
@@ -167,11 +163,11 @@ open class ParserRuleContext: RuleContext {
return t
}
- /** Add a child to this node based upon badToken. It
- * creates a ErrorNodeImpl rather than using
- * {@link Parser#createErrorNode(ParserRuleContext, Token)}. I'm leaving this
- * in for compatibility but the parser doesn't use this anymore.
- */
+ /// Add a child to this node based upon badToken. It
+ /// creates a ErrorNodeImpl rather than using
+ /// _Parser#createErrorNode(ParserRuleContext, Token)_. I'm leaving this
+ /// in for compatibility but the parser doesn't use this anymore.
+ ///
@discardableResult
@available(*, deprecated)
open func addErrorNode(_ badToken: Token) -> ErrorNode {
@@ -186,10 +182,10 @@ open class ParserRuleContext: RuleContext {
// states.add(s);
// }
- /** Used by enterOuterAlt to toss out a RuleContext previously added as
- * we entered a rule. If we have # label, we will need to remove
- * generic ruleContext object.
- */
+ /// Used by enterOuterAlt to toss out a RuleContext previously added as
+ /// we entered a rule. If we have # label, we will need to remove
+ /// generic ruleContext object.
+ ///
open func removeLastChild() {
if children != nil {
children!.remove(at: children!.count-1)
@@ -198,7 +194,9 @@ open class ParserRuleContext: RuleContext {
override
- /** Override to make type more specific */
+ ///
+ /// Override to make type more specific
+ ///
open func getParent() -> Tree? {
return super.getParent()
}
@@ -307,24 +305,24 @@ open class ParserRuleContext: RuleContext {
return Interval.of(start.getTokenIndex(), stop.getTokenIndex())
}
- /**
- * Get the initial token in this context.
- * Note that the range from start to stop is inclusive, so for rules that do not consume anything
- * (for example, zero length or error productions) this token may exceed stop.
- */
+ ///
+ /// Get the initial token in this context.
+ /// Note that the range from start to stop is inclusive, so for rules that do not consume anything
+ /// (for example, zero length or error productions) this token may exceed stop.
+ ///
open func getStart() -> Token? {
return start
}
- /**
- * Get the final token in this context.
- * Note that the range from start to stop is inclusive, so for rules that do not consume anything
- * (for example, zero length or error productions) this token may precede start.
- */
+ ///
+ /// Get the final token in this context.
+ /// Note that the range from start to stop is inclusive, so for rules that do not consume anything
+ /// (for example, zero length or error productions) this token may precede start.
+ ///
open func getStop() -> Token? {
return stop
}
- /** Used for rule context info debugging during parse-time, not so much for ATN debugging */
+ /// Used for rule context info debugging during parse-time, not so much for ATN debugging
open func toInfoString(_ recognizer: Parser) -> String {
var rules: Array = recognizer.getRuleInvocationStack(self)
// Collections.reverse(rules);
diff --git a/runtime/Swift/Sources/Antlr4/ProxyErrorListener.swift b/runtime/Swift/Sources/Antlr4/ProxyErrorListener.swift
index 53f172b13..17d6b21b7 100644
--- a/runtime/Swift/Sources/Antlr4/ProxyErrorListener.swift
+++ b/runtime/Swift/Sources/Antlr4/ProxyErrorListener.swift
@@ -4,31 +4,29 @@
*/
-/**
- * This implementation of {@link org.antlr.v4.runtime.ANTLRErrorListener} dispatches all calls to a
- * collection of delegate listeners. This reduces the effort required to support multiple
- * listeners.
- *
- * @author Sam Harwell
- */
+///
+/// This implementation of _org.antlr.v4.runtime.ANTLRErrorListener_ dispatches all calls to a
+/// collection of delegate listeners. This reduces the effort required to support multiple
+/// listeners.
+///
+/// - Author: Sam Harwell
+///
public class ProxyErrorListener: ANTLRErrorListener {
- private final var delegates: Array
-
- public init(_ delegates: Array) {
+ private final var delegates: [ANTLRErrorListener]
+ public init(_ delegates: [ANTLRErrorListener]) {
self.delegates = delegates
}
- //_ e : RecognitionException
- public func syntaxError(_ recognizer: Recognizer,
- _ offendingSymbol: AnyObject?,
- _ line: Int,
- _ charPositionInLine: Int,
- _ msg: String,
- _ e: AnyObject?)
- {
- for listener: ANTLRErrorListener in delegates {
+ public func syntaxError(_ recognizer: Recognizer,
+ _ offendingSymbol: AnyObject?,
+ _ line: Int,
+ _ charPositionInLine: Int,
+ _ msg: String,
+ _ e: AnyObject?)
+ {
+ for listener in delegates {
listener.syntaxError(recognizer, offendingSymbol, line, charPositionInLine, msg, e)
}
}
@@ -40,9 +38,9 @@ public class ProxyErrorListener: ANTLRErrorListener {
_ stopIndex: Int,
_ exact: Bool,
_ ambigAlts: BitSet,
- _ configs: ATNConfigSet) throws {
- for listener: ANTLRErrorListener in delegates {
- try listener.reportAmbiguity(recognizer, dfa, startIndex, stopIndex, exact, ambigAlts, configs)
+ _ configs: ATNConfigSet) {
+ for listener in delegates {
+ listener.reportAmbiguity(recognizer, dfa, startIndex, stopIndex, exact, ambigAlts, configs)
}
}
@@ -52,9 +50,9 @@ public class ProxyErrorListener: ANTLRErrorListener {
_ startIndex: Int,
_ stopIndex: Int,
_ conflictingAlts: BitSet?,
- _ configs: ATNConfigSet) throws {
- for listener: ANTLRErrorListener in delegates {
- try listener.reportAttemptingFullContext(recognizer, dfa, startIndex, stopIndex, conflictingAlts, configs)
+ _ configs: ATNConfigSet) {
+ for listener in delegates {
+ listener.reportAttemptingFullContext(recognizer, dfa, startIndex, stopIndex, conflictingAlts, configs)
}
}
@@ -64,9 +62,9 @@ public class ProxyErrorListener: ANTLRErrorListener {
_ startIndex: Int,
_ stopIndex: Int,
_ prediction: Int,
- _ configs: ATNConfigSet) throws {
- for listener: ANTLRErrorListener in delegates {
- try listener.reportContextSensitivity(recognizer, dfa, startIndex, stopIndex, prediction, configs)
+ _ configs: ATNConfigSet) {
+ for listener in delegates {
+ listener.reportContextSensitivity(recognizer, dfa, startIndex, stopIndex, prediction, configs)
}
}
}
diff --git a/runtime/Swift/Sources/Antlr4/RecognitionException.swift b/runtime/Swift/Sources/Antlr4/RecognitionException.swift
index ea3348b6a..49a8807f9 100644
--- a/runtime/Swift/Sources/Antlr4/RecognitionException.swift
+++ b/runtime/Swift/Sources/Antlr4/RecognitionException.swift
@@ -4,65 +4,56 @@
*/
-/** The root of the ANTLR exception hierarchy. In general, ANTLR tracks just
- * 3 kinds of errors: prediction errors, failed predicate errors, and
- * mismatched input errors. In each case, the parser knows where it is
- * in the input, where it is in the ATN, the rule invocation stack,
- * and what kind of problem occurred.
- */
+/// The root of the ANTLR exception hierarchy. In general, ANTLR tracks just
+/// 3 kinds of errors: prediction errors, failed predicate errors, and
+/// mismatched input errors. In each case, the parser knows where it is
+/// in the input, where it is in the ATN, the rule invocation stack,
+/// and what kind of problem occurred.
+///
-public class RecognitionException {
- /** The {@link org.antlr.v4.runtime.Recognizer} where this exception originated. */
- private final var recognizer: Recognizer?
- //Recognizer? ;
+public class RecognitionException {
+ ///
+ /// The _org.antlr.v4.runtime.Recognizer_ where this exception originated.
+ ///
+ private final var recognizer: RecognizerProtocol?
- private final var ctx: RuleContext?
+ private final weak var ctx: RuleContext?
- private final var input: IntStream
+ private final var input: IntStream?
- /**
- * The current {@link org.antlr.v4.runtime.Token} when an error occurred. Since not all streams
- * support accessing symbols by index, we have to track the {@link org.antlr.v4.runtime.Token}
- * instance itself.
- */
+ ///
+ /// The current _org.antlr.v4.runtime.Token_ when an error occurred. Since not all streams
+ /// support accessing symbols by index, we have to track the _org.antlr.v4.runtime.Token_
+ /// instance itself.
+ ///
private var offendingToken: Token!
- private var offendingState: Int = -1
+ private var offendingState = -1
public var message: String?
- public init(_ recognizer: Recognizer?,
+
+ public init(_ recognizer: RecognizerProtocol?,
_ input: IntStream,
- _ ctx: ParserRuleContext?) {
+ _ ctx: ParserRuleContext? = nil,
+ _ message: String? = nil) {
self.recognizer = recognizer
self.input = input
self.ctx = ctx
- if let recognizer = recognizer {
- self.offendingState = recognizer.getState()
- }
- }
-
- public init(_ message: String,
- _ recognizer: Recognizer?,
- _ input: IntStream,
- _ ctx: ParserRuleContext?) {
self.message = message
- self.recognizer = recognizer
- self.input = input
- self.ctx = ctx
if let recognizer = recognizer {
self.offendingState = recognizer.getState()
}
}
- /**
- * Get the ATN state number the parser was in at the time the error
- * occurred. For {@link org.antlr.v4.runtime.NoViableAltException} and
- * {@link org.antlr.v4.runtime.LexerNoViableAltException} exceptions, this is the
- * {@link org.antlr.v4.runtime.atn.DecisionState} number. For others, it is the state whose outgoing
- * edge we couldn't match.
- *
- * If the state number is not known, this method returns -1.
- */
+ ///
+ /// Get the ATN state number the parser was in at the time the error
+ /// occurred. For _org.antlr.v4.runtime.NoViableAltException_ and
+ /// _org.antlr.v4.runtime.LexerNoViableAltException_ exceptions, this is the
+ /// _org.antlr.v4.runtime.atn.DecisionState_ number. For others, it is the state whose outgoing
+ /// edge we couldn't match.
+ ///
+ /// If the state number is not known, this method returns -1.
+ ///
public func getOffendingState() -> Int {
return offendingState
}
@@ -71,50 +62,52 @@ public class RecognitionException {
self.offendingState = offendingState
}
- /**
- * Gets the set of input symbols which could potentially follow the
- * previously matched symbol at the time this exception was thrown.
- *
- * If the set of expected tokens is not known and could not be computed,
- * this method returns {@code null}.
- *
- * @return The set of token types that could potentially follow the current
- * state in the ATN, or {@code null} if the information is not available.
- */
+ ///
+ /// Gets the set of input symbols which could potentially follow the
+ /// previously matched symbol at the time this exception was thrown.
+ ///
+ /// If the set of expected tokens is not known and could not be computed,
+ /// this method returns `null`.
+ ///
+ /// - Returns: The set of token types that could potentially follow the current
+ /// state in the ATN, or `null` if the information is not available.
+ ///
public func getExpectedTokens() -> IntervalSet? {
if let recognizer = recognizer {
return try? recognizer.getATN().getExpectedTokens(offendingState, ctx!)
}
-
return nil
}
- /**
- * Gets the {@link org.antlr.v4.runtime.RuleContext} at the time this exception was thrown.
- *
- * If the context is not available, this method returns {@code null}.
- *
- * @return The {@link org.antlr.v4.runtime.RuleContext} at the time this exception was thrown.
- * If the context is not available, this method returns {@code null}.
- */
+ ///
+ /// Gets the _org.antlr.v4.runtime.RuleContext_ at the time this exception was thrown.
+ ///
+ /// If the context is not available, this method returns `null`.
+ ///
+ /// - Returns: The _org.antlr.v4.runtime.RuleContext_ at the time this exception was thrown.
+ /// If the context is not available, this method returns `null`.
+ ///
public func getCtx() -> RuleContext? {
return ctx
}
- /**
- * Gets the input stream which is the symbol source for the recognizer where
- * this exception was thrown.
- *
- * If the input stream is not available, this method returns {@code null}.
- *
- * @return The input stream which is the symbol source for the recognizer
- * where this exception was thrown, or {@code null} if the stream is not
- * available.
- */
- public func getInputStream() -> IntStream {
+ ///
+ /// Gets the input stream which is the symbol source for the recognizer where
+ /// this exception was thrown.
+ ///
+ /// If the input stream is not available, this method returns `null`.
+ ///
+ /// - Returns: The input stream which is the symbol source for the recognizer
+ /// where this exception was thrown, or `null` if the stream is not
+ /// available.
+ ///
+ public func getInputStream() -> IntStream? {
return input
}
+ public func clearInputStream() {
+ input = nil
+ }
public func getOffendingToken() -> Token {
return offendingToken
@@ -124,15 +117,19 @@ public class RecognitionException {
self.offendingToken = offendingToken
}
- /**
- * Gets the {@link org.antlr.v4.runtime.Recognizer} where this exception occurred.
- *
- * If the recognizer is not available, this method returns {@code null}.
- *
- * @return The recognizer where this exception occurred, or {@code null} if
- * the recognizer is not available.
- */
- public func getRecognizer() -> Recognizer? {
+ ///
+ /// Gets the _org.antlr.v4.runtime.Recognizer_ where this exception occurred.
+ ///
+ /// If the recognizer is not available, this method returns `null`.
+ ///
+ /// - Returns: The recognizer where this exception occurred, or `null` if
+ /// the recognizer is not available.
+ ///
+ public func getRecognizer() -> RecognizerProtocol? {
return recognizer
}
+
+ public func clearRecognizer() {
+ self.recognizer = nil
+ }
}
diff --git a/runtime/Swift/Sources/Antlr4/Recognizer.swift b/runtime/Swift/Sources/Antlr4/Recognizer.swift
index 525e46756..6b3c60088 100644
--- a/runtime/Swift/Sources/Antlr4/Recognizer.swift
+++ b/runtime/Swift/Sources/Antlr4/Recognizer.swift
@@ -5,227 +5,172 @@
import Foundation
-open class Recognizer {
- //public static let EOF: Int = -1
+
+public protocol RecognizerProtocol {
+ func getATN() -> ATN
+ func getGrammarFileName() -> String
+ func getParseInfo() -> ParseInfo?
+ func getRuleNames() -> [String]
+ func getSerializedATN() -> String
+ func getState() -> Int
+ func getTokenType(_ tokenName: String) -> Int
+ func getVocabulary() -> Vocabulary
+}
+
+
+open class Recognizer: RecognizerProtocol {
//TODO: WeakKeyDictionary NSMapTable Dictionary MapTable>
- private let tokenTypeMapCache = HashMap>()
+ private let tokenTypeMapCache = HashMap()
- private let ruleIndexMapCache = HashMap,Dictionary>()
-
-
- private var _listeners: Array = [ConsoleErrorListener.INSTANCE]
+ private let ruleIndexMapCache = HashMap, [String : Int]>()
+ private var _listeners: [ANTLRErrorListener] = [ConsoleErrorListener.INSTANCE]
public var _interp: ATNInterpreter!
- private var _stateNumber: Int = -1
+ private var _stateNumber = -1
+ ///
/// mutex for tokenTypeMapCache updates
- private var tokenTypeMapCacheMutex = Mutex()
+ ///
+ private let tokenTypeMapCacheMutex = Mutex()
+ ///
/// mutex for ruleIndexMapCacheMutex updates
- private var ruleIndexMapCacheMutex = Mutex()
-
- /** Used to print out token names like ID during debugging and
- * error reporting. The generated parsers implement a method
- * that overrides this to point to their String[] tokenNames.
- *
- * @deprecated Use {@link #getVocabulary()} instead.
- */
- ////@Deprecated
- open func getTokenNames() -> [String?]? {
- RuntimeException(#function + " must be overridden")
- return []
- }
+ ///
+ private let ruleIndexMapCacheMutex = Mutex()
open func getRuleNames() -> [String] {
- RuntimeException(#function + " must be overridden")
- return []
+ fatalError(#function + " must be overridden")
}
-
- /**
- * Get the vocabulary used by the recognizer.
- *
- * @return A {@link org.antlr.v4.runtime.Vocabulary} instance providing information about the
- * vocabulary used by the grammar.
- */
-
+ ///
+ /// Get the vocabulary used by the recognizer.
+ ///
+ /// - Returns: A _org.antlr.v4.runtime.Vocabulary_ instance providing information about the
+ /// vocabulary used by the grammar.
+ ///
open func getVocabulary() -> Vocabulary {
- return Vocabulary.fromTokenNames(getTokenNames())
+ fatalError(#function + " must be overridden")
}
- /**
- * Get a map from token names to token types.
- *
- * Used for XPath and tree pattern compilation.
- */
- public func getTokenTypeMap() -> Dictionary {
- let vocabulary: Vocabulary = getVocabulary()
- var result: Dictionary? = self.tokenTypeMapCache[vocabulary]
- tokenTypeMapCacheMutex.synchronized {
- [unowned self] in
+ ///
+ /// Get a map from token names to token types.
+ ///
+ /// Used for XPath and tree pattern compilation.
+ ///
+ public func getTokenTypeMap() -> [String : Int] {
+ let vocabulary = getVocabulary()
+ var result = tokenTypeMapCache[vocabulary]
+ tokenTypeMapCacheMutex.synchronized { [unowned self] in
if result == nil {
- result = Dictionary()
+ result = [String : Int]()
let length = self.getATN().maxTokenType
for i in 0...length {
- let literalName: String? = vocabulary.getLiteralName(i)
- if literalName != nil {
- result![literalName!] = i
+ if let literalName = vocabulary.getLiteralName(i) {
+ result![literalName] = i
}
- let symbolicName: String? = vocabulary.getSymbolicName(i)
- if symbolicName != nil {
- result![symbolicName!] = i
+ if let symbolicName = vocabulary.getSymbolicName(i) {
+ result![symbolicName] = i
}
}
result!["EOF"] = CommonToken.EOF
- //TODO Result Collections.unmodifiableMap
-
self.tokenTypeMapCache[vocabulary] = result!
}
}
return result!
-
}
- /**
- * Get a map from rule names to rule indexes.
- *
- * Used for XPath and tree pattern compilation.
- */
- public func getRuleIndexMap() -> Dictionary {
- let ruleNames: [String] = getRuleNames()
+ ///
+ /// Get a map from rule names to rule indexes.
+ ///
+ /// Used for XPath and tree pattern compilation.
+ ///
+ public func getRuleIndexMap() -> [String : Int] {
+ let ruleNames = getRuleNames()
- let result: Dictionary? = self.ruleIndexMapCache[ArrayWrapper(ruleNames)]
- ruleIndexMapCacheMutex.synchronized {
- [unowned self] in
+ let result = ruleIndexMapCache[ArrayWrapper(ruleNames)]
+ ruleIndexMapCacheMutex.synchronized { [unowned self] in
if result == nil {
self.ruleIndexMapCache[ArrayWrapper(ruleNames)] = Utils.toMap(ruleNames)
}
}
return result!
-
}
public func getTokenType(_ tokenName: String) -> Int {
- let ttype: Int? = getTokenTypeMap()[tokenName]
- if ttype != nil {
- return ttype!
- }
- return CommonToken.INVALID_TYPE
+ return getTokenTypeMap()[tokenName] ?? CommonToken.INVALID_TYPE
}
- /**
- * If this recognizer was generated, it will have a serialized ATN
- * representation of the grammar.
- *
- * For interpreters, we don't know their serialized ATN despite having
- * created the interpreter from it.
- */
+ ///
+ /// If this recognizer was generated, it will have a serialized ATN
+ /// representation of the grammar.
+ ///
+ /// For interpreters, we don't know their serialized ATN despite having
+ /// created the interpreter from it.
+ ///
open func getSerializedATN() -> String {
- RuntimeException("there is no serialized ATN")
- fatalError()
- ///throw ANTLRError.UnsupportedOperation /* throw UnsupportedOperationException("there is no /serialized ATN"); */
+ fatalError("there is no serialized ATN")
}
- /** For debugging and other purposes, might want the grammar name.
- * Have ANTLR generate an implementation for this method.
- */
+ /// For debugging and other purposes, might want the grammar name.
+ /// Have ANTLR generate an implementation for this method.
+ ///
open func getGrammarFileName() -> String {
- RuntimeException(#function + " must be overridden")
- return ""
+ fatalError(#function + " must be overridden")
}
- /**
- * Get the {@link org.antlr.v4.runtime.atn.ATN} used by the recognizer for prediction.
- *
- * @return The {@link org.antlr.v4.runtime.atn.ATN} used by the recognizer for prediction.
- */
+ ///
+ /// Get the _org.antlr.v4.runtime.atn.ATN_ used by the recognizer for prediction.
+ ///
+ /// - Returns: The _org.antlr.v4.runtime.atn.ATN_ used by the recognizer for prediction.
+ ///
open func getATN() -> ATN {
- RuntimeException(#function + " must be overridden")
- fatalError()
+ fatalError(#function + " must be overridden")
}
- /**
- * Get the ATN interpreter used by the recognizer for prediction.
- *
- * @return The ATN interpreter used by the recognizer for prediction.
- */
+ ///
+ /// Get the ATN interpreter used by the recognizer for prediction.
+ ///
+ /// - Returns: The ATN interpreter used by the recognizer for prediction.
+ ///
open func getInterpreter() -> ATNInterpreter {
return _interp
}
- /** If profiling during the parse/lex, this will return DecisionInfo records
- * for each decision in recognizer in a ParseInfo object.
- *
- * @since 4.3
- */
+ /// If profiling during the parse/lex, this will return DecisionInfo records
+ /// for each decision in recognizer in a ParseInfo object.
+ ///
+ /// - Since: 4.3
+ ///
open func getParseInfo() -> ParseInfo? {
return nil
}
- /**
- * Set the ATN interpreter used by the recognizer for prediction.
- *
- * @param interpreter The ATN interpreter used by the recognizer for
- * prediction.
- */
+ ///
+ /// Set the ATN interpreter used by the recognizer for prediction.
+ ///
+ /// - Parameter interpreter: The ATN interpreter used by the recognizer for
+ /// prediction.
+ ///
open func setInterpreter(_ interpreter: ATNInterpreter) {
_interp = interpreter
}
- /** What is the error header, normally line/character position information? */
- //public func getErrorHeader(e : RecognitionException
-
- open func getErrorHeader(_ e: AnyObject) -> String {
- let line: Int = (e as! RecognitionException).getOffendingToken().getLine()
- let charPositionInLine: Int = (e as! RecognitionException).getOffendingToken().getCharPositionInLine()
- return "line " + String(line) + ":" + String(charPositionInLine)
+ ///
+ /// What is the error header, normally line/character position information?
+ ///
+ open func getErrorHeader(_ e: RecognitionException) -> String {
+ let offending = e.getOffendingToken()
+ let line = offending.getLine()
+ let charPositionInLine = offending.getCharPositionInLine()
+ return "line \(line):\(charPositionInLine)"
}
- /** How should a token be displayed in an error message? The default
- * is to display just the text, but during development you might
- * want to have a lot of information spit out. Override in that case
- * to use t.toString() (which, for CommonToken, dumps everything about
- * the token). This is better than forcing you to override a method in
- * your token objects because you don't have to go modify your lexer
- * so that it creates a new Java type.
- *
- * @deprecated This method is not called by the ANTLR 4 Runtime. Specific
- * implementations of {@link org.antlr.v4.runtime.ANTLRErrorStrategy} may provide a similar
- * feature when necessary. For example, see
- * {@link org.antlr.v4.runtime.DefaultErrorStrategy#getTokenErrorDisplay}.
- */
- ////@Deprecated
- open func getTokenErrorDisplay(_ t: Token?) -> String {
- guard let t = t else {
- return ""
- }
- var s: String
-
- if let text = t.getText() {
- s = text
- } else {
- if t.getType() == CommonToken.EOF {
- s = ""
- } else {
- s = "<\(t.getType())>"
- }
- }
- s = s.replacingOccurrences(of: "\n", with: "\\n")
- s = s.replacingOccurrences(of: "\r", with: "\\r")
- s = s.replacingOccurrences(of: "\t", with: "\\t")
- return "\(s)"
- }
-
- /**
- * @exception NullPointerException if {@code listener} is {@code null}.
- */
open func addErrorListener(_ listener: ANTLRErrorListener) {
-
_listeners.append(listener)
}
@@ -233,16 +178,13 @@ open class Recognizer {
_listeners = _listeners.filter() {
$0 !== listener
}
-
- // _listeners.removeObject(listener);
}
open func removeErrorListeners() {
_listeners.removeAll()
}
-
- open func getErrorListeners() -> Array {
+ open func getErrorListeners() -> [ANTLRErrorListener] {
return _listeners
}
@@ -256,7 +198,7 @@ open class Recognizer {
return true
}
- open func precpred(_ localctx: RuleContext?, _ precedence: Int) throws -> Bool {
+ open func precpred(_ localctx: RuleContext?, _ precedence: Int) -> Bool {
return true
}
@@ -267,13 +209,13 @@ open class Recognizer {
return _stateNumber
}
- /** Indicate that the recognizer has changed internal state that is
- * consistent with the ATN state passed in. This way we always know
- * where we are in the ATN as the parser goes along. The rule
- * context objects form a stack that lets us see the stack of
- * invoking rules. Combine this and we have complete ATN
- * configuration information.
- */
+ /// Indicate that the recognizer has changed internal state that is
+ /// consistent with the ATN state passed in. This way we always know
+ /// where we are in the ATN as the parser goes along. The rule
+ /// context objects form a stack that lets us see the stack of
+ /// invoking rules. Combine this and we have complete ATN
+ /// configuration information.
+ ///
public final func setState(_ atnState: Int) {
// System.err.println("setState "+atnState);
_stateNumber = atnState
@@ -281,26 +223,18 @@ open class Recognizer {
}
open func getInputStream() -> IntStream? {
- RuntimeException(#function + "Must be overridden")
- fatalError()
+ fatalError(#function + " must be overridden")
}
-
open func setInputStream(_ input: IntStream) throws {
- RuntimeException(#function + "Must be overridden")
-
+ fatalError(#function + " must be overridden")
}
-
open func getTokenFactory() -> TokenFactory {
- RuntimeException(#function + "Must be overridden")
- fatalError()
+ fatalError(#function + " must be overridden")
}
-
open func setTokenFactory(_ input: TokenFactory) {
- RuntimeException(#function + "Must be overridden")
-
+ fatalError(#function + " must be overridden")
}
-
}
diff --git a/runtime/Swift/Sources/Antlr4/RuleContext.swift b/runtime/Swift/Sources/Antlr4/RuleContext.swift
index ce19d0362..a44ce5373 100644
--- a/runtime/Swift/Sources/Antlr4/RuleContext.swift
+++ b/runtime/Swift/Sources/Antlr4/RuleContext.swift
@@ -4,69 +4,70 @@
*/
-/** A rule context is a record of a single rule invocation.
-*
-* We form a stack of these context objects using the parent
-* pointer. A parent pointer of null indicates that the current
-* context is the bottom of the stack. The ParserRuleContext subclass
-* as a children list so that we can turn this data structure into a
-* tree.
-*
-* The root node always has a null pointer and invokingState of -1.
-*
-* Upon entry to parsing, the first invoked rule function creates a
-* context object (asubclass specialized for that rule such as
-* SContext) and makes it the root of a parse tree, recorded by field
-* Parser._ctx.
-*
-* public final SContext s() throws RecognitionException {
-* SContext _localctx = new SContext(_ctx, getState()); <-- create new node
-* enterRule(_localctx, 0, RULE_s); <-- push it
-* ...
-* exitRule(); <-- pop back to _localctx
-* return _localctx;
-* }
-*
-* A subsequent rule invocation of r from the start rule s pushes a
-* new context object for r whose parent points at s and use invoking
-* state is the state with r emanating as edge label.
-*
-* The invokingState fields from a context object to the root
-* together form a stack of rule indication states where the root
-* (bottom of the stack) has a -1 sentinel value. If we invoke start
-* symbol s then call r1, which calls r2, the would look like
-* this:
-*
-* SContext[-1] <- root node (bottom of the stack)
-* R1Context[p] <- p in rule s called r1
-* R2Context[q] <- q in rule r1 called r2
-*
-* So the top of the stack, _ctx, represents a call to the current
-* rule and it holds the return address from another rule that invoke
-* to this rule. To invoke a rule, we must always have a current context.
-*
-* The parent contexts are useful for computing lookahead sets and
-* getting error information.
-*
-* These objects are used during parsing and prediction.
-* For the special case of parsers, we use the subclass
-* ParserRuleContext.
-*
-* @see org.antlr.v4.runtime.ParserRuleContext
-*/
+/// A rule context is a record of a single rule invocation.
+///
+/// We form a stack of these context objects using the parent
+/// pointer. A parent pointer of null indicates that the current
+/// context is the bottom of the stack. The ParserRuleContext subclass
+/// as a children list so that we can turn this data structure into a
+/// tree.
+///
+/// The root node always has a null pointer and invokingState of -1.
+///
+/// Upon entry to parsing, the first invoked rule function creates a
+/// context object (asubclass specialized for that rule such as
+/// SContext) and makes it the root of a parse tree, recorded by field
+/// Parser._ctx.
+///
+/// public final SContext s() throws RecognitionException {
+/// SContext _localctx = new SContext(_ctx, getState()); <-- create new node
+/// enterRule(_localctx, 0, RULE_s); <-- push it
+/// ...
+/// exitRule(); <-- pop back to _localctx
+/// return _localctx;
+/// }
+///
+/// A subsequent rule invocation of r from the start rule s pushes a
+/// new context object for r whose parent points at s and use invoking
+/// state is the state with r emanating as edge label.
+///
+/// The invokingState fields from a context object to the root
+/// together form a stack of rule indication states where the root
+/// (bottom of the stack) has a -1 sentinel value. If we invoke start
+/// symbol s then call r1, which calls r2, the would look like
+/// this:
+///
+/// SContext[-1] <- root node (bottom of the stack)
+/// R1Context[p] <- p in rule s called r1
+/// R2Context[q] <- q in rule r1 called r2
+///
+/// So the top of the stack, _ctx, represents a call to the current
+/// rule and it holds the return address from another rule that invoke
+/// to this rule. To invoke a rule, we must always have a current context.
+///
+/// The parent contexts are useful for computing lookahead sets and
+/// getting error information.
+///
+/// These objects are used during parsing and prediction.
+/// For the special case of parsers, we use the subclass
+/// ParserRuleContext.
+///
+/// - SeeAlso: org.antlr.v4.runtime.ParserRuleContext
+///
open class RuleContext: RuleNode {
- public static let EMPTY: ParserRuleContext = ParserRuleContext()
+ public static let EMPTY = ParserRuleContext()
- /** What context invoked this rule? */
- public var parent: RuleContext?
+ /// What context invoked this rule?
+ public weak var parent: RuleContext?
+
+ /// What state invoked the rule associated with this context?
+ /// The "return address" is the followState of invokingState
+ /// If parent is null, this should be -1 this context object represents
+ /// the start rule.
+ ///
+ public var invokingState = -1
- /** What state invoked the rule associated with this context?
- * The "return address" is the followState of invokingState
- * If parent is null, this should be -1 this context object represents
- * the start rule.
- */
- public var invokingState: Int = -1
override
public init() {
super.init()
@@ -79,7 +80,7 @@ open class RuleContext: RuleNode {
}
open func depth() -> Int {
- var n: Int = 0
+ var n = 0
var p: RuleContext? = self
while let pWrap = p {
p = pWrap.parent
@@ -88,9 +89,9 @@ open class RuleContext: RuleNode {
return n
}
- /** A context is empty if there is no invoking state; meaning nobody called
- * current context.
- */
+ /// A context is empty if there is no invoking state; meaning nobody called
+ /// current context.
+ ///
open func isEmpty() -> Bool {
return invokingState == -1
}
@@ -117,13 +118,13 @@ open class RuleContext: RuleNode {
return self
}
- /** Return the combined text of all child nodes. This method only considers
- * tokens which have been added to the parse tree.
- *
- * Since tokens on hidden channels (e.g. whitespace or comments) are not
- * added to the parse trees, they will not appear in the output of this
- * method.
- */
+ /// Return the combined text of all child nodes. This method only considers
+ /// tokens which have been added to the parse tree.
+ ///
+ /// Since tokens on hidden channels (e.g. whitespace or comments) are not
+ /// added to the parse trees, they will not appear in the output of this
+ /// method.
+ ///
open override func getText() -> String {
let length = getChildCount()
@@ -131,7 +132,7 @@ open class RuleContext: RuleNode {
return ""
}
- let builder: StringBuilder = StringBuilder()
+ let builder = StringBuilder()
for i in 0.. Future {
- var ruleNames : Array = parser != nil ? Arrays.asList(parser.getRuleNames()) : null;
- return inspect(ruleNames);
- }
-
- public func inspect(ruleNames : Array) -> Future {
- var viewer : TreeViewer = TreeViewer(ruleNames, self);
- return viewer.open();
- }
-
- /** Save this tree in a postscript file */
- public func save(parser : Parser, _ fileName : String)
- throws; IOException, PrintException
- {
- var ruleNames : Array = parser != nil ? Arrays.asList(parser.getRuleNames()) : null;
- save(ruleNames, fileName);
- }
-
- /** Save this tree in a postscript file using a particular font name and size */
- public func save(parser : Parser, _ fileName : String,
- _ fontName : String, _ fontSize : Int)
- throws; IOException
- {
- var ruleNames : Array = parser != nil ? Arrays.asList(parser.getRuleNames()) : null;
- save(ruleNames, fileName, fontName, fontSize);
- }
-
- /** Save this tree in a postscript file */
- public func save(ruleNames : Array, _ fileName : String)
- throws; IOException, PrintException
- {
- Trees.writePS(self, ruleNames, fileName);
- }
-
- /** Save this tree in a postscript file using a particular font name and size */
- public func save(ruleNames : Array, _ fileName : String,
- _ fontName : String, _ fontSize : Int)
- throws; IOException
- {
- Trees.writePS(self, ruleNames, fileName, fontName, fontSize);
- }
- */
- /** Print out a whole tree, not just a node, in LISP format
- * (root child1 .. childN). Print just a node if this is a leaf.
- * We have to know the recognizer so we can get rule names.
- */
-
+ /// Print out a whole tree, not just a node, in LISP format
+ /// (root child1 .. childN). Print just a node if this is a leaf.
+ /// We have to know the recognizer so we can get rule names.
+ ///
open override func toStringTree(_ recog: Parser) -> String {
return Trees.toStringTree(self, recog)
}
- /** Print out a whole tree, not just a node, in LISP format
- * (root child1 .. childN). Print just a node if this is a leaf.
- */
- public func toStringTree(_ ruleNames: Array?) -> String {
+ /// Print out a whole tree, not just a node, in LISP format
+ /// (root child1 .. childN). Print just a node if this is a leaf.
+ ///
+ public func toStringTree(_ ruleNames: [String]?) -> String {
return Trees.toStringTree(self, ruleNames)
}
-
open override func toStringTree() -> String {
- let info: Array? = nil
- return toStringTree(info)
+ return toStringTree(nil)
}
+
open override var description: String {
- let p1: Array? = nil
- let p2: RuleContext? = nil
- return toString(p1, p2)
+ return toString(nil, nil)
}
open override var debugDescription: String {
return description
}
- public final func toString(_ recog: Recognizer) -> String {
+ public final func toString(_ recog: Recognizer) -> String {
return toString(recog, ParserRuleContext.EMPTY)
}
- public final func toString(_ ruleNames: Array) -> String {
+ public final func toString(_ ruleNames: [String]) -> String {
return toString(ruleNames, nil)
}
// recog null unless ParserRuleContext, in which case we use subclass toString(...)
- open func toString(_ recog: Recognizer?, _ stop: RuleContext) -> String {
- let ruleNames: [String]? = recog != nil ? recog!.getRuleNames() : nil
- let ruleNamesList: Array? = ruleNames ?? nil
- return toString(ruleNamesList, stop)
+ open func toString(_ recog: Recognizer?, _ stop: RuleContext) -> String {
+ let ruleNames = recog?.getRuleNames()
+ return toString(ruleNames, stop)
}
- open func toString(_ ruleNames: Array?, _ stop: RuleContext?) -> String {
- let buf: StringBuilder = StringBuilder()
+ open func toString(_ ruleNames: [String]?, _ stop: RuleContext?) -> String {
+ let buf = StringBuilder()
var p: RuleContext? = self
buf.append("[")
- while let pWrap = p , pWrap !== stop {
- if ruleNames == nil {
+ while let pWrap = p, pWrap !== stop {
+ if let ruleNames = ruleNames {
+ let ruleIndex = pWrap.getRuleIndex()
+ let ruleIndexInRange = (ruleIndex >= 0 && ruleIndex < ruleNames.count)
+ let ruleName = (ruleIndexInRange ? ruleNames[ruleIndex] : String(ruleIndex))
+ buf.append(ruleName)
+ }
+ else {
if !pWrap.isEmpty() {
buf.append(pWrap.invokingState)
}
- } else {
- let ruleIndex: Int = pWrap.getRuleIndex()
- let ruleIndexInRange: Bool = ruleIndex >= 0 && ruleIndex < ruleNames!.count
- let ruleName: String = ruleIndexInRange ? ruleNames![ruleIndex] : String(ruleIndex)
- buf.append(ruleName)
}
if pWrap.parent != nil && (ruleNames != nil || !pWrap.parent!.isEmpty()) {
diff --git a/runtime/Swift/Sources/Antlr4/RuntimeMetaData.swift b/runtime/Swift/Sources/Antlr4/RuntimeMetaData.swift
index edabad1c3..384fa6d11 100644
--- a/runtime/Swift/Sources/Antlr4/RuntimeMetaData.swift
+++ b/runtime/Swift/Sources/Antlr4/RuntimeMetaData.swift
@@ -4,142 +4,132 @@
*/
-/**
- * This class provides access to the current version of the ANTLR 4 runtime
- * library as compile-time and runtime constants, along with methods for
- * checking for matching version numbers and notifying listeners in the case
- * where a version mismatch is detected.
- *
- *
- * The runtime version information is provided by {@link #VERSION} and
- * {@link #getRuntimeVersion()}. Detailed information about these values is
- * provided in the documentation for each member.
- *
- *
- * The runtime version check is implemented by {@link #checkVersion}. Detailed
- * information about incorporating this call into user code, as well as its use
- * in generated code, is provided in the documentation for the method.
- *
- *
- * Version strings x.y and x.y.z are considered "compatible" and no error
- * would be generated. Likewise, version strings x.y-SNAPSHOT and x.y.z are
- * considered "compatible" because the major and minor components x.y
- * are the same in each.
- *
- *
- * To trap any error messages issued by this code, use System.setErr()
- * in your main() startup code.
- *
- *
- * @since 4.3
- */
+///
+/// This class provides access to the current version of the ANTLR 4 runtime
+/// library as compile-time and runtime constants, along with methods for
+/// checking for matching version numbers and notifying listeners in the case
+/// where a version mismatch is detected.
+///
+///
+/// The runtime version information is provided by _#VERSION_ and
+/// _#getRuntimeVersion()_. Detailed information about these values is
+/// provided in the documentation for each member.
+///
+///
+/// The runtime version check is implemented by _#checkVersion_. Detailed
+/// information about incorporating this call into user code, as well as its use
+/// in generated code, is provided in the documentation for the method.
+///
+///
+/// Version strings x.y and x.y.z are considered "compatible" and no error
+/// would be generated. Likewise, version strings x.y-SNAPSHOT and x.y.z are
+/// considered "compatible" because the major and minor components x.y
+/// are the same in each.
+///
+///
+/// To trap any error messages issued by this code, use System.setErr()
+/// in your main() startup code.
+///
+///
+/// - Since: 4.3
+///
public class RuntimeMetaData {
- /**
- * A compile-time constant containing the current version of the ANTLR 4
- * runtime library.
- *
- *
- * This compile-time constant value allows generated parsers and other
- * libraries to include a literal reference to the version of the ANTLR 4
- * runtime library the code was compiled against. At each release, we
- * change this value.
- *
- * Version numbers are assumed to have the form
- *
- * major.minor.patch.revision-suffix,
- *
- * with the individual components defined as follows.
- *
- *
- * - major is a required non-negative integer, and is equal to
- * {@code 4} for ANTLR 4.
- * - minor is a required non-negative integer.
- * - patch is an optional non-negative integer. When
- * patch is omitted, the {@code .} (dot) appearing before it is
- * also omitted.
- * - revision is an optional non-negative integer, and may only
- * be included when patch is also included. When revision
- * is omitted, the {@code .} (dot) appearing before it is also omitted.
- * - suffix is an optional string. When suffix is
- * omitted, the {@code -} (hyphen-minus) appearing before it is also
- * omitted.
- *
- */
+ ///
+ /// A compile-time constant containing the current version of the ANTLR 4
+ /// runtime library.
+ ///
+ /// This compile-time constant value allows generated parsers and other
+ /// libraries to include a literal reference to the version of the ANTLR 4
+ /// runtime library the code was compiled against. At each release, we
+ /// change this value.
+ ///
+ /// Version numbers are assumed to have the form
+ ///
+ /// __major__.__minor__.__patch__.__revision__-__suffix__,
+ ///
+ /// with the individual components defined as follows.
+ ///
+ /// * __major__ is a required non-negative integer, and is equal to
+ /// `4` for ANTLR 4.
+ /// * __minor__ is a required non-negative integer.
+ /// * __patch__ is an optional non-negative integer. When
+ /// patch is omitted, the `.` (dot) appearing before it is
+ /// also omitted.
+ /// * __revision__ is an optional non-negative integer, and may only
+ /// be included when __patch__ is also included. When __revision__
+ /// is omitted, the `.` (dot) appearing before it is also omitted.
+ /// * __suffix__ is an optional string. When __suffix__ is
+ /// omitted, the `-` (hyphen-minus) appearing before it is also
+ /// omitted.
+ ///
public static let VERSION: String = "4.7"
- /**
- * Gets the currently executing version of the ANTLR 4 runtime library.
- *
- *
- * This method provides runtime access to the {@link #VERSION} field, as
- * opposed to directly referencing the field as a compile-time constant.
- *
- * @return The currently executing version of the ANTLR 4 library
- */
+ ///
+ /// Gets the currently executing version of the ANTLR 4 runtime library.
+ ///
+ ///
+ /// This method provides runtime access to the _#VERSION_ field, as
+ /// opposed to directly referencing the field as a compile-time constant.
+ ///
+ /// - Returns: The currently executing version of the ANTLR 4 library
+ ///
public static func getRuntimeVersion() -> String {
return RuntimeMetaData.VERSION
}
- /**
- * This method provides the ability to detect mismatches between the version
- * of ANTLR 4 used to generate a parser, the version of the ANTLR runtime a
- * parser was compiled against, and the version of the ANTLR runtime which
- * is currently executing.
- *
- *
- * The version check is designed to detect the following two specific
- * scenarios.
- *
- *
- * - The ANTLR Tool version used for code generation does not match the
- * currently executing runtime version.
- * - The ANTLR Runtime version referenced at the time a parser was
- * compiled does not match the currently executing runtime version.
- *
- *
- *
- * Starting with ANTLR 4.3, the code generator emits a call to this method
- * using two constants in each generated lexer and parser: a hard-coded
- * constant indicating the version of the tool used to generate the parser
- * and a reference to the compile-time constant {@link #VERSION}. At
- * runtime, this method is called during the initialization of the generated
- * parser to detect mismatched versions, and notify the registered listeners
- * prior to creating instances of the parser.
- *
- *
- * This method does not perform any detection or filtering of semantic
- * changes between tool and runtime versions. It simply checks for a
- * version match and emits an error to stderr if a difference
- * is detected.
- *
- *
- * Note that some breaking changes between releases could result in other
- * types of runtime exceptions, such as a {@link LinkageError}, prior to
- * calling this method. In these cases, the underlying version mismatch will
- * not be reported here. This method is primarily intended to
- * notify users of potential semantic changes between releases that do not
- * result in binary compatibility problems which would be detected by the
- * class loader. As with semantic changes, changes that break binary
- * compatibility between releases are mentioned in the release notes
- * accompanying the affected release.
- *
- *
- * Additional note for target developers: The version check
- * implemented by this class is designed to address specific compatibility
- * concerns that may arise during the execution of Java applications. Other
- * targets should consider the implementation of this method in the context
- * of that target's known execution environment, which may or may not
- * resemble the design provided for the Java target.
- *
- * @param generatingToolVersion The version of the tool used to generate a parser.
- * This value may be null when called from user code that was not generated
- * by, and does not reference, the ANTLR 4 Tool itself.
- * @param compileTimeVersion The version of the runtime the parser was
- * compiled against. This should always be passed using a direct reference
- * to {@link #VERSION}.
- */
+ ///
+ /// This method provides the ability to detect mismatches between the version
+ /// of ANTLR 4 used to generate a parser, the version of the ANTLR runtime a
+ /// parser was compiled against, and the version of the ANTLR runtime which
+ /// is currently executing.
+ ///
+ /// The version check is designed to detect the following two specific
+ /// scenarios.
+ ///
+ /// * The ANTLR Tool version used for code generation does not match the
+ /// currently executing runtime version.
+ /// * The ANTLR Runtime version referenced at the time a parser was
+ /// compiled does not match the currently executing runtime version.
+ ///
+ /// Starting with ANTLR 4.3, the code generator emits a call to this method
+ /// using two constants in each generated lexer and parser: a hard-coded
+ /// constant indicating the version of the tool used to generate the parser
+ /// and a reference to the compile-time constant _#VERSION_. At
+ /// runtime, this method is called during the initialization of the generated
+ /// parser to detect mismatched versions, and notify the registered listeners
+ /// prior to creating instances of the parser.
+ ///
+ /// This method does not perform any detection or filtering of semantic
+ /// changes between tool and runtime versions. It simply checks for a
+ /// version match and emits an error to stderr if a difference
+ /// is detected.
+ ///
+ /// Note that some breaking changes between releases could result in other
+ /// types of runtime exceptions, such as a _LinkageError_, prior to
+ /// calling this method. In these cases, the underlying version mismatch will
+ /// not be reported here. This method is primarily intended to
+ /// notify users of potential semantic changes between releases that do not
+ /// result in binary compatibility problems which would be detected by the
+ /// class loader. As with semantic changes, changes that break binary
+ /// compatibility between releases are mentioned in the release notes
+ /// accompanying the affected release.
+ ///
+ /// __ Additional note for target developers:__ The version check
+ /// implemented by this class is designed to address specific compatibility
+ /// concerns that may arise during the execution of Java applications. Other
+ /// targets should consider the implementation of this method in the context
+ /// of that target's known execution environment, which may or may not
+ /// resemble the design provided for the Java target.
+ ///
+ /// - Parameter generatingToolVersion: The version of the tool used to generate a parser.
+ /// This value may be null when called from user code that was not generated
+ /// by, and does not reference, the ANTLR 4 Tool itself.
+ /// - Parameter compileTimeVersion: The version of the runtime the parser was
+ /// compiled against. This should always be passed using a direct reference
+ /// to _#VERSION_.
+ ///
public static func checkVersion(_ generatingToolVersion: String, _ compileTimeVersion: String) {
let runtimeVersion: String = RuntimeMetaData.VERSION
var runtimeConflictsWithGeneratingTool: Bool = false
@@ -163,15 +153,15 @@ public class RuntimeMetaData {
}
}
- /**
- * Gets the major and minor version numbers from a version string. For
- * details about the syntax of the input {@code version}.
- * E.g., from x.y.z return x.y.
- *
- * @param version The complete version string.
- * @return A string of the form major.minor containing
- * only the major and minor components of the version string.
- */
+ ///
+ /// Gets the major and minor version numbers from a version string. For
+ /// details about the syntax of the input `version`.
+ /// E.g., from x.y.z return x.y.
+ ///
+ /// - Parameter version: The complete version string.
+ /// - Returns: A string of the form __major__.__minor__ containing
+ /// only the major and minor components of the version string.
+ ///
public static func getMajorMinorVersion(_ version: String) -> String {
let firstDot: Int = version.indexOf(".")
let secondDot: Int = firstDot >= 0 ? version.indexOf(".", startIndex: firstDot + 1) : -1
diff --git a/runtime/Swift/Sources/Antlr4/Token.swift b/runtime/Swift/Sources/Antlr4/Token.swift
index e41868fcb..264318ddd 100644
--- a/runtime/Swift/Sources/Antlr4/Token.swift
+++ b/runtime/Swift/Sources/Antlr4/Token.swift
@@ -5,98 +5,100 @@
-/** A token has properties: text, type, line, character position in the line
- * (so we can ignore tabs), token channel, index, and source from which
- * we obtained this token.
- */
+/// A token has properties: text, type, line, character position in the line
+/// (so we can ignore tabs), token channel, index, and source from which
+/// we obtained this token.
+///
public protocol Token: class, CustomStringConvertible {
//let INVALID_TYPE : Int = 0;
- /** During lookahead operations, this "token" signifies we hit rule end ATN state
- * and did not follow it despite needing to.
- */
+ /// During lookahead operations, this "token" signifies we hit rule end ATN state
+ /// and did not follow it despite needing to.
+ ///
//let EPSILON : Int = -2;
//let MIN_USER_TOKEN_TYPE : Int = 1;
//let EOF : Int = IntStream.EOF;
- /** All tokens go to the parser (unless skip() is called in that rule)
- * on a particular "channel". The parser tunes to a particular channel
- * so that whitespace etc... can go to the parser on a "hidden" channel.
- */
+ /// All tokens go to the parser (unless skip() is called in that rule)
+ /// on a particular "channel". The parser tunes to a particular channel
+ /// so that whitespace etc... can go to the parser on a "hidden" channel.
+ ///
//let DEFAULT_CHANNEL : Int = 0;
- /** Anything on different channel than DEFAULT_CHANNEL is not parsed
- * by parser.
- */
+ /// Anything on different channel than DEFAULT_CHANNEL is not parsed
+ /// by parser.
+ ///
//let HIDDEN_CHANNEL : Int = 1;
- /**
- * This is the minimum constant value which can be assigned to a
- * user-defined token channel.
- *
- *
- * The non-negative numbers less than {@link #MIN_USER_CHANNEL_VALUE} are
- * assigned to the predefined channels {@link #DEFAULT_CHANNEL} and
- * {@link #HIDDEN_CHANNEL}.
- *
- * @see org.antlr.v4.runtime.Token#getChannel()
- */
+ ///
+ /// This is the minimum constant value which can be assigned to a
+ /// user-defined token channel.
+ ///
+ ///
+ /// The non-negative numbers less than _#MIN_USER_CHANNEL_VALUE_ are
+ /// assigned to the predefined channels _#DEFAULT_CHANNEL_ and
+ /// _#HIDDEN_CHANNEL_.
+ ///
+ /// - SeeAlso: org.antlr.v4.runtime.Token#getChannel()
+ ///
//let MIN_USER_CHANNEL_VALUE : Int = 2;
- /**
- * Get the text of the token.
- */
+ ///
+ /// Get the text of the token.
+ ///
func getText() -> String?
- /** Get the token type of the token */
+ /// Get the token type of the token
func getType() -> Int
- /** The line number on which the 1st character of this token was matched,
- * line=1..n
- */
+ /// The line number on which the 1st character of this token was matched,
+ /// line=1..n
+ ///
func getLine() -> Int
- /** The index of the first character of this token relative to the
- * beginning of the line at which it occurs, 0..n-1
- */
+ /// The index of the first character of this token relative to the
+ /// beginning of the line at which it occurs, 0..n-1
+ ///
func getCharPositionInLine() -> Int
- /** Return the channel this token. Each token can arrive at the parser
- * on a different channel, but the parser only "tunes" to a single channel.
- * The parser ignores everything not on DEFAULT_CHANNEL.
- */
+ /// Return the channel this token. Each token can arrive at the parser
+ /// on a different channel, but the parser only "tunes" to a single channel.
+ /// The parser ignores everything not on DEFAULT_CHANNEL.
+ ///
func getChannel() -> Int
- /** An index from 0..n-1 of the token object in the input stream.
- * This must be valid in order to print token streams and
- * use TokenRewriteStream.
- *
- * Return -1 to indicate that this token was conjured up since
- * it doesn't have a valid index.
- */
+ /// An index from 0..n-1 of the token object in the input stream.
+ /// This must be valid in order to print token streams and
+ /// use TokenRewriteStream.
+ ///
+ /// Return -1 to indicate that this token was conjured up since
+ /// it doesn't have a valid index.
+ ///
func getTokenIndex() -> Int
- /** The starting character index of the token
- * This method is optional; return -1 if not implemented.
- */
+ /// The starting character index of the token
+ /// This method is optional; return -1 if not implemented.
+ ///
func getStartIndex() -> Int
- /** The last character index of the token.
- * This method is optional; return -1 if not implemented.
- */
+ /// The last character index of the token.
+ /// This method is optional; return -1 if not implemented.
+ ///
func getStopIndex() -> Int
- /** Gets the {@link org.antlr.v4.runtime.TokenSource} which created this token.
- */
+ /// Gets the _org.antlr.v4.runtime.TokenSource_ which created this token.
+ ///
func getTokenSource() -> TokenSource?
- /**
- * Gets the {@link org.antlr.v4.runtime.CharStream} from which this token was derived.
- */
+ ///
+ /// Gets the _org.antlr.v4.runtime.CharStream_ from which this token was derived.
+ ///
func getInputStream() -> CharStream?
+ func getTokenSourceAndStream() -> TokenSourceAndStream
+
var visited: Bool { get set }
}
diff --git a/runtime/Swift/Sources/Antlr4/TokenFactory.swift b/runtime/Swift/Sources/Antlr4/TokenFactory.swift
index b9501b6a5..63fa74b3c 100644
--- a/runtime/Swift/Sources/Antlr4/TokenFactory.swift
+++ b/runtime/Swift/Sources/Antlr4/TokenFactory.swift
@@ -4,22 +4,44 @@
*/
-/** The default mechanism for creating tokens. It's used by default in Lexer and
- * the error handling strategy (to create missing tokens). Notifying the parser
- * of a new factory means that it notifies it's token source and error strategy.
- */
-
+/// The default mechanism for creating tokens. It's used by default in Lexer and
+/// the error handling strategy (to create missing tokens). Notifying the parser
+/// of a new factory means that it notifies it's token source and error strategy.
+///
public protocol TokenFactory {
//typealias Symbol
- /** This is the method used to create tokens in the lexer and in the
- * error handling strategy. If text!=null, than the start and stop positions
- * are wiped to -1 in the text override is set in the CommonToken.
- */
- func create(_ source: (TokenSource?, CharStream?), _ type: Int, _ text: String?,
+ /// This is the method used to create tokens in the lexer and in the
+ /// error handling strategy. If text!=null, than the start and stop positions
+ /// are wiped to -1 in the text override is set in the CommonToken.
+ ///
+ func create(_ source: TokenSourceAndStream, _ type: Int, _ text: String?,
_ channel: Int, _ start: Int, _ stop: Int,
_ line: Int, _ charPositionInLine: Int) -> Token
- /** Generically useful */
+ /// Generically useful
func create(_ type: Int, _ text: String) -> Token
}
+
+
+/**
+ Holds the references to the TokenSource and CharStream used to create a Token.
+ These are together to reduce memory footprint by having one instance of
+ TokenSourceAndStream shared across many tokens. The references here are weak
+ to avoid retain cycles.
+ */
+public class TokenSourceAndStream {
+ ///
+ /// An empty TokenSourceAndStream which is used as the default value of
+ /// _#source_ for tokens that do not have a source.
+ ///
+ public static let EMPTY = TokenSourceAndStream()
+
+ public weak var tokenSource: TokenSource?
+ public weak var stream: CharStream?
+
+ public init(_ tokenSource: TokenSource? = nil, _ stream: CharStream? = nil) {
+ self.tokenSource = tokenSource
+ self.stream = stream
+ }
+}
diff --git a/runtime/Swift/Sources/Antlr4/TokenSource.swift b/runtime/Swift/Sources/Antlr4/TokenSource.swift
index bbbd813b2..5b3780d76 100644
--- a/runtime/Swift/Sources/Antlr4/TokenSource.swift
+++ b/runtime/Swift/Sources/Antlr4/TokenSource.swift
@@ -4,79 +4,79 @@
*/
-/**
- * A source of tokens must provide a sequence of tokens via {@link #nextToken()}
- * and also must reveal it's source of characters; {@link org.antlr.v4.runtime.CommonToken}'s text is
- * computed from a {@link org.antlr.v4.runtime.CharStream}; it only store indices into the char
- * stream.
- *
- * Errors from the lexer are never passed to the parser. Either you want to keep
- * going or you do not upon token recognition error. If you do not want to
- * continue lexing then you do not want to continue parsing. Just throw an
- * exception not under {@link org.antlr.v4.runtime.RecognitionException} and Java will naturally toss
- * you all the way out of the recognizers. If you want to continue lexing then
- * you should not throw an exception to the parser--it has already requested a
- * token. Keep lexing until you get a valid one. Just report errors and keep
- * going, looking for a valid token.
- */
+///
+/// A source of tokens must provide a sequence of tokens via _#nextToken()_
+/// and also must reveal it's source of characters; _org.antlr.v4.runtime.CommonToken_'s text is
+/// computed from a _org.antlr.v4.runtime.CharStream_; it only store indices into the char
+/// stream.
+///
+/// Errors from the lexer are never passed to the parser. Either you want to keep
+/// going or you do not upon token recognition error. If you do not want to
+/// continue lexing then you do not want to continue parsing. Just throw an
+/// exception not under _org.antlr.v4.runtime.RecognitionException_ and Java will naturally toss
+/// you all the way out of the recognizers. If you want to continue lexing then
+/// you should not throw an exception to the parser--it has already requested a
+/// token. Keep lexing until you get a valid one. Just report errors and keep
+/// going, looking for a valid token.
+///
public protocol TokenSource: class {
- /**
- * Return a {@link org.antlr.v4.runtime.Token} object from your input stream (usually a
- * {@link org.antlr.v4.runtime.CharStream}). Do not fail/return upon lexing error; keep chewing
- * on the characters until you get a good one; errors are not passed through
- * to the parser.
- */
+ ///
+ /// Return a _org.antlr.v4.runtime.Token_ object from your input stream (usually a
+ /// _org.antlr.v4.runtime.CharStream_). Do not fail/return upon lexing error; keep chewing
+ /// on the characters until you get a good one; errors are not passed through
+ /// to the parser.
+ ///
func nextToken() throws -> Token
- /**
- * Get the line number for the current position in the input stream. The
- * first line in the input is line 1.
- *
- * @return The line number for the current position in the input stream, or
- * 0 if the current token source does not track line numbers.
- */
+ ///
+ /// Get the line number for the current position in the input stream. The
+ /// first line in the input is line 1.
+ ///
+ /// - Returns: The line number for the current position in the input stream, or
+ /// 0 if the current token source does not track line numbers.
+ ///
func getLine() -> Int
- /**
- * Get the index into the current line for the current position in the input
- * stream. The first character on a line has position 0.
- *
- * @return The line number for the current position in the input stream, or
- * -1 if the current token source does not track character positions.
- */
+ ///
+ /// Get the index into the current line for the current position in the input
+ /// stream. The first character on a line has position 0.
+ ///
+ /// - Returns: The line number for the current position in the input stream, or
+ /// -1 if the current token source does not track character positions.
+ ///
func getCharPositionInLine() -> Int
- /**
- * Get the {@link org.antlr.v4.runtime.CharStream} from which this token source is currently
- * providing tokens.
- *
- * @return The {@link org.antlr.v4.runtime.CharStream} associated with the current position in
- * the input, or {@code null} if no input stream is available for the token
- * source.
- */
+ ///
+ /// Get the _org.antlr.v4.runtime.CharStream_ from which this token source is currently
+ /// providing tokens.
+ ///
+ /// - Returns: The _org.antlr.v4.runtime.CharStream_ associated with the current position in
+ /// the input, or `null` if no input stream is available for the token
+ /// source.
+ ///
func getInputStream() -> CharStream?
- /**
- * Gets the name of the underlying input source. This method returns a
- * non-null, non-empty string. If such a name is not known, this method
- * returns {@link org.antlr.v4.runtime.IntStream#UNKNOWN_SOURCE_NAME}.
- */
+ ///
+ /// Gets the name of the underlying input source. This method returns a
+ /// non-null, non-empty string. If such a name is not known, this method
+ /// returns _org.antlr.v4.runtime.IntStream#UNKNOWN_SOURCE_NAME_.
+ ///
func getSourceName() -> String
- /**
- * Set the {@link org.antlr.v4.runtime.TokenFactory} this token source should use for creating
- * {@link org.antlr.v4.runtime.Token} objects from the input.
- *
- * @param factory The {@link org.antlr.v4.runtime.TokenFactory} to use for creating tokens.
- */
+ ///
+ /// Set the _org.antlr.v4.runtime.TokenFactory_ this token source should use for creating
+ /// _org.antlr.v4.runtime.Token_ objects from the input.
+ ///
+ /// - Parameter factory: The _org.antlr.v4.runtime.TokenFactory_ to use for creating tokens.
+ ///
func setTokenFactory(_ factory: TokenFactory)
- /**
- * Gets the {@link org.antlr.v4.runtime.TokenFactory} this token source is currently using for
- * creating {@link org.antlr.v4.runtime.Token} objects from the input.
- *
- * @return The {@link org.antlr.v4.runtime.TokenFactory} currently used by this token source.
- */
+ ///
+ /// Gets the _org.antlr.v4.runtime.TokenFactory_ this token source is currently using for
+ /// creating _org.antlr.v4.runtime.Token_ objects from the input.
+ ///
+ /// - Returns: The _org.antlr.v4.runtime.TokenFactory_ currently used by this token source.
+ ///
func getTokenFactory() -> TokenFactory
}
diff --git a/runtime/Swift/Sources/Antlr4/TokenStream.swift b/runtime/Swift/Sources/Antlr4/TokenStream.swift
index 92c35e549..536f33b90 100644
--- a/runtime/Swift/Sources/Antlr4/TokenStream.swift
+++ b/runtime/Swift/Sources/Antlr4/TokenStream.swift
@@ -5,134 +5,133 @@
-/**
- * An {@link org.antlr.v4.runtime.IntStream} whose symbols are {@link org.antlr.v4.runtime.Token} instances.
- */
+///
+/// An _org.antlr.v4.runtime.IntStream_ whose symbols are _org.antlr.v4.runtime.Token_ instances.
+///
public protocol TokenStream: IntStream {
- /**
- * Get the {@link org.antlr.v4.runtime.Token} instance associated with the value returned by
- * {@link #LA LA(k)}. This method has the same pre- and post-conditions as
- * {@link org.antlr.v4.runtime.IntStream#LA}. In addition, when the preconditions of this method
- * are met, the return value is non-null and the value of
- * {@code LT(k).getType()==LA(k)}.
- *
- * @see org.antlr.v4.runtime.IntStream#LA
- */
+ ///
+ /// Get the _org.antlr.v4.runtime.Token_ instance associated with the value returned by
+ /// _#LA LA(k)_. This method has the same pre- and post-conditions as
+ /// _org.antlr.v4.runtime.IntStream#LA_. In addition, when the preconditions of this method
+ /// are met, the return value is non-null and the value of
+ /// `LT(k).getType()==LA(k)`.
+ ///
+ /// - SeeAlso: org.antlr.v4.runtime.IntStream#LA
+ ///
func LT(_ k: Int) throws -> Token?
- /**
- * Gets the {@link org.antlr.v4.runtime.Token} at the specified {@code index} in the stream. When
- * the preconditions of this method are met, the return value is non-null.
- *
- * The preconditions for this method are the same as the preconditions of
- * {@link org.antlr.v4.runtime.IntStream#seek}. If the behavior of {@code seek(index)} is
- * unspecified for the current state and given {@code index}, then the
- * behavior of this method is also unspecified.
- *
- * The symbol referred to by {@code index} differs from {@code seek()} only
- * in the case of filtering streams where {@code index} lies before the end
- * of the stream. Unlike {@code seek()}, this method does not adjust
- * {@code index} to point to a non-ignored symbol.
- *
- * @throws IllegalArgumentException if {code index} is less than 0
- * @throws UnsupportedOperationException if the stream does not support
- * retrieving the token at the specified index
- */
+ ///
+ /// Gets the _org.antlr.v4.runtime.Token_ at the specified `index` in the stream. When
+ /// the preconditions of this method are met, the return value is non-null.
+ ///
+ /// The preconditions for this method are the same as the preconditions of
+ /// _org.antlr.v4.runtime.IntStream#seek_. If the behavior of `seek(index)` is
+ /// unspecified for the current state and given `index`, then the
+ /// behavior of this method is also unspecified.
+ ///
+ /// The symbol referred to by `index` differs from `seek()` only
+ /// in the case of filtering streams where `index` lies before the end
+ /// of the stream. Unlike `seek()`, this method does not adjust
+ /// `index` to point to a non-ignored symbol.
+ ///
+ /// - Throws: ANTLRError.illegalArgument if `index` is less than 0
+ /// - Throws: ANTLRError.unsupportedOperation if the stream does not support
+ /// retrieving the token at the specified index
+ ///
func get(_ index: Int) throws -> Token
- /**
- * Gets the underlying {@link org.antlr.v4.runtime.TokenSource} which provides tokens for this
- * stream.
- */
+ ///
+ /// Gets the underlying _org.antlr.v4.runtime.TokenSource_ which provides tokens for this
+ /// stream.
+ ///
func getTokenSource() -> TokenSource
- /**
- * Return the text of all tokens within the specified {@code interval}. This
- * method behaves like the following code (including potential exceptions
- * for violating preconditions of {@link #get}, but may be optimized by the
- * specific implementation.
- *
- *
- * TokenStream stream = ...;
- * String text = "";
- * for (int i = interval.a; i <= interval.b; i++) {
- * text += stream.get(i).getText();
- * }
- *
- *
- * @param interval The interval of tokens within this stream to get text
- * for.
- * @return The text of all tokens within the specified interval in this
- * stream.
- *
- * @throws NullPointerException if {@code interval} is {@code null}
- */
+ ///
+ /// Return the text of all tokens within the specified `interval`. This
+ /// method behaves like the following code (including potential exceptions
+ /// for violating preconditions of _#get_, but may be optimized by the
+ /// specific implementation.
+ ///
+ ///
+ /// TokenStream stream = ...;
+ /// String text = "";
+ /// for (int i = interval.a; i <= interval.b; i++) {
+ /// text += stream.get(i).getText();
+ /// }
+ ///
+ ///
+ /// - Parameter interval: The interval of tokens within this stream to get text
+ /// for.
+ /// - Returns: The text of all tokens within the specified interval in this
+ /// stream.
+ ///
+ ///
func getText(_ interval: Interval) throws -> String
- /**
- * Return the text of all tokens in the stream. This method behaves like the
- * following code, including potential exceptions from the calls to
- * {@link org.antlr.v4.runtime.IntStream#size} and {@link #getText(org.antlr.v4.runtime.misc.Interval)}, but may be
- * optimized by the specific implementation.
- *
- *
- * TokenStream stream = ...;
- * String text = stream.getText(new Interval(0, stream.size()));
- *
- *
- * @return The text of all tokens in the stream.
- */
+ ///
+ /// Return the text of all tokens in the stream. This method behaves like the
+ /// following code, including potential exceptions from the calls to
+ /// _org.antlr.v4.runtime.IntStream#size_ and _#getText(org.antlr.v4.runtime.misc.Interval)_, but may be
+ /// optimized by the specific implementation.
+ ///
+ ///
+ /// TokenStream stream = ...;
+ /// String text = stream.getText(new Interval(0, stream.size()));
+ ///
+ ///
+ /// - Returns: The text of all tokens in the stream.
+ ///
func getText() throws -> String
- /**
- * Return the text of all tokens in the source interval of the specified
- * context. This method behaves like the following code, including potential
- * exceptions from the call to {@link #getText(org.antlr.v4.runtime.misc.Interval)}, but may be
- * optimized by the specific implementation.
- *
- * If {@code ctx.getSourceInterval()} does not return a valid interval of
- * tokens provided by this stream, the behavior is unspecified.
- *
- *
- * TokenStream stream = ...;
- * String text = stream.getText(ctx.getSourceInterval());
- *
- *
- * @param ctx The context providing the source interval of tokens to get
- * text for.
- * @return The text of all tokens within the source interval of {@code ctx}.
- */
+ ///
+ /// Return the text of all tokens in the source interval of the specified
+ /// context. This method behaves like the following code, including potential
+ /// exceptions from the call to _#getText(org.antlr.v4.runtime.misc.Interval)_, but may be
+ /// optimized by the specific implementation.
+ ///
+ /// If `ctx.getSourceInterval()` does not return a valid interval of
+ /// tokens provided by this stream, the behavior is unspecified.
+ ///
+ ///
+ /// TokenStream stream = ...;
+ /// String text = stream.getText(ctx.getSourceInterval());
+ ///
+ ///
+ /// - Parameter ctx: The context providing the source interval of tokens to get
+ /// text for.
+ /// - Returns: The text of all tokens within the source interval of `ctx`.
+ ///
func getText(_ ctx: RuleContext) throws -> String
- /**
- * Return the text of all tokens in this stream between {@code start} and
- * {@code stop} (inclusive).
- *
- * If the specified {@code start} or {@code stop} token was not provided by
- * this stream, or if the {@code stop} occurred before the {@code start}
- * token, the behavior is unspecified.
- *
- * For streams which ensure that the {@link org.antlr.v4.runtime.Token#getTokenIndex} method is
- * accurate for all of its provided tokens, this method behaves like the
- * following code. Other streams may implement this method in other ways
- * provided the behavior is consistent with this at a high level.
- *
- *
- * TokenStream stream = ...;
- * String text = "";
- * for (int i = start.getTokenIndex(); i <= stop.getTokenIndex(); i++) {
- * text += stream.get(i).getText();
- * }
- *
- *
- * @param start The first token in the interval to get text for.
- * @param stop The last token in the interval to get text for (inclusive).
- * @return The text of all tokens lying between the specified {@code start}
- * and {@code stop} tokens.
- *
- * @throws UnsupportedOperationException if this stream does not support
- * this method for the specified tokens
- */
+ ///
+ /// Return the text of all tokens in this stream between `start` and
+ /// `stop` (inclusive).
+ ///
+ /// If the specified `start` or `stop` token was not provided by
+ /// this stream, or if the `stop` occurred before the `start`
+ /// token, the behavior is unspecified.
+ ///
+ /// For streams which ensure that the _org.antlr.v4.runtime.Token#getTokenIndex_ method is
+ /// accurate for all of its provided tokens, this method behaves like the
+ /// following code. Other streams may implement this method in other ways
+ /// provided the behavior is consistent with this at a high level.
+ ///
+ ///
+ /// TokenStream stream = ...;
+ /// String text = "";
+ /// for (int i = start.getTokenIndex(); i <= stop.getTokenIndex(); i++) {
+ /// text += stream.get(i).getText();
+ /// }
+ ///
+ ///
+ /// - Parameter start: The first token in the interval to get text for.
+ /// - Parameter stop: The last token in the interval to get text for (inclusive).
+ /// - Throws: ANTLRError.unsupportedOperation if this stream does not support
+ /// this method for the specified tokens
+ /// - Returns: The text of all tokens lying between the specified `start`
+ /// and `stop` tokens.
+ ///
+ ///
func getText(_ start: Token?, _ stop: Token?) throws -> String
}
diff --git a/runtime/Swift/Sources/Antlr4/TokenStreamRewriter.swift b/runtime/Swift/Sources/Antlr4/TokenStreamRewriter.swift
index 9423ba7b7..2a74c3681 100644
--- a/runtime/Swift/Sources/Antlr4/TokenStreamRewriter.swift
+++ b/runtime/Swift/Sources/Antlr4/TokenStreamRewriter.swift
@@ -4,82 +4,82 @@
*/
-/**
- * Useful for rewriting out a buffered input token stream after doing some
- * augmentation or other manipulations on it.
- *
- *
- * You can insert stuff, replace, and delete chunks. Note that the operations
- * are done lazily--only if you convert the buffer to a {@link String} with
- * {@link org.antlr.v4.runtime.TokenStream#getText()}. This is very efficient because you are not
- * moving data around all the time. As the buffer of tokens is converted to
- * strings, the {@link #getText()} method(s) scan the input token stream and
- * check to see if there is an operation at the current index. If so, the
- * operation is done and then normal {@link String} rendering continues on the
- * buffer. This is like having multiple Turing machine instruction streams
- * (programs) operating on a single input tape. :)
- *
- *
- * This rewriter makes no modifications to the token stream. It does not ask the
- * stream to fill itself up nor does it advance the input cursor. The token
- * stream {@link org.antlr.v4.runtime.TokenStream#index()} will return the same value before and
- * after any {@link #getText()} call.
- *
- *
- * The rewriter only works on tokens that you have in the buffer and ignores the
- * current input cursor. If you are buffering tokens on-demand, calling
- * {@link #getText()} halfway through the input will only do rewrites for those
- * tokens in the first half of the file.
- *
- *
- * Since the operations are done lazily at {@link #getText}-time, operations do
- * not screw up the token index values. That is, an insert operation at token
- * index {@code i} does not change the index values for tokens
- * {@code i}+1..n-1.
- *
- *
- * Because operations never actually alter the buffer, you may always get the
- * original token stream back without undoing anything. Since the instructions
- * are queued up, you can easily simulate transactions and roll back any changes
- * if there is an error just by removing instructions. For example,
- *
- *
- * CharStream input = new ANTLRFileStream("input");
- * TLexer lex = new TLexer(input);
- * CommonTokenStream tokens = new CommonTokenStream(lex);
- * T parser = new T(tokens);
- * TokenStreamRewriter rewriter = new TokenStreamRewriter(tokens);
- * parser.startRule();
- *
- *
- *
- * Then in the rules, you can execute (assuming rewriter is visible):
- *
- *
- * Token t,u;
- * ...
- * rewriter.insertAfter(t, "text to put after t");}
- * rewriter.insertAfter(u, "text after u");}
- * System.out.println(rewriter.getText());
- *
- *
- *
- * You can also have multiple "instruction streams" and get multiple rewrites
- * from a single pass over the input. Just name the instruction streams and use
- * that name again when printing the buffer. This could be useful for generating
- * a C file and also its header file--all from the same buffer:
- *
- *
- * rewriter.insertAfter("pass1", t, "text to put after t");}
- * rewriter.insertAfter("pass2", u, "text after u");}
- * System.out.println(rewriter.getText("pass1"));
- * System.out.println(rewriter.getText("pass2"));
- *
- *
- *
- * If you don't use named rewrite streams, a "default" stream is used as the
- * first example shows.
- */
+///
+/// Useful for rewriting out a buffered input token stream after doing some
+/// augmentation or other manipulations on it.
+///
+///
+/// You can insert stuff, replace, and delete chunks. Note that the operations
+/// are done lazily--only if you convert the buffer to a _String_ with
+/// _org.antlr.v4.runtime.TokenStream#getText()_. This is very efficient because you are not
+/// moving data around all the time. As the buffer of tokens is converted to
+/// strings, the _#getText()_ method(s) scan the input token stream and
+/// check to see if there is an operation at the current index. If so, the
+/// operation is done and then normal _String_ rendering continues on the
+/// buffer. This is like having multiple Turing machine instruction streams
+/// (programs) operating on a single input tape. :)
+///
+///
+/// This rewriter makes no modifications to the token stream. It does not ask the
+/// stream to fill itself up nor does it advance the input cursor. The token
+/// stream _org.antlr.v4.runtime.TokenStream#index()_ will return the same value before and
+/// after any _#getText()_ call.
+///
+///
+/// The rewriter only works on tokens that you have in the buffer and ignores the
+/// current input cursor. If you are buffering tokens on-demand, calling
+/// _#getText()_ halfway through the input will only do rewrites for those
+/// tokens in the first half of the file.
+///
+///
+/// Since the operations are done lazily at _#getText_-time, operations do
+/// not screw up the token index values. That is, an insert operation at token
+/// index `i` does not change the index values for tokens
+/// `i`+1..n-1.
+///
+///
+/// Because operations never actually alter the buffer, you may always get the
+/// original token stream back without undoing anything. Since the instructions
+/// are queued up, you can easily simulate transactions and roll back any changes
+/// if there is an error just by removing instructions. For example,
+///
+///
+/// CharStream input = new ANTLRFileStream("input");
+/// TLexer lex = new TLexer(input);
+/// CommonTokenStream tokens = new CommonTokenStream(lex);
+/// T parser = new T(tokens);
+/// TokenStreamRewriter rewriter = new TokenStreamRewriter(tokens);
+/// parser.startRule();
+///
+///
+///
+/// Then in the rules, you can execute (assuming rewriter is visible):
+///
+///
+/// Token t,u;
+/// ...
+/// rewriter.insertAfter(t, "text to put after t");}
+/// rewriter.insertAfter(u, "text after u");}
+/// System.out.println(rewriter.getText());
+///
+///
+///
+/// You can also have multiple "instruction streams" and get multiple rewrites
+/// from a single pass over the input. Just name the instruction streams and use
+/// that name again when printing the buffer. This could be useful for generating
+/// a C file and also its header file--all from the same buffer:
+///
+///
+/// rewriter.insertAfter("pass1", t, "text to put after t");}
+/// rewriter.insertAfter("pass2", u, "text after u");}
+/// System.out.println(rewriter.getText("pass1"));
+/// System.out.println(rewriter.getText("pass2"));
+///
+///
+///
+/// If you don't use named rewrite streams, a "default" stream is used as the
+/// first example shows.
+///
import Foundation
@@ -90,9 +90,9 @@ public class TokenStreamRewriter {
// Define the rewrite operation hierarchy
public class RewriteOperation: CustomStringConvertible {
- /** What index into rewrites List are we? */
+ /// What index into rewrites List are we?
internal var instructionIndex: Int = 0
- /** Token buffer index. */
+ /// Token buffer index.
internal var index: Int
internal var text: String?
internal var lastIndex: Int = 0
@@ -109,9 +109,9 @@ public class TokenStreamRewriter {
self.tokens = tokens
}
- /** Execute the rewrite operation by possibly adding to the buffer.
- * Return the index of the next token to operate on.
- */
+ /// Execute the rewrite operation by possibly adding to the buffer.
+ /// Return the index of the next token to operate on.
+ ///
public func execute(_ buf: StringBuilder) throws -> Int {
return index
}
@@ -144,9 +144,9 @@ public class TokenStreamRewriter {
}
}
- /** I'm going to try replacing range from x..y with (y-x)+1 ReplaceOp
- * instructions.
- */
+ /// I'm going to try replacing range from x..y with (y-x)+1 ReplaceOp
+ /// instructions.
+ ///
public class ReplaceOp: RewriteOperation {
@@ -190,55 +190,55 @@ public class TokenStreamRewriter {
final var isEmpty: Bool{
return rewrites.isEmpty
}
- /** We need to combine operations and report invalid operations (like
- * overlapping replaces that are not completed nested). Inserts to
- * same index need to be combined etc... Here are the cases:
- *
- * I.i.u I.j.v leave alone, nonoverlapping
- * I.i.u I.i.v combine: Iivu
- *
- * R.i-j.u R.x-y.v | i-j in x-y delete first R
- * R.i-j.u R.i-j.v delete first R
- * R.i-j.u R.x-y.v | x-y in i-j ERROR
- * R.i-j.u R.x-y.v | boundaries overlap ERROR
- *
- * Delete special case of replace (text==null):
- * D.i-j.u D.x-y.v | boundaries overlap combine to max(min)..max(right)
- *
- * I.i.u R.x-y.v | i in (x+1)-y delete I (since insert before
- * we're not deleting i)
- * I.i.u R.x-y.v | i not in (x+1)-y leave alone, nonoverlapping
- * R.x-y.v I.i.u | i in x-y ERROR
- * R.x-y.v I.x.u R.x-y.uv (combine, delete I)
- * R.x-y.v I.i.u | i not in x-y leave alone, nonoverlapping
- *
- * I.i.u = insert u before op @ index i
- * R.x-y.u = replace x-y indexed tokens with u
- *
- * First we need to examine replaces. For any replace op:
- *
- * 1. wipe out any insertions before op within that range.
- * 2. Drop any replace op before that is contained completely within
- * that range.
- * 3. Throw exception upon boundary overlap with any previous replace.
- *
- * Then we can deal with inserts:
- *
- * 1. for any inserts to same index, combine even if not adjacent.
- * 2. for any prior replace with same left boundary, combine this
- * insert with replace and delete this replace.
- * 3. throw exception if index in same range as previous replace
- *
- * Don't actually delete; make op null in list. Easier to walk list.
- * Later we can throw as we add to index → op map.
- *
- * Note that I.2 R.2-2 will wipe out I.2 even though, technically, the
- * inserted stuff would be before the replace range. But, if you
- * add tokens in front of a method body '{' and then delete the method
- * body, I think the stuff before the '{' you added should disappear too.
- *
- * Return a map from token index to operation.
- */
+ /// We need to combine operations and report invalid operations (like
+ /// overlapping replaces that are not completely nested). Inserts to
+ /// same index need to be combined etc... Here are the cases:
+ ///
+ /// I.i.u I.j.v leave alone, nonoverlapping
+ /// I.i.u I.i.v combine: Iivu
+ ///
+ /// R.i-j.u R.x-y.v | i-j in x-y delete first R
+ /// R.i-j.u R.i-j.v delete first R
+ /// R.i-j.u R.x-y.v | x-y in i-j ERROR
+ /// R.i-j.u R.x-y.v | boundaries overlap ERROR
+ ///
+ /// Delete special case of replace (text==null):
+ /// D.i-j.u D.x-y.v | boundaries overlap combine to max(min)..max(right)
+ ///
+ /// I.i.u R.x-y.v | i in (x+1)-y delete I (since insert before
+ /// we're not deleting i)
+ /// I.i.u R.x-y.v | i not in (x+1)-y leave alone, nonoverlapping
+ /// R.x-y.v I.i.u | i in x-y ERROR
+ /// R.x-y.v I.x.u R.x-y.uv (combine, delete I)
+ /// R.x-y.v I.i.u | i not in x-y leave alone, nonoverlapping
+ ///
+ /// I.i.u = insert u before op @ index i
+ /// R.x-y.u = replace x-y indexed tokens with u
+ ///
+ /// First we need to examine replaces. For any replace op:
+ ///
+ /// 1. wipe out any insertions before op within that range.
+ /// 2. Drop any replace op before that is contained completely within
+ /// that range.
+ /// 3. Throw exception upon boundary overlap with any previous replace.
+ ///
+ /// Then we can deal with inserts:
+ ///
+ /// 1. for any inserts to same index, combine even if not adjacent.
+ /// 2. for any prior replace with same left boundary, combine this
+ /// insert with replace and delete this replace.
+ /// 3. throw exception if index in same range as previous replace
+ ///
+ /// Don't actually delete; make op null in list. Easier to walk list.
+ /// Later we can throw as we add to index → op map.
+ ///
+ /// Note that I.2 R.2-2 will wipe out I.2 even though, technically, the
+ /// inserted stuff would be before the replace range. But, if you
+ /// add tokens in front of a method body '{' and then delete the method
+ /// body, I think the stuff before the '{' you added should disappear too.
+ ///
+ /// Return a map from token index to operation.
+ ///
final func reduceToSingleOperationPerIndex() throws -> Dictionary {
let rewritesCount = rewrites.count
@@ -361,7 +361,7 @@ public class TokenStreamRewriter {
return x + y
}
- /** Get all operations before an index of a particular kind */
+ /// Get all operations before an index of a particular kind
final func getKindOfOps(_ rewrites: inout [RewriteOperation?], _ kind: T.Type, _ before: Int ) -> [Int] {
@@ -378,16 +378,16 @@ public class TokenStreamRewriter {
}
- /** Our source stream */
+ /// Our source stream
internal var tokens: TokenStream
- /** You may have multiple, named streams of rewrite operations.
- * I'm calling these things "programs."
- * Maps String (name) → rewrite (List)
- */
+ /// You may have multiple, named streams of rewrite operations.
+ /// I'm calling these things "programs."
+ /// Maps String (name) → rewrite (List)
+ ///
internal var programs: Dictionary //Array
- /** Map String (program name) → Integer index */
+ /// Map String (program name) → Integer index
internal final var lastRewriteTokenIndexes: Dictionary
public init(_ tokens: TokenStream) {
@@ -405,10 +405,10 @@ public class TokenStreamRewriter {
rollback(DEFAULT_PROGRAM_NAME, instructionIndex)
}
- /** Rollback the instruction stream for a program so that
- * the indicated instruction (via instructionIndex) is no
- * longer in the stream. UNTESTED!
- */
+ /// Rollback the instruction stream for a program so that
+ /// the indicated instruction (via instructionIndex) is no
+ /// longer in the stream. UNTESTED!
+ ///
public func rollback(_ programName: String, _ instructionIndex: Int) {
if let program = programs[programName] {
program.rollback(instructionIndex)
@@ -419,7 +419,7 @@ public class TokenStreamRewriter {
deleteProgram(DEFAULT_PROGRAM_NAME)
}
- /** Reset the program so that no instructions exist */
+ /// Reset the program so that no instructions exist
public func deleteProgram(_ programName: String) {
rollback(programName, TokenStreamRewriter.MIN_TOKEN_INDEX)
}
@@ -551,29 +551,29 @@ public class TokenStreamRewriter {
return program
}
- /** Return the text from the original tokens altered per the
- * instructions given to this rewriter.
- */
+ /// Return the text from the original tokens altered per the
+ /// instructions given to this rewriter.
+ ///
public func getText() throws -> String {
return try getText(DEFAULT_PROGRAM_NAME, Interval.of(0, tokens.size() - 1))
}
- /** Return the text from the original tokens altered per the
- * instructions given to this rewriter in programName.
- */
+ /// Return the text from the original tokens altered per the
+ /// instructions given to this rewriter in programName.
+ ///
public func getText(_ programName: String) throws -> String {
return try getText(programName, Interval.of(0, tokens.size() - 1))
}
- /** Return the text associated with the tokens in the interval from the
- * original token stream but with the alterations given to this rewriter.
- * The interval refers to the indexes in the original token stream.
- * We do not alter the token stream in any way, so the indexes
- * and intervals are still consistent. Includes any operations done
- * to the first and last token in the interval. So, if you did an
- * insertBefore on the first token, you would get that insertion.
- * The same is true if you do an insertAfter the stop token.
- */
+ /// Return the text associated with the tokens in the interval from the
+ /// original token stream but with the alterations given to this rewriter.
+ /// The interval refers to the indexes in the original token stream.
+ /// We do not alter the token stream in any way, so the indexes
+ /// and intervals are still consistent. Includes any operations done
+ /// to the first and last token in the interval. So, if you did an
+ /// insertBefore on the first token, you would get that insertion.
+ /// The same is true if you do an insertAfter the stop token.
+ ///
public func getText(_ interval: Interval) throws -> String {
return try getText(DEFAULT_PROGRAM_NAME, interval)
}
diff --git a/runtime/Swift/Sources/Antlr4/UnbufferedCharStream.swift b/runtime/Swift/Sources/Antlr4/UnbufferedCharStream.swift
new file mode 100644
index 000000000..900a62d32
--- /dev/null
+++ b/runtime/Swift/Sources/Antlr4/UnbufferedCharStream.swift
@@ -0,0 +1,385 @@
+/*
+ * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+ * Use of this file is governed by the BSD 3-clause license that
+ * can be found in the LICENSE.txt file in the project root.
+ */
+
+import Foundation
+
+
+/** Do not buffer up the entire char stream. It does keep a small buffer
+ * for efficiency and also buffers while a mark exists (set by the
+ * lookahead prediction in parser). "Unbuffered" here refers to fact
+ * that it doesn't buffer all data, not that it's on-demand loading of chars.
+ *
+ * Before 4.7, this class used the default environment encoding to convert
+ * bytes to UTF-16, and held the UTF-16 bytes in the buffer as chars.
+ *
+ * As of 4.7, the class uses UTF-8 by default, and the buffer holds Unicode
+ * code points in the buffer as ints.
+ */
+open class UnbufferedCharStream: CharStream {
+ /**
+ * A moving window buffer of the data being scanned. While there's a marker,
+ * we keep adding to buffer. Otherwise, {@link #consume consume()} resets so
+ * we start filling at index 0 again.
+ */
+ internal var data: [Int]
+
+ /**
+ * The number of characters currently in {@link #data data}.
+ *
+ * This is not the buffer capacity, that's {@code data.length}.
+ */
+ internal var n = 0
+
+ /**
+ * 0..n-1 index into {@link #data data} of next character.
+ *
+ * The {@code LA(1)} character is {@code data[p]}. If {@code p == n}, we are
+ * out of buffered characters.
+ */
+ internal var p = 0
+
+ /**
+ * Count up with {@link #mark mark()} and down with
+ * {@link #release release()}. When we {@code release()} the last mark,
+ * {@code numMarkers} reaches 0 and we reset the buffer. Copy
+ * {@code data[p]..data[n-1]} to {@code data[0]..data[(n-1)-p]}.
+ */
+ internal var numMarkers = 0
+
+ /**
+ * This is the {@code LA(-1)} character for the current position.
+ */
+ internal var lastChar = -1
+
+ /**
+ * When {@code numMarkers > 0}, this is the {@code LA(-1)} character for the
+ * first character in {@link #data data}. Otherwise, this is unspecified.
+ */
+ internal var lastCharBufferStart = 0
+
+ /**
+ * Absolute character index. It's the index of the character about to be
+ * read via {@code LA(1)}. Goes from 0 to the number of characters in the
+ * entire stream, although the stream size is unknown before the end is
+ * reached.
+ */
+ internal var currentCharIndex = 0
+
+ internal let input: InputStream
+ private var unicodeIterator: UnicodeScalarStreamIterator
+
+
+ /** The name or source of this char stream. */
+ public var name: String = ""
+
+ public init(_ input: InputStream, _ bufferSize: Int = 256) {
+ self.input = input
+ self.data = [Int](repeating: 0, count: bufferSize)
+ let si = UInt8StreamIterator(input)
+ self.unicodeIterator = UnicodeScalarStreamIterator(si)
+ }
+
+ public func consume() throws {
+ if try LA(1) == CommonToken.EOF {
+ throw ANTLRError.illegalState(msg: "cannot consume EOF")
+ }
+
+ // buf always has at least data[p==0] in this method due to ctor
+ lastChar = data[p] // track last char for LA(-1)
+
+ if p == n - 1 && numMarkers == 0 {
+ n = 0
+ p = -1 // p++ will leave this at 0
+ lastCharBufferStart = lastChar
+ }
+
+ p += 1
+ currentCharIndex += 1
+ sync(1)
+ }
+
+ /**
+ * Make sure we have 'need' elements from current position {@link #p p}.
+ * Last valid {@code p} index is {@code data.length-1}. {@code p+need-1} is
+ * the char index 'need' elements ahead. If we need 1 element,
+ * {@code (p+1-1)==p} must be less than {@code data.length}.
+ */
+ internal func sync(_ want: Int) {
+ let need = (p + want - 1) - n + 1 // how many more elements we need?
+ if need > 0 {
+ fill(need)
+ }
+ }
+
+ /**
+ * Add {@code n} characters to the buffer. Returns the number of characters
+ * actually added to the buffer. If the return value is less than {@code n},
+ * then EOF was reached before {@code n} characters could be added.
+ */
+ @discardableResult internal func fill(_ toAdd: Int) -> Int {
+ for i in 0 ..< toAdd {
+ if n > 0 && data[n - 1] == CommonToken.EOF {
+ return i
+ }
+
+ guard let c = nextChar() else {
+ return i
+ }
+ add(c)
+ }
+
+ return n
+ }
+
+ /**
+ * Override to provide different source of characters than
+ * {@link #input input}.
+ */
+ internal func nextChar() -> Int? {
+ if let next = unicodeIterator.next() {
+ return Int(next.value)
+ }
+ else if unicodeIterator.hasErrorOccurred {
+ return nil
+ }
+ else {
+ return nil
+ }
+ }
+
+ internal func add(_ c: Int) {
+ if n >= data.count {
+ data += [Int](repeating: 0, count: data.count)
+ }
+ data[n] = c
+ n += 1
+ }
+
+ public func LA(_ i: Int) throws -> Int {
+ let result = try LA_(i)
+ print("LA(\(i)) -> \(result)")
+ return result
+ }
+
+ private func LA_(_ i: Int) throws -> Int {
+
+ if i == -1 {
+ return lastChar // special case
+ }
+ sync(i)
+ let index = p + i - 1
+ if index < 0 {
+ throw ANTLRError.indexOutOfBounds(msg: "")
+ }
+ if index >= n {
+ return CommonToken.EOF
+ }
+ return data[index]
+ }
+
+ /**
+ * Return a marker that we can release later.
+ *
+ * The specific marker value used for this class allows for some level of
+ * protection against misuse where {@code seek()} is called on a mark or
+ * {@code release()} is called in the wrong order.
+ */
+ public func mark() -> Int {
+ if numMarkers == 0 {
+ lastCharBufferStart = lastChar
+ }
+
+ let mark = -numMarkers - 1
+ numMarkers += 1
+ return mark
+ }
+
+ /** Decrement number of markers, resetting buffer if we hit 0.
+ * @param marker
+ */
+ public func release(_ marker: Int) throws {
+ let expectedMark = -numMarkers
+ if marker != expectedMark {
+ preconditionFailure("release() called with an invalid marker.")
+ }
+
+ numMarkers -= 1
+ if numMarkers == 0 && p > 0 {
+ // release buffer when we can, but don't do unnecessary work
+
+ // Copy data[p]..data[n-1] to data[0]..data[(n-1)-p], reset ptrs
+ // p is last valid char; move nothing if p==n as we have no valid char
+ let dataCapacity = data.capacity
+ data = Array(data[p ..< n])
+ data += [Int](repeating: 0, count: dataCapacity - (n - p))
+ precondition(data.capacity == dataCapacity)
+ n = n - p
+ p = 0
+ lastCharBufferStart = lastChar
+ }
+ }
+
+ public func index() -> Int {
+ return currentCharIndex
+ }
+
+ /** Seek to absolute character index, which might not be in the current
+ * sliding window. Move {@code p} to {@code index-bufferStartIndex}.
+ */
+ public func seek(_ index_: Int) throws {
+ var index = index_
+
+ if index == currentCharIndex {
+ return
+ }
+
+ if index > currentCharIndex {
+ sync(index - currentCharIndex)
+ index = min(index, getBufferStartIndex() + n - 1)
+ }
+
+ // index == to bufferStartIndex should set p to 0
+ let i = index - getBufferStartIndex()
+ if i < 0 {
+ throw ANTLRError.illegalArgument(msg: "cannot seek to negative index \(index)")
+ }
+ else if i >= n {
+ let si = getBufferStartIndex()
+ let ei = si + n
+ let msg = "seek to index outside buffer: \(index) not in \(si)..\(ei)"
+ throw ANTLRError.unsupportedOperation(msg: msg)
+ }
+
+ p = i
+ currentCharIndex = index
+ if p == 0 {
+ lastChar = lastCharBufferStart
+ }
+ else {
+ lastChar = data[p - 1]
+ }
+ }
+
+ public func size() -> Int {
+ preconditionFailure("Unbuffered stream cannot know its size")
+ }
+
+ public func getSourceName() -> String {
+ return name
+ }
+
+ public func getText(_ interval: Interval) throws -> String {
+ if interval.a < 0 || interval.b < interval.a - 1 {
+ throw ANTLRError.illegalArgument(msg: "invalid interval")
+ }
+
+ let bufferStartIndex = getBufferStartIndex()
+ if n > 0 &&
+ data[n - 1] == CommonToken.EOF &&
+ interval.a + interval.length() > bufferStartIndex + n {
+ throw ANTLRError.illegalArgument(msg: "the interval extends past the end of the stream")
+ }
+
+ if interval.a < bufferStartIndex || interval.b >= bufferStartIndex + n {
+ let msg = "interval \(interval) outside buffer: \(bufferStartIndex)...\(bufferStartIndex + n - 1)"
+ throw ANTLRError.unsupportedOperation(msg: msg)
+ }
+
+ if interval.b < interval.a {
+ // The EOF token.
+ return ""
+ }
+
+ // convert from absolute to local index
+ let i = interval.a - bufferStartIndex
+ let j = interval.b - bufferStartIndex
+
+ // Convert from Int codepoints to a String.
+ let codepoints = data[i ... j].map { Character(Unicode.Scalar($0)!) }
+ return String(codepoints)
+ }
+
+ internal func getBufferStartIndex() -> Int {
+ return currentCharIndex - p
+ }
+}
+
+
+fileprivate struct UInt8StreamIterator: IteratorProtocol {
+ private static let bufferSize = 1024
+
+ private let stream: InputStream
+ private var buffer = [UInt8](repeating: 0, count: UInt8StreamIterator.bufferSize)
+ private var buffGen: IndexingIterator<ArraySlice<UInt8>>
+
+ var hasErrorOccurred = false
+
+
+ init(_ stream: InputStream) {
+ self.stream = stream
+ self.buffGen = buffer[0..<0].makeIterator()
+ }
+
+ mutating func next() -> UInt8? {
+ if let result = buffGen.next() {
+ return result
+ }
+
+ if hasErrorOccurred {
+ return nil
+ }
+
+ switch stream.streamStatus {
+ case .notOpen, .writing, .closed:
+ preconditionFailure()
+ case .atEnd:
+ return nil
+ case .error:
+ hasErrorOccurred = true
+ return nil
+ case .opening, .open, .reading:
+ break
+ }
+
+ let count = stream.read(&buffer, maxLength: buffer.capacity)
+ if count <= 0 {
+ hasErrorOccurred = true
+ return nil
+ }
+
+ buffGen = buffer.prefix(count).makeIterator()
+ return buffGen.next()
+ }
+}
+
+
+fileprivate struct UnicodeScalarStreamIterator: IteratorProtocol {
+ private var streamIterator: UInt8StreamIterator
+ private var codec = Unicode.UTF8()
+
+ var hasErrorOccurred = false
+
+ init(_ streamIterator: UInt8StreamIterator) {
+ self.streamIterator = streamIterator
+ }
+
+ mutating func next() -> Unicode.Scalar? {
+ if streamIterator.hasErrorOccurred {
+ hasErrorOccurred = true
+ return nil
+ }
+
+ switch codec.decode(&streamIterator) {
+ case .scalarValue(let scalar):
+ return scalar
+ case .emptyInput:
+ return nil
+ case .error:
+ hasErrorOccurred = true
+ return nil
+ }
+ }
+}
diff --git a/runtime/Swift/Sources/Antlr4/UnbufferedTokenStream.swift b/runtime/Swift/Sources/Antlr4/UnbufferedTokenStream.swift
index 75b7008b2..da1c8476a 100644
--- a/runtime/Swift/Sources/Antlr4/UnbufferedTokenStream.swift
+++ b/runtime/Swift/Sources/Antlr4/UnbufferedTokenStream.swift
@@ -4,58 +4,58 @@
*/
-public class UnbufferedTokenStream: TokenStream {
+public class UnbufferedTokenStream: TokenStream {
internal var tokenSource: TokenSource
- /**
- * A moving window buffer of the data being scanned. While there's a marker,
- * we keep adding to buffer. Otherwise, {@link #consume consume()} resets so
- * we start filling at index 0 again.
- */
+ ///
+ /// A moving window buffer of the data being scanned. While there's a marker,
+ /// we keep adding to buffer. Otherwise, _#consume consume()_ resets so
+ /// we start filling at index 0 again.
+ ///
internal var tokens: [Token]
- /**
- * The number of tokens currently in {@link #tokens tokens}.
- *
- * This is not the buffer capacity, that's {@code tokens.length}.
- */
+ ///
+ /// The number of tokens currently in _#tokens tokens_.
+ ///
+ /// This is not the buffer capacity, that's `tokens.length`.
+ ///
internal var n: Int
- /**
- * 0..n-1 index into {@link #tokens tokens} of next token.
- *
- * The {@code LT(1)} token is {@code tokens[p]}. If {@code p == n}, we are
- * out of buffered tokens.
- */
+ ///
+ /// 0..n-1 index into _#tokens tokens_ of next token.
+ ///
+ /// The `LT(1)` token is `tokens[p]`. If `p == n`, we are
+ /// out of buffered tokens.
+ ///
internal var p: Int = 0
- /**
- * Count up with {@link #mark mark()} and down with
- * {@link #release release()}. When we {@code release()} the last mark,
- * {@code numMarkers} reaches 0 and we reset the buffer. Copy
- * {@code tokens[p]..tokens[n-1]} to {@code tokens[0]..tokens[(n-1)-p]}.
- */
+ ///
+ /// Count up with _#mark mark()_ and down with
+ /// _#release release()_. When we `release()` the last mark,
+ /// `numMarkers` reaches 0 and we reset the buffer. Copy
+ /// `tokens[p]..tokens[n-1]` to `tokens[0]..tokens[(n-1)-p]`.
+ ///
internal var numMarkers: Int = 0
- /**
- * This is the {@code LT(-1)} token for the current position.
- */
+ ///
+ /// This is the `LT(-1)` token for the current position.
+ ///
internal var lastToken: Token!
- /**
- * When {@code numMarkers > 0}, this is the {@code LT(-1)} token for the
- * first token in {@link #tokens}. Otherwise, this is {@code null}.
- */
+ ///
+ /// When `numMarkers > 0`, this is the `LT(-1)` token for the
+ /// first token in _#tokens_. Otherwise, this is `null`.
+ ///
internal var lastTokenBufferStart: Token!
- /**
- * Absolute token index. It's the index of the token about to be read via
- * {@code LT(1)}. Goes from 0 to the number of tokens in the entire stream,
- * although the stream size is unknown before the end is reached.
- *
- * This value is used to set the token indexes if the stream provides tokens
- * that implement {@link org.antlr.v4.runtime.WritableToken}.
- */
+ ///
+ /// Absolute token index. It's the index of the token about to be read via
+ /// `LT(1)`. Goes from 0 to the number of tokens in the entire stream,
+ /// although the stream size is unknown before the end is reached.
+ ///
+ /// This value is used to set the token indexes if the stream provides tokens
+ /// that implement _org.antlr.v4.runtime.WritableToken_.
+ ///
internal var currentTokenIndex: Int = 0
public convenience init(_ tokenSource: TokenSource) throws {
@@ -148,10 +148,10 @@ public class UnbufferedTokenStream: TokenStream {
try sync(1)
}
- /** Make sure we have 'need' elements from current position {@link #p p}. Last valid
- * {@code p} index is {@code tokens.length-1}. {@code p+need-1} is the tokens index 'need' elements
- * ahead. If we need 1 element, {@code (p+1-1)==p} must be less than {@code tokens.length}.
- */
+ /// Make sure we have 'need' elements from current position _#p p_. Last valid
+ /// `p` index is `tokens.length-1`. `p+need-1` is the tokens index 'need' elements
+ /// ahead. If we need 1 element, `(p+1-1)==p` must be less than `tokens.length`.
+ ///
internal func sync(_ want: Int) throws {
let need: Int = (p + want - 1) - n + 1 // how many more elements we need?
if need > 0 {
@@ -159,11 +159,11 @@ public class UnbufferedTokenStream: TokenStream {
}
}
- /**
- * Add {@code n} elements to the buffer. Returns the number of tokens
- * actually added to the buffer. If the return value is less than {@code n},
- * then EOF was reached before {@code n} tokens could be added.
- */
+ ///
+ /// Add `n` elements to the buffer. Returns the number of tokens
+ /// actually added to the buffer. If the return value is less than `n`,
+ /// then EOF was reached before `n` tokens could be added.
+ ///
@discardableResult
internal func fill(_ n: Int) throws -> Int {
for i in 0..: TokenStream {
n += 1
}
- /**
- * Return a marker that we can release later.
- *
- * The specific marker value used for this class allows for some level of
- * protection against misuse where {@code seek()} is called on a mark or
- * {@code release()} is called in the wrong order.
- */
+ ///
+ /// Return a marker that we can release later.
+ ///
+ /// The specific marker value used for this class allows for some level of
+ /// protection against misuse where `seek()` is called on a mark or
+ /// `release()` is called in the wrong order.
+ ///
public func mark() -> Int {
if numMarkers == 0 {
@@ -274,10 +274,7 @@ public class UnbufferedTokenStream: TokenStream {
public func size() -> Int {
-
- RuntimeException("Unbuffered stream cannot know its size")
- fatalError()
-
+ fatalError("Unbuffered stream cannot know its size")
}
diff --git a/runtime/Swift/Sources/Antlr4/VocabularySingle.swift b/runtime/Swift/Sources/Antlr4/VocabularySingle.swift
index 49abcb0ee..5b63ff4d4 100644
--- a/runtime/Swift/Sources/Antlr4/VocabularySingle.swift
+++ b/runtime/Swift/Sources/Antlr4/VocabularySingle.swift
@@ -4,24 +4,24 @@
*/
-/**
-* This class provides a default implementation of the {@link org.antlr.v4.runtime.Vocabulary}
-* interface.
-*
-* @author Sam Harwell
-*/
+///
+/// This class provides a default implementation of the _org.antlr.v4.runtime.Vocabulary_
+/// interface.
+///
+/// - Author: Sam Harwell
+///
public class Vocabulary: Hashable {
private static let EMPTY_NAMES: [String?] = [String?](repeating: "", count: 1)
- /**
- * Gets an empty {@link org.antlr.v4.runtime.Vocabulary} instance.
- *
- *
- * No literal or symbol names are assigned to token types, so
- * {@link #getDisplayName(int)} returns the numeric value for all tokens
- * except {@link org.antlr.v4.runtime.Token#EOF}.
- */
+ ///
+ /// Gets an empty _org.antlr.v4.runtime.Vocabulary_ instance.
+ ///
+ ///
+ /// No literal or symbol names are assigned to token types, so
+ /// _#getDisplayName(int)_ returns the numeric value for all tokens
+ /// except _org.antlr.v4.runtime.Token#EOF_.
+ ///
public static let EMPTY_VOCABULARY: Vocabulary = Vocabulary(EMPTY_NAMES, EMPTY_NAMES, EMPTY_NAMES)
@@ -31,59 +31,59 @@ public class Vocabulary: Hashable {
private final var displayNames: [String?]
- /**
- * Constructs a new instance of {@link org.antlr.v4.runtime.Vocabulary} from the specified
- * literal and symbolic token names.
- *
- * @param literalNames The literal names assigned to tokens, or {@code null}
- * if no literal names are assigned.
- * @param symbolicNames The symbolic names assigned to tokens, or
- * {@code null} if no symbolic names are assigned.
- *
- * @see #getLiteralName(int)
- * @see #getSymbolicName(int)
- */
+ ///
+ /// Constructs a new instance of _org.antlr.v4.runtime.Vocabulary_ from the specified
+ /// literal and symbolic token names.
+ ///
+ /// - SeeAlso: #getLiteralName(int)
+ /// - SeeAlso: #getSymbolicName(int)
+ /// - Parameter literalNames: The literal names assigned to tokens, or `null`
+ /// if no literal names are assigned.
+ /// - Parameter symbolicNames: The symbolic names assigned to tokens, or
+ /// `null` if no symbolic names are assigned.
+ ///
+ ///
public convenience init(_ literalNames: [String?], _ symbolicNames: [String?]) {
self.init(literalNames, symbolicNames, nil)
}
- /**
- * Constructs a new instance of {@link org.antlr.v4.runtime.Vocabulary} from the specified
- * literal, symbolic, and display token names.
- *
- * @param literalNames The literal names assigned to tokens, or {@code null}
- * if no literal names are assigned.
- * @param symbolicNames The symbolic names assigned to tokens, or
- * {@code null} if no symbolic names are assigned.
- * @param displayNames The display names assigned to tokens, or {@code null}
- * to use the values in {@code literalNames} and {@code symbolicNames} as
- * the source of display names, as described in
- * {@link #getDisplayName(int)}.
- *
- * @see #getLiteralName(int)
- * @see #getSymbolicName(int)
- * @see #getDisplayName(int)
- */
+ ///
+ /// Constructs a new instance of _org.antlr.v4.runtime.Vocabulary_ from the specified
+ /// literal, symbolic, and display token names.
+ ///
+ /// - SeeAlso: #getLiteralName(int)
+ /// - SeeAlso: #getSymbolicName(int)
+ /// - SeeAlso: #getDisplayName(int)
+ /// - Parameter literalNames: The literal names assigned to tokens, or `null`
+ /// if no literal names are assigned.
+ /// - Parameter symbolicNames: The symbolic names assigned to tokens, or
+ /// `null` if no symbolic names are assigned.
+ /// - Parameter displayNames: The display names assigned to tokens, or `null`
+ /// to use the values in `literalNames` and `symbolicNames` as
+ /// the source of display names, as described in
+ /// _#getDisplayName(int)_.
+ ///
+ ///
public init(_ literalNames: [String?]?, _ symbolicNames: [String?]?, _ displayNames: [String?]?) {
self.literalNames = literalNames != nil ? literalNames! : Vocabulary.EMPTY_NAMES
self.symbolicNames = symbolicNames != nil ? symbolicNames! : Vocabulary.EMPTY_NAMES
self.displayNames = displayNames != nil ? displayNames! : Vocabulary.EMPTY_NAMES
}
- /**
- * Returns a {@link org.antlr.v4.runtime.Vocabulary} instance from the specified set of token
- * names. This method acts as a compatibility layer for the single
- * {@code tokenNames} array generated by previous releases of ANTLR.
- *
- * The resulting vocabulary instance returns {@code null} for
- * {@link #getLiteralName(int)} and {@link #getSymbolicName(int)}, and the
- * value from {@code tokenNames} for the display names.
- *
- * @param tokenNames The token names, or {@code null} if no token names are
- * available.
- * @return A {@link org.antlr.v4.runtime.Vocabulary} instance which uses {@code tokenNames} for
- * the display names of tokens.
- */
+ ///
+ /// Returns a _org.antlr.v4.runtime.Vocabulary_ instance from the specified set of token
+ /// names. This method acts as a compatibility layer for the single
+ /// `tokenNames` array generated by previous releases of ANTLR.
+ ///
+ /// The resulting vocabulary instance returns `null` for
+ /// _#getLiteralName(int)_ and _#getSymbolicName(int)_, and the
+ /// value from `tokenNames` for the display names.
+ ///
+ /// - Parameter tokenNames: The token names, or `null` if no token names are
+ /// available.
+ /// - Returns: A _org.antlr.v4.runtime.Vocabulary_ instance which uses `tokenNames` for
+ /// the display names of tokens.
+ ///
public static func fromTokenNames(_ tokenNames: [String?]?) -> Vocabulary {
guard let tokenNames = tokenNames , tokenNames.count > 0 else {
return EMPTY_VOCABULARY
diff --git a/runtime/Swift/Sources/Antlr4/atn/ATN.swift b/runtime/Swift/Sources/Antlr4/atn/ATN.swift
index e67343fb6..bdfc938cd 100644
--- a/runtime/Swift/Sources/Antlr4/atn/ATN.swift
+++ b/runtime/Swift/Sources/Antlr4/atn/ATN.swift
@@ -1,6 +1,8 @@
+///
/// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
/// Use of this file is governed by the BSD 3-clause license that
/// can be found in the LICENSE.txt file in the project root.
+///
public class ATN {
@@ -9,67 +11,87 @@ public class ATN {
public final var states: Array<ATNState?> = Array<ATNState?>()
+ ///
/// Each subrule/rule is a decision point and we must track them so we
/// can go back later and build DFA predictors for them. This includes
/// all the rules, subrules, optional blocks, ()+, ()* etc...
+ ///
public final var decisionToState: Array<DecisionState> = Array<DecisionState>()
+ ///
/// Maps from rule index to starting state number.
+ ///
public final var ruleToStartState: [RuleStartState]!
+ ///
/// Maps from rule index to stop state number.
+ ///
public final var ruleToStopState: [RuleStopState]!
public final let modeNameToStartState: Dictionary<String, TokensStartState> = Dictionary<String, TokensStartState>()
//LinkedHashMap();
+ ///
/// The type of the ATN.
+ ///
public let grammarType: ATNType!
+ ///
/// The maximum value for any symbol recognized by a transition in the ATN.
+ ///
public let maxTokenType: Int
+ ///
/// For lexer ATNs, this maps the rule index to the resulting token type.
/// For parser ATNs, this maps the rule index to the generated bypass token
/// type if the
- /// {@link org.antlr.v4.runtime.atn.ATNDeserializationOptions#isGenerateRuleBypassTransitions}
- /// deserialization option was specified; otherwise, this is {@code null}.
+ /// _org.antlr.v4.runtime.atn.ATNDeserializationOptions#isGenerateRuleBypassTransitions_
+ /// deserialization option was specified; otherwise, this is `null`.
+ ///
public final var ruleToTokenType: [Int]!
- /// For lexer ATNs, this is an array of {@link org.antlr.v4.runtime.atn.LexerAction} objects which may
+ ///
+ /// For lexer ATNs, this is an array of _org.antlr.v4.runtime.atn.LexerAction_ objects which may
/// be referenced by action transitions in the ATN.
+ ///
public final var lexerActions: [LexerAction]!
public final var modeToStartState: Array<TokensStartState> = Array<TokensStartState>()
+ ///
/// Used for runtime deserialization of ATNs from strings
+ ///
public init(_ grammarType: ATNType, _ maxTokenType: Int) {
self.grammarType = grammarType
self.maxTokenType = maxTokenType
}
- /// Compute the set of valid tokens that can occur starting in state {@code s}.
- /// If {@code ctx} is null, the set of tokens will not include what can follow
- /// the rule surrounding {@code s}. In other words, the set will be
- /// restricted to tokens reachable staying within {@code s}'s rule.
- public func nextTokens(_ s: ATNState, _ ctx: RuleContext?)throws -> IntervalSet {
- let anal: LL1Analyzer = LL1Analyzer(self)
- let next: IntervalSet = try anal.LOOK(s, ctx)
+ ///
+ /// Compute the set of valid tokens that can occur starting in state `s`.
+ /// If `ctx` is null, the set of tokens will not include what can follow
+ /// the rule surrounding `s`. In other words, the set will be
+ /// restricted to tokens reachable staying within `s`'s rule.
+ ///
+ public func nextTokens(_ s: ATNState, _ ctx: RuleContext?) -> IntervalSet {
+ let anal = LL1Analyzer(self)
+ let next = anal.LOOK(s, ctx)
return next
}
- /// Compute the set of valid tokens that can occur starting in {@code s} and
- /// staying in same rule. {@link org.antlr.v4.runtime.Token#EPSILON} is in set if we reach end of
+ ///
+ /// Compute the set of valid tokens that can occur starting in `s` and
+ /// staying in same rule. _org.antlr.v4.runtime.Token#EPSILON_ is in set if we reach end of
/// rule.
- public func nextTokens(_ s: ATNState) throws -> IntervalSet {
+ ///
+ public func nextTokens(_ s: ATNState) -> IntervalSet {
if let nextTokenWithinRule = s.nextTokenWithinRule
{
return nextTokenWithinRule
}
- let intervalSet = try nextTokens(s, nil)
+ let intervalSet = nextTokens(s, nil)
s.nextTokenWithinRule = intervalSet
- try intervalSet.setReadonly(true)
+ try! intervalSet.setReadonly(true)
return intervalSet
}
@@ -104,52 +126,52 @@ public class ATN {
return decisionToState.count
}
+ ///
/// Computes the set of input symbols which could follow ATN state number
- /// {@code stateNumber} in the specified full {@code context}. This method
+ /// `stateNumber` in the specified full `context`. This method
/// considers the complete parser context, but does not evaluate semantic
/// predicates (i.e. all predicates encountered during the calculation are
/// assumed true). If a path in the ATN exists from the starting state to the
- /// {@link org.antlr.v4.runtime.atn.RuleStopState} of the outermost context without matching any
- /// symbols, {@link org.antlr.v4.runtime.Token#EOF} is added to the returned set.
- ///
- /// If {@code context} is {@code null}, it is treated as
- /// {@link org.antlr.v4.runtime.ParserRuleContext#EMPTY}.
- ///
+ /// _org.antlr.v4.runtime.atn.RuleStopState_ of the outermost context without matching any
+ /// symbols, _org.antlr.v4.runtime.Token#EOF_ is added to the returned set.
+ ///
+ /// If `context` is `null`, it is treated as
+ /// _org.antlr.v4.runtime.ParserRuleContext#EMPTY_.
+ ///
/// - parameter stateNumber: the ATN state number
/// - parameter context: the full parse context
/// - returns: The set of potentially valid input symbols which could follow the
/// specified state in the specified context.
- /// - IllegalArgumentException if the ATN does not contain a state with
- /// number {@code stateNumber}
+ /// - throws: _ANTLRError.illegalArgument_ if the ATN does not contain a state with
+ /// number `stateNumber`
+ ///
public func getExpectedTokens(_ stateNumber: Int, _ context: RuleContext) throws -> IntervalSet {
if stateNumber < 0 || stateNumber >= states.count {
throw ANTLRError.illegalArgument(msg: "Invalid state number.")
- /// throw IllegalArgumentException("Invalid state number.");
}
var ctx: RuleContext? = context
- //TODO: s may be nil
- let s: ATNState = states[stateNumber]!
- var following: IntervalSet = try nextTokens(s)
+ let s = states[stateNumber]!
+ var following = nextTokens(s)
if !following.contains(CommonToken.EPSILON) {
return following
}
- let expected: IntervalSet = try IntervalSet()
- try expected.addAll(following)
- try expected.remove(CommonToken.EPSILON)
+ let expected = IntervalSet()
+ try! expected.addAll(following)
+ try! expected.remove(CommonToken.EPSILON)
- while let ctxWrap = ctx , ctxWrap.invokingState >= 0 && following.contains(CommonToken.EPSILON) {
- let invokingState: ATNState = states[ctxWrap.invokingState]!
- let rt: RuleTransition = invokingState.transition(0) as! RuleTransition
- following = try nextTokens(rt.followState)
- try expected.addAll(following)
- try expected.remove(CommonToken.EPSILON)
+ while let ctxWrap = ctx, ctxWrap.invokingState >= 0 && following.contains(CommonToken.EPSILON) {
+ let invokingState = states[ctxWrap.invokingState]!
+ let rt = invokingState.transition(0) as! RuleTransition
+ following = nextTokens(rt.followState)
+ try! expected.addAll(following)
+ try! expected.remove(CommonToken.EPSILON)
ctx = ctxWrap.parent
}
if following.contains(CommonToken.EPSILON) {
- try expected.add(CommonToken.EOF)
+ try! expected.add(CommonToken.EOF)
}
return expected
diff --git a/runtime/Swift/Sources/Antlr4/atn/ATNConfig.swift b/runtime/Swift/Sources/Antlr4/atn/ATNConfig.swift
index 9ee49ecd1..f91854531 100644
--- a/runtime/Swift/Sources/Antlr4/atn/ATNConfig.swift
+++ b/runtime/Swift/Sources/Antlr4/atn/ATNConfig.swift
@@ -1,53 +1,67 @@
+///
/// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
/// Use of this file is governed by the BSD 3-clause license that
/// can be found in the LICENSE.txt file in the project root.
+///
+///
/// A tuple: (ATN state, predicted alt, syntactic, semantic context).
/// The syntactic context is a graph-structured stack node whose
/// path(s) to the root is the rule invocation(s)
/// chain used to arrive at the state. The semantic context is
/// the tree of semantic predicates encountered before reaching
/// an ATN state.
+///
public class ATNConfig: Hashable, CustomStringConvertible {
+ ///
/// This field stores the bit mask for implementing the
- /// {@link #isPrecedenceFilterSuppressed} property as a bit within the
- /// existing {@link #reachesIntoOuterContext} field.
+ /// _#isPrecedenceFilterSuppressed_ property as a bit within the
+ /// existing _#reachesIntoOuterContext_ field.
+ ///
private final let SUPPRESS_PRECEDENCE_FILTER: Int = 0x40000000
+ ///
/// The ATN state associated with this configuration
+ ///
public final let state: ATNState
+ ///
/// What alt (or lexer rule) is predicted by this configuration
+ ///
public final let alt: Int
+ ///
/// The stack of invoking states leading to the rule/states associated
/// with this config. We track only those contexts pushed during
/// execution of the ATN simulator.
+ ///
public final var context: PredictionContext?
+ ///
/// We cannot execute predicates dependent upon local context unless
/// we know for sure we are in the correct context. Because there is
/// no way to do this efficiently, we simply cannot evaluate
/// dependent predicates unless we are in the rule that initially
/// invokes the ATN simulator.
- ///
- ///
+ ///
+ ///
/// closure() tracks the depth of how far we dip into the outer context:
/// depth > 0. Note that it may not be totally accurate depth since I
- /// don't ever decrement. TODO: make it a boolean then
- ///
- ///
- /// For memory efficiency, the {@link #isPrecedenceFilterSuppressed} method
+ /// don't ever decrement. TODO: make it a boolean then
+ ///
+ ///
+ /// For memory efficiency, the _#isPrecedenceFilterSuppressed_ method
/// is also backed by this field. Since the field is publicly accessible, the
/// highest bit which would not cause the value to become negative is used to
/// store this field. This choice minimizes the risk that code which only
/// compares this value to 0 would be affected by the new purpose of the
- /// flag. It also ensures the performance of the existing {@link org.antlr.v4.runtime.atn.ATNConfig}
+ /// flag. It also ensures the performance of the existing _org.antlr.v4.runtime.atn.ATNConfig_
/// constructors as well as certain operations like
- /// {@link org.antlr.v4.runtime.atn.ATNConfigSet#add(org.antlr.v4.runtime.atn.ATNConfig, DoubleKeyMap)} method are
- /// completely unaffected by the change.
+ /// _org.antlr.v4.runtime.atn.ATNConfigSet#add(org.antlr.v4.runtime.atn.ATNConfig, DoubleKeyMap)_ method are
+ /// __completely__ unaffected by the change.
+ ///
public final var reachesIntoOuterContext: Int = 0
//=0 intital by janyou
@@ -108,9 +122,11 @@ public class ATNConfig: Hashable, CustomStringConvertible {
self.reachesIntoOuterContext = c.reachesIntoOuterContext
}
- /// This method gets the value of the {@link #reachesIntoOuterContext} field
+ ///
+ /// This method gets the value of the _#reachesIntoOuterContext_ field
/// as it existed prior to the introduction of the
- /// {@link #isPrecedenceFilterSuppressed} method.
+ /// _#isPrecedenceFilterSuppressed_ method.
+ ///
public final func getOuterContextDepth() -> Int {
return reachesIntoOuterContext & ~SUPPRESS_PRECEDENCE_FILTER
}
@@ -127,18 +143,19 @@ public class ATNConfig: Hashable, CustomStringConvertible {
}
}
+ ///
/// An ATN configuration is equal to another if both have
/// the same state, they predict the same alternative, and
/// syntactic/semantic contexts are the same.
+ ///
public var hashValue: Int {
- var hashCode: Int = MurmurHash.initialize(7)
+ var hashCode = MurmurHash.initialize(7)
hashCode = MurmurHash.update(hashCode, state.stateNumber)
hashCode = MurmurHash.update(hashCode, alt)
hashCode = MurmurHash.update(hashCode, context)
hashCode = MurmurHash.update(hashCode, semanticContext)
- hashCode = MurmurHash.finish(hashCode, 4)
- return hashCode
+ return MurmurHash.finish(hashCode, 4)
}
@@ -149,26 +166,21 @@ public class ATNConfig: Hashable, CustomStringConvertible {
//return "MyClass \(string)"
return toString(nil, true)
}
- public func toString(_ recog: Recognizer?, _ showAlt: Bool) -> String {
+ public func toString(_ recog: Recognizer?, _ showAlt: Bool) -> String {
let buf: StringBuilder = StringBuilder()
-// if ( state.ruleIndex>=0 ) {
-// if ( recog!=null ) buf.append(recog.getRuleNames()[state.ruleIndex]+":");
-// else buf.append(state.ruleIndex+":");
-// }
buf.append("(")
buf.append(state)
if showAlt {
buf.append(",")
buf.append(alt)
}
- //TODO: context can be nil ?
+
if context != nil {
buf.append(",[")
buf.append(context!)
buf.append("]")
}
- //TODO: semanticContext can be nil ?
- //if ( semanticContext != nil && semanticContext != SemanticContext.NONE ) {
+
if semanticContext != SemanticContext.NONE {
buf.append(",")
buf.append(semanticContext)
@@ -186,10 +198,7 @@ public func ==(lhs: ATNConfig, rhs: ATNConfig) -> Bool {
if lhs === rhs {
return true
}
- //TODO : rhs nil?
- /// else { if (other == nil) {
- /// return false;
- /// }
+
if (lhs is LexerATNConfig) && (rhs is LexerATNConfig) {
return (lhs as! LexerATNConfig) == (rhs as! LexerATNConfig)
diff --git a/runtime/Swift/Sources/Antlr4/atn/ATNConfigSet.swift b/runtime/Swift/Sources/Antlr4/atn/ATNConfigSet.swift
index 91643513f..3c52bcd14 100644
--- a/runtime/Swift/Sources/Antlr4/atn/ATNConfigSet.swift
+++ b/runtime/Swift/Sources/Antlr4/atn/ATNConfigSet.swift
@@ -1,58 +1,72 @@
+///
/// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
/// Use of this file is governed by the BSD 3-clause license that
/// can be found in the LICENSE.txt file in the project root.
+///
-/// Specialized {@link java.util.Set}{@code <}{@link org.antlr.v4.runtime.atn.ATNConfig}{@code >} that can track
+///
+/// Specialized _java.util.Set_`<`_org.antlr.v4.runtime.atn.ATNConfig_`>` that can track
/// info about the set, with support for combining similar configurations using a
/// graph-structured stack.
-//: Set
-
+///
public class ATNConfigSet: Hashable, CustomStringConvertible {
+ ///
/// The reason that we need this is because we don't want the hash map to use
/// the standard hash code and equals. We need all configurations with the same
- /// {@code (s,i,_,semctx)} to be equal. Unfortunately, this key effectively doubles
+ /// `(s,i,_,semctx)` to be equal. Unfortunately, this key effectively doubles
/// the number of objects associated with ATNConfigs. The other solution is to
/// use a hash table that lets us specify the equals/hashcode operation.
+ ///
- /// Indicates that the set of configurations is read-only. Do not
+ ///
+ /// Indicates that the set of configurations is read-only. Do not
/// allow any code to manipulate the set; DFA states will point at
/// the sets and they must not change. This does not protect the other
/// fields; in particular, conflictingAlts is set after
/// we've made this readonly.
- internal final var readonly: Bool = false
+ ///
+ internal final var readonly = false
+ ///
/// All configs but hashed by (s, i, _, pi) not including context. Wiped out
/// when we go readonly as this set becomes a DFA state.
+ ///
public final var configLookup: LookupDictionary
+ ///
/// Track the elements as they are added to the set; supports get(i)
- public final var configs: Array<ATNConfig> = Array<ATNConfig>()
+ ///
+ public final var configs = [ATNConfig]()
// TODO: these fields make me pretty uncomfortable but nice to pack up info together, saves recomputation
// TODO: can we track conflicts as they are added to save scanning configs later?
- public final var uniqueAlt: Int = 0
+ public final var uniqueAlt = 0
//TODO no default
+ ///
/// Currently this is only used when we detect SLL conflict; this does
/// not necessarily represent the ambiguous alternatives. In fact,
/// I should also point out that this seems to include predicated alternatives
/// that have predicates that evaluate to false. Computed in computeTargetState().
+ ///
internal final var conflictingAlts: BitSet?
// Used in parser and lexer. In lexer, it indicates we hit a pred
// while computing a closure operation. Don't make a DFA state from this.
- public final var hasSemanticContext: Bool = false
+ public final var hasSemanticContext = false
//TODO no default
- public final var dipsIntoOuterContext: Bool = false
+ public final var dipsIntoOuterContext = false
//TODO no default
+ ///
/// Indicates that this configuration set is part of a full context
/// LL prediction. It will be used to determine how to merge $. With SLL
/// it's a wildcard whereas it is not for LL context merge.
+ ///
public final var fullCtx: Bool
- private var cachedHashCode: Int = -1
+ private var cachedHashCode = -1
public init(_ fullCtx: Bool) {
configLookup = LookupDictionary()
@@ -62,9 +76,9 @@ public class ATNConfigSet: Hashable, CustomStringConvertible {
self.init(true)
}
- public convenience init(_ old: ATNConfigSet) throws {
+ public convenience init(_ old: ATNConfigSet) {
self.init(old.fullCtx)
- try addAll(old)
+ try! addAll(old)
self.uniqueAlt = old.uniqueAlt
self.conflictingAlts = old.conflictingAlts
self.hasSemanticContext = old.hasSemanticContext
@@ -78,21 +92,22 @@ public class ATNConfigSet: Hashable, CustomStringConvertible {
return try add(config, &mergeCache)
}
+ ///
/// Adding a new config means merging contexts with existing configs for
- /// {@code (s, i, pi, _)}, where {@code s} is the
- /// {@link org.antlr.v4.runtime.atn.ATNConfig#state}, {@code i} is the {@link org.antlr.v4.runtime.atn.ATNConfig#alt}, and
- /// {@code pi} is the {@link org.antlr.v4.runtime.atn.ATNConfig#semanticContext}. We use
- /// {@code (s,i,pi)} as key.
- ///
- /// This method updates {@link #dipsIntoOuterContext} and
- /// {@link #hasSemanticContext} when necessary.
+ /// `(s, i, pi, _)`, where `s` is the
+ /// _org.antlr.v4.runtime.atn.ATNConfig#state_, `i` is the _org.antlr.v4.runtime.atn.ATNConfig#alt_, and
+ /// `pi` is the _org.antlr.v4.runtime.atn.ATNConfig#semanticContext_. We use
+ /// `(s,i,pi)` as key.
+ ///
+ /// This method updates _#dipsIntoOuterContext_ and
+ /// _#hasSemanticContext_ when necessary.
+ ///
@discardableResult
public final func add(
_ config: ATNConfig,
_ mergeCache: inout DoubleKeyMap?) throws -> Bool {
if readonly {
throw ANTLRError.illegalState(msg: "This set is readonly")
-
}
if config.semanticContext != SemanticContext.NONE {
@@ -109,10 +124,9 @@ public class ATNConfigSet: Hashable, CustomStringConvertible {
return true
}
// a previous (s,i,pi,_), merge with it and save result
- let rootIsWildcard: Bool = !fullCtx
+ let rootIsWildcard = !fullCtx
- let merged: PredictionContext =
- PredictionContext.merge(existing.context!, config.context!, rootIsWildcard, &mergeCache)
+ let merged = PredictionContext.merge(existing.context!, config.context!, rootIsWildcard, &mergeCache)
// no need to check for existing.context, config.context in cache
// since only way to create new graphs is "call rule" and here. We
@@ -135,43 +149,42 @@ public class ATNConfigSet: Hashable, CustomStringConvertible {
}
+ ///
/// Return a List holding list of configs
- public final func elements() -> Array {
+ ///
+ public final func elements() -> [ATNConfig] {
return configs
}
public final func getStates() -> Set {
-
- let length = configs.count
- var states: Set