Merge branch 'master' into coverity_fixes

Terence Parr 2017-10-27 10:44:29 -07:00 committed by GitHub
commit 1230aa08ef
267 changed files with 10957 additions and 8285 deletions


@@ -1,5 +1,8 @@
root = true
[*]
tab_width = 4
[*.{java,stg}]
charset = utf-8
insert_final_newline = true


@@ -2,30 +2,26 @@ sudo: true
language: java
cache:
directories:
- $HOME/.m2
- $HOME/Library/Caches/Homebrew
stages:
- smoke-test
- main-test
- extended-test
matrix:
include:
- os: linux
compiler: clang
jdk: oraclejdk7
jdk: openjdk7
env:
- TARGET=cpp
- CXX=g++-5
- GROUP=ALL
addons:
apt:
sources:
- ubuntu-toolchain-r-test
- llvm-toolchain-precise-3.7
packages:
- g++-5
- uuid-dev
- clang-3.7
- os: osx
compiler: clang
osx_image: xcode8.1
env:
- TARGET=cpp
- GROUP=LEXER
stage: main-test
addons:
apt:
sources:
@@ -35,106 +31,150 @@ matrix:
- g++-5
- uuid-dev
- clang-3.7
- os: osx
compiler: clang
osx_image: xcode8.1
env:
- TARGET=cpp
- GROUP=PARSER
addons:
apt:
sources:
- ubuntu-toolchain-r-test
- llvm-toolchain-precise-3.7
packages:
- g++-5
- uuid-dev
- clang-3.7
- os: osx
compiler: clang
osx_image: xcode8.1
env:
- TARGET=cpp
- GROUP=RECURSION
addons:
apt:
sources:
- ubuntu-toolchain-r-test
- llvm-toolchain-precise-3.7
packages:
- g++-5
- uuid-dev
- clang-3.7
- os: osx
compiler: clang
osx_image: xcode8.1
env:
- TARGET=swift
- GROUP=LEXER
- os: osx
compiler: clang
osx_image: xcode8.1
env:
- TARGET=swift
- GROUP=PARSER
- os: osx
compiler: clang
osx_image: xcode8.1
env:
- TARGET=swift
- GROUP=RECURSION
- os: linux
compiler: clang
jdk: openjdk7
env:
- TARGET=cpp
- CXX=g++-5
- GROUP=PARSER
stage: main-test
addons:
apt:
sources:
- ubuntu-toolchain-r-test
- llvm-toolchain-precise-3.7
packages:
- g++-5
- uuid-dev
- clang-3.7
- os: linux
compiler: clang
jdk: openjdk7
env:
- TARGET=cpp
- CXX=g++-5
- GROUP=RECURSION
stage: main-test
addons:
apt:
sources:
- ubuntu-toolchain-r-test
- llvm-toolchain-precise-3.7
packages:
- g++-5
- uuid-dev
- clang-3.7
- os: osx
compiler: clang
osx_image: xcode9
env:
- TARGET=cpp
- GROUP=LEXER
stage: extended-test
- os: osx
compiler: clang
osx_image: xcode9
env:
- TARGET=cpp
- GROUP=PARSER
stage: extended-test
- os: osx
compiler: clang
osx_image: xcode9
env:
- TARGET=cpp
- GROUP=RECURSION
stage: extended-test
- os: osx
compiler: clang
osx_image: xcode9
env:
- TARGET=swift
- GROUP=LEXER
stage: main-test
- os: osx
compiler: clang
osx_image: xcode9
env:
- TARGET=swift
- GROUP=PARSER
stage: main-test
- os: osx
compiler: clang
osx_image: xcode9
env:
- TARGET=swift
- GROUP=RECURSION
stage: main-test
- os: linux
dist: trusty
compiler: clang
env:
- TARGET=swift
- GROUP=ALL
stage: extended-test
- os: osx
osx_image: xcode8.2
osx_image: xcode9
env:
- TARGET=dotnet
- GROUP=LEXER
stage: extended-test
- os: osx
osx_image: xcode8.2
osx_image: xcode9
env:
- TARGET=dotnet
- GROUP=PARSER
stage: extended-test
- os: osx
osx_image: xcode8.2
osx_image: xcode9
env:
- TARGET=dotnet
- GROUP=RECURSION
stage: extended-test
- os: linux
jdk: oraclejdk7
jdk: openjdk7
env: TARGET=java
stage: extended-test
- os: linux
jdk: openjdk8
env: TARGET=java
stage: extended-test
- os: linux
jdk: oraclejdk8
env: TARGET=java
stage: smoke-test
- os: linux
jdk: oraclejdk7
jdk: openjdk7
env: TARGET=csharp
stage: extended-test
- os: linux
jdk: oraclejdk8
dist: trusty
env:
- TARGET=dotnet
- GROUP=LEXER
stage: main-test
- os: linux
jdk: oraclejdk8
jdk: openjdk8
dist: trusty
env:
- TARGET=dotnet
- GROUP=PARSER
stage: main-test
- os: linux
jdk: oraclejdk8
dist: trusty
env:
- TARGET=dotnet
- GROUP=RECURSION
stage: main-test
- os: linux
jdk: oraclejdk7
jdk: openjdk7
env: TARGET=python2
stage: extended-test
- os: linux
jdk: oraclejdk7
jdk: openjdk7
env: TARGET=python3
addons:
apt:
@@ -142,16 +182,20 @@ matrix:
- deadsnakes # source required so it finds the package definition below
packages:
- python3.5
stage: main-test
- os: linux
jdk: oraclejdk7
dist: trusty
jdk: openjdk8
env: TARGET=javascript
stage: main-test
- os: linux
jdk: oraclejdk7
dist: trusty
jdk: openjdk8
env: TARGET=go
stage: main-test
before_install:
- ./.travis/before-install-$TRAVIS_OS_NAME-$TARGET.sh
- f="./.travis/before-install-$TRAVIS_OS_NAME-$TARGET.sh"; ! [ -x "$f" ] || "$f"
script:
- cd runtime-testsuite; ../.travis/run-tests-$TARGET.sh
- cd runtime-testsuite; travis_wait 40 ../.travis/run-tests-$TARGET.sh


@@ -1,14 +1,12 @@
set -euo pipefail
# make sure we use trusty repositories (travis by default uses precise)
curl https://repogen.simplylinux.ch/txt/trusty/sources_c4aa56bd26c0f54f391d8fae3e687ef5f6e97c26.txt | sudo tee /etc/apt/sources.list
# install dependencies
# some packages below will be updated; swift assumes newer versions
# of, for example, sqlite3 and libicu, and without the update some
# tools will not work
sudo apt-get update
sudo apt-get install clang libicu-dev libxml2 sqlite3
sudo apt-get install clang-3.6 libxml2
sudo update-alternatives --install /usr/bin/clang clang /usr/bin/clang-3.6 100
# This would fix a known linker issue mentioned in:
# https://bugs.swift.org/browse/SR-2299


@@ -1,13 +0,0 @@
#!/bin/bash
set -euo pipefail
thisdir=$(dirname "$0")
brew update
brew install cmake
# Work around apparent rvm bug that is in Travis's Xcode image.
# https://github.com/direnv/direnv/issues/210
# https://github.com/travis-ci/travis-ci/issues/6307
shell_session_update() { :; }


@@ -4,9 +4,7 @@ set -euo pipefail
thisdir=$(dirname "$0")
# pre-requisites for dotnet core
brew update
brew install openssl
# OpenSSL setup for dotnet core
mkdir -p /usr/local/lib
ln -s /usr/local/opt/openssl/lib/libcrypto.1.0.0.dylib /usr/local/lib/
ln -s /usr/local/opt/openssl/lib/libssl.1.0.0.dylib /usr/local/lib/
@@ -19,9 +17,3 @@ sudo installer -pkg /tmp/dotnet-dev-osx-x64.1.0.4.pkg -target /
# make the link
ln -s /usr/local/share/dotnet/dotnet /usr/local/bin/
# Work around apparent rvm bug that is in Travis's Xcode image.
# https://github.com/direnv/direnv/issues/210
# https://github.com/travis-ci/travis-ci/issues/6307
shell_session_update() { :; }


@@ -1,12 +0,0 @@
#!/bin/bash
set -euo pipefail
thisdir=$(dirname "$0")
brew update
# Work around apparent rvm bug that is in Travis's Xcode image.
# https://github.com/direnv/direnv/issues/210
# https://github.com/travis-ci/travis-ci/issues/6307
shell_session_update() { :; }


@@ -4,7 +4,7 @@
# here since environment variables don't pass
# across scripts
if [ $TRAVIS_OS_NAME == "linux" ]; then
export SWIFT_VERSION=swift-3.1.1
export SWIFT_VERSION=swift-4.0
export SWIFT_HOME=$(pwd)/swift/$SWIFT_VERSION-RELEASE-ubuntu14.04/usr/bin/
export PATH=$SWIFT_HOME:$PATH


@@ -395,7 +395,7 @@ public class Antlr4Mojo extends AbstractMojo {
String tokensFileName = grammarFile.getName().split("\\.")[0] + ".tokens";
File outputFile = new File(outputDirectory, tokensFileName);
if ( (! outputFile.exists()) ||
outputFile.lastModified() < grammarFile.lastModified() ||
outputFile.lastModified() <= grammarFile.lastModified() ||
dependencies.isDependencyChanged(grammarFile)) {
grammarFilesToProcess.add(grammarFile);
}
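
Changing the comparison from `<` to `<=` means a grammar is reprocessed even when its generated `.tokens` file carries exactly the same timestamp as the grammar, which coarse filesystem timestamp resolution could otherwise make look up to date. A minimal sketch of the resulting staleness rule (the `isStale` helper is hypothetical, not part of the mojo):

```
import java.io.File;

// An output is stale when it is missing or not strictly newer than its input.
// Using <= errs on the side of regenerating when coarse filesystem timestamps
// make input and output look equally old.
static boolean isStale(File grammarFile, File outputFile) {
    return !outputFile.exists()
        || outputFile.lastModified() <= grammarFile.lastModified();
}
```
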
@@ -412,10 +412,7 @@
// Iterate each grammar file we were given and add it into the tool's list of
// grammars to process.
for (File grammarFile : grammarFiles) {
if (!buildContext.hasDelta(grammarFile)) {
continue;
}
buildContext.refresh(grammarFile);
buildContext.removeMessages(grammarFile);
getLog().debug("Grammar file '" + grammarFile.getPath() + "' detected.");


@@ -216,14 +216,14 @@ class GrammarDependencies {
return;
for (GrammarAST importDecl : grammar.getAllChildrenWithType(ANTLRParser.IMPORT)) {
Tree id = importDecl.getFirstChildWithType(ANTLRParser.ID);
for (Tree id: importDecl.getAllChildrenWithType(ANTLRParser.ID)) {
// missing id is not valid, but we don't want to prevent the root cause from
// being reported by the ANTLR tool
if (id != null) {
String grammarPath = getRelativePath(grammarFile);
// missing id is not valid, but we don't want to prevent the root cause from
// being reported by the ANTLR tool
if (id != null) {
String grammarPath = getRelativePath(grammarFile);
graph.addEdge(id.getText() + ".g4", grammarPath);
graph.addEdge(id.getText() + ".g4", grammarPath);
}
}
}
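
A grammar may import several grammars in one statement (`import A, B;`), which puts multiple ID children under a single IMPORT node, so the reworked loop records a dependency edge for each of them instead of only the first. A sketch of the new loop as reconstructed from the hunk above:

```
for (GrammarAST importDecl : grammar.getAllChildrenWithType(ANTLRParser.IMPORT)) {
    for (Tree id : importDecl.getAllChildrenWithType(ANTLRParser.ID)) {
        // a missing id is not valid, but we don't want to prevent the root
        // cause from being reported by the ANTLR tool
        if (id != null) {
            String grammarPath = getRelativePath(grammarFile);
            graph.addEdge(id.getText() + ".g4", grammarPath);
        }
    }
}
```
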


@@ -202,6 +202,7 @@ public class Antlr4MojoTest {
Path genHello = generatedSources.resolve("test/HelloParser.java");
Path baseGrammar = antlrDir.resolve("imports/TestBaseLexer.g4");
Path baseGrammar2 = antlrDir.resolve("imports/TestBaseLexer2.g4");
Path lexerGrammar = antlrDir.resolve("test/TestLexer.g4");
Path parserGrammar = antlrDir.resolve("test/TestParser.g4");
@@ -222,21 +223,20 @@
assertTrue(Files.exists(genHello));
assertTrue(Files.exists(genTestParser));
assertTrue(Files.exists(genTestLexer));
byte[] origTestLexerSum = checksum(genTestLexer);
byte[] origTestParserSum = checksum(genTestParser);
byte[] origHelloSum = checksum(genHello);
////////////////////////////////////////////////////////////////////////
// 2nd - nothing has been modified, no grammars have to be processed
////////////////////////////////////////////////////////////////////////
{
byte[] testLexerSum = checksum(genTestLexer);
byte[] testParserSum = checksum(genTestParser);
byte[] helloSum = checksum(genHello);
maven.executeMojo(session, project, exec);
assertTrue(Arrays.equals(testLexerSum, checksum(genTestLexer)));
assertTrue(Arrays.equals(testParserSum, checksum(genTestParser)));
assertTrue(Arrays.equals(helloSum, checksum(genHello)));
assertTrue(Arrays.equals(origTestLexerSum, checksum(genTestLexer)));
assertTrue(Arrays.equals(origTestParserSum, checksum(genTestParser)));
assertTrue(Arrays.equals(origHelloSum, checksum(genHello)));
}
////////////////////////////////////////////////////////////////////////
@@ -245,50 +245,71 @@ public class Antlr4MojoTest {
// modify the grammar to make checksum comparison detect a change
try(Change change = Change.of(baseGrammar, "DOT: '.' ;")) {
byte[] testLexerSum = checksum(genTestLexer);
byte[] testParserSum = checksum(genTestParser);
byte[] helloSum = checksum(genHello);
maven.executeMojo(session, project, exec);
assertFalse(Arrays.equals(testLexerSum, checksum(genTestLexer)));
assertFalse(Arrays.equals(testParserSum, checksum(genTestParser)));
assertTrue(Arrays.equals(helloSum, checksum(genHello)));
assertFalse(Arrays.equals(origTestLexerSum, checksum(genTestLexer)));
assertFalse(Arrays.equals(origTestParserSum, checksum(genTestParser)));
assertTrue(Arrays.equals(origHelloSum, checksum(genHello)));
}
// Restore file and confirm it was restored.
maven.executeMojo(session, project, exec);
assertTrue(Arrays.equals(origTestLexerSum, checksum(genTestLexer)));
assertTrue(Arrays.equals(origTestParserSum, checksum(genTestParser)));
assertTrue(Arrays.equals(origHelloSum, checksum(genHello)));
////////////////////////////////////////////////////////////////////////
// 4th - the lexer grammar changed, the parser grammar has to be processed as well
// 4th - the second imported grammar changed, every dependency has to be processed
////////////////////////////////////////////////////////////////////////
// modify the grammar to make checksum comparison detect a change
try(Change change = Change.of(lexerGrammar)) {
byte[] testLexerSum = checksum(genTestLexer);
byte[] testParserSum = checksum(genTestParser);
byte[] helloSum = checksum(genHello);
try(Change change = Change.of(baseGrammar2, "BANG: '!' ;")) {
maven.executeMojo(session, project, exec);
assertFalse(Arrays.equals(testLexerSum, checksum(genTestLexer)));
assertFalse(Arrays.equals(testParserSum, checksum(genTestParser)));
assertTrue(Arrays.equals(helloSum, checksum(genHello)));
assertFalse(Arrays.equals(origTestLexerSum, checksum(genTestLexer)));
assertFalse(Arrays.equals(origTestParserSum, checksum(genTestParser)));
assertTrue(Arrays.equals(origHelloSum, checksum(genHello)));
}
// Restore file and confirm it was restored.
maven.executeMojo(session, project, exec);
assertTrue(Arrays.equals(origTestLexerSum, checksum(genTestLexer)));
assertTrue(Arrays.equals(origTestParserSum, checksum(genTestParser)));
assertTrue(Arrays.equals(origHelloSum, checksum(genHello)));
////////////////////////////////////////////////////////////////////////
// 5th - the parser grammar changed, no other grammars have to be processed
// 5th - the lexer grammar changed, the parser grammar has to be processed as well
////////////////////////////////////////////////////////////////////////
// modify the grammar to make checksum comparison detect a change
try(Change change = Change.of(lexerGrammar, "FOO: 'foo' ;")) {
maven.executeMojo(session, project, exec);
assertFalse(Arrays.equals(origTestLexerSum, checksum(genTestLexer)));
assertFalse(Arrays.equals(origTestParserSum, checksum(genTestParser)));
assertTrue(Arrays.equals(origHelloSum, checksum(genHello)));
}
// Restore file and confirm it was restored.
maven.executeMojo(session, project, exec);
assertTrue(Arrays.equals(origTestLexerSum, checksum(genTestLexer)));
assertTrue(Arrays.equals(origTestParserSum, checksum(genTestParser)));
assertTrue(Arrays.equals(origHelloSum, checksum(genHello)));
////////////////////////////////////////////////////////////////////////
// 6th - the parser grammar changed, no other grammars have to be processed
////////////////////////////////////////////////////////////////////////
// modify the grammar to make checksum comparison detect a change
try(Change change = Change.of(parserGrammar, " t : WS* ;")) {
byte[] testLexerSum = checksum(genTestLexer);
byte[] testParserSum = checksum(genTestParser);
byte[] helloSum = checksum(genHello);
maven.executeMojo(session, project, exec);
assertTrue(Arrays.equals(testLexerSum, checksum(genTestLexer)));
assertFalse(Arrays.equals(testParserSum, checksum(genTestParser)));
assertTrue(Arrays.equals(helloSum, checksum(genHello)));
assertTrue(Arrays.equals(origTestLexerSum, checksum(genTestLexer)));
assertFalse(Arrays.equals(origTestParserSum, checksum(genTestParser)));
assertTrue(Arrays.equals(origHelloSum, checksum(genHello)));
}
// Restore file and confirm it was restored.
maven.executeMojo(session, project, exec);
assertTrue(Arrays.equals(origTestLexerSum, checksum(genTestLexer)));
assertTrue(Arrays.equals(origTestParserSum, checksum(genTestParser)));
assertTrue(Arrays.equals(origHelloSum, checksum(genHello)));
}
@Test
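
The test above now takes the baseline checksums once, right after the first build, and reuses them in every scenario, so each step can assert exactly which generated files changed and that restoring a grammar restores byte-identical output. A minimal sketch of what the `checksum(Path)` helper could look like (assuming a digest over the file's raw bytes; the actual helper may differ):

```
import java.nio.file.Files;
import java.nio.file.Path;
import java.security.MessageDigest;

// Digest the file contents so byte-identical regeneration compares equal.
private static byte[] checksum(Path path) throws Exception {
    MessageDigest digest = MessageDigest.getInstance("SHA-1");
    return digest.digest(Files.readAllBytes(path));
}
```
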


@@ -10,7 +10,4 @@ fragment
Whitespace : ' ' | '\n' | '\t' | '\r' ;
fragment
Hexdigit : [a-fA-F0-9] ;
fragment
Digit : [0-9] ;
Hexdigit : [a-fA-F0-9] ;


@@ -0,0 +1,4 @@
lexer grammar TestBaseLexer2;
fragment
Digit : [0-9] ;


@@ -1,6 +1,6 @@
lexer grammar TestLexer;
import TestBaseLexer;
import TestBaseLexer, TestBaseLexer2;
WS : Whitespace+ -> skip;
TEXT : ~[<&]+ ; // match any 16 bit char other than < and &
TEXT : ~[<&]+ ; // match any 16 bit char other than < and &


@@ -1,8 +1,8 @@
version: '4.6-SNAPSHOT+AppVeyor.{build}'
os: Windows Server 2012
version: '4.7.1-SNAPSHOT+AppVeyor.{build}'
build: off
build_script:
- mvn -DskipTests install -q --batch-mode
- mvn -DskipTests install --batch-mode
- msbuild runtime/CSharp/runtime/CSharp/Antlr4.vs2013.sln /logger:"C:\Program Files\AppVeyor\BuildAgent\Appveyor.MSBuildLogger.dll" /verbosity:detailed
- msbuild ./runtime-testsuite/target/classes/CSharp/runtime/CSharp/Antlr4.vs2013.sln /logger:"C:\Program Files\AppVeyor\BuildAgent\Appveyor.MSBuildLogger.dll" /verbosity:detailed
test_script:
- mvn install -q -Dantlr-python2-python="C:\Python27\python.exe" -Dantlr-python3-python="C:\Python35\python.exe" -Dantlr-javascript-nodejs="C:\Program Files (x86)\nodejs\node.exe" --batch-mode
build:
verbosity: minimal
- mvn install -Dantlr-python2-python="C:\Python27\python.exe" -Dantlr-python3-python="C:\Python35\python.exe" -Dantlr-javascript-nodejs="C:\Program Files (x86)\nodejs\node.exe" --batch-mode


@@ -151,4 +151,20 @@ YYYY/MM/DD, github id, Full name, email
2017/06/11, erikbra, Erik A. Brandstadmoen, erik@brandstadmoen.net
2017/06/10, jm-mikkelsen, Jan Martin Mikkelsen, janm@transactionware.com
2017/06/25, alimg, Alim Gökkaya, alim.gokkaya@gmail.com
2017/06/28, jBugman, Sergey Parshukov, codedby@bugman.me
2017/07/09, neatnerd, Mike Arshinskiy, neatnerd@users.noreply.github.com
2017/07/11, dhalperi, Daniel Halperin, daniel@halper.in
2017/07/17, vaibhavaingankar09, Vaibhav Vaingankar, vbhvvaingankar9@gmail.com
2017/07/23, venkatperi, Venkat Peri, venkatperi@gmail.com
2017/07/27, shirou, WAKAYAMA Shirou, shirou.faw@gmail.com
2017/07/09, neatnerd, Mike Arshinskiy, neatnerd@users.noreply.github.com
2017/07/27, matthauck, Matt Hauck, matthauck@gmail.com
2017/07/27, shirou, WAKAYAMA Shirou, shirou.faw@gmail.com
2017/08/20, tiagomazzutti, Tiago Mazzutti, tiagomzt@gmail.com
2017/08/29, Eddy Reyes, eddy@mindsight.io
2017/09/09, brauliobz, Bráulio Bezerra, brauliobezerra@gmail.com
2017/09/11, sachinjain024, Sachin Jain, sachinjain024@gmail.com
2017/10/06, bramp, Andrew Brampton, brampton@gmail.com
2017/10/15, simkimsia, Sim Kim Sia, kimcity@gmail.com
2017/10/27, Griffon26, Maurice van der Pot, griffon26@kfk4ever.com
2017/05/29, rlfnb, Ralf Neeb, rlfnb@rlfnb.de


@@ -6,7 +6,7 @@ Hi and welcome to the version 4 release of ANTLR! It's named after the fearless
ANTLR is really two things: a tool that translates your grammar to a parser/lexer in Java (or other target language) and the runtime needed by the generated parsers/lexers. Even if you are using the ANTLR Intellij plug-in or ANTLRWorks to run the ANTLR tool, the generated code will still need the runtime library.
The first thing you should do is probably download and install a development tool plug-in. Even if you only use such tools for editing, they are great. Then, follow the instructions below to get the runtime environment available to your system to run generated parsers/lexers. In what follows, I talk about antlr-4.5.3-complete.jar, which has the tool and the runtime and any other support libraries (e.g., ANTLR v4 is written in v3).
The first thing you should do is probably download and install a development tool plug-in. Even if you only use such tools for editing, they are great. Then, follow the instructions below to get the runtime environment available to your system to run generated parsers/lexers. In what follows, I talk about antlr-4.7-complete.jar, which has the tool and the runtime and any other support libraries (e.g., ANTLR v4 is written in v3).
If you are going to integrate ANTLR into your existing build system using mvn, ant, or want to get ANTLR into your IDE such as eclipse or intellij, see Integrating ANTLR into Development Systems.
@@ -16,19 +16,21 @@ If you are going to integrate ANTLR into your existing build system using mvn, a
1. Download
```
$ cd /usr/local/lib
$ curl -O http://www.antlr.org/download/antlr-4.5.3-complete.jar
$ curl -O http://www.antlr.org/download/antlr-4.7-complete.jar
```
Or just download in browser from website:
[http://www.antlr.org/download.html](http://www.antlr.org/download.html)
and put it somewhere rational like `/usr/local/lib`.
2. Add `antlr-4.5.3-complete.jar` to your `CLASSPATH`:
2. Add `antlr-4.7-complete.jar` to your `CLASSPATH`:
```
$ export CLASSPATH=".:/usr/local/lib/antlr-4.5.3-complete.jar:$CLASSPATH"
$ export CLASSPATH=".:/usr/local/lib/antlr-4.7-complete.jar:$CLASSPATH"
```
It's also a good idea to put this in your `.bash_profile` or whatever your startup script is.
3. Create aliases for the ANTLR Tool, and `TestRig`.
```
$ alias antlr4='java -Xmx500M -cp "/usr/local/lib/antlr-4.5.3-complete.jar:$CLASSPATH" org.antlr.v4.Tool'
$ alias antlr4='java -Xmx500M -cp "/usr/local/lib/antlr-4.7-complete.jar:$CLASSPATH" org.antlr.v4.Tool'
$ alias grun='java org.antlr.v4.gui.TestRig'
```
@@ -39,11 +41,11 @@ $ alias grun='java org.antlr.v4.gui.TestRig'
0. Install Java (version 1.6 or higher)
1. Download antlr-4.5.3-complete.jar (or whatever version) from [http://www.antlr.org/download/](http://www.antlr.org/download/)
Save to your directory for 3rd party Java libraries, say `C:\Javalib`
2. Add `antlr-4.5-complete.jar` to CLASSPATH, either:
2. Add `antlr-4.5.3-complete.jar` to CLASSPATH, either:
* Permanently: Using System Properties dialog > Environment variables > Create or append to `CLASSPATH` variable
* Temporarily, at command line:
```
SET CLASSPATH=.;C:\Javalib\antlr-4.5.3-complete.jar;%CLASSPATH%
SET CLASSPATH=.;C:\Javalib\antlr-4.7-complete.jar;%CLASSPATH%
```
3. Create short convenient commands for the ANTLR Tool, and TestRig, using batch files or doskey commands:
* Batch files (in directory in system PATH) antlr4.bat and grun.bat
@@ -65,7 +67,7 @@ Either launch org.antlr.v4.Tool directly:
```
$ java org.antlr.v4.Tool
ANTLR Parser Generator Version 4.5.3
ANTLR Parser Generator Version 4.7
-o ___ specify output directory where all output is generated
-lib ___ specify location of .tokens files
...
@@ -74,8 +76,8 @@ ANTLR Parser Generator Version 4.5.3
or use -jar option on java:
```
$ java -jar /usr/local/lib/antlr-4.5.3-complete.jar
ANTLR Parser Generator Version 4.5.3
$ java -jar /usr/local/lib/antlr-4.7-complete.jar
ANTLR Parser Generator Version 4.7
-o ___ specify output directory where all output is generated
-lib ___ specify location of .tokens files
...

Binary file not shown.



@@ -1,9 +1,15 @@
# ANTLR4 Language Target, Runtime for Swift
## Performance Note
To use the ANTLR4 Swift target in a production environment, make sure to turn on compiler optimizations by following [these instructions](https://github.com/apple/swift-package-manager/blob/master/Documentation/Usage.md#build-configurations) if you use SwiftPM to build your project. If you are using Xcode to build your project, use a `release` build for production.
In short, you need to turn on `release` mode (which has all the optimizations preconfigured for you) so that the ANTLR4 Swift target can have reasonable parsing speed.
## Install ANTLR4
Make sure you have ANTLR
installed.[The getting started guide](getting-started.md) should get
installed. [The getting started guide](getting-started.md) should get
you started.
## Create a Swift lexer or parser
@@ -18,82 +24,120 @@ For a full list of antlr4 tool options, please visit the
## Build your Swift project with ANTLR runtime
The following instructions assume Xcode as the IDE:
### Note
* __Add parser/lexer to project__. Make sure the parsers/lexers
We use the __boot.py__ script, located at the root of the Swift runtime folder
`antlr4/runtime/Swift`, to provide additional support for both Xcode-based
projects and SPM-based projects. The sections below are organized for both
flavors. If you want to get started quickly, try:
```
python boot.py --help
```
for information about this script.
### Xcode Projects
Note that even if you are otherwise using ANTLR from a binary distribution,
you should compile the ANTLR Swift runtime from source, because the Swift
language does not yet have a stable ABI.
ANTLR uses Swift Package Manager to generate Xcode project files. Note that
Swift Package Manager does not currently support iOS, watchOS, or tvOS, so
if you wish to use those platforms, you will need to alter the project build
settings manually as appropriate.
#### Download source code for ANTLR
```
git clone https://github.com/antlr/antlr4
```
#### Generate Xcode project for ANTLR runtime
The `boot.py` script includes a wrapper around `swift package
generate-xcodeproj`. Use this to generate `Antlr4.xcodeproj` for the ANTLR
Swift runtime (using _swift package generate-xcodeproj_ directly is not
recommended, since the project depends on some parser files generated by _boot.py_).
```
cd antlr4/runtime/Swift
python boot.py --gen-xcodeproj
```
#### Import ANTLR Swift runtime into your project
Open your own project in Xcode.
Open Finder in the `runtime/Swift` directory:
```
# From antlr4/runtime/Swift
open .
```
Drag `Antlr4.xcodeproj` into your project.
After this is done, your Xcode project navigator will look something like the
screenshot below. In this example, your own project is "Smalltalk", and you
will be able to see `Antlr4.xcodeproj` shown as a contained project.
<img src=images/xcodenav.png width="300">
#### Edit the build settings if necessary
Swift Package Manager currently does not support iOS, watchOS, or tvOS. If
you wish to build for those platforms, you will need to alter the project
build settings manually.
#### Add generated parser and lexer to project
Make sure the parsers/lexers
generated in __step 2__ are added to the project. To do this, you can
drag the generated files from Finder to the Xcode IDE. Remember to
check __Copy items if needed__ to make sure the files are actually
copied into the project folder instead of added as symbolic links (see the
screenshot below). After moving you will be able to see your files in
the project navigator. But when you open one of the files, you will
see Xcode complaining the module "Antlr4" could not be found at the
import statement. This is expected, since we still need the ANTLR
Swift runtime for those missing symbols.
the project navigator. Make sure that the Target Membership settings
are correct for your project.
<img src=images/dragfile.png width="500">
* __Download ANTLR runtime__. Due to unstable ABI of Swift language,
there will not be a single "library" for the Swift ANTLR runtime for
now. To get Swift ANTLR runtime, clone the ANTLR repository. Open it
in finder. From the root directory of the repo, go to runtime/Swift
folder. You will see the Xcode project manifest file:
__Antlr4.xcodeproj__.
#### Add the ANTLR Swift runtime as a dependency
* __Import ANTLR Swift runtime into project__. Drag Antlr4.xcodeproj
into your project, after this is done, your Xcode project navigator
will be something like the screenshot below. In this case, your own
project is "Smalltalk", and you will be able to see the
Antlr4.xcodeproj shown as a contained project. The error message will
still be there, that's because we still need to tell Xcode how to find
the runtime.
<img src=images/xcodenav.png width="300">
* __Build ANTLR runtime__. By expanding the "Products" folder in the
inner project (Antlr4.xcodeproj), you will see two Antlr4.framework
files. ".framework" file is the swift version of ".jar", ".a" as in
JAVA, C/C++ Initially those two files should be red, that's because
they are not built. To build, click the "target selection" button
right next to your Xcode run button. And in the drop down select the
target you want to build. And you will see the two Antlr4.framework
files are for iOS and OSX, as shown below. After target selection,
press "CMD+B", and Xcode will build the framework for you. Then you
will see one of the frameworks become black.
<img src=images/targetselection.png width="500">
* __Add dependencies__. Simply adding ANTLR Swift runtime and build
the artifact is not enough. You still need to specify
dependencies. Click your own project (Smalltalk), and you will see
project setting page. Go to "Build Phase", and inside it make sure
your ANTLR Swift runtime framework is added to both "__Target
Dependencies__" and "__Link Binary With Libraries__" sections, as
shown below. After correctly added dependencies, the error message for
importing library will be gone.
Select your own project in Xcode and go to the Build Phases settings panel.
Add the ANTLR runtime under __Target Dependencies__ and __Link Binary With
Libraries__.
<img src=images/xcodedep.png width="800">
## Example playground
#### Build your project
The Swift runtime includes an Xcode playground to get started with.
The runtime and generated grammar should now build correctly.
First go to the ANTLR4 repository, and open
`runtime/Swift/Antlr4.xcworkspace` in Xcode. Select "Antlr4 OSX > My
Mac" as the build target, and build the project as normal. The
playground should then be active.
### Swift Package Manager Projects
The playground includes a simple grammar called "Hello", and an
example for walking the parse tree. You should see in the playground
output that it is printing messages for each node in the parse tree as
it walks.
Since we cannot have a separate repository for the Swift target (see issue [#1774](https://github.com/antlr/antlr4/issues/1774)),
and Swift is currently not ABI stable, we support SPM-based
projects by creating a temporary local repository.
The grammar is defined in the playground's `Resources/Hello.g4`. The
parser was generated from the grammar using ANTLR like this:
For people using [Swift Package Manager](https://swift.org/package-manager/),
the __boot.py__ script supports generating a local repository that can be used
as a dependency in your project. Simply run:
```
antlr4 -Dlanguage=Swift -visitor -o ../Sources/Autogen Hello.g4
```
python boot.py --gen-spm-module
```
The example tree walker is in Sources/HelloWalker.swift.
The prompt will show something like the following:
<img src=images/gen_spm_module.png width="800">
Add the SPM directive that contains the URL of the temporary repository to your
project's `Package.swift`, then run `swift build` in your project.
The repository is generated in your system's `/tmp/` directory; if you find that
inconvenient, consider copying the generated ANTLR repository to some place
that won't be cleaned automatically and updating the `url` parameter in your
`Package.swift` file.


@@ -72,7 +72,7 @@ TokenStartColumnEquals(i) ::= <%self._tokenStartCharPositionInLine == <i>%>
ImportListener(X) ::= ""
GetExpectedTokenNames() ::= "try self.getExpectedTokens().toString(self.tokenNames)"
GetExpectedTokenNames() ::= "try self.getExpectedTokens().toString(self.getVocabulary())"
RuleInvocationStack() ::= "getRuleInvocationStack().description.replacingOccurrences(of: \"\\\"\", with: \"\")"


@@ -23,6 +23,7 @@ public class TestCodePointCharStream {
CodePointCharStream s = CharStreams.fromString("");
assertEquals(0, s.size());
assertEquals(0, s.index());
assertEquals("", s.toString());
}
@Test


@@ -618,4 +618,28 @@ public class ParserErrorsDescriptors {
public String grammar;
}
public static class ExtraneousInput extends BaseParserTestDescriptor {
public String input = "baa";
public String output = null;
public String errors = "line 1:0 mismatched input 'b' expecting {<EOF>, 'a'}\n";
public String startRule = "file";
public String grammarName = "T";
/**
grammar T;
member : 'a';
body : member*;
file : body EOF;
B : 'b';
*/
@CommentHasStringValue
public String grammar;
@Override
public boolean ignore(String targetName) {
return !"Java".equals(targetName);
}
}
}
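
The expected error follows from the grammar: `file : body EOF;` with `body : member*;` means the only tokens that may begin a file are 'a' or EOF, so the leading 'b' of "baa" is reported as mismatched input at line 1:0. A hedged sketch of the same check outside the test harness (`TLexer` and `TParser` stand in for the classes ANTLR would generate from grammar T):

```
import org.antlr.v4.runtime.CharStreams;
import org.antlr.v4.runtime.CommonTokenStream;

// Parse "baa" with grammar T's start rule and observe the syntax error
// on stderr: line 1:0 mismatched input 'b' expecting {<EOF>, 'a'}
TLexer lexer = new TLexer(CharStreams.fromString("baa"));
TParser parser = new TParser(new CommonTokenStream(lexer));
parser.file();
```
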


@@ -283,11 +283,16 @@ public class SemPredEvalParserDescriptors {
public String input = "s\n\n\nx\n";
public String output = "(file_ (para (paraContent s) \\n \\n) (para (paraContent \\n x \\n)) <EOF>)\n";
/**
line 5:0 mismatched input '<EOF>' expecting '
'
line 5:0 mismatched input '<EOF>' expecting {'s', '
', 'x'}
*/
@CommentHasStringValue
public String errors;
@Override
public boolean ignore(String targetName) {
return !"Java".equals(targetName);
}
}
public static class PredFromAltTestedInLoopBack_2 extends PredFromAltTestedInLoopBack {


@@ -145,7 +145,7 @@ public class BaseSwiftTest implements RuntimeTestSupport {
String projectName = "testcase-" + System.currentTimeMillis();
String projectDir = getTmpDir() + "/" + projectName;
buildProject(projectDir);
buildProject(projectDir, projectName);
return execTest(projectDir, projectName);
}
@@ -183,12 +183,12 @@ public class BaseSwiftTest implements RuntimeTestSupport {
Collections.addAll(this.sourceFiles, files);
}
private void buildProject(String projectDir) {
private void buildProject(String projectDir, String projectName) {
mkdir(projectDir);
fastFailRunProcess(projectDir, SWIFT_CMD, "package", "init", "--type", "executable");
for (String sourceFile: sourceFiles) {
String absPath = getTmpDir() + "/" + sourceFile;
fastFailRunProcess(getTmpDir(), "mv", "-f", absPath, projectDir + "/Sources/");
fastFailRunProcess(getTmpDir(), "mv", "-f", absPath, projectDir + "/Sources/" + projectName);
}
fastFailRunProcess(getTmpDir(), "mv", "-f", "input", projectDir);
@@ -201,7 +201,7 @@
"-Xlinker", "-rpath",
"-Xlinker", dylibPath);
if (buildResult.b.length() > 0) {
throw new RuntimeException("unit test build failed: " + buildResult.b);
throw new RuntimeException("unit test build failed: " + buildResult.a + "\n" + buildResult.b);
}
} catch (IOException | InterruptedException e) {
e.printStackTrace();
@@ -251,7 +251,7 @@
addSourceFiles("main.swift");
String projectName = "testcase-" + System.currentTimeMillis();
String projectDir = getTmpDir() + "/" + projectName;
buildProject(projectDir);
buildProject(projectDir, projectName);
return execTest(projectDir, projectName);
}


@@ -1092,7 +1092,10 @@ nextTransition_continue: ;
protected internal Guid ReadUUID()
{
byte[] d = BitConverter.GetBytes (ReadLong ());
Array.Reverse(d);
if(BitConverter.IsLittleEndian)
{
Array.Reverse(d);
}
short c = (short)ReadInt();
short b = (short)ReadInt();
int a = ReadInt32();
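
`BitConverter.GetBytes` yields bytes in the host CPU's native order, while the serialized ATN data has one fixed layout, so the bytes should only be reversed on little-endian hosts; reversing unconditionally broke big-endian platforms. For comparison, a hedged Java sketch that avoids the host dependence by declaring the byte order explicitly (`readLongBigEndian` is illustrative, not part of the runtime):

```
import java.nio.ByteBuffer;
import java.nio.ByteOrder;

// Read a long from serialized bytes with an explicit byte order, so the
// result does not depend on the endianness of the host CPU.
static long readLongBigEndian(byte[] data) {
    return ByteBuffer.wrap(data).order(ByteOrder.BIG_ENDIAN).getLong();
}
```
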


@@ -33,6 +33,7 @@ endif()
if(CMAKE_VERSION VERSION_EQUAL "3.3.0" OR
CMAKE_VERSION VERSION_GREATER "3.3.0")
CMAKE_POLICY(SET CMP0059 OLD)
CMAKE_POLICY(SET CMP0054 OLD)
endif()
if(CMAKE_SYSTEM_NAME MATCHES "Linux")
@@ -61,7 +62,11 @@ if (WITH_DEMO)
endif()
endif(WITH_DEMO)
set(MY_CXX_WARNING_FLAGS " -Wall -pedantic -W")
if (MSVC_VERSION)
set(MY_CXX_WARNING_FLAGS " /W4")
else()
set(MY_CXX_WARNING_FLAGS " -Wall -pedantic -W")
endif()
# Initialize CXXFLAGS.
if("${CMAKE_VERSION}" VERSION_GREATER 3.1.0)
@@ -75,11 +80,18 @@ else()
set(CMAKE_CXX_FLAGS_RELWITHDEBINFO "${CMAKE_CXX_FLAGS_RELWITHDEBINFO} -std=c++11")
endif()
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wall ${MY_CXX_WARNING_FLAGS}")
set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -O0 -g ${MY_CXX_WARNING_FLAGS}")
set(CMAKE_CXX_FLAGS_MINSIZEREL "${CMAKE_CXX_FLAGS_MINSIZEREL} -Os -DNDEBUG ${MY_CXX_WARNING_FLAGS}")
set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} -O3 -DNDEBUG ${MY_CXX_WARNING_FLAGS}")
set(CMAKE_CXX_FLAGS_RELWITHDEBINFO "${CMAKE_CXX_FLAGS_RELWITHDEBINFO} -O2 -g ${MY_CXX_WARNING_FLAGS}")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${MY_CXX_WARNING_FLAGS}")
if (MSVC_VERSION)
set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} /Od /Zi /MP ${MY_CXX_WARNING_FLAGS}")
set(CMAKE_CXX_FLAGS_MINSIZEREL "${CMAKE_CXX_FLAGS_MINSIZEREL} /O1 /Oi /Ob2 /Gy /MP /DNDEBUG ${MY_CXX_WARNING_FLAGS}")
set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} /O2 /Oi /Ob2 /Gy /MP /DNDEBUG ${MY_CXX_WARNING_FLAGS}")
set(CMAKE_CXX_FLAGS_RELWITHDEBINFO "${CMAKE_CXX_FLAGS_RELWITHDEBINFO} /O2 /Oi /Ob2 /Gy /MP /Zi ${MY_CXX_WARNING_FLAGS}")
else()
set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -O0 -g ${MY_CXX_WARNING_FLAGS}")
set(CMAKE_CXX_FLAGS_MINSIZEREL "${CMAKE_CXX_FLAGS_MINSIZEREL} -Os -DNDEBUG ${MY_CXX_WARNING_FLAGS}")
set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} -O3 -DNDEBUG ${MY_CXX_WARNING_FLAGS}")
set(CMAKE_CXX_FLAGS_RELWITHDEBINFO "${CMAKE_CXX_FLAGS_RELWITHDEBINFO} -O2 -g ${MY_CXX_WARNING_FLAGS}")
endif()
# Compiler-specific C++11 activation.
if ("${CMAKE_CXX_COMPILER_ID}" MATCHES "GNU")
@@ -101,6 +113,8 @@ elseif ("${CMAKE_CXX_COMPILER_ID}" MATCHES "Clang" AND CMAKE_SYSTEM_NAME MATCHES
if (WITH_LIBCXX)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -stdlib=libc++")
endif()
elseif ( MSVC_VERSION GREATER 1800 OR MSVC_VERSION EQUAL 1800 )
# Visual Studio 2013+ (MSVC 1800 or newer) supports C++11 features
else ()
message(FATAL_ERROR "Your C++ compiler does not support C++11.")
endif ()


@@ -91,7 +91,7 @@ using namespace antlr4::misc;
- (void)testANTLRInputStreamUse {
std::string text(u8"🚧Lorem ipsum dolor sit amet🕶");
std::u32string wtext = utfConverter.from_bytes(text); // Convert to UTF-32.
std::u32string wtext = utf8_to_utf32(text.c_str(), text.c_str() + text.size()); // Convert to UTF-32.
ANTLRInputStream stream(text);
XCTAssertEqual(stream.index(), 0U);
XCTAssertEqual(stream.size(), wtext.size());
@@ -116,8 +116,8 @@
XCTAssertEqual(stream.LA(0), 0ULL);
for (size_t i = 1; i < wtext.size(); ++i) {
XCTAssertEqual(stream.LA((ssize_t)i), wtext[i - 1]); // LA(1) means: current char.
XCTAssertEqual(stream.LT((ssize_t)i), wtext[i - 1]); // LT is mapped to LA.
XCTAssertEqual(stream.LA(static_cast<ssize_t>(i)), wtext[i - 1]); // LA(1) means: current char.
XCTAssertEqual(stream.LT(static_cast<ssize_t>(i)), wtext[i - 1]); // LT is mapped to LA.
XCTAssertEqual(stream.index(), 0U); // No consumption when looking ahead.
}
@@ -128,7 +128,7 @@ using namespace antlr4::misc;
XCTAssertEqual(stream.index(), wtext.size() / 2);
stream.seek(wtext.size() - 1);
for (ssize_t i = 1; i < (ssize_t)wtext.size() - 1; ++i) {
for (ssize_t i = 1; i < static_cast<ssize_t>(wtext.size()) - 1; ++i) {
XCTAssertEqual(stream.LA(-i), wtext[wtext.size() - i - 1]); // LA(-1) means: previous char.
XCTAssertEqual(stream.LT(-i), wtext[wtext.size() - i - 1]); // LT is mapped to LA.
XCTAssertEqual(stream.index(), wtext.size() - 1); // No consumption when looking ahead.
@@ -150,7 +150,7 @@ using namespace antlr4::misc;
misc::Interval interval1(2, 10UL); // From - to, inclusive.
std::string output = stream.getText(interval1);
std::string sub = utfConverter.to_bytes(wtext.substr(2, 9));
std::string sub = utf32_to_utf8(wtext.substr(2, 9));
XCTAssertEqual(output, sub);
misc::Interval interval2(200, 10UL); // Start beyond bounds.


@@ -92,7 +92,7 @@ using namespace antlrcpp;
// in a deterministic and a random sequence of 100K values each.
std::set<size_t> hashs;
for (size_t i = 0; i < 100000; ++i) {
std::vector<size_t> data = { i, (size_t)(i * M_PI), arc4random()};
std::vector<size_t> data = { i, static_cast<size_t>(i * M_PI), arc4random() };
size_t hash = 0;
for (auto value : data)
hash = MurmurHash::update(hash, value);
@@ -103,7 +103,7 @@
hashs.clear();
for (size_t i = 0; i < 100000; ++i) {
std::vector<size_t> data = { i, (size_t)(i * M_PI)};
std::vector<size_t> data = { i, static_cast<size_t>(i * M_PI) };
size_t hash = 0;
for (auto value : data)
hash = MurmurHash::update(hash, value);
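
The test exercises the MurmurHash protocol the runtimes share: seed a hash, fold in each value with `update`, then finalize with the number of words hashed. The Java runtime's equivalent loop looks roughly like this (a sketch assuming the `org.antlr.v4.runtime.misc.MurmurHash` API; `hashOf` is a hypothetical wrapper):

```
import org.antlr.v4.runtime.misc.MurmurHash;

// Fold each value into a running hash, then finalize with the word count.
static int hashOf(int... values) {
    int hash = MurmurHash.initialize();
    for (int value : values) {
        hash = MurmurHash.update(hash, value);
    }
    return MurmurHash.finish(hash, values.length);
}
```
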
@@ -232,19 +232,25 @@ using namespace antlrcpp;
{ 78, Interval(1000, 1000UL), Interval(20, 100UL), { false, false, true, true, false, true, false, false } },
// It's possible to add more tests with borders that touch each other (e.g. first starts before/on/after second
// and first ends directly before/after second. However, such cases are not handled differently in the Interval class
// and first ends directly before/after second. However, such cases are not handled differently in the Interval
// class
// (only adjacent intervals, where first ends directly before second starts and vice versa). So I omitted them here.
};
for (auto &entry : testData) {
XCTAssert(entry.interval1.startsBeforeDisjoint(entry.interval2) == entry.results[0], @"entry: %zu", entry.runningNumber);
XCTAssert(entry.interval1.startsBeforeNonDisjoint(entry.interval2) == entry.results[1], @"entry: %zu", entry.runningNumber);
XCTAssert(entry.interval1.startsBeforeDisjoint(entry.interval2) == entry.results[0], @"entry: %zu",
entry.runningNumber);
XCTAssert(entry.interval1.startsBeforeNonDisjoint(entry.interval2) == entry.results[1], @"entry: %zu",
entry.runningNumber);
XCTAssert(entry.interval1.startsAfter(entry.interval2) == entry.results[2], @"entry: %zu", entry.runningNumber);
XCTAssert(entry.interval1.startsAfterDisjoint(entry.interval2) == entry.results[3], @"entry: %zu", entry.runningNumber);
XCTAssert(entry.interval1.startsAfterNonDisjoint(entry.interval2) == entry.results[4], @"entry: %zu", entry.runningNumber);
XCTAssert(entry.interval1.startsAfterDisjoint(entry.interval2) == entry.results[3], @"entry: %zu",
entry.runningNumber);
XCTAssert(entry.interval1.startsAfterNonDisjoint(entry.interval2) == entry.results[4], @"entry: %zu",
entry.runningNumber);
XCTAssert(entry.interval1.disjoint(entry.interval2) == entry.results[5], @"entry: %zu", entry.runningNumber);
XCTAssert(entry.interval1.adjacent(entry.interval2) == entry.results[6], @"entry: %zu", entry.runningNumber);
XCTAssert(entry.interval1.properlyContains(entry.interval2) == entry.results[7], @"entry: %zu", entry.runningNumber);
XCTAssert(entry.interval1.properlyContains(entry.interval2) == entry.results[7], @"entry: %zu",
entry.runningNumber);
}
XCTAssert(Interval().Union(Interval(10, 100UL)) == Interval(-1L, 100));
@@ -327,30 +333,34 @@ using namespace antlrcpp;
try {
set4.clear();
XCTFail(@"Expected exception");
}
catch (IllegalStateException &e) {
} catch (IllegalStateException &e) {
}
try {
set4.setReadOnly(false);
XCTFail(@"Expected exception");
} catch (IllegalStateException &e) {
}
catch (IllegalStateException &e) {
}
set4 = IntervalSet::of(12345);
XCTAssertEqual(set4.getSingleElement(), 12345);
XCTAssertEqual(set4.getMinElement(), 12345);
XCTAssertEqual(set4.getMaxElement(), 12345);
IntervalSet set5(10, 5, 10, 15, 20, 25, 30, 35, 40, 45, 50);
XCTAssertEqual(set5.getMinElement(), 5);
XCTAssertEqual(set5.getMaxElement(), 50);
XCTAssertEqual(set5.size(), 10U);
set5.add(12, 18);
XCTAssertEqual(set5.size(), 16U); // (15, 15) replaced by (12, 18)
set5.add(9, 33);
XCTAssertEqual(set5.size(), 30U); // (10, 10), (12, 18), (20, 20), (25, 25) and (30, 30) replaced by (9, 33)
try {
set4 = IntervalSet::of(12345);
XCTFail(@"Expected exception");
} catch (IllegalStateException &e) {
}
IntervalSet set5 = IntervalSet::of(12345);
XCTAssertEqual(set5.getSingleElement(), 12345);
XCTAssertEqual(set5.getMinElement(), 12345);
XCTAssertEqual(set5.getMaxElement(), 12345);
IntervalSet set6(10, 5, 10, 15, 20, 25, 30, 35, 40, 45, 50);
XCTAssertEqual(set6.getMinElement(), 5);
XCTAssertEqual(set6.getMaxElement(), 50);
XCTAssertEqual(set6.size(), 10U);
set6.add(12, 18);
XCTAssertEqual(set6.size(), 16U); // (15, 15) replaced by (12, 18)
set6.add(9, 33);
XCTAssertEqual(set6.size(), 30U); // (10, 10), (12, 18), (20, 20), (25, 25) and (30, 30) replaced by (9, 33)
XCTAssert(IntervalSet(3, 1, 2, 10).Or(IntervalSet(3, 1, 2, 5)) == IntervalSet(4, 1, 2, 5, 10));
XCTAssert(IntervalSet({ Interval(2, 10UL) }).Or(IntervalSet({ Interval(5, 8UL) })) == IntervalSet({ Interval(2, 10UL) }));
@@ -358,8 +368,10 @@ using namespace antlrcpp;
XCTAssert(IntervalSet::of(1, 10).complement(IntervalSet::of(7, 55)) == IntervalSet::of(11, 55));
XCTAssert(IntervalSet::of(1, 10).complement(IntervalSet::of(20, 55)) == IntervalSet::of(20, 55));
XCTAssert(IntervalSet::of(1, 10).complement(IntervalSet::of(5, 6)) == IntervalSet::EMPTY_SET);
XCTAssert(IntervalSet::of(15, 20).complement(IntervalSet::of(7, 55)) == IntervalSet({ Interval(7, 14UL), Interval(21, 55UL) }));
XCTAssert(IntervalSet({ Interval(1, 10UL), Interval(30, 35UL) }).complement(IntervalSet::of(7, 55)) == IntervalSet({ Interval(11, 29UL), Interval(36, 55UL) }));
XCTAssert(IntervalSet::of(15, 20).complement(IntervalSet::of(7, 55)) ==
IntervalSet({ Interval(7, 14UL), Interval(21, 55UL) }));
XCTAssert(IntervalSet({ Interval(1, 10UL), Interval(30, 35UL) }).complement(IntervalSet::of(7, 55)) ==
IntervalSet({ Interval(11, 29UL), Interval(36, 55UL) }));
XCTAssert(IntervalSet::of(1, 10).And(IntervalSet::of(7, 55)) == IntervalSet::of(7, 10));
XCTAssert(IntervalSet::of(1, 10).And(IntervalSet::of(20, 55)) == IntervalSet::EMPTY_SET);
@@ -368,7 +380,8 @@ using namespace antlrcpp;
XCTAssert(IntervalSet::of(1, 10).subtract(IntervalSet::of(7, 55)) == IntervalSet::of(1, 6));
XCTAssert(IntervalSet::of(1, 10).subtract(IntervalSet::of(20, 55)) == IntervalSet::of(1, 10));
XCTAssert(IntervalSet::of(1, 10).subtract(IntervalSet::of(5, 6)) == IntervalSet({ Interval(1, 4UL), Interval(7, 10UL) }));
XCTAssert(IntervalSet::of(1, 10).subtract(IntervalSet::of(5, 6)) ==
IntervalSet({ Interval(1, 4UL), Interval(7, 10UL) }));
XCTAssert(IntervalSet::of(15, 20).subtract(IntervalSet::of(7, 55)) == IntervalSet::EMPTY_SET);
}


@@ -12,7 +12,8 @@ rem Headers
xcopy runtime\src\*.h antlr4-runtime\ /s
rem Binaries
if exist "C:\Program Files (x86)\Microsoft Visual Studio 12.0\Common7\Tools\VsDevCmd.bat" (
rem VS 2013 disabled by default. Change the X to a C to enable it.
if exist "X:\Program Files (x86)\Microsoft Visual Studio 12.0\Common7\Tools\VsDevCmd.bat" (
call "C:\Program Files (x86)\Microsoft Visual Studio 12.0\Common7\Tools\VsDevCmd.bat"
pushd runtime


@@ -44,7 +44,11 @@ elseif(APPLE)
target_link_libraries(antlr4_static ${COREFOUNDATION_LIBRARY})
endif()
set(disabled_compile_warnings "-Wno-overloaded-virtual")
if (MSVC_VERSION)
set(disabled_compile_warnings "/wd4251")
else()
set(disabled_compile_warnings "-Wno-overloaded-virtual")
endif()
if ("${CMAKE_CXX_COMPILER_ID}" MATCHES "Clang")
set(disabled_compile_warnings "${disabled_compile_warnings} -Wno-dollar-in-identifier-extension -Wno-four-char-constants")
elseif("${CMAKE_CXX_COMPILER_ID}" MATCHES "GNU")
@@ -57,6 +61,15 @@ if (WIN32)
set(extra_share_compile_flags "-DANTLR4CPP_EXPORTS")
set(extra_static_compile_flags "-DANTLR4CPP_STATIC")
endif(WIN32)
if (MSVC_VERSION)
target_compile_options(antlr4_shared PRIVATE "/MD$<$<CONFIG:Debug>:d>")
target_compile_options(antlr4_static PRIVATE "/MT$<$<CONFIG:Debug>:d>")
endif()
set(static_lib_suffix "")
if (MSVC_VERSION)
set(static_lib_suffix "-static")
endif()
set_target_properties(antlr4_shared
PROPERTIES VERSION ${ANTLR_VERSION}
@@ -72,7 +85,7 @@ set_target_properties(antlr4_shared
set_target_properties(antlr4_static
PROPERTIES VERSION ${ANTLR_VERSION}
SOVERSION ${ANTLR_VERSION}
OUTPUT_NAME antlr4-runtime
OUTPUT_NAME "antlr4-runtime${static_lib_suffix}"
ARCHIVE_OUTPUT_DIRECTORY ${LIB_OUTPUT_DIR}
COMPILE_FLAGS "${disabled_compile_warnings} ${extra_static_compile_flags}")


@@ -321,6 +321,8 @@
</Link>
</ItemDefinitionGroup>
<ItemGroup>
<ClCompile Include="src\ANTLRErrorListener.cpp" />
<ClCompile Include="src\ANTLRErrorStrategy.cpp" />
<ClCompile Include="src\ANTLRFileStream.cpp" />
<ClCompile Include="src\ANTLRInputStream.cpp" />
<ClCompile Include="src\atn\AbstractPredicateTransition.cpp" />
@@ -339,6 +341,7 @@
<ClCompile Include="src\atn\BasicBlockStartState.cpp" />
<ClCompile Include="src\atn\BasicState.cpp" />
<ClCompile Include="src\atn\BlockEndState.cpp" />
<ClCompile Include="src\atn\BlockStartState.cpp" />
<ClCompile Include="src\atn\ContextSensitivityInfo.cpp" />
<ClCompile Include="src\atn\DecisionEventInfo.cpp" />
<ClCompile Include="src\atn\DecisionInfo.cpp" />
@@ -346,6 +349,7 @@
<ClCompile Include="src\atn\EmptyPredictionContext.cpp" />
<ClCompile Include="src\atn\EpsilonTransition.cpp" />
<ClCompile Include="src\atn\ErrorInfo.cpp" />
<ClCompile Include="src\atn\LexerAction.cpp" />
<ClCompile Include="src\atn\LexerActionExecutor.cpp" />
<ClCompile Include="src\atn\LexerATNConfig.cpp" />
<ClCompile Include="src\atn\LexerATNSimulator.cpp" />
@@ -412,6 +416,7 @@
<ClCompile Include="src\misc\Interval.cpp" />
<ClCompile Include="src\misc\IntervalSet.cpp" />
<ClCompile Include="src\misc\MurmurHash.cpp" />
<ClCompile Include="src\misc\Predicate.cpp" />
<ClCompile Include="src\NoViableAltException.cpp" />
<ClCompile Include="src\Parser.cpp" />
<ClCompile Include="src\ParserInterpreter.cpp" />
@@ -422,16 +427,23 @@
<ClCompile Include="src\RuleContext.cpp" />
<ClCompile Include="src\RuleContextWithAltNum.cpp" />
<ClCompile Include="src\RuntimeMetaData.cpp" />
<ClCompile Include="src\support\Any.cpp" />
<ClCompile Include="src\support\Arrays.cpp" />
<ClCompile Include="src\support\CPPUtils.cpp" />
<ClCompile Include="src\support\guid.cpp" />
<ClCompile Include="src\support\StringUtils.cpp" />
<ClCompile Include="src\Token.cpp" />
<ClCompile Include="src\TokenSource.cpp" />
<ClCompile Include="src\TokenStream.cpp" />
<ClCompile Include="src\TokenStreamRewriter.cpp" />
<ClCompile Include="src\tree\ErrorNode.cpp" />
<ClCompile Include="src\tree\ErrorNodeImpl.cpp" />
<ClCompile Include="src\tree\IterativeParseTreeWalker.cpp" />
<ClCompile Include="src\tree\ParseTree.cpp" />
<ClCompile Include="src\tree\ParseTreeListener.cpp" />
<ClCompile Include="src\tree\ParseTreeVisitor.cpp" />
<ClCompile Include="src\tree\ParseTreeWalker.cpp" />
<ClCompile Include="src\tree\pattern\Chunk.cpp" />
<ClCompile Include="src\tree\pattern\ParseTreeMatch.cpp" />
<ClCompile Include="src\tree\pattern\ParseTreePattern.cpp" />
<ClCompile Include="src\tree\pattern\ParseTreePatternMatcher.cpp" />
@@ -439,6 +451,7 @@
<ClCompile Include="src\tree\pattern\TagChunk.cpp" />
<ClCompile Include="src\tree\pattern\TextChunk.cpp" />
<ClCompile Include="src\tree\pattern\TokenTagToken.cpp" />
<ClCompile Include="src\tree\TerminalNode.cpp" />
<ClCompile Include="src\tree\TerminalNodeImpl.cpp" />
<ClCompile Include="src\tree\Trees.cpp" />
<ClCompile Include="src\tree\xpath\XPath.cpp" />
@@ -454,6 +467,7 @@
<ClCompile Include="src\UnbufferedCharStream.cpp" />
<ClCompile Include="src\UnbufferedTokenStream.cpp" />
<ClCompile Include="src\Vocabulary.cpp" />
<ClCompile Include="src\WritableToken.cpp" />
</ItemGroup>
<ItemGroup>
<ClInclude Include="src\antlr4-common.h" />
@@ -620,4 +634,4 @@
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
<ImportGroup Label="ExtensionTargets">
</ImportGroup>
</Project>
</Project>


@@ -938,5 +938,47 @@
<ClCompile Include="src\tree\IterativeParseTreeWalker.cpp">
<Filter>Source Files\tree</Filter>
</ClCompile>
<ClCompile Include="src\ANTLRErrorListener.cpp">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="src\ANTLRErrorStrategy.cpp">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="src\Token.cpp">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="src\TokenSource.cpp">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="src\WritableToken.cpp">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="src\tree\ErrorNode.cpp">
<Filter>Source Files\tree</Filter>
</ClCompile>
<ClCompile Include="src\tree\ParseTreeListener.cpp">
<Filter>Source Files\tree</Filter>
</ClCompile>
<ClCompile Include="src\tree\ParseTreeVisitor.cpp">
<Filter>Source Files\tree</Filter>
</ClCompile>
<ClCompile Include="src\tree\TerminalNode.cpp">
<Filter>Source Files\tree</Filter>
</ClCompile>
<ClCompile Include="src\support\Any.cpp">
<Filter>Source Files\support</Filter>
</ClCompile>
<ClCompile Include="src\atn\BlockStartState.cpp">
<Filter>Source Files\atn</Filter>
</ClCompile>
<ClCompile Include="src\atn\LexerAction.cpp">
<Filter>Source Files\atn</Filter>
</ClCompile>
<ClCompile Include="src\tree\pattern\Chunk.cpp">
<Filter>Source Files\tree\pattern</Filter>
</ClCompile>
<ClCompile Include="src\misc\Predicate.cpp">
<Filter>Source Files\misc</Filter>
</ClCompile>
</ItemGroup>
</Project>
</Project>


@@ -334,6 +334,8 @@
</Link>
</ItemDefinitionGroup>
<ItemGroup>
<ClCompile Include="src\ANTLRErrorListener.cpp" />
<ClCompile Include="src\ANTLRErrorStrategy.cpp" />
<ClCompile Include="src\ANTLRFileStream.cpp" />
<ClCompile Include="src\ANTLRInputStream.cpp" />
<ClCompile Include="src\atn\AbstractPredicateTransition.cpp" />
@@ -352,6 +354,7 @@
<ClCompile Include="src\atn\BasicBlockStartState.cpp" />
<ClCompile Include="src\atn\BasicState.cpp" />
<ClCompile Include="src\atn\BlockEndState.cpp" />
<ClCompile Include="src\atn\BlockStartState.cpp" />
<ClCompile Include="src\atn\ContextSensitivityInfo.cpp" />
<ClCompile Include="src\atn\DecisionEventInfo.cpp" />
<ClCompile Include="src\atn\DecisionInfo.cpp" />
@@ -359,6 +362,7 @@
<ClCompile Include="src\atn\EmptyPredictionContext.cpp" />
<ClCompile Include="src\atn\EpsilonTransition.cpp" />
<ClCompile Include="src\atn\ErrorInfo.cpp" />
<ClCompile Include="src\atn\LexerAction.cpp" />
<ClCompile Include="src\atn\LexerActionExecutor.cpp" />
<ClCompile Include="src\atn\LexerATNConfig.cpp" />
<ClCompile Include="src\atn\LexerATNSimulator.cpp" />
@@ -425,6 +429,7 @@
<ClCompile Include="src\misc\Interval.cpp" />
<ClCompile Include="src\misc\IntervalSet.cpp" />
<ClCompile Include="src\misc\MurmurHash.cpp" />
<ClCompile Include="src\misc\Predicate.cpp" />
<ClCompile Include="src\NoViableAltException.cpp" />
<ClCompile Include="src\Parser.cpp" />
<ClCompile Include="src\ParserInterpreter.cpp" />
@@ -435,16 +440,23 @@
<ClCompile Include="src\RuleContext.cpp" />
<ClCompile Include="src\RuleContextWithAltNum.cpp" />
<ClCompile Include="src\RuntimeMetaData.cpp" />
<ClCompile Include="src\support\Any.cpp" />
<ClCompile Include="src\support\Arrays.cpp" />
<ClCompile Include="src\support\CPPUtils.cpp" />
<ClCompile Include="src\support\guid.cpp" />
<ClCompile Include="src\support\StringUtils.cpp" />
<ClCompile Include="src\Token.cpp" />
<ClCompile Include="src\TokenSource.cpp" />
<ClCompile Include="src\TokenStream.cpp" />
<ClCompile Include="src\TokenStreamRewriter.cpp" />
<ClCompile Include="src\tree\ErrorNode.cpp" />
<ClCompile Include="src\tree\ErrorNodeImpl.cpp" />
<ClCompile Include="src\tree\IterativeParseTreeWalker.cpp" />
<ClCompile Include="src\tree\ParseTree.cpp" />
<ClCompile Include="src\tree\ParseTreeListener.cpp" />
<ClCompile Include="src\tree\ParseTreeVisitor.cpp" />
<ClCompile Include="src\tree\ParseTreeWalker.cpp" />
<ClCompile Include="src\tree\pattern\Chunk.cpp" />
<ClCompile Include="src\tree\pattern\ParseTreeMatch.cpp" />
<ClCompile Include="src\tree\pattern\ParseTreePattern.cpp" />
<ClCompile Include="src\tree\pattern\ParseTreePatternMatcher.cpp" />
@@ -452,6 +464,7 @@
<ClCompile Include="src\tree\pattern\TagChunk.cpp" />
<ClCompile Include="src\tree\pattern\TextChunk.cpp" />
<ClCompile Include="src\tree\pattern\TokenTagToken.cpp" />
<ClCompile Include="src\tree\TerminalNode.cpp" />
<ClCompile Include="src\tree\TerminalNodeImpl.cpp" />
<ClCompile Include="src\tree\Trees.cpp" />
<ClCompile Include="src\tree\xpath\XPath.cpp" />
@@ -467,6 +480,7 @@
<ClCompile Include="src\UnbufferedCharStream.cpp" />
<ClCompile Include="src\UnbufferedTokenStream.cpp" />
<ClCompile Include="src\Vocabulary.cpp" />
<ClCompile Include="src\WritableToken.cpp" />
</ItemGroup>
<ItemGroup>
<ClInclude Include="src\antlr4-common.h" />
@@ -633,4 +647,4 @@
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
<ImportGroup Label="ExtensionTargets">
</ImportGroup>
</Project>
</Project>


@@ -938,5 +938,47 @@
<ClCompile Include="src\tree\IterativeParseTreeWalker.cpp">
<Filter>Source Files\tree</Filter>
</ClCompile>
<ClCompile Include="src\ANTLRErrorListener.cpp">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="src\ANTLRErrorStrategy.cpp">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="src\atn\BlockStartState.cpp">
<Filter>Source Files\atn</Filter>
</ClCompile>
<ClCompile Include="src\atn\LexerAction.cpp">
<Filter>Source Files\atn</Filter>
</ClCompile>
<ClCompile Include="src\misc\Predicate.cpp">
<Filter>Source Files\misc</Filter>
</ClCompile>
<ClCompile Include="src\Token.cpp">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="src\TokenSource.cpp">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="src\WritableToken.cpp">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="src\support\Any.cpp">
<Filter>Source Files\support</Filter>
</ClCompile>
<ClCompile Include="src\tree\ErrorNode.cpp">
<Filter>Source Files\tree</Filter>
</ClCompile>
<ClCompile Include="src\tree\ParseTreeListener.cpp">
<Filter>Source Files\tree</Filter>
</ClCompile>
<ClCompile Include="src\tree\ParseTreeVisitor.cpp">
<Filter>Source Files\tree</Filter>
</ClCompile>
<ClCompile Include="src\tree\TerminalNode.cpp">
<Filter>Source Files\tree</Filter>
</ClCompile>
<ClCompile Include="src\tree\pattern\Chunk.cpp">
<Filter>Source Files\tree\pattern</Filter>
</ClCompile>
</ItemGroup>
</Project>
</Project>


@@ -534,9 +534,6 @@
276E5F411CDB57AA003FF4B4 /* IntStream.h in Headers */ = {isa = PBXBuildFile; fileRef = 276E5CBF1CDB57AA003FF4B4 /* IntStream.h */; };
276E5F421CDB57AA003FF4B4 /* IntStream.h in Headers */ = {isa = PBXBuildFile; fileRef = 276E5CBF1CDB57AA003FF4B4 /* IntStream.h */; };
276E5F431CDB57AA003FF4B4 /* IntStream.h in Headers */ = {isa = PBXBuildFile; fileRef = 276E5CBF1CDB57AA003FF4B4 /* IntStream.h */; settings = {ATTRIBUTES = (Public, ); }; };
276E5F441CDB57AA003FF4B4 /* IRecognizer.h in Headers */ = {isa = PBXBuildFile; fileRef = 276E5CC01CDB57AA003FF4B4 /* IRecognizer.h */; };
276E5F451CDB57AA003FF4B4 /* IRecognizer.h in Headers */ = {isa = PBXBuildFile; fileRef = 276E5CC01CDB57AA003FF4B4 /* IRecognizer.h */; };
276E5F461CDB57AA003FF4B4 /* IRecognizer.h in Headers */ = {isa = PBXBuildFile; fileRef = 276E5CC01CDB57AA003FF4B4 /* IRecognizer.h */; settings = {ATTRIBUTES = (Public, ); }; };
276E5F471CDB57AA003FF4B4 /* Lexer.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 276E5CC11CDB57AA003FF4B4 /* Lexer.cpp */; };
276E5F481CDB57AA003FF4B4 /* Lexer.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 276E5CC11CDB57AA003FF4B4 /* Lexer.cpp */; };
276E5F491CDB57AA003FF4B4 /* Lexer.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 276E5CC11CDB57AA003FF4B4 /* Lexer.cpp */; };
@ -800,6 +797,45 @@
27745F081CE49C000067C6A3 /* RuntimeMetaData.h in Headers */ = {isa = PBXBuildFile; fileRef = 27745EFC1CE49C000067C6A3 /* RuntimeMetaData.h */; };
27874F1E1CCB7A0700AF1C53 /* CoreFoundation.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 27874F1D1CCB7A0700AF1C53 /* CoreFoundation.framework */; };
27874F211CCB7B1700AF1C53 /* CoreFoundation.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 27874F1D1CCB7A0700AF1C53 /* CoreFoundation.framework */; };
2793DC851F08083F00A84290 /* TokenSource.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 2793DC841F08083F00A84290 /* TokenSource.cpp */; };
2793DC861F08083F00A84290 /* TokenSource.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 2793DC841F08083F00A84290 /* TokenSource.cpp */; };
2793DC871F08083F00A84290 /* TokenSource.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 2793DC841F08083F00A84290 /* TokenSource.cpp */; };
2793DC891F08087500A84290 /* Chunk.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 2793DC881F08087500A84290 /* Chunk.cpp */; };
2793DC8A1F08087500A84290 /* Chunk.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 2793DC881F08087500A84290 /* Chunk.cpp */; };
2793DC8B1F08087500A84290 /* Chunk.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 2793DC881F08087500A84290 /* Chunk.cpp */; };
2793DC8D1F08088F00A84290 /* ParseTreeListener.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 2793DC8C1F08088F00A84290 /* ParseTreeListener.cpp */; };
2793DC8E1F08088F00A84290 /* ParseTreeListener.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 2793DC8C1F08088F00A84290 /* ParseTreeListener.cpp */; };
2793DC8F1F08088F00A84290 /* ParseTreeListener.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 2793DC8C1F08088F00A84290 /* ParseTreeListener.cpp */; };
2793DC911F0808A200A84290 /* TerminalNode.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 2793DC901F0808A200A84290 /* TerminalNode.cpp */; };
2793DC921F0808A200A84290 /* TerminalNode.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 2793DC901F0808A200A84290 /* TerminalNode.cpp */; };
2793DC931F0808A200A84290 /* TerminalNode.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 2793DC901F0808A200A84290 /* TerminalNode.cpp */; };
2793DC961F0808E100A84290 /* ErrorNode.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 2793DC941F0808E100A84290 /* ErrorNode.cpp */; };
2793DC971F0808E100A84290 /* ErrorNode.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 2793DC941F0808E100A84290 /* ErrorNode.cpp */; };
2793DC981F0808E100A84290 /* ErrorNode.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 2793DC941F0808E100A84290 /* ErrorNode.cpp */; };
2793DC991F0808E100A84290 /* ParseTreeVisitor.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 2793DC951F0808E100A84290 /* ParseTreeVisitor.cpp */; };
2793DC9A1F0808E100A84290 /* ParseTreeVisitor.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 2793DC951F0808E100A84290 /* ParseTreeVisitor.cpp */; };
2793DC9B1F0808E100A84290 /* ParseTreeVisitor.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 2793DC951F0808E100A84290 /* ParseTreeVisitor.cpp */; };
2793DC9D1F08090D00A84290 /* Any.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 2793DC9C1F08090D00A84290 /* Any.cpp */; };
2793DC9E1F08090D00A84290 /* Any.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 2793DC9C1F08090D00A84290 /* Any.cpp */; };
2793DC9F1F08090D00A84290 /* Any.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 2793DC9C1F08090D00A84290 /* Any.cpp */; };
2793DCA41F08095F00A84290 /* ANTLRErrorListener.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 2793DCA01F08095F00A84290 /* ANTLRErrorListener.cpp */; };
2793DCA51F08095F00A84290 /* ANTLRErrorListener.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 2793DCA01F08095F00A84290 /* ANTLRErrorListener.cpp */; };
2793DCA61F08095F00A84290 /* ANTLRErrorListener.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 2793DCA01F08095F00A84290 /* ANTLRErrorListener.cpp */; };
2793DCA71F08095F00A84290 /* ANTLRErrorStrategy.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 2793DCA11F08095F00A84290 /* ANTLRErrorStrategy.cpp */; };
2793DCA81F08095F00A84290 /* ANTLRErrorStrategy.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 2793DCA11F08095F00A84290 /* ANTLRErrorStrategy.cpp */; };
2793DCA91F08095F00A84290 /* ANTLRErrorStrategy.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 2793DCA11F08095F00A84290 /* ANTLRErrorStrategy.cpp */; };
2793DCAA1F08095F00A84290 /* Token.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 2793DCA21F08095F00A84290 /* Token.cpp */; };
2793DCAB1F08095F00A84290 /* Token.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 2793DCA21F08095F00A84290 /* Token.cpp */; };
2793DCAC1F08095F00A84290 /* Token.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 2793DCA21F08095F00A84290 /* Token.cpp */; };
2793DCAD1F08095F00A84290 /* WritableToken.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 2793DCA31F08095F00A84290 /* WritableToken.cpp */; };
2793DCAE1F08095F00A84290 /* WritableToken.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 2793DCA31F08095F00A84290 /* WritableToken.cpp */; };
2793DCAF1F08095F00A84290 /* WritableToken.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 2793DCA31F08095F00A84290 /* WritableToken.cpp */; };
2793DCB31F08099C00A84290 /* BlockStartState.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 2793DCB01F08099C00A84290 /* BlockStartState.cpp */; };
2793DCB41F08099C00A84290 /* BlockStartState.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 2793DCB01F08099C00A84290 /* BlockStartState.cpp */; };
2793DCB51F08099C00A84290 /* BlockStartState.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 2793DCB01F08099C00A84290 /* BlockStartState.cpp */; };
2793DCB61F08099C00A84290 /* LexerAction.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 2793DCB11F08099C00A84290 /* LexerAction.cpp */; };
2793DCB71F08099C00A84290 /* LexerAction.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 2793DCB11F08099C00A84290 /* LexerAction.cpp */; };
2793DCB81F08099C00A84290 /* LexerAction.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 2793DCB11F08099C00A84290 /* LexerAction.cpp */; };
2794D8561CE7821B00FADD0F /* antlr4-common.h in Headers */ = {isa = PBXBuildFile; fileRef = 2794D8551CE7821B00FADD0F /* antlr4-common.h */; };
2794D8571CE7821B00FADD0F /* antlr4-common.h in Headers */ = {isa = PBXBuildFile; fileRef = 2794D8551CE7821B00FADD0F /* antlr4-common.h */; };
2794D8581CE7821B00FADD0F /* antlr4-common.h in Headers */ = {isa = PBXBuildFile; fileRef = 2794D8551CE7821B00FADD0F /* antlr4-common.h */; };
@ -1061,7 +1097,6 @@
276E5CBD1CDB57AA003FF4B4 /* InterpreterRuleContext.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = InterpreterRuleContext.h; sourceTree = "<group>"; wrapsLines = 0; };
276E5CBE1CDB57AA003FF4B4 /* IntStream.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = IntStream.cpp; sourceTree = "<group>"; };
276E5CBF1CDB57AA003FF4B4 /* IntStream.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = IntStream.h; sourceTree = "<group>"; };
276E5CC01CDB57AA003FF4B4 /* IRecognizer.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = IRecognizer.h; sourceTree = "<group>"; };
276E5CC11CDB57AA003FF4B4 /* Lexer.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = Lexer.cpp; sourceTree = "<group>"; wrapsLines = 0; };
276E5CC21CDB57AA003FF4B4 /* Lexer.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = Lexer.h; sourceTree = "<group>"; };
276E5CC31CDB57AA003FF4B4 /* LexerInterpreter.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = LexerInterpreter.cpp; sourceTree = "<group>"; wrapsLines = 0; };
@ -1152,6 +1187,19 @@
27874F1D1CCB7A0700AF1C53 /* CoreFoundation.framework */ = {isa = PBXFileReference; lastKnownFileType = wrapper.framework; name = CoreFoundation.framework; path = System/Library/Frameworks/CoreFoundation.framework; sourceTree = SDKROOT; };
278E313E1D9D6534001C28F9 /* Tests.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; path = Tests.m; sourceTree = "<group>"; };
278E31401D9D6534001C28F9 /* Info.plist */ = {isa = PBXFileReference; lastKnownFileType = text.plist.xml; path = Info.plist; sourceTree = "<group>"; };
2793DC841F08083F00A84290 /* TokenSource.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = TokenSource.cpp; sourceTree = "<group>"; };
2793DC881F08087500A84290 /* Chunk.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = Chunk.cpp; sourceTree = "<group>"; };
2793DC8C1F08088F00A84290 /* ParseTreeListener.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ParseTreeListener.cpp; sourceTree = "<group>"; };
2793DC901F0808A200A84290 /* TerminalNode.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = TerminalNode.cpp; sourceTree = "<group>"; };
2793DC941F0808E100A84290 /* ErrorNode.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ErrorNode.cpp; sourceTree = "<group>"; };
2793DC951F0808E100A84290 /* ParseTreeVisitor.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ParseTreeVisitor.cpp; sourceTree = "<group>"; };
2793DC9C1F08090D00A84290 /* Any.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = Any.cpp; sourceTree = "<group>"; };
2793DCA01F08095F00A84290 /* ANTLRErrorListener.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ANTLRErrorListener.cpp; sourceTree = "<group>"; };
2793DCA11F08095F00A84290 /* ANTLRErrorStrategy.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ANTLRErrorStrategy.cpp; sourceTree = "<group>"; };
2793DCA21F08095F00A84290 /* Token.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = Token.cpp; sourceTree = "<group>"; };
2793DCA31F08095F00A84290 /* WritableToken.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = WritableToken.cpp; sourceTree = "<group>"; };
2793DCB01F08099C00A84290 /* BlockStartState.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = BlockStartState.cpp; sourceTree = "<group>"; };
2793DCB11F08099C00A84290 /* LexerAction.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = LexerAction.cpp; sourceTree = "<group>"; };
2794D8551CE7821B00FADD0F /* antlr4-common.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "antlr4-common.h"; sourceTree = "<group>"; };
27AC52CF1CE773A80093AAAB /* antlr4-runtime.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "antlr4-runtime.h"; sourceTree = "<group>"; };
27B36AC41DACE7AF0069C868 /* RuleContextWithAltNum.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = RuleContextWithAltNum.cpp; sourceTree = "<group>"; };
@ -1230,7 +1278,9 @@
276E5CF91CDB57AA003FF4B4 /* tree */,
2794D8551CE7821B00FADD0F /* antlr4-common.h */,
27AC52CF1CE773A80093AAAB /* antlr4-runtime.h */,
2793DCA01F08095F00A84290 /* ANTLRErrorListener.cpp */,
276E5C0C1CDB57AA003FF4B4 /* ANTLRErrorListener.h */,
2793DCA11F08095F00A84290 /* ANTLRErrorStrategy.cpp */,
276E5C0D1CDB57AA003FF4B4 /* ANTLRErrorStrategy.h */,
276E5C0E1CDB57AA003FF4B4 /* ANTLRFileStream.cpp */,
276E5C0F1CDB57AA003FF4B4 /* ANTLRFileStream.h */,
@ -1266,7 +1316,6 @@
276E5CBD1CDB57AA003FF4B4 /* InterpreterRuleContext.h */,
276E5CBE1CDB57AA003FF4B4 /* IntStream.cpp */,
276E5CBF1CDB57AA003FF4B4 /* IntStream.h */,
276E5CC01CDB57AA003FF4B4 /* IRecognizer.h */,
276E5CC11CDB57AA003FF4B4 /* Lexer.cpp */,
276E5CC21CDB57AA003FF4B4 /* Lexer.h */,
276E5CC31CDB57AA003FF4B4 /* LexerInterpreter.cpp */,
@ -1295,8 +1344,10 @@
27B36AC51DACE7AF0069C868 /* RuleContextWithAltNum.h */,
27745EFB1CE49C000067C6A3 /* RuntimeMetaData.cpp */,
27745EFC1CE49C000067C6A3 /* RuntimeMetaData.h */,
2793DCA21F08095F00A84290 /* Token.cpp */,
276E5CF01CDB57AA003FF4B4 /* Token.h */,
276E5CF21CDB57AA003FF4B4 /* TokenFactory.h */,
2793DC841F08083F00A84290 /* TokenSource.cpp */,
276E5CF41CDB57AA003FF4B4 /* TokenSource.h */,
276E5CF51CDB57AA003FF4B4 /* TokenStream.cpp */,
276E5CF61CDB57AA003FF4B4 /* TokenStream.h */,
@ -1308,6 +1359,7 @@
276E5D251CDB57AA003FF4B4 /* UnbufferedTokenStream.h */,
276E5D271CDB57AA003FF4B4 /* Vocabulary.cpp */,
276E5D281CDB57AA003FF4B4 /* Vocabulary.h */,
2793DCA31F08095F00A84290 /* WritableToken.cpp */,
276E5D2A1CDB57AA003FF4B4 /* WritableToken.h */,
);
name = runtime;
@ -1350,6 +1402,7 @@
276E5C321CDB57AA003FF4B4 /* BasicState.h */,
276E5C331CDB57AA003FF4B4 /* BlockEndState.cpp */,
276E5C341CDB57AA003FF4B4 /* BlockEndState.h */,
2793DCB01F08099C00A84290 /* BlockStartState.cpp */,
276E5C351CDB57AA003FF4B4 /* BlockStartState.h */,
276E5C371CDB57AA003FF4B4 /* ContextSensitivityInfo.cpp */,
276E5C381CDB57AA003FF4B4 /* ContextSensitivityInfo.h */,
@ -1365,6 +1418,7 @@
276E5C421CDB57AA003FF4B4 /* EpsilonTransition.h */,
276E5C431CDB57AA003FF4B4 /* ErrorInfo.cpp */,
276E5C441CDB57AA003FF4B4 /* ErrorInfo.h */,
2793DCB11F08099C00A84290 /* LexerAction.cpp */,
276E5C451CDB57AA003FF4B4 /* LexerAction.h */,
276E5C461CDB57AA003FF4B4 /* LexerActionExecutor.cpp */,
276E5C471CDB57AA003FF4B4 /* LexerActionExecutor.h */,
@ -1483,6 +1537,7 @@
276E5CE41CDB57AA003FF4B4 /* support */ = {
isa = PBXGroup;
children = (
2793DC9C1F08090D00A84290 /* Any.cpp */,
27F4A8551D4CEB2A00E067EE /* Any.h */,
276E5CE51CDB57AA003FF4B4 /* Arrays.cpp */,
276E5CE61CDB57AA003FF4B4 /* Arrays.h */,
@ -1504,6 +1559,7 @@
276E5D061CDB57AA003FF4B4 /* pattern */,
27DB448A1D045537007E790B /* xpath */,
276E5CFA1CDB57AA003FF4B4 /* AbstractParseTreeVisitor.h */,
2793DC941F0808E100A84290 /* ErrorNode.cpp */,
276E5CFB1CDB57AA003FF4B4 /* ErrorNode.h */,
276E5CFC1CDB57AA003FF4B4 /* ErrorNodeImpl.cpp */,
276E5CFD1CDB57AA003FF4B4 /* ErrorNodeImpl.h */,
@ -1511,11 +1567,14 @@
27D414511DEB0D3D00D0F3F9 /* IterativeParseTreeWalker.h */,
276566DF1DA93BFB000869BE /* ParseTree.cpp */,
276E5CFE1CDB57AA003FF4B4 /* ParseTree.h */,
2793DC8C1F08088F00A84290 /* ParseTreeListener.cpp */,
276E5D001CDB57AA003FF4B4 /* ParseTreeListener.h */,
276E5D021CDB57AA003FF4B4 /* ParseTreeProperty.h */,
2793DC951F0808E100A84290 /* ParseTreeVisitor.cpp */,
276E5D031CDB57AA003FF4B4 /* ParseTreeVisitor.h */,
276E5D041CDB57AA003FF4B4 /* ParseTreeWalker.cpp */,
276E5D051CDB57AA003FF4B4 /* ParseTreeWalker.h */,
2793DC901F0808A200A84290 /* TerminalNode.cpp */,
276E5D181CDB57AA003FF4B4 /* TerminalNode.h */,
276E5D191CDB57AA003FF4B4 /* TerminalNodeImpl.cpp */,
276E5D1A1CDB57AA003FF4B4 /* TerminalNodeImpl.h */,
@ -1529,6 +1588,7 @@
isa = PBXGroup;
children = (
276E5D071CDB57AA003FF4B4 /* Chunk.h */,
2793DC881F08087500A84290 /* Chunk.cpp */,
276E5D081CDB57AA003FF4B4 /* ParseTreeMatch.cpp */,
276E5D091CDB57AA003FF4B4 /* ParseTreeMatch.h */,
276E5D0A1CDB57AA003FF4B4 /* ParseTreePattern.cpp */,
@ -1707,7 +1767,6 @@
27DB44CC1D0463DB007E790B /* XPathElement.h in Headers */,
276E5F581CDB57AA003FF4B4 /* LexerNoViableAltException.h in Headers */,
276E5D811CDB57AA003FF4B4 /* ATNSimulator.h in Headers */,
276E5F461CDB57AA003FF4B4 /* IRecognizer.h in Headers */,
27DB44B61D0463CC007E790B /* XPathLexer.h in Headers */,
276E5FC41CDB57AA003FF4B4 /* guid.h in Headers */,
276E602D1CDB57AA003FF4B4 /* TagChunk.h in Headers */,
@ -1875,7 +1934,6 @@
276E60141CDB57AA003FF4B4 /* ParseTreeMatch.h in Headers */,
276E5F571CDB57AA003FF4B4 /* LexerNoViableAltException.h in Headers */,
276E5D801CDB57AA003FF4B4 /* ATNSimulator.h in Headers */,
276E5F451CDB57AA003FF4B4 /* IRecognizer.h in Headers */,
276E5FC31CDB57AA003FF4B4 /* guid.h in Headers */,
276E602C1CDB57AA003FF4B4 /* TagChunk.h in Headers */,
276E5E941CDB57AA003FF4B4 /* RuleStopState.h in Headers */,
@ -2033,7 +2091,6 @@
276E60131CDB57AA003FF4B4 /* ParseTreeMatch.h in Headers */,
276E5F561CDB57AA003FF4B4 /* LexerNoViableAltException.h in Headers */,
276E5D7F1CDB57AA003FF4B4 /* ATNSimulator.h in Headers */,
276E5F441CDB57AA003FF4B4 /* IRecognizer.h in Headers */,
276E5FC21CDB57AA003FF4B4 /* guid.h in Headers */,
276E602B1CDB57AA003FF4B4 /* TagChunk.h in Headers */,
276E5E931CDB57AA003FF4B4 /* RuleStopState.h in Headers */,
@ -2225,10 +2282,12 @@
276E60451CDB57AA003FF4B4 /* TerminalNodeImpl.cpp in Sources */,
276E5DD21CDB57AA003FF4B4 /* ErrorInfo.cpp in Sources */,
276E5F551CDB57AA003FF4B4 /* LexerNoViableAltException.cpp in Sources */,
2793DCB81F08099C00A84290 /* LexerAction.cpp in Sources */,
276E5E561CDB57AA003FF4B4 /* PlusBlockStartState.cpp in Sources */,
276E5E1D1CDB57AA003FF4B4 /* LexerSkipAction.cpp in Sources */,
276E5EBC1CDB57AA003FF4B4 /* StarLoopEntryState.cpp in Sources */,
276E5D721CDB57AA003FF4B4 /* ATNDeserializer.cpp in Sources */,
2793DC8B1F08087500A84290 /* Chunk.cpp in Sources */,
276E5E2F1CDB57AA003FF4B4 /* LookaheadEventInfo.cpp in Sources */,
276E5DFF1CDB57AA003FF4B4 /* LexerIndexedCustomAction.cpp in Sources */,
276E60511CDB57AA003FF4B4 /* Trees.cpp in Sources */,
@ -2256,6 +2315,8 @@
276E5E921CDB57AA003FF4B4 /* RuleStopState.cpp in Sources */,
276E60631CDB57AA003FF4B4 /* UnbufferedTokenStream.cpp in Sources */,
276E5DDB1CDB57AA003FF4B4 /* LexerActionExecutor.cpp in Sources */,
2793DC981F0808E100A84290 /* ErrorNode.cpp in Sources */,
2793DCAF1F08095F00A84290 /* WritableToken.cpp in Sources */,
276E5E9E1CDB57AA003FF4B4 /* SemanticContext.cpp in Sources */,
276E5EC81CDB57AA003FF4B4 /* Transition.cpp in Sources */,
276E601E1CDB57AA003FF4B4 /* ParseTreePatternMatcher.cpp in Sources */,
@ -2263,12 +2324,15 @@
276E5D481CDB57AA003FF4B4 /* ActionTransition.cpp in Sources */,
276E5DC61CDB57AA003FF4B4 /* EmptyPredictionContext.cpp in Sources */,
276E5ED41CDB57AA003FF4B4 /* BailErrorStrategy.cpp in Sources */,
2793DC9B1F0808E100A84290 /* ParseTreeVisitor.cpp in Sources */,
2793DCAC1F08095F00A84290 /* Token.cpp in Sources */,
276E5FA31CDB57AA003FF4B4 /* Recognizer.cpp in Sources */,
276E5D6C1CDB57AA003FF4B4 /* ATNDeserializationOptions.cpp in Sources */,
276E60361CDB57AA003FF4B4 /* TokenTagToken.cpp in Sources */,
27DB44D51D0463DB007E790B /* XPathTokenElement.cpp in Sources */,
27DB44D11D0463DB007E790B /* XPathRuleElement.cpp in Sources */,
276E5DED1CDB57AA003FF4B4 /* LexerATNSimulator.cpp in Sources */,
2793DCB51F08099C00A84290 /* BlockStartState.cpp in Sources */,
276E606C1CDB57AA003FF4B4 /* Vocabulary.cpp in Sources */,
276E5F1C1CDB57AA003FF4B4 /* LexerDFASerializer.cpp in Sources */,
276E60181CDB57AA003FF4B4 /* ParseTreePattern.cpp in Sources */,
@ -2293,7 +2357,9 @@
276E5D781CDB57AA003FF4B4 /* ATNSerializer.cpp in Sources */,
27745F051CE49C000067C6A3 /* RuntimeMetaData.cpp in Sources */,
276E5DAE1CDB57AA003FF4B4 /* ContextSensitivityInfo.cpp in Sources */,
2793DCA61F08095F00A84290 /* ANTLRErrorListener.cpp in Sources */,
276E5D661CDB57AA003FF4B4 /* ATNConfigSet.cpp in Sources */,
2793DC9F1F08090D00A84290 /* Any.cpp in Sources */,
276E5FAF1CDB57AA003FF4B4 /* Arrays.cpp in Sources */,
276E5ECE1CDB57AA003FF4B4 /* WildcardTransition.cpp in Sources */,
276E5E861CDB57AA003FF4B4 /* RangeTransition.cpp in Sources */,
@ -2301,6 +2367,7 @@
276E5D9C1CDB57AA003FF4B4 /* BasicState.cpp in Sources */,
276E5FC11CDB57AA003FF4B4 /* guid.cpp in Sources */,
276E5E801CDB57AA003FF4B4 /* ProfilingATNSimulator.cpp in Sources */,
2793DCA91F08095F00A84290 /* ANTLRErrorStrategy.cpp in Sources */,
276E5F401CDB57AA003FF4B4 /* IntStream.cpp in Sources */,
276E5F5B1CDB57AA003FF4B4 /* ListTokenSource.cpp in Sources */,
276E5F6D1CDB57AA003FF4B4 /* MurmurHash.cpp in Sources */,
@ -2315,6 +2382,7 @@
27DB44CF1D0463DB007E790B /* XPathRuleAnywhereElement.cpp in Sources */,
276E5E441CDB57AA003FF4B4 /* OrderedATNConfigSet.cpp in Sources */,
276E5DCC1CDB57AA003FF4B4 /* EpsilonTransition.cpp in Sources */,
2793DC8F1F08088F00A84290 /* ParseTreeListener.cpp in Sources */,
276E5D5A1CDB57AA003FF4B4 /* ATN.cpp in Sources */,
276E5EE61CDB57AA003FF4B4 /* CharStream.cpp in Sources */,
276E5EE01CDB57AA003FF4B4 /* BufferedTokenStream.cpp in Sources */,
@ -2333,6 +2401,8 @@
276E5DC01CDB57AA003FF4B4 /* DecisionState.cpp in Sources */,
276E5E981CDB57AA003FF4B4 /* RuleTransition.cpp in Sources */,
276E5EF81CDB57AA003FF4B4 /* CommonTokenStream.cpp in Sources */,
2793DC871F08083F00A84290 /* TokenSource.cpp in Sources */,
2793DC931F0808A200A84290 /* TerminalNode.cpp in Sources */,
276E60121CDB57AA003FF4B4 /* ParseTreeMatch.cpp in Sources */,
276566E21DA93BFB000869BE /* ParseTree.cpp in Sources */,
276E5EEC1CDB57AA003FF4B4 /* CommonToken.cpp in Sources */,
@ -2365,10 +2435,12 @@
276E60441CDB57AA003FF4B4 /* TerminalNodeImpl.cpp in Sources */,
276E5DD11CDB57AA003FF4B4 /* ErrorInfo.cpp in Sources */,
276E5F541CDB57AA003FF4B4 /* LexerNoViableAltException.cpp in Sources */,
2793DCB71F08099C00A84290 /* LexerAction.cpp in Sources */,
276E5E551CDB57AA003FF4B4 /* PlusBlockStartState.cpp in Sources */,
276E5E1C1CDB57AA003FF4B4 /* LexerSkipAction.cpp in Sources */,
276E5EBB1CDB57AA003FF4B4 /* StarLoopEntryState.cpp in Sources */,
276E5D711CDB57AA003FF4B4 /* ATNDeserializer.cpp in Sources */,
2793DC8A1F08087500A84290 /* Chunk.cpp in Sources */,
276E5E2E1CDB57AA003FF4B4 /* LookaheadEventInfo.cpp in Sources */,
276E5DFE1CDB57AA003FF4B4 /* LexerIndexedCustomAction.cpp in Sources */,
276E60501CDB57AA003FF4B4 /* Trees.cpp in Sources */,
@ -2396,6 +2468,8 @@
276E5E911CDB57AA003FF4B4 /* RuleStopState.cpp in Sources */,
276E60621CDB57AA003FF4B4 /* UnbufferedTokenStream.cpp in Sources */,
276E5DDA1CDB57AA003FF4B4 /* LexerActionExecutor.cpp in Sources */,
2793DC971F0808E100A84290 /* ErrorNode.cpp in Sources */,
2793DCAE1F08095F00A84290 /* WritableToken.cpp in Sources */,
276E5E9D1CDB57AA003FF4B4 /* SemanticContext.cpp in Sources */,
276E5EC71CDB57AA003FF4B4 /* Transition.cpp in Sources */,
276E601D1CDB57AA003FF4B4 /* ParseTreePatternMatcher.cpp in Sources */,
@ -2403,12 +2477,15 @@
276E5D471CDB57AA003FF4B4 /* ActionTransition.cpp in Sources */,
276E5DC51CDB57AA003FF4B4 /* EmptyPredictionContext.cpp in Sources */,
276E5ED31CDB57AA003FF4B4 /* BailErrorStrategy.cpp in Sources */,
2793DC9A1F0808E100A84290 /* ParseTreeVisitor.cpp in Sources */,
2793DCAB1F08095F00A84290 /* Token.cpp in Sources */,
276E5FA21CDB57AA003FF4B4 /* Recognizer.cpp in Sources */,
276E5D6B1CDB57AA003FF4B4 /* ATNDeserializationOptions.cpp in Sources */,
276E60351CDB57AA003FF4B4 /* TokenTagToken.cpp in Sources */,
27DB44C31D0463DA007E790B /* XPathTokenElement.cpp in Sources */,
27DB44BF1D0463DA007E790B /* XPathRuleElement.cpp in Sources */,
276E5DEC1CDB57AA003FF4B4 /* LexerATNSimulator.cpp in Sources */,
2793DCB41F08099C00A84290 /* BlockStartState.cpp in Sources */,
276E606B1CDB57AA003FF4B4 /* Vocabulary.cpp in Sources */,
276E5F1B1CDB57AA003FF4B4 /* LexerDFASerializer.cpp in Sources */,
276E60171CDB57AA003FF4B4 /* ParseTreePattern.cpp in Sources */,
@ -2433,7 +2510,9 @@
276E5D771CDB57AA003FF4B4 /* ATNSerializer.cpp in Sources */,
27745F041CE49C000067C6A3 /* RuntimeMetaData.cpp in Sources */,
276E5DAD1CDB57AA003FF4B4 /* ContextSensitivityInfo.cpp in Sources */,
2793DCA51F08095F00A84290 /* ANTLRErrorListener.cpp in Sources */,
276E5D651CDB57AA003FF4B4 /* ATNConfigSet.cpp in Sources */,
2793DC9E1F08090D00A84290 /* Any.cpp in Sources */,
276E5FAE1CDB57AA003FF4B4 /* Arrays.cpp in Sources */,
276E5ECD1CDB57AA003FF4B4 /* WildcardTransition.cpp in Sources */,
276E5E851CDB57AA003FF4B4 /* RangeTransition.cpp in Sources */,
@ -2441,6 +2520,7 @@
276E5D9B1CDB57AA003FF4B4 /* BasicState.cpp in Sources */,
276E5FC01CDB57AA003FF4B4 /* guid.cpp in Sources */,
276E5E7F1CDB57AA003FF4B4 /* ProfilingATNSimulator.cpp in Sources */,
2793DCA81F08095F00A84290 /* ANTLRErrorStrategy.cpp in Sources */,
276E5F3F1CDB57AA003FF4B4 /* IntStream.cpp in Sources */,
276E5F5A1CDB57AA003FF4B4 /* ListTokenSource.cpp in Sources */,
276E5F6C1CDB57AA003FF4B4 /* MurmurHash.cpp in Sources */,
@ -2455,6 +2535,7 @@
27DB44BD1D0463DA007E790B /* XPathRuleAnywhereElement.cpp in Sources */,
276E5E431CDB57AA003FF4B4 /* OrderedATNConfigSet.cpp in Sources */,
276E5DCB1CDB57AA003FF4B4 /* EpsilonTransition.cpp in Sources */,
2793DC8E1F08088F00A84290 /* ParseTreeListener.cpp in Sources */,
276E5D591CDB57AA003FF4B4 /* ATN.cpp in Sources */,
276E5EE51CDB57AA003FF4B4 /* CharStream.cpp in Sources */,
276E5EDF1CDB57AA003FF4B4 /* BufferedTokenStream.cpp in Sources */,
@ -2473,6 +2554,8 @@
276E5DBF1CDB57AA003FF4B4 /* DecisionState.cpp in Sources */,
276E5E971CDB57AA003FF4B4 /* RuleTransition.cpp in Sources */,
276E5EF71CDB57AA003FF4B4 /* CommonTokenStream.cpp in Sources */,
2793DC861F08083F00A84290 /* TokenSource.cpp in Sources */,
2793DC921F0808A200A84290 /* TerminalNode.cpp in Sources */,
276E60111CDB57AA003FF4B4 /* ParseTreeMatch.cpp in Sources */,
276566E11DA93BFB000869BE /* ParseTree.cpp in Sources */,
276E5EEB1CDB57AA003FF4B4 /* CommonToken.cpp in Sources */,
@ -2505,10 +2588,12 @@
276E5DB21CDB57AA003FF4B4 /* DecisionEventInfo.cpp in Sources */,
276E60431CDB57AA003FF4B4 /* TerminalNodeImpl.cpp in Sources */,
276E5DD01CDB57AA003FF4B4 /* ErrorInfo.cpp in Sources */,
2793DCB61F08099C00A84290 /* LexerAction.cpp in Sources */,
276E5F531CDB57AA003FF4B4 /* LexerNoViableAltException.cpp in Sources */,
276E5E541CDB57AA003FF4B4 /* PlusBlockStartState.cpp in Sources */,
276E5E1B1CDB57AA003FF4B4 /* LexerSkipAction.cpp in Sources */,
276E5EBA1CDB57AA003FF4B4 /* StarLoopEntryState.cpp in Sources */,
2793DC891F08087500A84290 /* Chunk.cpp in Sources */,
276E5D701CDB57AA003FF4B4 /* ATNDeserializer.cpp in Sources */,
276E5E2D1CDB57AA003FF4B4 /* LookaheadEventInfo.cpp in Sources */,
276E5DFD1CDB57AA003FF4B4 /* LexerIndexedCustomAction.cpp in Sources */,
@ -2536,6 +2621,8 @@
276E60611CDB57AA003FF4B4 /* UnbufferedTokenStream.cpp in Sources */,
276E5DD91CDB57AA003FF4B4 /* LexerActionExecutor.cpp in Sources */,
27DB449D1D045537007E790B /* XPath.cpp in Sources */,
2793DC961F0808E100A84290 /* ErrorNode.cpp in Sources */,
2793DCAD1F08095F00A84290 /* WritableToken.cpp in Sources */,
276E5E9C1CDB57AA003FF4B4 /* SemanticContext.cpp in Sources */,
27DB44AD1D045537007E790B /* XPathWildcardElement.cpp in Sources */,
276E5EC61CDB57AA003FF4B4 /* Transition.cpp in Sources */,
@ -2543,12 +2630,15 @@
27DB44A51D045537007E790B /* XPathRuleElement.cpp in Sources */,
276E5F201CDB57AA003FF4B4 /* DiagnosticErrorListener.cpp in Sources */,
276E5D461CDB57AA003FF4B4 /* ActionTransition.cpp in Sources */,
2793DC991F0808E100A84290 /* ParseTreeVisitor.cpp in Sources */,
2793DCAA1F08095F00A84290 /* Token.cpp in Sources */,
276E5DC41CDB57AA003FF4B4 /* EmptyPredictionContext.cpp in Sources */,
276E5ED21CDB57AA003FF4B4 /* BailErrorStrategy.cpp in Sources */,
276E5FA11CDB57AA003FF4B4 /* Recognizer.cpp in Sources */,
276E5D6A1CDB57AA003FF4B4 /* ATNDeserializationOptions.cpp in Sources */,
276E60341CDB57AA003FF4B4 /* TokenTagToken.cpp in Sources */,
276E5DEB1CDB57AA003FF4B4 /* LexerATNSimulator.cpp in Sources */,
2793DCB31F08099C00A84290 /* BlockStartState.cpp in Sources */,
276E606A1CDB57AA003FF4B4 /* Vocabulary.cpp in Sources */,
276E5F1A1CDB57AA003FF4B4 /* LexerDFASerializer.cpp in Sources */,
276E60161CDB57AA003FF4B4 /* ParseTreePattern.cpp in Sources */,
@ -2573,7 +2663,9 @@
276E5D761CDB57AA003FF4B4 /* ATNSerializer.cpp in Sources */,
27745F031CE49C000067C6A3 /* RuntimeMetaData.cpp in Sources */,
276E5DAC1CDB57AA003FF4B4 /* ContextSensitivityInfo.cpp in Sources */,
2793DCA41F08095F00A84290 /* ANTLRErrorListener.cpp in Sources */,
276E5D641CDB57AA003FF4B4 /* ATNConfigSet.cpp in Sources */,
2793DC9D1F08090D00A84290 /* Any.cpp in Sources */,
276E5FAD1CDB57AA003FF4B4 /* Arrays.cpp in Sources */,
276E5ECC1CDB57AA003FF4B4 /* WildcardTransition.cpp in Sources */,
276E5E841CDB57AA003FF4B4 /* RangeTransition.cpp in Sources */,
@ -2581,6 +2673,7 @@
276E5D9A1CDB57AA003FF4B4 /* BasicState.cpp in Sources */,
276E5FBF1CDB57AA003FF4B4 /* guid.cpp in Sources */,
276E5E7E1CDB57AA003FF4B4 /* ProfilingATNSimulator.cpp in Sources */,
2793DCA71F08095F00A84290 /* ANTLRErrorStrategy.cpp in Sources */,
276E5F3E1CDB57AA003FF4B4 /* IntStream.cpp in Sources */,
276E5F591CDB57AA003FF4B4 /* ListTokenSource.cpp in Sources */,
276E5F6B1CDB57AA003FF4B4 /* MurmurHash.cpp in Sources */,
@ -2595,6 +2688,7 @@
276E5D581CDB57AA003FF4B4 /* ATN.cpp in Sources */,
276E5EE41CDB57AA003FF4B4 /* CharStream.cpp in Sources */,
27DB44AB1D045537007E790B /* XPathWildcardAnywhereElement.cpp in Sources */,
2793DC8D1F08088F00A84290 /* ParseTreeListener.cpp in Sources */,
276E5EDE1CDB57AA003FF4B4 /* BufferedTokenStream.cpp in Sources */,
276E5F021CDB57AA003FF4B4 /* DefaultErrorStrategy.cpp in Sources */,
276E5D401CDB57AA003FF4B4 /* AbstractPredicateTransition.cpp in Sources */,
@ -2613,6 +2707,8 @@
276E5DBE1CDB57AA003FF4B4 /* DecisionState.cpp in Sources */,
276E5E961CDB57AA003FF4B4 /* RuleTransition.cpp in Sources */,
276E5EF61CDB57AA003FF4B4 /* CommonTokenStream.cpp in Sources */,
2793DC851F08083F00A84290 /* TokenSource.cpp in Sources */,
2793DC911F0808A200A84290 /* TerminalNode.cpp in Sources */,
276E60101CDB57AA003FF4B4 /* ParseTreeMatch.cpp in Sources */,
276566E01DA93BFB000869BE /* ParseTree.cpp in Sources */,
276E5EEA1CDB57AA003FF4B4 /* CommonToken.cpp in Sources */,

View File

@ -1,3 +1,8 @@
/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
* Use of this file is governed by the BSD 3-clause license that
* can be found in the LICENSE.txt file in the project root.
*/
#include "ANTLRErrorListener.h"
antlr4::ANTLRErrorListener::~ANTLRErrorListener()

View File

@ -1,3 +1,8 @@
/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
* Use of this file is governed by the BSD 3-clause license that
* can be found in the LICENSE.txt file in the project root.
*/
#include "ANTLRErrorStrategy.h"
antlr4::ANTLRErrorStrategy::~ANTLRErrorStrategy()

View File

@ -27,7 +27,7 @@ namespace antlr4 {
/// </summary>
class ANTLR4CPP_PUBLIC IntStream {
public:
static const size_t EOF = std::numeric_limits<size_t>::max();
static const size_t EOF = static_cast<size_t>(-1); // std::numeric_limits<size_t>::max(); doesn't work in VS 2013
/// The value returned by <seealso cref="#LA LA()"/> when the end of the stream is
/// reached.

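The two initializers above are equivalent: signed-to-unsigned conversion is modular, so -1 becomes the largest size_t value, and the cast is simply spellable as an in-class initializer on older compilers such as VS 2013. A standalone sketch (not part of the runtime) makes the identity checkable at compile time:

#include <cstddef>
#include <limits>

// -1 converted to an unsigned type wraps to that type's maximum value, so the
// VS 2013-friendly cast names exactly the same constant as numeric_limits.
static_assert(static_cast<std::size_t>(-1) == std::numeric_limits<std::size_t>::max(),
              "EOF sentinel equivalence");

int main() { return 0; }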
View File

@ -11,7 +11,7 @@ namespace antlr4 {
class ANTLR4CPP_PUBLIC Recognizer {
public:
static const size_t EOF = std::numeric_limits<size_t>::max();
static const size_t EOF = static_cast<size_t>(-1); // std::numeric_limits<size_t>::max(); doesn't work in VS 2013.
Recognizer();
Recognizer(Recognizer const&) = delete;

View File

@ -1,3 +1,8 @@
/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
* Use of this file is governed by the BSD 3-clause license that
* can be found in the LICENSE.txt file in the project root.
*/
#include "Token.h"
antlr4::Token::~Token() {

View File

@ -18,7 +18,7 @@ namespace antlr4 {
/// During lookahead operations, this "token" signifies we hit rule end ATN state
/// and did not follow it despite needing to.
static const size_t EPSILON = std::numeric_limits<size_t>::max() - 1;
static const size_t EPSILON = static_cast<size_t>(-2);
static const size_t MIN_USER_TOKEN_TYPE = 1;
static const size_t EOF = IntStream::EOF;

View File

@ -1,3 +1,8 @@
/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
* Use of this file is governed by the BSD 3-clause license that
* can be found in the LICENSE.txt file in the project root.
*/
#include "TokenSource.h"
antlr4::TokenSource::~TokenSource() {

View File

@ -52,7 +52,7 @@ void UnbufferedCharStream::sync(size_t want) {
size_t UnbufferedCharStream::fill(size_t n) {
for (size_t i = 0; i < n; i++) {
if (_data.size() > 0 && _data.back() == (uint32_t)EOF) {
if (_data.size() > 0 && _data.back() == 0xFFFF) {
return i;
}
@ -89,23 +89,23 @@ size_t UnbufferedCharStream::LA(ssize_t i) {
}
// We can look back only as many chars as we have buffered.
ssize_t index = (ssize_t)_p + i - 1;
ssize_t index = static_cast<ssize_t>(_p) + i - 1;
if (index < 0) {
throw IndexOutOfBoundsException();
}
if (i > 0) {
sync((size_t)i); // No need to sync if we look back.
sync(static_cast<size_t>(i)); // No need to sync if we look back.
}
if ((size_t)index >= _data.size()) {
if (static_cast<size_t>(index) >= _data.size()) {
return EOF;
}
if (_data[(size_t)index] == (uint32_t)EOF) {
if (_data[static_cast<size_t>(index)] == 0xFFFF) {
return EOF;
}
return _data[(size_t)index];
return _data[static_cast<size_t>(index)];
}
ssize_t UnbufferedCharStream::mark() {
@ -113,13 +113,13 @@ ssize_t UnbufferedCharStream::mark() {
_lastCharBufferStart = _lastChar;
}
ssize_t mark = -(ssize_t)_numMarkers - 1;
ssize_t mark = -static_cast<ssize_t>(_numMarkers) - 1;
_numMarkers++;
return mark;
}
void UnbufferedCharStream::release(ssize_t marker) {
ssize_t expectedMark = -(ssize_t)_numMarkers;
ssize_t expectedMark = -static_cast<ssize_t>(_numMarkers);
if (marker != expectedMark) {
throw IllegalStateException("release() called with an invalid marker.");
}
@ -147,16 +147,16 @@ void UnbufferedCharStream::seek(size_t index) {
}
// index == to bufferStartIndex should set p to 0
ssize_t i = (ssize_t)index - (ssize_t)getBufferStartIndex();
ssize_t i = static_cast<ssize_t>(index) - static_cast<ssize_t>(getBufferStartIndex());
if (i < 0) {
throw IllegalArgumentException(std::string("cannot seek to negative index ") + std::to_string(index));
} else if (i >= (ssize_t)_data.size()) {
} else if (i >= static_cast<ssize_t>(_data.size())) {
throw UnsupportedOperationException("Seek to index outside buffer: " + std::to_string(index) +
" not in " + std::to_string(getBufferStartIndex()) + ".." +
std::to_string(getBufferStartIndex() + _data.size()));
}
_p = (size_t)i;
_p = static_cast<size_t>(i);
_currentCharIndex = index;
if (_p == 0) {
_lastChar = _lastCharBufferStart;
@ -189,7 +189,7 @@ std::string UnbufferedCharStream::getText(const misc::Interval &interval) {
}
}
if (interval.a < (ssize_t)bufferStartIndex || interval.b >= ssize_t(bufferStartIndex + _data.size())) {
if (interval.a < static_cast<ssize_t>(bufferStartIndex) || interval.b >= ssize_t(bufferStartIndex + _data.size())) {
throw UnsupportedOperationException("interval " + interval.toString() + " outside buffer: " +
std::to_string(bufferStartIndex) + ".." + std::to_string(bufferStartIndex + _data.size() - 1));
}

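The recurring pattern in this file is: widen size_t operands to a signed type before subtracting, test the sign, then cast back for indexing. A minimal sketch of that discipline (standalone; it uses long long where the runtime uses ssize_t):

#include <cstddef>
#include <stdexcept>
#include <string>

// Subtraction on size_t wraps instead of going negative, so the index is
// computed in a signed type first and only cast back once proven non-negative.
std::size_t lookBackIndex(std::size_t p, long long i) {
  long long index = static_cast<long long>(p) + i - 1;
  if (index < 0)
    throw std::out_of_range("cannot look back to index " + std::to_string(index));
  return static_cast<std::size_t>(index);  // safe: index >= 0 here
}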
View File

@ -46,17 +46,17 @@ Token* UnbufferedTokenStream::LT(ssize_t i)
}
sync(i);
ssize_t index = (ssize_t)_p + i - 1;
ssize_t index = static_cast<ssize_t>(_p) + i - 1;
if (index < 0) {
throw IndexOutOfBoundsException(std::string("LT(") + std::to_string(i) + std::string(") gives negative index"));
}
if (index >= (ssize_t)_tokens.size()) {
if (index >= static_cast<ssize_t>(_tokens.size())) {
assert(_tokens.size() > 0 && _tokens.back()->getType() == EOF);
return _tokens.back().get();
}
return _tokens[(size_t)index].get();
return _tokens[static_cast<size_t>(index)].get();
}
size_t UnbufferedTokenStream::LA(ssize_t i)
@ -113,9 +113,9 @@ void UnbufferedTokenStream::consume()
/// </summary>
void UnbufferedTokenStream::sync(ssize_t want)
{
ssize_t need = ((ssize_t)_p + want - 1) - (ssize_t)_tokens.size() + 1; // how many more elements do we need?
ssize_t need = (static_cast<ssize_t>(_p) + want - 1) - static_cast<ssize_t>(_tokens.size()) + 1; // how many more elements do we need?
if (need > 0) {
fill((size_t)need);
fill(static_cast<size_t>(need));
}
}
@ -177,7 +177,7 @@ void UnbufferedTokenStream::release(ssize_t marker)
if (_p > 0) {
// Copy tokens[p]..tokens[n-1] to tokens[0]..tokens[(n-1)-p], reset ptrs
// p is last valid token; move nothing if p==n as we have no valid token
_tokens.erase(_tokens.begin(), _tokens.begin() + (ssize_t)_p);
_tokens.erase(_tokens.begin(), _tokens.begin() + static_cast<ssize_t>(_p));
_p = 0;
}

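sync()'s arithmetic is easier to see with numbers: the highest index LT(want) touches is p + want - 1, so the buffer must grow by that index minus the last valid one. A tiny standalone check (the values are illustrative, not from the runtime):

#include <cassert>

int main() {
  long long p = 3, want = 2, bufferedTokens = 4;
  // LT(2) touches index 3 + 2 - 1 = 4; only indices 0..3 exist, so fetch 1 more.
  long long need = (p + want - 1) - bufferedTokens + 1;
  assert(need == 1);
  return 0;
}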
View File

@ -1,3 +1,8 @@
/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
* Use of this file is governed by the BSD 3-clause license that
* can be found in the LICENSE.txt file in the project root.
*/
#include "WritableToken.h"
antlr4::WritableToken::~WritableToken() {

View File

@ -63,6 +63,8 @@
typedef std::basic_string<__int32> i32string;
typedef i32string UTF32String;
#else
typedef std::u32string UTF32String;
#endif
#ifdef ANTLR4CPP_EXPORTS

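On conforming compilers the typedef above is plain std::u32string; the __int32 branch only works around VS 2013's incomplete char32_t support. A sketch of what the common case gives you (standalone, not the runtime header):

#include <string>

using UTF32String = std::u32string;  // the non-VS-2013 branch above

// One element per code point, so even multi-byte UTF-8 characters count as 1.
UTF32String snowman() { return UTF32String(1, U'\u2603'); }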
View File

@ -752,6 +752,7 @@ Ref<LexerAction> ATNDeserializer::lexerActionFactory(LexerActionType type, int d
return std::make_shared<LexerTypeAction>(data1);
default:
throw IllegalArgumentException("The specified lexer action type " + std::to_string((size_t)type) + " is not valid.");
throw IllegalArgumentException("The specified lexer action type " + std::to_string(static_cast<size_t>(type)) +
" is not valid.");
}
}

View File

@ -58,7 +58,7 @@ std::vector<size_t> ATNSerializer::serialize() {
serializeUUID(data, ATNDeserializer::SERIALIZED_UUID());
// convert grammar type to ATN const to avoid dependence on ANTLRParser
data.push_back((size_t)atn->grammarType);
data.push_back(static_cast<size_t>(atn->grammarType));
data.push_back(atn->maxTokenType);
size_t nedges = 0;
@ -288,7 +288,7 @@ std::vector<size_t> ATNSerializer::serialize() {
if (atn->grammarType == ATNType::LEXER) {
data.push_back(atn->lexerActions.size());
for (Ref<LexerAction> &action : atn->lexerActions) {
data.push_back((size_t)action->getActionType());
data.push_back(static_cast<size_t>(action->getActionType()));
switch (action->getActionType()) {
case LexerActionType::CHANNEL:
{
@ -348,7 +348,8 @@ std::vector<size_t> ATNSerializer::serialize() {
default:
throw IllegalArgumentException("The specified lexer action type " +
std::to_string((size_t)action->getActionType()) + " is not valid.");
std::to_string(static_cast<size_t>(action->getActionType())) +
" is not valid.");
}
}
}

View File

@ -77,7 +77,7 @@ namespace atn {
virtual ~ATNState();
static const size_t INITIAL_NUM_TRANSITIONS = 4;
static const size_t INVALID_STATE_NUMBER = std::numeric_limits<size_t>::max();
static const size_t INVALID_STATE_NUMBER = static_cast<size_t>(-1); // std::numeric_limits<size_t>::max();
enum {
ATN_INVALID_TYPE = 0,

View File

@ -1,3 +1,8 @@
/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
* Use of this file is governed by the BSD 3-clause license that
* can be found in the LICENSE.txt file in the project root.
*/
#include "BlockStartState.h"
antlr4::atn::BlockStartState::~BlockStartState() {

View File

@ -144,12 +144,12 @@ void LL1Analyzer::_LOOK(ATNState *s, ATNState *stopState, Ref<PredictionContext>
} else if (t->isEpsilon()) {
_LOOK(t->target, stopState, ctx, look, lookBusy, calledRuleStack, seeThruPreds, addEOF);
} else if (t->getSerializationType() == Transition::WILDCARD) {
look.addAll(misc::IntervalSet::of(Token::MIN_USER_TOKEN_TYPE, (ssize_t)_atn.maxTokenType));
look.addAll(misc::IntervalSet::of(Token::MIN_USER_TOKEN_TYPE, static_cast<ssize_t>(_atn.maxTokenType)));
} else {
misc::IntervalSet set = t->label();
if (!set.isEmpty()) {
if (is<NotSetTransition*>(t)) {
set = set.complement(misc::IntervalSet::of(Token::MIN_USER_TOKEN_TYPE, (ssize_t)_atn.maxTokenType));
set = set.complement(misc::IntervalSet::of(Token::MIN_USER_TOKEN_TYPE, static_cast<ssize_t>(_atn.maxTokenType)));
}
look.addAll(set);
}

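In the branch above, a wildcard transition contributes the whole user token range, and a NotSetTransition contributes its label's complement within that same range. A hedged sketch of the wildcard case (it assumes the runtime's misc::IntervalSet::of(a, b) overload; maxTokenType is a stand-in parameter):

#include "antlr4-runtime.h"

// Wildcard lookahead: every user-defined token type can follow, i.e. the
// interval [MIN_USER_TOKEN_TYPE, maxTokenType] in IntervalSet terms.
antlr4::misc::IntervalSet wildcardLook(std::size_t maxTokenType) {
  return antlr4::misc::IntervalSet::of(antlr4::Token::MIN_USER_TOKEN_TYPE,
                                       static_cast<ssize_t>(maxTokenType));
}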
View File

@ -1,3 +1,8 @@
/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
* Use of this file is governed by the BSD 3-clause license that
* can be found in the LICENSE.txt file in the project root.
*/
#include "LexerAction.h"
antlr4::atn::LexerAction::~LexerAction() {

View File

@ -32,7 +32,7 @@ void LexerChannelAction::execute(Lexer *lexer) {
size_t LexerChannelAction::hashCode() const {
size_t hash = MurmurHash::initialize();
hash = MurmurHash::update(hash, (size_t)getActionType());
hash = MurmurHash::update(hash, static_cast<size_t>(getActionType()));
hash = MurmurHash::update(hash, _channel);
return MurmurHash::finish(hash, 2);
}

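Each lexer action's hashCode follows the same initialize/update/finish cadence; only the fields folded in differ. A sketch with simplified stand-ins (the real hash lives in support/MurmurHash.h; these demo functions only illustrate the call shape, not the actual algorithm):

#include <cstddef>

namespace demo {
  std::size_t initialize() { return 0; }
  std::size_t update(std::size_t hash, std::size_t value) { return hash * 31 + value; }
  std::size_t finish(std::size_t hash, std::size_t count) { return hash ^ count; }
}

std::size_t channelActionHash(std::size_t actionType, std::size_t channel) {
  std::size_t hash = demo::initialize();
  hash = demo::update(hash, actionType);  // static_cast<size_t>(getActionType())
  hash = demo::update(hash, channel);     // _channel
  return demo::finish(hash, 2);           // two fields were folded in
}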
View File

@ -38,7 +38,7 @@ void LexerCustomAction::execute(Lexer *lexer) {
size_t LexerCustomAction::hashCode() const {
size_t hash = MurmurHash::initialize();
hash = MurmurHash::update(hash, (size_t)getActionType());
hash = MurmurHash::update(hash, static_cast<size_t>(getActionType()));
hash = MurmurHash::update(hash, _ruleIndex);
hash = MurmurHash::update(hash, _actionIndex);
return MurmurHash::finish(hash, 3);

View File

@ -33,7 +33,7 @@ void LexerModeAction::execute(Lexer *lexer) {
size_t LexerModeAction::hashCode() const {
size_t hash = MurmurHash::initialize();
hash = MurmurHash::update(hash, (size_t)getActionType());
hash = MurmurHash::update(hash, static_cast<size_t>(getActionType()));
hash = MurmurHash::update(hash, _mode);
return MurmurHash::finish(hash, 2);
}

View File

@ -34,7 +34,7 @@ void LexerMoreAction::execute(Lexer *lexer) {
size_t LexerMoreAction::hashCode() const {
size_t hash = MurmurHash::initialize();
hash = MurmurHash::update(hash, (size_t)getActionType());
hash = MurmurHash::update(hash, static_cast<size_t>(getActionType()));
return MurmurHash::finish(hash, 1);
}

View File

@ -34,7 +34,7 @@ void LexerPopModeAction::execute(Lexer *lexer) {
size_t LexerPopModeAction::hashCode() const {
size_t hash = MurmurHash::initialize();
hash = MurmurHash::update(hash, (size_t)getActionType());
hash = MurmurHash::update(hash, static_cast<size_t>(getActionType()));
return MurmurHash::finish(hash, 1);
}

View File

@ -33,7 +33,7 @@ void LexerPushModeAction::execute(Lexer *lexer) {
size_t LexerPushModeAction::hashCode() const {
size_t hash = MurmurHash::initialize();
hash = MurmurHash::update(hash, (size_t)getActionType());
hash = MurmurHash::update(hash, static_cast<size_t>(getActionType()));
hash = MurmurHash::update(hash, _mode);
return MurmurHash::finish(hash, 2);
}

View File

@ -34,7 +34,7 @@ void LexerSkipAction::execute(Lexer *lexer) {
size_t LexerSkipAction::hashCode() const {
size_t hash = MurmurHash::initialize();
hash = MurmurHash::update(hash, (size_t)getActionType());
hash = MurmurHash::update(hash, static_cast<size_t>(getActionType()));
return MurmurHash::finish(hash, 1);
}

View File

@ -33,7 +33,7 @@ void LexerTypeAction::execute(Lexer *lexer) {
size_t LexerTypeAction::hashCode() const {
size_t hash = MurmurHash::initialize();
hash = MurmurHash::update(hash, (size_t)getActionType());
hash = MurmurHash::update(hash, static_cast<size_t>(getActionType()));
hash = MurmurHash::update(hash, _type);
return MurmurHash::finish(hash, 2);
}

View File

@ -184,7 +184,7 @@ size_t ParserATNSimulator::execATN(dfa::DFA &dfa, dfa::DFAState *s0, TokenStream
throw e;
}
if (D->requiresFullContext && mode != PredictionMode::SLL) {
if (D->requiresFullContext && _mode != PredictionMode::SLL) {
// IF PREDS, MIGHT RESOLVE TO SINGLE ALT => SLL (or syntax error)
BitSet conflictingAlts;
if (D->predicates.size() != 0) {
@ -283,7 +283,7 @@ dfa::DFAState *ParserATNSimulator::computeTargetState(dfa::DFA &dfa, dfa::DFASta
D->isAcceptState = true;
D->configs->uniqueAlt = predictedAlt;
D->prediction = predictedAlt;
} else if (PredictionModeClass::hasSLLConflictTerminatingPrediction(mode, D->configs.get())) {
} else if (PredictionModeClass::hasSLLConflictTerminatingPrediction(_mode, D->configs.get())) {
// MORE THAN ONE VIABLE ALTERNATIVE
D->configs->conflictingAlts = getConflictingAlts(D->configs.get());
D->requiresFullContext = true;
@ -370,7 +370,7 @@ size_t ParserATNSimulator::execATNWithFullContext(dfa::DFA &dfa, dfa::DFAState *
predictedAlt = reach->uniqueAlt;
break;
}
if (mode != PredictionMode::LL_EXACT_AMBIG_DETECTION) {
if (_mode != PredictionMode::LL_EXACT_AMBIG_DETECTION) {
predictedAlt = PredictionModeClass::resolvesToJustOneViableAlt(altSubSets);
if (predictedAlt != ATN::INVALID_ALT_NUMBER) {
break;
@ -1332,11 +1332,11 @@ void ParserATNSimulator::reportAmbiguity(dfa::DFA &dfa, dfa::DFAState * /*D*/, s
}
void ParserATNSimulator::setPredictionMode(PredictionMode newMode) {
mode = newMode;
_mode = newMode;
}
atn::PredictionMode ParserATNSimulator::getPredictionMode() {
return mode;
return _mode;
}
Parser* ParserATNSimulator::getParser() {
@ -1352,6 +1352,6 @@ bool ParserATNSimulator::getLrLoopSetting() {
}
void ParserATNSimulator::InitializeInstanceFields() {
mode = PredictionMode::LL;
_mode = PredictionMode::LL;
_startIndex = 0;
}

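The rename only touches the private field; callers still go through setPredictionMode/getPredictionMode. A hedged usage sketch (it assumes Recognizer's templated getInterpreter accessor and a generated parser):

#include "antlr4-runtime.h"

// Switch a parser to plain SLL prediction; _mode is private, so the setter is
// the supported way to change it.
void useSLL(antlr4::Parser &parser) {
  parser.getInterpreter<antlr4::atn::ParserATNSimulator>()
      ->setPredictionMode(antlr4::atn::PredictionMode::SLL);
}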
View File

@ -243,20 +243,133 @@ namespace atn {
* the input.</p>
*/
class ANTLR4CPP_PUBLIC ParserATNSimulator : public ATNSimulator {
protected:
Parser *const parser;
public:
/// Testing only!
ParserATNSimulator(const ATN &atn, std::vector<dfa::DFA> &decisionToDFA,
PredictionContextCache &sharedContextCache);
ParserATNSimulator(Parser *parser, const ATN &atn, std::vector<dfa::DFA> &decisionToDFA,
PredictionContextCache &sharedContextCache);
virtual void reset() override;
virtual void clearDFA() override;
virtual size_t adaptivePredict(TokenStream *input, size_t decision, ParserRuleContext *outerContext);
static const bool TURN_OFF_LR_LOOP_ENTRY_BRANCH_OPT;
std::vector<dfa::DFA> &decisionToDFA;
/** Implements first-edge (loop entry) elimination as an optimization
* during closure operations. See antlr/antlr4#1398.
*
* The optimization is to avoid adding the loop entry config when
* the exit path can only lead back to the same
* StarLoopEntryState after popping context at the rule end state
* (traversing only epsilon edges, so we're still in closure, in
* this same rule).
*
* We need to detect any state that can reach loop entry on
* epsilon w/o exiting rule. We don't have to look at FOLLOW
* links, just ensure that all stack tops for config refer to key
* states in LR rule.
*
* To verify we are in the right situation we must first check
* closure is at a StarLoopEntryState generated during LR removal.
* Then we check that each stack top of context is a return state
* from one of these cases:
*
* 1. 'not' expr, '(' type ')' expr. The return state points at loop entry state
* 2. expr op expr. The return state is the block end of internal block of (...)*
* 3. 'between' expr 'and' expr. The return state of 2nd expr reference.
* That state points at block end of internal block of (...)*.
* 4. expr '?' expr ':' expr. The return state points at block end,
* which points at loop entry state.
*
* If any is true for each stack top, then closure does not add a
* config to the current config set for edge[0], the loop entry branch.
*
* Conditions fail if any context for the current config is:
*
* a. empty (we'd fall out of expr to do a global FOLLOW which could
* even be to some weird spot in expr) or,
* b. lies outside of expr or,
* c. lies within expr but at a state not the BlockEndState
* generated during LR removal
*
* Do we need to evaluate predicates ever in closure for this case?
*
* No. Predicates, including precedence predicates, are only
* evaluated when computing a DFA start state. I.e., only before
* the lookahead (but not parser) consumes a token.
*
* There are no epsilon edges allowed in LR rule alt blocks or in
* the "primary" part (ID here). If closure is in
* StarLoopEntryState any lookahead operation will have consumed a
* token as there are no epsilon-paths that lead to
* StarLoopEntryState. We do not have to evaluate predicates
* therefore if we are in the generated StarLoopEntryState of a LR
* rule. Note that when making a prediction starting at that
* decision point, decision d=2, compute-start-state performs
* closure starting at edges[0], edges[1] emanating from
* StarLoopEntryState. That means it is not performing closure on
* StarLoopEntryState during compute-start-state.
*
* How do we know this always gives same prediction answer?
*
* Without predicates, loop entry and exit paths are ambiguous
* upon remaining input +b (in, say, a+b). Either path leads to
* a valid parse. Closure can lead to consuming + immediately or by
* falling out of this call to expr back into expr and loop back
* again to StarLoopEntryState to match +b. In this special case,
* we choose the more efficient path, which is to take the bypass
* path.
*
* The lookahead language has not changed because closure chooses
* one path over the other. Both paths lead to consuming the same
* remaining input during a lookahead operation. If the next token
* is an operator, lookahead will enter the choice block with
* operators. If it is not, lookahead will exit expr. Same as if
* closure had chosen to enter the choice block immediately.
*
* Closure is examining one config (some loopentrystate, some alt,
* context) which means it is considering exactly one alt. Closure
* always copies the same alt to any derived configs.
*
* How do we know this optimization doesn't mess up precedence in
* our parse trees?
*
* Looking through expr from left edge of stat only has to confirm
* that an input, say, a+b+c; begins with any valid interpretation
* of an expression. The precedence actually doesn't matter when
* making a decision in stat seeing through expr. It is only when
* parsing rule expr that we must use the precedence to get the
* right interpretation and, hence, parse tree.
*/
bool canDropLoopEntryEdgeInLeftRecursiveRule(ATNConfig *config) const;
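For context, the rule shape this optimization targets is a left-recursive expression rule along the lines of `expr : expr '+' expr | ID ;` (a hypothetical example, not from this file): after left-recursion removal, ID is the "primary" alternative mentioned above and the operator alternatives become the internal (...)* block whose StarLoopEntryState this method examines.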
virtual std::string getRuleName(size_t index);
virtual Ref<ATNConfig> precedenceTransition(Ref<ATNConfig> const& config, PrecedencePredicateTransition *pt,
bool collectPredicates, bool inContext, bool fullCtx);
void setPredictionMode(PredictionMode newMode);
PredictionMode getPredictionMode();
Parser* getParser();
virtual std::string getTokenName(size_t t);
virtual std::string getLookaheadName(TokenStream *input);
private:
/// <summary>
/// SLL, LL, or LL + exact ambig detection? </summary>
PredictionMode mode;
/// <summary>
/// Used for debugging in adaptivePredict around execATN but I cut
/// it out for clarity now that alg. works well. We can leave this
/// "dead" code for a bit.
/// </summary>
virtual void dumpDeadEndConfigs(NoViableAltException &nvae);
protected:
Parser *const parser;
/// <summary>
/// Each prediction operation uses a cache for merge of prediction contexts.
/// Don't keep around as it wastes huge amounts of memory. The merge cache
@ -273,20 +386,7 @@ namespace atn {
size_t _startIndex;
ParserRuleContext *_outerContext;
dfa::DFA *_dfa; // Reference into the decisionToDFA vector.
public:
/// Testing only!
ParserATNSimulator(const ATN &atn, std::vector<dfa::DFA> &decisionToDFA,
PredictionContextCache &sharedContextCache);
ParserATNSimulator(Parser *parser, const ATN &atn, std::vector<dfa::DFA> &decisionToDFA,
PredictionContextCache &sharedContextCache);
virtual void reset() override;
virtual void clearDFA() override;
virtual size_t adaptivePredict(TokenStream *input, size_t decision, ParserRuleContext *outerContext);
protected:
/// <summary>
/// Performs ATN simulation to compute a predicted alternative based
/// upon the remaining input, but also updates the DFA cache to avoid
@ -350,7 +450,7 @@ namespace atn {
// comes back with reach.uniqueAlt set to a valid alt
virtual size_t execATNWithFullContext(dfa::DFA &dfa, dfa::DFAState *D, ATNConfigSet *s0,
TokenStream *input, size_t startIndex, ParserRuleContext *outerContext); // how far we got before failing over
virtual std::unique_ptr<ATNConfigSet> computeReachSet(ATNConfigSet *closure, size_t t, bool fullCtx);
@ -549,10 +649,10 @@ namespace atn {
virtual ATNState *getReachableTarget(Transition *trans, size_t ttype);
virtual std::vector<Ref<SemanticContext>> getPredsForAmbigAlts(const antlrcpp::BitSet &ambigAlts,
ATNConfigSet *configs, size_t nalts);
virtual std::vector<dfa::DFAState::PredPrediction*> getPredicatePredictions(const antlrcpp::BitSet &ambigAlts,
std::vector<Ref<SemanticContext>> altToPred);
/**
* This method is used to improve the localization of error messages by
@ -601,7 +701,7 @@ namespace atn {
* identified and {@link #adaptivePredict} should report an error instead.
*/
size_t getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule(ATNConfigSet *configs,
ParserRuleContext *outerContext);
virtual size_t getAltThatFinishedDecisionEntryRule(ATNConfigSet *configs);
@ -615,7 +715,7 @@ namespace atn {
* prediction, which is where predicates need to evaluate.
*/
std::pair<ATNConfigSet *, ATNConfigSet *> splitAccordingToSemanticValidity(ATNConfigSet *configs,
ParserRuleContext *outerContext);
/// <summary>
/// Look through a list of predicate/alt pairs, returning alts for the
@ -627,7 +727,6 @@ namespace atn {
virtual antlrcpp::BitSet evalSemanticContext(std::vector<dfa::DFAState::PredPrediction*> predPredictions,
ParserRuleContext *outerContext, bool complete);
/**
* Evaluate a semantic context within a specific parser context.
*
@ -672,111 +771,15 @@ namespace atn {
virtual void closureCheckingStopState(Ref<ATNConfig> const& config, ATNConfigSet *configs, ATNConfig::Set &closureBusy,
bool collectPredicates, bool fullCtx, int depth, bool treatEofAsEpsilon);
/// Do the actual work of walking epsilon edges.
virtual void closure_(Ref<ATNConfig> const& config, ATNConfigSet *configs, ATNConfig::Set &closureBusy,
bool collectPredicates, bool fullCtx, int depth, bool treatEofAsEpsilon);
public:
/** Implements first-edge (loop entry) elimination as an optimization
* during closure operations. See antlr/antlr4#1398.
*
* The optimization is to avoid adding the loop entry config when
* the exit path can only lead back to the same
* StarLoopEntryState after popping context at the rule end state
* (traversing only epsilon edges, so we're still in closure, in
* this same rule).
*
* We need to detect any state that can reach loop entry on
* epsilon w/o exiting rule. We don't have to look at FOLLOW
* links, just ensure that all stack tops for config refer to key
* states in LR rule.
*
* To verify we are in the right situation we must first check
* closure is at a StarLoopEntryState generated during LR removal.
* Then we check that each stack top of context is a return state
* from one of these cases:
*
* 1. 'not' expr, '(' type ')' expr. The return state points at loop entry state
* 2. expr op expr. The return state is the block end of internal block of (...)*
* 3. 'between' expr 'and' expr. The return state of 2nd expr reference.
* That state points at block end of internal block of (...)*.
* 4. expr '?' expr ':' expr. The return state points at block end,
* which points at loop entry state.
*
* If any is true for each stack top, then closure does not add a
* config to the current config set for edge[0], the loop entry branch.
*
* Conditions fail if any context for the current config is:
*
* a. empty (we'd fall out of expr to do a global FOLLOW which could
* even be to some weird spot in expr) or,
* b. lies outside of expr or,
* c. lies within expr but at a state not the BlockEndState
* generated during LR removal
*
* Do we need to evaluate predicates ever in closure for this case?
*
* No. Predicates, including precedence predicates, are only
* evaluated when computing a DFA start state. I.e., only before
* the lookahead (but not parser) consumes a token.
*
* There are no epsilon edges allowed in LR rule alt blocks or in
* the "primary" part (ID here). If closure is in
* StarLoopEntryState any lookahead operation will have consumed a
* token as there are no epsilon-paths that lead to
* StarLoopEntryState. We do not have to evaluate predicates
* therefore if we are in the generated StarLoopEntryState of a LR
* rule. Note that when making a prediction starting at that
* decision point, decision d=2, compute-start-state performs
* closure starting at edges[0], edges[1] emanating from
* StarLoopEntryState. That means it is not performing closure on
* StarLoopEntryState during compute-start-state.
*
* How do we know this always gives same prediction answer?
*
* Without predicates, loop entry and exit paths are ambiguous
* upon remaining input +b (in, say, a+b). Either path leads to
* a valid parse. Closure can lead to consuming + immediately or by
* falling out of this call to expr back into expr and loop back
* again to StarLoopEntryState to match +b. In this special case,
* we choose the more efficient path, which is to take the bypass
* path.
*
* The lookahead language has not changed because closure chooses
* one path over the other. Both paths lead to consuming the same
* remaining input during a lookahead operation. If the next token
* is an operator, lookahead will enter the choice block with
* operators. If it is not, lookahead will exit expr. Same as if
* closure had chosen to enter the choice block immediately.
*
* Closure is examining one config (some loopentrystate, some alt,
* context) which means it is considering exactly one alt. Closure
* always copies the same alt to any derived configs.
*
* How do we know this optimization doesn't mess up precedence in
* our parse trees?
*
* Looking through expr from left edge of stat only has to confirm
* that an input, say, a+b+c; begins with any valid interpretation
* of an expression. The precedence actually doesn't matter when
* making a decision in stat seeing through expr. It is only when
* parsing rule expr that we must use the precedence to get the
* right interpretation and, hence, parse tree.
*/
bool canDropLoopEntryEdgeInLeftRecursiveRule(ATNConfig *config) const;
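To make the four cases above concrete, here is a hedged, standalone sketch (in Go, to match the runtime code later in this change) of the stack-top test; the types and the two-hop check are deliberate simplifications of what canDropLoopEntryEdgeInLeftRecursiveRule actually does (which also verifies that each return state has exactly one outgoing epsilon edge), not the runtime's API:

```go
package main

import "fmt"

type stateKind int

const (
	starLoopEntry stateKind = iota
	blockEnd
	other
)

type state struct {
	kind   stateKind
	target *state // the single epsilon-edge target, if any
}

// stackTopsAreKeyStates mirrors failure conditions (a)-(c): an empty context
// or any stack top that cannot reach the loop entry again on pure epsilon
// edges (cases 1-4 above) blocks the optimization.
func stackTopsAreKeyStates(loopEntry *state, stackTops []*state) bool {
	if len(stackTops) == 0 {
		return false // (a): we'd fall out of the rule to a global FOLLOW
	}
	for _, top := range stackTops {
		switch {
		case top.target == loopEntry:
			// cases 1-3: the return state's epsilon edge leads straight
			// back to the loop entry
		case top.target != nil && top.target.kind == blockEnd &&
			top.target.target == loopEntry:
			// case 4: the return state points at a block end, which in
			// turn points at the loop entry
		default:
			return false // (b)/(c): not a key state of the LR rule
		}
	}
	return true
}

func main() {
	entry := &state{kind: starLoopEntry}
	end := &state{kind: blockEnd, target: entry}
	fmt.Println(stackTopsAreKeyStates(entry, []*state{end}))           // true
	fmt.Println(stackTopsAreKeyStates(entry, []*state{{kind: other}})) // false
}
```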
virtual std::string getRuleName(size_t index);
protected:
virtual Ref<ATNConfig> getEpsilonTarget(Ref<ATNConfig> const& config, Transition *t, bool collectPredicates,
bool inContext, bool fullCtx, bool treatEofAsEpsilon);
virtual Ref<ATNConfig> actionTransition(Ref<ATNConfig> const& config, ActionTransition *t);
public:
virtual Ref<ATNConfig> precedenceTransition(Ref<ATNConfig> const& config, PrecedencePredicateTransition *pt,
bool collectPredicates, bool inContext, bool fullCtx);
protected:
virtual Ref<ATNConfig> predTransition(Ref<ATNConfig> const& config, PredicateTransition *pt, bool collectPredicates,
bool inContext, bool fullCtx);
@ -832,19 +835,6 @@ namespace atn {
virtual antlrcpp::BitSet getConflictingAltsOrUniqueAlt(ATNConfigSet *configs);
public:
virtual std::string getTokenName(size_t t);
virtual std::string getLookaheadName(TokenStream *input);
/// <summary>
/// Used for debugging in adaptivePredict around execATN but I cut
/// it out for clarity now that alg. works well. We can leave this
/// "dead" code for a bit.
/// </summary>
virtual void dumpDeadEndConfigs(NoViableAltException &nvae);
protected:
virtual NoViableAltException noViableAlt(TokenStream *input, ParserRuleContext *outerContext,
ATNConfigSet *configs, size_t startIndex);
@ -901,13 +891,10 @@ namespace atn {
const antlrcpp::BitSet &ambigAlts,
ATNConfigSet *configs); // configs that LL not SLL considered conflicting
public:
void setPredictionMode(PredictionMode newMode);
PredictionMode getPredictionMode();
Parser* getParser();
private:
// SLL, LL, or LL + exact ambig detection?
PredictionMode _mode;
static bool getLrLoopSetting();
void InitializeInstanceFields();
};

View File

@ -17,7 +17,6 @@ namespace atn {
class PredictionContextMergeCache;
typedef std::unordered_set<Ref<PredictionContext>, PredictionContextHasher, PredictionContextComparer> PredictionContextCache;
//typedef std::map<std::pair<Ref<PredictionContext>, Ref<PredictionContext>>, Ref<PredictionContext>> PredictionContextMergeCache;
class ANTLR4CPP_PUBLIC PredictionContext {
public:
@ -28,10 +27,10 @@ namespace atn {
/// Represents $ in an array in full context mode, when $
/// doesn't mean wildcard: $ + x = [$,x]. Here,
/// $ = EMPTY_RETURN_STATE.
// ml: originally Integer.MAX_VALUE, which would be (size_t)-1 for us, but this is already used in places where
// ml: originally Integer.MAX_VALUE, which would be -1 for us, but this is already used in places where
// -1 is converted to unsigned, so we use a different value here. Any value does the job provided it doesn't
// conflict with real return states.
static const size_t EMPTY_RETURN_STATE = std::numeric_limits<size_t>::max() - 9;
static const size_t EMPTY_RETURN_STATE = static_cast<size_t>(-10); // std::numeric_limits<size_t>::max() - 9;
private:
static const size_t INITIAL_HASH = 1;

View File

@ -15,7 +15,7 @@ namespace atn {
* utility methods for analyzing configuration sets for conflicts and/or
* ambiguities.
*/
enum class ANTLR4CPP_PUBLIC PredictionMode {
enum class PredictionMode {
/**
* The SLL(*) prediction mode. This prediction mode ignores the current
* parser context when making predictions. This is the fastest prediction

View File

@ -82,7 +82,7 @@ int SemanticContext::PrecedencePredicate::compareTo(PrecedencePredicate *o) {
size_t SemanticContext::PrecedencePredicate::hashCode() const {
size_t hashCode = 1;
hashCode = 31 * hashCode + (size_t)precedence;
hashCode = 31 * hashCode + static_cast<size_t>(precedence);
return hashCode;
}

View File

@ -10,16 +10,16 @@ using namespace antlr4::misc;
Interval::~Interval() = default;
size_t antlr4::misc::numericToSymbol(ssize_t v) {
return (size_t)v;
return static_cast<size_t>(v);
}
ssize_t antlr4::misc::symbolToNumeric(size_t v) {
return (ssize_t)v;
return static_cast<ssize_t>(v);
}
Interval const Interval::INVALID;
Interval::Interval() : Interval((ssize_t)-1, -2) { // Need an explicit cast here for VS.
Interval::Interval() : Interval(static_cast<ssize_t>(-1), -2) { // Need an explicit cast here for VS.
}
Interval::Interval(size_t a_, size_t b_) : Interval(symbolToNumeric(a_), symbolToNumeric(b_)) {
@ -41,8 +41,8 @@ bool Interval::operator == (const Interval &other) const {
size_t Interval::hashCode() const {
size_t hash = 23;
hash = hash * 31 + (size_t)a;
hash = hash * 31 + (size_t)b;
hash = hash * 31 + static_cast<size_t>(a);
hash = hash * 31 + static_cast<size_t>(b);
return hash;
}

View File

@ -1,9 +1,16 @@
/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
* Use of this file is governed by the BSD 3-clause license that
* can be found in the LICENSE.txt file in the project root.
*/
#include "Any.h"
antlrcpp::Any::~Any()
using namespace antlrcpp;
Any::~Any()
{
delete _ptr;
}
antlrcpp::Any::Base::~Base() {
Any::Base::~Base() {
}

View File

@ -19,7 +19,7 @@ namespace antlrcpp {
template<class T>
using StorageType = typename std::decay<T>::type;
struct Any
struct ANTLR4CPP_PUBLIC Any
{
bool isNull() const { return _ptr == nullptr; }
bool isNotNull() const { return _ptr != nullptr; }

View File

@ -1,3 +1,8 @@
/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
* Use of this file is governed by the BSD 3-clause license that
* can be found in the LICENSE.txt file in the project root.
*/
#include "tree/ErrorNode.h"
antlr4::tree::ErrorNode::~ErrorNode() {

View File

@ -1,31 +1,6 @@
/*
* [The "BSD license"]
* Copyright (c) 2012 Terence Parr
* Copyright (c) 2012 Sam Harwell
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
* Use of this file is governed by the BSD 3-clause license that
* can be found in the LICENSE.txt file in the project root.
*/
#include "support/CPPUtils.h"

View File

@ -1,3 +1,8 @@
/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
* Use of this file is governed by the BSD 3-clause license that
* can be found in the LICENSE.txt file in the project root.
*/
#include "ParseTreeListener.h"
antlr4::tree::ParseTreeListener::~ParseTreeListener() {

View File

@ -1,3 +1,8 @@
/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
* Use of this file is governed by the BSD 3-clause license that
* can be found in the LICENSE.txt file in the project root.
*/
#include "ParseTreeVisitor.h"
antlr4::tree::ParseTreeVisitor::~ParseTreeVisitor() {

View File

@ -1,3 +1,8 @@
/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
* Use of this file is governed by the BSD 3-clause license that
* can be found in the LICENSE.txt file in the project root.
*/
#include "tree/TerminalNode.h"
antlr4::tree::TerminalNode::~TerminalNode() {

View File

@ -1,3 +1,8 @@
/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
* Use of this file is governed by the BSD 3-clause license that
* can be found in the LICENSE.txt file in the project root.
*/
#include "tree/pattern/Chunk.h"
antlr4::tree::pattern::Chunk::~Chunk() {

View File

@ -337,8 +337,8 @@ func (c *CommonTokenStream) GetTextFromInterval(interval *Interval) string {
interval = NewInterval(0, len(c.tokens)-1)
}
start := interval.start
stop := interval.stop
start := interval.Start
stop := interval.Stop
if start < 0 || stop < 0 {
return ""

View File

@ -0,0 +1,154 @@
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.
package antlr
import (
"testing"
)
type commonTokenStreamTestLexer struct {
*BaseLexer
tokens []Token
i int
}
func (l *commonTokenStreamTestLexer) NextToken() Token {
tmp := l.tokens[l.i]
l.i++
return tmp
}
func TestCommonTokenStreamOffChannel(t *testing.T) {
assert := assertNew(t)
lexEngine := &commonTokenStreamTestLexer{
tokens: []Token{
newTestCommonToken(1, " ", LexerHidden), // 0
newTestCommonToken(1, "x", LexerDefaultTokenChannel), // 1
newTestCommonToken(1, " ", LexerHidden), // 2
newTestCommonToken(1, "=", LexerDefaultTokenChannel), // 3
newTestCommonToken(1, "34", LexerDefaultTokenChannel), // 4
newTestCommonToken(1, " ", LexerHidden), // 5
newTestCommonToken(1, " ", LexerHidden), // 6
newTestCommonToken(1, ";", LexerDefaultTokenChannel), // 7
newTestCommonToken(1, "\n", LexerHidden), // 9
newTestCommonToken(TokenEOF, "", LexerDefaultTokenChannel), // 10
},
}
tokens := NewCommonTokenStream(lexEngine, TokenDefaultChannel)
assert.Equal("x", tokens.LT(1).GetText()) // must skip first off channel token
tokens.Consume()
assert.Equal("=", tokens.LT(1).GetText())
assert.Equal("x", tokens.LT(-1).GetText())
tokens.Consume()
assert.Equal("34", tokens.LT(1).GetText())
assert.Equal("=", tokens.LT(-1).GetText())
tokens.Consume()
assert.Equal(";", tokens.LT(1).GetText())
assert.Equal("34", tokens.LT(-1).GetText())
tokens.Consume()
assert.Equal(TokenEOF, tokens.LT(1).GetTokenType())
assert.Equal(";", tokens.LT(-1).GetText())
assert.Equal("34", tokens.LT(-2).GetText())
assert.Equal("=", tokens.LT(-3).GetText())
assert.Equal("x", tokens.LT(-4).GetText())
}
func TestCommonTokenStreamFetchOffChannel(t *testing.T) {
assert := assertNew(t)
lexEngine := &commonTokenStreamTestLexer{
tokens: []Token{
newTestCommonToken(1, " ", LexerHidden), // 0
newTestCommonToken(1, "x", LexerDefaultTokenChannel), // 1
newTestCommonToken(1, " ", LexerHidden), // 2
newTestCommonToken(1, "=", LexerDefaultTokenChannel), // 3
newTestCommonToken(1, "34", LexerDefaultTokenChannel), // 4
newTestCommonToken(1, " ", LexerHidden), // 5
newTestCommonToken(1, " ", LexerHidden), // 6
newTestCommonToken(1, ";", LexerDefaultTokenChannel), // 7
newTestCommonToken(1, " ", LexerHidden), // 8
newTestCommonToken(1, "\n", LexerHidden), // 9
newTestCommonToken(TokenEOF, "", LexerDefaultTokenChannel), // 10
},
}
tokens := NewCommonTokenStream(lexEngine, TokenDefaultChannel)
tokens.Fill()
assert.Nil(tokens.getHiddenTokensToLeft(0, -1))
assert.Nil(tokens.getHiddenTokensToRight(0, -1))
assert.Equal("[[@0,0:0=' ',<1>,channel=1,0:-1]]", tokensToString(tokens.getHiddenTokensToLeft(1, -1)))
assert.Equal("[[@2,0:0=' ',<1>,channel=1,0:-1]]", tokensToString(tokens.getHiddenTokensToRight(1, -1)))
assert.Nil(tokens.getHiddenTokensToLeft(2, -1))
assert.Nil(tokens.getHiddenTokensToRight(2, -1))
assert.Equal("[[@2,0:0=' ',<1>,channel=1,0:-1]]", tokensToString(tokens.getHiddenTokensToLeft(3, -1)))
assert.Nil(tokens.getHiddenTokensToRight(3, -1))
assert.Nil(tokens.getHiddenTokensToLeft(4, -1))
assert.Equal("[[@5,0:0=' ',<1>,channel=1,0:-1], [@6,0:0=' ',<1>,channel=1,0:-1]]",
tokensToString(tokens.getHiddenTokensToRight(4, -1)))
assert.Nil(tokens.getHiddenTokensToLeft(5, -1))
assert.Equal("[[@6,0:0=' ',<1>,channel=1,0:-1]]",
tokensToString(tokens.getHiddenTokensToRight(5, -1)))
assert.Equal("[[@5,0:0=' ',<1>,channel=1,0:-1]]",
tokensToString(tokens.getHiddenTokensToLeft(6, -1)))
assert.Nil(tokens.getHiddenTokensToRight(6, -1))
assert.Equal("[[@5,0:0=' ',<1>,channel=1,0:-1], [@6,0:0=' ',<1>,channel=1,0:-1]]",
tokensToString(tokens.getHiddenTokensToLeft(7, -1)))
assert.Equal("[[@8,0:0=' ',<1>,channel=1,0:-1], [@9,0:0='\\n',<1>,channel=1,0:-1]]",
tokensToString(tokens.getHiddenTokensToRight(7, -1)))
assert.Nil(tokens.getHiddenTokensToLeft(8, -1))
assert.Equal("[[@9,0:0='\\n',<1>,channel=1,0:-1]]",
tokensToString(tokens.getHiddenTokensToRight(8, -1)))
assert.Equal("[[@8,0:0=' ',<1>,channel=1,0:-1]]",
tokensToString(tokens.getHiddenTokensToLeft(9, -1)))
assert.Nil(tokens.getHiddenTokensToRight(9, -1))
}
type commonTokenStreamTestLexerSingleEOF struct {
*BaseLexer
tokens []Token
i int
}
func (l *commonTokenStreamTestLexerSingleEOF) NextToken() Token {
return newTestCommonToken(TokenEOF, "", LexerDefaultTokenChannel)
}
func TestCommonTokenStreamSingleEOF(t *testing.T) {
assert := assertNew(t)
lexEngine := &commonTokenStreamTestLexerSingleEOF{}
tokens := NewCommonTokenStream(lexEngine, TokenDefaultChannel)
tokens.Fill()
assert.Equal(TokenEOF, tokens.LA(1))
assert.Equal(0, tokens.index)
assert.Equal(1, tokens.Size())
}
func TestCommonTokenStreamCannotConsumeEOF(t *testing.T) {
assert := assertNew(t)
lexEngine := &commonTokenStreamTestLexerSingleEOF{}
tokens := NewCommonTokenStream(lexEngine, TokenDefaultChannel)
tokens.Fill()
assert.Equal(TokenEOF, tokens.LA(1))
assert.Equal(0, tokens.index)
assert.Equal(1, tokens.Size())
assert.Panics(tokens.Consume)
}

View File

@ -101,7 +101,7 @@ func (is *InputStream) GetTextFromTokens(start, stop Token) string {
}
func (is *InputStream) GetTextFromInterval(i *Interval) string {
return is.GetText(i.start, i.stop)
return is.GetText(i.Start, i.Stop)
}
func (*InputStream) GetSourceName() string {

View File

@ -10,33 +10,33 @@ import (
)
type Interval struct {
start int
stop int
Start int
Stop int
}
/* Stop is not included! */
func NewInterval(start, stop int) *Interval {
i := new(Interval)
i.start = start
i.stop = stop
i.Start = start
i.Stop = stop
return i
}
func (i *Interval) contains(item int) bool {
return item >= i.start && item < i.stop
func (i *Interval) Contains(item int) bool {
return item >= i.Start && item < i.Stop
}
func (i *Interval) String() string {
if i.start == i.stop-1 {
return strconv.Itoa(i.start)
if i.Start == i.Stop-1 {
return strconv.Itoa(i.Start)
}
return strconv.Itoa(i.start) + ".." + strconv.Itoa(i.stop-1)
return strconv.Itoa(i.Start) + ".." + strconv.Itoa(i.Stop-1)
}
func (i *Interval) length() int {
return i.stop - i.start
return i.Stop - i.Start
}
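Note that in this Go port Stop is exclusive, unlike the C++ Interval earlier in this change, whose upper bound b is inclusive. A small illustrative sketch, assuming it sits in this package with fmt imported:

```go
// Sketch: NewInterval(2, 5) covers token indexes 2, 3 and 4 only.
func intervalSketch() {
	iv := NewInterval(2, 5)
	fmt.Println(iv.Contains(4)) // true
	fmt.Println(iv.Contains(5)) // false: Stop is excluded
	fmt.Println(iv.String())    // "2..4"
	fmt.Println(iv.length())    // 3
}
```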
type IntervalSet struct {
@ -59,7 +59,7 @@ func (i *IntervalSet) first() int {
return TokenInvalidType
}
return i.intervals[0].start
return i.intervals[0].Start
}
func (i *IntervalSet) addOne(v int) {
@ -78,24 +78,24 @@ func (i *IntervalSet) addInterval(v *Interval) {
// find insert pos
for k, interval := range i.intervals {
// distinct range -> insert
if v.stop < interval.start {
if v.Stop < interval.Start {
i.intervals = append(i.intervals[0:k], append([]*Interval{v}, i.intervals[k:]...)...)
return
} else if v.stop == interval.start {
i.intervals[k].start = v.start
} else if v.Stop == interval.Start {
i.intervals[k].Start = v.Start
return
} else if v.start <= interval.stop {
i.intervals[k] = NewInterval(intMin(interval.start, v.start), intMax(interval.stop, v.stop))
} else if v.Start <= interval.Stop {
i.intervals[k] = NewInterval(intMin(interval.Start, v.Start), intMax(interval.Stop, v.Stop))
// if not applying to end, merge potential overlaps
if k < len(i.intervals)-1 {
l := i.intervals[k]
r := i.intervals[k+1]
// if r contained in l
if l.stop >= r.stop {
if l.Stop >= r.Stop {
i.intervals = append(i.intervals[0:k+1], i.intervals[k+2:]...)
} else if l.stop >= r.start { // partial overlap
i.intervals[k] = NewInterval(l.start, r.stop)
} else if l.Stop >= r.Start { // partial overlap
i.intervals[k] = NewInterval(l.Start, r.Stop)
i.intervals = append(i.intervals[0:k+1], i.intervals[k+2:]...)
}
}
@ -111,7 +111,7 @@ func (i *IntervalSet) addSet(other *IntervalSet) *IntervalSet {
if other.intervals != nil {
for k := 0; k < len(other.intervals); k++ {
i2 := other.intervals[k]
i.addInterval(NewInterval(i2.start, i2.stop))
i.addInterval(NewInterval(i2.Start, i2.Stop))
}
}
return i
@ -131,7 +131,7 @@ func (i *IntervalSet) contains(item int) bool {
return false
}
for k := 0; k < len(i.intervals); k++ {
if i.intervals[k].contains(item) {
if i.intervals[k].Contains(item) {
return true
}
}
@ -149,29 +149,29 @@ func (i *IntervalSet) length() int {
}
func (i *IntervalSet) removeRange(v *Interval) {
if v.start == v.stop-1 {
i.removeOne(v.start)
if v.Start == v.Stop-1 {
i.removeOne(v.Start)
} else if i.intervals != nil {
k := 0
for n := 0; n < len(i.intervals); n++ {
ni := i.intervals[k]
// intervals are ordered
if v.stop <= ni.start {
if v.Stop <= ni.Start {
return
} else if v.start > ni.start && v.stop < ni.stop {
i.intervals[k] = NewInterval(ni.start, v.start)
x := NewInterval(v.stop, ni.stop)
} else if v.Start > ni.Start && v.Stop < ni.Stop {
i.intervals[k] = NewInterval(ni.Start, v.Start)
x := NewInterval(v.Stop, ni.Stop)
// i.intervals.splice(k, 0, x)
i.intervals = append(i.intervals[0:k], append([]*Interval{x}, i.intervals[k:]...)...)
return
} else if v.start <= ni.start && v.stop >= ni.stop {
} else if v.Start <= ni.Start && v.Stop >= ni.Stop {
// i.intervals.splice(k, 1)
i.intervals = append(i.intervals[0:k], i.intervals[k+1:]...)
k = k - 1 // need another pass
} else if v.start < ni.stop {
i.intervals[k] = NewInterval(ni.start, v.start)
} else if v.stop < ni.stop {
i.intervals[k] = NewInterval(v.stop, ni.stop)
} else if v.Start < ni.Stop {
i.intervals[k] = NewInterval(ni.Start, v.Start)
} else if v.Stop < ni.Stop {
i.intervals[k] = NewInterval(v.Stop, ni.Stop)
}
k++
}
@ -183,21 +183,21 @@ func (i *IntervalSet) removeOne(v int) {
for k := 0; k < len(i.intervals); k++ {
ki := i.intervals[k]
// intervals i ordered
if v < ki.start {
if v < ki.Start {
return
} else if v == ki.start && v == ki.stop-1 {
} else if v == ki.Start && v == ki.Stop-1 {
// i.intervals.splice(k, 1)
i.intervals = append(i.intervals[0:k], i.intervals[k+1:]...)
return
} else if v == ki.start {
i.intervals[k] = NewInterval(ki.start+1, ki.stop)
} else if v == ki.Start {
i.intervals[k] = NewInterval(ki.Start+1, ki.Stop)
return
} else if v == ki.stop-1 {
i.intervals[k] = NewInterval(ki.start, ki.stop-1)
} else if v == ki.Stop-1 {
i.intervals[k] = NewInterval(ki.Start, ki.Stop-1)
return
} else if v < ki.stop-1 {
x := NewInterval(ki.start, v)
ki.start = v + 1
} else if v < ki.Stop-1 {
x := NewInterval(ki.Start, v)
ki.Start = v + 1
// i.intervals.splice(k, 0, x)
i.intervals = append(i.intervals[0:k], append([]*Interval{x}, i.intervals[k:]...)...)
return
@ -228,14 +228,14 @@ func (i *IntervalSet) toCharString() string {
for j := 0; j < len(i.intervals); j++ {
v := i.intervals[j]
if v.stop == v.start+1 {
if v.start == TokenEOF {
if v.Stop == v.Start+1 {
if v.Start == TokenEOF {
names = append(names, "<EOF>")
} else {
names = append(names, ("'" + string(v.start) + "'"))
names = append(names, ("'" + string(v.Start) + "'"))
}
} else {
names = append(names, "'"+string(v.start)+"'..'"+string(v.stop-1)+"'")
names = append(names, "'"+string(v.Start)+"'..'"+string(v.Stop-1)+"'")
}
}
if len(names) > 1 {
@ -250,14 +250,14 @@ func (i *IntervalSet) toIndexString() string {
names := make([]string, 0)
for j := 0; j < len(i.intervals); j++ {
v := i.intervals[j]
if v.stop == v.start+1 {
if v.start == TokenEOF {
if v.Stop == v.Start+1 {
if v.Start == TokenEOF {
names = append(names, "<EOF>")
} else {
names = append(names, strconv.Itoa(v.start))
names = append(names, strconv.Itoa(v.Start))
}
} else {
names = append(names, strconv.Itoa(v.start)+".."+strconv.Itoa(v.stop-1))
names = append(names, strconv.Itoa(v.Start)+".."+strconv.Itoa(v.Stop-1))
}
}
if len(names) > 1 {
@ -270,7 +270,7 @@ func (i *IntervalSet) toIndexString() string {
func (i *IntervalSet) toTokenString(literalNames []string, symbolicNames []string) string {
names := make([]string, 0)
for _, v := range i.intervals {
for j := v.start; j < v.stop; j++ {
for j := v.Start; j < v.Stop; j++ {
names = append(names, i.elementName(literalNames, symbolicNames, j))
}
}

View File

@ -21,11 +21,11 @@ type Lexer interface {
Emit() Token
setChannel(int)
pushMode(int)
popMode() int
setType(int)
setMode(int)
SetChannel(int)
PushMode(int)
PopMode() int
SetType(int)
SetMode(int)
}
type BaseLexer struct {
@ -150,7 +150,7 @@ func (b *BaseLexer) GetSourceName() string {
return b.GrammarFileName
}
func (b *BaseLexer) setChannel(v int) {
func (b *BaseLexer) SetChannel(v int) {
b.channel = v
}
@ -250,11 +250,11 @@ func (b *BaseLexer) More() {
b.thetype = LexerMore
}
func (b *BaseLexer) setMode(m int) {
func (b *BaseLexer) SetMode(m int) {
b.mode = m
}
func (b *BaseLexer) pushMode(m int) {
func (b *BaseLexer) PushMode(m int) {
if LexerATNSimulatorDebug {
fmt.Println("pushMode " + strconv.Itoa(m))
}
@ -262,7 +262,7 @@ func (b *BaseLexer) pushMode(m int) {
b.mode = m
}
func (b *BaseLexer) popMode() int {
func (b *BaseLexer) PopMode() int {
if len(b.modeStack) == 0 {
panic("Empty Stack")
}
@ -331,7 +331,7 @@ func (b *BaseLexer) GetType() int {
return b.thetype
}
func (b *BaseLexer) setType(t int) {
func (b *BaseLexer) SetType(t int) {
b.thetype = t
}
@ -361,7 +361,7 @@ func (b *BaseLexer) GetATN() *ATN {
// Return a list of all Token objects in input char stream.
// Forces load of all tokens. Does not include EOF token.
// /
func (b *BaseLexer) getAllTokens() []Token {
func (b *BaseLexer) GetAllTokens() []Token {
vl := b.Virt
tokens := make([]Token, 0)
t := vl.NextToken()

View File

@ -101,7 +101,7 @@ func NewLexerTypeAction(thetype int) *LexerTypeAction {
}
func (l *LexerTypeAction) execute(lexer Lexer) {
lexer.setType(l.thetype)
lexer.SetType(l.thetype)
}
func (l *LexerTypeAction) hash() int {
@ -145,7 +145,7 @@ func NewLexerPushModeAction(mode int) *LexerPushModeAction {
// <p>This action is implemented by calling {@link Lexer//pushMode} with the
// value provided by {@link //getMode}.</p>
func (l *LexerPushModeAction) execute(lexer Lexer) {
lexer.pushMode(l.mode)
lexer.PushMode(l.mode)
}
func (l *LexerPushModeAction) hash() int {
@ -190,7 +190,7 @@ var LexerPopModeActionINSTANCE = NewLexerPopModeAction()
// <p>This action is implemented by calling {@link Lexer//popMode}.</p>
func (l *LexerPopModeAction) execute(lexer Lexer) {
lexer.popMode()
lexer.PopMode()
}
func (l *LexerPopModeAction) String() string {
@ -242,7 +242,7 @@ func NewLexerModeAction(mode int) *LexerModeAction {
// <p>This action is implemented by calling {@link Lexer//mode} with the
// value provided by {@link //getMode}.</p>
func (l *LexerModeAction) execute(lexer Lexer) {
lexer.setMode(l.mode)
lexer.SetMode(l.mode)
}
func (l *LexerModeAction) hash() int {
@ -341,7 +341,7 @@ func NewLexerChannelAction(channel int) *LexerChannelAction {
// <p>This action is implemented by calling {@link Lexer//setChannel} with the
// value provided by {@link //getChannel}.</p>
func (l *LexerChannelAction) execute(lexer Lexer) {
lexer.setChannel(l.channel)
lexer.SetChannel(l.channel)
}
func (l *LexerChannelAction) hash() int {

View File

@ -0,0 +1,98 @@
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.
// These assert functions are borrowed from https://github.com/stretchr/testify/ (MIT License)
package antlr
import (
"fmt"
"reflect"
"testing"
)
type assert struct {
t *testing.T
}
func assertNew(t *testing.T) *assert {
return &assert{
t: t,
}
}
func (a *assert) Equal(expected, actual interface{}) bool {
if !objectsAreEqual(expected, actual) {
return a.Fail(fmt.Sprintf("Not equal:\n"+
"expected: %#v\n"+
" actual: %#v\n", expected, actual))
}
return true
}
func objectsAreEqual(expected, actual interface{}) bool {
if expected == nil || actual == nil {
return expected == actual
}
return reflect.DeepEqual(expected, actual)
}
func (a *assert) Nil(object interface{}) bool {
if isNil(object) {
return true
}
return a.Fail(fmt.Sprintf("Expected nil, but got: %#v", object))
}
func (a *assert) NotNil(object interface{}) bool {
if !isNil(object) {
return true
}
return a.Fail("Expected value not to be nil.")
}
// isNil checks whether the given object is nil, without failing the test.
func isNil(object interface{}) bool {
if object == nil {
return true
}
value := reflect.ValueOf(object)
kind := value.Kind()
if kind >= reflect.Chan && kind <= reflect.Slice && value.IsNil() {
return true
}
return false
}
func (a *assert) Panics(f func()) bool {
if funcDidPanic, panicValue := didPanic(f); !funcDidPanic {
return a.Fail(fmt.Sprintf("func %#v should panic\n\r\tPanic value:\t%v", f, panicValue))
}
return true
}
// Fail reports a failure through the underlying testing.T's Errorf.
func (a *assert) Fail(failureMessage string) bool {
a.t.Errorf("%s", failureMessage)
return false
}
// didPanic returns true if the function passed to it panics. Otherwise, it returns false.
func didPanic(f func()) (bool, interface{}) {
didPanic := false
var message interface{}
func() {
defer func() {
if message = recover(); message != nil {
didPanic = true
}
}()
// call the target function
f()
}()
return didPanic, message
}

View File

@ -0,0 +1,107 @@
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.
package antlr
/*
LexerB is a lexer for testing purposes.
This file is generated from the following grammar:
lexer grammar LexerB;
ID : 'a'..'z'+;
INT : '0'..'9'+;
SEMI : ';';
ASSIGN : '=';
PLUS : '+';
MULT : '*';
WS : ' '+;
*/
var lexerB_serializedLexerAtn = []uint16{
3, 24715, 42794, 33075, 47597, 16764, 15335, 30598, 22884, 2, 9, 40, 8,
1, 4, 2, 9, 2, 4, 3, 9, 3, 4, 4, 9, 4, 4, 5, 9, 5, 4, 6, 9, 6, 4, 7, 9,
7, 4, 8, 9, 8, 3, 2, 6, 2, 19, 10, 2, 13, 2, 14, 2, 20, 3, 3, 6, 3, 24,
10, 3, 13, 3, 14, 3, 25, 3, 4, 3, 4, 3, 5, 3, 5, 3, 6, 3, 6, 3, 7, 3, 7,
3, 8, 6, 8, 37, 10, 8, 13, 8, 14, 8, 38, 2, 2, 9, 3, 3, 5, 4, 7, 5, 9,
6, 11, 7, 13, 8, 15, 9, 3, 2, 2, 2, 42, 2, 3, 3, 2, 2, 2, 2, 5, 3, 2, 2,
2, 2, 7, 3, 2, 2, 2, 2, 9, 3, 2, 2, 2, 2, 11, 3, 2, 2, 2, 2, 13, 3, 2,
2, 2, 2, 15, 3, 2, 2, 2, 3, 18, 3, 2, 2, 2, 5, 23, 3, 2, 2, 2, 7, 27, 3,
2, 2, 2, 9, 29, 3, 2, 2, 2, 11, 31, 3, 2, 2, 2, 13, 33, 3, 2, 2, 2, 15,
36, 3, 2, 2, 2, 17, 19, 4, 99, 124, 2, 18, 17, 3, 2, 2, 2, 19, 20, 3, 2,
2, 2, 20, 18, 3, 2, 2, 2, 20, 21, 3, 2, 2, 2, 21, 4, 3, 2, 2, 2, 22, 24,
4, 50, 59, 2, 23, 22, 3, 2, 2, 2, 24, 25, 3, 2, 2, 2, 25, 23, 3, 2, 2,
2, 25, 26, 3, 2, 2, 2, 26, 6, 3, 2, 2, 2, 27, 28, 7, 61, 2, 2, 28, 8, 3,
2, 2, 2, 29, 30, 7, 63, 2, 2, 30, 10, 3, 2, 2, 2, 31, 32, 7, 45, 2, 2,
32, 12, 3, 2, 2, 2, 33, 34, 7, 44, 2, 2, 34, 14, 3, 2, 2, 2, 35, 37, 7,
34, 2, 2, 36, 35, 3, 2, 2, 2, 37, 38, 3, 2, 2, 2, 38, 36, 3, 2, 2, 2, 38,
39, 3, 2, 2, 2, 39, 16, 3, 2, 2, 2, 6, 2, 20, 25, 38, 2,
}
var lexerB_lexerDeserializer = NewATNDeserializer(nil)
var lexerB_lexerAtn = lexerB_lexerDeserializer.DeserializeFromUInt16(lexerB_serializedLexerAtn)
var lexerB_lexerChannelNames = []string{
"DEFAULT_TOKEN_CHANNEL", "HIDDEN",
}
var lexerB_lexerModeNames = []string{
"DEFAULT_MODE",
}
var lexerB_lexerLiteralNames = []string{
"", "", "", "';'", "'='", "'+'", "'*'",
}
var lexerB_lexerSymbolicNames = []string{
"", "ID", "INT", "SEMI", "ASSIGN", "PLUS", "MULT", "WS",
}
var lexerB_lexerRuleNames = []string{
"ID", "INT", "SEMI", "ASSIGN", "PLUS", "MULT", "WS",
}
type LexerB struct {
*BaseLexer
channelNames []string
modeNames []string
// TODO: EOF string
}
var lexerB_lexerDecisionToDFA = make([]*DFA, len(lexerB_lexerAtn.DecisionToState))
func init() {
for index, ds := range lexerB_lexerAtn.DecisionToState {
lexerB_lexerDecisionToDFA[index] = NewDFA(ds, index)
}
}
func NewLexerB(input CharStream) *LexerB {
l := new(LexerB)
l.BaseLexer = NewBaseLexer(input)
l.Interpreter = NewLexerATNSimulator(l, lexerB_lexerAtn, lexerB_lexerDecisionToDFA, NewPredictionContextCache())
l.channelNames = lexerB_lexerChannelNames
l.modeNames = lexerB_lexerModeNames
l.RuleNames = lexerB_lexerRuleNames
l.LiteralNames = lexerB_lexerLiteralNames
l.SymbolicNames = lexerB_lexerSymbolicNames
l.GrammarFileName = "LexerB.g4"
// TODO: l.EOF = TokenEOF
return l
}
// LexerB tokens.
const (
LexerBID = 1
LexerBINT = 2
LexerBSEMI = 3
LexerBASSIGN = 4
LexerBPLUS = 5
LexerBMULT = 6
LexerBWS = 7
)
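A hedged smoke-test sketch for the generated lexer, using only names defined in this commit (NewInputStream, NewCommonTokenStream, Fill, Size, Get) and assuming it sits in this package with fmt imported:

```go
// Sketch: tokenize "ab = 34;" with LexerB and dump each token.
func lexerBSketch() {
	stream := NewCommonTokenStream(NewLexerB(NewInputStream("ab = 34;")), TokenDefaultChannel)
	stream.Fill()
	for i := 0; i < stream.Size(); i++ {
		t := stream.Get(i)
		fmt.Printf("%q type=%d\n", t.GetText(), t.GetTokenType())
	}
}
```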

View File

@ -0,0 +1,30 @@
package antlr
import (
"fmt"
"strings"
)
// newTestCommonToken creates a common token with the given token type, text and channel.
// Note: for test purposes only.
func newTestCommonToken(tokenType int, text string, channel int) *CommonToken {
t := new(CommonToken)
t.BaseToken = new(BaseToken)
t.tokenType = tokenType
t.channel = channel
t.text = text
t.line = 0
t.column = -1
return t
}
// tokensToString returns the string representation of a []Token slice.
// Note: for test purposes only.
func tokensToString(tokens []Token) string {
buf := make([]string, len(tokens))
for i, token := range tokens {
buf[i] = fmt.Sprintf("%v", token)
}
return "[" + strings.Join(buf, ", ") + "]"
}

View File

@ -0,0 +1,649 @@
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.
package antlr
import (
"bytes"
"fmt"
)
//
// Useful for rewriting out a buffered input token stream after doing some
// augmentation or other manipulations on it.
// <p>
// You can insert stuff, replace, and delete chunks. Note that the operations
// are done lazily--only if you convert the buffer to a {@link String} with
// {@link TokenStream#getText()}. This is very efficient because you are not
// moving data around all the time. As the buffer of tokens is converted to
// strings, the {@link #getText()} method(s) scan the input token stream and
// check to see if there is an operation at the current index. If so, the
// operation is done and then normal {@link String} rendering continues on the
// buffer. This is like having multiple Turing machine instruction streams
// (programs) operating on a single input tape. :)</p>
// <p>
// This rewriter makes no modifications to the token stream. It does not ask the
// stream to fill itself up nor does it advance the input cursor. The token
// stream {@link TokenStream#index()} will return the same value before and
// after any {@link #getText()} call.</p>
// <p>
// The rewriter only works on tokens that you have in the buffer and ignores the
// current input cursor. If you are buffering tokens on-demand, calling
// {@link #getText()} halfway through the input will only do rewrites for those
// tokens in the first half of the file.</p>
// <p>
// Since the operations are done lazily at {@link #getText}-time, operations do
// not screw up the token index values. That is, an insert operation at token
// index {@code i} does not change the index values for tokens
// {@code i}+1..n-1.</p>
// <p>
// Because operations never actually alter the buffer, you may always get the
// original token stream back without undoing anything. Since the instructions
// are queued up, you can easily simulate transactions and roll back any changes
// if there is an error just by removing instructions. For example,</p>
// <pre>
// CharStream input = new ANTLRFileStream("input");
// TLexer lex = new TLexer(input);
// CommonTokenStream tokens = new CommonTokenStream(lex);
// T parser = new T(tokens);
// TokenStreamRewriter rewriter = new TokenStreamRewriter(tokens);
// parser.startRule();
// </pre>
// <p>
// Then in the rules, you can execute (assuming rewriter is visible):</p>
// <pre>
// Token t,u;
// ...
// rewriter.insertAfter(t, "text to put after t");
// rewriter.insertAfter(u, "text after u");
// System.out.println(rewriter.getText());
// </pre>
// <p>
// You can also have multiple "instruction streams" and get multiple rewrites
// from a single pass over the input. Just name the instruction streams and use
// that name again when printing the buffer. This could be useful for generating
// a C file and also its header file--all from the same buffer:</p>
// <pre>
// rewriter.insertAfter("pass1", t, "text to put after t");}
// rewriter.insertAfter("pass2", u, "text after u");}
// System.out.println(rewriter.getText("pass1"));
// System.out.println(rewriter.getText("pass2"));
// </pre>
// <p>
// If you don't use named rewrite streams, a "default" stream is used as the
// first example shows.</p>
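The Java snippets above translate directly to this Go port. A hedged example, using only names defined in this change set (LexerA is the test lexer added later in this commit):

```go
package antlr

import "fmt"

// ExampleTokenStreamRewriter mirrors the Java usage above; edits are queued
// lazily and the underlying token stream is never modified.
func ExampleTokenStreamRewriter() {
	input := NewInputStream("abc")
	stream := NewCommonTokenStream(NewLexerA(input), TokenDefaultChannel)
	stream.Fill()
	rewriter := NewTokenStreamRewriter(stream)
	rewriter.InsertBeforeDefault(0, "<b>")
	rewriter.InsertAfterDefault(2, "</b>")
	fmt.Println(rewriter.GetTextDefault())
	// Output: <b>abc</b>
}
```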
const(
Default_Program_Name = "default"
Program_Init_Size = 100
Min_Token_Index = 0
)
// Define the rewrite operation hierarchy
type RewriteOperation interface {
// Execute the rewrite operation by possibly adding to the buffer.
// Return the index of the next token to operate on.
Execute(buffer *bytes.Buffer) int
String() string
GetInstructionIndex() int
GetIndex() int
GetText() string
GetOpName() string
GetTokens() TokenStream
SetInstructionIndex(val int)
SetIndex(int)
SetText(string)
SetOpName(string)
SetTokens(TokenStream)
}
type BaseRewriteOperation struct {
//Current index of rewrites list
instruction_index int
//Token buffer index
index int
//Substitution text
text string
//Actual operation name
op_name string
// Pointer to the token stream
tokens TokenStream
}
func (op *BaseRewriteOperation)GetInstructionIndex() int{
return op.instruction_index
}
func (op *BaseRewriteOperation)GetIndex() int{
return op.index
}
func (op *BaseRewriteOperation)GetText() string{
return op.text
}
func (op *BaseRewriteOperation)GetOpName() string{
return op.op_name
}
func (op *BaseRewriteOperation)GetTokens() TokenStream{
return op.tokens
}
func (op *BaseRewriteOperation)SetInstructionIndex(val int){
op.instruction_index = val
}
func (op *BaseRewriteOperation)SetIndex(val int) {
op.index = val
}
func (op *BaseRewriteOperation)SetText(val string){
op.text = val
}
func (op *BaseRewriteOperation)SetOpName(val string){
op.op_name = val
}
func (op *BaseRewriteOperation)SetTokens(val TokenStream) {
op.tokens = val
}
func (op *BaseRewriteOperation) Execute(buffer *bytes.Buffer) int{
return op.index
}
func (op *BaseRewriteOperation) String() string {
return fmt.Sprintf("<%s@%d:\"%s\">",
op.op_name,
op.index,
op.text,
)
}
type InsertBeforeOp struct {
BaseRewriteOperation
}
func NewInsertBeforeOp(index int, text string, stream TokenStream) *InsertBeforeOp{
return &InsertBeforeOp{BaseRewriteOperation:BaseRewriteOperation{
index:index,
text:text,
op_name:"InsertBeforeOp",
tokens:stream,
}}
}
func (op *InsertBeforeOp) Execute(buffer *bytes.Buffer) int{
buffer.WriteString(op.text)
if op.tokens.Get(op.index).GetTokenType() != TokenEOF{
buffer.WriteString(op.tokens.Get(op.index).GetText())
}
return op.index+1
}
func (op *InsertBeforeOp) String() string {
return op.BaseRewriteOperation.String()
}
// Distinguish between insert after/before to do the "insert afters"
// first and then the "insert befores" at same index. Implementation
// of "insert after" is "insert before index+1".
type InsertAfterOp struct {
BaseRewriteOperation
}
func NewInsertAfterOp(index int, text string, stream TokenStream) *InsertAfterOp{
return &InsertAfterOp{BaseRewriteOperation:BaseRewriteOperation{
index:index+1,
text:text,
op_name:"InsertAfterOp",
tokens:stream,
}}
}
func (op *InsertAfterOp) Execute(buffer *bytes.Buffer) int {
buffer.WriteString(op.text)
if op.tokens.Get(op.index).GetTokenType() != TokenEOF{
buffer.WriteString(op.tokens.Get(op.index).GetText())
}
return op.index+1
}
func (op *InsertAfterOp) String() string {
return op.BaseRewriteOperation.String()
}
// I'm going to try replacing range from x..y with (y-x)+1 ReplaceOp
// instructions.
type ReplaceOp struct{
BaseRewriteOperation
LastIndex int
}
func NewReplaceOp(from, to int, text string, stream TokenStream)*ReplaceOp {
return &ReplaceOp{
BaseRewriteOperation:BaseRewriteOperation{
index:from,
text:text,
op_name:"ReplaceOp",
tokens:stream,
},
LastIndex:to,
}
}
func (op *ReplaceOp)Execute(buffer *bytes.Buffer) int{
if op.text != ""{
buffer.WriteString(op.text)
}
return op.LastIndex +1
}
func (op *ReplaceOp) String() string {
if op.text == "" {
return fmt.Sprintf("<DeleteOP@%d..%d>",
op.tokens.Get(op.index), op.tokens.Get(op.LastIndex))
}
return fmt.Sprintf("<ReplaceOp@%d..%d:\"%s\">",
op.tokens.Get(op.index), op.tokens.Get(op.LastIndex), op.text)
}
type TokenStreamRewriter struct {
//Our source stream
tokens TokenStream
// You may have multiple, named streams of rewrite operations.
// I'm calling these things "programs."
// Maps String (name) -> rewrite (List)
programs map[string][]RewriteOperation
last_rewrite_token_indexes map[string]int
}
func NewTokenStreamRewriter(tokens TokenStream) *TokenStreamRewriter{
return &TokenStreamRewriter{
tokens: tokens,
programs: map[string][]RewriteOperation{
Default_Program_Name:make([]RewriteOperation,0, Program_Init_Size),
},
last_rewrite_token_indexes: map[string]int{},
}
}
func (tsr *TokenStreamRewriter) GetTokenStream() TokenStream{
return tsr.tokens
}
// Rollback the instruction stream for a program so that
// the indicated instruction (via instructionIndex) is no
// longer in the stream. UNTESTED!
func (tsr *TokenStreamRewriter) Rollback(program_name string, instruction_index int){
is, ok := tsr.programs[program_name]
if ok{
tsr.programs[program_name] = is[Min_Token_Index:instruction_index]
}
}
func (tsr *TokenStreamRewriter) RollbackDefault(instruction_index int){
tsr.Rollback(Default_Program_Name, instruction_index)
}
//Reset the program so that no instructions exist
func (tsr *TokenStreamRewriter) DeleteProgram(program_name string){
tsr.Rollback(program_name, Min_Token_Index) // TODO: needs more testing; the lower bound is not included
}
func (tsr *TokenStreamRewriter) DeleteProgramDefault(){
tsr.DeleteProgram(Default_Program_Name)
}
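A short sketch of the (per the comment above, untested) rollback semantics: instructions at positions at or beyond instruction_index are dropped from the program. Assumes the surrounding package:

```go
// Sketch: queue three edits, then roll back so only the first two survive.
func rollbackSketch(stream TokenStream) {
	r := NewTokenStreamRewriter(stream)
	r.InsertBeforeDefault(0, "x") // instruction index 0
	r.InsertBeforeDefault(0, "y") // instruction index 1
	r.InsertBeforeDefault(0, "z") // instruction index 2
	r.RollbackDefault(2)          // keeps instructions 0 and 1 only
}
```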
func (tsr *TokenStreamRewriter) InsertAfter(program_name string, index int, text string){
// to insert after, just insert before next index (even if past end)
var op RewriteOperation = NewInsertAfterOp(index, text, tsr.tokens)
rewrites := tsr.GetProgram(program_name)
op.SetInstructionIndex(len(rewrites))
tsr.AddToProgram(program_name, op)
}
func (tsr *TokenStreamRewriter) InsertAfterDefault(index int, text string){
tsr.InsertAfter(Default_Program_Name, index, text)
}
func (tsr *TokenStreamRewriter) InsertAfterToken(program_name string, token Token, text string){
tsr.InsertAfter(program_name, token.GetTokenIndex(), text)
}
func (tsr* TokenStreamRewriter) InsertBefore(program_name string, index int, text string){
var op RewriteOperation = NewInsertBeforeOp(index, text, tsr.tokens)
rewrites := tsr.GetProgram(program_name)
op.SetInstructionIndex(len(rewrites))
tsr.AddToProgram(program_name, op)
}
func (tsr *TokenStreamRewriter) InsertBeforeDefault(index int, text string){
tsr.InsertBefore(Default_Program_Name, index, text)
}
func (tsr *TokenStreamRewriter) InsertBeforeToken(program_name string,token Token, text string){
tsr.InsertBefore(program_name, token.GetTokenIndex(), text)
}
func (tsr *TokenStreamRewriter) Replace(program_name string, from, to int, text string){
if from > to || from < 0 || to < 0 || to >= tsr.tokens.Size(){
panic(fmt.Sprintf("replace: range invalid: %d..%d(size=%d)",
from, to, tsr.tokens.Size()))
}
var op RewriteOperation = NewReplaceOp(from, to, text, tsr.tokens)
rewrites := tsr.GetProgram(program_name)
op.SetInstructionIndex(len(rewrites))
tsr.AddToProgram(program_name, op)
}
func (tsr *TokenStreamRewriter)ReplaceDefault(from, to int, text string) {
tsr.Replace(Default_Program_Name, from, to, text)
}
func (tsr *TokenStreamRewriter)ReplaceDefaultPos(index int, text string){
tsr.ReplaceDefault(index, index, text)
}
func (tsr *TokenStreamRewriter)ReplaceToken(program_name string, from, to Token, text string){
tsr.Replace(program_name, from.GetTokenIndex(), to.GetTokenIndex(), text)
}
func (tsr *TokenStreamRewriter)ReplaceTokenDefault(from, to Token, text string){
tsr.ReplaceToken(Default_Program_Name, from, to, text)
}
func (tsr *TokenStreamRewriter)ReplaceTokenDefaultPos(index Token, text string){
tsr.ReplaceTokenDefault(index, index, text)
}
func (tsr *TokenStreamRewriter)Delete(program_name string, from, to int){
tsr.Replace(program_name, from, to, "" )
}
func (tsr *TokenStreamRewriter)DeleteDefault(from, to int){
tsr.Delete(Default_Program_Name, from, to)
}
func (tsr *TokenStreamRewriter)DeleteDefaultPos(index int){
tsr.DeleteDefault(index,index)
}
func (tsr *TokenStreamRewriter)DeleteToken(program_name string, from, to Token) {
tsr.ReplaceToken(program_name, from, to, "")
}
func (tsr *TokenStreamRewriter)DeleteTokenDefault(from,to Token){
tsr.DeleteToken(Default_Program_Name, from, to)
}
func (tsr *TokenStreamRewriter)GetLastRewriteTokenIndex(program_name string)int {
i, ok := tsr.last_rewrite_token_indexes[program_name]
if !ok{
return -1
}
return i
}
func (tsr *TokenStreamRewriter)GetLastRewriteTokenIndexDefault()int{
return tsr.GetLastRewriteTokenIndex(Default_Program_Name)
}
func (tsr *TokenStreamRewriter)SetLastRewriteTokenIndex(program_name string, i int){
tsr.last_rewrite_token_indexes[program_name] = i
}
func (tsr *TokenStreamRewriter)InitializeProgram(name string)[]RewriteOperation{
is := make([]RewriteOperation, 0, Program_Init_Size)
tsr.programs[name] = is
return is
}
func (tsr *TokenStreamRewriter)AddToProgram(name string, op RewriteOperation){
is := tsr.GetProgram(name)
is = append(is, op)
tsr.programs[name] = is
}
func (tsr *TokenStreamRewriter)GetProgram(name string) []RewriteOperation {
is, ok := tsr.programs[name]
if !ok{
is = tsr.InitializeProgram(name)
}
return is
}
// Return the text from the original tokens altered per the
// instructions given to this rewriter.
func (tsr *TokenStreamRewriter)GetTextDefault() string{
return tsr.GetText(
Default_Program_Name,
NewInterval(0, tsr.tokens.Size()-1))
}
// Return the text from the original tokens altered per the
// instructions given to this rewriter.
func (tsr *TokenStreamRewriter)GetText(program_name string, interval *Interval) string {
rewrites := tsr.programs[program_name]
start := interval.Start
stop := interval.Stop
// ensure start/end are in range
stop = min(stop, tsr.tokens.Size()-1)
start = max(start,0)
if rewrites == nil || len(rewrites) == 0{
return tsr.tokens.GetTextFromInterval(interval) // no instructions to execute
}
buf := bytes.Buffer{}
// First, optimize instruction stream
indexToOp := reduceToSingleOperationPerIndex(rewrites)
// Walk buffer, executing instructions and emitting tokens
for i:=start; i<=stop && i<tsr.tokens.Size();{
op := indexToOp[i]
delete(indexToOp, i)// remove so any left have index size-1
t := tsr.tokens.Get(i)
if op == nil{
// no operation at that index, just dump token
if t.GetTokenType() != TokenEOF {buf.WriteString(t.GetText())}
i++ // move to next token
}else {
i = op.Execute(&buf)// execute operation and skip
}
}
// include stuff after end if it's last index in buffer
// So, if they did an insertAfter(lastValidIndex, "foo"), include
// foo if end==lastValidIndex.
if stop == tsr.tokens.Size()-1{
// Scan any remaining operations after last token
// should be included (they will be inserts).
for _, op := range indexToOp{
if op.GetIndex() >= tsr.tokens.Size()-1 {buf.WriteString(op.GetText())}
}
}
return buf.String()
}
// We need to combine operations and report invalid operations (like
// overlapping replaces that are not completely nested). Inserts to the
// same index need to be combined, etc. Here are the cases:
//
// I.i.u I.j.v leave alone, nonoverlapping
// I.i.u I.i.v combine: Iivu
//
// R.i-j.u R.x-y.v | i-j in x-y delete first R
// R.i-j.u R.i-j.v delete first R
// R.i-j.u R.x-y.v | x-y in i-j ERROR
// R.i-j.u R.x-y.v | boundaries overlap ERROR
//
// Delete special case of replace (text==null):
// D.i-j.u D.x-y.v | boundaries overlap combine to min(left)..max(right)
//
// I.i.u R.x-y.v | i in (x+1)-y delete I (since insert before
// we're not deleting i)
// I.i.u R.x-y.v | i not in (x+1)-y leave alone, nonoverlapping
// R.x-y.v I.i.u | i in x-y ERROR
// R.x-y.v I.x.u R.x-y.uv (combine, delete I)
// R.x-y.v I.i.u | i not in x-y leave alone, nonoverlapping
//
// I.i.u = insert u before op @ index i
// R.x-y.u = replace x-y indexed tokens with u
//
// First we need to examine replaces. For any replace op:
//
// 1. wipe out any insertions before op within that range.
// 2. Drop any replace op before that is contained completely within
// that range.
// 3. Throw exception upon boundary overlap with any previous replace.
//
// Then we can deal with inserts:
//
// 1. for any inserts to same index, combine even if not adjacent.
// 2. for any prior replace with same left boundary, combine this
// insert with replace and delete this replace.
// 3. throw exception if index in same range as previous replace
//
// Don't actually delete; make op null in list. Easier to walk list.
// Later we can throw as we add to the index -> op map.
//
// Note that I.2 R.2-2 will wipe out I.2 even though, technically, the
// inserted stuff would be before the replace range. But, if you
// add tokens in front of a method body '{' and then delete the method
// body, I think the stuff before the '{' you added should disappear too.
//
// Return a map from token index to operation.
//
func reduceToSingleOperationPerIndex(rewrites []RewriteOperation) map[int]RewriteOperation{
// WALK REPLACES
for i:=0; i < len(rewrites); i++{
op := rewrites[i]
if op == nil{continue}
rop, ok := op.(*ReplaceOp)
if !ok{continue}
// Wipe prior inserts within range
for j:=0; j<i && j < len(rewrites); j++{
if iop, ok := rewrites[j].(*InsertBeforeOp);ok{
if iop.index == rop.index{
// E.g., insert before 2, delete 2..2; update replace
// text to include insert before, kill insert
rewrites[iop.instruction_index] = nil
if rop.text != ""{
rop.text = iop.text + rop.text
}else{
rop.text = iop.text
}
}else if iop.index > rop.index && iop.index <=rop.LastIndex{
// delete insert as it's a no-op.
rewrites[iop.instruction_index] = nil
}
}
}
// Drop any prior replaces contained within
for j:=0; j<i && j < len(rewrites); j++{
if prevop, ok := rewrites[j].(*ReplaceOp);ok{
if prevop.index>=rop.index && prevop.LastIndex <= rop.LastIndex{
// delete replace as it's a no-op.
rewrites[prevop.instruction_index] = nil
continue
}
// throw exception unless disjoint or identical
disjoint := prevop.LastIndex < rop.index || prevop.index > rop.LastIndex
// Delete special case of replace (text==null):
// D.i-j.u D.x-y.v | boundaries overlap combine to min(left)..max(right)
if prevop.text == "" && rop.text == "" && !disjoint{
rewrites[prevop.instruction_index] = nil
rop.index = min(prevop.index, rop.index)
rop.LastIndex = max(prevop.LastIndex, rop.LastIndex)
println("new rop" + rop.String()) //TODO: remove console write, taken from Java version
}else if !disjoint{
panic("replace op boundaries of " + rop.String() + " overlap with previous " + prevop.String())
}
}
}
}
// WALK INSERTS
for i:=0; i < len(rewrites); i++ {
op := rewrites[i]
if op == nil{continue}
//hack to replicate inheritance in composition
_, iok := rewrites[i].(*InsertBeforeOp)
_, aok := rewrites[i].(*InsertAfterOp)
if !iok && !aok{continue}
iop := rewrites[i]
// combine current insert with prior if any at same index
// deviating a bit from TokenStreamRewriter.java - hard to incorporate inheritance logic
for j:=0; j<i && j < len(rewrites); j++{
if nextIop, ok := rewrites[j].(*InsertAfterOp); ok{
if nextIop.index == iop.GetIndex(){
iop.SetText(nextIop.text + iop.GetText())
rewrites[j] = nil
}
}
if prevIop, ok := rewrites[j].(*InsertBeforeOp); ok{
if prevIop.index == iop.GetIndex(){
iop.SetText(iop.GetText() + prevIop.text)
rewrites[prevIop.instruction_index] = nil
}
}
}
// look for replaces where iop.index is in range; error
for j:=0; j<i && j < len(rewrites); j++{
if rop,ok := rewrites[j].(*ReplaceOp); ok{
if iop.GetIndex() == rop.index{
rop.text = iop.GetText() + rop.text
rewrites[i] = nil
continue
}
if iop.GetIndex() >= rop.index && iop.GetIndex() <= rop.LastIndex{
panic("insert op "+iop.String()+" within boundaries of previous "+rop.String())
}
}
}
}
m := map[int]RewriteOperation{}
for i:=0; i < len(rewrites); i++{
op := rewrites[i]
if op == nil {continue}
if _, ok := m[op.GetIndex()]; ok{
panic("should only be one op per index")
}
m[op.GetIndex()] = op
}
return m
}
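As a worked instance of the rules above (it matches the DropPrevCoveredInsert test added later in this commit): an insert at the index where a replace starts is folded into the replace text, leaving a single operation per index. A sketch, assuming the prepare_rewriter test helper defined below:

```go
// "abc": insert "foo" before token 1, then replace tokens 1..2 with "foo".
// reduceToSingleOperationPerIndex folds the insert into the replace, leaving
// one ReplaceOp@1..2 with text "foofoo", so GetText renders "afoofoo".
func reduceSketch() string {
	r := prepare_rewriter("abc")
	r.InsertBeforeDefault(1, "foo")
	r.ReplaceDefault(1, 2, "foo")
	return r.GetTextDefault() // "afoofoo"
}
```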
/*
Working around Go's lack of overloading
*/
func max(a,b int)int{
if a>b{
return a
}else {
return b
}
}
func min(a,b int)int{
if a<b{
return a
}else {
return b
}
}

View File

@ -0,0 +1,392 @@
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.
package antlr
import (
"testing"
"fmt"
"unicode"
"strings"
)
func TestInsertBeforeIndex0(t *testing.T){
input := NewInputStream("abc")
lexer := NewLexerA(input)
stream := NewCommonTokenStream(lexer, 0)
stream.Fill()
tokens := NewTokenStreamRewriter(stream)
tokens.InsertBeforeDefault(0, "0")
result := tokens.GetTextDefault()
if result != "0abc"{
t.Errorf("test failed, got %s", result)
}
}
func prepare_rewriter(str string) *TokenStreamRewriter{
input := NewInputStream(str)
lexer := NewLexerA(input)
stream := NewCommonTokenStream(lexer, 0)
stream.Fill()
return NewTokenStreamRewriter(stream)
}
type LexerTest struct {
input string
expected string
description string
expected_exception []string
ops func(*TokenStreamRewriter)
}
func NewLexerTest(input, expected, desc string, ops func(*TokenStreamRewriter)) LexerTest{
return LexerTest{input:input, expected:expected, description:desc, ops:ops}
}
func NewLexerExceptionTest(input string, expected_err []string, desc string, ops func(*TokenStreamRewriter)) LexerTest{
return LexerTest{input:input, expected_exception:expected_err, description:desc, ops:ops}
}
func panic_tester(t *testing.T, expected_msg []string, r *TokenStreamRewriter){
defer func() {
r :=recover()
if r == nil{
t.Errorf("Panic is expected, but finished normally")
} else {
s_e := r.(string)
for _, e := range expected_msg{
if !strings.Contains(s_e, e){
t.Errorf("Element [%s] is not in error message: [%s]", e, s_e )
}
}
}
}()
r.GetTextDefault()
}
func TestLexerA(t *testing.T){
tests := []LexerTest{
NewLexerTest("abc", "0abc", "InsertBeforeIndex0",
func(r *TokenStreamRewriter){
r.InsertBeforeDefault(0, "0")
}),
NewLexerTest("abc", "abcx","InsertAfterLastIndex",
func(r *TokenStreamRewriter){
r.InsertAfterDefault(2, "x")
}),
NewLexerTest("abc", "axbxc", "2InsertBeforeAfterMiddleIndex",
func(r *TokenStreamRewriter){
r.InsertBeforeDefault(1, "x")
r.InsertAfterDefault(1, "x")
}),
NewLexerTest("abc", "xbc", "ReplaceIndex0",
func(r *TokenStreamRewriter){
r.ReplaceDefaultPos(0, "x")
}),
NewLexerTest("abc", "abx", "ReplaceLastIndex",
func(r *TokenStreamRewriter){
r.ReplaceDefaultPos(2, "x")
}),
NewLexerTest("abc", "axc", "ReplaceMiddleIndex",
func(r *TokenStreamRewriter){
r.ReplaceDefaultPos(1, "x")
}),
NewLexerTest("abc", "ayc", "2ReplaceMiddleIndex",
func(r *TokenStreamRewriter){
r.ReplaceDefaultPos(1, "x")
r.ReplaceDefaultPos(1, "y")
}),
NewLexerTest("abc", "_ayc", "2ReplaceMiddleIndex1InsertBefore",
func(r *TokenStreamRewriter){
r.InsertBeforeDefault(0, "_")
r.ReplaceDefaultPos(1, "x")
r.ReplaceDefaultPos(1, "y")
}),
NewLexerTest("abc", "ac", "ReplaceThenDeleteMiddleIndex",
func(r *TokenStreamRewriter){
r.ReplaceDefaultPos(1, "x")
r.DeleteDefaultPos(1)
}),
NewLexerExceptionTest("abc", []string{"insert op", "within boundaries of previous"},
"InsertInPriorReplace",
func(r *TokenStreamRewriter){
r.ReplaceDefault(0,2, "x")
r.InsertBeforeDefault(1, "0")
}),
NewLexerTest("abc", "0xbc", "InsertThenReplaceSameIndex",
func(r *TokenStreamRewriter){
r.InsertBeforeDefault(0,"0")
r.ReplaceDefaultPos(0, "x")
}),
NewLexerTest("abc", "ayxbc", "2InsertMiddleIndex",
func(r *TokenStreamRewriter){
r.InsertBeforeDefault(1, "x")
r.InsertBeforeDefault(1, "y")
}),
NewLexerTest("abc", "yxzbc", "2InsertThenReplaceIndex0",
func(r *TokenStreamRewriter){
r.InsertBeforeDefault(0, "x")
r.InsertBeforeDefault(0, "y")
r.ReplaceDefaultPos(0,"z")
}),
NewLexerTest("abc", "abyx", "ReplaceThenInsertBeforeLastIndex",
func(r *TokenStreamRewriter){
r.ReplaceDefaultPos(2, "x")
r.InsertBeforeDefault(2, "y")
}),
NewLexerTest("abc", "abyx", "InsertThenReplaceLastIndex",
func(r *TokenStreamRewriter){
r.InsertBeforeDefault(2, "y")
r.ReplaceDefaultPos(2, "x")
}),
NewLexerTest("abc", "abxy", "ReplaceThenInsertAfterLastIndex",
func(r *TokenStreamRewriter){
r.ReplaceDefaultPos(2, "x")
r.InsertAfterDefault(2, "y")
}),
NewLexerTest("abcccba", "abyxba", "ReplaceThenInsertAtLeftEdge",
func(r *TokenStreamRewriter){
r.ReplaceDefault(2, 4, "x")
r.InsertBeforeDefault(2, "y")
}),
NewLexerTest("abcccba", "abyxba", "ReplaceThenInsertAtLeftEdge",
func(r *TokenStreamRewriter){
r.ReplaceDefault(2, 4, "x")
r.InsertBeforeDefault(2, "y")
}),
NewLexerExceptionTest("abcccba",
[]string{"insert op", "InsertBeforeOp", "within boundaries of previous", "ReplaceOp"},
"ReplaceRangeThenInsertAtRightEdge",
func(r *TokenStreamRewriter){
r.ReplaceDefault(2, 4, "x")
r.InsertBeforeDefault(4, "y")
}),
NewLexerTest("abcccba", "abxyba", "ReplaceRangeThenInsertAfterRightEdge",
func(r *TokenStreamRewriter){
r.ReplaceDefault(2, 4, "x")
r.InsertAfterDefault(4, "y")
}),
NewLexerTest("abcccba", "x", "ReplaceAll",
func(r *TokenStreamRewriter){
r.ReplaceDefault(0, 6, "x")
}),
NewLexerTest("abcccba", "abxyzba", "ReplaceSubsetThenFetch",
func(r *TokenStreamRewriter){
r.ReplaceDefault(2, 4, "xyz")
}),
NewLexerExceptionTest("abcccba",
[]string{"replace op boundaries of", "ReplaceOp", "overlap with previous"},
"ReplaceThenReplaceSuperset",
func(r *TokenStreamRewriter){
r.ReplaceDefault(2, 4, "xyz")
r.ReplaceDefault(3, 5, "foo")
}),
NewLexerExceptionTest("abcccba",
[]string{"replace op boundaries of", "ReplaceOp", "overlap with previous"},
"ReplaceThenReplaceLowerIndexedSuperset",
func(r *TokenStreamRewriter){
r.ReplaceDefault(2, 4, "xyz")
r.ReplaceDefault(1, 3, "foo")
}),
NewLexerTest("abcba", "fooa", "ReplaceSingleMiddleThenOverlappingSuperset",
func(r *TokenStreamRewriter){
r.ReplaceDefault(2, 2, "xyz")
r.ReplaceDefault(0, 3, "foo")
}),
NewLexerTest("abc", "yxabc", "CombineInserts",
func(r *TokenStreamRewriter){
r.InsertBeforeDefault(0, "x")
r.InsertBeforeDefault(0, "y")
}),
NewLexerTest("abc", "yazxbc", "Combine3Inserts",
func(r *TokenStreamRewriter){
r.InsertBeforeDefault(1, "x")
r.InsertBeforeDefault(0, "y")
r.InsertBeforeDefault(1, "z")
}),
NewLexerTest("abc", "zfoo", "CombineInsertOnLeftWithReplace",
func(r *TokenStreamRewriter){
r.ReplaceDefault(0, 2, "foo")
r.InsertBeforeDefault(0, "z")
}),
NewLexerTest("abc", "z", "CombineInsertOnLeftWithDelete",
func(r *TokenStreamRewriter){
r.DeleteDefault(0,2)
r.InsertBeforeDefault(0, "z")
}),
NewLexerTest("abc", "zaxbyc", "DisjointInserts",
func(r *TokenStreamRewriter){
r.InsertBeforeDefault(1, "x")
r.InsertBeforeDefault(2, "y")
r.InsertBeforeDefault(0, "z")
}),
NewLexerTest("abcc", "bar", "OverlappingReplace",
func(r *TokenStreamRewriter){
r.ReplaceDefault(1,2, "foo")
r.ReplaceDefault(0, 3, "bar")
}),
NewLexerExceptionTest("abcc",
[]string{"replace op boundaries of", "ReplaceOp", "overlap with previous"},
"OverlappingReplace2",
func(r *TokenStreamRewriter){
r.ReplaceDefault(0, 3, "bar")
r.ReplaceDefault(1, 2, "foo")
}),
NewLexerTest("abcc", "barc", "OverlappingReplace3",
func(r *TokenStreamRewriter){
r.ReplaceDefault(1,2, "foo")
r.ReplaceDefault(0, 2, "bar")
}),
NewLexerTest("abcc", "abar", "OverlappingReplace4",
func(r *TokenStreamRewriter){
r.ReplaceDefault(1,2, "foo")
r.ReplaceDefault(1, 3, "bar")
}),
NewLexerTest("abcc", "afooc", "DropIdenticalReplace",
func(r *TokenStreamRewriter){
r.ReplaceDefault(1,2, "foo")
r.ReplaceDefault(1, 2, "foo")
}),
NewLexerTest("abc", "afoofoo", "DropPrevCoveredInsert",
func(r *TokenStreamRewriter){
r.InsertBeforeDefault(1, "foo")
r.ReplaceDefault(1, 2, "foo")
}),
NewLexerTest("abcc", "axbfoo", "LeaveAloneDisjointInsert",
func(r *TokenStreamRewriter){
r.InsertBeforeDefault(1, "x")
r.ReplaceDefault(2, 3, "foo")
}),
NewLexerTest("abcc", "axbfoo", "LeaveAloneDisjointInsert2",
func(r *TokenStreamRewriter){
r.ReplaceDefault(2, 3, "foo")
r.InsertBeforeDefault(1, "x")
}),
NewLexerTest("abc", "aby", "InsertBeforeTokenThenDeleteThatToken",
func(r *TokenStreamRewriter){
r.InsertBeforeDefault(2, "y")
r.DeleteDefaultPos(2)
}),
NewLexerTest("aa", "<b>a</b><b>a</b>", "DistinguishBetweenInsertAfterAndInsertBeforeToPreserverOrder",
func(r *TokenStreamRewriter){
r.InsertBeforeDefault(0, "<b>")
r.InsertAfterDefault(0, "</b>")
r.InsertBeforeDefault(1, "<b>")
r.InsertAfterDefault(1,"</b>")
}),
NewLexerTest("aa", "<b><p>a</p></b><b>a</b>", "DistinguishBetweenInsertAfterAndInsertBeforeToPreserverOrder2",
func(r *TokenStreamRewriter){
r.InsertBeforeDefault(0, "<p>")
r.InsertBeforeDefault(0, "<b>")
r.InsertAfterDefault(0, "</p>")
r.InsertAfterDefault(0, "</b>")
r.InsertBeforeDefault(1, "<b>")
r.InsertAfterDefault(1,"</b>")
}),
NewLexerTest("ab", "<div><b><p>a</p></b></div>!b", "DistinguishBetweenInsertAfterAndInsertBeforeToPreserverOrder2",
func(r *TokenStreamRewriter){
r.InsertBeforeDefault(0, "<p>")
r.InsertBeforeDefault(0, "<b>")
r.InsertBeforeDefault(0, "<div>")
r.InsertAfterDefault(0, "</p>")
r.InsertAfterDefault(0, "</b>")
r.InsertAfterDefault(0, "</div>")
r.InsertBeforeDefault(1, "!")
}),
}
for _, c := range tests {
t.Run(c.description, func(t *testing.T) {
rewriter := prepare_rewriter(c.input)
c.ops(rewriter)
if len(c.expected_exception) > 0 {
panic_tester(t, c.expected_exception, rewriter)
} else {
result := rewriter.GetTextDefault()
if result != c.expected {
t.Errorf("Expected:%s | Result: %s", c.expected, result)
}
}
})
}
}
// Suppress unused import error
var _ = fmt.Printf
var _ = unicode.IsLetter
var serializedLexerAtn = []uint16{
3, 24715, 42794, 33075, 47597, 16764, 15335, 30598, 22884, 2, 5, 15, 8,
1, 4, 2, 9, 2, 4, 3, 9, 3, 4, 4, 9, 4, 3, 2, 3, 2, 3, 3, 3, 3, 3, 4, 3,
4, 2, 2, 5, 3, 3, 5, 4, 7, 5, 3, 2, 2, 2, 14, 2, 3, 3, 2, 2, 2, 2, 5, 3,
2, 2, 2, 2, 7, 3, 2, 2, 2, 3, 9, 3, 2, 2, 2, 5, 11, 3, 2, 2, 2, 7, 13,
3, 2, 2, 2, 9, 10, 7, 99, 2, 2, 10, 4, 3, 2, 2, 2, 11, 12, 7, 100, 2, 2,
12, 6, 3, 2, 2, 2, 13, 14, 7, 101, 2, 2, 14, 8, 3, 2, 2, 2, 3, 2, 2,
}
var lexerDeserializer = NewATNDeserializer(nil)
var lexerAtn = lexerDeserializer.DeserializeFromUInt16(serializedLexerAtn)
var lexerChannelNames = []string{
"DEFAULT_TOKEN_CHANNEL", "HIDDEN",
}
var lexerModeNames = []string{
"DEFAULT_MODE",
}
var lexerLiteralNames = []string{
"", "'a'", "'b'", "'c'",
}
var lexerSymbolicNames = []string{
"", "A", "B", "C",
}
var lexerRuleNames = []string{
"A", "B", "C",
}
type LexerA struct {
*BaseLexer
channelNames []string
modeNames []string
// TODO: EOF string
}
var lexerDecisionToDFA = make([]*DFA, len(lexerAtn.DecisionToState))
func init() {
for index, ds := range lexerAtn.DecisionToState {
lexerDecisionToDFA[index] = NewDFA(ds, index)
}
}
func NewLexerA(input CharStream) *LexerA {
l := new(LexerA)
l.BaseLexer = NewBaseLexer(input)
l.Interpreter = NewLexerATNSimulator(l, lexerAtn, lexerDecisionToDFA, NewPredictionContextCache())
l.channelNames = lexerChannelNames
l.modeNames = lexerModeNames
l.RuleNames = lexerRuleNames
l.LiteralNames = lexerLiteralNames
l.SymbolicNames = lexerSymbolicNames
l.GrammarFileName = "LexerA.g4"
// TODO: l.EOF = antlr.TokenEOF
return l
}
// LexerA tokens.
const (
LexerAA = 1
LexerAB = 2
LexerAC = 3
)
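
The table above pins down the TokenStreamRewriter contract: operations are only queued and get reconciled when text is rendered; inserts at the same index render in reverse order of issue; an insert inside a previously replaced range is an error, while one at the replace's left edge survives. Below is a minimal Java sketch of the same semantics -- only TokenStreamRewriter, CharStreams, and CommonTokenStream are real runtime API, and LexerA stands in for a hypothetical generated Java twin of the lexer grammar above.

import org.antlr.v4.runtime.*;

public class RewriterDemo {
    public static void main(String[] args) {
        LexerA lexer = new LexerA(CharStreams.fromString("abc")); // hypothetical generated lexer
        CommonTokenStream tokens = new CommonTokenStream(lexer);
        tokens.fill();
        TokenStreamRewriter rewriter = new TokenStreamRewriter(tokens);
        rewriter.insertBefore(1, "x"); // queued; nothing is rewritten yet
        rewriter.insertBefore(0, "y");
        rewriter.insertBefore(1, "z"); // same index: renders before the earlier "x"
        System.out.println(rewriter.getText()); // "yazxbc", as in Combine3Inserts
    }
}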

View File

@@ -27,6 +27,7 @@
<plugin> <!-- create src jar -->
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-source-plugin</artifactId>
<version>3.0.1</version>
<executions>
<execution>
<goals>

View File

@@ -151,8 +151,8 @@ public abstract class CodePointCharStream implements CharStream {
/** Return the UTF-16 encoded string for the given interval */
@Override
public String getText(Interval interval) {
int startIdx = Math.min(interval.a, size - 1);
int len = Math.min(interval.b - interval.a + 1, size);
int startIdx = Math.min(interval.a, size);
int len = Math.min(interval.b - interval.a + 1, size - startIdx);
// We know the maximum code point in byteArray is U+00FF,
// so we can treat this as if it were ISO-8859-1, aka Latin-1,
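
The old clamp could still run off the end of the buffer: with size = 3 and a requested interval of [2,10], it computed startIdx = min(2, 2) = 2 but len = min(9, 3) = 3, i.e. three characters starting at index 2. The new form bounds the length by what actually remains after startIdx. A standalone sketch of the arithmetic over a plain String (not the runtime class itself), assuming a <= b:

static String getTextClamped(String buffer, int a, int b) {
    int size = buffer.length();
    int startIdx = Math.min(a, size);               // never start past the end
    int len = Math.min(b - a + 1, size - startIdx); // never read past the end
    return buffer.substring(startIdx, startIdx + len);
}

For size = 3 and [2,10] this yields startIdx = 2 and len = min(9, 1) = 1: exactly the one in-bounds character.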

View File

@@ -36,6 +36,21 @@ public class DefaultErrorStrategy implements ANTLRErrorStrategy {
protected IntervalSet lastErrorStates;
/**
* This field is used to propagate information about the lookahead following
* the previous match. Since prediction prefers completing the current rule
* to error recovery efforts, error reporting may occur later than the
* original point where it was discoverable. The original context is used to
* compute the true expected sets as though the reporting occurred as early
* as possible.
*/
protected ParserRuleContext nextTokensContext;
/**
* @see #nextTokensContext
*/
protected int nextTokensState;
/**
* {@inheritDoc}
*
@@ -225,7 +240,20 @@ public class DefaultErrorStrategy implements ANTLRErrorStrategy {
// try cheaper subset first; might get lucky. seems to shave a wee bit off
IntervalSet nextTokens = recognizer.getATN().nextTokens(s);
if (nextTokens.contains(Token.EPSILON) || nextTokens.contains(la)) {
if (nextTokens.contains(la)) {
// We are sure the token matches
nextTokensContext = null;
nextTokensState = ATNState.INVALID_STATE_NUMBER;
return;
}
if (nextTokens.contains(Token.EPSILON)) {
if (nextTokensContext == null) {
// It's possible the next token won't match; information tracked
// by sync is restricted for performance.
nextTokensContext = recognizer.getContext();
nextTokensState = recognizer.getState();
}
return;
}
@@ -450,7 +478,14 @@ public class DefaultErrorStrategy implements ANTLRErrorStrategy {
}
// even that didn't work; must throw the exception
throw new InputMismatchException(recognizer);
InputMismatchException e;
if (nextTokensContext == null) {
e = new InputMismatchException(recognizer);
} else {
e = new InputMismatchException(recognizer, nextTokensState, nextTokensContext);
}
throw e;
}
/**

View File

@@ -13,4 +13,10 @@ public class InputMismatchException extends RecognitionException {
super(recognizer, recognizer.getInputStream(), recognizer._ctx);
this.setOffendingToken(recognizer.getCurrentToken());
}
public InputMismatchException(Parser recognizer, int state, ParserRuleContext ctx) {
super(recognizer, recognizer.getInputStream(), ctx);
this.setOffendingState(state);
this.setOffendingToken(recognizer.getCurrentToken());
}
}
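
The extra constructor exists so DefaultErrorStrategy can attach the state and context captured earlier in sync(): RecognitionException derives its expected-token set from the offending state plus context, so the report reflects where the mismatch first became detectable rather than where it was finally raised. A hedged sketch of that effect, where state and ctx stand in for the nextTokensState/nextTokensContext values the strategy captured:

import org.antlr.v4.runtime.*;
import org.antlr.v4.runtime.misc.IntervalSet;

class ExpectedTokensDemo {
    static IntervalSet expectedAt(Parser parser, int state, ParserRuleContext ctx) {
        InputMismatchException e = new InputMismatchException(parser, state, ctx);
        // Computed from the offending state + context passed above,
        // not from wherever the parser happens to be now.
        return e.getExpectedTokens();
    }
}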

View File

@@ -270,7 +270,7 @@ public class ParserATNSimulator extends ATNSimulator {
public static final boolean retry_debug = false;
/** Just in case this optimization is bad, add an ENV variable to turn it off */
public static final boolean TURN_OFF_LR_LOOP_ENTRY_BRANCH_OPT = Boolean.parseBoolean(System.getenv("TURN_OFF_LR_LOOP_ENTRY_BRANCH_OPT"));
public static final boolean TURN_OFF_LR_LOOP_ENTRY_BRANCH_OPT = Boolean.parseBoolean(getSafeEnv("TURN_OFF_LR_LOOP_ENTRY_BRANCH_OPT"));
protected final Parser parser;
@@ -1541,11 +1541,6 @@ public class ParserATNSimulator extends ATNSimulator {
ATNConfig c = getEpsilonTarget(config, t, continueCollecting,
depth == 0, fullCtx, treatEofAsEpsilon);
if ( c!=null ) {
if (!t.isEpsilon() && !closureBusy.add(c)) {
// avoid infinite recursion for EOF* and EOF+
continue;
}
int newDepth = depth;
if ( config.state instanceof RuleStopState) {
assert !fullCtx;
@@ -1555,11 +1550,6 @@ public class ParserATNSimulator extends ATNSimulator {
// come in handy and we avoid evaluating context dependent
// preds if this is > 0.
if (!closureBusy.add(c)) {
// avoid infinite recursion for right-recursive rules
continue;
}
if (_dfa != null && _dfa.isPrecedenceDfa()) {
int outermostPrecedenceReturn = ((EpsilonTransition)t).outermostPrecedenceReturn();
if (outermostPrecedenceReturn == _dfa.atnStartState.ruleIndex) {
@@ -1568,15 +1558,28 @@ public class ParserATNSimulator extends ATNSimulator {
}
c.reachesIntoOuterContext++;
if (!closureBusy.add(c)) {
// avoid infinite recursion for right-recursive rules
continue;
}
configs.dipsIntoOuterContext = true; // TODO: can remove? only care when we add to set per middle of this method
assert newDepth > Integer.MIN_VALUE;
newDepth--;
if ( debug ) System.out.println("dips into outer ctx: "+c);
}
else if (t instanceof RuleTransition) {
// latch when newDepth goes negative - once we step out of the entry context we can't return
if (newDepth >= 0) {
newDepth++;
else {
if (!t.isEpsilon() && !closureBusy.add(c)) {
// avoid infinite recursion for EOF* and EOF+
continue;
}
if (t instanceof RuleTransition) {
// latch when newDepth goes negative - once we step out of the entry context we can't return
if (newDepth >= 0) {
newDepth++;
}
}
}
@@ -2178,4 +2181,14 @@ public class ParserATNSimulator extends ATNSimulator {
public Parser getParser() {
return parser;
}
public static String getSafeEnv(String envName) {
try {
return System.getenv(envName);
}
catch(SecurityException e) {
// use the default value
}
return null;
}
}
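
System.getenv can throw SecurityException under a restrictive SecurityManager, and because TURN_OFF_LR_LOOP_ENTRY_BRANCH_OPT is a static field, that exception would surface as an ExceptionInInitializerError and take the whole class down; getSafeEnv turns it into a null, which Boolean.parseBoolean reads as false. The same defensive pattern generalized with an explicit default, as a sketch rather than runtime API:

static boolean envFlag(String name, boolean defaultValue) {
    try {
        String v = System.getenv(name); // may throw SecurityException
        return v != null ? Boolean.parseBoolean(v) : defaultValue;
    }
    catch (SecurityException ignored) {
        return defaultValue; // no permission to read the environment: keep the default
    }
}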

View File

@@ -401,11 +401,11 @@ DoubleDict.prototype.set = function (a, b, o) {
function escapeWhitespace(s, escapeSpaces) {
s = s.replace("\t", "\\t");
s = s.replace("\n", "\\n");
s = s.replace("\r", "\\r");
s = s.replace(/\t/g, "\\t")
.replace(/\n/g, "\\n")
.replace(/\r/g, "\\r");
if (escapeSpaces) {
s = s.replace(" ", "\u00B7");
s = s.replace(/ /g, "\u00B7");
}
return s;
}
@@ -443,4 +443,4 @@ exports.hashStuff = hashStuff;
exports.escapeWhitespace = escapeWhitespace;
exports.arrayToString = arrayToString;
exports.titleCase = titleCase;
exports.equalArrays = equalArrays;
exports.equalArrays = equalArrays;
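
The underlying pitfall: in JavaScript, String.prototype.replace with a string pattern replaces only the first occurrence, so the old code mangled any input containing more than one tab, newline, carriage return, or space; the /g regexes replace them all. For comparison, a Java rendering of the same helper (a sketch, not the runtime's own code) has no such trap, because String.replace(CharSequence, CharSequence) already substitutes every occurrence:

static String escapeWhitespace(String s, boolean escapeSpaces) {
    s = s.replace("\t", "\\t")
         .replace("\n", "\\n")
         .replace("\r", "\\r");
    if (escapeSpaces) {
        s = s.replace(" ", "\u00B7"); // middle dot, as in the JS version
    }
    return s;
}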

View File

@@ -218,6 +218,13 @@ class Parser (Recognizer):
self._ctx.exitRule(listener)
listener.exitEveryRule(self._ctx)
# Gets the number of syntax errors reported during parsing. This value is
# incremented each time {@link #notifyErrorListeners} is called.
#
# @see #notifyErrorListeners
#
def getNumberOfSyntaxErrors(self):
return self._syntaxErrors
def getTokenFactory(self):
return self._input.tokenSource._factory
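
This getter mirrors Parser.getNumberOfSyntaxErrors() in the Java runtime (the same change lands in the other Python runtime below); the usual pattern is to check it after parsing to decide whether the tree is trustworthy. Java shown for concreteness, with ExprParser and prog as hypothetical generated names:

ExprParser parser = new ExprParser(tokens);
ParseTree tree = parser.prog();
if (parser.getNumberOfSyntaxErrors() > 0) {
    System.err.println("parse failed; skipping the tree walk");
}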

View File

@@ -36,14 +36,13 @@ class RuleTagToken(Token):
self.tokenIndex = -1 # from 0..n-1 of the token object in the input stream
self.line = 0 # line=1..n of the 1st character
self.column = -1 # beginning of the line at which it occurs, 0..n-1
self.label = label
self.label = unicode(label)
self._text = self.getText() # text of the token.
self.ruleName = ruleName
self.ruleName = unicode(ruleName)
def getText(self):
if self.label is None:
return "<" + self.ruleName + ">"
return u"<" + self.ruleName + u">"
else:
return "<" + self.label + ":" + self.ruleName + ">"
return u"<" + self.label + ":" + self.ruleName + u">"

View File

@@ -24,8 +24,8 @@ class TokenTagToken(CommonToken):
#
def __init__(self, tokenName, type, label=None):
super(TokenTagToken, self).__init__(type=type)
self.tokenName = tokenName
self.label = label
self.tokenName = unicode(tokenName)
self.label = unicode(label)
self._text = self.getText()
#
@@ -36,9 +36,9 @@ class TokenTagToken(CommonToken):
#
def getText(self):
if self.label is None:
return "<" + self.tokenName + ">"
return u"<" + self.tokenName + u">"
else:
return "<" + self.label + ":" + self.tokenName + ">"
return u"<" + self.label + u":" + self.tokenName + u">"
# <p>The implementation for {@link TokenTagToken} returns a string of the form
# {@code tokenName:type}.</p>

View File

@@ -108,13 +108,13 @@ class TerminalNodeImpl(TerminalNode):
return visitor.visitTerminal(self)
def getText(self):
return self.symbol.text
return unicode(self.symbol.text)
def __unicode__(self):
if self.symbol.type == Token.EOF:
return "<EOF>"
return u"<EOF>"
else:
return self.symbol.text
return unicode(self.symbol.text)
# Represents a token that was consumed during resynchronization
# rather than during a valid match operation. For example,

View File

@@ -227,6 +227,14 @@ class Parser (Recognizer):
listener.exitEveryRule(self._ctx)
# Gets the number of syntax errors reported during parsing. This value is
# incremented each time {@link #notifyErrorListeners} is called.
#
# @see #notifyErrorListeners
#
def getNumberOfSyntaxErrors(self):
return self._syntaxErrors
def getTokenFactory(self):
return self._input.tokenSource._factory

View File

@@ -12,7 +12,7 @@ from antlr4.atn.LexerATNSimulator import LexerATNSimulator
from antlr4.atn.ParserATNSimulator import ParserATNSimulator
from antlr4.atn.PredictionMode import PredictionMode
from antlr4.PredictionContext import PredictionContextCache
from antlr4.ParserRuleContext import ParserRuleContext
from antlr4.ParserRuleContext import RuleContext, ParserRuleContext
from antlr4.tree.Tree import ParseTreeListener, ParseTreeVisitor, ParseTreeWalker, TerminalNode, ErrorNode, RuleNode
from antlr4.error.Errors import RecognitionException, IllegalStateException, NoViableAltException
from antlr4.error.ErrorStrategy import BailErrorStrategy

View File

@@ -1 +1,4 @@
.build/
Antlr4.xcodeproj/
Tests/Antlr4Tests/gen/
xcuserdata/

View File

@@ -1,3 +1,4 @@
// swift-tools-version:4.0
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.
@@ -5,15 +6,19 @@
import PackageDescription
let package = Package(
name: "Antlr4"
)
products.append(
Product(
name: "Antlr4",
type: .Library(.Dynamic),
modules: [
"Antlr4"
]
)
name: "Antlr4",
products: [
.library(
name: "Antlr4",
type: .dynamic,
targets: ["Antlr4"]),
],
targets: [
.target(
name: "Antlr4",
dependencies: []),
.testTarget(
name: "Antlr4Tests",
dependencies: ["Antlr4"]),
]
)

View File

@@ -1,20 +1,23 @@
///
/// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
/// Use of this file is governed by the BSD 3-clause license that
/// can be found in the LICENSE.txt file in the project root.
/// How to emit recognition errors.
///
public protocol ANTLRErrorListener: class {
///
/// Upon syntax error, notify any interested parties. This is not how to
/// recover from errors or compute error messages. {@link org.antlr.v4.runtime.ANTLRErrorStrategy}
/// recover from errors or compute error messages. _org.antlr.v4.runtime.ANTLRErrorStrategy_
/// specifies how to recover from syntax errors and how to compute error
/// messages. This listener's job is simply to emit a computed message,
/// though it has enough information to create its own message in many cases.
///
/// <p>The {@link org.antlr.v4.runtime.RecognitionException} is non-null for all syntax errors except
///
/// The _RecognitionException_ is non-null for all syntax errors except
/// when we discover mismatched token errors that we can recover from
/// in-line, without returning from the surrounding rule (via the single
/// token insertion and deletion mechanism).</p>
///
/// token insertion and deletion mechanism).
///
/// - parameter recognizer:
/// What parser got the error. From this
/// object, you can access the context as well
@@ -22,7 +25,7 @@ public protocol ANTLRErrorListener: class {
/// - parameter offendingSymbol:
/// The offending token in the input token
/// stream, unless recognizer is a lexer (then it's null). If
/// no viable alternative error, {@code e} has token at which we
/// no viable alternative error, `e` has token at which we
/// started production for the decision.
/// - parameter line:
/// The line number in the input where the error occurred.
@@ -35,116 +38,122 @@ public protocol ANTLRErrorListener: class {
/// the reporting of an error. It is null in the case where
/// the parser was able to recover in line without exiting the
/// surrounding rule.
func syntaxError<T:ATNSimulator>(_ recognizer: Recognizer<T>,
_ offendingSymbol: AnyObject?,
_ line: Int,
_ charPositionInLine: Int,
_ msg: String,
_ e: AnyObject?// RecognitionException?
///
func syntaxError<T>(_ recognizer: Recognizer<T>,
_ offendingSymbol: AnyObject?,
_ line: Int,
_ charPositionInLine: Int,
_ msg: String,
_ e: AnyObject?
)
///
/// This method is called by the parser when a full-context prediction
/// results in an ambiguity.
///
/// <p>Each full-context prediction which does not result in a syntax error
/// will call either {@link #reportContextSensitivity} or
/// {@link #reportAmbiguity}.</p>
///
/// <p>When {@code ambigAlts} is not null, it contains the set of potentially
///
/// Each full-context prediction which does not result in a syntax error
/// will call either _#reportContextSensitivity_ or
/// _#reportAmbiguity_.
///
/// When `ambigAlts` is not null, it contains the set of potentially
/// viable alternatives identified by the prediction algorithm. When
/// {@code ambigAlts} is null, use {@link org.antlr.v4.runtime.atn.ATNConfigSet#getAlts} to obtain the
/// represented alternatives from the {@code configs} argument.</p>
///
/// <p>When {@code exact} is {@code true}, <em>all</em> of the potentially
/// `ambigAlts` is null, use _org.antlr.v4.runtime.atn.ATNConfigSet#getAlts_ to obtain the
/// represented alternatives from the `configs` argument.
///
/// When `exact` is `true`, __all__ of the potentially
/// viable alternatives are truly viable, i.e. this is reporting an exact
/// ambiguity. When {@code exact} is {@code false}, <em>at least two</em> of
/// ambiguity. When `exact` is `false`, __at least two__ of
/// the potentially viable alternatives are viable for the current input, but
/// the prediction algorithm terminated as soon as it determined that at
/// least the <em>minimum</em> potentially viable alternative is truly
/// viable.</p>
///
/// <p>When the {@link org.antlr.v4.runtime.atn.PredictionMode#LL_EXACT_AMBIG_DETECTION} prediction
/// least the __minimum__ potentially viable alternative is truly
/// viable.
///
/// When the _org.antlr.v4.runtime.atn.PredictionMode#LL_EXACT_AMBIG_DETECTION_ prediction
/// mode is used, the parser is required to identify exact ambiguities so
/// {@code exact} will always be {@code true}.</p>
///
/// <p>This method is not used by lexers.</p>
///
/// `exact` will always be `true`.
///
/// This method is not used by lexers.
///
/// - parameter recognizer: the parser instance
/// - parameter dfa: the DFA for the current decision
/// - parameter startIndex: the input index where the decision started
/// - parameter stopIndex: the input index where the ambiguity was identified
/// - parameter exact: {@code true} if the ambiguity is exactly known, otherwise
/// {@code false}. This is always {@code true} when
/// {@link org.antlr.v4.runtime.atn.PredictionMode#LL_EXACT_AMBIG_DETECTION} is used.
/// - parameter ambigAlts: the potentially ambiguous alternatives, or {@code null}
/// - parameter exact: `true` if the ambiguity is exactly known, otherwise
/// `false`. This is always `true` when
/// _org.antlr.v4.runtime.atn.PredictionMode#LL_EXACT_AMBIG_DETECTION_ is used.
/// - parameter ambigAlts: the potentially ambiguous alternatives, or `null`
/// to indicate that the potentially ambiguous alternatives are the complete
/// set of represented alternatives in {@code configs}
/// set of represented alternatives in `configs`
/// - parameter configs: the ATN configuration set where the ambiguity was
/// identified
///
func reportAmbiguity(_ recognizer: Parser,
_ dfa: DFA,
_ startIndex: Int,
_ stopIndex: Int,
_ exact: Bool,
_ ambigAlts: BitSet,
_ configs: ATNConfigSet) throws
_ configs: ATNConfigSet)
///
/// This method is called when an SLL conflict occurs and the parser is about
/// to use the full context information to make an LL decision.
///
/// <p>If one or more configurations in {@code configs} contains a semantic
///
/// If one or more configurations in `configs` contains a semantic
/// predicate, the predicates are evaluated before this method is called. The
/// subset of alternatives which are still viable after predicates are
/// evaluated is reported in {@code conflictingAlts}.</p>
///
/// <p>This method is not used by lexers.</p>
///
/// evaluated is reported in `conflictingAlts`.
///
/// This method is not used by lexers.
///
/// - parameter recognizer: the parser instance
/// - parameter dfa: the DFA for the current decision
/// - parameter startIndex: the input index where the decision started
/// - parameter stopIndex: the input index where the SLL conflict occurred
/// - parameter conflictingAlts: The specific conflicting alternatives. If this is
/// {@code null}, the conflicting alternatives are all alternatives
/// represented in {@code configs}. At the moment, conflictingAlts is non-null
/// `null`, the conflicting alternatives are all alternatives
/// represented in `configs`. At the moment, conflictingAlts is non-null
/// (for the reference implementation, but Sam's optimized version can see this
/// as null).
/// - parameter configs: the ATN configuration set where the SLL conflict was
/// detected
///
func reportAttemptingFullContext(_ recognizer: Parser,
_ dfa: DFA,
_ startIndex: Int,
_ stopIndex: Int,
_ conflictingAlts: BitSet?,
_ configs: ATNConfigSet) throws
_ configs: ATNConfigSet)
///
/// This method is called by the parser when a full-context prediction has a
/// unique result.
///
/// <p>Each full-context prediction which does not result in a syntax error
/// will call either {@link #reportContextSensitivity} or
/// {@link #reportAmbiguity}.</p>
///
/// <p>For prediction implementations that only evaluate full-context
///
/// Each full-context prediction which does not result in a syntax error
/// will call either _#reportContextSensitivity_ or
/// _#reportAmbiguity_.
///
/// For prediction implementations that only evaluate full-context
/// predictions when an SLL conflict is found (including the default
/// {@link org.antlr.v4.runtime.atn.ParserATNSimulator} implementation), this method reports cases
/// _org.antlr.v4.runtime.atn.ParserATNSimulator_ implementation), this method reports cases
/// where SLL conflicts were resolved to unique full-context predictions,
/// i.e. the decision was context-sensitive. This report does not necessarily
/// indicate a problem, and it may appear even in completely unambiguous
/// grammars.</p>
///
/// <p>{@code configs} may have more than one represented alternative if the
/// grammars.
///
/// `configs` may have more than one represented alternative if the
/// full-context prediction algorithm does not evaluate predicates before
/// beginning the full-context prediction. In all cases, the final prediction
/// is passed as the {@code prediction} argument.</p>
///
/// <p>Note that the definition of "context sensitivity" in this method
/// differs from the concept in {@link org.antlr.v4.runtime.atn.DecisionInfo#contextSensitivities}.
/// is passed as the `prediction` argument.
///
/// Note that the definition of "context sensitivity" in this method
/// differs from the concept in _org.antlr.v4.runtime.atn.DecisionInfo#contextSensitivities_.
/// This method reports all instances where an SLL conflict occurred but LL
/// parsing produced a unique result, whether or not that unique result
/// matches the minimum alternative in the SLL conflicting set.</p>
///
/// <p>This method is not used by lexers.</p>
///
/// matches the minimum alternative in the SLL conflicting set.
///
/// This method is not used by lexers.
///
/// - parameter recognizer: the parser instance
/// - parameter dfa: the DFA for the current decision
/// - parameter startIndex: the input index where the decision started
@@ -153,10 +162,11 @@ public protocol ANTLRErrorListener: class {
/// - parameter prediction: the unambiguous result of the full-context prediction
/// - parameter configs: the ATN configuration set where the unambiguous prediction
/// was determined
///
func reportContextSensitivity(_ recognizer: Parser,
_ dfa: DFA,
_ startIndex: Int,
_ stopIndex: Int,
_ prediction: Int,
_ configs: ATNConfigSet) throws
_ configs: ATNConfigSet)
}
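
For comparison with the protocol above: the Java runtime expresses the same contract through BaseErrorListener, an adapter with empty default bodies, so an implementation overrides only the callbacks it needs. A minimal hedged sketch (the API calls are real org.antlr.v4.runtime members; only the class name is made up):

import org.antlr.v4.runtime.*;

class LoggingErrorListener extends BaseErrorListener {
    @Override
    public void syntaxError(Recognizer<?, ?> recognizer, Object offendingSymbol,
                            int line, int charPositionInLine, String msg,
                            RecognitionException e) {
        // Same parameters the Swift protocol documents above.
        System.err.printf("line %d:%d %s%n", line, charPositionInLine, msg);
    }
}

Typical wiring: parser.removeErrorListeners(); parser.addErrorListener(new LoggingErrorListener());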

View File

@@ -1,99 +1,119 @@
///
///
/// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
/// Use of this file is governed by the BSD 3-clause license that
/// can be found in the LICENSE.txt file in the project root.
/// The interface for defining strategies to deal with syntax errors encountered
/// during a parse by ANTLR-generated parsers. We distinguish between three
/// different kinds of errors:
///
/// <ul>
/// <li>The parser could not figure out which path to take in the ATN (none of
/// the available alternatives could possibly match)</li>
/// <li>The current input does not match what we were looking for</li>
/// <li>A predicate evaluated to false</li>
/// </ul>
///
/// Implementations of this interface report syntax errors by calling
/// {@link org.antlr.v4.runtime.Parser#notifyErrorListeners}.
///
/// <p>TODO: what to do about lexers</p>
///
///
///
///
/// The interface for defining strategies to deal with syntax errors
/// encountered during a parse by ANTLR-generated parsers. We distinguish between three
/// different kinds of errors:
///
/// * The parser could not figure out which path to take in the ATN (none of
/// the available alternatives could possibly match)
/// * The current input does not match what we were looking for
/// * A predicate evaluated to false
///
/// Implementations of this interface report syntax errors by calling
/// _org.antlr.v4.runtime.Parser#notifyErrorListeners_.
///
/// TODO: what to do about lexers
///
public protocol ANTLRErrorStrategy {
/// Reset the error handler state for the specified {@code recognizer}.
///
/// Reset the error handler state for the specified `recognizer`.
/// - parameter recognizer: the parser instance
///
func reset(_ recognizer: Parser)
///
/// This method is called when an unexpected symbol is encountered during an
/// inline match operation, such as {@link org.antlr.v4.runtime.Parser#match}. If the error
/// inline match operation, such as _org.antlr.v4.runtime.Parser#match_. If the error
/// strategy successfully recovers from the match failure, this method
/// returns the {@link org.antlr.v4.runtime.Token} instance which should be treated as the
/// returns the _org.antlr.v4.runtime.Token_ instance which should be treated as the
/// successful result of the match.
///
/// <p>This method handles the consumption of any tokens - the caller should
/// <b>not</b> call {@link org.antlr.v4.runtime.Parser#consume} after a successful recovery.</p>
///
/// <p>Note that the calling code will not report an error if this method
///
/// This method handles the consumption of any tokens - the caller should
/// __not__ call _org.antlr.v4.runtime.Parser#consume_ after a successful recovery.
///
/// Note that the calling code will not report an error if this method
/// returns successfully. The error strategy implementation is responsible
/// for calling {@link org.antlr.v4.runtime.Parser#notifyErrorListeners} as appropriate.</p>
///
/// for calling _org.antlr.v4.runtime.Parser#notifyErrorListeners_ as appropriate.
///
/// - parameter recognizer: the parser instance
/// - org.antlr.v4.runtime.RecognitionException if the error strategy was not able to
/// - throws: _RecognitionException_ if the error strategy was not able to
/// recover from the unexpected input symbol
///
@discardableResult
func recoverInline(_ recognizer: Parser) throws -> Token // RecognitionException;
func recoverInline(_ recognizer: Parser) throws -> Token
/// This method is called to recover from exception {@code e}. This method is
/// called after {@link #reportError} by the default exception handler
///
/// This method is called to recover from exception `e`. This method is
/// called after _#reportError_ by the default exception handler
/// generated for a rule method.
///
///
/// - seealso: #reportError
///
///
/// - parameter recognizer: the parser instance
/// - parameter e: the recognition exception to recover from
/// - org.antlr.v4.runtime.RecognitionException if the error strategy could not recover from
/// - throws: _RecognitionException_ if the error strategy could not recover from
/// the recognition exception
func recover(_ recognizer: Parser, _ e: AnyObject) throws // RecognitionException;
///
func recover(_ recognizer: Parser, _ e: RecognitionException) throws
///
/// This method provides the error handler with an opportunity to handle
/// syntactic or semantic errors in the input stream before they result in a
/// {@link org.antlr.v4.runtime.RecognitionException}.
///
/// <p>The generated code currently contains calls to {@link #sync} after
/// entering the decision state of a closure block ({@code (...)*} or
/// {@code (...)+}).</p>
///
/// <p>For an implementation based on Jim Idle's "magic sync" mechanism, see
/// {@link org.antlr.v4.runtime.DefaultErrorStrategy#sync}.</p>
///
/// _org.antlr.v4.runtime.RecognitionException_.
///
/// The generated code currently contains calls to _#sync_ after
/// entering the decision state of a closure block (`(...)*` or
/// `(...)+`).
///
/// For an implementation based on Jim Idle's "magic sync" mechanism, see
/// _org.antlr.v4.runtime.DefaultErrorStrategy#sync_.
///
/// - seealso: org.antlr.v4.runtime.DefaultErrorStrategy#sync
///
///
/// - parameter recognizer: the parser instance
/// - org.antlr.v4.runtime.RecognitionException if an error is detected by the error
/// - throws: _RecognitionException_ if an error is detected by the error
/// strategy but cannot be automatically recovered at the current state in
/// the parsing process
func sync(_ recognizer: Parser) throws // RecognitionException;
///
func sync(_ recognizer: Parser) throws
///
/// Tests whether or not `recognizer` is in the process of recovering
/// from an error. In error recovery mode, {@link org.antlr.v4.runtime.Parser#consume} adds
/// from an error. In error recovery mode, _org.antlr.v4.runtime.Parser#consume_ adds
/// symbols to the parse tree by calling
/// {@link Parser#createErrorNode(ParserRuleContext, Token)} then
/// {@link ParserRuleContext#addErrorNode(ErrorNode)} instead of
/// {@link Parser#createTerminalNode(ParserRuleContext, Token)}.
///
/// _Parser#createErrorNode(ParserRuleContext, Token)_ then
/// _ParserRuleContext#addErrorNode(ErrorNode)_ instead of
/// _Parser#createTerminalNode(ParserRuleContext, Token)_.
///
/// - parameter recognizer: the parser instance
/// - returns: {@code true} if the parser is currently recovering from a parse
/// error, otherwise {@code false}
/// - returns: `true` if the parser is currently recovering from a parse
/// error, otherwise `false`
///
func inErrorRecoveryMode(_ recognizer: Parser) -> Bool
///
/// This method is called when the parser successfully matches an input
/// symbol.
///
///
/// - parameter recognizer: the parser instance
///
func reportMatch(_ recognizer: Parser)
/// Report any kind of {@link org.antlr.v4.runtime.RecognitionException}. This method is called by
///
/// Report any kind of _org.antlr.v4.runtime.RecognitionException_. This method is called by
/// the default exception handler generated for a rule method.
///
///
/// - parameter recognizer: the parser instance
/// - parameter e: the recognition exception to report
func reportError(_ recognizer: Parser, _ e: AnyObject)
///
func reportError(_ recognizer: Parser, _ e: RecognitionException)
}
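
Concretely, a strategy is installed on the parser with setErrorHandler; the stock choices are DefaultErrorStrategy (report, recover, keep going) and BailErrorStrategy (abandon the parse at the first error). A common two-stage idiom, sketched in Java terms with MyParser and prog as hypothetical generated names (ParseCancellationException lives in org.antlr.v4.runtime.misc):

MyParser parser = new MyParser(tokens);
parser.setErrorHandler(new BailErrorStrategy());
try {
    parser.prog();                    // fast path: bail on the first error
}
catch (ParseCancellationException pce) {
    tokens.seek(0);                   // rewind the token stream
    parser.reset();
    parser.setErrorHandler(new DefaultErrorStrategy());
    parser.prog();                    // slow path: full recovery and reporting
}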

Some files were not shown because too many files have changed in this diff.