Fix merge failure

Peter Boyer 2016-10-12 09:53:43 -04:00
commit 42347c078b
155 changed files with 1712 additions and 765 deletions

.gitignore
View File

@ -1,5 +1,11 @@
# Maven build folders
target/
# ... but not code generation targets
!tool/src/org/antlr/v4/codegen/target/
# Node.js (npm and typings) cached dependencies
node_modules/
typings/
# Ant build folders
build/
@ -10,14 +16,37 @@ user.build.properties
# MacOSX files
.DS_Store
# Python
*.pyc
## Python, selected lines from https://raw.githubusercontent.com/github/gitignore/master/Python.gitignore
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
# CSharp
bin/
obj/
## CSharp and VisualStudio, selected lines from https://raw.githubusercontent.com/github/gitignore/master/VisualStudio.gitignore
# User-specific files
*.suo
*.user
*.userosscache
*.sln.docstates
# User-specific files (MonoDevelop/Xamarin Studio)
*.userprefs
# Build results
[Dd]ebug/
[Dd]ebugPublic/
[Rr]elease/
[Rr]eleases/
x64/
x86/
bld/
[Bb]in/
[Oo]bj/
[Ll]og/
# Visual Studio 2015 cache/options directory
.vs/
# NetBeans user configuration files
nbactions*.xml
/nbproject/private/
@ -57,3 +86,7 @@ bilder.pyc
bild.log
bild_output.txt
# VSCode Java plugin temporary files
javac-services.0.log
javac-services.0.log.lck

View File

@ -6,14 +6,16 @@ jdk:
- openjdk6
- oraclejdk7
- oraclejdk8
before_install:
before_install:
- sudo apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys 3FA7E0328081BFF6A14DA29AA6A19B38D3D831EF
- sudo add-apt-repository ppa:fkrull/deadsnakes -y
- sudo add-apt-repository ppa:rwky/nodejs -y
- sudo apt-get update -qq
- sudo apt-get install -qq python3.4
- sudo apt-get update -qq
- sudo apt-get install -qq python3.5
- sudo apt-get install -qq nodejs
- echo "deb http://download.mono-project.com/repo/debian wheezy/snapshots/3.12.1 main" | sudo tee /etc/apt/sources.list.d/mono-xamarin.list
- sudo apt-get install -qq mono-complete
- eval "$(sudo gimme 1.6.2)"
- go version ; go env
- python --version
- python3 --version

View File

@ -40,18 +40,18 @@ ANTLR project lead and supreme dictator for life
## Useful information
* [Release notes](https://github.com/antlr/antlr4/releases)
* [Getting started with v4](https://raw.githubusercontent.com/antlr/antlr4/master/doc/getting-started.md)
* [Getting started with v4](https://github.com/antlr/antlr4/blob/master/doc/getting-started.md)
* [Official site](http://www.antlr.org/)
* [Documentation](https://raw.githubusercontent.com/antlr/antlr4/master/doc/index.md)
* [FAQ](https://raw.githubusercontent.com/antlr/antlr4/master/doc/faq/index.md)
* [Documentation](https://github.com/antlr/antlr4/blob/master/doc/index.md)
* [FAQ](https://github.com/antlr/antlr4/blob/master/doc/faq/index.md)
* [API](http://www.antlr.org/api/Java/index.html)
* [ANTLR v3](http://www.antlr3.org/)
* [v3 to v4 Migration, differences](https://raw.githubusercontent.com/antlr/antlr4/master/doc/faq/general.md)
* [v3 to v4 Migration, differences](https://github.com/antlr/antlr4/blob/master/doc/faq/general.md)
You might also find the following pages useful, particularly if you want to mess around with the various target languages.
* [How to build ANTLR itself](https://raw.githubusercontent.com/antlr/antlr4/master/doc/building-antlr.md)
* [How we create and deploy an ANTLR release](https://raw.githubusercontent.com/antlr/antlr4/master/doc/releasing-antlr.md)
* [How to build ANTLR itself](https://github.com/antlr/antlr4/blob/master/doc/building-antlr.md)
* [How we create and deploy an ANTLR release](https://github.com/antlr/antlr4/blob/master/doc/releasing-antlr.md)
## The Definitive ANTLR 4 Reference
@ -61,8 +61,12 @@ You can buy the book [The Definitive ANTLR 4 Reference](http://amzn.com/19343569
You will find the [Book source code](http://pragprog.com/titles/tpantlr2/source_code) useful.
## Additional grammars
[This repository](https://github.com/antlr/grammars-v4) is a collection of grammars without actions where the
root directory name is the all-lowercase name of the language parsed
by the grammar. For example, java, cpp, csharp, c, etc...
Travis Status
---------
<a href="https://travis-ci.org/antlr/antlr4"><img src="https://api.travis-ci.org/antlr/antlr4.png"></a>

View File

@ -34,7 +34,7 @@
<parent>
<groupId>org.antlr</groupId>
<artifactId>antlr4-master</artifactId>
<version>4.5.2-SNAPSHOT</version>
<version>4.5.4-SNAPSHOT</version>
</parent>
<artifactId>antlr4-maven-plugin</artifactId>
<packaging>maven-plugin</packaging>

View File

@ -372,6 +372,22 @@ public class Antlr4Mojo extends AbstractMojo {
scan.addSourceMapping(mapping);
Set<File> grammarFiles = scan.getIncludedSources(sourceDirectory, null);
// We don't want the plugin to run for every grammar, regardless of whether
// it's changed since the last compilation. Check the mtime of the tokens vs
// the grammar file mtime to determine whether we even need to execute.
Set<File> grammarFilesToProcess = new HashSet<File>();
for (File grammarFile : grammarFiles) {
String tokensFileName = grammarFile.getName().split("\\.")[0] + ".tokens";
File outputFile = new File(outputDirectory, tokensFileName);
if ( (! outputFile.exists()) ||
outputFile.lastModified() < grammarFile.lastModified() ) {
grammarFilesToProcess.add(grammarFile);
}
}
grammarFiles = grammarFilesToProcess;
if (grammarFiles.isEmpty()) {
getLog().info("No grammars to process");
return Collections.emptyList();

View File

@ -83,6 +83,16 @@ YYYY/MM/DD, github id, Full name, email
2015/10/12, KvanTTT, Ivan Kochurkin, ivan.kochurkin@gmail.com
2015/10/21, martin-probst, Martin Probst, martin-probst@web.de
2015/10/21, hkff, Walid Benghabrit, walid.benghabrit@mines-nantes.fr
2015/11/12, cooperra, Robbie Cooper, cooperra@users.noreply.github.com
2015/11/25, abego, Udo Borkowski, ub@abego.org
2015/12/17, sebadur, Sebastian Badur, sebadur@users.noreply.github.com
2015/12/23, pboyer, Peter Boyer, peter.b.boyer@gmail.com
2015/12/24, dtymon, David Tymon, david.tymon@gmail.com
2016/02/18, reitzig, Raphael Reitzig, reitzig[at]cs.uni-kl.de
2016/03/27, beardlybread, Bradley Steinbacher, bradley.j.steinbacher@gmail.com
2016/03/29, msteiger, Martin Steiger, antlr@martin-steiger.de
2016/03/28, gagern, Martin von Gagern, gagern@ma.tum.de
2016/07/18, willfaught, Will Faught, will.faught@gmail.com
2016/08/08, wjkohnen, Wolfgang Johannes Kohnen, wjkohnen-go-antlr@ko-sys.com
2016/08/11, BurtHarris, Ralph "Burt" Harris, Burt_Harris_antlr4@azxs.33mail.com
2016/08/19, andjo403, Andreas Jonson, andjo403@hotmail.com

View File

@ -176,13 +176,13 @@ What remains to be done is have our validate function actually validate the inpu
To start with, let's load ANTLR and your parser, listener, etc. Easy, since you could write:
```
```js
var antlr4 = require('antlr4/index');
```
This may work, but it's actually unreliable. The reason is that the require function used by ANTLR, which exactly mimics the NodeJS require function, uses a different syntax than the require function that comes with ACE. So we need to bring in a require function that conforms to the NodeJS syntax. I personally use one that comes from Torben Haase's Honey project, which you can find here. But hey, now we're going to have 2 'require' functions not compatible with each other! Indeed, this is why you need to take special care, as follows:
```
```js
// load nodejs compatible require
var ace_require = require;
require = undefined;
@ -190,7 +190,9 @@ var Honey = { 'requirePath': ['..'] }; // walk up to js folder, see Honey docs
importScripts("../lib/require.js");
var antlr4_require = require;
require = ace_require;
```
Now it's safe to load antlr, and the parsers generated for your language. Assuming that your language files (generated or hand-built) are in a folder with an index.js file that calls require for each file, your parser loading code can be as simple as follows:
```js
// load antlr4 and myLanguage
var antlr4, mylanguage;
try {
@ -200,10 +202,12 @@ try {
} finally {
require = ace_require;
}
```
Please note the try-finally construct. ANTLR uses 'require' synchronously so it's perfectly safe to ignore the ACE 'require' while running ANTLR code. ACE itself does not guarantee synchronous execution, so you are much safer always switching 'require' back to 'ace_require'.
Now detecting deep syntax errors in your code is a task for your ANTLR listener or visitor or whatever piece of code you've delegated this to. We're not going to describe this here, since it would require some knowledge of your language. However, detecting grammar syntax errors is something ANTLR does beautifully (isn't that why you went for ANTLR in the first place?). So what we will illustrate here is how to report grammar syntax errors. I have no doubt that from there, you will be able to extend the validator to suit your specific needs.
Whenever ANTLR encounters an unexpected token, it fires an error. By default, the error is routed to an error listener which simply writes to the console.
What we need to do is replace this listener with our own listener, so we can route errors to the ACE editor. First, let's create such a listener:
```js
// class for gathering errors and posting them to ACE editor
var AnnotatingErrorListener = function(annotations) {
antlr4.error.ErrorListener.call(this);
@ -222,9 +226,9 @@ AnnotatingErrorListener.prototype.syntaxError = function(recognizer, offendingSy
type: "error"
});
};
```
With this, all that remains to be done is plug the listener in when we parse the code. Here is how I do it:
```js
var validate = function(input) {
var stream = new antlr4.InputStream(input);
var lexer = new mylanguage.MyLexer(stream);
@ -237,6 +241,7 @@ var validate = function(input) {
parser.parseMyRule();
return annotations;
};
```
You know what? That's it! You now have an ACE editor that does syntax validation using ANTLR! I hope you find this useful, and simple enough to get started.
What I did not address here is packaging, not something I'm an expert at. The good news is that it makes development simple, since I don't have to run any compilation process. I just edit my code, reload my editor page, and check how it goes.
Now wait, hey! How do you debug this? Well, as usual, using Chrome, since neither Firefox nor Safari is able to debug worker code. What a shame...

View File

@ -2,7 +2,7 @@
In Chapter 10, Attributes and Actions, we learned how to embed actions within grammars and looked at the most common token and rule attributes. This section summarizes the important syntax and semantics from that chapter and provides a complete list of all available attributes. (You can learn more about actions in the grammar from the free excerpt on listeners and actions.)
Actions are blocks of text written in the target language and enclosed in curly braces. The recognizer triggers them according to their locations within the grammar. For example, the following rule emits found a decl after the parser has seen a valid declaration:
Actions are blocks of text written in the target language and enclosed in curly braces. The recognizer triggers them according to their locations within the grammar. For example, the following rule emits "found a decl" after the parser has seen a valid declaration:
```
decl: type ID ';' {System.out.println("found a decl");} ;

View File

@ -63,7 +63,22 @@ TestTemplates ::= [
...
```
For every name mentioned, you will find a `.stg` file with the actual test. E.g., `Sets/StarSet.stg`:
For every name mentioned, you will find a `.stg` file with the actual test template. E.g., `Sets/StarSet.stg`.
Each `.stg` file describes the following mandatory elements for the test:
- the test type: "Parser" or "Lexer"
- some ANTLR options, such as "Debug"
- the grammar
- the start rule
- the input, i.e. the text to parse
- the expected output
- the expected errors
The grammar can itself contain template expressions such as <something>.
The test generator replaces these with the corresponding values from the target language template (see below).
It then generates a unit test in which the grammar, the input and the expected output and errors are inlined.
Here is an example test template:
```
TestType() ::= "Parser"
@ -92,6 +107,7 @@ a : ('a'|'b')* 'c' {<InputText():writeln()>} ;
>>
```
### Cross-language actions embedded within grammars
To get:

View File

@ -13,14 +13,14 @@ The first step is to get the Java source code from the ANTLR 4 repository at git
```bash
$ cd /tmp
/tmp $ git clone git@github.com:antlr/antlr4.git
/tmp $ git clone https://github.com/antlr/antlr4.git
Cloning into 'antlr4'...
remote: Counting objects: 43273, done.
remote: Compressing objects: 100% (57/57), done.
remote: Total 43273 (delta 26), reused 0 (delta 0)
Receiving objects: 100% (43273/43273), 18.76 MiB | 1.60 MiB/s, done.
Resolving deltas: 100% (22419/22419), done.
remote: Counting objects: 61480, done.
remote: Total 61480 (delta 0), reused 0 (delta 0), pack-reused 61480
Receiving objects: 100% (61480/61480), 31.24 MiB | 7.18 MiB/s, done.
Resolving deltas: 100% (32970/32970), done.
Checking connectivity... done.
Checking out files: 100% (1427/1427), done.
```
# Compile

View File

@ -8,7 +8,7 @@ Creating a new target involves the following key elements:
1. For the tool, create class *X*Target as a subclass of class `Target` in package `org.antlr.v4.codegen.target`. This class describes language specific details about escape characters and strings and so on. There is very little to do here typically.
1. Create *X*.stg in directory tool/resources/org/antlr/v4/tool/templates/codegen/*X*/*X*.stg. This is a [StringTemplate](http://www.stringtemplate.org/) group file (`.stg`) that tells ANTLR how to express all of the parsing elements needed to generate code. You will see templates called `ParserFile`, `Parser`, `Lexer`, `CodeBlockForAlt`, `AltBlock`, etc... Each of these must describe how to build the indicated chunk of code. Your best bet is to find the closest existing target, copy that template file, and tweak to suit.
1. Create a runtime library to support the parsers generated by ANTLR. Under directory runtime/*X*, you are in complete control of the directory structure is dictated by common usage of that target language. For example, Java has: `runtime/Java/lib` and `runtime/Java/src` directories. Under `src`, you will find a directory structure for package `org.antlr.v4.runtime` and below.
1. Create a runtime library to support the parsers generated by ANTLR. Under directory runtime/*X*, you are in complete control of the directory structure as dictated by common usage of that target language. For example, Java has: `runtime/Java/lib` and `runtime/Java/src` directories. Under `src`, you will find a directory structure for package `org.antlr.v4.runtime` and below.
1. Create a template file for runtime tests. All you have to do is provide a few simple templates that indicate how to print values and declare variables. Our runtime test mechanism in dir `runtime-testsuite` will automatically generate code in a new target and check the results. All it needs to know is how to generate a test rig (i.e., a `main` program), how to define various class fields, compare members and so on. You must create a *X* directory underneath `runtime-testsuite/resources/org/antlr/v4/test/runtime`. Again, your best bet is to copy the templates from the closest language to your target and tweak it to suit.
## Getting started
@ -19,4 +19,4 @@ Creating a new target involves the following key elements:
```bash
$ mvn compile
```
That should proceed with success. See [Building ANTLR](building-antlr.md) for more details. (That link does not currently work as I have that documentation in a branch. see https://github.com/parrt/antlr4/blob/move-doc-to-repo/doc/building-antlr.md for now.)
That should proceed with success. See [Building ANTLR](building-antlr.md) for more details. (That link does not currently work as I have that documentation in a branch. see https://github.com/parrt/antlr4/blob/move-doc-to-repo/doc/building-antlr.md for now.)

View File

@ -68,9 +68,9 @@ expr : expr '*' expr
;
```
ANTLR 4 automatically constructs parse trees for you and abstract syntax tree (AST) construction is no longer an option. See also What if I need ASTs not parse trees for a compiler, for example?
ANTLR 4 automatically constructs parse trees for you and abstract syntax tree (AST) construction is no longer an option. See also [What if I need ASTs not parse trees for a compiler, for example?](https://github.com/antlr/antlr4/blob/master/doc/faq/parse-trees.md#what-if-i-need-asts-not-parse-trees-for-a-compiler-for-example).
Another big difference is that we discourage the use of actions directly within the grammar because ANTLR 4 automatically generates [listeners and visitors](https://raw.githubusercontent.com/antlr/antlr4/master/doc/listeners.md) for you to use that trigger method calls when some phrases of interest are recognized during a tree walk after parsing. See also [Parse Tree Matching and XPath](https://raw.githubusercontent.com/antlr/antlr4/master/doc/tree-matching.md).
Another big difference is that we discourage the use of actions directly within the grammar because ANTLR 4 automatically generates [listeners and visitors](https://github.com/antlr/antlr4/blob/master/doc/listeners.md) for you to use that trigger method calls when some phrases of interest are recognized during a tree walk after parsing. See also [Parse Tree Matching and XPath](https://github.com/antlr/antlr4/blob/master/doc/tree-matching.md).
Semantic predicates are still allowed in both the parser and lexer rules, as are actions. For efficiency's sake, keep semantic predicates to the right edge of lexical rules.

View File

@ -83,7 +83,7 @@ $ grun MyELang stat
If there were any `tokens` specifications, the main grammar would merge the token sets. Any named actions such as `@members` would be merged. In general, you should avoid named actions and actions within rules in imported grammars since that limits their reuse. ANTLR also ignores any options in imported grammars.
Imported grammars can also import other grammars. ANTLR pursues all imported grammars in a depth-first fashion. If two or more imported grammars define ruler, ANTLR chooses the first version of `r` it finds. In the following diagram, ANTLR examines grammars in the following order `Nested`, `G1`, `G3`, `G2`.
Imported grammars can also import other grammars. ANTLR pursues all imported grammars in a depth-first fashion. If two or more imported grammars define rule `r`, ANTLR chooses the first version of `r` it finds. In the following diagram, ANTLR examines grammars in the following order `Nested`, `G1`, `G3`, `G2`.
<img src=images/nested.png width=350>

View File

@ -1,6 +1,6 @@
# Left-recursive rules
The most natural expression of a some common language constructs is left recursive. For example C declarators and arithmetic expressions. Unfortunately, left recursive specifications of arithmetic expressions are typically ambiguous but much easier to write out than the multiple levels required in a typical top-down grammar. Here is a sample ANTLR 4 grammar with a left recursive expression rule:
The most natural expression of some common language constructs is left recursive. For example C declarators and arithmetic expressions. Unfortunately, left recursive specifications of arithmetic expressions are typically ambiguous but much easier to write out than the multiple levels required in a typical top-down grammar. Here is a sample ANTLR 4 grammar with a left recursive expression rule:
```
stat: expr '=' expr ';' // e.g., x=y; or x=f(x);

View File

@ -171,7 +171,7 @@ error(126): P.g4:3:4: cannot create implicit token for string literal '&' in non
## Lexer Rule Actions
An ANTLR lexer creates a Token object after matching a lexical rule. Each request for a token starts in Lexer.nextToken, which calls emit once it has identified a token.emit collects information from the current state of the lexer to build the token. It accesses fields `_type`, `_text`, `_channel`, `_tokenStartCharIndex`, `_tokenStartLine`, and `_tokenStartCharPositionInLine`. You can set the state of these with the various setter methods such as `setType`. For example, the following rule turns `enum` into an identifier if `enumIsKeyword` is false.
An ANTLR lexer creates a Token object after matching a lexical rule. Each request for a token starts in `Lexer.nextToken`, which calls `emit` once it has identified a token. `emit` collects information from the current state of the lexer to build the token. It accesses fields `_type`, `_text`, `_channel`, `_tokenStartCharIndex`, `_tokenStartLine`, and `_tokenStartCharPositionInLine`. You can set the state of these with the various setter methods such as `setType`. For example, the following rule turns `enum` into an identifier if `enumIsKeyword` is false.
```
ENUM : 'enum' {if (!enumIsKeyword) setType(Identifier);} ;
@ -255,7 +255,8 @@ WS : [ \r\t\n]+ -> skip ;
```
For multiple 'type()' commands, only the rightmost has an effect.
channel()
### channel()
```
BLOCK_COMMENT

View File

@ -8,7 +8,7 @@ By default, ANTLR-generated parsers build a data structure called a parse tree o
The interior nodes of the parse tree are phrase names that group and identify their children. The root node is the most abstract phrase name, in this case `stat` (short for statement). The leaves of a parse tree are always the input tokens. Parse trees sit between a language recognizer and an interpreter or translator implementation. They are extremely effective data structures because they contain all of the input and complete knowledge of how the parser grouped the symbols into phrases. Better yet, they are easy to understand and the parser generates them automatically (unless you turn them off with `parser.setBuildParseTree(false)`).
Because we specify phrase structure with a set of rules, parse tree subtree roots correspond to grammar rule names. ANTLR has a ParseTreeWalker that knows how to walk these parse trees and trigger events in listener implementation objects that you can create. ANTLR the tool generates listener interfaces for you also unless you, unless you turn that off with a commandline option. You can also have it generate visitors. For example from a Java.g4 grammar, ANTLR generates:
Because we specify phrase structure with a set of rules, parse tree subtree roots correspond to grammar rule names. ANTLR has a ParseTreeWalker that knows how to walk these parse trees and trigger events in listener implementation objects that you can create. The ANTLR tool generates listener interfaces for you also, unless you turn that off with a commandline option. You can also have it generate visitors. For example from a Java.g4 grammar, ANTLR generates:
```java
public interface JavaListener extends ParseTreeListener<Token> {
@ -27,11 +27,10 @@ Assuming you've created a listener object called `MyListener`, here is how to ca
JavaLexer lexer = new JavaLexer(input);
CommonTokenStream tokens = new CommonTokenStream(lexer);
JavaParser parser = new JavaParser(tokens);
ParserRuleContext<Token> tree = parser.compilationUnit(); // parse
ParseTreeWalker walker = new ParseTreeWalker(); // create standard walker
JavaParser.CompilationUnitContext tree = parser.compilationUnit(); // parse a compilationUnit
MyListener extractor = new MyListener(parser);
walker.walk(extractor, tree); // initiate walk of tree with listener
ParseTreeWalker.DEFAULT.walk(extractor, tree); // initiate walk of tree with listener in use of default walker
```
Listeners and visitors are great because they keep application-specific code out of grammars, making grammars easier to read and preventing them from getting entangled with a particular application.

View File

@ -12,6 +12,55 @@ where a value can be an identifier, a qualified identifier (for example, a.b.c),
All grammars can use the following options. In combined grammars, all options except language pertain only to the generated parser. Options may be set either within the grammar file using the options syntax (described above) or when invoking ANTLR on the command line, using the `-D` option. (see Section 15.9, [ANTLR Tool Command Line Options](tool-options.md).) The following examples demonstrate both mechanisms; note that `-D` overrides options within the grammar.
* `superClass`. Set the superclass of the generated parser or lexer. For combined grammars, it sets the superclass of the parser.
```
$ cat Hi.g4
grammar Hi;
a : 'hi' ;
$ antlr4 -DsuperClass=XX Hi.g4
$ grep 'public class' HiParser.java
public class HiParser extends XX {
$ grep 'public class' HiLexer.java
public class HiLexer extends Lexer {
```
* `language` Generate code in the indicated language, if ANTLR is able to do so. Otherwise, you will see an error message like this:
```
$ antlr4 -Dlanguage=C MyGrammar.g4
error(31): ANTLR cannot generate C code as of version 4.0
```
* `tokenVocab` ANTLR assigns token type numbers to the tokens as it encounters them in a file. To use different token type values, such as with a separate lexer, use this option to have ANTLR pull in the `.tokens` file. ANTLR generates a `.tokens` file from each grammar.
```
$ cat SomeLexer.g4
lexer grammar SomeLexer;
ID : [a-z]+ ;
$ cat R.g4
parser grammar R;
options {tokenVocab=SomeLexer;}
tokens {A,B,C} // normally, these would be token types 1, 2, 3
a : ID ;
$ antlr4 SomeLexer.g4
$ cat SomeLexer.tokens
ID=1
$ antlr4 R.g4
$ cat R.tokens
A=2
B=3
C=4
ID=1
```
* `TokenLabelType` ANTLR normally uses type `Token` when it generates variables referencing tokens. If you have passed a `TokenFactory` to your parser and lexer so that they create custom tokens, you should set this option to your specific type. This ensures that the context objects know your type for fields and method return values.
```
$ cat T2.g4
grammar T2;
options {TokenLabelType=MyToken;}
a : x=ID ;
$ antlr4 T2.g4
$ grep MyToken T2Parser.java
public MyToken x;
```
* `contextSuperClass`. Specify the super class of parse tree internal nodes. Default is `ParserRuleContext`. It should ultimately derive from at least `RuleContext`.
The Java target can use `contextSuperClass=org.antlr.v4.runtime.RuleContextWithAltNum` for convenience. It adds a backing field for `altNumber`, the alternative matched for the associated rule node, as the sketch below shows.
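For example, here is a minimal sketch, assuming a hypothetical grammar `T` whose start rule is `s` and which sets `options { contextSuperClass=org.antlr.v4.runtime.RuleContextWithAltNum; }`, of reading the matched alternative back out of the parse tree:
```
CharStream input = new ANTLRInputStream("...");          // some input for grammar T
TLexer lexer = new TLexer(input);
CommonTokenStream tokens = new CommonTokenStream(lexer);
TParser parser = new TParser(tokens);
ParserRuleContext tree = parser.s();                      // parse starting at rule s
int alt = tree.getAltNumber();                            // alternative of s that matched, tracked by RuleContextWithAltNum
System.out.println("rule s matched alt " + alt);
```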
## Rule Options
There are currently no valid rule-level options, but the tool still supports the following syntax for future use:
@ -25,7 +74,7 @@ options {...}
## Rule Element Options
Token options have the form `T<name=value>` as we saw in Section 5.4, [Dealing with Precedence, Left Recursion, and Associativity](http://pragprog.com/book/tpantlr2/the-definitive-antlr-4-reference). The only token option is assocand it accepts values left and right. Heres a sample grammar with a left-recursive expression rule that specifies a token option on the `^` exponent operator token:
Token options have the form `T<name=value>` as we saw in Section 5.4, [Dealing with Precedence, Left Recursion, and Associativity](http://pragprog.com/book/tpantlr2/the-definitive-antlr-4-reference). The only token option is `assoc`, and it accepts values `left` and `right`. Here's a sample grammar with a left-recursive expression rule that specifies a token option on the `^` exponent operator token:
```
grammar ExprLR;
@ -40,7 +89,7 @@ INT : '0'..'9'+ ;
WS : [ \n]+ -> skip ;
```
Semantic predicates also accept an option, per [Catching failed semantic predicates](http://pragprog.com/book/tpantlr2/the-definitive-antlr-4-reference). The only valid option is the fail option, which takes either a string literal in double-quotes or an action that evaluates to a string. The string literal or string result from the action should be the message to emit upon predicate failure.
Semantic predicates also accept an option, per [Catching failed semantic predicates](http://pragprog.com/book/tpantlr2/the-definitive-antlr-4-reference). The only valid option is the `fail` option, which takes either a string literal in double-quotes or an action that evaluates to a string. The string literal or string result from the action should be the message to emit upon predicate failure.
```
ints[int max]

View File

@ -29,7 +29,7 @@ expr: {istype()}? ID '(' expr ')' // ctor-style typecast
;
```
The parser will only predict an expr from stat when `istype()||isfunc()` evaluates to true. This makes sense because the parser should only choose to match an expression if the upcoming `ID` is a type name or function name. It wouldn't make sense to just test one of the predicates in this case. Note that, when the parser gets to expritself, the parsing decision tests the predicates individually, one for each alternative.
The parser will only predict an expr from stat when `istype()||isfunc()` evaluates to true. This makes sense because the parser should only choose to match an expression if the upcoming `ID` is a type name or function name. It wouldn't make sense to just test one of the predicates in this case. Note that, when the parser gets to `expr` itself, the parsing decision tests the predicates individually, one for each alternative.
If multiple predicates occur in a sequence, the parser joins them with the `&&` operator. For example, consider changing `stat` to include a predicate before the call to `expr`:
@ -72,7 +72,7 @@ stat: {System.out.println("goto"); allowgoto=true;} {java5}? 'goto' ID ';'
If we can't execute the action during prediction, we shouldn't evaluate the `{java5}?` predicate because it depends on that action.
The prediction process also can't see through token references. Token references have the side effect of advancing the input one symbol. A predicate that tested the current input symbol would find itself out of sync if the parser shifted it over the token reference. For example, in the following grammar, the predicates expectgetCurrentToken to return an ID token.
The prediction process also can't see through token references. Token references have the side effect of advancing the input one symbol. A predicate that tested the current input symbol would find itself out of sync if the parser shifted it over the token reference. For example, in the following grammar, the predicates expect `getCurrentToken` to return an `ID` token.
```
stat: '{' decl '}'

View File

@ -2,15 +2,16 @@
## Github
Create a release candidate tag 4.x-rc-1 or full 4.5 tag
```bash
git tag -a 4.5 -m 'ANTLR final release 4.5'
git push origin 4.5
```
Create a pre-release or full release at github; [Example 4.5-rc-1](https://github.com/antlr/antlr4/releases/tag/4.5-rc-1).
Whack any existing tag, as mvn will create one and it fails if it's already there.
```
$ git tag -d 4.5.2
$ git push origin :refs/tags/4.5.2
$ git push upstream :refs/tags/4.5.2
```
## Bump version
Edit the repository looking for 4.5 or whatever and update it. Bump version in the following files:
@ -20,7 +21,7 @@ Edit the repository looking for 4.5 or whatever and update it. Bump version in t
* runtime/Python2/src/antlr4/Recognizer.py
* runtime/Python3/setup.py
* runtime/Python3/src/antlr4/Recognizer.py
* runtime/CSharp/Antlr4.Runtime/Properties/AssemblyInfo.cs
* runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Properties/AssemblyInfo.cs
* runtime/JavaScript/src/antlr4/package.json
* runtime/JavaScript/src/antlr4/Recognizer.js
* tool/src/org/antlr/v4/codegen/target/CSharpTarget.java
@ -89,8 +90,10 @@ The maven deploy lifecycle phased deploys the artifacts and the poms for the ANT
mvn deploy -DskipTests
```
With JDK 1.7 (not 6 or 8), do this:
```bash
mvn release:prepare
mvn release:prepare -Darguments="-DskipTests"
```
It will start out by asking you the version number:
@ -111,7 +114,7 @@ What is the new development version for "ANTLR 4"? (org.antlr:antlr4-master) 4.5
Maven will go through your pom.xml files to update versions from 4.5.2-SNAPSHOT to 4.5.2 for release and then to 4.5.3-SNAPSHOT after release, which is done with:
```bash
mvn release:perform
mvn release:perform -Darguments="-DskipTests"
```
Maven will use git to push pom.xml changes. (big smile)
@ -132,16 +135,21 @@ cp ~/.m2/repository/org/antlr/antlr4/4.5.2/antlr4-4.5.2.jar ~/antlr/sites/websit
cd ~/antlr/sites/website-antlr4/download
git add antlr-4.5.2-complete.jar
git add antlr-runtime-4.5.2.jar
git commit -a -m 'add 4.5.2 jars'
git push origin gh-pages
```
Update on site:
* download.html
* index.html
* api/index.html
* download/index.html
* scripts/topnav.js
```
git commit -a -m 'add 4.5.2 jars'
git push origin gh-pages
```
## Deploying Targets
### JavaScript

View File

@ -18,7 +18,6 @@ The [ANTLR v4 book](http://pragprog.com/book/tpantlr2/the-definitive-antlr-4-ref
New features generally appear in the Java target and then migrate to the other targets, but these other targets don't always get updated in the same overall tool release. This section tries to identify features added to Java that have not been added to the other targets.
|Feature|Java|C&sharp;|JavaScript|Python2|Python3|Swift|C++|
|-|-|-|-|-|-|-|-|
|---|---|---|---|---|---|---|---|
|Ambiguous tree construction|4.5.1|-|-|-|-|-|-|

View File

@ -29,7 +29,7 @@ ParseTreeMatch m = p.match(t);
if ( m.succeeded() ) {...}
```
We can also test for specific expressions or token values. For example, the following checks to see if t is an expression consisting of an identifier added to 0:
We can also test for specific expressions or token values. For example, the following checks to see if `t` is an expression consisting of an identifier added to 0:
```java
ParseTree t = ...; // assume t is an expression

View File

@ -7,7 +7,7 @@
</parent>
<groupId>org.antlr</groupId>
<artifactId>antlr4-master</artifactId>
<version>4.5.2-SNAPSHOT</version>
<version>4.5.4-SNAPSHOT</version>
<packaging>pom</packaging>
<name>ANTLR 4</name>

View File

@ -4,7 +4,7 @@
<parent>
<groupId>org.antlr</groupId>
<artifactId>antlr4-master</artifactId>
<version>4.5.2-SNAPSHOT</version>
<version>4.5.4-SNAPSHOT</version>
</parent>
<artifactId>antlr4-runtime-testsuite</artifactId>
<name>ANTLR 4 Runtime Test Generator</name>

View File

@ -333,6 +333,19 @@ ParseTreeWalker walker = new ParseTreeWalker();
walker.Walk(new LeafListener(), <s>);
>>
TreeNodeWithAltNumField(X) ::= <<
@parser::members {
public class MyRuleNode : ParserRuleContext {
public int altNum;
public MyRuleNode(ParserRuleContext parent, int invokingStateNumber): base(parent, invokingStateNumber)
{
}
public override int getAltNumber() { return altNum; }
public override void setAltNumber(int altNum) { this.altNum = altNum; }
}
}
>>
TokenGetterListener(X) ::= <<
public class LeafListener : TBaseListener {
public override void ExitA(TParser.AContext ctx) {

View File

@ -344,6 +344,19 @@ ParseTreeWalker walker = new ParseTreeWalker();
walker.walk(new LeafListener(), <s>);
>>
TreeNodeWithAltNumField(X) ::= <<
@parser::members {
public static class MyRuleNode extends ParserRuleContext {
public int altNum;
public MyRuleNode(ParserRuleContext parent, int invokingStateNumber) {
super(parent, invokingStateNumber);
}
@Override public int getAltNumber() { return altNum; }
@Override public void setAltNumber(int altNum) { this.altNum = altNum; }
}
}
>>
TokenGetterListener(X) ::= <<
public static class LeafListener extends TBaseListener {
public void exitA(TParser.AContext ctx) {

View File

@ -328,6 +328,20 @@ var walker = new antlr4.tree.ParseTreeWalker();
walker.walk(new this.LeafListener(), <s>);
>>
TreeNodeWithAltNumField(X) ::= <<
@parser::header {
MyRuleNode = function(parent, invokingState) {
antlr4.ParserRuleContext.call(this, parent, invokingState);
this.altNum = 0;
return this;
};
MyRuleNode.prototype = Object.create(antlr4.ParserRuleContext.prototype);
MyRuleNode.prototype.constructor = MyRuleNode;
}
>>
TokenGetterListener(X) ::= <<
this.LeafListener = function() {
this.exitA = function(ctx) {

View File

@ -328,6 +328,20 @@ var walker = new antlr4.tree.ParseTreeWalker();
walker.walk(new this.LeafListener(), <s>);
>>
TreeNodeWithAltNumField(X) ::= <<
@parser::header {
MyRuleNode = function(parent, invokingState) {
antlr4.ParserRuleContext.call(this, parent, invokingState);
this.altNum = 0;
return this;
};
MyRuleNode.prototype = Object.create(antlr4.ParserRuleContext.prototype);
MyRuleNode.prototype.constructor = MyRuleNode;
}
>>
TokenGetterListener(X) ::= <<
this.LeafListener = function() {
this.exitA = function(ctx) {

View File

@ -330,6 +330,20 @@ var walker = new antlr4.tree.ParseTreeWalker();
walker.walk(new this.LeafListener(), <s>);
>>
TreeNodeWithAltNumField(X) ::= <<
@parser::header {
MyRuleNode = function(parent, invokingState) {
antlr4.ParserRuleContext.call(this, parent, invokingState);
this.altNum = 0;
return this;
};
MyRuleNode.prototype = Object.create(antlr4.ParserRuleContext.prototype);
MyRuleNode.prototype.constructor = MyRuleNode;
}
>>
TokenGetterListener(X) ::= <<
this.LeafListener = function() {
this.exitA = function(ctx) {

View File

@ -328,6 +328,24 @@ var walker = new antlr4.tree.ParseTreeWalker();
walker.walk(new this.LeafListener(), <s>);
>>
TreeNodeWithAltNumField(X) ::= <<
@parser::header {
MyRuleNode = function(parent, invokingState) {
antlr4.ParserRuleContext.call(this, parent, invokingState);
this.altNum = 0;
return this;
};
MyRuleNode.prototype = Object.create(antlr4.ParserRuleContext.prototype);
MyRuleNode.prototype.constructor = MyRuleNode;
MyRuleNode.prototype.getAltNumber = function() { return this.altNum; }
MyRuleNode.prototype.setAltNumber = function(altNumber) { this.altNum = altNumber; }
}
>>
TokenGetterListener(X) ::= <<
this.LeafListener = function() {
this.exitA = function(ctx) {

View File

@ -328,6 +328,21 @@ var walker = new antlr4.tree.ParseTreeWalker();
walker.walk(new this.LeafListener(), <s>);
>>
TreeNodeWithAltNumField(X) ::= <<
@parser::header {
MyRuleNode = function(parent, invokingState) {
antlr4.ParserRuleContext.call(this, parent, invokingState);
this.altNum = 0;
return this;
};
MyRuleNode.prototype = Object.create(antlr4.ParserRuleContext.prototype);
MyRuleNode.prototype.constructor = MyRuleNode;
}
>>
TokenGetterListener(X) ::= <<
this.LeafListener = function() {
this.exitA = function(ctx) {

View File

@ -318,6 +318,19 @@ walker = ParseTreeWalker()
walker.walk(TParser.LeafListener(), <s>)
>>
TreeNodeWithAltNumField(X) ::= <<
@parser::members {
class MyRuleNode(ParserRuleContext):
def __init__(self, parent = None, invokingStateNumber = None ):
super(<X>Parser.MyRuleNode, self).__init__(parent, invokingStateNumber)
self.altNum = 0;
def getAltNumber(self):
return self.altNum
def setAltNumber(self, altNum):
self.altNum = altNum
}
>>
TokenGetterListener(X) ::= <<
if __name__ is not None and "." in __name__:
from .<X>Listener import <X>Listener

View File

@ -320,6 +320,19 @@ walker = ParseTreeWalker()
walker.walk(TParser.LeafListener(), <s>)
>>
TreeNodeWithAltNumField(X) ::= <<
@parser::members {
class MyRuleNode(ParserRuleContext):
def __init__(self, parent:ParserRuleContext = None, invokingStateNumber:int = None ):
super(<X>Parser.MyRuleNode, self).__init__(parent, invokingStateNumber)
self.altNum = 0;
def getAltNumber(self):
return self.altNum
def setAltNumber(self, altNum):
self.altNum = altNum
}
>>
TokenGetterListener(X) ::= <<
class LeafListener(MockListener):
def exitA(self, ctx):

View File

@ -59,6 +59,8 @@ TestTemplates ::= [
"MultipleAlternativesWithCommonLabel_2": [],
"MultipleAlternativesWithCommonLabel_3": [],
"MultipleAlternativesWithCommonLabel_4": [],
"PrefixAndOtherAlt_1": [],
"PrefixAndOtherAlt_2": [],
"PrefixOpWithActionAndLabel_1": [],
"PrefixOpWithActionAndLabel_2": [],
"PrefixOpWithActionAndLabel_3": [],

View File

@ -0,0 +1,24 @@
TestType() ::= "Parser"
Options ::= [
"Debug": false
]
Grammar ::= [
"T": {<grammar("T")>}
]
Rule() ::= "s"
grammar(grammarName) ::= <<
grammar <grammarName>;
s @after {<ToStringTree("$ctx"):writeln()>} : expr EOF ;
expr : literal
| op expr
| expr op expr
;
literal : '-'? Integer ;
op : '+' | '-' ;
Integer : [0-9]+ ;
WS : (' '|'\n') -> skip ;
>>

View File

@ -0,0 +1,9 @@
import "PrefixAndOtherAlt.stg"
Input() ::= "-1"
Output() ::= <<
(s (expr (literal - 1)) \<EOF>)<\n>
>>
Errors() ::= ""

View File

@ -0,0 +1,9 @@
import "PrefixAndOtherAlt.stg"
Input() ::= "-1 + -1"
Output() ::= <<
(s (expr (expr (literal - 1)) (op +) (expr (literal - 1))) \<EOF>)<\n>
>>
Errors() ::= ""

View File

@ -0,0 +1,40 @@
TestType() ::= "Parser"
Grammar ::= [
"T": {<grammar("T")>}
]
Input() ::= "xyz"
Rule() ::= "s"
Output() ::= <<
(a:3 x (b:2 y) z)<\n>
>>
Errors() ::= ""
grammar(grammarName) ::= <<
grammar <grammarName>;
options { contextSuperClass=MyRuleNode; }
<TreeNodeWithAltNumField(X=grammarName)>
s
@init {
<BuildParseTrees()>
}
@after {
<ToStringTree("$r.ctx"):writeln()>
}
: r=a ;
a : 'f'
| 'g'
| 'x' b 'z'
;
b : 'e' {} | 'y'
;
>>

View File

@ -6,5 +6,6 @@ TestTemplates ::= [
"RuleRef": [],
"ExtraToken": [],
"NoViableAlt": [],
"Sync": []
"Sync": [],
"AltNum": []
]

View File

@ -482,6 +482,9 @@ public abstract class BaseTest {
throw new RuntimeException("C# runtime project file not found!");
}
String runtimeProjPath = runtimeProj.getPath();
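// On Windows, the project file path here typically comes from a classpath URL and has a leading '/' (e.g. /C:/...), so it is stripped below.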
if(isWindows()){
runtimeProjPath = runtimeProjPath.replaceFirst("/", "");
}
XPathExpression exp = XPathFactory.newInstance().newXPath()
.compile("/Project/ItemGroup/ProjectReference[@Include='" + runtimeName + "']");
Element node = (Element)exp.evaluate(prjXml, XPathConstants.NODE);

View File

@ -1801,6 +1801,50 @@ public class TestLeftRecursion extends BaseTest {
assertEquals("(prog (statement (letterA a)) (statement (letterA a)) <EOF>)\n", found);
assertNull(this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testPrefixAndOtherAlt_1() throws Exception {
mkdir(tmpdir);
StringBuilder grammarBuilder = new StringBuilder(223);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("s @after {Console.WriteLine($ctx.ToStringTree(this));} : expr EOF ; \n");
grammarBuilder.append("expr : literal\n");
grammarBuilder.append(" | op expr\n");
grammarBuilder.append(" | expr op expr\n");
grammarBuilder.append(" ;\n");
grammarBuilder.append("literal : '-'? Integer ;\n");
grammarBuilder.append("op : '+' | '-' ;\n");
grammarBuilder.append("Integer : [0-9]+ ;\n");
grammarBuilder.append("WS : (' '|'\\n') -> skip ;");
String grammar = grammarBuilder.toString();
String input ="-1";
String found = execParser("T.g4", grammar, "TParser", "TLexer", "s", input, false);
assertEquals("(s (expr (literal - 1)) <EOF>)\n", found);
assertNull(this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testPrefixAndOtherAlt_2() throws Exception {
mkdir(tmpdir);
StringBuilder grammarBuilder = new StringBuilder(223);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("s @after {Console.WriteLine($ctx.ToStringTree(this));} : expr EOF ; \n");
grammarBuilder.append("expr : literal\n");
grammarBuilder.append(" | op expr\n");
grammarBuilder.append(" | expr op expr\n");
grammarBuilder.append(" ;\n");
grammarBuilder.append("literal : '-'? Integer ;\n");
grammarBuilder.append("op : '+' | '-' ;\n");
grammarBuilder.append("Integer : [0-9]+ ;\n");
grammarBuilder.append("WS : (' '|'\\n') -> skip ;");
String grammar = grammarBuilder.toString();
String input ="-1 + -1";
String found = execParser("T.g4", grammar, "TParser", "TLexer", "s", input, false);
assertEquals("(s (expr (expr (literal - 1)) (op +) (expr (literal - 1))) <EOF>)\n", found);
assertNull(this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test

View File

@ -2,7 +2,6 @@
package org.antlr.v4.test.runtime.csharp;
import org.junit.Test;
import org.junit.Ignore;
@SuppressWarnings("unused")
public class TestParseTrees extends BaseTest {
@ -52,6 +51,49 @@ public class TestParseTrees extends BaseTest {
assertEquals("(a y)\n", found);
assertNull(this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testAltNum() throws Exception {
mkdir(tmpdir);
StringBuilder grammarBuilder = new StringBuilder(547);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("\n");
grammarBuilder.append("options { contextSuperClass=MyRuleNode; }\n");
grammarBuilder.append("\n");
grammarBuilder.append("@parser::members {\n");
grammarBuilder.append("public class MyRuleNode : ParserRuleContext {\n");
grammarBuilder.append(" public int altNum;\n");
grammarBuilder.append(" public MyRuleNode(ParserRuleContext parent, int invokingStateNumber): base(parent, invokingStateNumber)\n");
grammarBuilder.append(" {\n");
grammarBuilder.append(" }\n");
grammarBuilder.append(" public override int getAltNumber() { return altNum; }\n");
grammarBuilder.append(" public override void setAltNumber(int altNum) { this.altNum = altNum; }\n");
grammarBuilder.append("}\n");
grammarBuilder.append("}\n");
grammarBuilder.append("\n");
grammarBuilder.append("\n");
grammarBuilder.append("s\n");
grammarBuilder.append("@init {\n");
grammarBuilder.append("this.BuildParseTree = true;\n");
grammarBuilder.append("}\n");
grammarBuilder.append("@after {\n");
grammarBuilder.append("Console.WriteLine($r.ctx.ToStringTree(this));\n");
grammarBuilder.append("}\n");
grammarBuilder.append(" : r=a ;\n");
grammarBuilder.append("\n");
grammarBuilder.append("a : 'f'\n");
grammarBuilder.append(" | 'g'\n");
grammarBuilder.append(" | 'x' b 'z'\n");
grammarBuilder.append(" ;\n");
grammarBuilder.append("b : 'e' {} | 'y'\n");
grammarBuilder.append(" ;");
String grammar = grammarBuilder.toString();
String input ="xyz";
String found = execParser("T.g4", grammar, "TParser", "TLexer", "s", input, false);
assertEquals("(a:3 x (b:2 y) z)\n", found);
assertNull(this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test

View File

@ -2007,6 +2007,58 @@ public class TestLeftRecursion extends BaseTest {
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testPrefixAndOtherAlt_1() throws Exception {
mkdir(tmpdir);
StringBuilder grammarBuilder = new StringBuilder(224);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("s @after {System.out.println($ctx.toStringTree(this));} : expr EOF ; \n");
grammarBuilder.append("expr : literal\n");
grammarBuilder.append(" | op expr\n");
grammarBuilder.append(" | expr op expr\n");
grammarBuilder.append(" ;\n");
grammarBuilder.append("literal : '-'? Integer ;\n");
grammarBuilder.append("op : '+' | '-' ;\n");
grammarBuilder.append("Integer : [0-9]+ ;\n");
grammarBuilder.append("WS : (' '|'\\n') -> skip ;");
String grammar = grammarBuilder.toString();
String input ="-1";
String found = execParser("T.g4", grammar, "TParser", "TLexer", "s", input, false);
assertEquals("(s (expr (literal - 1)) <EOF>)\n", found);
assertNull(this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testPrefixAndOtherAlt_2() throws Exception {
mkdir(tmpdir);
StringBuilder grammarBuilder = new StringBuilder(224);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("s @after {System.out.println($ctx.toStringTree(this));} : expr EOF ; \n");
grammarBuilder.append("expr : literal\n");
grammarBuilder.append(" | op expr\n");
grammarBuilder.append(" | expr op expr\n");
grammarBuilder.append(" ;\n");
grammarBuilder.append("literal : '-'? Integer ;\n");
grammarBuilder.append("op : '+' | '-' ;\n");
grammarBuilder.append("Integer : [0-9]+ ;\n");
grammarBuilder.append("WS : (' '|'\\n') -> skip ;");
String grammar = grammarBuilder.toString();
String input ="-1 + -1";
String found = execParser("T.g4", grammar, "TParser", "TLexer", "s", input, false);
assertEquals("(s (expr (expr (literal - 1)) (op +) (expr (literal - 1))) <EOF>)\n", found);
assertNull(this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testPrefixOpWithActionAndLabel_1() throws Exception {

View File

@ -1,10 +1,10 @@
/* This file is generated by TestGenerator, any edits will be overwritten by the next generation. */
package org.antlr.v4.test.runtime.java;
import org.junit.Ignore;
import org.junit.Test;
import static org.junit.Assert.*;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNull;
public class TestParseTrees extends BaseTest {
@ -62,6 +62,53 @@ public class TestParseTrees extends BaseTest {
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testAltNum() throws Exception {
mkdir(tmpdir);
StringBuilder grammarBuilder = new StringBuilder(562);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("\n");
grammarBuilder.append("options { contextSuperClass=MyRuleNode; }\n");
grammarBuilder.append("\n");
grammarBuilder.append("@parser::members {\n");
grammarBuilder.append("public static class MyRuleNode extends ParserRuleContext {\n");
grammarBuilder.append(" public int altNum;\n");
grammarBuilder.append(" public MyRuleNode(ParserRuleContext parent, int invokingStateNumber) {\n");
grammarBuilder.append(" super(parent, invokingStateNumber);\n");
grammarBuilder.append(" }\n");
grammarBuilder.append(" @Override public int getAltNumber() { return altNum; }\n");
grammarBuilder.append(" @Override public void setAltNumber(int altNum) { this.altNum = altNum; }\n");
grammarBuilder.append("}\n");
grammarBuilder.append("}\n");
grammarBuilder.append("\n");
grammarBuilder.append("\n");
grammarBuilder.append("s\n");
grammarBuilder.append("@init {\n");
grammarBuilder.append("setBuildParseTree(true);\n");
grammarBuilder.append("}\n");
grammarBuilder.append("@after {\n");
grammarBuilder.append("System.out.println($r.ctx.toStringTree(this));\n");
grammarBuilder.append("}\n");
grammarBuilder.append(" : r=a ;\n");
grammarBuilder.append("\n");
grammarBuilder.append("a : 'f'\n");
grammarBuilder.append(" | 'g'\n");
grammarBuilder.append(" | 'x' b 'z'\n");
grammarBuilder.append(" ;\n");
grammarBuilder.append("b : 'e' {} | 'y'\n");
grammarBuilder.append(" ;");
String grammar = grammarBuilder.toString();
String input ="xyz";
String found = execParser("T.g4", grammar, "TParser", "TLexer", "s", input, false);
assertEquals("(a:3 x (b:2 y) z)\n", found);
assertNull(this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testExtraToken() throws Exception {

View File

@ -445,9 +445,16 @@ public abstract class BaseTest {
if ( runtimeSrc==null ) {
throw new RuntimeException("Cannot find JavaScript runtime");
}
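// URL.getPath() on Windows yields a path with a leading '/' (e.g. /C:/...), so strip it to get a usable file system path.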
if(isWindows()){
return runtimeSrc.getPath().replaceFirst("/", "");
}
return runtimeSrc.getPath();
}
private boolean isWindows() {
return System.getProperty("os.name").toLowerCase().contains("windows");
}
public void testErrors(String[] pairs, boolean printTree) {
for (int i = 0; i < pairs.length; i += 2) {
String input = pairs[i];

View File

@ -1905,6 +1905,54 @@ public class TestLeftRecursion extends BaseTest {
assertEquals("(prog (statement (letterA a)) (statement (letterA a)) <EOF>)\n", found);
assertNull(this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testPrefixAndOtherAlt_1() throws Exception {
mkdir(tmpdir);
StringBuilder grammarBuilder = new StringBuilder(223);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("s @after {console.log($ctx.toStringTree(null, this));} : expr EOF ; \n");
grammarBuilder.append("expr : literal\n");
grammarBuilder.append(" | op expr\n");
grammarBuilder.append(" | expr op expr\n");
grammarBuilder.append(" ;\n");
grammarBuilder.append("literal : '-'? Integer ;\n");
grammarBuilder.append("op : '+' | '-' ;\n");
grammarBuilder.append("Integer : [0-9]+ ;\n");
grammarBuilder.append("WS : (' '|'\\n') -> skip ;");
String grammar = grammarBuilder.toString();
String input ="-1";
String found = execParser("T.g4", grammar, "TParser", "TLexer",
"TListener", "TVisitor",
"s", input, false);
assertEquals("(s (expr (literal - 1)) <EOF>)\n", found);
assertNull(this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testPrefixAndOtherAlt_2() throws Exception {
mkdir(tmpdir);
StringBuilder grammarBuilder = new StringBuilder(223);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("s @after {console.log($ctx.toStringTree(null, this));} : expr EOF ; \n");
grammarBuilder.append("expr : literal\n");
grammarBuilder.append(" | op expr\n");
grammarBuilder.append(" | expr op expr\n");
grammarBuilder.append(" ;\n");
grammarBuilder.append("literal : '-'? Integer ;\n");
grammarBuilder.append("op : '+' | '-' ;\n");
grammarBuilder.append("Integer : [0-9]+ ;\n");
grammarBuilder.append("WS : (' '|'\\n') -> skip ;");
String grammar = grammarBuilder.toString();
String input ="-1 + -1";
String found = execParser("T.g4", grammar, "TParser", "TLexer",
"TListener", "TVisitor",
"s", input, false);
assertEquals("(s (expr (expr (literal - 1)) (op +) (expr (literal - 1))) <EOF>)\n", found);
assertNull(this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test

View File

@ -1,10 +1,10 @@
/* This file is generated by TestGenerator, any edits will be overwritten by the next generation. */
package org.antlr.v4.test.runtime.javascript.node;
import org.junit.Ignore;
import org.junit.Test;
import static org.junit.Assert.*;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNull;
@SuppressWarnings("unused")
public class TestParseTrees extends BaseTest {
@ -58,6 +58,55 @@ public class TestParseTrees extends BaseTest {
assertEquals("(a y)\n", found);
assertNull(this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testAltNum() throws Exception {
mkdir(tmpdir);
StringBuilder grammarBuilder = new StringBuilder(663);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("\n");
grammarBuilder.append("options { contextSuperClass=MyRuleNode; }\n");
grammarBuilder.append("\n");
grammarBuilder.append("@parser::header {\n");
grammarBuilder.append("MyRuleNode = function(parent, invokingState) {\n");
grammarBuilder.append(" antlr4.ParserRuleContext.call(this, parent, invokingState);\n");
grammarBuilder.append("\n");
grammarBuilder.append(" this.altNum = 0;\n");
grammarBuilder.append(" return this;\n");
grammarBuilder.append("};\n");
grammarBuilder.append("\n");
grammarBuilder.append("MyRuleNode.prototype = Object.create(antlr4.ParserRuleContext.prototype);\n");
grammarBuilder.append("MyRuleNode.prototype.constructor = MyRuleNode;\n");
grammarBuilder.append("MyRuleNode.prototype.getAltNumber = function() { return this.altNum; }\n");
grammarBuilder.append("MyRuleNode.prototype.setAltNumber = function(altNumber) { this.altNum = altNumber; }\n");
grammarBuilder.append("\n");
grammarBuilder.append("}\n");
grammarBuilder.append("\n");
grammarBuilder.append("\n");
grammarBuilder.append("s\n");
grammarBuilder.append("@init {\n");
grammarBuilder.append("this.buildParseTrees = true;\n");
grammarBuilder.append("}\n");
grammarBuilder.append("@after {\n");
grammarBuilder.append("console.log($r.ctx.toStringTree(null, this));\n");
grammarBuilder.append("}\n");
grammarBuilder.append(" : r=a ;\n");
grammarBuilder.append("\n");
grammarBuilder.append("a : 'f'\n");
grammarBuilder.append(" | 'g'\n");
grammarBuilder.append(" | 'x' b 'z'\n");
grammarBuilder.append(" ;\n");
grammarBuilder.append("b : 'e' {} | 'y'\n");
grammarBuilder.append(" ;");
String grammar = grammarBuilder.toString();
String input ="xyz";
String found = execParser("T.g4", grammar, "TParser", "TLexer",
"TListener", "TVisitor",
"s", input, false);
assertEquals("(a:3 x (b:2 y) z)\n", found);
assertNull(this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test

View File

@ -583,9 +583,16 @@ public abstract class BasePythonTest {
if ( runtimeSrc==null ) {
throw new RuntimeException("Cannot find "+targetName+" runtime");
}
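// URL.getPath() on Windows yields a path with a leading '/' (e.g. /C:/...), so strip it to get a usable file system path.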
if(isWindows()){
return runtimeSrc.getPath().replaceFirst("/", "");
}
return runtimeSrc.getPath();
}
private boolean isWindows() {
return System.getProperty("os.name").toLowerCase().contains("windows");
}
public void testErrors(String[] pairs, boolean printTree) {
for (int i = 0; i < pairs.length; i+=2) {
String input = pairs[i];


@ -2058,6 +2058,60 @@ public class TestLeftRecursion extends BasePython2Test {
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testPrefixAndOtherAlt_1() throws Exception {
mkdir(tmpdir);
StringBuilder grammarBuilder = new StringBuilder(216);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("s @after {print($ctx.toStringTree(recog=self))} : expr EOF ; \n");
grammarBuilder.append("expr : literal\n");
grammarBuilder.append(" | op expr\n");
grammarBuilder.append(" | expr op expr\n");
grammarBuilder.append(" ;\n");
grammarBuilder.append("literal : '-'? Integer ;\n");
grammarBuilder.append("op : '+' | '-' ;\n");
grammarBuilder.append("Integer : [0-9]+ ;\n");
grammarBuilder.append("WS : (' '|'\\n') -> skip ;");
String grammar = grammarBuilder.toString();
String input ="-1";
String found = execParser("T.g4", grammar, "TParser", "TLexer", "TListener", "TVisitor", "s", input, false);
assertEquals("(s (expr (literal - 1)) <EOF>)\n", found);
assertNull(this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testPrefixAndOtherAlt_2() throws Exception {
mkdir(tmpdir);
StringBuilder grammarBuilder = new StringBuilder(216);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("s @after {print($ctx.toStringTree(recog=self))} : expr EOF ; \n");
grammarBuilder.append("expr : literal\n");
grammarBuilder.append(" | op expr\n");
grammarBuilder.append(" | expr op expr\n");
grammarBuilder.append(" ;\n");
grammarBuilder.append("literal : '-'? Integer ;\n");
grammarBuilder.append("op : '+' | '-' ;\n");
grammarBuilder.append("Integer : [0-9]+ ;\n");
grammarBuilder.append("WS : (' '|'\\n') -> skip ;");
String grammar = grammarBuilder.toString();
String input ="-1 + -1";
String found = execParser("T.g4", grammar, "TParser", "TLexer", "TListener", "TVisitor", "s", input, false);
assertEquals("(s (expr (expr (literal - 1)) (op +) (expr (literal - 1))) <EOF>)\n", found);
assertNull(this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testPrefixOpWithActionAndLabel_1() throws Exception {


@ -1,9 +1,10 @@
/* This file is generated by TestGenerator, any edits will be overwritten by the next generation. */
package org.antlr.v4.test.runtime.python2;
import org.junit.Ignore;
import org.junit.Test;
import static org.junit.Assert.*;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNull;
@SuppressWarnings("unused")
public class TestParseTrees extends BasePython2Test {
@ -64,6 +65,54 @@ public class TestParseTrees extends BasePython2Test {
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testAltNum() throws Exception {
mkdir(tmpdir);
StringBuilder grammarBuilder = new StringBuilder(562);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("\n");
grammarBuilder.append("options { contextSuperClass=MyRuleNode; }\n");
grammarBuilder.append("\n");
grammarBuilder.append("@parser::members {\n");
grammarBuilder.append("class MyRuleNode(ParserRuleContext):\n");
grammarBuilder.append(" def __init__(self, parent = None, invokingStateNumber = None ):\n");
grammarBuilder.append(" super(TParser.MyRuleNode, self).__init__(parent, invokingStateNumber)\n");
grammarBuilder.append(" self.altNum = 0;\n");
grammarBuilder.append(" def getAltNumber(self):\n");
grammarBuilder.append(" return self.altNum\n");
grammarBuilder.append(" def setAltNumber(self, altNum):\n");
grammarBuilder.append(" self.altNum = altNum\n");
grammarBuilder.append("}\n");
grammarBuilder.append("\n");
grammarBuilder.append("\n");
grammarBuilder.append("s\n");
grammarBuilder.append("@init {\n");
grammarBuilder.append("self._buildParseTrees = True\n");
grammarBuilder.append("}\n");
grammarBuilder.append("@after {\n");
grammarBuilder.append("print($r.ctx.toStringTree(recog=self))\n");
grammarBuilder.append("}\n");
grammarBuilder.append(" : r=a ;\n");
grammarBuilder.append("\n");
grammarBuilder.append("a : 'f'\n");
grammarBuilder.append(" | 'g'\n");
grammarBuilder.append(" | 'x' b 'z'\n");
grammarBuilder.append(" ;\n");
grammarBuilder.append("b : 'e' {} | 'y'\n");
grammarBuilder.append(" ;");
String grammar = grammarBuilder.toString();
String input ="xyz";
String found = execParser("T.g4", grammar, "TParser", "TLexer", "TListener", "TVisitor", "s", input, false);
assertEquals("(a:3 x (b:2 y) z)\n", found);
assertNull(this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testExtraToken() throws Exception {


@ -41,8 +41,8 @@ public abstract class BasePython3Test extends BasePythonTest {
@Override
protected String getPythonExecutable() {
return "python3.4";
}
return "python3.5";
} // force 3.5
@Override
protected void writeLexerTestFile(String lexerName, boolean showDFA) {


@ -2058,6 +2058,60 @@ public class TestLeftRecursion extends BasePython3Test {
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testPrefixAndOtherAlt_1() throws Exception {
mkdir(tmpdir);
StringBuilder grammarBuilder = new StringBuilder(216);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("s @after {print($ctx.toStringTree(recog=self))} : expr EOF ; \n");
grammarBuilder.append("expr : literal\n");
grammarBuilder.append(" | op expr\n");
grammarBuilder.append(" | expr op expr\n");
grammarBuilder.append(" ;\n");
grammarBuilder.append("literal : '-'? Integer ;\n");
grammarBuilder.append("op : '+' | '-' ;\n");
grammarBuilder.append("Integer : [0-9]+ ;\n");
grammarBuilder.append("WS : (' '|'\\n') -> skip ;");
String grammar = grammarBuilder.toString();
String input ="-1";
String found = execParser("T.g4", grammar, "TParser", "TLexer", "TListener", "TVisitor", "s", input, false);
assertEquals("(s (expr (literal - 1)) <EOF>)\n", found);
assertNull(this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testPrefixAndOtherAlt_2() throws Exception {
mkdir(tmpdir);
StringBuilder grammarBuilder = new StringBuilder(216);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("s @after {print($ctx.toStringTree(recog=self))} : expr EOF ; \n");
grammarBuilder.append("expr : literal\n");
grammarBuilder.append(" | op expr\n");
grammarBuilder.append(" | expr op expr\n");
grammarBuilder.append(" ;\n");
grammarBuilder.append("literal : '-'? Integer ;\n");
grammarBuilder.append("op : '+' | '-' ;\n");
grammarBuilder.append("Integer : [0-9]+ ;\n");
grammarBuilder.append("WS : (' '|'\\n') -> skip ;");
String grammar = grammarBuilder.toString();
String input ="-1 + -1";
String found = execParser("T.g4", grammar, "TParser", "TLexer", "TListener", "TVisitor", "s", input, false);
assertEquals("(s (expr (expr (literal - 1)) (op +) (expr (literal - 1))) <EOF>)\n", found);
assertNull(this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testPrefixOpWithActionAndLabel_1() throws Exception {


@ -1,9 +1,10 @@
/* This file is generated by TestGenerator, any edits will be overwritten by the next generation. */
package org.antlr.v4.test.runtime.python3;
import org.junit.Ignore;
import org.junit.Test;
import static org.junit.Assert.*;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNull;
@SuppressWarnings("unused")
public class TestParseTrees extends BasePython3Test {
@ -64,6 +65,54 @@ public class TestParseTrees extends BasePython3Test {
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testAltNum() throws Exception {
mkdir(tmpdir);
StringBuilder grammarBuilder = new StringBuilder(584);
grammarBuilder.append("grammar T;\n");
grammarBuilder.append("\n");
grammarBuilder.append("options { contextSuperClass=MyRuleNode; }\n");
grammarBuilder.append("\n");
grammarBuilder.append("@parser::members {\n");
grammarBuilder.append("class MyRuleNode(ParserRuleContext):\n");
grammarBuilder.append(" def __init__(self, parent:ParserRuleContext = None, invokingStateNumber:int = None ):\n");
grammarBuilder.append(" super(TParser.MyRuleNode, self).__init__(parent, invokingStateNumber)\n");
grammarBuilder.append(" self.altNum = 0;\n");
grammarBuilder.append(" def getAltNumber(self):\n");
grammarBuilder.append(" return self.altNum\n");
grammarBuilder.append(" def setAltNumber(self, altNum):\n");
grammarBuilder.append(" self.altNum = altNum\n");
grammarBuilder.append("}\n");
grammarBuilder.append("\n");
grammarBuilder.append("\n");
grammarBuilder.append("s\n");
grammarBuilder.append("@init {\n");
grammarBuilder.append("self._buildParseTrees = True\n");
grammarBuilder.append("}\n");
grammarBuilder.append("@after {\n");
grammarBuilder.append("print($r.ctx.toStringTree(recog=self))\n");
grammarBuilder.append("}\n");
grammarBuilder.append(" : r=a ;\n");
grammarBuilder.append("\n");
grammarBuilder.append("a : 'f'\n");
grammarBuilder.append(" | 'g'\n");
grammarBuilder.append(" | 'x' b 'z'\n");
grammarBuilder.append(" ;\n");
grammarBuilder.append("b : 'e' {} | 'y'\n");
grammarBuilder.append(" ;");
String grammar = grammarBuilder.toString();
String input ="xyz";
String found = execParser("T.g4", grammar, "TParser", "TLexer", "TListener", "TVisitor", "s", input, false);
assertEquals("(a:3 x (b:2 y) z)\n", found);
assertNull(this.stderrDuringParse);
}
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
@Test
public void testExtraToken() throws Exception {


@ -352,7 +352,7 @@ namespace Antlr4.Runtime
/// for a newly constructed parser.
/// </remarks>
/// <returns>
///
///
/// <see langword="true"/>
/// if a complete parse tree will be constructed while
/// parsing, otherwise
@ -378,14 +378,14 @@ namespace Antlr4.Runtime
/// by default for a newly constructed parser.
/// </remarks>
/// <value>
///
///
/// <see langword="true"/>
/// to trim the capacity of the
/// <see cref="ParserRuleContext.children"/>
/// list to its size after a rule is parsed.
/// </value>
/// <returns>
///
///
/// <see langword="true"/>
/// if the
/// <see cref="ParserRuleContext.children"/>
@ -649,9 +649,9 @@ namespace Antlr4.Runtime
}
}
public override IIntStream InputStream
public override IIntStream InputStream
{
get
get
{
return _input;
}
@ -659,11 +659,11 @@ namespace Antlr4.Runtime
public ITokenStream TokenStream
{
get
get
{
return _input;
}
set
set
{
this._input = null;
Reset ();
@ -842,6 +842,7 @@ namespace Antlr4.Runtime
public virtual void EnterOuterAlt(ParserRuleContext localctx, int altNum)
{
localctx.setAltNumber(altNum);
// if we have new localctx, make sure we replace existing ctx
// that is previous child of parse tree
if (_buildParseTrees && _ctx != localctx)
@ -1004,7 +1005,7 @@ namespace Antlr4.Runtime
/// </summary>
/// <param name="symbol">the symbol type to check</param>
/// <returns>
///
///
/// <see langword="true"/>
/// if
/// <paramref name="symbol"/>


@ -67,8 +67,8 @@ using System.Runtime.InteropServices;
// You can specify all the values or you can default the Build and Revision Numbers
// by using the '*' as shown below:
// [assembly: AssemblyVersion("1.0.*")]
[assembly: AssemblyVersion("4.5.1.0")]
[assembly: AssemblyVersion("4.5.3.0")]
#if !COMPACT
[assembly: AssemblyFileVersion("4.5.1.0")]
[assembly: AssemblyInformationalVersion("4.5.1-dev")]
[assembly: AssemblyFileVersion("4.5.3.0")]
[assembly: AssemblyInformationalVersion("4.5.3.0")]
#endif


@ -30,6 +30,7 @@
using System.Collections.Generic;
using System.Text;
using Antlr4.Runtime;
using Antlr4.Runtime.Atn;
using Antlr4.Runtime.Misc;
using Antlr4.Runtime.Sharpen;
using Antlr4.Runtime.Tree;
@ -139,7 +140,7 @@ namespace Antlr4.Runtime
{
return _parent;
}
set
set
{
_parent = value;
}
@ -216,6 +217,23 @@ namespace Antlr4.Runtime
}
}
/* For rule associated with this parse tree internal node, return
* the outer alternative number used to match the input. Default
* implementation does not compute nor store this alt num. Create
* a subclass of ParserRuleContext with backing field and set
* option contextSuperClass.
* to set it.
*/
public virtual int getAltNumber() { return Atn.ATN.InvalidAltNumber; }
/* Set the outer alternative number for this context node. Default
* implementation does nothing to avoid backing field overhead for
* trees that don't need it. Create
* a subclass of ParserRuleContext with backing field and set
* option contextSuperClass.
*/
public virtual void setAltNumber(int altNumber) { }
public virtual IParseTree GetChild(int i)
{
return null;


@ -30,6 +30,7 @@
using System.Collections.Generic;
using System.Text;
using Antlr4.Runtime;
using Antlr4.Runtime.Atn;
using Antlr4.Runtime.Misc;
using Antlr4.Runtime.Sharpen;
using Antlr4.Runtime.Tree;
@ -111,10 +112,14 @@ namespace Antlr4.Runtime.Tree
{
if (ruleNames != null)
{
if (t is IRuleNode)
if (t is RuleContext)
{
int ruleIndex = ((IRuleNode)t).RuleContext.RuleIndex;
int ruleIndex = ((RuleContext)t).RuleIndex;
string ruleName = ruleNames[ruleIndex];
int altNumber = ((RuleContext)t).getAltNumber();
if ( altNumber!=Atn.ATN.InvalidAltNumber ) {
return ruleName+":"+altNumber;
}
return ruleName;
}
else


@ -67,6 +67,8 @@ namespace Antlr4.Runtime
[NotNull]
private readonly string[] displayNames;
private readonly int maxTokenType;
/// <summary>
/// Constructs a new instance of
/// <see cref="Vocabulary"/>
@ -126,6 +128,19 @@ namespace Antlr4.Runtime
this.literalNames = literalNames != null ? literalNames : EmptyNames;
this.symbolicNames = symbolicNames != null ? symbolicNames : EmptyNames;
this.displayNames = displayNames != null ? displayNames : EmptyNames;
this.maxTokenType =
System.Math.Max(this.displayNames.Length,
System.Math.Max(this.literalNames.Length, this.symbolicNames.Length)) - 1;
}
/// <summary>
/// Returns the highest token type value. It can be used to iterate from
/// zero to that number, inclusively, thus querying all stored entries.
/// </summary>
public virtual int getMaxTokenType()
{
return maxTokenType;
}
[return: Nullable]


@ -3,7 +3,7 @@
<parent>
<groupId>org.antlr</groupId>
<artifactId>antlr4-master</artifactId>
<version>4.5.2-SNAPSHOT</version>
<version>4.5.4-SNAPSHOT</version>
<relativePath>../../pom.xml</relativePath>
</parent>
<artifactId>antlr4-runtime</artifactId>


@ -34,7 +34,6 @@ import org.antlr.v4.runtime.atn.ATNDeserializationOptions;
import org.antlr.v4.runtime.atn.ATNDeserializer;
import org.antlr.v4.runtime.atn.ATNSimulator;
import org.antlr.v4.runtime.atn.ATNState;
import org.antlr.v4.runtime.atn.AmbiguityInfo;
import org.antlr.v4.runtime.atn.ParseInfo;
import org.antlr.v4.runtime.atn.ParserATNSimulator;
import org.antlr.v4.runtime.atn.PredictionMode;
@ -51,7 +50,6 @@ import org.antlr.v4.runtime.tree.pattern.ParseTreePattern;
import org.antlr.v4.runtime.tree.pattern.ParseTreePatternMatcher;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.Map;
@ -649,6 +647,7 @@ public abstract class Parser extends Recognizer<Token, ParserATNSimulator> {
}
public void enterOuterAlt(ParserRuleContext localctx, int altNum) {
localctx.setAltNumber(altNum);
// if we have new localctx, make sure we replace existing ctx
// that is previous child of parse tree
if ( _buildParseTrees && _ctx != localctx ) {


@ -29,20 +29,17 @@
*/
package org.antlr.v4.runtime;
import org.antlr.v4.runtime.atn.ATN;
import org.antlr.v4.runtime.misc.Interval;
import org.antlr.v4.runtime.tree.ParseTree;
import org.antlr.v4.runtime.tree.ParseTreeVisitor;
import org.antlr.v4.runtime.tree.RuleNode;
import org.antlr.v4.runtime.tree.Trees;
import javax.print.PrintException;
import javax.swing.*;
import java.io.IOException;
import java.util.Arrays;
import java.util.List;
import java.util.concurrent.Future;
/** /** A rule context is a record of a single rule invocation.
/** A rule context is a record of a single rule invocation.
*
* We form a stack of these context objects using the parent
* pointer. A parent pointer of null indicates that the current
@ -169,6 +166,27 @@ public class RuleContext implements RuleNode {
public int getRuleIndex() { return -1; }
/** For rule associated with this parse tree internal node, return
* the outer alternative number used to match the input. Default
* implementation does not compute nor store this alt num. Create
* a subclass of ParserRuleContext with backing field and set
* option contextSuperClass.
* to set it.
*
* @since 4.5.3
*/
public int getAltNumber() { return ATN.INVALID_ALT_NUMBER; }
/** Set the outer alternative number for this context node. Default
* implementation does nothing to avoid backing field overhead for
* trees that don't need it. Create
* a subclass of ParserRuleContext with backing field and set
* option contextSuperClass.
*
* @since 4.5.3
*/
public void setAltNumber(int altNumber) { }
@Override
public ParseTree getChild(int i) {
return null;


@ -0,0 +1,24 @@
package org.antlr.v4.runtime;
import org.antlr.v4.runtime.atn.ATN;
/** A handy class for use with
*
* options {contextSuperClass=org.antlr.v4.runtime.RuleContextWithAltNum;}
*
* that provides a backing field / impl for the outer alternative number
* matched for an internal parse tree node.
*
 * I'm only putting this into the Java runtime as I'm certain I'm the only one
 * that will really ever use this.
*/
public class RuleContextWithAltNum extends ParserRuleContext {
public int altNum;
public RuleContextWithAltNum() { altNum = ATN.INVALID_ALT_NUMBER; }
public RuleContextWithAltNum(ParserRuleContext parent, int invokingStateNumber) {
super(parent, invokingStateNumber);
}
@Override public int getAltNumber() { return altNum; }
@Override public void setAltNumber(int altNum) { this.altNum = altNum; }
}
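Aside (not part of the diff): a minimal sketch of how the new contextSuperClass option and RuleContextWithAltNum fit together, based on the generated testAltNum tests above. TLexer/TParser stand in for the classes ANTLR would generate from the T.g4 grammar used in those tests; since enterOuterAlt() now calls setAltNumber(), every internal node remembers the outer alternative that matched, and Trees renders it as ruleName:altNumber.

// Illustrative sketch only. Assumes a grammar equivalent to the testAltNum grammar above:
//   grammar T;
//   options { contextSuperClass=org.antlr.v4.runtime.RuleContextWithAltNum; }
//   s : r=a ;
//   a : 'f' | 'g' | 'x' b 'z' ;
//   b : 'e' {} | 'y' ;
import org.antlr.v4.runtime.ANTLRInputStream;
import org.antlr.v4.runtime.CommonTokenStream;
import org.antlr.v4.runtime.ParserRuleContext;

public class AltNumDemo {
    public static void main(String[] args) {
        TLexer lexer = new TLexer(new ANTLRInputStream("xyz"));      // TLexer/TParser: generated classes (hypothetical here)
        TParser parser = new TParser(new CommonTokenStream(lexer));
        ParserRuleContext tree = parser.s();
        // Each context extends RuleContextWithAltNum, so enterOuterAlt() stored the alternative number.
        ParserRuleContext aCtx = (ParserRuleContext) tree.getChild(0);
        System.out.println(aCtx.getAltNumber());        // 3, because 'x' b 'z' is the third alt of rule a
        System.out.println(aCtx.toStringTree(parser));  // (a:3 x (b:2 y) z), matching the assertion above
    }
}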


@ -91,7 +91,7 @@ public class RuntimeMetaData {
* omitted.</li>
* </ul>
*/
public static final String VERSION = "4.5.1";
public static final String VERSION = "4.5.3";
/**
* Gets the currently executing version of the ANTLR 4 runtime library.


@ -37,6 +37,13 @@ package org.antlr.v4.runtime;
* @author Sam Harwell
*/
public interface Vocabulary {
/**
* Returns the highest token type value. It can be used to iterate from
* zero to that number, inclusively, thus querying all stored entries.
* @return the highest token type value
*/
int getMaxTokenType();
/**
* Gets the string literal associated with a token type. The string returned
* by this method, when not {@code null}, can be used unaltered in a parser
@ -85,7 +92,7 @@ public interface Vocabulary {
*
* <ul>
* <li>Tokens created by lexer rules.</li>
* <li>Tokens defined in a {@code tokens{}} block in a lexer or parser
* <li>Tokens defined in a <code>tokens{}</code> block in a lexer or parser
* grammar.</li>
* <li>The implicitly defined {@code EOF} token, which has the token type
* {@link Token#EOF}.</li>


@ -57,6 +57,8 @@ public class VocabularyImpl implements Vocabulary {
private final String[] displayNames;
private final int maxTokenType;
/**
* Constructs a new instance of {@link VocabularyImpl} from the specified
* literal and symbolic token names.
@ -94,6 +96,10 @@ public class VocabularyImpl implements Vocabulary {
this.literalNames = literalNames != null ? literalNames : EMPTY_NAMES;
this.symbolicNames = symbolicNames != null ? symbolicNames : EMPTY_NAMES;
this.displayNames = displayNames != null ? displayNames : EMPTY_NAMES;
// See note here on -1 part: https://github.com/antlr/antlr4/pull/1146
this.maxTokenType =
Math.max(this.displayNames.length,
Math.max(this.literalNames.length, this.symbolicNames.length)) - 1;
}
/**
@ -143,6 +149,11 @@ public class VocabularyImpl implements Vocabulary {
return new VocabularyImpl(literalNames, symbolicNames, tokenNames);
}
@Override
public int getMaxTokenType() {
return maxTokenType;
}
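Aside (not part of the diff): a short sketch of how the new Vocabulary.getMaxTokenType() can be used, per its javadoc, to enumerate every stored token type. TParser is again a hypothetical generated parser and VOCABULARY its generated static Vocabulary field.

import org.antlr.v4.runtime.Vocabulary;

public class VocabularyDump {
    public static void main(String[] args) {
        Vocabulary vocab = TParser.VOCABULARY;  // assumed generated field on the parser
        // Iterate from zero to the highest token type, inclusively, querying all entries.
        for (int type = 0; type <= vocab.getMaxTokenType(); type++) {
            System.out.printf("%d: literal=%s, symbolic=%s, display=%s%n",
                    type,
                    vocab.getLiteralName(type),   // may be null (e.g. a named token with no literal)
                    vocab.getSymbolicName(type),  // may be null (e.g. an anonymous literal like '+')
                    vocab.getDisplayName(type));  // never null; falls back to the numeric type
        }
    }
}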
@Override
public String getLiteralName(int tokenType) {
if (tokenType >= 0 && tokenType < literalNames.length) {


@ -424,6 +424,12 @@ public class ATNSerializer {
.append(ATNState.serializationNames.get(stype)).append(" ")
.append(ruleIndex).append(arg).append("\n");
}
// this code is meant to model the form of ATNDeserializer.deserialize,
// since both need to be updated together whenever a change is made to
// the serialization format. The "dead" code is only used in debugging
// and testing scenarios, so the form you see here was kept for
// improved maintainability.
// start
int numNonGreedyStates = ATNDeserializer.toInt(data[p++]);
for (int i = 0; i < numNonGreedyStates; i++) {
int stateNumber = ATNDeserializer.toInt(data[p++]);
@ -432,6 +438,7 @@ public class ATNSerializer {
for (int i = 0; i < numPrecedenceStates; i++) {
int stateNumber = ATNDeserializer.toInt(data[p++]);
}
// finish
int nrules = ATNDeserializer.toInt(data[p++]);
for (int i=0; i<nrules; i++) {
int s = ATNDeserializer.toInt(data[p++]);
@ -487,12 +494,17 @@ public class ATNSerializer {
buf.append(i).append(":").append(s).append("\n");
}
if (atn.grammarType == ATNType.LEXER) {
int lexerActionCount = ATNDeserializer.toInt(data[p++]);
for (int i = 0; i < lexerActionCount; i++) {
LexerActionType actionType = LexerActionType.values()[ATNDeserializer.toInt(data[p++])];
int data1 = ATNDeserializer.toInt(data[p++]);
int data2 = ATNDeserializer.toInt(data[p++]);
}
// this code is meant to model the form of ATNDeserializer.deserialize,
// since both need to be updated together whenever a change is made to
// the serialization format. The "dead" code is only used in debugging
// and testing scenarios, so the form you see here was kept for
// improved maintainability.
int lexerActionCount = ATNDeserializer.toInt(data[p++]);
for (int i = 0; i < lexerActionCount; i++) {
LexerActionType actionType = LexerActionType.values()[ATNDeserializer.toInt(data[p++])];
int data1 = ATNDeserializer.toInt(data[p++]);
int data2 = ATNDeserializer.toInt(data[p++]);
}
}
return buf.toString();
}


@ -788,7 +788,8 @@ public class ParserATNSimulator extends ATNSimulator {
protected ATNConfigSet computeReachSet(ATNConfigSet closure, int t,
boolean fullCtx)
{
if ( debug ) System.out.println("in computeReachSet, starting closure: " + closure);
if ( debug )
System.out.println("in computeReachSet, starting closure: " + closure);
if (mergeCache == null) {
mergeCache = new DoubleKeyMap<PredictionContext, PredictionContext, PredictionContext>();


@ -30,9 +30,6 @@
package org.antlr.v4.runtime.misc;
import java.awt.*;
import java.awt.event.WindowAdapter;
import java.awt.event.WindowEvent;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
@ -151,38 +148,6 @@ public class Utils {
return data;
}
public static void waitForClose(final Window window) throws InterruptedException {
final Object lock = new Object();
Thread t = new Thread() {
@Override
public void run() {
synchronized (lock) {
while (window.isVisible()) {
try {
lock.wait(500);
} catch (InterruptedException e) {
}
}
}
}
};
t.start();
window.addWindowListener(new WindowAdapter() {
@Override
public void windowClosing(WindowEvent arg0) {
synchronized (lock) {
window.setVisible(false);
lock.notify();
}
}
});
t.join();
}
/** Convert array of strings to string&rarr;index map. Useful for
* converting rulenames to name&rarr;ruleindex map.
*/


@ -33,7 +33,9 @@ package org.antlr.v4.runtime.tree;
import org.antlr.v4.runtime.CommonToken;
import org.antlr.v4.runtime.Parser;
import org.antlr.v4.runtime.ParserRuleContext;
import org.antlr.v4.runtime.RuleContext;
import org.antlr.v4.runtime.Token;
import org.antlr.v4.runtime.atn.ATN;
import org.antlr.v4.runtime.misc.Interval;
import org.antlr.v4.runtime.misc.Predicate;
import org.antlr.v4.runtime.misc.Utils;
@ -91,9 +93,13 @@ public class Trees {
public static String getNodeText(Tree t, List<String> ruleNames) {
if ( ruleNames!=null ) {
if ( t instanceof RuleNode ) {
int ruleIndex = ((RuleNode)t).getRuleContext().getRuleIndex();
if ( t instanceof RuleContext ) {
int ruleIndex = ((RuleContext)t).getRuleContext().getRuleIndex();
String ruleName = ruleNames.get(ruleIndex);
int altNumber = ((RuleContext) t).getAltNumber();
if ( altNumber!=ATN.INVALID_ALT_NUMBER ) {
return ruleName+":"+altNumber;
}
return ruleName;
}
else if ( t instanceof ErrorNode) {


@ -8,6 +8,6 @@ This runtime has been tested in Node.js, Safari, Firefox, Chrome and IE.
See www.antlr.org for more information on ANTLR
See https://raw.githubusercontent.com/antlr/antlr4/master/doc/javascript-target.md for more information on using ANTLR in JavaScript
See https://github.com/antlr/antlr4/blob/master/doc/javascript-target.md for more information on using ANTLR in JavaScript


@ -321,7 +321,7 @@ BufferedTokenStream.prototype.getHiddenTokensToRight = function(tokenIndex,
channel = -1;
}
this.lazyInit();
if (this.tokenIndex < 0 || tokenIndex >= this.tokens.length) {
if (tokenIndex < 0 || tokenIndex >= this.tokens.length) {
throw "" + tokenIndex + " not in 0.." + this.tokens.length - 1;
}
var nextOnChannel = this.nextTokenOnChannel(tokenIndex + 1,


@ -157,7 +157,7 @@ LL1Analyzer.prototype.LOOK = function(s, stopState, ctx) {
// is {@code null}.
///
LL1Analyzer.prototype._LOOK = function(s, stopState , ctx, look, lookBusy, calledRuleStack, seeThruPreds, addEOF) {
var c = new ATNConfig({state:s, alt:0}, ctx);
var c = new ATNConfig({state:s, alt:0, context: ctx}, null);
if (lookBusy.contains(c)) {
return;
}


@ -471,6 +471,7 @@ Parser.prototype.exitRule = function() {
};
Parser.prototype.enterOuterAlt = function(localctx, altNum) {
localctx.setAltNumber(altNum);
// if we have new localctx, make sure we replace existing ctx
// that is previous child of parse tree
if (this.buildParseTrees && this._ctx !== localctx) {


@ -8,6 +8,6 @@ This runtime has been tested in Node.js, Safari, Firefox, Chrome and IE.
See www.antlr.org for more information on ANTLR
See https://theantlrguy.atlassian.net/wiki/display/ANTLR4/JavaScript+Target for more information on using ANTLR in JavaScript
See https://github.com/antlr/antlr4/blob/master/doc/javascript-target.md for more information on using ANTLR in JavaScript


@ -45,7 +45,7 @@ Recognizer.ruleIndexMapCache = {};
Recognizer.prototype.checkVersion = function(toolVersion) {
var runtimeVersion = "4.5.1";
var runtimeVersion = "4.5.3";
if (runtimeVersion!==toolVersion) {
console.log("ANTLR runtime and generated code versions disagree: "+runtimeVersion+"!="+toolVersion);
}


@ -51,6 +51,7 @@
var RuleNode = require('./tree/Tree').RuleNode;
var INVALID_INTERVAL = require('./tree/Tree').INVALID_INTERVAL;
var INVALID_ALT_NUMBER = require('./atn/ATN').INVALID_ALT_NUMBER;
function RuleContext(parent, invokingState) {
RuleNode.call(this);
@ -113,6 +114,21 @@ RuleContext.prototype.getText = function() {
}
};
// For rule associated with this parse tree internal node, return
// the outer alternative number used to match the input. Default
// implementation does not compute nor store this alt num. Create
// a subclass of ParserRuleContext with backing field and set
// option contextSuperClass.
// to set it.
RuleContext.prototype.getAltNumber = function() { return INVALID_ALT_NUMBER; }
// Set the outer alternative number for this context node. Default
// implementation does nothing to avoid backing field overhead for
// trees that don't need it. Create
// a subclass of ParserRuleContext with backing field and set
// option contextSuperClass.
RuleContext.prototype.setAltNumber = function(altNumber) { }
RuleContext.prototype.getChild = function(i) {
return null;
};


@ -50,7 +50,7 @@ function checkParams(params, isCfg) {
} else {
var props = {};
props.state = params.state || null;
props.alt = params.alt || null;
props.alt = (params.alt === undefined) ? null : params.alt;
props.context = params.context || null;
props.semanticContext = params.semanticContext || null;
if(isCfg) {


@ -42,7 +42,7 @@ function LexerActionExecutor(lexerActions) {
this.lexerActions = lexerActions === null ? [] : lexerActions;
// Caches the result of {@link //hashCode} since the hash code is an element
// of the performance-critical {@link LexerATNConfig//hashCode} operation.
this.hashString = lexerActions.toString(); // "".join([str(la) for la in
this._hashString = lexerActions.toString(); // "".join([str(la) for la in
// lexerActions]))
return this;
}
@ -172,7 +172,7 @@ LexerActionExecutor.prototype.execute = function(lexer, input, startIndex) {
};
LexerActionExecutor.prototype.hashString = function() {
return this.hashString;
return this._hashString;
};
LexerActionExecutor.prototype.equals = function(other) {
@ -180,9 +180,18 @@ LexerActionExecutor.prototype.equals = function(other) {
return true;
} else if (!(other instanceof LexerActionExecutor)) {
return false;
} else if (this._hashString != other._hashString) {
return false;
} else if (this.lexerActions.length != other.lexerActions.length) {
return false;
} else {
return this.hashString === other.hashString &&
this.lexerActions === other.lexerActions;
var numActions = this.lexerActions.length
for (var idx = 0; idx < numActions; ++idx) {
if (!this.lexerActions[idx].equals(other.lexerActions[idx])) {
return false;
}
}
return true;
}
};


@ -1301,10 +1301,10 @@ ParserATNSimulator.prototype.closureCheckingStopState = function(config, configs
}
continue;
}
returnState = this.atn.states[config.context.getReturnState(i)];
newContext = config.context.getParent(i); // "pop" return state
var returnState = this.atn.states[config.context.getReturnState(i)];
var newContext = config.context.getParent(i); // "pop" return state
var parms = {state:returnState, alt:config.alt, context:newContext, semanticContext:config.semanticContext};
c = new ATNConfig(parms, null);
var c = new ATNConfig(parms, null);
// While we have context to pop back from, we may have
// gotten that context AFTER having falling off a rule.
// Make sure we track that we are now out of context.


@ -38,6 +38,9 @@ var BitSet = require('./../Utils').BitSet;
var AltDict = require('./../Utils').AltDict;
var ATN = require('./ATN').ATN;
var RuleStopState = require('./ATNState').RuleStopState;
var ATNConfigSet = require('./ATNConfigSet').ATNConfigSet;
var ATNConfig = require('./ATNConfig').ATNConfig;
var SemanticContext = require('./SemanticContext').SemanticContext;
function PredictionMode() {
return this;
@ -580,4 +583,4 @@ PredictionMode.getSingleViableAlt = function(altsets) {
return result;
};
exports.PredictionMode = PredictionMode;
exports.PredictionMode = PredictionMode;


@ -30,6 +30,8 @@
///
var ATNConfigSet = require('./../atn/ATNConfigSet').ATNConfigSet;
var Utils = require('./../Utils');
var Set = Utils.Set;
// Map a predicate to a predicted alternative.///
@ -163,4 +165,4 @@ DFAState.prototype.hashString = function() {
};
exports.DFAState = DFAState;
exports.PredPrediction = PredPrediction;
exports.PredPrediction = PredPrediction;


@ -1,6 +1,6 @@
{
"name": "antlr4",
"version": "4.5.1",
"version": "4.5.3",
"description": "JavaScript runtime for ANTLR4",
"main": "src/antlr4/index.js",
"repository": "antlr/antlr4.git",


@ -95,6 +95,13 @@ ParseTreeVisitor.prototype.visit = function(ctx) {
}
};
ParseTreeVisitor.prototype.visitTerminal = function(node) {
};
ParseTreeVisitor.prototype.visitErrorNode = function(node) {
};
var visitAtom = function(visitor, ctx) {
if (ctx.parser === undefined) { //is terminal
return;


@ -34,6 +34,8 @@ var RuleNode = require('./Tree').RuleNode;
var ErrorNode = require('./Tree').ErrorNode;
var TerminalNode = require('./Tree').TerminalNode;
var ParserRuleContext = require('./../ParserRuleContext').ParserRuleContext;
var RuleContext = require('./../RuleContext').RuleContext;
var INVALID_ALT_NUMBER = require('./../atn/ATN').INVALID_ALT_NUMBER;
/** A set of utility routines useful for all kinds of ANTLR trees. */
@ -75,8 +77,12 @@ Trees.getNodeText = function(t, ruleNames, recog) {
ruleNames = recog.ruleNames;
}
if(ruleNames!==null) {
if (t instanceof RuleNode) {
return ruleNames[t.getRuleContext().ruleIndex];
if (t instanceof RuleContext) {
var altNumber = t.getAltNumber();
if ( altNumber!=INVALID_ALT_NUMBER ) {
return ruleNames[t.ruleIndex]+":"+altNumber;
}
return ruleNames[t.ruleIndex];
} else if ( t instanceof ErrorNode) {
return t.toString();
} else if(t instanceof TerminalNode) {
@ -115,7 +121,7 @@ Trees.getAncestors = function(t) {
}
return ancestors;
};
Trees.findAllTokenNodes = function(t, ttype) {
return Trees.findAllNodes(t, ttype, true);
};


@ -1,6 +1,6 @@
var Tree = require('./Tree');
exports.Trees = require('./Tree').Trees;
exports.Trees = require('./Trees').Trees;
exports.RuleNode = Tree.RuleNode;
exports.ParseTreeListener = Tree.ParseTreeListener;
exports.ParseTreeVisitor = Tree.ParseTreeVisitor;
exports.ParseTreeWalker = Tree.ParseTreeWalker;
exports.ParseTreeWalker = Tree.ParseTreeWalker;


@ -62,7 +62,13 @@
// anchor element as parser in that case. This breaks web worker support,
// but we don't care since these browsers also don't support web workers.
var parser = URL ? new URL(location.href) : document.createElement('A');
try {
var parser = new URL(location.href);
}
catch (e) {
console.warn("Honey: falling back to DOM workaround for URL parser ("+e+")");
parser = document.createElement('A');
}
// INFO Module cache
// Contains getter functions for the exports objects of all the loaded
@ -81,7 +87,7 @@
delete cache.foo;
}
catch (e) {
console.warn("Honey: falling back to DOM workaround for defineProperty ("+e+")");
console.warn("Honey: falling back to DOM workaround for cache object ("+e+")");
cache = document.createElement('DIV');
}


@ -2,12 +2,12 @@ from distutils.core import setup
setup(
name='antlr4-python2-runtime',
version='4.5.2',
version='4.5.3',
packages=['antlr4', 'antlr4.atn', 'antlr4.dfa', 'antlr4.tree', 'antlr4.error', 'antlr4.xpath'],
package_dir={'': 'src'},
url='http://www.antlr.org',
license='BSD',
author='Eric Vergnaud, Terence Parr, Sam Harwell',
author_email='eric.vergnaud@wanadoo.fr',
description='ANTLR 4.5.2 runtime for Python 2.7.6'
description='ANTLR 4.5.3 runtime for Python 2.7.6'
)


@ -97,16 +97,10 @@ class IntervalSet(object):
if self.intervals is None:
return False
else:
for i in self.intervals:
if item in i:
return True
return False
return any(item in i for i in self.intervals)
def __len__(self):
xlen = 0
for i in self.intervals:
xlen += len(i)
return xlen
return sum(len(i) for i in self.intervals)
def removeRange(self, v):
if v.start==v.stop-1:
@ -126,7 +120,7 @@ class IntervalSet(object):
# check for included range, remove it
elif v.start<=i.start and v.stop>=i.stop:
self.intervals.pop(k)
k = k - 1 # need another pass
k -= 1 # need another pass
# check for lower boundary
elif v.start<i.stop:
self.intervals[k] = Interval(i.start, v.start)


@ -101,9 +101,7 @@ class ListTokenSource(TokenSource):
line = lastToken.line
tokenText = lastToken.text
if tokenText is not None:
for c in tokenText:
if c == '\n':
line += 1
line += tokenText.count('\n')
# if no text is available, assume the token did not contain any newline characters.
return line


@ -38,7 +38,7 @@ from antlr4.tree.ParseTreePatternMatcher import ParseTreePatternMatcher
from antlr4.tree.Tree import ParseTreeListener
class TraceListener(ParseTreeListener):
def __init__(self, parser):
self._parser = parser
@ -152,7 +152,7 @@ class Parser (Recognizer):
# @throws RecognitionException if the current input symbol did not match
# a wildcard and the error strategy could not recover from the mismatched
# symbol
def matchWildcard(self):
t = self.getCurrentToken()
if t.type > 0:
@ -382,6 +382,7 @@ class Parser (Recognizer):
self._ctx = self._ctx.parentCtx
def enterOuterAlt(self, localctx, altNum):
localctx.setAltNumber(altNum)
# if we have new localctx, make sure we replace existing ctx
# that is previous child of parse tree
if self.buildParseTrees and self._ctx != localctx:


@ -30,6 +30,7 @@
#/
from io import StringIO
from antlr4.RuleContext import RuleContext
from antlr4.atn.ATN import ATN
from antlr4.atn.ATNState import ATNState
@ -98,7 +99,7 @@ def calculateHashCode(parent, returnState):
def calculateListsHashCode(parents, returnStates ):
h = 0
for parent, returnState in parents, returnStates:
for parent, returnState in zip(parents, returnStates):
h = hash((h, calculateHashCode(parent, returnState)))
return h
@ -254,6 +255,10 @@ class ArrayPredictionContext(PredictionContext):
buf.write(u"]")
return buf.getvalue()
def __hash__(self):
return self.cachedHashCode
# Convert a {@link RuleContext} tree to a {@link PredictionContext} graph.
# Return {@link #EMPTY} if {@code outerContext} is empty or null.
@ -328,18 +333,18 @@ def merge(a, b, rootIsWildcard, mergeCache):
#/
def mergeSingletons(a, b, rootIsWildcard, mergeCache):
if mergeCache is not None:
previous = mergeCache.get(a,b)
previous = mergeCache.get((a,b), None)
if previous is not None:
return previous
previous = mergeCache.get(b,a)
previous = mergeCache.get((b,a), None)
if previous is not None:
return previous
rootMerge = mergeRoot(a, b, rootIsWildcard)
if rootMerge is not None:
merged = mergeRoot(a, b, rootIsWildcard)
if merged is not None:
if mergeCache is not None:
mergeCache.put(a, b, rootMerge)
return rootMerge
mergeCache[(a, b)] = merged
return merged
if a.returnState==b.returnState:
parent = merge(a.parentCtx, b.parentCtx, rootIsWildcard, mergeCache)
@ -352,10 +357,10 @@ def mergeSingletons(a, b, rootIsWildcard, mergeCache):
# merge parents x and y, giving array node with x,y then remainders
# of those graphs. dup a, a' points at merged array
# new joined parent so create new singleton pointing to it, a'
a_ = SingletonPredictionContext.create(parent, a.returnState)
merged = SingletonPredictionContext.create(parent, a.returnState)
if mergeCache is not None:
mergeCache.put(a, b, a_)
return a_
mergeCache[(a, b)] = merged
return merged
else: # a != b payloads differ
# see if we can collapse parents due to $+x parents if local ctx
singleParent = None
@ -365,26 +370,24 @@ def mergeSingletons(a, b, rootIsWildcard, mergeCache):
# sort payloads and use same parent
payloads = [ a.returnState, b.returnState ]
if a.returnState > b.returnState:
payloads[0] = b.returnState
payloads[1] = a.returnState
payloads = [ b.returnState, a.returnState ]
parents = [singleParent, singleParent]
a_ = ArrayPredictionContext(parents, payloads)
merged = ArrayPredictionContext(parents, payloads)
if mergeCache is not None:
mergeCache.put(a, b, a_)
return a_
mergeCache[(a, b)] = merged
return merged
# parents differ and can't merge them. Just pack together
# into array; can't merge.
# ax + by = [ax,by]
payloads = [ a.returnState, b.returnState ]
parents = [ a.parentCtx, b.parentCtx ]
if a.returnState > b.returnState: # sort by payload
payloads[0] = b.returnState
payloads[1] = a.returnState
payloads = [ b.returnState, a.returnState ]
parents = [ b.parentCtx, a.parentCtx ]
a_ = ArrayPredictionContext(parents, payloads)
merged = ArrayPredictionContext(parents, payloads)
if mergeCache is not None:
mergeCache.put(a, b, a_)
return a_
mergeCache[(a, b)] = merged
return merged
#
@ -466,10 +469,10 @@ def mergeRoot(a, b, rootIsWildcard):
#/
def mergeArrays(a, b, rootIsWildcard, mergeCache):
if mergeCache is not None:
previous = mergeCache.get(a,b)
previous = mergeCache.get((a,b), None)
if previous is not None:
return previous
previous = mergeCache.get(b,a)
previous = mergeCache.get((b,a), None)
if previous is not None:
return previous
@ -478,8 +481,8 @@ def mergeArrays(a, b, rootIsWildcard, mergeCache):
j = 0 # walks b
k = 0 # walks target M array
mergedReturnStates = [] * (len(a.returnState) + len( b.returnStates))
mergedParents = [] * len(mergedReturnStates)
mergedReturnStates = [None] * (len(a.returnStates) + len( b.returnStates))
mergedParents = [None] * len(mergedReturnStates)
# walk and merge to yield mergedParents, mergedReturnStates
while i<len(a.returnStates) and j<len(b.returnStates):
a_parent = a.parents[i]
@ -525,30 +528,30 @@ def mergeArrays(a, b, rootIsWildcard, mergeCache):
# trim merged if we combined a few that had same stack tops
if k < len(mergedParents): # write index < last position; trim
if k == 1: # for just one merged element, return singleton top
a_ = SingletonPredictionContext.create(mergedParents[0], mergedReturnStates[0])
merged = SingletonPredictionContext.create(mergedParents[0], mergedReturnStates[0])
if mergeCache is not None:
mergeCache.put(a,b,a_)
return a_
mergeCache[(a,b)] = merged
return merged
mergedParents = mergedParents[0:k]
mergedReturnStates = mergedReturnStates[0:k]
M = ArrayPredictionContext(mergedParents, mergedReturnStates)
merged = ArrayPredictionContext(mergedParents, mergedReturnStates)
# if we created same array as a or b, return that instead
# TODO: track whether this is possible above during merge sort for speed
if M==a:
if merged==a:
if mergeCache is not None:
mergeCache.put(a,b,a)
mergeCache[(a,b)] = a
return a
if M==b:
if merged==b:
if mergeCache is not None:
mergeCache.put(a,b,b)
mergeCache[(a,b)] = b
return b
combineCommonParents(mergedParents)
if mergeCache is not None:
mergeCache.put(a,b,M)
return M
mergeCache[(a,b)] = merged
return merged
#
@ -582,15 +585,14 @@ def getCachedPredictionContext(context, contextCache, visited):
parent = getCachedPredictionContext(context.getParent(i), contextCache, visited)
if changed or parent is not context.getParent(i):
if not changed:
parents = [None] * len(context)
for j in range(0, len(context)):
parents[j] = context.getParent(j)
parents = [context.getParent(j) for j in range(len(context))]
changed = True
parents[i] = parent
if not changed:
contextCache.add(context)
visited[context] = context
return context
updated = None
if len(parents) == 0:
updated = PredictionContext.EMPTY
@ -642,6 +644,6 @@ def getAllContextNodes(context, nodes=None, visited=None):
visited.put(context, context)
nodes.add(context)
for i in range(0, len(context)):
getAllContextNodes(context.getParent(i), nodes, visited);
getAllContextNodes(context.getParent(i), nodes, visited)
return nodes


@ -55,7 +55,7 @@ class Recognizer(object):
return major, minor
def checkVersion(self, toolVersion):
runtimeVersion = "4.5.2"
runtimeVersion = "4.5.3"
rvmajor, rvminor = self.extractVersion(runtimeVersion)
tvmajor, tvminor = self.extractVersion(toolVersion)
if rvmajor!=tvmajor or rvminor!=tvminor:


@ -52,6 +52,7 @@
from io import StringIO
from antlr4.tree.Tree import RuleNode, INVALID_INTERVAL
from antlr4.tree.Trees import Trees
from antlr4.atn.ATN import ATN
class RuleContext(RuleNode):
@ -109,6 +110,23 @@ class RuleContext(RuleNode):
def getRuleIndex(self):
return -1
# For rule associated with this parse tree internal node, return
# the outer alternative number used to match the input. Default
# implementation does not compute nor store this alt num. Create
# a subclass of ParserRuleContext with backing field and set
# option contextSuperClass.
# to set it.
def getAltNumber(self):
return ATN.INVALID_ALT_NUMBER
# Set the outer alternative number for this context node. Default
# implementation does nothing to avoid backing field overhead for
# trees that don't need it. Create
# a subclass of ParserRuleContext with backing field and set
# option contextSuperClass.
def setAltNumber(self, altNumber):
pass
def getChild(self, i):
return None


@ -95,6 +95,19 @@ class ATNConfig(object):
def __hash__(self):
return hash((self.state.stateNumber, self.alt, self.context, self.semanticContext))
def hashCodeForConfigSet(self):
return hash((self.state.stateNumber, self.alt, hash(self.semanticContext)))
def equalsForConfigSet(self, other):
if self is other:
return True
elif not isinstance(other, ATNConfig):
return False
else:
return self.state.stateNumber==other.state.stateNumber \
and self.alt==other.alt \
and self.semanticContext==other.semanticContext
def __str__(self):
return unicode(self)
@ -144,6 +157,18 @@ class LexerATNConfig(ATNConfig):
return False
return super(LexerATNConfig, self).__eq__(other)
def hashCodeForConfigSet(self):
return hash(self)
def equalsForConfigSet(self, other):
return self==other
def checkNonGreedyDecision(self, source, target):
return source.passedThroughNonGreedyDecision \
or isinstance(target, DecisionState) and target.nonGreedy


@ -33,6 +33,7 @@
# info about the set, with support for combining similar configurations using a
# graph-structured stack.
#/
from functools import reduce
from io import StringIO
from antlr4.PredictionContext import merge
from antlr4.Utils import str_list
@ -105,8 +106,8 @@ class ATNConfigSet(object):
rootIsWildcard = not self.fullCtx
merged = merge(existing.context, config.context, rootIsWildcard, mergeCache)
# no need to check for existing.context, config.context in cache
# since only way to create new graphs is "call rule" and here. We
# cache at both places.
# since only way to create new graphs is "call rule" and here.
# We cache at both places.
existing.reachesIntoOuterContext = max(existing.reachesIntoOuterContext, config.reachesIntoOuterContext)
# make sure to preserve the precedence filter suppression during the merge
if config.precedenceFilterSuppressed:
@ -115,12 +116,12 @@ class ATNConfigSet(object):
return True
def getOrAdd(self, config):
h = hash(config)
h = config.hashCodeForConfigSet()
l = self.configLookup.get(h, None)
if l is not None:
for c in l:
if c==config:
return c
r = next((c for c in l if config.equalsForConfigSet(c)), None)
if r is not None:
return r
if l is None:
l = [config]
self.configLookup[h] = l
@ -129,17 +130,10 @@ class ATNConfigSet(object):
return config
def getStates(self):
states = set()
for c in self.configs:
states.add(c.state)
return states
return set(cfg.state for cfg in self.configs)
def getPredicates(self):
preds = list()
for c in self.configs:
if c.semanticContext!=SemanticContext.NONE:
preds.append(c.semanticContext)
return preds
return [cfg.semanticContext for cfg in self.configs if cfg.semanticContext!=SemanticContext.NONE]
def get(self, i):
return self.configs[i]
@ -181,10 +175,7 @@ class ATNConfigSet(object):
return self.hashConfigs()
def hashConfigs(self):
h = 0
for cfg in self.configs:
h = hash((h, cfg))
return h
return reduce(lambda h, cfg: hash((h, cfg)), self.configs, 0)
def __len__(self):
return len(self.configs)


@ -141,10 +141,7 @@ class ATNState(object):
return self.stateNumber
def __eq__(self, other):
if isinstance(other, ATNState):
return self.stateNumber==other.stateNumber
else:
return False
return isinstance(other, ATNState) and self.stateNumber==other.stateNumber
def onlyHasEpsilonTransitions(self):
return self.epsilonOnlyTransitions


@ -130,7 +130,7 @@ class LexerATNSimulator(ATNSimulator):
def matchATN(self, input):
startState = self.atn.modeToStartState[self.mode]
if self.debug:
if LexerATNSimulator.debug:
print("matchATN mode " + str(self.mode) + " start: " + str(startState))
old_mode = self.mode
@ -144,13 +144,13 @@ class LexerATNSimulator(ATNSimulator):
predict = self.execATN(input, next)
if self.debug:
if LexerATNSimulator.debug:
print("DFA after matchATN: " + str(self.decisionToDFA[old_mode].toLexerString()))
return predict
def execATN(self, input, ds0):
if self.debug:
if LexerATNSimulator.debug:
print("start state closure=" + str(ds0.configs))
if ds0.isAcceptState:
@ -161,8 +161,8 @@ class LexerATNSimulator(ATNSimulator):
s = ds0 # s is current/from DFA state
while True: # while more work
if self.debug:
print("execATN loop starting closure: %s\n", s.configs)
if LexerATNSimulator.debug:
print("execATN loop starting closure:", str(s.configs))
# As we move src->trg, src->trg, we keep track of the previous trg to
# avoid looking up the DFA state again, which is expensive.
@ -223,8 +223,8 @@ class LexerATNSimulator(ATNSimulator):
return None
target = s.edges[t - self.MIN_DFA_EDGE]
if self.debug and target is not None:
print("reuse state "+s.stateNumber+ " edge to "+target.stateNumber)
if LexerATNSimulator.debug and target is not None:
print("reuse state", str(s.stateNumber), "edge to", str(target.stateNumber))
return target
@ -280,8 +280,8 @@ class LexerATNSimulator(ATNSimulator):
if currentAltReachedAcceptState and cfg.passedThroughNonGreedyDecision:
continue
if self.debug:
print("testing %s at %s\n", self.getTokenName(t), cfg.toString(self.recog, True))
if LexerATNSimulator.debug:
print("testing", self.getTokenName(t), "at", str(cfg))
for trans in cfg.state.transitions: # for each transition
target = self.getReachableTarget(trans, t)
@ -298,8 +298,8 @@ class LexerATNSimulator(ATNSimulator):
skipAlt = cfg.alt
def accept(self, input, lexerActionExecutor, startIndex, index, line, charPos):
if self.debug:
print("ACTION %s\n", lexerActionExecutor)
if LexerATNSimulator.debug:
print("ACTION", lexerActionExecutor)
# seek to after last char in token
input.seek(index)
@ -334,15 +334,15 @@ class LexerATNSimulator(ATNSimulator):
# {@code false}.
def closure(self, input, config, configs, currentAltReachedAcceptState,
speculative, treatEofAsEpsilon):
if self.debug:
print("closure("+config.toString(self.recog, True)+")")
if LexerATNSimulator.debug:
print("closure(" + str(config) + ")")
if isinstance( config.state, RuleStopState ):
if self.debug:
if LexerATNSimulator.debug:
if self.recog is not None:
print("closure at %s rule stop %s\n", self.recog.getRuleNames()[config.state.ruleIndex], config)
print("closure at", self.recog.symbolicNames[config.state.ruleIndex], "rule stop", str(config))
else:
print("closure at rule stop %s\n", config)
print("closure at rule stop", str(config))
if config.context is None or config.context.hasEmptyPath():
if config.context is None or config.context.isEmpty():
@ -404,7 +404,7 @@ class LexerATNSimulator(ATNSimulator):
# states reached by traversing predicates. Since this is when we
# test them, we cannot cache the DFA state target of ID.
if self.debug:
if LexerATNSimulator.debug:
print("EVAL rule "+ str(t.ruleIndex) + ":" + str(t.predIndex))
configs.hasSemanticContext = True
if self.evaluatePredicate(input, t.ruleIndex, t.predIndex, speculative):
@ -516,7 +516,7 @@ class LexerATNSimulator(ATNSimulator):
# Only track edges within the DFA bounds
return to
if self.debug:
if LexerATNSimulator.debug:
print("EDGE " + str(from_) + " -> " + str(to) + " upon "+ chr(tk))
if from_.edges is None:
@ -535,11 +535,7 @@ class LexerATNSimulator(ATNSimulator):
def addDFAState(self, configs):
proposed = DFAState(configs=configs)
firstConfigWithRuleStopState = None
for c in configs:
if isinstance(c.state, RuleStopState):
firstConfigWithRuleStopState = c
break
firstConfigWithRuleStopState = next((cfg for cfg in configs if isinstance(cfg.state, RuleStopState)), None)
if firstConfigWithRuleStopState is not None:
proposed.isAcceptState = True


@ -308,7 +308,7 @@ class ParserATNSimulator(ATNSimulator):
pass
def adaptivePredict(self, input, decision, outerContext):
if self.debug or self.debug_list_atn_decisions:
if ParserATNSimulator.debug or ParserATNSimulator.debug_list_atn_decisions:
print("adaptivePredict decision " + str(decision) +
" exec LA(1)==" + self.getLookaheadName(input) +
" line " + str(input.LT(1).line) + ":" +
@ -336,10 +336,10 @@ class ParserATNSimulator(ATNSimulator):
if s0 is None:
if outerContext is None:
outerContext = ParserRuleContext.EMPTY
if self.debug or self.debug_list_atn_decisions:
if ParserATNSimulator.debug or ParserATNSimulator.debug_list_atn_decisions:
print("predictATN decision " + str(dfa.decision) +
" exec LA(1)==" + self.getLookaheadName(input) +
", outerContext=" + outerContext.toString(self.parser))
", outerContext=" + outerContext.toString(self.parser.literalNames, None))
# If this is not a precedence DFA, we check the ATN start state
# to determine if this ATN start state is the decision for the
@ -368,8 +368,8 @@ class ParserATNSimulator(ATNSimulator):
dfa.s0 = s0
alt = self.execATN(dfa, s0, input, index, outerContext)
if self.debug:
print("DFA after predictATN: " + dfa.toString(self.parser.tokenNames))
if ParserATNSimulator.debug:
print("DFA after predictATN: " + dfa.toString(self.parser.literalNames))
return alt
finally:
self._dfa = None
@ -408,14 +408,14 @@ class ParserATNSimulator(ATNSimulator):
# conflict + preds
#
def execATN(self, dfa, s0, input, startIndex, outerContext ):
if self.debug or self.debug_list_atn_decisions:
if ParserATNSimulator.debug or ParserATNSimulator.debug_list_atn_decisions:
print("execATN decision " + str(dfa.decision) +
" exec LA(1)==" + self.getLookaheadName(input) +
" line " + str(input.LT(1).line) + ":" + str(input.LT(1).column))
previousD = s0
if self.debug:
if ParserATNSimulator.debug:
print("s0 = " + str(s0))
t = input.LA(1)
@ -445,7 +445,7 @@ class ParserATNSimulator(ATNSimulator):
# IF PREDS, MIGHT RESOLVE TO SINGLE ALT => SLL (or syntax error)
conflictingAlts = None
if D.predicates is not None:
if self.debug:
if ParserATNSimulator.debug:
print("DFA state has preds in DFA sim LL failover")
conflictIndex = input.index
if conflictIndex != startIndex:
@ -453,7 +453,7 @@ class ParserATNSimulator(ATNSimulator):
conflictingAlts = self.evalSemanticContext(D.predicates, outerContext, True)
if len(conflictingAlts)==1:
if self.debug:
if ParserATNSimulator.debug:
print("Full LL avoided")
return min(conflictingAlts)
@ -462,7 +462,7 @@ class ParserATNSimulator(ATNSimulator):
# context occurs with the index at the correct spot
input.seek(conflictIndex)
if self.dfa_debug:
if ParserATNSimulator.dfa_debug:
print("ctx sensitive state " + str(outerContext) +" in " + str(D))
fullCtx = True
s0_closure = self.computeStartState(dfa.atnStartState, outerContext, fullCtx)
@ -534,7 +534,7 @@ class ParserATNSimulator(ATNSimulator):
predictedAlt = self.getUniqueAlt(reach)
if self.debug:
if ParserATNSimulator.debug:
altSubSets = PredictionMode.getConflictingAltSubsets(reach)
print("SLL altSubSets=" + str(altSubSets) + ", configs=" + str(reach) +
", predict=" + str(predictedAlt) + ", allSubsetsConflict=" +
@ -586,8 +586,8 @@ class ParserATNSimulator(ATNSimulator):
input,
startIndex,
outerContext):
if self.debug or self.debug_list_atn_decisions:
print("execATNWithFullContext "+s0)
if ParserATNSimulator.debug or ParserATNSimulator.debug_list_atn_decisions:
print("execATNWithFullContext", str(s0))
fullCtx = True
foundExactAmbig = False
reach = None
@ -616,7 +616,7 @@ class ParserATNSimulator(ATNSimulator):
raise e
altSubSets = PredictionMode.getConflictingAltSubsets(reach)
if self.debug:
if ParserATNSimulator.debug:
print("LL altSubSets=" + str(altSubSets) + ", predict=" +
str(PredictionMode.getUniqueAlt(altSubSets)) + ", resolvesToJustOneViableAlt=" +
str(PredictionMode.resolvesToJustOneViableAlt(altSubSets)))
@ -685,7 +685,7 @@ class ParserATNSimulator(ATNSimulator):
return predictedAlt
def computeReachSet(self, closure, t, fullCtx):
if self.debug:
if ParserATNSimulator.debug:
print("in computeReachSet, starting closure: " + str(closure))
if self.mergeCache is None:
@ -707,7 +707,7 @@ class ParserATNSimulator(ATNSimulator):
# First figure out where we can reach on input t
for c in closure:
if self.debug:
if ParserATNSimulator.debug:
print("testing " + self.getTokenName(t) + " at " + str(c))
if isinstance(c.state, RuleStopState):
@ -967,7 +967,7 @@ class ParserATNSimulator(ATNSimulator):
# nonambig alts are null in altToPred
if nPredAlts==0:
altToPred = None
if self.debug:
if ParserATNSimulator.debug:
print("getPredsForAmbigAlts result " + str_list(altToPred))
return altToPred
@ -1093,11 +1093,11 @@ class ParserATNSimulator(ATNSimulator):
break
continue
predicateEvaluationResult = pair.pred.eval(self.parser, outerContext)
if self.debug or self.dfa_debug:
if ParserATNSimulator.debug or ParserATNSimulator.dfa_debug:
print("eval pred " + str(pair) + "=" + str(predicateEvaluationResult))
if predicateEvaluationResult:
if self.debug or self.dfa_debug:
if ParserATNSimulator.debug or ParserATNSimulator.dfa_debug:
print("PREDICT " + str(pair.alt))
predictions.add(pair.alt)
if not complete:
@ -1119,8 +1119,8 @@ class ParserATNSimulator(ATNSimulator):
def closureCheckingStopState(self, config, configs, closureBusy, collectPredicates, fullCtx, depth, treatEofAsEpsilon):
if self.debug:
print("closure(" + config.toString(self.parser,True) + ")")
if ParserATNSimulator.debug:
print("closure(" + str(config) + ")")
if isinstance(config.state, RuleStopState):
# We hit rule end. If we have context info, use it
@ -1134,7 +1134,7 @@ class ParserATNSimulator(ATNSimulator):
continue
else:
# we have no context info, just chase follow links (if greedy)
if self.debug:
if ParserATNSimulator.debug:
print("FALLING off rule " + self.getRuleName(config.state.ruleIndex))
self.closure_(config, configs, closureBusy, collectPredicates,
fullCtx, depth, treatEofAsEpsilon)
@ -1154,7 +1154,7 @@ class ParserATNSimulator(ATNSimulator):
return
else:
# else if we have no context info, just chase follow links (if greedy)
if self.debug:
if ParserATNSimulator.debug:
print("FALLING off rule " + self.getRuleName(config.state.ruleIndex))
self.closure_(config, configs, closureBusy, collectPredicates, fullCtx, depth, treatEofAsEpsilon)
@ -1196,7 +1196,7 @@ class ParserATNSimulator(ATNSimulator):
c.reachesIntoOuterContext += 1
configs.dipsIntoOuterContext = True # TODO: can remove? only care when we add to set per middle of this method
newDepth -= 1
if self.debug:
if ParserATNSimulator.debug:
print("dips into outer ctx: " + str(c))
elif isinstance(t, RuleTransition):
# latch when newDepth goes negative - once we step out of the entry context we can't return
@ -1237,12 +1237,12 @@ class ParserATNSimulator(ATNSimulator):
return m(self, config, t, collectPredicates, inContext, fullCtx, treatEofAsEpsilon)
def actionTransition(self, config, t):
if self.debug:
if ParserATNSimulator.debug:
print("ACTION edge " + str(t.ruleIndex) + ":" + str(t.actionIndex))
return ATNConfig(state=t.target, config=config)
def precedenceTransition(self, config, pt, collectPredicates, inContext, fullCtx):
if self.debug:
if ParserATNSimulator.debug:
print("PRED (collectPredicates=" + str(collectPredicates) + ") " +
str(pt.precedence) + ">=_p, ctx dependent=true")
if self.parser is not None:
@ -1267,12 +1267,12 @@ class ParserATNSimulator(ATNSimulator):
else:
c = ATNConfig(state=pt.target, config=config)
if self.debug:
if ParserATNSimulator.debug:
print("config from pred transition=" + str(c))
return c
def predTransition(self, config, pt, collectPredicates, inContext, fullCtx):
if self.debug:
if ParserATNSimulator.debug:
print("PRED (collectPredicates=" + str(collectPredicates) + ") " + str(pt.ruleIndex) +
":" + str(pt.predIndex) + ", ctx dependent=" + str(pt.isCtxDependent))
if self.parser is not None:
@ -1297,12 +1297,12 @@ class ParserATNSimulator(ATNSimulator):
else:
c = ATNConfig(state=pt.target, config=config)
if self.debug:
if ParserATNSimulator.debug:
print("config from pred transition=" + str(c))
return c
def ruleTransition(self, config, t):
if self.debug:
if ParserATNSimulator.debug:
print("CALL rule " + self.getRuleName(t.target.ruleIndex) + ", ctx=" + str(config.context))
returnState = t.followState
newContext = SingletonPredictionContext.create(config.context, returnState.stateNumber)
@ -1360,13 +1360,12 @@ class ParserATNSimulator(ATNSimulator):
def getTokenName(self, t):
if t==Token.EOF:
return u"EOF"
if self.parser is not None and self.parser.tokenNames is not None:
if t >= len(self.parser.tokenNames):
print(str(t) + " ttype out of range: " + str_list(self.parser.tokenNames))
print(str_list(self.parser.getInputStream().getTokens()))
else:
return self.parser.tokensNames[t] + u"<" + unicode(t) + ">"
return unicode(t)
if self.parser is not None and \
self.parser.literalNames is not None and \
t < len(self.parser.literalNames):
return self.parser.literalNames[t] + u"<" + unicode(t) + ">"
else:
return unicode(t)
def getLookaheadName(self, input):
return self.getTokenName(input.LA(1))
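
The rewritten getTokenName above guards the literalNames lookup with a bounds check and falls back to the raw token type. A hedged sketch of the same guard pattern, using hypothetical names:

def token_name(literal_names, t):
    # hypothetical helper mirroring the guarded lookup above
    if literal_names is not None and t < len(literal_names):
        return literal_names[t] + "<" + str(t) + ">"
    return str(t)

print(token_name(["<EOF>", "'+'", "'-'"], 1))   # '+'<1>
print(token_name(None, 7))                      # 7
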
@ -1421,7 +1420,7 @@ class ParserATNSimulator(ATNSimulator):
# on {@code to}
#
def addDFAEdge(self, dfa, from_, t, to):
if self.debug:
if ParserATNSimulator.debug:
print("EDGE " + str(from_) + " -> " + str(to) + " upon " + self.getTokenName(t))
if to is None:
@ -1435,8 +1434,8 @@ class ParserATNSimulator(ATNSimulator):
from_.edges = [None] * (self.atn.maxTokenType + 2)
from_.edges[t+1] = to # connect
if self.debug:
names = None if self.parser is None else self.parser.tokenNames
if ParserATNSimulator.debug:
names = None if self.parser is None else self.parser.literalNames
print("DFA=\n" + dfa.toString(names))
return to
@ -1470,12 +1469,12 @@ class ParserATNSimulator(ATNSimulator):
D.configs.optimizeConfigs(self)
D.configs.setReadonly(True)
dfa.states[D] = D
if self.debug:
if ParserATNSimulator.debug:
print("adding new DFA state: " + str(D))
return D
def reportAttemptingFullContext(self, dfa, conflictingAlts, configs, startIndex, stopIndex):
if self.debug or self.retry_debug:
if ParserATNSimulator.debug or ParserATNSimulator.retry_debug:
interval = range(startIndex, stopIndex + 1)
print("reportAttemptingFullContext decision=" + str(dfa.decision) + ":" + str(configs) +
", input=" + self.parser.getTokenStream().getText(interval))
@ -1483,7 +1482,7 @@ class ParserATNSimulator(ATNSimulator):
self.parser.getErrorListenerDispatch().reportAttemptingFullContext(self.parser, dfa, startIndex, stopIndex, conflictingAlts, configs)
def reportContextSensitivity(self, dfa, prediction, configs, startIndex, stopIndex):
if self.debug or self.retry_debug:
if ParserATNSimulator.debug or ParserATNSimulator.retry_debug:
interval = range(startIndex, stopIndex + 1)
print("reportContextSensitivity decision=" + str(dfa.decision) + ":" + str(configs) +
", input=" + self.parser.getTokenStream().getText(interval))
@ -1493,7 +1492,7 @@ class ParserATNSimulator(ATNSimulator):
# If context sensitive parsing, we know it's ambiguity not conflict#
def reportAmbiguity(self, dfa, D, startIndex, stopIndex,
exact, ambigAlts, configs ):
if self.debug or self.retry_debug:
if ParserATNSimulator.debug or ParserATNSimulator.retry_debug:
# ParserATNPathFinder finder = new ParserATNPathFinder(parser, atn);
# int i = 1;
# for (Transition t : dfa.atnStartState.transitions) {
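
The hunks above consistently read the debug switches through the class (ParserATNSimulator.debug) rather than the instance (self.debug). A minimal sketch, with a hypothetical stand-in class, of why that distinction matters when the flag lives at class level:

class Simulator(object):
    # class-level switch shared by every instance
    # (hypothetical stand-in for ParserATNSimulator.debug)
    debug = False

    def step(self):
        # reading through the class sees the shared flag,
        # even when an instance has shadowed it with its own attribute
        if Simulator.debug:
            print("tracing step")

sim = Simulator()
sim.debug = False          # shadows the flag on this instance only
Simulator.debug = True     # flip the shared switch
sim.step()                 # prints "tracing step" despite sim.debug being False
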

View File

@ -232,10 +232,7 @@ class PredictionMode(object):
# {@link RuleStopState}, otherwise {@code false}
@classmethod
def hasConfigInRuleStopState(cls, configs):
for c in configs:
if isinstance(c.state, RuleStopState):
return True
return False
return any(isinstance(cfg.state, RuleStopState) for cfg in configs)
# Checks if all configurations in {@code configs} are in a
# {@link RuleStopState}. Configurations meeting this condition have reached
@ -247,10 +244,7 @@ class PredictionMode(object):
# {@link RuleStopState}, otherwise {@code false}
@classmethod
def allConfigsInRuleStopStates(cls, configs):
for config in configs:
if not isinstance(config.state, RuleStopState):
return False
return True
return all(isinstance(cfg.state, RuleStopState) for cfg in configs)
#
# Full LL prediction termination.
@ -419,10 +413,7 @@ class PredictionMode(object):
#
@classmethod
def hasNonConflictingAltSet(cls, altsets):
for alts in altsets:
if len(alts)==1:
return True
return False
return any(len(alts) == 1 for alts in altsets)
#
# Determines if any single alternative subset in {@code altsets} contains
@ -434,10 +425,7 @@ class PredictionMode(object):
#
@classmethod
def hasConflictingAltSet(cls, altsets):
for alts in altsets:
if len(alts)>1:
return True
return False
return any(len(alts) > 1 for alts in altsets)
#
# Determines if every alternative subset in {@code altsets} is equivalent.
@ -448,13 +436,9 @@ class PredictionMode(object):
#
@classmethod
def allSubsetsEqual(cls, altsets):
first = None
for alts in altsets:
if first is None:
first = alts
elif not alts==first:
return False
return True
if not altsets:
return True
return all(alts == altsets[0] for alts in altsets[1:])
#
# Returns the unique alternative predicted by all alternative subsets in
@ -467,9 +451,8 @@ class PredictionMode(object):
def getUniqueAlt(cls, altsets):
all = cls.getAlts(altsets)
if len(all)==1:
return all[0]
else:
return ATN.INVALID_ALT_NUMBER
return all.pop()
return ATN.INVALID_ALT_NUMBER
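
getUniqueAlt reads the single member with pop() because getAlts (next hunk) returns a Python set, and sets cannot be indexed. A small sketch:

alts = set([3])        # a one-element alt set, as returned by getAlts
# alts[0]              # TypeError: sets are not indexable
only = alts.pop()      # pop() returns the single member (and removes it)
print(only)            # 3
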
# Gets the complete set of represented alternatives for a collection of
# alternative subsets. This method returns the union of each {@link BitSet}
@ -480,10 +463,7 @@ class PredictionMode(object):
#
@classmethod
def getAlts(cls, altsets):
all = set()
for alts in altsets:
all = all | alts
return all
return set.union(*altsets)
#
# This function gets the conflicting alt subsets from a configuration set.
@ -527,11 +507,7 @@ class PredictionMode(object):
@classmethod
def hasStateAssociatedWithOneAlt(cls, configs):
x = cls.getStateToAltMap(configs)
for alts in x.values():
if len(alts)==1:
return True
return False
return any(len(alts) == 1 for alts in cls.getStateToAltMap(configs).values())
@classmethod
def getSingleViableAlt(cls, altsets):
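
Several of the PredictionMode helpers above collapse short loops into any()/all() over generator expressions; both short-circuit exactly as the explicit loops did. A minimal sketch, using hypothetical stand-ins for the configuration objects:

from collections import namedtuple

Config = namedtuple("Config", ["state"])     # hypothetical stand-in
class RuleStopState(object):
    pass

configs = [Config(state=RuleStopState()), Config(state=object())]

# True as soon as one element matches, stopping early
has_stop = any(isinstance(c.state, RuleStopState) for c in configs)
# True only if every element matches
all_stop = all(isinstance(c.state, RuleStopState) for c in configs)
print(has_stop)    # True
print(all_stop)    # False
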

View File

@ -115,14 +115,7 @@ def orContext(a, b):
return result
def filterPrecedencePredicates(collection):
result = []
for context in collection:
if isinstance(context, PrecedencePredicate):
if result is None:
result = []
result.append(context)
return result
return [context for context in collection if isinstance(context, PrecedencePredicate)]
class Predicate(SemanticContext):
@ -187,13 +180,11 @@ class AND(SemanticContext):
def __init__(self, a, b):
operands = set()
if isinstance( a, AND):
for o in a.opnds:
operands.add(o)
operands.update(a.opnds)
else:
operands.add(a)
if isinstance( b, AND):
for o in b.opnds:
operands.add(o)
operands.update(b.opnds)
else:
operands.add(b)
@ -203,7 +194,7 @@ class AND(SemanticContext):
reduced = min(precedencePredicates)
operands.add(reduced)
self.opnds = [ o for o in operands ]
self.opnds = list(operands)
def __eq__(self, other):
if self is other:
@ -227,10 +218,7 @@ class AND(SemanticContext):
# unordered.</p>
#
def eval(self, parser, outerContext):
for opnd in self.opnds:
if not opnd.eval(parser, outerContext):
return False
return True
return all(opnd.eval(parser, outerContext) for opnd in self.opnds)
def evalPrecedence(self, parser, outerContext):
differs = False
@ -277,13 +265,11 @@ class OR (SemanticContext):
def __init__(self, a, b):
operands = set()
if isinstance( a, OR):
for o in a.opnds:
operands.add(o)
operands.update(a.opnds)
else:
operands.add(a)
if isinstance( b, OR):
for o in b.opnds:
operands.add(o)
operands.update(b.opnds)
else:
operands.add(b)
@ -291,10 +277,10 @@ class OR (SemanticContext):
if len(precedencePredicates)>0:
# interested in the transition with the highest precedence
s = sorted(precedencePredicates)
reduced = s[len(s)-1]
reduced = s[-1]
operands.add(reduced)
self.opnds = [ o for o in operands ]
self.opnds = list(operands)
def __eq__(self, other):
if self is other:
@ -315,10 +301,7 @@ class OR (SemanticContext):
# unordered.</p>
#
def eval(self, parser, outerContext):
for opnd in self.opnds:
if opnd.eval(parser, outerContext):
return True
return False
return any(opnd.eval(parser, outerContext) for opnd in self.opnds)
def evalPrecedence(self, parser, outerContext):
differs = False
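
In the AND/OR constructors above, set.update() folds the operands of a nested context into the set in one call, with the same de-duplication the element-by-element loop had. A tiny sketch with hypothetical predicate names:

operands = set()
nested = ["p1", "p2"]        # hypothetical operands of a nested AND/OR
operands.update(nested)      # same effect as adding each element in a loop
operands.add("p2")           # duplicates are collapsed by the set
print(sorted(operands))      # ['p1', 'p2']
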

View File

@ -105,14 +105,9 @@ class DFAState(object):
# Get the set of all alts mentioned by all ATN configurations in this
# DFA state.
def getAltSet(self):
alts = set()
if self.configs is not None:
for c in self.configs:
alts.add(c.alt)
if len(alts)==0:
return None
else:
return alts
return set(cfg.alt for cfg in self.configs) or None
return None
def __hash__(self):
return hash(self.configs)
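
The rewritten getAltSet relies on an empty set being falsy, so set(...) or None reproduces the old len(alts)==0 branch. A short sketch:

def alt_set(alts):
    # an empty set is falsy, so `or None` reproduces the old empty-check
    return set(alts) or None

print(alt_set([1, 2, 2]))    # {1, 2} (set([1, 2]) on Python 2)
print(alt_set([]))           # None
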

Some files were not shown because too many files have changed in this diff.