Merge branch 'master' into patch-1
This commit is contained in:
commit
1bbb69ffc7
|
@ -3,7 +3,7 @@ target/
|
|||
# ... but not code generation targets
|
||||
!tool/src/org/antlr/v4/codegen/target/
|
||||
|
||||
# Node.js (npm and typings) cached dependencies
|
||||
# Node.js (npm and typings) cached dependencies
|
||||
node_modules/
|
||||
typings/
|
||||
|
||||
|
@ -98,3 +98,6 @@ xcuserdata
|
|||
javac-services.0.log
|
||||
javac-services.0.log.lck
|
||||
test/
|
||||
|
||||
# Don't ignore python tests
|
||||
!runtime/Python3/test/
|
||||
|
|
|
@ -152,6 +152,10 @@ matrix:
|
|||
jdk: openjdk8
|
||||
env: TARGET=csharp
|
||||
stage: main-test
|
||||
- os: linux
|
||||
jdk: openjdk8
|
||||
env: TARGET=dart
|
||||
stage: main-test
|
||||
- os: linux
|
||||
language: php
|
||||
php:
|
||||
|
|
|
@ -0,0 +1,7 @@
|
|||
#!/bin/bash
|
||||
|
||||
set -euo pipefail
|
||||
wget https://storage.googleapis.com/dart-archive/channels/stable/release/2.8.4/linux_packages/dart_2.8.4-1_amd64.deb
|
||||
sudo dpkg -i ./dart_2.8.4-1_amd64.deb
|
||||
sudo rm ./dart_2.8.4-1_amd64.deb
|
||||
sudo apt-get install -f
|
|
@ -0,0 +1,4 @@
|
|||
#!/bin/bash
|
||||
|
||||
set -euo pipefail
|
||||
mvn -q -Dparallel=classes -DthreadCount=4 -Dtest=dart.* test
|
|
@ -23,6 +23,7 @@ ANTLR project lead and supreme dictator for life
|
|||
* [Ewan Mellor](https://github.com/ewanmellor), [Hanzhou Shi](https://github.com/hanjoes) (Swift target merging)
|
||||
* [Ben Hamilton](https://github.com/bhamiltoncx) (Full Unicode support in serialized ATN and all languages' runtimes for code points > U+FFFF)
|
||||
* [Marcos Passos](https://github.com/marcospassos) (PHP target)
|
||||
* [Lingyu Li](https://github.com/lingyv-li) (Dart target)
|
||||
|
||||
## Useful information
|
||||
|
||||
|
|
|
@ -8,6 +8,7 @@ install:
|
|||
- git submodule update --init --recursive
|
||||
- cinst -y php --params "/InstallDir:C:\tools\php"
|
||||
- cinst -y composer
|
||||
- cinst -y dart-sdk --version=2.8.4
|
||||
build_script:
|
||||
- mvn -DskipTests install --batch-mode
|
||||
- msbuild /target:restore /target:rebuild /property:Configuration=Release /logger:"C:\Program Files\AppVeyor\BuildAgent\Appveyor.MSBuildLogger.dll" /verbosity:detailed runtime/CSharp/runtime/CSharp/Antlr4.dotnet.sln
|
||||
|
@ -15,7 +16,7 @@ build_script:
|
|||
after_build:
|
||||
- msbuild /target:pack /property:Configuration=Release /verbosity:detailed runtime/CSharp/runtime/CSharp/Antlr4.dotnet.sln
|
||||
test_script:
|
||||
- mvn install -Dantlr-php-php="C:\tools\php\php.exe" -Dantlr-python2-python="C:\Python27\python.exe" -Dantlr-python3-python="C:\Python35\python.exe" -Dantlr-javascript-nodejs="C:\Program Files (x86)\nodejs\node.exe" --batch-mode
|
||||
- mvn install -Dantlr-php-php="C:\tools\php\php.exe" -Dantlr-dart-dart="C:\tools\dart-sdk\bin\dart.exe" -Dantlr-dart-pub="C:\tools\dart-sdk\bin\pub.bat" -Dantlr-dart-dart2native="C:\tools\dart-sdk\bin\dart2native.bat" -Dantlr-python2-python="C:\Python27\python.exe" -Dantlr-python3-python="C:\Python35\python.exe" -Dantlr-javascript-nodejs="C:\Program Files (x86)\nodejs\node.exe" --batch-mode
|
||||
artifacts:
|
||||
- path: 'runtime\**\*.nupkg'
|
||||
name: NuGet
|
|
@ -1,5 +1,9 @@
|
|||
ANTLR Project Contributors Certification of Origin and Rights
|
||||
|
||||
NOTE: This tool is mature and Terence is mostly occupied elsewhere. We
|
||||
can't accept any changes that could have widespread effects on thousands
|
||||
of existing projects. Sorry!
|
||||
|
||||
All contributors to ANTLR v4 must formally agree to abide by this
|
||||
certificate of origin by signing on the bottom with their github
|
||||
userid, full name, email address (you can obscure your e-mail, but it
|
||||
|
@ -204,6 +208,7 @@ YYYY/MM/DD, github id, Full name, email
|
|||
2018/07/03, jgoppert, James Goppert, james.goppert@gmail.com
|
||||
2018/07/27, Maksim Novikov, mnovikov.work@gmail.com
|
||||
2018/08/03, ENDOH takanao, djmchl@gmail.com
|
||||
2018/10/08, xsIceman, Andreas Skaar, andreas.skaar@gmail.com
|
||||
2018/10/18, edirgarcia, Edir García Lazo, edirgl@hotmail.com
|
||||
2018/07/31, Lucas Henrqiue, lucashenrique580@gmail.com
|
||||
2018/08/03, ENDOH takanao, djmchl@gmail.com
|
||||
|
@ -237,6 +242,7 @@ YYYY/MM/DD, github id, Full name, email
|
|||
2019/11/11, foxeverl, Liu Xinfeng, liuxf1986[at]gmail[dot]com
|
||||
2019/11/17, felixn, Felix Nieuwenhuizhen, felix@tdlrali.com
|
||||
2019/11/18, mlilback, Mark Lilback, mark@lilback.com
|
||||
2020/01/19, lingyv-li, Lingyu Li, lingyv.li@gmail.com
|
||||
2020/02/02, carocad, Camilo Roca, carocad@unal.edu.co
|
||||
2020/02/10, julibert, Julián Bermúdez Ortega, julibert.dev@gmail.com
|
||||
2020/02/21, StochasticTinkr, Daniel Pitts, github@coloraura.com
|
||||
|
@ -244,3 +250,15 @@ YYYY/MM/DD, github id, Full name, email
|
|||
2020/04/07, deniskyashif, Denis Kyashif, denis.kyashif@gmail.com
|
||||
2020/04/30, TristonianJones, Tristan Swadell, tswadell@google.com
|
||||
2020/05/06, iammosespaulr, Moses Paul R, iammosespaulr@gmail.com
|
||||
2020/06/04, sigmasoldi3r, Pablo Blanco, pablobc.1995@gmail.com
|
||||
2020/05/25, graknol, Sindre van der Linden, graknol@gmail.com
|
||||
2020/05/31, d-markey, David Markey, dmarkey@free.fr
|
||||
2020/06/02, cohomology, Kilian Kilger, kkilger AT gmail.com
|
||||
2020/06/04, IohannRabeson, Iohann Rabeson, iotaka6@gmail.com
|
||||
2020/07/01, sha-N, Shan M Mathews, admin@bluestarqatar.com
|
||||
2020/08/22, stevenjohnstone, Steven Johnstone, steven.james.johnstone@gmail.com
|
||||
2020/09/06, ArthurSonzogni, Sonzogni Arthur, arthursonzogni@gmail.com
|
||||
2020/09/12, Clcanny, Charles Ruan, a837940593@gmail.com
|
||||
2020/09/15, rmcgregor1990, Robert McGregor, rmcgregor1990@gmail.com
|
||||
2020/09/16, trenki2, Markus Trenkwalder, trenki2[at]gmx[dot]net
|
||||
2020/10/08, Marti2203, Martin Mirchev, mirchevmartin2203@gmail.com
|
|
@ -1,6 +1,6 @@
|
|||
# C++
|
||||
|
||||
The C++ target supports all platforms that can either run MS Visual Studio 2013 (or newer), XCode 7 (or newer) or CMake (C++11 required). All build tools can either create static or dynamic libraries, both as 64bit or 32bit arch. Additionally, XCode can create an iOS library. Also see [Antlr4 for C++ with CMake: A practical example](http://blorente.me//Antlr-,-C++-and-CMake-Wait-what.html).
|
||||
The C++ target supports all platforms that can either run MS Visual Studio 2013 (or newer), XCode 7 (or newer) or CMake (C++11 required). All build tools can either create static or dynamic libraries, both as 64bit or 32bit arch. Additionally, XCode can create an iOS library. Also see [Antlr4 for C++ with CMake: A practical example](https://beyondtheloop.dev/Antlr-cpp-cmake/).
|
||||
|
||||
## How to create a C++ lexer or parser?
|
||||
This is pretty much the same as creating a Java lexer or parser, except you need to specify the language target, for example:
|
||||
|
@ -65,7 +65,7 @@ int main(int argc, const char* argv[]) {
|
|||
|
||||
tree::ParseTree *tree = parser.key();
|
||||
TreeShapeListener listener;
|
||||
tree::ParseTreeWalker::DEFAULT->walk(&listener, tree);
|
||||
tree::ParseTreeWalker::DEFAULT.walk(&listener, tree);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@ -10,6 +10,8 @@ Creating a new target involves the following key elements:
|
|||
1. Create *X*.stg in directory tool/resources/org/antlr/v4/tool/templates/codegen/*X*/*X*.stg. This is a [StringTemplate](http://www.stringtemplate.org/) group file (`.stg`) that tells ANTLR how to express all of the parsing elements needed to generate code. You will see templates called `ParserFile`, `Parser`, `Lexer`, `CodeBlockForAlt`, `AltBlock`, etc... Each of these must be described how to build the indicated chunk of code. Your best bet is to find the closest existing target, copy that template file, and tweak to suit.
|
||||
1. Create a runtime library to support the parsers generated by ANTLR. Under directory runtime/*X*, you are in complete control of the directory structure as dictated by common usage of that target language. For example, Java has: `runtime/Java/lib` and `runtime/Java/src` directories. Under `src`, you will find a directory structure for package `org.antlr.v4.runtime` and below.
|
||||
1. Create a template file for runtime tests. All you have to do is provide a few templates that indicate how to print values and declare variables. Our runtime test mechanism in dir `runtime-testsuite` will automatically generate code using these templates for each target and check the test results. It needs to know how to define various class fields, compare members and so on. You must create a *X*.test.stg file underneath [runtime-testsuite/resources/org/antlr/v4/test/runtime](https://github.com/antlr/antlr4/tree/master/runtime-testsuite/resources/org/antlr/v4/test/runtime). Again, your best bet is to copy the templates from the closest language to your target and tweak it to suit.
|
||||
1. Create test files under [/runtime-testsuite/test/org/antlr/v4/test/runtime](https://github.com/antlr/antlr4/tree/master/runtime-testsuite/test/org/antlr/v4/test/runtime). They will load defined test cases in each test descriptor. Also add the `/runtime-testsuite/test/org/antlr/v4/test/runtime/X/BaseXTest.java` which defines how test cases will execute and output.
|
||||
1. Create/edit shell scripts in [/.travis](https://github.com/antlr/antlr4/blob/master/.travis) and [/appveyor.yml](https://github.com/antlr/antlr4/blob/master/appveyor.yml) to run tests in CI pipelines.
|
||||
|
||||
## Getting started
|
||||
|
||||
|
|
|
@ -86,7 +86,7 @@ In order to execute this listener, you would simply add the following lines to t
|
|||
...
|
||||
IParseTree tree = parser.StartRule() - only repeated here for reference
|
||||
KeyPrinter printer = new KeyPrinter();
|
||||
ParseTreeWalker.DEFAULT.walk(printer, tree);
|
||||
ParseTreeWalker.Default.Walk(printer, tree);
|
||||
```
|
||||
|
||||
Further information can be found from The Definitive ANTLR Reference book.
|
||||
|
|
|
@ -0,0 +1,117 @@
|
|||
# ANTLR4 Runtime for Dart
|
||||
|
||||
Notice: Dart target may generate code incompatible with Dart 2.9 sound null safety. Please set the minimum SDK constraint to 2.8.4 or lower if such violation is found. Contributions are welcomed.
|
||||
|
||||
### First steps
|
||||
|
||||
#### 1. Install ANTLR4
|
||||
|
||||
[The getting started guide](https://github.com/antlr/antlr4/blob/master/doc/getting-started.md)
|
||||
should get you started.
|
||||
|
||||
#### 2. Install the Dart ANTLR runtime
|
||||
|
||||
Each target language for ANTLR has a runtime package for running parser
|
||||
generated by ANTLR4. The runtime provides a common set of tools for using your parser.
|
||||
|
||||
Install the runtime with the same version as the main ANTLR tool:
|
||||
|
||||
Add this to your package's pubspec.yaml file:
|
||||
```yaml
|
||||
...
|
||||
dependencies:
|
||||
antlr4: <ANTLR version>
|
||||
...
|
||||
```
|
||||
|
||||
#### 3. Generate your parser
|
||||
|
||||
You use the ANTLR4 "tool" to generate a parser. These will reference the ANTLR
|
||||
runtime, installed above.
|
||||
|
||||
Suppose you're using a UNIX system and have set up an alias for the ANTLR4 tool
|
||||
as described in [the getting started guide](https://github.com/antlr/antlr4/blob/master/doc/getting-started.md).
|
||||
To generate your Dart parser, run the following command:
|
||||
|
||||
```shell script
|
||||
antlr4 -Dlanguage=Dart MyGrammar.g4
|
||||
```
|
||||
|
||||
For a full list of antlr4 tool options, please visit the
|
||||
[tool documentation page](https://github.com/antlr/antlr4/blob/master/doc/tool-options.md).
|
||||
|
||||
### Complete example
|
||||
|
||||
Suppose you're using the JSON grammar from https://github.com/antlr/grammars-v4/tree/master/json.
|
||||
|
||||
Then, invoke `antlr4 -Dlanguage=Dart JSON.g4`. The result of this is a
|
||||
collection of `.dart` including:
|
||||
|
||||
* JsonLexer.dart
|
||||
* JsonParser.dart
|
||||
* JsonBaseListener.dart
|
||||
* JsonListener.dart (if you have not activated the -no-listener option)
|
||||
* JsonVisitor.dart (if you have activated the -visitor option)
|
||||
|
||||
We'll write a small main func to call the generated parser/lexer
|
||||
(assuming they are separate). This one writes out the encountered
|
||||
`ParseTreeContext`'s:
|
||||
|
||||
```dart
|
||||
import 'package:antlr4/antlr4.dart';
|
||||
import 'package:my_project/JSONParser.dart';
|
||||
import 'package:my_project/JSONLexer.dart';
|
||||
|
||||
class TreeShapeListener implements ParseTreeListener {
|
||||
@override
|
||||
void enterEveryRule(ParserRuleContext ctx) {
|
||||
print(ctx.text);
|
||||
}
|
||||
|
||||
@override
|
||||
void exitEveryRule(ParserRuleContext node) {
|
||||
}
|
||||
|
||||
@override
|
||||
void visitErrorNode(ErrorNode node) {
|
||||
}
|
||||
|
||||
@override
|
||||
void visitTerminal(TerminalNode node) {
|
||||
}
|
||||
}
|
||||
|
||||
void main(List<String> args) async {
|
||||
JSONLexer.checkVersion();
|
||||
JSONParser.checkVersion();
|
||||
final input = await InputStream.fromPath(args[0]);
|
||||
final lexer = JSONLexer(input);
|
||||
final tokens = CommonTokenStream(lexer);
|
||||
final parser = JSONParser(tokens);
|
||||
parser.addErrorListener(DiagnosticErrorListener());
|
||||
parser.buildParseTree = true;
|
||||
final tree = parser.json();
|
||||
ParseTreeWalker.DEFAULT.walk(TreeShapeListener(), tree);
|
||||
}
|
||||
```
|
||||
|
||||
Create a `example.json` file:
|
||||
```json
|
||||
{"a":1}
|
||||
```
|
||||
|
||||
Parse the input file:
|
||||
|
||||
```shell script
|
||||
dart bin/main.dart example.json
|
||||
```
|
||||
|
||||
The expected output is:
|
||||
|
||||
```
|
||||
{"a":1}
|
||||
{"a":1}
|
||||
{"a":1}
|
||||
"a":1
|
||||
1
|
||||
```
|
|
@ -96,7 +96,7 @@ The recognizers that ANTLR generates assume a character vocabulary containing al
|
|||
|
||||
## Actions
|
||||
|
||||
Actions are code blocks written in the target language. You can use actions in a number of places within a grammar, but the syntax is always the same: arbitrary text surrounded by curly braces. You don’t need to escape a closing curly character if it’s in a string or comment: `"}"` or `/*}*/`. If the curlies are balanced, you also don’t need to escape }: `{...}`. Otherwise, escape extra curlies with a backslash: `\{` or `\}`. The action text should conform to the target language as specified with thelanguage option.
|
||||
Actions are code blocks written in the target language. You can use actions in a number of places within a grammar, but the syntax is always the same: arbitrary text surrounded by curly braces. You don’t need to escape a closing curly character if it’s in a string or comment: `"}"` or `/*}*/`. If the curlies are balanced, you also don’t need to escape }: `{...}`. Otherwise, escape extra curlies with a backslash: `\{` or `\}`. The action text should conform to the target language as specified with the language option.
|
||||
|
||||
Embedded code can appear in: `@header` and `@members` named actions, parser and lexer rules, exception catching specifications, attribute sections for parser rules (return values, arguments, and locals), and some rule element options (currently predicates).
|
||||
|
||||
|
|
|
@ -65,6 +65,8 @@ Edit the repository looking for 4.5 or whatever and update it. Bump version in t
|
|||
* runtime/Cpp/demo/generate.cmd
|
||||
* runtime/Go/antlr/recognizer.go
|
||||
* runtime/Swift/Antlr4/org/antlr/v4/runtime/RuntimeMetaData.swift
|
||||
* runtime/Dart/lib/src/runtime_meta_data.dart
|
||||
* runtime/Dart/pubspec.yaml
|
||||
* tool/src/org/antlr/v4/codegen/target/GoTarget.java
|
||||
* tool/src/org/antlr/v4/codegen/target/CppTarget.java
|
||||
* tool/src/org/antlr/v4/codegen/target/CSharpTarget.java
|
||||
|
@ -442,6 +444,19 @@ git push origin gh-pages
|
|||
popd
|
||||
```
|
||||
|
||||
### Dart
|
||||
|
||||
Push to pub.dev
|
||||
|
||||
```bash
|
||||
cd runtime/Dart
|
||||
pub publish
|
||||
```
|
||||
|
||||
It will warn that no change log found for the new version.
|
||||
If there are changes relevant to dart in this release, edit [CHANGELOG.md](https://github.com/antlr/antlr4/blob/master/runtime/Dart/CHANGELOG.md) to describe the changes.
|
||||
Otherwise enter `N` to ignore the warning.
|
||||
|
||||
## Update javadoc for runtime and tool
|
||||
|
||||
First, gen javadoc:
|
||||
|
|
|
@ -10,12 +10,13 @@ This page lists the available and upcoming ANTLR runtimes. Please note that you
|
|||
* [C++](cpp-target.md)
|
||||
* [Swift](swift-target.md)
|
||||
* [PHP](php-target.md)
|
||||
* [Dart](dart-target.md)
|
||||
|
||||
## Target feature parity
|
||||
|
||||
New features generally appear in the Java target and then migrate to the other targets, but these other targets don't always get updated in the same overall tool release. This section tries to identify features added to Java that have not been added to the other targets.
|
||||
|
||||
|Feature|Java|C♯|Python2|Python3|JavaScript|Go|C++|Swift|PHP
|
||||
|---|---|---|---|---|---|---|---|---|---|
|
||||
|Ambiguous tree construction|4.5.1|-|-|-|-|-|-|-|-|
|
||||
|Feature|Java|C♯|Python2|Python3|JavaScript|Go|C++|Swift|PHP|Dart
|
||||
|---|---|---|---|---|---|---|---|---|---|---|
|
||||
|Ambiguous tree construction|4.5.1|-|-|-|-|-|-|-|-|-|
|
||||
|
||||
|
|
|
@ -103,6 +103,7 @@
|
|||
<include>**/python2/Test*.java</include>
|
||||
<include>**/python3/Test*.java</include>
|
||||
<include>**/php/Test*.java</include>
|
||||
<include>**/dart/Test*.java</include>
|
||||
<include>${antlr.tests.swift}</include>
|
||||
</includes>
|
||||
</configuration>
|
||||
|
|
|
@ -0,0 +1,318 @@
|
|||
writeln(s) ::= <<print(<s>);>>
|
||||
write(s) ::= <<stdout.write(<s>);>>
|
||||
writeList(s) ::= <<print(<s; separator="+">);>>
|
||||
|
||||
False() ::= "false"
|
||||
|
||||
True() ::= "true"
|
||||
|
||||
Not(v) ::= "!<v>"
|
||||
|
||||
Assert(s) ::= <<assert(<s>);>>
|
||||
|
||||
Cast(t,v) ::= "(<v> as <t>)"
|
||||
|
||||
Append(a,b) ::= "<a>.toString() + <b>.toString()"
|
||||
|
||||
AppendStr(a,b) ::= <%<Append(a,b)>%>
|
||||
|
||||
Concat(a,b) ::= "<a><b>"
|
||||
|
||||
AssertIsList(v) ::= "assert (<v> is List);" // just use static type system
|
||||
|
||||
AssignLocal(s,v) ::= "<s> = <v>;"
|
||||
|
||||
InitIntMember(n,v) ::= <%int <n> = <v>;%>
|
||||
|
||||
InitBooleanMember(n,v) ::= <%bool <n> = <v>;%>
|
||||
|
||||
InitIntVar(n,v) ::= <%<InitIntMember(n,v)>%>
|
||||
|
||||
IntArg(n) ::= "int <n>"
|
||||
|
||||
VarRef(n) ::= "<n>"
|
||||
|
||||
GetMember(n) ::= <%this.<n>%>
|
||||
|
||||
SetMember(n,v) ::= <%this.<n> = <v>;%>
|
||||
|
||||
AddMember(n,v) ::= <%this.<n> += <v>;%>
|
||||
|
||||
MemberEquals(n,v) ::= <%this.<n> == <v>%>
|
||||
|
||||
ModMemberEquals(n,m,v) ::= <%this.<n> % <m> == <v>%>
|
||||
|
||||
ModMemberNotEquals(n,m,v) ::= <%this.<n> % <m> != <v>%>
|
||||
|
||||
DumpDFA() ::= "this.dumpDFA();"
|
||||
|
||||
Pass() ::= ""
|
||||
|
||||
StringList() ::= "List\<String>"
|
||||
|
||||
BuildParseTrees() ::= "buildParseTree = true;"
|
||||
|
||||
BailErrorStrategy() ::= <%errorHandler = new BailErrorStrategy();%>
|
||||
|
||||
ToStringTree(s) ::= <%<s>.toStringTree(parser: this)%>
|
||||
|
||||
Column() ::= "this.charPositionInLine"
|
||||
|
||||
Text() ::= "this.text"
|
||||
|
||||
ValEquals(a,b) ::= <%<a>==<b>%>
|
||||
|
||||
TextEquals(a) ::= <%this.text == "<a>"%>
|
||||
|
||||
PlusText(a) ::= <%"<a>" + this.text%>
|
||||
|
||||
InputText() ::= "tokenStream.text"
|
||||
|
||||
LTEquals(i, v) ::= <%tokenStream.LT(<i>).text == <v>%>
|
||||
|
||||
LANotEquals(i, v) ::= <%tokenStream.LA(<i>)!=<v>%>
|
||||
|
||||
TokenStartColumnEquals(i) ::= <%this.tokenStartCharPositionInLine==<i>%>
|
||||
|
||||
ImportListener(X) ::= ""
|
||||
|
||||
GetExpectedTokenNames() ::= "this.expectedTokens.toString(vocabulary: this.vocabulary)"
|
||||
|
||||
RuleInvocationStack() ::= "ruleInvocationStack"
|
||||
|
||||
LL_EXACT_AMBIG_DETECTION() ::= <<interpreter.predictionMode = PredictionMode.LL_EXACT_AMBIG_DETECTION;>>
|
||||
|
||||
ParserToken(parser, token) ::= <%<parser>.TOKEN_<token>%>
|
||||
|
||||
Production(p) ::= <%<p>%>
|
||||
|
||||
Result(r) ::= <%<r>%>
|
||||
|
||||
ParserPropertyMember() ::= <<
|
||||
@members {
|
||||
bool Property() {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
>>
|
||||
|
||||
ParserPropertyCall(p, call) ::= "<p>.<call>"
|
||||
|
||||
PositionAdjustingLexerDef() ::= <<
|
||||
class PositionAdjustingLexerATNSimulator extends LexerATNSimulator {
|
||||
PositionAdjustingLexerATNSimulator(Lexer recog, ATN atn,
|
||||
List\<DFA> decisionToDFA, PredictionContextCache sharedContextCache)
|
||||
: super(atn, decisionToDFA, sharedContextCache, recog: recog);
|
||||
|
||||
void resetAcceptPosition(CharStream input, int index, int line,
|
||||
int charPositionInLine) {
|
||||
input.seek(index);
|
||||
this.line = line;
|
||||
this.charPositionInLine = charPositionInLine;
|
||||
consume(input);
|
||||
}
|
||||
}
|
||||
>>
|
||||
|
||||
PositionAdjustingLexer() ::= <<
|
||||
@override
|
||||
Token nextToken() {
|
||||
if (!(super.interpreter is PositionAdjustingLexerATNSimulator)) {
|
||||
interpreter = new PositionAdjustingLexerATNSimulator(
|
||||
this, _ATN, _decisionToDFA, _sharedContextCache);
|
||||
}
|
||||
|
||||
return super.nextToken();
|
||||
}
|
||||
|
||||
@override
|
||||
Token emit() {
|
||||
switch (type) {
|
||||
case TOKEN_TOKENS:
|
||||
handleAcceptPositionForKeyword("tokens");
|
||||
break;
|
||||
|
||||
case TOKEN_LABEL:
|
||||
handleAcceptPositionForIdentifier();
|
||||
break;
|
||||
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
||||
return super.emit();
|
||||
}
|
||||
|
||||
bool handleAcceptPositionForIdentifier() {
|
||||
String tokenText = text;
|
||||
int identifierLength = 0;
|
||||
while (identifierLength \< tokenText.length &&
|
||||
isIdentifierChar(tokenText[identifierLength])) {
|
||||
identifierLength++;
|
||||
}
|
||||
|
||||
if (inputStream.index > tokenStartCharIndex + identifierLength) {
|
||||
int offset = identifierLength - 1;
|
||||
interpreter.resetAcceptPosition(inputStream, tokenStartCharIndex + offset,
|
||||
tokenStartLine, tokenStartCharPositionInLine + offset);
|
||||
return true;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
bool handleAcceptPositionForKeyword(String keyword) {
|
||||
if (inputStream.index > tokenStartCharIndex + keyword.length) {
|
||||
int offset = keyword.length - 1;
|
||||
interpreter.resetAcceptPosition(inputStream, tokenStartCharIndex + offset,
|
||||
tokenStartLine, tokenStartCharPositionInLine + offset);
|
||||
return true;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
@override
|
||||
PositionAdjustingLexerATNSimulator get interpreter {
|
||||
return super.interpreter as PositionAdjustingLexerATNSimulator;
|
||||
}
|
||||
|
||||
static bool isIdentifierChar(String c) {
|
||||
return isLetterOrDigit(c) || c == '_';
|
||||
}
|
||||
|
||||
static const ZERO = 48;
|
||||
static const LOWER_A = 97;
|
||||
static const LOWER_Z = 122;
|
||||
static const UPPER_A = 65;
|
||||
static const UPPER_Z = 90;
|
||||
|
||||
static bool isLetterOrDigit(String char) => isLetter(char) || isDigit(char);
|
||||
|
||||
// Note: this is intentially ASCII only
|
||||
static bool isLetter(String char) {
|
||||
if (char == null) return false;
|
||||
var cc = char.codeUnitAt(0);
|
||||
return cc >= LOWER_A && cc \<= LOWER_Z || cc >= UPPER_A && cc \<= UPPER_Z;
|
||||
}
|
||||
|
||||
static bool isDigit(String char) {
|
||||
if (char == null) return false;
|
||||
var cc = char.codeUnitAt(0);
|
||||
return cc >= ZERO && cc \< ZERO + 10;
|
||||
}
|
||||
>>
|
||||
|
||||
BasicListener(X) ::= <<
|
||||
@parser::definitions {
|
||||
class LeafListener extends TBaseListener {
|
||||
void visitTerminal(TerminalNode node) {
|
||||
print(node.symbol.text);
|
||||
}
|
||||
}
|
||||
}
|
||||
>>
|
||||
|
||||
WalkListener(s) ::= <<
|
||||
ParseTreeWalker walker = new ParseTreeWalker();
|
||||
walker.walk(new LeafListener(), <s>);
|
||||
>>
|
||||
|
||||
TreeNodeWithAltNumField(X) ::= <<
|
||||
@parser::definitions {
|
||||
class MyRuleNode extends ParserRuleContext {
|
||||
int altNum;
|
||||
|
||||
MyRuleNode(ParserRuleContext parent, int invokingStateNumber)
|
||||
: super(parent, invokingStateNumber);
|
||||
|
||||
@override int get altNumber {
|
||||
return altNum;
|
||||
}
|
||||
|
||||
@override void set altNumber(int altNum) {
|
||||
this.altNum = altNum;
|
||||
}
|
||||
}
|
||||
}
|
||||
>>
|
||||
|
||||
TokenGetterListener(X) ::= <<
|
||||
@parser::definitions {
|
||||
class LeafListener extends TBaseListener {
|
||||
void exitA(AContext ctx) {
|
||||
if (ctx.childCount==2)
|
||||
stdout.write("${ctx.INT(0).symbol.text} ${ctx.INT(1).symbol.text} ${ctx.INTs()}");
|
||||
else
|
||||
print(ctx.ID().symbol);
|
||||
}
|
||||
}
|
||||
}
|
||||
>>
|
||||
|
||||
RuleGetterListener(X) ::= <<
|
||||
@parser::definitions {
|
||||
class LeafListener extends TBaseListener {
|
||||
void exitA(AContext ctx) {
|
||||
if (ctx.childCount==2) {
|
||||
stdout.write("${ctx.b(0).start.text} ${ctx.b(1).start.text} ${ctx.bs()[0].start.text}");
|
||||
} else
|
||||
print(ctx.b(0).start.text);
|
||||
}
|
||||
}
|
||||
}
|
||||
>>
|
||||
|
||||
|
||||
LRListener(X) ::= <<
|
||||
@parser::definitions {
|
||||
class LeafListener extends TBaseListener {
|
||||
void exitE(EContext ctx) {
|
||||
if (ctx.childCount==3) {
|
||||
stdout.write("${ctx.e(0).start.text} ${ctx.e(1).start.text} ${ctx.es()[0].start.text}\n");
|
||||
} else
|
||||
print(ctx.INT().symbol.text);
|
||||
}
|
||||
}
|
||||
}
|
||||
>>
|
||||
|
||||
LRWithLabelsListener(X) ::= <<
|
||||
@parser::definitions {
|
||||
class LeafListener extends TBaseListener {
|
||||
void exitCall(CallContext ctx) {
|
||||
stdout.write("${ctx.e().start.text} ${ctx.eList()}");
|
||||
}
|
||||
void exitInt(IntContext ctx) {
|
||||
print(ctx.INT().symbol.text);
|
||||
}
|
||||
}
|
||||
}
|
||||
>>
|
||||
|
||||
DeclareContextListGettersFunction() ::= <<
|
||||
void foo() {
|
||||
SContext s = null;
|
||||
List\<AContext> a = s.as();
|
||||
List\<BContext> b = s.bs();
|
||||
}
|
||||
>>
|
||||
|
||||
Declare_foo() ::= <<
|
||||
void foo() {print("foo");}
|
||||
>>
|
||||
|
||||
Invoke_foo() ::= "foo();"
|
||||
|
||||
Declare_pred() ::= <<bool pred(bool v) {
|
||||
print("eval=\$v");
|
||||
return v;
|
||||
}
|
||||
>>
|
||||
|
||||
Invoke_pred(v) ::= <<this.pred(<v>)>>
|
||||
|
||||
ParserTokenType(t) ::= "Parser.<t>"
|
||||
ContextRuleFunction(ctx, rule) ::= "<ctx>.<rule>"
|
||||
StringType() ::= "String"
|
||||
ContextMember(ctx, subctx, member) ::= "<ctx>.<subctx>.<member>"
|
|
@ -51,7 +51,8 @@ public abstract class BaseRuntimeTest {
|
|||
"CSharp",
|
||||
"Python2", "Python3",
|
||||
"PHP",
|
||||
"Node"
|
||||
"Node",
|
||||
"Dart"
|
||||
};
|
||||
|
||||
static {
|
||||
|
@ -299,6 +300,16 @@ public abstract class BaseRuntimeTest {
|
|||
}
|
||||
}
|
||||
|
||||
public static String readFile(String dir, String fileName) {
|
||||
try {
|
||||
return String.copyValueOf(Utils.readFile(dir+"/"+fileName, "UTF-8"));
|
||||
}
|
||||
catch (IOException ioe) {
|
||||
System.err.println("can't read file");
|
||||
ioe.printStackTrace(System.err);
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
protected static void assertCorrectOutput(RuntimeTestDescriptor descriptor, RuntimeTestSupport delegate, String actualOutput) {
|
||||
String actualParseErrors = delegate.getParseErrors();
|
||||
|
|
File diff suppressed because it is too large
Load Diff
|
@ -0,0 +1,26 @@
|
|||
/*
|
||||
* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
|
||||
* Use of this file is governed by the BSD 3-clause license that
|
||||
* can be found in the LICENSE.txt file in the project root.
|
||||
*/
|
||||
|
||||
package org.antlr.v4.test.runtime.dart;
|
||||
|
||||
import org.antlr.v4.test.runtime.BaseRuntimeTest;
|
||||
import org.antlr.v4.test.runtime.RuntimeTestDescriptor;
|
||||
import org.antlr.v4.test.runtime.descriptors.CompositeLexersDescriptors;
|
||||
import org.antlr.v4.test.runtime.dart.BaseDartTest;
|
||||
import org.junit.runner.RunWith;
|
||||
import org.junit.runners.Parameterized;
|
||||
|
||||
@RunWith(Parameterized.class)
|
||||
public class TestCompositeLexers extends BaseRuntimeTest {
|
||||
public TestCompositeLexers(RuntimeTestDescriptor descriptor) {
|
||||
super(descriptor,new BaseDartTest());
|
||||
}
|
||||
|
||||
@Parameterized.Parameters(name="{0}")
|
||||
public static RuntimeTestDescriptor[] getAllTestDescriptors() {
|
||||
return BaseRuntimeTest.getRuntimeTestDescriptors(CompositeLexersDescriptors.class, "Dart");
|
||||
}
|
||||
}
|
|
@ -0,0 +1,26 @@
|
|||
/*
|
||||
* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
|
||||
* Use of this file is governed by the BSD 3-clause license that
|
||||
* can be found in the LICENSE.txt file in the project root.
|
||||
*/
|
||||
|
||||
package org.antlr.v4.test.runtime.dart;
|
||||
|
||||
import org.antlr.v4.test.runtime.BaseRuntimeTest;
|
||||
import org.antlr.v4.test.runtime.RuntimeTestDescriptor;
|
||||
import org.antlr.v4.test.runtime.descriptors.CompositeParsersDescriptors;
|
||||
import org.antlr.v4.test.runtime.dart.BaseDartTest;
|
||||
import org.junit.runner.RunWith;
|
||||
import org.junit.runners.Parameterized;
|
||||
|
||||
@RunWith(Parameterized.class)
|
||||
public class TestCompositeParsers extends BaseRuntimeTest {
|
||||
public TestCompositeParsers(RuntimeTestDescriptor descriptor) {
|
||||
super(descriptor,new BaseDartTest());
|
||||
}
|
||||
|
||||
@Parameterized.Parameters(name="{0}")
|
||||
public static RuntimeTestDescriptor[] getAllTestDescriptors() {
|
||||
return BaseRuntimeTest.getRuntimeTestDescriptors(CompositeParsersDescriptors.class, "Dart");
|
||||
}
|
||||
}
|
|
@ -0,0 +1,26 @@
|
|||
/*
|
||||
* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
|
||||
* Use of this file is governed by the BSD 3-clause license that
|
||||
* can be found in the LICENSE.txt file in the project root.
|
||||
*/
|
||||
|
||||
package org.antlr.v4.test.runtime.dart;
|
||||
|
||||
import org.antlr.v4.test.runtime.BaseRuntimeTest;
|
||||
import org.antlr.v4.test.runtime.RuntimeTestDescriptor;
|
||||
import org.antlr.v4.test.runtime.descriptors.FullContextParsingDescriptors;
|
||||
import org.antlr.v4.test.runtime.dart.BaseDartTest;
|
||||
import org.junit.runner.RunWith;
|
||||
import org.junit.runners.Parameterized;
|
||||
|
||||
@RunWith(Parameterized.class)
|
||||
public class TestFullContextParsing extends BaseRuntimeTest {
|
||||
public TestFullContextParsing(RuntimeTestDescriptor descriptor) {
|
||||
super(descriptor,new BaseDartTest());
|
||||
}
|
||||
|
||||
@Parameterized.Parameters(name="{0}")
|
||||
public static RuntimeTestDescriptor[] getAllTestDescriptors() {
|
||||
return BaseRuntimeTest.getRuntimeTestDescriptors(FullContextParsingDescriptors.class, "Dart");
|
||||
}
|
||||
}
|
|
@ -0,0 +1,26 @@
|
|||
/*
|
||||
* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
|
||||
* Use of this file is governed by the BSD 3-clause license that
|
||||
* can be found in the LICENSE.txt file in the project root.
|
||||
*/
|
||||
|
||||
package org.antlr.v4.test.runtime.dart;
|
||||
|
||||
import org.antlr.v4.test.runtime.BaseRuntimeTest;
|
||||
import org.antlr.v4.test.runtime.RuntimeTestDescriptor;
|
||||
import org.antlr.v4.test.runtime.descriptors.LeftRecursionDescriptors;
|
||||
import org.antlr.v4.test.runtime.dart.BaseDartTest;
|
||||
import org.junit.runner.RunWith;
|
||||
import org.junit.runners.Parameterized;
|
||||
|
||||
@RunWith(Parameterized.class)
|
||||
public class TestLeftRecursion extends BaseRuntimeTest {
|
||||
public TestLeftRecursion(RuntimeTestDescriptor descriptor) {
|
||||
super(descriptor,new BaseDartTest());
|
||||
}
|
||||
|
||||
@Parameterized.Parameters(name="{0}")
|
||||
public static RuntimeTestDescriptor[] getAllTestDescriptors() {
|
||||
return BaseRuntimeTest.getRuntimeTestDescriptors(LeftRecursionDescriptors.class, "Dart");
|
||||
}
|
||||
}
|
|
@ -0,0 +1,26 @@
|
|||
/*
|
||||
* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
|
||||
* Use of this file is governed by the BSD 3-clause license that
|
||||
* can be found in the LICENSE.txt file in the project root.
|
||||
*/
|
||||
|
||||
package org.antlr.v4.test.runtime.dart;
|
||||
|
||||
import org.antlr.v4.test.runtime.BaseRuntimeTest;
|
||||
import org.antlr.v4.test.runtime.RuntimeTestDescriptor;
|
||||
import org.antlr.v4.test.runtime.descriptors.LexerErrorsDescriptors;
|
||||
import org.antlr.v4.test.runtime.dart.BaseDartTest;
|
||||
import org.junit.runner.RunWith;
|
||||
import org.junit.runners.Parameterized;
|
||||
|
||||
@RunWith(Parameterized.class)
|
||||
public class TestLexerErrors extends BaseRuntimeTest {
|
||||
public TestLexerErrors(RuntimeTestDescriptor descriptor) {
|
||||
super(descriptor,new BaseDartTest());
|
||||
}
|
||||
|
||||
@Parameterized.Parameters(name="{0}")
|
||||
public static RuntimeTestDescriptor[] getAllTestDescriptors() {
|
||||
return BaseRuntimeTest.getRuntimeTestDescriptors(LexerErrorsDescriptors.class, "Dart");
|
||||
}
|
||||
}
|
|
@ -0,0 +1,26 @@
|
|||
/*
|
||||
* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
|
||||
* Use of this file is governed by the BSD 3-clause license that
|
||||
* can be found in the LICENSE.txt file in the project root.
|
||||
*/
|
||||
|
||||
package org.antlr.v4.test.runtime.dart;
|
||||
|
||||
import org.antlr.v4.test.runtime.BaseRuntimeTest;
|
||||
import org.antlr.v4.test.runtime.RuntimeTestDescriptor;
|
||||
import org.antlr.v4.test.runtime.descriptors.LexerExecDescriptors;
|
||||
import org.antlr.v4.test.runtime.dart.BaseDartTest;
|
||||
import org.junit.runner.RunWith;
|
||||
import org.junit.runners.Parameterized;
|
||||
|
||||
@RunWith(Parameterized.class)
|
||||
public class TestLexerExec extends BaseRuntimeTest {
|
||||
public TestLexerExec(RuntimeTestDescriptor descriptor) {
|
||||
super(descriptor,new BaseDartTest());
|
||||
}
|
||||
|
||||
@Parameterized.Parameters(name="{0}")
|
||||
public static RuntimeTestDescriptor[] getAllTestDescriptors() {
|
||||
return BaseRuntimeTest.getRuntimeTestDescriptors(LexerExecDescriptors.class, "Dart");
|
||||
}
|
||||
}
|
|
@ -0,0 +1,26 @@
|
|||
/*
|
||||
* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
|
||||
* Use of this file is governed by the BSD 3-clause license that
|
||||
* can be found in the LICENSE.txt file in the project root.
|
||||
*/
|
||||
|
||||
package org.antlr.v4.test.runtime.dart;
|
||||
|
||||
import org.antlr.v4.test.runtime.BaseRuntimeTest;
|
||||
import org.antlr.v4.test.runtime.RuntimeTestDescriptor;
|
||||
import org.antlr.v4.test.runtime.descriptors.ListenersDescriptors;
|
||||
import org.antlr.v4.test.runtime.dart.BaseDartTest;
|
||||
import org.junit.runner.RunWith;
|
||||
import org.junit.runners.Parameterized;
|
||||
|
||||
@RunWith(Parameterized.class)
|
||||
public class TestListeners extends BaseRuntimeTest {
|
||||
public TestListeners(RuntimeTestDescriptor descriptor) {
|
||||
super(descriptor,new BaseDartTest());
|
||||
}
|
||||
|
||||
@Parameterized.Parameters(name="{0}")
|
||||
public static RuntimeTestDescriptor[] getAllTestDescriptors() {
|
||||
return BaseRuntimeTest.getRuntimeTestDescriptors(ListenersDescriptors.class, "Dart");
|
||||
}
|
||||
}
|
|
@ -0,0 +1,26 @@
|
|||
/*
|
||||
* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
|
||||
* Use of this file is governed by the BSD 3-clause license that
|
||||
* can be found in the LICENSE.txt file in the project root.
|
||||
*/
|
||||
|
||||
package org.antlr.v4.test.runtime.dart;
|
||||
|
||||
import org.antlr.v4.test.runtime.BaseRuntimeTest;
|
||||
import org.antlr.v4.test.runtime.RuntimeTestDescriptor;
|
||||
import org.antlr.v4.test.runtime.descriptors.ParseTreesDescriptors;
|
||||
import org.antlr.v4.test.runtime.dart.BaseDartTest;
|
||||
import org.junit.runner.RunWith;
|
||||
import org.junit.runners.Parameterized;
|
||||
|
||||
@RunWith(Parameterized.class)
|
||||
public class TestParseTrees extends BaseRuntimeTest {
|
||||
public TestParseTrees(RuntimeTestDescriptor descriptor) {
|
||||
super(descriptor,new BaseDartTest());
|
||||
}
|
||||
|
||||
@Parameterized.Parameters(name="{0}")
|
||||
public static RuntimeTestDescriptor[] getAllTestDescriptors() {
|
||||
return BaseRuntimeTest.getRuntimeTestDescriptors(ParseTreesDescriptors.class, "Dart");
|
||||
}
|
||||
}
|
|
@ -0,0 +1,26 @@
|
|||
/*
|
||||
* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
|
||||
* Use of this file is governed by the BSD 3-clause license that
|
||||
* can be found in the LICENSE.txt file in the project root.
|
||||
*/
|
||||
|
||||
package org.antlr.v4.test.runtime.dart;
|
||||
|
||||
import org.antlr.v4.test.runtime.BaseRuntimeTest;
|
||||
import org.antlr.v4.test.runtime.RuntimeTestDescriptor;
|
||||
import org.antlr.v4.test.runtime.descriptors.ParserErrorsDescriptors;
|
||||
import org.antlr.v4.test.runtime.dart.BaseDartTest;
|
||||
import org.junit.runner.RunWith;
|
||||
import org.junit.runners.Parameterized;
|
||||
|
||||
@RunWith(Parameterized.class)
|
||||
public class TestParserErrors extends BaseRuntimeTest {
|
||||
public TestParserErrors(RuntimeTestDescriptor descriptor) {
|
||||
super(descriptor,new BaseDartTest());
|
||||
}
|
||||
|
||||
@Parameterized.Parameters(name="{0}")
|
||||
public static RuntimeTestDescriptor[] getAllTestDescriptors() {
|
||||
return BaseRuntimeTest.getRuntimeTestDescriptors(ParserErrorsDescriptors.class, "Dart");
|
||||
}
|
||||
}
|
|
@ -0,0 +1,26 @@
|
|||
/*
|
||||
* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
|
||||
* Use of this file is governed by the BSD 3-clause license that
|
||||
* can be found in the LICENSE.txt file in the project root.
|
||||
*/
|
||||
|
||||
package org.antlr.v4.test.runtime.dart;
|
||||
|
||||
import org.antlr.v4.test.runtime.BaseRuntimeTest;
|
||||
import org.antlr.v4.test.runtime.RuntimeTestDescriptor;
|
||||
import org.antlr.v4.test.runtime.descriptors.ParserExecDescriptors;
|
||||
import org.antlr.v4.test.runtime.dart.BaseDartTest;
|
||||
import org.junit.runner.RunWith;
|
||||
import org.junit.runners.Parameterized;
|
||||
|
||||
@RunWith(Parameterized.class)
|
||||
public class TestParserExec extends BaseRuntimeTest {
|
||||
public TestParserExec(RuntimeTestDescriptor descriptor) {
|
||||
super(descriptor,new BaseDartTest());
|
||||
}
|
||||
|
||||
@Parameterized.Parameters(name="{0}")
|
||||
public static RuntimeTestDescriptor[] getAllTestDescriptors() {
|
||||
return BaseRuntimeTest.getRuntimeTestDescriptors(ParserExecDescriptors.class, "Dart");
|
||||
}
|
||||
}
|
|
@ -0,0 +1,26 @@
|
|||
/*
|
||||
* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
|
||||
* Use of this file is governed by the BSD 3-clause license that
|
||||
* can be found in the LICENSE.txt file in the project root.
|
||||
*/
|
||||
|
||||
package org.antlr.v4.test.runtime.dart;
|
||||
|
||||
import org.antlr.v4.test.runtime.BaseRuntimeTest;
|
||||
import org.antlr.v4.test.runtime.RuntimeTestDescriptor;
|
||||
import org.antlr.v4.test.runtime.descriptors.PerformanceDescriptors;
|
||||
import org.antlr.v4.test.runtime.dart.BaseDartTest;
|
||||
import org.junit.runner.RunWith;
|
||||
import org.junit.runners.Parameterized;
|
||||
|
||||
@RunWith(Parameterized.class)
|
||||
public class TestPerformance extends BaseRuntimeTest {
|
||||
public TestPerformance(RuntimeTestDescriptor descriptor) {
|
||||
super(descriptor,new BaseDartTest());
|
||||
}
|
||||
|
||||
@Parameterized.Parameters(name="{0}")
|
||||
public static RuntimeTestDescriptor[] getAllTestDescriptors() {
|
||||
return BaseRuntimeTest.getRuntimeTestDescriptors(PerformanceDescriptors.class, "Dart");
|
||||
}
|
||||
}
|
|
@ -0,0 +1,26 @@
|
|||
/*
|
||||
* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
|
||||
* Use of this file is governed by the BSD 3-clause license that
|
||||
* can be found in the LICENSE.txt file in the project root.
|
||||
*/
|
||||
|
||||
package org.antlr.v4.test.runtime.dart;
|
||||
|
||||
import org.antlr.v4.test.runtime.BaseRuntimeTest;
|
||||
import org.antlr.v4.test.runtime.RuntimeTestDescriptor;
|
||||
import org.antlr.v4.test.runtime.descriptors.SemPredEvalLexerDescriptors;
|
||||
import org.antlr.v4.test.runtime.dart.BaseDartTest;
|
||||
import org.junit.runner.RunWith;
|
||||
import org.junit.runners.Parameterized;
|
||||
|
||||
@RunWith(Parameterized.class)
|
||||
public class TestSemPredEvalLexer extends BaseRuntimeTest {
|
||||
public TestSemPredEvalLexer(RuntimeTestDescriptor descriptor) {
|
||||
super(descriptor,new BaseDartTest());
|
||||
}
|
||||
|
||||
@Parameterized.Parameters(name="{0}")
|
||||
public static RuntimeTestDescriptor[] getAllTestDescriptors() {
|
||||
return BaseRuntimeTest.getRuntimeTestDescriptors(SemPredEvalLexerDescriptors.class, "Dart");
|
||||
}
|
||||
}
|
|
@ -0,0 +1,26 @@
|
|||
/*
|
||||
* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
|
||||
* Use of this file is governed by the BSD 3-clause license that
|
||||
* can be found in the LICENSE.txt file in the project root.
|
||||
*/
|
||||
|
||||
package org.antlr.v4.test.runtime.dart;
|
||||
|
||||
import org.antlr.v4.test.runtime.BaseRuntimeTest;
|
||||
import org.antlr.v4.test.runtime.RuntimeTestDescriptor;
|
||||
import org.antlr.v4.test.runtime.descriptors.SemPredEvalParserDescriptors;
|
||||
import org.antlr.v4.test.runtime.dart.BaseDartTest;
|
||||
import org.junit.runner.RunWith;
|
||||
import org.junit.runners.Parameterized;
|
||||
|
||||
@RunWith(Parameterized.class)
|
||||
public class TestSemPredEvalParser extends BaseRuntimeTest {
|
||||
public TestSemPredEvalParser(RuntimeTestDescriptor descriptor) {
|
||||
super(descriptor,new BaseDartTest());
|
||||
}
|
||||
|
||||
@Parameterized.Parameters(name="{0}")
|
||||
public static RuntimeTestDescriptor[] getAllTestDescriptors() {
|
||||
return BaseRuntimeTest.getRuntimeTestDescriptors(SemPredEvalParserDescriptors.class, "Dart");
|
||||
}
|
||||
}
|
|
@ -0,0 +1,26 @@
|
|||
/*
|
||||
* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
|
||||
* Use of this file is governed by the BSD 3-clause license that
|
||||
* can be found in the LICENSE.txt file in the project root.
|
||||
*/
|
||||
|
||||
package org.antlr.v4.test.runtime.dart;
|
||||
|
||||
import org.antlr.v4.test.runtime.BaseRuntimeTest;
|
||||
import org.antlr.v4.test.runtime.RuntimeTestDescriptor;
|
||||
import org.antlr.v4.test.runtime.descriptors.SetsDescriptors;
|
||||
import org.antlr.v4.test.runtime.dart.BaseDartTest;
|
||||
import org.junit.runner.RunWith;
|
||||
import org.junit.runners.Parameterized;
|
||||
|
||||
@RunWith(Parameterized.class)
|
||||
public class TestSets extends BaseRuntimeTest {
|
||||
public TestSets(RuntimeTestDescriptor descriptor) {
|
||||
super(descriptor,new BaseDartTest());
|
||||
}
|
||||
|
||||
@Parameterized.Parameters(name="{0}")
|
||||
public static RuntimeTestDescriptor[] getAllTestDescriptors() {
|
||||
return BaseRuntimeTest.getRuntimeTestDescriptors(SetsDescriptors.class, "Dart");
|
||||
}
|
||||
}
|
|
@ -1051,7 +1051,7 @@ public class LexerExecDescriptors {
|
|||
grammar = new String(Files.readAllBytes(Paths.get(stuff.toURI())));
|
||||
}
|
||||
catch (Exception e) {
|
||||
System.err.println("Cannot find grammar org/antlr/v4/test/runtime/LarseLexer.g4");
|
||||
System.err.println("Cannot find grammar org/antlr/v4/test/runtime/LargeLexer.g4");
|
||||
}
|
||||
|
||||
return new Pair<>(grammarName, grammar);
|
||||
|
|
|
@ -112,7 +112,7 @@ public class ParseTreesDescriptors {
|
|||
|
||||
@Override
|
||||
public boolean ignore(String targetName) {
|
||||
return !targetName.matches("Java|Python2|Python3|Node|Swift|CSharp");
|
||||
return !targetName.matches("Java|Python2|Python3|Node|Swift|CSharp|Dart");
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -639,7 +639,7 @@ public class ParserErrorsDescriptors {
|
|||
|
||||
@Override
|
||||
public boolean ignore(String targetName) {
|
||||
return !"Java".equals(targetName) && !"Swift".equals(targetName);
|
||||
return !"Java".equals(targetName) && !"Swift".equals(targetName) && !"Dart".equals(targetName);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -889,4 +889,32 @@ public class ParserExecDescriptors {
|
|||
@CommentHasStringValue
|
||||
public String grammar;
|
||||
}
|
||||
|
||||
/**
|
||||
* This is a regression test for antlr/antlr4#2728
|
||||
* It should generate correct code for grammars with more than 65 tokens.
|
||||
* https://github.com/antlr/antlr4/pull/2728#issuecomment-622940562
|
||||
*/
|
||||
public static class TokenOffset extends BaseParserTestDescriptor {
|
||||
public String input = "12 34 56 66";
|
||||
public String output = "12345666\n";
|
||||
|
||||
public String errors = null;
|
||||
public String startRule = "a";
|
||||
public String grammarName = "L";
|
||||
|
||||
/**
|
||||
grammar L;
|
||||
a : ('1'|'2'|'3'|'4'|'5'|'6'|'7'|'8'|'9'|'10'|'11'|'12'|'13'|'14'|'15'|'16'
|
||||
|'17'|'18'|'19'|'20'|'21'|'22'|'23'|'24'|'25'|'26'|'27'|'28'|'29'|'30'|'31'|'32'
|
||||
|'33'|'34'|'35'|'36'|'37'|'38'|'39'|'40'|'41'|'42'|'43'|'44'|'45'|'46'|'47'|'48'
|
||||
|'49'|'50'|'51'|'52'|'53'|'54'|'55'|'56'|'57'|'58'|'59'|'60'|'61'|'62'|'63'|'64'
|
||||
|'65'|'66')+ {
|
||||
<writeln("$text")>
|
||||
};
|
||||
WS : (' '|'\n') -> skip;
|
||||
*/
|
||||
@CommentHasStringValue
|
||||
public String grammar;
|
||||
}
|
||||
}
|
||||
|
|
|
@ -113,7 +113,7 @@ public class PerformanceDescriptors {
|
|||
|
||||
@Override
|
||||
public boolean ignore(String targetName) {
|
||||
return !Arrays.asList("Java", "CSharp", "Python2", "Python3", "Node", "Cpp", "Swift").contains(targetName);
|
||||
return !Arrays.asList("Java", "CSharp", "Python2", "Python3", "Node", "Cpp", "Swift", "Dart").contains(targetName);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -199,7 +199,7 @@ public class PerformanceDescriptors {
|
|||
@Override
|
||||
public boolean ignore(String targetName) {
|
||||
// passes, but still too slow in Python and JavaScript
|
||||
return !Arrays.asList("Java", "CSharp", "Cpp", "Swift").contains(targetName);
|
||||
return !Arrays.asList("Java", "CSharp", "Cpp", "Swift", "Dart").contains(targetName);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -79,7 +79,7 @@ namespace Antlr4.Runtime.Atn
|
|||
return false;
|
||||
}
|
||||
Antlr4.Runtime.Atn.SingletonPredictionContext other = (Antlr4.Runtime.Atn.SingletonPredictionContext)o;
|
||||
return returnState == other.returnState && parent.Equals(other.parent);
|
||||
return returnState == other.returnState && (parent != null && parent.Equals(other.parent));
|
||||
}
|
||||
|
||||
public override string ToString()
|
||||
|
|
|
@ -22,20 +22,20 @@ namespace Antlr4.Runtime
|
|||
#if !PORTABLE
|
||||
public class TraceListener : IParseTreeListener
|
||||
{
|
||||
private readonly TextWriter Output;
|
||||
|
||||
public TraceListener(TextWriter output) {
|
||||
Output = output;
|
||||
public TraceListener(TextWriter output,Parser enclosing) {
|
||||
_output = output;
|
||||
_enclosing = enclosing;
|
||||
}
|
||||
|
||||
public virtual void EnterEveryRule(ParserRuleContext ctx)
|
||||
{
|
||||
Output.WriteLine("enter " + this._enclosing.RuleNames[ctx.RuleIndex] + ", LT(1)=" + this._enclosing._input.LT(1).Text);
|
||||
_output.WriteLine("enter " + this._enclosing.RuleNames[ctx.RuleIndex] + ", LT(1)=" + this._enclosing._input.LT(1).Text);
|
||||
}
|
||||
|
||||
public virtual void ExitEveryRule(ParserRuleContext ctx)
|
||||
{
|
||||
Output.WriteLine("exit " + this._enclosing.RuleNames[ctx.RuleIndex] + ", LT(1)=" + this._enclosing._input.LT(1).Text);
|
||||
_output.WriteLine("exit " + this._enclosing.RuleNames[ctx.RuleIndex] + ", LT(1)=" + this._enclosing._input.LT(1).Text);
|
||||
}
|
||||
|
||||
public virtual void VisitErrorNode(IErrorNode node)
|
||||
|
@ -46,15 +46,17 @@ namespace Antlr4.Runtime
|
|||
{
|
||||
ParserRuleContext parent = (ParserRuleContext)((IRuleNode)node.Parent).RuleContext;
|
||||
IToken token = node.Symbol;
|
||||
Output.WriteLine("consume " + token + " rule " + this._enclosing.RuleNames[parent.RuleIndex]);
|
||||
_output.WriteLine("consume " + token + " rule " + this._enclosing.RuleNames[parent.RuleIndex]);
|
||||
}
|
||||
|
||||
internal TraceListener(Parser _enclosing)
|
||||
{
|
||||
this._enclosing = _enclosing;
|
||||
_output = Console.Out;
|
||||
}
|
||||
|
||||
private readonly Parser _enclosing;
|
||||
private readonly TextWriter _output;
|
||||
}
|
||||
#endif
|
||||
|
||||
|
|
|
@ -19,7 +19,7 @@ if(NOT WITH_DEMO)
|
|||
FORCE)
|
||||
endif(NOT WITH_DEMO)
|
||||
|
||||
option(WITH_LIBCXX "Building with clang++ and libc++(in Linux). To enable with: -DWITH_LIBCXX=On" On)
|
||||
option(WITH_LIBCXX "Building with clang++ and libc++(in Linux). To enable with: -DWITH_LIBCXX=On" Off)
|
||||
option(WITH_STATIC_CRT "(Visual C++) Enable to statically link CRT, which avoids requiring users to install the redistribution package.
|
||||
To disable with: -DWITH_STATIC_CRT=Off" On)
|
||||
|
||||
|
@ -71,6 +71,9 @@ else()
|
|||
set(MY_CXX_WARNING_FLAGS " -Wall -pedantic -W")
|
||||
endif()
|
||||
|
||||
# Define USE_UTF8_INSTEAD_OF_CODECVT macro.
|
||||
# set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DUSE_UTF8_INSTEAD_OF_CODECVT")
|
||||
|
||||
# Initialize CXXFLAGS.
|
||||
if("${CMAKE_VERSION}" VERSION_GREATER 3.1.0)
|
||||
set(CMAKE_CXX_STANDARD 11)
|
||||
|
|
|
@ -4,9 +4,27 @@
|
|||
rm -f -R antlr4-runtime build lib 2> /dev/null
|
||||
rm antlr4-cpp-runtime-macos.zip 2> /dev/null
|
||||
|
||||
# Get utf8 dependency.
|
||||
mkdir -p runtime/thirdparty 2> /dev/null
|
||||
pushd runtime/thirdparty
|
||||
if [ ! -d utfcpp ]
|
||||
then
|
||||
git clone https://github.com/nemtrif/utfcpp.git utfcpp
|
||||
pushd utfcpp
|
||||
git checkout tags/v3.1.1
|
||||
popd
|
||||
fi
|
||||
popd
|
||||
|
||||
# Binaries
|
||||
xcodebuild -project runtime/antlrcpp.xcodeproj -target antlr4 -configuration Release
|
||||
xcodebuild -project runtime/antlrcpp.xcodeproj -target antlr4_static -configuration Release
|
||||
xcodebuild -project runtime/antlrcpp.xcodeproj \
|
||||
-target antlr4 \
|
||||
# GCC_PREPROCESSOR_DEFINITIONS='$GCC_PREPROCESSOR_DEFINITIONS USE_UTF8_INSTEAD_OF_CODECVT' \
|
||||
-configuration Release
|
||||
xcodebuild -project runtime/antlrcpp.xcodeproj \
|
||||
-target antlr4_static \
|
||||
# GCC_PREPROCESSOR_DEFINITIONS='$GCC_PREPROCESSOR_DEFINITIONS USE_UTF8_INSTEAD_OF_CODECVT' \
|
||||
-configuration Release
|
||||
rm -f -R lib
|
||||
mkdir lib
|
||||
mv runtime/build/Release/libantlr4-runtime.a lib/
|
||||
|
@ -17,6 +35,9 @@ rm -f -R antlr4-runtime
|
|||
pushd runtime/src
|
||||
find . -name '*.h' | cpio -pdm ../../antlr4-runtime
|
||||
popd
|
||||
pushd runtime/thirdparty/utfcpp/source
|
||||
find . -name '*.h' | cpio -pdm ../../../../antlr4-runtime
|
||||
popd
|
||||
|
||||
# Zip up and clean up
|
||||
zip -r antlr4-cpp-runtime-macos.zip antlr4-runtime lib
|
||||
|
|
|
@ -1,4 +1,19 @@
|
|||
|
||||
include(${CMAKE_ROOT}/Modules/ExternalProject.cmake)
|
||||
|
||||
set(THIRDPARTY_DIR ${CMAKE_BINARY_DIR}/runtime/thirdparty)
|
||||
set(UTFCPP_DIR ${THIRDPARTY_DIR}/utfcpp)
|
||||
ExternalProject_Add(
|
||||
utfcpp
|
||||
GIT_REPOSITORY "git://github.com/nemtrif/utfcpp"
|
||||
GIT_TAG "v3.1.1"
|
||||
SOURCE_DIR ${UTFCPP_DIR}
|
||||
UPDATE_DISCONNECTED 1
|
||||
CMAKE_ARGS -DCMAKE_INSTALL_PREFIX=${UTFCPP_DIR}/install -Dgtest_force_shared_crt=ON
|
||||
TEST_AFTER_INSTALL 1
|
||||
STEP_TARGETS build)
|
||||
|
||||
|
||||
include_directories(
|
||||
${PROJECT_SOURCE_DIR}/runtime/src
|
||||
${PROJECT_SOURCE_DIR}/runtime/src/atn
|
||||
|
@ -8,6 +23,8 @@ include_directories(
|
|||
${PROJECT_SOURCE_DIR}/runtime/src/tree
|
||||
${PROJECT_SOURCE_DIR}/runtime/src/tree/pattern
|
||||
${PROJECT_SOURCE_DIR}/runtime/src/tree/xpath
|
||||
${UTFCPP_DIR}/install/include/utf8cpp
|
||||
${UTFCPP_DIR}/install/include/utf8cpp/utf8
|
||||
)
|
||||
|
||||
|
||||
|
@ -33,8 +50,8 @@ add_custom_target(make_lib_output_dir ALL
|
|||
COMMAND ${CMAKE_COMMAND} -E make_directory ${LIB_OUTPUT_DIR}
|
||||
)
|
||||
|
||||
add_dependencies(antlr4_shared make_lib_output_dir)
|
||||
add_dependencies(antlr4_static make_lib_output_dir)
|
||||
add_dependencies(antlr4_shared make_lib_output_dir utfcpp)
|
||||
add_dependencies(antlr4_static make_lib_output_dir utfcpp)
|
||||
|
||||
if(CMAKE_SYSTEM_NAME MATCHES "Linux")
|
||||
target_link_libraries(antlr4_shared ${UUID_LIBRARIES})
|
||||
|
@ -102,15 +119,23 @@ set_target_properties(antlr4_static
|
|||
COMPILE_FLAGS "${disabled_compile_warnings} ${extra_static_compile_flags}")
|
||||
|
||||
install(TARGETS antlr4_shared
|
||||
DESTINATION lib
|
||||
DESTINATION lib
|
||||
EXPORT antlr4-targets)
|
||||
install(TARGETS antlr4_static
|
||||
DESTINATION lib
|
||||
EXPORT antlr4-targets)
|
||||
|
||||
install(DIRECTORY "${PROJECT_SOURCE_DIR}/runtime/src/"
|
||||
install(DIRECTORY "${PROJECT_SOURCE_DIR}/runtime/src/"
|
||||
DESTINATION "include/antlr4-runtime"
|
||||
COMPONENT dev
|
||||
COMPONENT dev
|
||||
FILES_MATCHING PATTERN "*.h"
|
||||
)
|
||||
|
||||
install(FILES "${UTFCPP_DIR}/source/utf8.h"
|
||||
DESTINATION "include/antlr4-runtime")
|
||||
install(DIRECTORY "${UTFCPP_DIR}/source/utf8"
|
||||
DESTINATION "include/antlr4-runtime"
|
||||
COMPONENT dev
|
||||
FILES_MATCHING PATTERN "*.h"
|
||||
)
|
||||
|
||||
|
|
|
@ -182,6 +182,7 @@
|
|||
<DisableSpecificWarnings>4251</DisableSpecificWarnings>
|
||||
<MultiProcessorCompilation>true</MultiProcessorCompilation>
|
||||
<MinimalRebuild>false</MinimalRebuild>
|
||||
<AdditionalOptions>/Zc:__cplusplus %(AdditionalOptions)</AdditionalOptions>
|
||||
</ClCompile>
|
||||
<Link>
|
||||
<SubSystem>Windows</SubSystem>
|
||||
|
@ -201,6 +202,7 @@
|
|||
<DisableSpecificWarnings>4251</DisableSpecificWarnings>
|
||||
<MultiProcessorCompilation>true</MultiProcessorCompilation>
|
||||
<MinimalRebuild>false</MinimalRebuild>
|
||||
<AdditionalOptions>/Zc:__cplusplus %(AdditionalOptions)</AdditionalOptions>
|
||||
</ClCompile>
|
||||
<Link>
|
||||
<SubSystem>Windows</SubSystem>
|
||||
|
@ -220,6 +222,7 @@
|
|||
<DisableSpecificWarnings>4251</DisableSpecificWarnings>
|
||||
<MultiProcessorCompilation>true</MultiProcessorCompilation>
|
||||
<MinimalRebuild>false</MinimalRebuild>
|
||||
<AdditionalOptions>/Zc:__cplusplus %(AdditionalOptions)</AdditionalOptions>
|
||||
</ClCompile>
|
||||
<Link>
|
||||
<SubSystem>Windows</SubSystem>
|
||||
|
@ -239,6 +242,7 @@
|
|||
<DisableSpecificWarnings>4251</DisableSpecificWarnings>
|
||||
<MultiProcessorCompilation>true</MultiProcessorCompilation>
|
||||
<MinimalRebuild>false</MinimalRebuild>
|
||||
<AdditionalOptions>/Zc:__cplusplus %(AdditionalOptions)</AdditionalOptions>
|
||||
</ClCompile>
|
||||
<Link>
|
||||
<SubSystem>Windows</SubSystem>
|
||||
|
@ -259,6 +263,7 @@
|
|||
</ForcedIncludeFiles>
|
||||
<DisableSpecificWarnings>4251</DisableSpecificWarnings>
|
||||
<MultiProcessorCompilation>true</MultiProcessorCompilation>
|
||||
<AdditionalOptions>/Zc:__cplusplus %(AdditionalOptions)</AdditionalOptions>
|
||||
</ClCompile>
|
||||
<Link>
|
||||
<SubSystem>Windows</SubSystem>
|
||||
|
@ -281,6 +286,7 @@
|
|||
</ForcedIncludeFiles>
|
||||
<DisableSpecificWarnings>4251</DisableSpecificWarnings>
|
||||
<MultiProcessorCompilation>true</MultiProcessorCompilation>
|
||||
<AdditionalOptions>/Zc:__cplusplus %(AdditionalOptions)</AdditionalOptions>
|
||||
</ClCompile>
|
||||
<Link>
|
||||
<SubSystem>Windows</SubSystem>
|
||||
|
@ -303,6 +309,7 @@
|
|||
</ForcedIncludeFiles>
|
||||
<DisableSpecificWarnings>4251</DisableSpecificWarnings>
|
||||
<MultiProcessorCompilation>true</MultiProcessorCompilation>
|
||||
<AdditionalOptions>/Zc:__cplusplus %(AdditionalOptions)</AdditionalOptions>
|
||||
</ClCompile>
|
||||
<Link>
|
||||
<SubSystem>Windows</SubSystem>
|
||||
|
@ -325,6 +332,7 @@
|
|||
</ForcedIncludeFiles>
|
||||
<DisableSpecificWarnings>4251</DisableSpecificWarnings>
|
||||
<MultiProcessorCompilation>true</MultiProcessorCompilation>
|
||||
<AdditionalOptions>/Zc:__cplusplus %(AdditionalOptions)</AdditionalOptions>
|
||||
</ClCompile>
|
||||
<Link>
|
||||
<SubSystem>Windows</SubSystem>
|
||||
|
|
|
@ -2889,7 +2889,7 @@
|
|||
GCC_WARN_UNUSED_LABEL = YES;
|
||||
GCC_WARN_UNUSED_PARAMETER = YES;
|
||||
GCC_WARN_UNUSED_VARIABLE = YES;
|
||||
HEADER_SEARCH_PATHS = src/;
|
||||
HEADER_SEARCH_PATHS = "src/ thirdparty/utfcpp/source/ thirdparty/utfcpp/source/utf8/";
|
||||
MACOSX_DEPLOYMENT_TARGET = 10.9;
|
||||
ONLY_ACTIVE_ARCH = YES;
|
||||
SDKROOT = macosx;
|
||||
|
@ -2945,7 +2945,7 @@
|
|||
GCC_WARN_UNUSED_LABEL = YES;
|
||||
GCC_WARN_UNUSED_PARAMETER = YES;
|
||||
GCC_WARN_UNUSED_VARIABLE = YES;
|
||||
HEADER_SEARCH_PATHS = src/;
|
||||
HEADER_SEARCH_PATHS = "src/ thirdparty/utfcpp/source/ thirdparty/utfcpp/source/utf8/";
|
||||
MACOSX_DEPLOYMENT_TARGET = 10.9;
|
||||
SDKROOT = macosx;
|
||||
};
|
||||
|
|
|
@ -17,7 +17,11 @@ using namespace antlrcpp;
|
|||
|
||||
using misc::Interval;
|
||||
|
||||
#if __cplusplus >= 201703L
|
||||
ANTLRInputStream::ANTLRInputStream(std::string_view input) {
|
||||
#else
|
||||
ANTLRInputStream::ANTLRInputStream(const std::string &input) {
|
||||
#endif
|
||||
InitializeInstanceFields();
|
||||
load(input);
|
||||
}
|
||||
|
@ -31,6 +35,16 @@ ANTLRInputStream::ANTLRInputStream(std::istream &stream) {
|
|||
load(stream);
|
||||
}
|
||||
|
||||
#if __cplusplus >= 201703L
|
||||
void ANTLRInputStream::load(std::string_view input) {
|
||||
// Remove the UTF-8 BOM if present.
|
||||
constexpr std::string_view bom = "\xef\xbb\xbf";
|
||||
if (input.compare(0, 3, bom) == 0)
|
||||
input.remove_prefix(3);
|
||||
_data = antlrcpp::utf8_to_utf32(input.data(), input.data() + input.size());
|
||||
p = 0;
|
||||
}
|
||||
#else
|
||||
void ANTLRInputStream::load(const std::string &input) {
|
||||
// Remove the UTF-8 BOM if present.
|
||||
const char bom[4] = "\xef\xbb\xbf";
|
||||
|
@ -40,6 +54,7 @@ void ANTLRInputStream::load(const std::string &input) {
|
|||
_data = antlrcpp::utf8_to_utf32(input.data(), input.data() + input.size());
|
||||
p = 0;
|
||||
}
|
||||
#endif
|
||||
|
||||
void ANTLRInputStream::load(std::istream &stream) {
|
||||
if (!stream.good() || stream.eof()) // No fail, bad or EOF.
|
||||
|
|
|
@ -25,11 +25,19 @@ namespace antlr4 {
|
|||
/// What is name or source of this char stream?
|
||||
std::string name;
|
||||
|
||||
#if __cplusplus >= 201703L
|
||||
ANTLRInputStream(std::string_view input = "");
|
||||
#else
|
||||
ANTLRInputStream(const std::string &input = "");
|
||||
#endif
|
||||
ANTLRInputStream(const char data_[], size_t numberOfActualCharsInArray);
|
||||
ANTLRInputStream(std::istream &stream);
|
||||
|
||||
#if __cplusplus >= 201703L
|
||||
virtual void load(std::string_view input);
|
||||
#else
|
||||
virtual void load(const std::string &input);
|
||||
#endif
|
||||
virtual void load(std::istream &stream);
|
||||
|
||||
/// Reset the stream so that it's in the same state it was
|
||||
|
|
|
@ -27,7 +27,13 @@ namespace antlr4 {
|
|||
/// </summary>
|
||||
class ANTLR4CPP_PUBLIC IntStream {
|
||||
public:
|
||||
static const size_t EOF = static_cast<size_t>(-1); // std::numeric_limits<size_t>::max(); doesn't work in VS 2013
|
||||
#if __cplusplus >= 201703L
|
||||
static constexpr size_t EOF = std::numeric_limits<size_t>::max();
|
||||
#else
|
||||
enum : size_t {
|
||||
EOF = static_cast<size_t>(-1), // std::numeric_limits<size_t>::max(); doesn't work in VS 2013
|
||||
};
|
||||
#endif
|
||||
|
||||
/// The value returned by <seealso cref="#LA LA()"/> when the end of the stream is
|
||||
/// reached.
|
||||
|
|
|
@ -18,14 +18,27 @@ namespace antlr4 {
|
|||
/// of speed.
|
||||
class ANTLR4CPP_PUBLIC Lexer : public Recognizer, public TokenSource {
|
||||
public:
|
||||
static const size_t DEFAULT_MODE = 0;
|
||||
static const size_t MORE = static_cast<size_t>(-2);
|
||||
static const size_t SKIP = static_cast<size_t>(-3);
|
||||
#if __cplusplus >= 201703L
|
||||
static constexpr size_t DEFAULT_MODE = 0;
|
||||
static constexpr size_t MORE = std::numeric_limits<size_t>::max() - 1;
|
||||
static constexpr size_t SKIP = std::numeric_limits<size_t>::max() - 2;
|
||||
|
||||
static const size_t DEFAULT_TOKEN_CHANNEL = Token::DEFAULT_CHANNEL;
|
||||
static const size_t HIDDEN = Token::HIDDEN_CHANNEL;
|
||||
static const size_t MIN_CHAR_VALUE = 0;
|
||||
static const size_t MAX_CHAR_VALUE = 0x10FFFF;
|
||||
static constexpr size_t DEFAULT_TOKEN_CHANNEL = Token::DEFAULT_CHANNEL;
|
||||
static constexpr size_t HIDDEN = Token::HIDDEN_CHANNEL;
|
||||
static constexpr size_t MIN_CHAR_VALUE = 0;
|
||||
static constexpr size_t MAX_CHAR_VALUE = 0x10FFFF;
|
||||
#else
|
||||
enum : size_t {
|
||||
DEFAULT_MODE = 0,
|
||||
MORE = static_cast<size_t>(-2), // std::numeric_limits<size_t>::max() - 1; doesn't work in VS 2013
|
||||
SKIP = static_cast<size_t>(-3), // std::numeric_limits<size_t>::max() - 2; doesn't work in VS 2013
|
||||
|
||||
DEFAULT_TOKEN_CHANNEL = Token::DEFAULT_CHANNEL,
|
||||
HIDDEN = Token::HIDDEN_CHANNEL,
|
||||
MIN_CHAR_VALUE = 0,
|
||||
MAX_CHAR_VALUE = 0x10FFFF,
|
||||
};
|
||||
#endif
|
||||
|
||||
CharStream *_input; // Pure reference, usually from statically allocated instance.
|
||||
|
||||
|
|
|
@ -190,7 +190,7 @@ void Parser::removeParseListeners() {
|
|||
}
|
||||
|
||||
void Parser::triggerEnterRuleEvent() {
|
||||
for (auto listener : _parseListeners) {
|
||||
for (auto *listener : _parseListeners) {
|
||||
listener->enterEveryRule(_ctx);
|
||||
_ctx->enterRule(listener);
|
||||
}
|
||||
|
@ -307,14 +307,14 @@ Token* Parser::consume() {
|
|||
tree::ErrorNode *node = createErrorNode(o);
|
||||
_ctx->addChild(node);
|
||||
if (_parseListeners.size() > 0) {
|
||||
for (auto listener : _parseListeners) {
|
||||
for (auto *listener : _parseListeners) {
|
||||
listener->visitErrorNode(node);
|
||||
}
|
||||
}
|
||||
} else {
|
||||
tree::TerminalNode *node = _ctx->addChild(createTerminalNode(o));
|
||||
if (_parseListeners.size() > 0) {
|
||||
for (auto listener : _parseListeners) {
|
||||
for (auto *listener : _parseListeners) {
|
||||
listener->visitTerminal(node);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -38,8 +38,8 @@ void ParserRuleContext::copyFrom(ParserRuleContext *ctx) {
|
|||
|
||||
// copy any error nodes to alt label node
|
||||
if (!ctx->children.empty()) {
|
||||
for (auto child : ctx->children) {
|
||||
auto errorNode = dynamic_cast<ErrorNode *>(child);
|
||||
for (auto *child : ctx->children) {
|
||||
auto *errorNode = dynamic_cast<ErrorNode *>(child);
|
||||
if (errorNode != nullptr) {
|
||||
errorNode->setParent(this);
|
||||
children.push_back(errorNode);
|
||||
|
@ -82,7 +82,7 @@ tree::TerminalNode* ParserRuleContext::getToken(size_t ttype, size_t i) {
|
|||
}
|
||||
|
||||
size_t j = 0; // what token with ttype have we found?
|
||||
for (auto o : children) {
|
||||
for (auto *o : children) {
|
||||
if (is<tree::TerminalNode *>(o)) {
|
||||
tree::TerminalNode *tnode = dynamic_cast<tree::TerminalNode *>(o);
|
||||
Token *symbol = tnode->getSymbol();
|
||||
|
|
|
@ -114,7 +114,7 @@ namespace antlr4 {
|
|||
template<typename T>
|
||||
std::vector<T *> getRuleContexts() {
|
||||
std::vector<T *> contexts;
|
||||
for (auto child : children) {
|
||||
for (auto *child : children) {
|
||||
if (antlrcpp::is<T *>(child)) {
|
||||
contexts.push_back(dynamic_cast<T *>(child));
|
||||
}
|
||||
|
|
|
@ -26,28 +26,28 @@ void ProxyErrorListener::removeErrorListeners() {
|
|||
void ProxyErrorListener::syntaxError(Recognizer *recognizer, Token *offendingSymbol, size_t line,
|
||||
size_t charPositionInLine, const std::string &msg, std::exception_ptr e) {
|
||||
|
||||
for (auto listener : _delegates) {
|
||||
for (auto *listener : _delegates) {
|
||||
listener->syntaxError(recognizer, offendingSymbol, line, charPositionInLine, msg, e);
|
||||
}
|
||||
}
|
||||
|
||||
void ProxyErrorListener::reportAmbiguity(Parser *recognizer, const dfa::DFA &dfa, size_t startIndex, size_t stopIndex,
|
||||
bool exact, const antlrcpp::BitSet &ambigAlts, atn::ATNConfigSet *configs) {
|
||||
for (auto listener : _delegates) {
|
||||
for (auto *listener : _delegates) {
|
||||
listener->reportAmbiguity(recognizer, dfa, startIndex, stopIndex, exact, ambigAlts, configs);
|
||||
}
|
||||
}
|
||||
|
||||
void ProxyErrorListener::reportAttemptingFullContext(Parser *recognizer, const dfa::DFA &dfa, size_t startIndex,
|
||||
size_t stopIndex, const antlrcpp::BitSet &conflictingAlts, atn::ATNConfigSet *configs) {
|
||||
for (auto listener : _delegates) {
|
||||
for (auto *listener : _delegates) {
|
||||
listener->reportAttemptingFullContext(recognizer, dfa, startIndex, stopIndex, conflictingAlts, configs);
|
||||
}
|
||||
}
|
||||
|
||||
void ProxyErrorListener::reportContextSensitivity(Parser *recognizer, const dfa::DFA &dfa, size_t startIndex, size_t stopIndex,
|
||||
size_t prediction, atn::ATNConfigSet *configs) {
|
||||
for (auto listener : _delegates) {
|
||||
for (auto *listener : _delegates) {
|
||||
listener->reportContextSensitivity(recognizer, dfa, startIndex, stopIndex, prediction, configs);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -11,7 +11,13 @@ namespace antlr4 {
|
|||
|
||||
class ANTLR4CPP_PUBLIC Recognizer {
|
||||
public:
|
||||
static const size_t EOF = static_cast<size_t>(-1); // std::numeric_limits<size_t>::max(); doesn't work in VS 2013.
|
||||
#if __cplusplus >= 201703L
|
||||
static constexpr size_t EOF = std::numeric_limits<size_t>::max();
|
||||
#else
|
||||
enum : size_t {
|
||||
EOF = static_cast<size_t>(-1), // std::numeric_limits<size_t>::max(); doesn't work in VS 2013.
|
||||
};
|
||||
#endif
|
||||
|
||||
Recognizer();
|
||||
Recognizer(Recognizer const&) = delete;
|
||||
|
|
|
@ -14,24 +14,50 @@ namespace antlr4 {
|
|||
/// we obtained this token.
|
||||
class ANTLR4CPP_PUBLIC Token {
|
||||
public:
|
||||
static const size_t INVALID_TYPE = 0;
|
||||
#if __cplusplus >= 201703L
|
||||
static constexpr size_t INVALID_TYPE = 0;
|
||||
#else
|
||||
enum : size_t {
|
||||
INVALID_TYPE = 0,
|
||||
};
|
||||
#endif
|
||||
|
||||
/// During lookahead operations, this "token" signifies we hit rule end ATN state
|
||||
/// and did not follow it despite needing to.
|
||||
static const size_t EPSILON = static_cast<size_t>(-2);
|
||||
static const size_t MIN_USER_TOKEN_TYPE = 1;
|
||||
static const size_t EOF = IntStream::EOF;
|
||||
#if __cplusplus >= 201703L
|
||||
static constexpr size_t EPSILON = std::numeric_limits<size_t>::max() - 1;
|
||||
static constexpr size_t MIN_USER_TOKEN_TYPE = 1;
|
||||
static constexpr size_t EOF = IntStream::EOF;
|
||||
#else
|
||||
enum : size_t {
|
||||
EPSILON = static_cast<size_t>(-2), // std::numeric_limits<size_t>::max() - 1; doesn't work in VS 2013
|
||||
MIN_USER_TOKEN_TYPE = 1,
|
||||
EOF = IntStream::EOF,
|
||||
};
|
||||
#endif
|
||||
|
||||
virtual ~Token();
|
||||
|
||||
/// All tokens go to the parser (unless skip() is called in that rule)
|
||||
/// on a particular "channel". The parser tunes to a particular channel
|
||||
/// so that whitespace etc... can go to the parser on a "hidden" channel.
|
||||
static const size_t DEFAULT_CHANNEL = 0;
|
||||
#if __cplusplus >= 201703L
|
||||
static constexpr size_t DEFAULT_CHANNEL = 0;
|
||||
#else
|
||||
enum : size_t {
|
||||
DEFAULT_CHANNEL = 0,
|
||||
};
|
||||
#endif
|
||||
|
||||
/// Anything on different channel than DEFAULT_CHANNEL is not parsed
|
||||
/// by parser.
|
||||
static const size_t HIDDEN_CHANNEL = 1;
|
||||
#if __cplusplus >= 201703L
|
||||
static constexpr size_t HIDDEN_CHANNEL = 1;
|
||||
#else
|
||||
enum : size_t {
|
||||
HIDDEN_CHANNEL = 1,
|
||||
};
|
||||
#endif
|
||||
|
||||
/**
|
||||
* This is the minimum constant value which can be assigned to a
|
||||
|
@ -44,7 +70,13 @@ namespace antlr4 {
|
|||
*
|
||||
* @see Token#getChannel()
|
||||
*/
|
||||
static const size_t MIN_USER_CHANNEL_VALUE = 2;
|
||||
#if __cplusplus >= 201703L
|
||||
static constexpr size_t MIN_USER_CHANNEL_VALUE = 2;
|
||||
#else
|
||||
enum : size_t {
|
||||
MIN_USER_CHANNEL_VALUE = 2,
|
||||
};
|
||||
#endif
|
||||
|
||||
/// Get the text of the token.
|
||||
virtual std::string getText() const = 0;
|
||||
|
|
|
@ -94,7 +94,7 @@ TokenStreamRewriter::TokenStreamRewriter(TokenStream *tokens_) : tokens(tokens_)
|
|||
|
||||
TokenStreamRewriter::~TokenStreamRewriter() {
|
||||
for (auto program : _programs) {
|
||||
for (auto operation : program.second) {
|
||||
for (auto *operation : program.second) {
|
||||
delete operation;
|
||||
}
|
||||
}
|
||||
|
@ -323,7 +323,7 @@ std::unordered_map<size_t, TokenStreamRewriter::RewriteOperation*> TokenStreamRe
|
|||
|
||||
// Wipe prior inserts within range
|
||||
std::vector<InsertBeforeOp *> inserts = getKindOfOps<InsertBeforeOp>(rewrites, i);
|
||||
for (auto iop : inserts) {
|
||||
for (auto *iop : inserts) {
|
||||
if (iop->index == rop->index) {
|
||||
// E.g., insert before 2, delete 2..2; update replace
|
||||
// text to include insert before, kill insert
|
||||
|
@ -339,7 +339,7 @@ std::unordered_map<size_t, TokenStreamRewriter::RewriteOperation*> TokenStreamRe
|
|||
}
|
||||
// Drop any prior replaces contained within
|
||||
std::vector<ReplaceOp*> prevReplaces = getKindOfOps<ReplaceOp>(rewrites, i);
|
||||
for (auto prevRop : prevReplaces) {
|
||||
for (auto *prevRop : prevReplaces) {
|
||||
if (prevRop->index >= rop->index && prevRop->lastIndex <= rop->lastIndex) {
|
||||
// delete replace as it's a no-op.
|
||||
delete rewrites[prevRop->instructionIndex];
|
||||
|
@ -373,7 +373,7 @@ std::unordered_map<size_t, TokenStreamRewriter::RewriteOperation*> TokenStreamRe
|
|||
// combine current insert with prior if any at same index
|
||||
|
||||
std::vector<InsertBeforeOp *> prevInserts = getKindOfOps<InsertBeforeOp>(rewrites, i);
|
||||
for (auto prevIop : prevInserts) {
|
||||
for (auto *prevIop : prevInserts) {
|
||||
if (prevIop->index == iop->index) { // combine objects
|
||||
// convert to strings...we're in process of toString'ing
|
||||
// whole token buffer so no lazy eval issue with any templates
|
||||
|
@ -385,7 +385,7 @@ std::unordered_map<size_t, TokenStreamRewriter::RewriteOperation*> TokenStreamRe
|
|||
}
|
||||
// look for replaces where iop.index is in range; error
|
||||
std::vector<ReplaceOp*> prevReplaces = getKindOfOps<ReplaceOp>(rewrites, i);
|
||||
for (auto rop : prevReplaces) {
|
||||
for (auto *rop : prevReplaces) {
|
||||
if (iop->index == rop->index) {
|
||||
rop->text = catOpText(&iop->text, &rop->text);
|
||||
delete rewrites[i];
|
||||
|
|
|
@ -86,8 +86,15 @@ namespace antlr4 {
|
|||
class ANTLR4CPP_PUBLIC TokenStreamRewriter {
|
||||
public:
|
||||
static const std::string DEFAULT_PROGRAM_NAME;
|
||||
static const size_t PROGRAM_INIT_SIZE = 100;
|
||||
static const size_t MIN_TOKEN_INDEX = 0;
|
||||
#if __cplusplus >= 201703L
|
||||
static constexpr size_t PROGRAM_INIT_SIZE = 100;
|
||||
static constexpr size_t MIN_TOKEN_INDEX = 0;
|
||||
#else
|
||||
enum : size_t {
|
||||
PROGRAM_INIT_SIZE = 100,
|
||||
MIN_TOKEN_INDEX = 0,
|
||||
};
|
||||
#endif
|
||||
|
||||
TokenStreamRewriter(TokenStream *tokens);
|
||||
virtual ~TokenStreamRewriter();
|
||||
|
|
|
@ -22,8 +22,7 @@ Vocabulary::Vocabulary(const std::vector<std::string> &literalNames,
|
|||
// See note here on -1 part: https://github.com/antlr/antlr4/pull/1146
|
||||
}
|
||||
|
||||
Vocabulary::~Vocabulary() {
|
||||
}
|
||||
Vocabulary::~Vocabulary() = default;
|
||||
|
||||
Vocabulary Vocabulary::fromTokenNames(const std::vector<std::string> &tokenNames) {
|
||||
if (tokenNames.empty()) {
|
||||
|
@ -34,25 +33,18 @@ Vocabulary Vocabulary::fromTokenNames(const std::vector<std::string> &tokenNames
|
|||
std::vector<std::string> symbolicNames = tokenNames;
|
||||
std::locale locale;
|
||||
for (size_t i = 0; i < tokenNames.size(); i++) {
|
||||
std::string tokenName = tokenNames[i];
|
||||
if (tokenName == "") {
|
||||
const std::string& tokenName = tokenNames[i];
|
||||
if (tokenName.empty()) {
|
||||
continue;
|
||||
} else if (tokenName.front() == '\'') {
|
||||
symbolicNames[i].clear();
|
||||
} else if (std::isupper(tokenName.front(), locale)) {
|
||||
literalNames[i].clear();
|
||||
} else {
|
||||
// wasn't a literal or symbolic name
|
||||
literalNames[i].clear();
|
||||
symbolicNames[i].clear();
|
||||
}
|
||||
|
||||
if (!tokenName.empty()) {
|
||||
char firstChar = tokenName[0];
|
||||
if (firstChar == '\'') {
|
||||
symbolicNames[i] = "";
|
||||
continue;
|
||||
} else if (std::isupper(firstChar, locale)) {
|
||||
literalNames[i] = "";
|
||||
continue;
|
||||
}
|
||||
}
|
||||
|
||||
// wasn't a literal or symbolic name
|
||||
literalNames[i] = "";
|
||||
symbolicNames[i] = "";
|
||||
}
|
||||
|
||||
return Vocabulary(literalNames, symbolicNames, tokenNames);
|
||||
|
|
|
@ -14,9 +14,6 @@ namespace dfa {
|
|||
/// interface.
|
||||
class ANTLR4CPP_PUBLIC Vocabulary {
|
||||
public:
|
||||
Vocabulary(Vocabulary const&) = default;
|
||||
virtual ~Vocabulary();
|
||||
|
||||
/// Gets an empty <seealso cref="Vocabulary"/> instance.
|
||||
///
|
||||
/// <para>
|
||||
|
@ -25,7 +22,9 @@ namespace dfa {
|
|||
/// except <seealso cref="Token#EOF"/>.</para>
|
||||
static const Vocabulary EMPTY_VOCABULARY;
|
||||
|
||||
Vocabulary() {}
|
||||
Vocabulary() = default;
|
||||
Vocabulary(Vocabulary const&) = default;
|
||||
virtual ~Vocabulary();
|
||||
|
||||
/// <summary>
|
||||
/// Constructs a new instance of <seealso cref="Vocabulary"/> from the specified
|
||||
|
|
|
@ -8,7 +8,6 @@
|
|||
#include <algorithm>
|
||||
#include <assert.h>
|
||||
#include <atomic>
|
||||
#include <codecvt>
|
||||
#include <chrono>
|
||||
#include <fstream>
|
||||
#include <iostream>
|
||||
|
@ -37,6 +36,10 @@
|
|||
#include <condition_variable>
|
||||
#include <functional>
|
||||
|
||||
#ifndef USE_UTF8_INSTEAD_OF_CODECVT
|
||||
#include <codecvt>
|
||||
#endif
|
||||
|
||||
// Defines for the Guid class and other platform dependent stuff.
|
||||
#ifdef _WIN32
|
||||
#ifdef _MSC_VER
|
||||
|
|
|
@ -183,7 +183,7 @@ std::string ATN::toString() const {
|
|||
ss << "states (" << states.size() << ") {" << std::endl;
|
||||
|
||||
size_t index = 0;
|
||||
for (auto state : states) {
|
||||
for (auto *state : states) {
|
||||
if (state == nullptr) {
|
||||
ss << " " << index++ << ": nul" << std::endl;
|
||||
} else {
|
||||
|
@ -193,7 +193,7 @@ std::string ATN::toString() const {
|
|||
}
|
||||
|
||||
index = 0;
|
||||
for (auto state : decisionToState) {
|
||||
for (auto *state : decisionToState) {
|
||||
if (state == nullptr) {
|
||||
ss << " " << index++ << ": nul" << std::endl;
|
||||
} else {
|
||||
|
|
|
@ -12,7 +12,13 @@ namespace atn {
|
|||
|
||||
class ANTLR4CPP_PUBLIC ATN {
|
||||
public:
|
||||
static const size_t INVALID_ALT_NUMBER = 0;
|
||||
#if __cplusplus >= 201703L
|
||||
static constexpr size_t INVALID_ALT_NUMBER = 0;
|
||||
#else
|
||||
enum : size_t {
|
||||
INVALID_ALT_NUMBER = 0,
|
||||
};
|
||||
#endif
|
||||
|
||||
/// Used for runtime deserialization of ATNs from strings.
|
||||
ATN();
|
||||
|
|
|
@ -11,8 +11,6 @@
|
|||
|
||||
using namespace antlr4::atn;
|
||||
|
||||
const size_t ATNConfig::SUPPRESS_PRECEDENCE_FILTER = 0x40000000;
|
||||
|
||||
ATNConfig::ATNConfig(ATNState *state_, size_t alt_, Ref<PredictionContext> const& context_)
|
||||
: ATNConfig(state_, alt_, context_, SemanticContext::NONE) {
|
||||
}
|
||||
|
|
|
@ -114,7 +114,13 @@ namespace atn {
|
|||
* {@link #isPrecedenceFilterSuppressed} property as a bit within the
|
||||
* existing {@link #reachesIntoOuterContext} field.
|
||||
*/
|
||||
static const size_t SUPPRESS_PRECEDENCE_FILTER;
|
||||
#if __cplusplus >= 201703L
|
||||
static constexpr size_t SUPPRESS_PRECEDENCE_FILTER = 0x40000000;
|
||||
#else
|
||||
enum : size_t {
|
||||
SUPPRESS_PRECEDENCE_FILTER = 0x40000000,
|
||||
};
|
||||
#endif
|
||||
};
|
||||
|
||||
} // namespace atn
|
||||
|
@ -139,7 +145,7 @@ namespace std {
|
|||
size_t operator() (const std::vector<Ref<ATNConfig>> &vector) const
|
||||
{
|
||||
std::size_t seed = 0;
|
||||
for (auto &config : vector) {
|
||||
for (const auto &config : vector) {
|
||||
seed ^= config->hashCode() + 0x9e3779b9 + (seed << 6) + (seed >> 2);
|
||||
}
|
||||
return seed;
|
||||
|
|
|
@ -57,8 +57,6 @@ using namespace antlr4;
|
|||
using namespace antlr4::atn;
|
||||
using namespace antlrcpp;
|
||||
|
||||
const size_t ATNDeserializer::SERIALIZED_VERSION = 3;
|
||||
|
||||
namespace {
|
||||
|
||||
uint32_t deserializeInt32(const std::vector<uint16_t>& data, size_t offset) {
|
||||
|
|
|
@ -13,7 +13,13 @@ namespace atn {
|
|||
|
||||
class ANTLR4CPP_PUBLIC ATNDeserializer {
|
||||
public:
|
||||
static const size_t SERIALIZED_VERSION;
|
||||
#if __cplusplus >= 201703L
|
||||
static constexpr size_t SERIALIZED_VERSION = 3;
|
||||
#else
|
||||
enum : size_t {
|
||||
SERIALIZED_VERSION = 3,
|
||||
};
|
||||
#endif
|
||||
|
||||
/// This is the current serialized UUID.
|
||||
// ml: defined as function to avoid the “static initialization order fiasco”.
|
||||
|
|
|
@ -166,7 +166,7 @@ std::vector<size_t> ATNSerializer::serialize() {
|
|||
}
|
||||
|
||||
data.push_back(containsEof ? 1 : 0);
|
||||
for (auto &interval : set.getIntervals()) {
|
||||
for (const auto &interval : set.getIntervals()) {
|
||||
if (interval.a == -1) {
|
||||
if (interval.b == -1) {
|
||||
continue;
|
||||
|
|
|
@ -17,7 +17,7 @@ ATNState::ATNState() {
|
|||
}
|
||||
|
||||
ATNState::~ATNState() {
|
||||
for (auto transition : transitions) {
|
||||
for (auto *transition : transitions) {
|
||||
delete transition;
|
||||
}
|
||||
}
|
||||
|
|
|
@ -81,8 +81,15 @@ namespace atn {
|
|||
|
||||
ATNState& operator=(ATNState const&) = delete;
|
||||
|
||||
static const size_t INITIAL_NUM_TRANSITIONS = 4;
|
||||
static const size_t INVALID_STATE_NUMBER = static_cast<size_t>(-1); // std::numeric_limits<size_t>::max();
|
||||
#if __cplusplus >= 201703L
|
||||
static constexpr size_t INITIAL_NUM_TRANSITIONS = 4;
|
||||
static constexpr size_t INVALID_STATE_NUMBER = std::numeric_limits<size_t>::max();
|
||||
#else
|
||||
enum : size_t {
|
||||
INITIAL_NUM_TRANSITIONS = 4,
|
||||
INVALID_STATE_NUMBER = static_cast<size_t>(-1), // std::numeric_limits<size_t>::max(); doesn't work in VS 2013
|
||||
};
|
||||
#endif
|
||||
|
||||
enum {
|
||||
ATN_INVALID_TYPE = 0,
|
||||
|
|
|
@ -17,7 +17,13 @@ namespace atn {
|
|||
public:
|
||||
/// Special value added to the lookahead sets to indicate that we hit
|
||||
/// a predicate during analysis if {@code seeThruPreds==false}.
|
||||
static const size_t HIT_PRED = Token::INVALID_TYPE;
|
||||
#if __cplusplus >= 201703L
|
||||
static constexpr size_t HIT_PRED = Token::INVALID_TYPE;
|
||||
#else
|
||||
enum : size_t {
|
||||
HIT_PRED = Token::INVALID_TYPE,
|
||||
};
|
||||
#endif
|
||||
|
||||
const atn::ATN &_atn;
|
||||
|
||||
|
|
|
@ -38,8 +38,15 @@ namespace atn {
|
|||
|
||||
|
||||
public:
|
||||
static const size_t MIN_DFA_EDGE = 0;
|
||||
static const size_t MAX_DFA_EDGE = 127; // forces unicode to stay in ATN
|
||||
#if __cplusplus >= 201703L
|
||||
static constexpr size_t MIN_DFA_EDGE = 0;
|
||||
static constexpr size_t MAX_DFA_EDGE = 127; // forces unicode to stay in ATN
|
||||
#else
|
||||
enum : size_t {
|
||||
MIN_DFA_EDGE = 0,
|
||||
MAX_DFA_EDGE = 127, // forces unicode to stay in ATN
|
||||
};
|
||||
#endif
|
||||
|
||||
protected:
|
||||
/// <summary>
|
||||
|
|
|
@ -774,7 +774,7 @@ std::pair<ATNConfigSet *, ATNConfigSet *> ParserATNSimulator::splitAccordingToSe
|
|||
BitSet ParserATNSimulator::evalSemanticContext(std::vector<dfa::DFAState::PredPrediction*> predPredictions,
|
||||
ParserRuleContext *outerContext, bool complete) {
|
||||
BitSet predictions;
|
||||
for (auto prediction : predPredictions) {
|
||||
for (auto *prediction : predPredictions) {
|
||||
if (prediction->pred == SemanticContext::NONE) {
|
||||
predictions.set(prediction->alt);
|
||||
if (!complete) {
|
||||
|
|
|
@ -30,10 +30,22 @@ namespace atn {
|
|||
// ml: originally Integer.MAX_VALUE, which would be -1 for us, but this is already used in places where
|
||||
// -1 is converted to unsigned, so we use a different value here. Any value does the job provided it doesn't
|
||||
// conflict with real return states.
|
||||
static const size_t EMPTY_RETURN_STATE = static_cast<size_t>(-10); // std::numeric_limits<size_t>::max() - 9;
|
||||
#if __cplusplus >= 201703L
|
||||
static constexpr size_t EMPTY_RETURN_STATE = std::numeric_limits<size_t>::max() - 9;
|
||||
#else
|
||||
enum : size_t {
|
||||
EMPTY_RETURN_STATE = static_cast<size_t>(-10), // std::numeric_limits<size_t>::max() - 9; doesn't work in VS 2013
|
||||
};
|
||||
#endif
|
||||
|
||||
private:
|
||||
static const size_t INITIAL_HASH = 1;
|
||||
#if __cplusplus >= 201703L
|
||||
static constexpr size_t INITIAL_HASH = 1;
|
||||
#else
|
||||
enum : size_t {
|
||||
INITIAL_HASH = 1,
|
||||
};
|
||||
#endif
|
||||
|
||||
public:
|
||||
static size_t globalNodeCount;
|
||||
|
|
|
@ -46,7 +46,7 @@ DFA::DFA(DFA &&other) : atnStartState(other.atnStartState), decision(other.decis
|
|||
|
||||
DFA::~DFA() {
|
||||
bool s0InList = (s0 == nullptr);
|
||||
for (auto state : states) {
|
||||
for (auto *state : states) {
|
||||
if (state == s0)
|
||||
s0InList = true;
|
||||
delete state;
|
||||
|
@ -88,7 +88,7 @@ void DFA::setPrecedenceStartState(int precedence, DFAState *startState, SingleWr
|
|||
|
||||
std::vector<DFAState *> DFA::getStates() const {
|
||||
std::vector<DFAState *> result;
|
||||
for (auto state : states)
|
||||
for (auto *state : states)
|
||||
result.push_back(state);
|
||||
|
||||
std::sort(result.begin(), result.end(), [](DFAState *o1, DFAState *o2) -> bool {
|
||||
|
|
|
@ -27,7 +27,7 @@ std::string DFASerializer::toString() const {
|
|||
|
||||
std::stringstream ss;
|
||||
std::vector<DFAState *> states = _dfa->getStates();
|
||||
for (auto s : states) {
|
||||
for (auto *s : states) {
|
||||
for (size_t i = 0; i < s->edges.size(); i++) {
|
||||
DFAState *t = s->edges[i];
|
||||
if (t != nullptr && t->stateNumber != INT32_MAX) {
|
||||
|
|
|
@ -42,7 +42,7 @@ DFAState::DFAState(std::unique_ptr<ATNConfigSet> configs_) : DFAState() {
|
|||
}
|
||||
|
||||
DFAState::~DFAState() {
|
||||
for (auto predicate : predicates) {
|
||||
for (auto *predicate : predicates) {
|
||||
delete predicate;
|
||||
}
|
||||
}
|
||||
|
|
|
@ -112,7 +112,7 @@ void IntervalSet::add(const Interval &addition) {
|
|||
|
||||
IntervalSet IntervalSet::Or(const std::vector<IntervalSet> &sets) {
|
||||
IntervalSet result;
|
||||
for (auto &s : sets) {
|
||||
for (const auto &s : sets) {
|
||||
result.addAll(s);
|
||||
}
|
||||
return result;
|
||||
|
@ -271,7 +271,7 @@ bool IntervalSet::contains(ssize_t el) const {
|
|||
if (el < _intervals[0].a) // list is sorted and el is before first interval; not here
|
||||
return false;
|
||||
|
||||
for (auto &interval : _intervals) {
|
||||
for (const auto &interval : _intervals) {
|
||||
if (el >= interval.a && el <= interval.b) {
|
||||
return true; // found in this interval
|
||||
}
|
||||
|
@ -315,7 +315,7 @@ std::vector<Interval> const& IntervalSet::getIntervals() const {
|
|||
|
||||
size_t IntervalSet::hashCode() const {
|
||||
size_t hash = MurmurHash::initialize();
|
||||
for (auto &interval : _intervals) {
|
||||
for (const auto &interval : _intervals) {
|
||||
hash = MurmurHash::update(hash, interval.a);
|
||||
hash = MurmurHash::update(hash, interval.b);
|
||||
}
|
||||
|
@ -349,7 +349,7 @@ std::string IntervalSet::toString(bool elemAreChar) const {
|
|||
}
|
||||
|
||||
bool firstEntry = true;
|
||||
for (auto &interval : _intervals) {
|
||||
for (const auto &interval : _intervals) {
|
||||
if (!firstEntry)
|
||||
ss << ", ";
|
||||
firstEntry = false;
|
||||
|
@ -395,7 +395,7 @@ std::string IntervalSet::toString(const dfa::Vocabulary &vocabulary) const {
|
|||
}
|
||||
|
||||
bool firstEntry = true;
|
||||
for (auto &interval : _intervals) {
|
||||
for (const auto &interval : _intervals) {
|
||||
if (!firstEntry)
|
||||
ss << ", ";
|
||||
firstEntry = false;
|
||||
|
@ -436,7 +436,7 @@ std::string IntervalSet::elementName(const dfa::Vocabulary &vocabulary, ssize_t
|
|||
|
||||
size_t IntervalSet::size() const {
|
||||
size_t result = 0;
|
||||
for (auto &interval : _intervals) {
|
||||
for (const auto &interval : _intervals) {
|
||||
result += size_t(interval.b - interval.a + 1);
|
||||
}
|
||||
return result;
|
||||
|
@ -444,7 +444,7 @@ size_t IntervalSet::size() const {
|
|||
|
||||
std::vector<ssize_t> IntervalSet::toList() const {
|
||||
std::vector<ssize_t> result;
|
||||
for (auto &interval : _intervals) {
|
||||
for (const auto &interval : _intervals) {
|
||||
ssize_t a = interval.a;
|
||||
ssize_t b = interval.b;
|
||||
for (ssize_t v = a; v <= b; v++) {
|
||||
|
@ -456,7 +456,7 @@ std::vector<ssize_t> IntervalSet::toList() const {
|
|||
|
||||
std::set<ssize_t> IntervalSet::toSet() const {
|
||||
std::set<ssize_t> result;
|
||||
for (auto &interval : _intervals) {
|
||||
for (const auto &interval : _intervals) {
|
||||
ssize_t a = interval.a;
|
||||
ssize_t b = interval.b;
|
||||
for (ssize_t v = a; v <= b; v++) {
|
||||
|
@ -468,7 +468,7 @@ std::set<ssize_t> IntervalSet::toSet() const {
|
|||
|
||||
ssize_t IntervalSet::get(size_t i) const {
|
||||
size_t index = 0;
|
||||
for (auto &interval : _intervals) {
|
||||
for (const auto &interval : _intervals) {
|
||||
ssize_t a = interval.a;
|
||||
ssize_t b = interval.b;
|
||||
for (ssize_t v = a; v <= b; v++) {
|
||||
|
|
|
@ -13,7 +13,13 @@ namespace misc {
|
|||
class ANTLR4CPP_PUBLIC MurmurHash {
|
||||
|
||||
private:
|
||||
static const size_t DEFAULT_SEED = 0;
|
||||
#if __cplusplus >= 201703L
|
||||
static constexpr size_t DEFAULT_SEED = 0;
|
||||
#else
|
||||
enum : size_t {
|
||||
DEFAULT_SEED = 0,
|
||||
};
|
||||
#endif
|
||||
|
||||
/// Initialize the hash using the default seed value.
|
||||
/// Returns the intermediate hash value.
|
||||
|
|
|
@ -79,7 +79,7 @@ struct ANTLR4CPP_PUBLIC Any
|
|||
if (_ptr == a._ptr)
|
||||
return *this;
|
||||
|
||||
auto old_ptr = _ptr;
|
||||
auto * old_ptr = _ptr;
|
||||
_ptr = a.clone();
|
||||
|
||||
if (old_ptr)
|
||||
|
|
|
@ -16,7 +16,7 @@ std::string Arrays::listToString(const std::vector<std::string> &list, const std
|
|||
bool firstEntry = true;
|
||||
|
||||
ss << '[';
|
||||
for (auto &entry : list) {
|
||||
for (const auto &entry : list) {
|
||||
ss << entry;
|
||||
if (firstEntry) {
|
||||
ss << separator;
|
||||
|
@ -32,7 +32,7 @@ template <>
|
|||
std::string Arrays::toString(const std::vector<antlr4::tree::ParseTree*> &source) {
|
||||
std::string result = "[";
|
||||
bool firstEntry = true;
|
||||
for (auto value : source) {
|
||||
for (auto *value : source) {
|
||||
result += value->toStringTree();
|
||||
if (firstEntry) {
|
||||
result += ", ";
|
||||
|
|
|
@ -46,7 +46,7 @@ namespace antlrcpp {
|
|||
|
||||
case ' ':
|
||||
if (escapeSpaces) {
|
||||
result += "·";
|
||||
result += "\u00B7";
|
||||
break;
|
||||
}
|
||||
// else fall through
|
||||
|
|
|
@ -20,16 +20,26 @@ void replaceAll(std::string& str, std::string const& from, std::string const& to
|
|||
}
|
||||
|
||||
std::string ws2s(std::wstring const& wstr) {
|
||||
#ifndef USE_UTF8_INSTEAD_OF_CODECVT
|
||||
std::wstring_convert<std::codecvt_utf8_utf16<wchar_t>> converter;
|
||||
std::string narrow = converter.to_bytes(wstr);
|
||||
#else
|
||||
std::string narrow;
|
||||
utf8::utf32to8(wstr.begin(), wstr.end(), std::back_inserter(narrow));
|
||||
#endif
|
||||
|
||||
return narrow;
|
||||
}
|
||||
|
||||
std::wstring s2ws(const std::string &str) {
|
||||
#ifndef USE_UTF8_INSTEAD_OF_CODECVT
|
||||
std::wstring_convert<std::codecvt_utf8_utf16<wchar_t>> converter;
|
||||
std::wstring wide = converter.from_bytes(str);
|
||||
|
||||
#else
|
||||
std::wstring wide;
|
||||
utf8::utf8to32(str.begin(), str.end(), std::back_inserter(wide));
|
||||
#endif
|
||||
|
||||
return wide;
|
||||
}
|
||||
|
||||
|
|
|
@ -7,43 +7,65 @@
|
|||
|
||||
#include "antlr4-common.h"
|
||||
|
||||
#ifdef USE_UTF8_INSTEAD_OF_CODECVT
|
||||
#include "utf8.h"
|
||||
#endif
|
||||
|
||||
namespace antlrcpp {
|
||||
|
||||
// For all conversions utf8 <-> utf32.
|
||||
// I wouldn't prefer wstring_convert because: according to
|
||||
// https://en.cppreference.com/w/cpp/locale/wstring_convert,
|
||||
// wstring_convert is deprecated in C++17.
|
||||
// utfcpp (https://github.com/nemtrif/utfcpp) is a substitution.
|
||||
#ifndef USE_UTF8_INSTEAD_OF_CODECVT
|
||||
// VS 2015 and VS 2017 have different bugs in std::codecvt_utf8<char32_t> (VS 2013 works fine).
|
||||
#if defined(_MSC_VER) && _MSC_VER >= 1900 && _MSC_VER < 2000
|
||||
typedef std::wstring_convert<std::codecvt_utf8<__int32>, __int32> UTF32Converter;
|
||||
#else
|
||||
typedef std::wstring_convert<std::codecvt_utf8<char32_t>, char32_t> UTF32Converter;
|
||||
#if defined(_MSC_VER) && _MSC_VER >= 1900 && _MSC_VER < 2000
|
||||
typedef std::wstring_convert<std::codecvt_utf8<__int32>, __int32> UTF32Converter;
|
||||
#else
|
||||
typedef std::wstring_convert<std::codecvt_utf8<char32_t>, char32_t> UTF32Converter;
|
||||
#endif
|
||||
#endif
|
||||
|
||||
|
||||
// The conversion functions fails in VS2017, so we explicitly use a workaround.
|
||||
template<typename T>
|
||||
inline std::string utf32_to_utf8(T const& data)
|
||||
{
|
||||
// Don't make the converter static or we have to serialize access to it.
|
||||
thread_local UTF32Converter converter;
|
||||
#ifndef USE_UTF8_INSTEAD_OF_CODECVT
|
||||
// Don't make the converter static or we have to serialize access to it.
|
||||
thread_local UTF32Converter converter;
|
||||
|
||||
#if defined(_MSC_VER) && _MSC_VER >= 1900 && _MSC_VER < 2000
|
||||
auto p = reinterpret_cast<const int32_t *>(data.data());
|
||||
return converter.to_bytes(p, p + data.size());
|
||||
#if defined(_MSC_VER) && _MSC_VER >= 1900 && _MSC_VER < 2000
|
||||
const auto p = reinterpret_cast<const int32_t *>(data.data());
|
||||
return converter.to_bytes(p, p + data.size());
|
||||
#else
|
||||
return converter.to_bytes(data);
|
||||
#endif
|
||||
#else
|
||||
return converter.to_bytes(data);
|
||||
std::string narrow;
|
||||
utf8::utf32to8(data.begin(), data.end(), std::back_inserter(narrow));
|
||||
return narrow;
|
||||
#endif
|
||||
}
|
||||
|
||||
inline UTF32String utf8_to_utf32(const char* first, const char* last)
|
||||
{
|
||||
thread_local UTF32Converter converter;
|
||||
#ifndef USE_UTF8_INSTEAD_OF_CODECVT
|
||||
thread_local UTF32Converter converter;
|
||||
|
||||
#if defined(_MSC_VER) && _MSC_VER >= 1900 && _MSC_VER < 2000
|
||||
auto r = converter.from_bytes(first, last);
|
||||
i32string s = reinterpret_cast<const int32_t *>(r.data());
|
||||
#if defined(_MSC_VER) && _MSC_VER >= 1900 && _MSC_VER < 2000
|
||||
auto r = converter.from_bytes(first, last);
|
||||
i32string s = reinterpret_cast<const int32_t *>(r.data());
|
||||
return s;
|
||||
#else
|
||||
std::u32string s = converter.from_bytes(first, last);
|
||||
return s;
|
||||
#endif
|
||||
#else
|
||||
std::u32string s = converter.from_bytes(first, last);
|
||||
UTF32String wide;
|
||||
utf8::utf8to32(first, last, std::back_inserter(wide));
|
||||
return wide;
|
||||
#endif
|
||||
|
||||
return s;
|
||||
}
|
||||
|
||||
void replaceAll(std::string &str, std::string const& from, std::string const& to);
|
||||
|
|
|
@ -88,7 +88,7 @@ namespace tree {
|
|||
}
|
||||
|
||||
void reset() {
|
||||
for (auto entry : _allocated)
|
||||
for (auto * entry : _allocated)
|
||||
delete entry;
|
||||
_allocated.clear();
|
||||
}
|
||||
|
|
|
@ -192,7 +192,7 @@ std::vector<ParseTree *> Trees::getDescendants(ParseTree *t) {
|
|||
std::size_t n = t->children.size();
|
||||
for (size_t i = 0 ; i < n ; i++) {
|
||||
auto descentants = getDescendants(t->children[i]);
|
||||
for (auto entry: descentants) {
|
||||
for (auto *entry: descentants) {
|
||||
nodes.push_back(entry);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -37,7 +37,7 @@ std::vector<ParseTreeMatch> ParseTreePattern::findAll(ParseTree *tree, const std
|
|||
xpath::XPath finder(_matcher->getParser(), xpath);
|
||||
std::vector<ParseTree *> subtrees = finder.evaluate(tree);
|
||||
std::vector<ParseTreeMatch> matches;
|
||||
for (auto t : subtrees) {
|
||||
for (auto *t : subtrees) {
|
||||
ParseTreeMatch aMatch = match(t);
|
||||
if (aMatch.succeeded()) {
|
||||
matches.push_back(aMatch);
|
||||
|
|
|
@ -137,7 +137,7 @@ std::vector<ParseTree *> XPath::evaluate(ParseTree *t) {
|
|||
|
||||
while (i < elements.size()) {
|
||||
std::vector<ParseTree *> next;
|
||||
for (auto node : work) {
|
||||
for (auto *node : work) {
|
||||
if (!node->children.empty()) {
|
||||
// only try to match next element if it has children
|
||||
// e.g., //func/*/stat might have a token node for which
|
||||
|
|
|
@ -18,7 +18,7 @@ XPathRuleElement::XPathRuleElement(const std::string &ruleName, size_t ruleIndex
|
|||
std::vector<ParseTree *> XPathRuleElement::evaluate(ParseTree *t) {
|
||||
// return all children of t that match nodeName
|
||||
std::vector<ParseTree *> nodes;
|
||||
for (auto c : t->children) {
|
||||
for (auto *c : t->children) {
|
||||
if (antlrcpp::is<ParserRuleContext *>(c)) {
|
||||
ParserRuleContext *ctx = dynamic_cast<ParserRuleContext *>(c);
|
||||
if ((ctx->getRuleIndex() == _ruleIndex && !_invert) || (ctx->getRuleIndex() != _ruleIndex && _invert)) {
|
||||
|
|
|
@ -21,7 +21,7 @@ XPathTokenElement::XPathTokenElement(const std::string &tokenName, size_t tokenT
|
|||
std::vector<ParseTree *> XPathTokenElement::evaluate(ParseTree *t) {
|
||||
// return all children of t that match nodeName
|
||||
std::vector<ParseTree *> nodes;
|
||||
for (auto c : t->children) {
|
||||
for (auto *c : t->children) {
|
||||
if (antlrcpp::is<TerminalNode *>(c)) {
|
||||
TerminalNode *tnode = dynamic_cast<TerminalNode *>(c);
|
||||
if ((tnode->getSymbol()->getType() == _tokenType && !_invert) || (tnode->getSymbol()->getType() != _tokenType && _invert)) {
|
||||
|
|
|
@ -0,0 +1,23 @@
|
|||
!lib
|
||||
|
||||
# See https://www.dartlang.org/guides/libraries/private-files
|
||||
|
||||
# Files and directories created by pub
|
||||
.dart_tool/
|
||||
.packages
|
||||
build/
|
||||
# If you're building an application, you may want to check-in your pubspec.lock
|
||||
pubspec.lock
|
||||
|
||||
# Directory created by dartdoc
|
||||
# If you don't generate documentation locally you can remove this line.
|
||||
doc/api/
|
||||
|
||||
# Avoid committing generated Javascript files:
|
||||
*.dart.js
|
||||
*.info.json # Produced by the --dump-info flag.
|
||||
*.js # When generated by dart2js. Don't specify *.js if your
|
||||
# project includes source files written in JavaScript.
|
||||
*.js_
|
||||
*.js.deps
|
||||
*.js.map
|
|
@ -0,0 +1,4 @@
|
|||
|
||||
## 4.8.0-dev.2
|
||||
|
||||
* Initial release
|
|
@ -0,0 +1,52 @@
|
|||
[The "BSD 3-clause license"]
|
||||
Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions
|
||||
are met:
|
||||
|
||||
1. Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
2. Redistributions in binary form must reproduce the above copyright
|
||||
notice, this list of conditions and the following disclaimer in the
|
||||
documentation and/or other materials provided with the distribution.
|
||||
3. Neither the name of the copyright holder nor the names of its contributors
|
||||
may be used to endorse or promote products derived from this software
|
||||
without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
|
||||
IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
|
||||
OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
|
||||
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
|
||||
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
|
||||
NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
|
||||
THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
=====
|
||||
|
||||
MIT License for codepointat.js from https://git.io/codepointat
|
||||
MIT License for fromcodepoint.js from https://git.io/vDW1m
|
||||
|
||||
Copyright Mathias Bynens <https://mathiasbynens.be/>
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining
|
||||
a copy of this software and associated documentation files (the
|
||||
"Software"), to deal in the Software without restriction, including
|
||||
without limitation the rights to use, copy, modify, merge, publish,
|
||||
distribute, sublicense, and/or sell copies of the Software, and to
|
||||
permit persons to whom the Software is furnished to do so, subject to
|
||||
the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be
|
||||
included in all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
||||
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
|
||||
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
|
||||
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
|
||||
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
|
@ -0,0 +1,11 @@
|
|||
# Dart target for ANTLR 4
|
||||
|
||||
Dart runtime libraries for ANTLR 4
|
||||
|
||||
This runtime is available through [pub](https://pub.dev). The package name is 'antlr4'.
|
||||
|
||||
See www.antlr.org for more information on ANTLR.
|
||||
|
||||
See https://github.com/antlr/antlr4/blob/master/doc/dart-target.md for more information on using ANTLR in Dart.
|
||||
|
||||
|
|
@ -0,0 +1 @@
|
|||
include: package:pedantic/analysis_options.yaml
|
|
@ -0,0 +1,21 @@
|
|||
/*
|
||||
* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
|
||||
* Use of this file is governed by the BSD 3-clause license that
|
||||
* can be found in the LICENSE.txt file in the project root.
|
||||
*/
|
||||
|
||||
library antlr4;
|
||||
|
||||
export 'src/atn/atn.dart';
|
||||
export 'src/dfa/dfa.dart';
|
||||
export 'src/tree/tree.dart';
|
||||
export 'src/error/error.dart';
|
||||
export 'src/rule_context.dart';
|
||||
export 'src/input_stream.dart';
|
||||
export 'src/token_stream.dart';
|
||||
export 'src/lexer.dart';
|
||||
export 'src/parser.dart';
|
||||
export 'src/parser_rule_context.dart';
|
||||
export 'src/vocabulary.dart';
|
||||
export 'src/runtime_meta_data.dart';
|
||||
export 'src/token.dart';
|
|
@ -0,0 +1,18 @@
|
|||
/*
|
||||
* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
|
||||
* Use of this file is governed by the BSD 3-clause license that
|
||||
* can be found in the LICENSE.txt file in the project root.
|
||||
*/
|
||||
|
||||
export 'src/atn.dart';
|
||||
export 'src/atn_config.dart';
|
||||
export 'src/atn_config_set.dart';
|
||||
export 'src/atn_deserializer.dart';
|
||||
export 'src/atn_simulator.dart';
|
||||
export 'src/atn_state.dart';
|
||||
export 'src/info.dart';
|
||||
export 'src/lexer_action_executor.dart';
|
||||
export 'src/lexer_atn_simulator.dart';
|
||||
export 'src/parser_atn_simulator.dart';
|
||||
export 'src/profiling_atn_simulator.dart';
|
||||
export 'src/transition.dart';
|
|
@ -0,0 +1,170 @@
|
|||
/*
|
||||
* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
|
||||
* Use of this file is governed by the BSD 3-clause license that
|
||||
* can be found in the LICENSE.txt file in the project root.
|
||||
*/
|
||||
|
||||
import '../../interval_set.dart';
|
||||
import '../../ll1_analyzer.dart';
|
||||
import '../../rule_context.dart';
|
||||
import '../../token.dart';
|
||||
import 'atn_state.dart';
|
||||
import 'atn_type.dart';
|
||||
import 'lexer_action.dart';
|
||||
import 'transition.dart';
|
||||
|
||||
class ATN {
|
||||
static final INVALID_ALT_NUMBER = 0;
|
||||
|
||||
List<ATNState> states = [];
|
||||
|
||||
/// Each subrule/rule is a decision point and we must track them so we
|
||||
/// can go back later and build DFA predictors for them. This includes
|
||||
/// all the rules, subrules, optional blocks, ()+, ()* etc...
|
||||
List<DecisionState> decisionToState = [];
|
||||
|
||||
/// Maps from rule index to starting state number.
|
||||
List<RuleStartState> ruleToStartState;
|
||||
|
||||
/// Maps from rule index to stop state number.
|
||||
List<RuleStopState> ruleToStopState;
|
||||
|
||||
Map<String, TokensStartState> modeNameToStartState = {};
|
||||
|
||||
/// The type of the ATN.
|
||||
final ATNType grammarType;
|
||||
|
||||
/// The maximum value for any symbol recognized by a transition in the ATN.
|
||||
final int maxTokenType;
|
||||
|
||||
/// For lexer ATNs, this maps the rule index to the resulting token type.
|
||||
/// For parser ATNs, this maps the rule index to the generated bypass token
|
||||
/// type if the
|
||||
/// {@link ATNDeserializationOptions#isGenerateRuleBypassTransitions}
|
||||
/// deserialization option was specified; otherwise, this is null.
|
||||
List<int> ruleToTokenType;
|
||||
|
||||
/// For lexer ATNs, this is an array of [LexerAction] objects which may
|
||||
/// be referenced by action transitions in the ATN.
|
||||
List<LexerAction> lexerActions;
|
||||
|
||||
List<TokensStartState> modeToStartState = [];
|
||||
|
||||
/// Used for runtime deserialization of ATNs from strings */
|
||||
ATN(this.grammarType, this.maxTokenType);
|
||||
|
||||
/// TODO merge doc comment
|
||||
/// Compute the set of valid tokens that can occur starting in state [s].
|
||||
/// If [ctx] is null, the set of tokens will not include what can follow
|
||||
/// the rule surrounding [s]. In other words, the set will be
|
||||
/// restricted to tokens reachable staying within [s]'s rule.
|
||||
///
|
||||
/// Compute the set of valid tokens that can occur starting in [s] and
|
||||
/// staying in same rule. {@link Token#EPSILON} is in set if we reach end of
|
||||
/// rule.
|
||||
IntervalSet nextTokens(ATNState s, [RuleContext ctx]) {
|
||||
if (ctx != null) {
|
||||
return LL1Analyzer(this).LOOK(s, ctx);
|
||||
}
|
||||
if (s.nextTokenWithinRule != null) return s.nextTokenWithinRule;
|
||||
s.nextTokenWithinRule = LL1Analyzer(this).LOOK(s, null);
|
||||
s.nextTokenWithinRule.setReadonly(true);
|
||||
return s.nextTokenWithinRule;
|
||||
}
|
||||
|
||||
void addState(ATNState state) {
|
||||
if (state != null) {
|
||||
state.atn = this;
|
||||
state.stateNumber = states.length;
|
||||
}
|
||||
|
||||
states.add(state);
|
||||
}
|
||||
|
||||
void removeState(ATNState state) {
|
||||
states[state.stateNumber] =
|
||||
null; // just free mem, don't shift states in list
|
||||
}
|
||||
|
||||
int defineDecisionState(DecisionState s) {
|
||||
decisionToState.add(s);
|
||||
s.decision = decisionToState.length - 1;
|
||||
return s.decision;
|
||||
}
|
||||
|
||||
DecisionState getDecisionState(int decision) {
|
||||
if (decisionToState.isNotEmpty) {
|
||||
return decisionToState[decision];
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
int get numberOfDecisions {
|
||||
return decisionToState.length;
|
||||
}
|
||||
|
||||
/// Computes the set of input symbols which could follow ATN state number
|
||||
/// [stateNumber] in the specified full [context]. This method
|
||||
/// considers the complete parser context, but does not evaluate semantic
|
||||
/// predicates (i.e. all predicates encountered during the calculation are
|
||||
/// assumed true). If a path in the ATN exists from the starting state to the
|
||||
/// [RuleStopState] of the outermost context without matching any
|
||||
/// symbols, {@link Token#EOF} is added to the returned set.
|
||||
///
|
||||
/// <p>If [context] is null, it is treated as {@link ParserRuleContext#EMPTY}.</p>
|
||||
///
|
||||
/// Note that this does NOT give you the set of all tokens that could
|
||||
/// appear at a given token position in the input phrase. In other words,
|
||||
/// it does not answer:
|
||||
///
|
||||
/// "Given a specific partial input phrase, return the set of all tokens
|
||||
/// that can follow the last token in the input phrase."
|
||||
///
|
||||
/// The big difference is that with just the input, the parser could
|
||||
/// land right in the middle of a lookahead decision. Getting
|
||||
/// all *possible* tokens given a partial input stream is a separate
|
||||
/// computation. See https://github.com/antlr/antlr4/issues/1428
|
||||
///
|
||||
/// For this function, we are specifying an ATN state and call stack to compute
|
||||
/// what token(s) can come next and specifically: outside of a lookahead decision.
|
||||
/// That is what you want for error reporting and recovery upon parse error.
|
||||
///
|
||||
/// @param stateNumber the ATN state number
|
||||
/// @param context the full parse context
|
||||
/// @return The set of potentially valid input symbols which could follow the
|
||||
/// specified state in the specified context.
|
||||
/// @throws IllegalArgumentException if the ATN does not contain a state with
|
||||
/// number [stateNumber]
|
||||
IntervalSet getExpectedTokens(int stateNumber, RuleContext context) {
|
||||
if (stateNumber < 0 || stateNumber >= states.length) {
|
||||
throw RangeError.index(stateNumber, states, 'stateNumber');
|
||||
}
|
||||
|
||||
var ctx = context;
|
||||
final s = states[stateNumber];
|
||||
var following = nextTokens(s);
|
||||
if (!following.contains(Token.EPSILON)) {
|
||||
return following;
|
||||
}
|
||||
|
||||
final expected = IntervalSet();
|
||||
expected.addAll(following);
|
||||
expected.remove(Token.EPSILON);
|
||||
while (ctx != null &&
|
||||
ctx.invokingState >= 0 &&
|
||||
following.contains(Token.EPSILON)) {
|
||||
final invokingState = states[ctx.invokingState];
|
||||
RuleTransition rt = invokingState.transition(0);
|
||||
following = nextTokens(rt.followState);
|
||||
expected.addAll(following);
|
||||
expected.remove(Token.EPSILON);
|
||||
ctx = ctx.parent;
|
||||
}
|
||||
|
||||
if (following.contains(Token.EPSILON)) {
|
||||
expected.addOne(Token.EOF);
|
||||
}
|
||||
|
||||
return expected;
|
||||
}
|
||||
}
|
|
@ -0,0 +1,242 @@
|
|||
/*
|
||||
* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
|
||||
* Use of this file is governed by the BSD 3-clause license that
|
||||
* can be found in the LICENSE.txt file in the project root.
|
||||
*/
|
||||
|
||||
import '../../prediction_context.dart';
|
||||
import '../../recognizer.dart';
|
||||
import '../../util/murmur_hash.dart';
|
||||
import 'atn_state.dart';
|
||||
import 'lexer_action_executor.dart';
|
||||
import 'semantic_context.dart';
|
||||
|
||||
Map<String, dynamic> checkParams(params, isCfg) {
|
||||
if (params == null) {
|
||||
final result = <String, dynamic>{
|
||||
'state': null,
|
||||
'alt': null,
|
||||
'context': null,
|
||||
'semanticContext': null
|
||||
};
|
||||
if (isCfg) {
|
||||
result['reachesIntoOuterContext'] = 0;
|
||||
}
|
||||
return result;
|
||||
} else {
|
||||
final props = <String, dynamic>{};
|
||||
props['state'] = params.state;
|
||||
props['alt'] = (params.alt == null) ? null : params.alt;
|
||||
props['context'] = params.context;
|
||||
props['semanticContext'] = params.semanticContext;
|
||||
if (isCfg) {
|
||||
props['reachesIntoOuterContext'] = params.reachesIntoOuterContext ?? 0;
|
||||
props['precedenceFilterSuppressed'] =
|
||||
params.precedenceFilterSuppressed ?? false;
|
||||
}
|
||||
return props;
|
||||
}
|
||||
}
|
||||
|
||||
/// A tuple: (ATN state, predicted alt, syntactic, semantic context).
|
||||
/// The syntactic context is a graph-structured stack node whose
|
||||
/// path(s) to the root is the rule invocation(s)
|
||||
/// chain used to arrive at the state. The semantic context is
|
||||
/// the tree of semantic predicates encountered before reaching
|
||||
/// an ATN state.
|
||||
class ATNConfig {
|
||||
/// This field stores the bit mask for implementing the
|
||||
/// {@link #isPrecedenceFilterSuppressed} property as a bit within the
|
||||
/// existing {@link #reachesIntoOuterContext} field.
|
||||
static final int SUPPRESS_PRECEDENCE_FILTER = 0x40000000;
|
||||
|
||||
/// The ATN state associated with this configuration */
|
||||
ATNState state;
|
||||
|
||||
/// What alt (or lexer rule) is predicted by this configuration */
|
||||
int alt;
|
||||
|
||||
/// The stack of invoking states leading to the rule/states associated
|
||||
/// with this config. We track only those contexts pushed during
|
||||
/// execution of the ATN simulator.
|
||||
PredictionContext context;
|
||||
|
||||
/// We cannot execute predicates dependent upon local context unless
|
||||
/// we know for sure we are in the correct context. Because there is
|
||||
/// no way to do this efficiently, we simply cannot evaluate
|
||||
/// dependent predicates unless we are in the rule that initially
|
||||
/// invokes the ATN simulator.
|
||||
///
|
||||
/// <p>
|
||||
/// closure() tracks the depth of how far we dip into the outer context:
|
||||
/// depth > 0. Note that it may not be totally accurate depth since I
|
||||
/// don't ever decrement. TODO: make it a bool then</p>
|
||||
///
|
||||
/// <p>
|
||||
/// For memory efficiency, the {@link #isPrecedenceFilterSuppressed} method
|
||||
/// is also backed by this field. Since the field is ly accessible, the
|
||||
/// highest bit which would not cause the value to become negative is used to
|
||||
/// store this field. This choice minimizes the risk that code which only
|
||||
/// compares this value to 0 would be affected by the new purpose of the
|
||||
/// flag. It also ensures the performance of the existing [ATNConfig]
|
||||
/// constructors as well as certain operations like
|
||||
/// {@link ATNConfigSet#add(ATNConfig, DoubleKeyMap)} method are
|
||||
/// <em>completely</em> unaffected by the change.</p>
|
||||
int reachesIntoOuterContext = 0;
|
||||
|
||||
SemanticContext semanticContext;
|
||||
|
||||
ATNConfig(this.state, this.alt, this.context,
|
||||
[this.semanticContext = SemanticContext.NONE]);
|
||||
|
||||
ATNConfig.dup(ATNConfig c,
|
||||
{this.state, this.alt, this.context, this.semanticContext}) {
|
||||
state = state ?? c.state;
|
||||
alt = alt ?? c.alt;
|
||||
context = context ?? c.context;
|
||||
semanticContext = semanticContext ?? c.semanticContext;
|
||||
reachesIntoOuterContext =
|
||||
c.reachesIntoOuterContext ?? reachesIntoOuterContext;
|
||||
}
|
||||
|
||||
/// This method gets the value of the {@link #reachesIntoOuterContext} field
|
||||
/// as it existed prior to the introduction of the
|
||||
/// {@link #isPrecedenceFilterSuppressed} method.
|
||||
int get outerContextDepth {
|
||||
return reachesIntoOuterContext & ~SUPPRESS_PRECEDENCE_FILTER;
|
||||
}
|
||||
|
||||
bool isPrecedenceFilterSuppressed() {
|
||||
return (reachesIntoOuterContext & SUPPRESS_PRECEDENCE_FILTER) != 0;
|
||||
}
|
||||
|
||||
void setPrecedenceFilterSuppressed(bool value) {
|
||||
if (value) {
|
||||
reachesIntoOuterContext |= 0x40000000;
|
||||
} else {
|
||||
reachesIntoOuterContext &= ~SUPPRESS_PRECEDENCE_FILTER;
|
||||
}
|
||||
}
|
||||
|
||||
/// An ATN configuration is equal to another if both have
|
||||
/// the same state, they predict the same alternative, and
|
||||
/// syntactic/semantic contexts are the same.
|
||||
@override
|
||||
bool operator ==(Object other) {
|
||||
if (other is ATNConfig && other != null) {
|
||||
return state.stateNumber == other.state.stateNumber &&
|
||||
alt == other.alt &&
|
||||
(context == other.context ||
|
||||
(context != null && context == other.context)) &&
|
||||
semanticContext == other.semanticContext &&
|
||||
isPrecedenceFilterSuppressed() ==
|
||||
other.isPrecedenceFilterSuppressed();
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
@override
|
||||
int get hashCode {
|
||||
var hashCode = MurmurHash.initialize(7);
|
||||
hashCode = MurmurHash.update(hashCode, state.stateNumber);
|
||||
hashCode = MurmurHash.update(hashCode, alt);
|
||||
hashCode = MurmurHash.update(hashCode, context);
|
||||
hashCode = MurmurHash.update(hashCode, semanticContext);
|
||||
hashCode = MurmurHash.finish(hashCode, 4);
|
||||
return hashCode;
|
||||
}
|
||||
|
||||
@override
|
||||
String toString([Recognizer recog, bool showAlt = true]) {
|
||||
final buf = StringBuffer();
|
||||
// if ( state.ruleIndex>=0 ) {
|
||||
// if ( recog!=null ) buf.write(recog.ruleNames[state.ruleIndex]+":");
|
||||
// else buf.write(state.ruleIndex+":");
|
||||
// }
|
||||
buf.write('(');
|
||||
buf.write(state);
|
||||
if (showAlt) {
|
||||
buf.write(',');
|
||||
buf.write(alt);
|
||||
}
|
||||
if (context != null) {
|
||||
buf.write(',[');
|
||||
buf.write(context.toString());
|
||||
buf.write(']');
|
||||
}
|
||||
if (semanticContext != null && semanticContext != SemanticContext.NONE) {
|
||||
buf.write(',');
|
||||
buf.write(semanticContext);
|
||||
}
|
||||
if (outerContextDepth > 0) {
|
||||
buf.write(',up=');
|
||||
buf.write(outerContextDepth);
|
||||
}
|
||||
buf.write(')');
|
||||
return buf.toString();
|
||||
}
|
||||
}
|
||||
|
||||
class LexerATNConfig extends ATNConfig {
|
||||
/// Gets the [LexerActionExecutor] capable of executing the embedded
|
||||
/// action(s) for the current configuration.
|
||||
LexerActionExecutor lexerActionExecutor;
|
||||
|
||||
bool passedThroughNonGreedyDecision = false;
|
||||
|
||||
LexerATNConfig(ATNState state, int alt, PredictionContext context,
|
||||
[this.lexerActionExecutor])
|
||||
: super(state, alt, context, SemanticContext.NONE) {
|
||||
passedThroughNonGreedyDecision = false;
|
||||
}
|
||||
|
||||
LexerATNConfig.dup(LexerATNConfig c, ATNState state,
|
||||
{this.lexerActionExecutor, PredictionContext context})
|
||||
: super.dup(c, state: state, context: context) {
|
||||
lexerActionExecutor = lexerActionExecutor ?? c.lexerActionExecutor;
|
||||
passedThroughNonGreedyDecision = checkNonGreedyDecision(c, state);
|
||||
}
|
||||
|
||||
bool hasPassedThroughNonGreedyDecision() {
|
||||
return passedThroughNonGreedyDecision;
|
||||
}
|
||||
|
||||
@override
|
||||
int get hashCode {
|
||||
var hashCode = MurmurHash.initialize(7);
|
||||
hashCode = MurmurHash.update(hashCode, state.stateNumber);
|
||||
hashCode = MurmurHash.update(hashCode, alt);
|
||||
hashCode = MurmurHash.update(hashCode, context);
|
||||
hashCode = MurmurHash.update(hashCode, semanticContext);
|
||||
hashCode =
|
||||
MurmurHash.update(hashCode, passedThroughNonGreedyDecision ? 1 : 0);
|
||||
hashCode = MurmurHash.update(hashCode, lexerActionExecutor);
|
||||
hashCode = MurmurHash.finish(hashCode, 6);
|
||||
return hashCode;
|
||||
}
|
||||
|
||||
@override
|
||||
bool operator ==(Object other) {
|
||||
if (identical(this, other)) {
|
||||
return true;
|
||||
} else if (other is LexerATNConfig) {
|
||||
final lexerOther = other;
|
||||
if (passedThroughNonGreedyDecision !=
|
||||
lexerOther.passedThroughNonGreedyDecision) {
|
||||
return false;
|
||||
}
|
||||
|
||||
if (lexerActionExecutor != lexerOther.lexerActionExecutor) {
|
||||
return false;
|
||||
}
|
||||
|
||||
return super == other;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
static bool checkNonGreedyDecision(LexerATNConfig source, ATNState target) {
|
||||
return source.passedThroughNonGreedyDecision ||
|
||||
target is DecisionState && target.nonGreedy;
|
||||
}
|
||||
}
|
|
@ -0,0 +1,283 @@
|
|||
/*
|
||||
* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
|
||||
* Use of this file is governed by the BSD 3-clause license that
|
||||
* can be found in the LICENSE.txt file in the project root.
|
||||
*/
|
||||
|
||||
import 'dart:collection';
|
||||
import 'dart:math';
|
||||
|
||||
import 'package:collection/collection.dart';
|
||||
|
||||
import '../../misc/pair.dart';
|
||||
import '../../prediction_context.dart';
|
||||
import '../../util/bit_set.dart';
|
||||
import '../../util/utils.dart';
|
||||
import 'atn.dart';
|
||||
import 'atn_config.dart';
|
||||
import 'atn_state.dart';
|
||||
import 'semantic_context.dart';
|
||||
|
||||
class ATNConfigSet extends Iterable<ATNConfig> {
|
||||
/// Indicates that the set of configurations is read-only. Do not
|
||||
/// allow any code to manipulate the set; DFA states will point at
|
||||
/// the sets and they must not change. This does not protect the other
|
||||
/// fields; in particular, conflictingAlts is set after
|
||||
/// we've made this readonly.
|
||||
bool _readOnly = false;
|
||||
|
||||
bool get readOnly => _readOnly;
|
||||
|
||||
set readOnly(bool readOnly) {
|
||||
_readOnly = readOnly;
|
||||
if (readOnly) {
|
||||
configLookup = null; // can't mod, no need for lookup cache
|
||||
}
|
||||
}
|
||||
|
||||
/// The reason that we need this is because we don't want the hash map to use
|
||||
/// the standard hash code and equals. We need all configurations with the same
|
||||
/// {@code (s,i,_,semctx)} to be equal. Unfortunately, this key effectively doubles
|
||||
/// the number of objects associated with ATNConfigs. The other solution is to
|
||||
/// use a hash table that lets us specify the equals/hashcode operation.
|
||||
///
|
||||
/// All configs but hashed by (s, i, _, pi) not including context. Wiped out
|
||||
/// when we go readonly as this set becomes a DFA state.
|
||||
Set<ATNConfig> configLookup = HashSet<ATNConfig>(equals: (a, b) {
|
||||
if (a == null || b == null) return false;
|
||||
return a.state.stateNumber == b.state.stateNumber &&
|
||||
a.alt == b.alt &&
|
||||
a.semanticContext == b.semanticContext;
|
||||
}, hashCode: (ATNConfig o) {
|
||||
var hashCode = 7;
|
||||
hashCode = 31 * hashCode + o.state.stateNumber;
|
||||
hashCode = 31 * hashCode + o.alt;
|
||||
hashCode = 31 * hashCode + o.semanticContext.hashCode;
|
||||
return hashCode;
|
||||
});
|
||||
|
||||
/// Track the elements as they are added to the set; supports get(i) */
|
||||
final List<ATNConfig> configs = [];
|
||||
|
||||
// TODO: these fields make me pretty uncomfortable but nice to pack up info together, saves recomputation
|
||||
// TODO: can we track conflicts as they are added to save scanning configs later?
|
||||
int uniqueAlt = 0;
|
||||
|
||||
/// Currently this is only used when we detect SLL conflict; this does
|
||||
/// not necessarily represent the ambiguous alternatives. In fact,
|
||||
/// I should also point out that this seems to include predicated alternatives
|
||||
/// that have predicates that evaluate to false. Computed in computeTargetState().
|
||||
BitSet conflictingAlts;
|
||||
|
||||
// Used in parser and lexer. In lexer, it indicates we hit a pred
|
||||
// while computing a closure operation. Don't make a DFA state from this.
|
||||
bool hasSemanticContext = false;
|
||||
bool dipsIntoOuterContext = false;
|
||||
|
||||
/// Indicates that this configuration set is part of a full context
|
||||
/// LL prediction. It will be used to determine how to merge $. With SLL
|
||||
/// it's a wildcard whereas it is not for LL context merge.
|
||||
bool fullCtx;
|
||||
|
||||
int cachedHashCode = -1;
|
||||
|
||||
ATNConfigSet([this.fullCtx = true]);
|
||||
|
||||
ATNConfigSet.dup(ATNConfigSet old) {
|
||||
fullCtx = old.fullCtx;
|
||||
addAll(old);
|
||||
uniqueAlt = old.uniqueAlt;
|
||||
conflictingAlts = old.conflictingAlts;
|
||||
hasSemanticContext = old.hasSemanticContext;
|
||||
dipsIntoOuterContext = old.dipsIntoOuterContext;
|
||||
}
|
||||
|
||||
/// Adding a new config means merging contexts with existing configs for
|
||||
/// {@code (s, i, pi, _)}, where [s] is the
|
||||
/// {@link ATNConfig#state}, [i] is the {@link ATNConfig#alt}, and
|
||||
/// [pi] is the {@link ATNConfig#semanticContext}. We use
|
||||
/// {@code (s,i,pi)} as key.
|
||||
///
|
||||
/// <p>This method updates {@link #dipsIntoOuterContext} and
|
||||
/// {@link #hasSemanticContext} when necessary.</p>
|
||||
bool add(ATNConfig config,
|
||||
[Map<Pair<PredictionContext, PredictionContext>, PredictionContext>
|
||||
mergeCache]) {
|
||||
if (readOnly) throw StateError('This set is readonly');
|
||||
if (config.semanticContext != SemanticContext.NONE) {
|
||||
hasSemanticContext = true;
|
||||
}
|
||||
if (config.outerContextDepth > 0) {
|
||||
dipsIntoOuterContext = true;
|
||||
}
|
||||
final existing = configLookup.lookup(config) ?? config;
|
||||
if (identical(existing, config)) {
|
||||
// we added this new one
|
||||
cachedHashCode = -1;
|
||||
configLookup.add(config);
|
||||
configs.add(config); // track order here
|
||||
return true;
|
||||
}
|
||||
// a previous (s,i,pi,_), merge with it and save result
|
||||
final rootIsWildcard = !fullCtx;
|
||||
final merged = PredictionContext.merge(
|
||||
existing.context, config.context, rootIsWildcard, mergeCache);
|
||||
// no need to check for existing.context, config.context in cache
|
||||
// since only way to create new graphs is "call rule" and here. We
|
||||
// cache at both places.
|
||||
existing.reachesIntoOuterContext =
|
||||
max(existing.reachesIntoOuterContext, config.reachesIntoOuterContext);
|
||||
|
||||
// make sure to preserve the precedence filter suppression during the merge
|
||||
if (config.isPrecedenceFilterSuppressed()) {
|
||||
existing.setPrecedenceFilterSuppressed(true);
|
||||
}
|
||||
|
||||
existing.context = merged; // replace context; no need to alt mapping
|
||||
return true;
|
||||
}
|
||||
|
||||
/// Return a List holding list of configs */
|
||||
List<ATNConfig> get elements {
|
||||
return configs;
|
||||
}
|
||||
|
||||
Set<ATNState> get states {
|
||||
final states = <ATNState>{};
|
||||
for (var i = 0; i < configs.length; i++) {
|
||||
states.add(configs[i].state);
|
||||
}
|
||||
return states;
|
||||
}
|
||||
|
||||
/// Gets the complete set of represented alternatives for the configuration
|
||||
/// set.
|
||||
///
|
||||
/// @return the set of represented alternatives in this configuration set
|
||||
///
|
||||
/// @since 4.3
|
||||
BitSet get alts {
|
||||
final alts = BitSet();
|
||||
for (var config in configs) {
|
||||
alts.set(config.alt);
|
||||
}
|
||||
return alts;
|
||||
}
|
||||
|
||||
List<SemanticContext> get predicates {
|
||||
final preds = <SemanticContext>[];
|
||||
for (var c in configs) {
|
||||
if (c.semanticContext != SemanticContext.NONE) {
|
||||
preds.add(c.semanticContext);
|
||||
}
|
||||
}
|
||||
return preds;
|
||||
}
|
||||
|
||||
ATNConfig get(int i) {
|
||||
return configs[i];
|
||||
}
|
||||
|
||||
void optimizeConfigs(interpreter) {
|
||||
if (readOnly) throw StateError('This set is readonly');
|
||||
|
||||
if (configLookup.isEmpty) return;
|
||||
|
||||
for (var config in configs) {
|
||||
// int before = PredictionContext.getAllContextNodes(config.context).length;
|
||||
config.context = interpreter.getCachedContext(config.context);
|
||||
// int after = PredictionContext.getAllContextNodes(config.context).length;
|
||||
// System.out.println("configs "+before+"->"+after);
|
||||
}
|
||||
}
|
||||
|
||||
bool addAll(coll) {
|
||||
for (ATNConfig c in coll) {
|
||||
add(c);
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
@override
|
||||
bool operator ==(other) {
|
||||
return identical(this, other) ||
|
||||
(other is ATNConfigSet &&
|
||||
other != null &&
|
||||
ListEquality().equals(configs, other.configs) &&
|
||||
fullCtx == other.fullCtx &&
|
||||
uniqueAlt == other.uniqueAlt &&
|
||||
conflictingAlts == other.conflictingAlts &&
|
||||
hasSemanticContext == other.hasSemanticContext &&
|
||||
dipsIntoOuterContext == other.dipsIntoOuterContext);
|
||||
}
|
||||
|
||||
@override
|
||||
int get hashCode {
|
||||
if (readOnly) {
|
||||
if (cachedHashCode == -1) {
|
||||
cachedHashCode = ListEquality().hash(configs);
|
||||
}
|
||||
|
||||
return cachedHashCode;
|
||||
}
|
||||
|
||||
return ListEquality().hash(configs);
|
||||
}
|
||||
|
||||
@override
|
||||
int get length {
|
||||
return configs.length;
|
||||
}
|
||||
|
||||
@override
|
||||
bool get isEmpty => configs.isEmpty;
|
||||
|
||||
void updateHashCode(hash) {
|
||||
if (readOnly) {
|
||||
if (cachedHashCode == -1) {
|
||||
cachedHashCode = hashCode;
|
||||
}
|
||||
hash.update(cachedHashCode);
|
||||
} else {
|
||||
hash.update(hashCode);
|
||||
}
|
||||
}
|
||||
|
||||
@override
|
||||
bool contains(Object o) {
|
||||
if (configLookup == null) {
|
||||
throw UnsupportedError(
|
||||
'This method is not implemented for readonly sets.');
|
||||
}
|
||||
|
||||
return configLookup.contains(o);
|
||||
}
|
||||
|
||||
@override
|
||||
Iterator<ATNConfig> get iterator => configs.iterator;
|
||||
|
||||
void clear() {
|
||||
if (readOnly) throw StateError('This set is readonly');
|
||||
configs.clear();
|
||||
cachedHashCode = -1;
|
||||
configLookup.clear();
|
||||
}
|
||||
|
||||
@override
|
||||
String toString() {
|
||||
final buf = StringBuffer();
|
||||
buf.write(arrayToString(elements));
|
||||
if (hasSemanticContext) {
|
||||
buf.write(',hasSemanticContext=$hasSemanticContext');
|
||||
}
|
||||
if (uniqueAlt != ATN.INVALID_ALT_NUMBER) buf.write(',uniqueAlt=$uniqueAlt');
|
||||
if (conflictingAlts != null) buf.write(',conflictingAlts=$conflictingAlts');
|
||||
if (dipsIntoOuterContext) buf.write(',dipsIntoOuterContext');
|
||||
return buf.toString();
|
||||
}
|
||||
}
|
||||
|
||||
class OrderedATNConfigSet extends ATNConfigSet {
|
||||
@override
|
||||
final configLookup = <ATNConfig>{};
|
||||
}
|
|
@ -0,0 +1,809 @@
|
|||
/*
|
||||
* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
|
||||
* Use of this file is governed by the BSD 3-clause license that
|
||||
* can be found in the LICENSE.txt file in the project root.
|
||||
*/
|
||||
|
||||
import '../../interval_set.dart';
|
||||
import '../../misc/pair.dart';
|
||||
import '../../token.dart';
|
||||
import 'atn.dart';
|
||||
import 'atn_state.dart';
|
||||
import 'atn_type.dart';
|
||||
import 'lexer_action.dart';
|
||||
import 'transition.dart';
|
||||
|
||||
/// Options controlling how [ATNDeserializer] processes a serialized ATN.
class ATNDeserializationOptions {
  /// Shared, frozen default options: verification on, rule-bypass
  /// transition generation off.
  static final ATNDeserializationOptions defaultOptions =
      ATNDeserializationOptions()..makeReadOnly();

  // Initialized to false: the original left this field unset, so
  // isReadOnly() returned null and `if (isReadOnly())` inside
  // throwIfReadOnly() failed at runtime for any mutable instance.
  bool readOnly = false;

  /// Whether the deserializer verifies ATN structural invariants.
  bool verifyATN;

  /// Whether rule-bypass transitions are generated during deserialization.
  bool generateRuleBypassTransitions;

  /// Creates a new option set, copying the flags of [options] when given.
  ///
  /// The read-only flag is never copied: a new instance is always mutable
  /// until [makeReadOnly] is called.
  ATNDeserializationOptions([ATNDeserializationOptions options]) {
    if (options == null) {
      verifyATN = true;
      generateRuleBypassTransitions = false;
    } else {
      verifyATN = options.verifyATN;
      generateRuleBypassTransitions = options.generateRuleBypassTransitions;
    }
  }

  /// Whether this instance can no longer be mutated.
  bool isReadOnly() {
    return readOnly;
  }

  /// Permanently freezes this instance against further mutation.
  void makeReadOnly() {
    readOnly = true;
  }

  /// Whether ATN verification is enabled.
  bool isVerifyATN() {
    return verifyATN;
  }

  /// Sets the verification flag; throws [StateError] when read-only.
  void setVerifyATN(bool verifyATN) {
    throwIfReadOnly();
    this.verifyATN = verifyATN;
  }

  /// Whether rule-bypass transition generation is enabled.
  bool isGenerateRuleBypassTransitions() {
    return generateRuleBypassTransitions;
  }

  /// Sets the bypass-generation flag; throws [StateError] when read-only.
  void setGenerateRuleBypassTransitions(bool generateRuleBypassTransitions) {
    throwIfReadOnly();
    this.generateRuleBypassTransitions = generateRuleBypassTransitions;
  }

  /// Guard used by the setters above.
  void throwIfReadOnly() {
    if (isReadOnly()) {
      throw StateError('The object is read only.');
    }
  }
}
|
||||
|
||||
/// Deserializes the int-array form of an ATN (as produced by the ANTLR
/// tool) back into a live [ATN] object graph.
class ATNDeserializer {
  /// This value should never change. Updates following this version are
  /// reflected as change in the unique ID SERIALIZED_UUID.
  static final SERIALIZED_VERSION = 3;

  // WARNING: DO NOT MERGE THESE LINES. If UUIDs differ during a merge,
  // resolve the conflict by generating a new ID!

  /// This is the earliest supported serialized UUID.
  static final BASE_SERIALIZED_UUID = '33761B2D-78BB-4A43-8B0B-4F5BEE8AACF3';

  /// This UUID indicates an extension of [BASE_SERIALIZED_UUID] for the
  /// addition of precedence predicates.
  static final ADDED_PRECEDENCE_TRANSITIONS =
      '1DA0C57D-6C06-438A-9B27-10BCB3CE0F61';

  /// This UUID indicates an extension of [ADDED_PRECEDENCE_TRANSITIONS]
  /// for the addition of lexer actions encoded as a sequence of
  /// [LexerAction] instances.
  static final ADDED_LEXER_ACTIONS = 'AADB8D7E-AEEF-4415-AD2B-8204D6CF042E';

  /// This UUID indicates the serialized ATN contains two sets of
  /// IntervalSets, where the second set's values are encoded as
  /// 32-bit integers to support the full Unicode SMP range up to U+10FFFF.
  static final ADDED_UNICODE_SMP = '59627784-3BE5-417A-B9EB-8131A7286089';

  /// This list contains all of the currently supported UUIDs, ordered by
  /// when the feature first appeared in this branch. Feature checks in
  /// [isFeatureSupported] rely on this ordering.
  static final SUPPORTED_UUIDS = [
    BASE_SERIALIZED_UUID,
    ADDED_PRECEDENCE_TRANSITIONS,
    ADDED_LEXER_ACTIONS,
    ADDED_UNICODE_SMP
  ];

  /// This is the current serialized UUID.
  static final SERIALIZED_UUID = ADDED_UNICODE_SMP;

  /// Options controlling verification and bypass-transition generation.
  ATNDeserializationOptions deserializationOptions;

  // The adjusted serialized data currently being read (see reset()).
  List<int> data;
  // Read cursor into [data], advanced by readInt().
  var pos;
  // UUID recorded from the serialized header by checkUUID().
  String uuid;

  /// Creates a deserializer, falling back to
  /// [ATNDeserializationOptions.defaultOptions] when [options] is null.
  ATNDeserializer([options]) {
    deserializationOptions =
        options ?? ATNDeserializationOptions.defaultOptions;
  }
|
||||
|
||||
/// Determines if a particular serialized representation of an ATN supports
|
||||
/// a particular feature, identified by the [UUID] used for serializing
|
||||
/// the ATN at the time the feature was first introduced.
|
||||
///
|
||||
/// @param feature The [UUID] marking the first time the feature was
|
||||
/// supported in the serialized ATN.
|
||||
/// @param actualUuid The [UUID] of the actual serialized ATN which is
|
||||
/// currently being deserialized.
|
||||
/// @return [true] if the [actualUuid] value represents a
|
||||
/// serialized ATN at or after the feature identified by [feature] was
|
||||
/// introduced; otherwise, [false].
|
||||
bool isFeatureSupported(feature, actualUuid) {
|
||||
final idx1 = SUPPORTED_UUIDS.indexOf(feature);
|
||||
if (idx1 < 0) {
|
||||
return false;
|
||||
}
|
||||
final idx2 = SUPPORTED_UUIDS.indexOf(actualUuid);
|
||||
return idx2 >= idx1;
|
||||
}
|
||||
|
||||
ATN deserialize(List<int> data) {
|
||||
reset(data);
|
||||
checkVersion();
|
||||
checkUUID();
|
||||
final atn = readATN();
|
||||
readStates(atn);
|
||||
readRules(atn);
|
||||
readModes(atn);
|
||||
final sets = <IntervalSet>[];
|
||||
// First, deserialize sets with 16-bit arguments <= U+FFFF.
|
||||
readSets(atn, sets, () => readInt());
|
||||
// Next, if the ATN was serialized with the Unicode SMP feature,
|
||||
// deserialize sets with 32-bit arguments <= U+10FFFF.
|
||||
if (isFeatureSupported(ADDED_UNICODE_SMP, uuid)) {
|
||||
readSets(atn, sets, () => readInt32());
|
||||
}
|
||||
readEdges(atn, sets);
|
||||
readDecisions(atn);
|
||||
readLexerActions(atn);
|
||||
markPrecedenceDecisions(atn);
|
||||
verifyATN(atn);
|
||||
if (deserializationOptions.generateRuleBypassTransitions &&
|
||||
atn.grammarType == ATNType.PARSER) {
|
||||
generateRuleBypassTransitions(atn);
|
||||
// re-verify after modification
|
||||
verifyATN(atn);
|
||||
}
|
||||
return atn;
|
||||
}
|
||||
|
||||
/// Each char value in data is shifted by +2 at the entry to this method.
|
||||
/// This is an encoding optimization targeting the serialized values 0
|
||||
/// and -1 (serialized to 0xFFFF), each of which are very common in the
|
||||
/// serialized form of the ATN. In the modified UTF-8 that Java uses for
|
||||
/// compiled string literals, these two character values have multi-byte
|
||||
/// forms. By shifting each value by +2, they become characters 2 and 1
|
||||
/// prior to writing the string, each of which have single-byte
|
||||
/// representations. Since the shift occurs in the tool during ATN
|
||||
/// serialization, each target is responsible for adjusting the values
|
||||
/// during deserialization.
|
||||
///
|
||||
/// As a special case, note that the first element of data is not
|
||||
/// adjusted because it contains the major version number of the
|
||||
/// serialized ATN, which was fixed at 3 at the time the value shifting
|
||||
/// was implemented.
|
||||
void reset(List<int> data) {
|
||||
final adjust = (int c) {
|
||||
final v = c;
|
||||
return v > 1 ? v - 2 : v + 65534;
|
||||
};
|
||||
final temp = data.map(adjust).toList();
|
||||
// don't adjust the first value since that's the version number
|
||||
temp[0] = data[0];
|
||||
this.data = temp;
|
||||
pos = 0;
|
||||
}
|
||||
|
||||
void checkVersion() {
|
||||
final version = readInt();
|
||||
if (version != SERIALIZED_VERSION) {
|
||||
throw ('Could not deserialize ATN with version $version (expected $SERIALIZED_VERSION).');
|
||||
}
|
||||
}
|
||||
|
||||
void checkUUID() {
|
||||
final uuid = readUUID();
|
||||
if (!SUPPORTED_UUIDS.contains(uuid)) {
|
||||
throw ('Could not deserialize ATN with UUID: $uuid (expected $SERIALIZED_UUID or a legacy UUID).');
|
||||
}
|
||||
this.uuid = uuid;
|
||||
}
|
||||
|
||||
ATN readATN() {
|
||||
final grammarType = readInt();
|
||||
final maxTokenType = readInt();
|
||||
return ATN(ATNType.values[grammarType], maxTokenType);
|
||||
}
|
||||
|
||||
  /// Reads every ATN state, then resolves the cross-references (loop-back
  /// and block-end links) that can only be wired once all state objects
  /// exist.
  void readStates(ATN atn) {
    final loopBackStateNumbers = <Pair<LoopEndState, int>>[];
    final endStateNumbers = <Pair<BlockStartState, int>>[];
    final nstates = readInt();
    for (var i = 0; i < nstates; i++) {
      final stype = StateType.values[readInt()];
      // ignore bad type of states; a null placeholder keeps state numbers
      // aligned with list indices
      if (stype == StateType.INVALID_TYPE) {
        atn.addState(null);
        continue;
      }

      var ruleIndex = readInt();
      // 0xFFFF is the serialized sentinel for "no rule" (-1).
      if (ruleIndex == 0xFFFF) {
        ruleIndex = -1;
      }

      final s = stateFactory(stype, ruleIndex);
      if (s is LoopEndState) {
        // special case: the loop-back target is stored by state number and
        // resolved below, after all states are created
        final loopBackStateNumber = readInt();
        loopBackStateNumbers.add(Pair(s, loopBackStateNumber));
      } else if (s is BlockStartState) {
        final endStateNumber = readInt();
        endStateNumbers.add(Pair(s, endStateNumber));
      }
      atn.addState(s);
    }

    // delay the assignment of loop back and end states until we know all the state instances have been initialized
    for (final pair in loopBackStateNumbers) {
      pair.a.loopBackState = atn.states[pair.b];
    }

    for (final pair in endStateNumbers) {
      pair.a.endState = atn.states[pair.b] as BlockEndState;
    }

    final numNonGreedyStates = readInt();
    for (var i = 0; i < numNonGreedyStates; i++) {
      final stateNumber = readInt();
      (atn.states[stateNumber] as DecisionState).nonGreedy = true;
    }

    // Precedence (left-recursion) markers only exist in newer serialized
    // forms.
    if (isFeatureSupported(ADDED_PRECEDENCE_TRANSITIONS, uuid)) {
      final numPrecedenceStates = readInt();
      for (var i = 0; i < numPrecedenceStates; i++) {
        final stateNumber = readInt();
        (atn.states[stateNumber] as RuleStartState).isLeftRecursiveRule = true;
      }
    }
  }
|
||||
|
||||
void readRules(ATN atn) {
|
||||
final nrules = readInt();
|
||||
if (atn.grammarType == ATNType.LEXER) {
|
||||
atn.ruleToTokenType = List<int>(nrules);
|
||||
}
|
||||
|
||||
atn.ruleToStartState = List<RuleStartState>(nrules);
|
||||
for (var i = 0; i < nrules; i++) {
|
||||
final s = readInt();
|
||||
RuleStartState startState = atn.states[s];
|
||||
atn.ruleToStartState[i] = startState;
|
||||
if (atn.grammarType == ATNType.LEXER) {
|
||||
var tokenType = readInt();
|
||||
if (tokenType == 0xFFFF) {
|
||||
tokenType = Token.EOF;
|
||||
}
|
||||
|
||||
atn.ruleToTokenType[i] = tokenType;
|
||||
|
||||
if (!isFeatureSupported(ADDED_LEXER_ACTIONS, uuid)) {
|
||||
// this piece of unused metadata was serialized prior to the
|
||||
// addition of LexerAction
|
||||
final actionIndexIgnored = readInt();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
atn.ruleToStopState = List<RuleStopState>(nrules);
|
||||
for (var state in atn.states) {
|
||||
if (!(state is RuleStopState)) {
|
||||
continue;
|
||||
}
|
||||
|
||||
RuleStopState stopState = state;
|
||||
atn.ruleToStopState[state.ruleIndex] = stopState;
|
||||
atn.ruleToStartState[state.ruleIndex].stopState = stopState;
|
||||
}
|
||||
}
|
||||
|
||||
void readModes(ATN atn) {
|
||||
final nmodes = readInt();
|
||||
for (var i = 0; i < nmodes; i++) {
|
||||
final s = readInt();
|
||||
atn.modeToStartState.add(atn.states[s] as TokensStartState);
|
||||
}
|
||||
}
|
||||
|
||||
void readSets(ATN atn, List<IntervalSet> sets, readUnicode) {
|
||||
final nsets = readInt();
|
||||
for (var i = 0; i < nsets; i++) {
|
||||
final nintervals = readInt();
|
||||
final set = IntervalSet();
|
||||
sets.add(set);
|
||||
|
||||
final containsEof = readInt() != 0;
|
||||
if (containsEof) {
|
||||
set.addOne(-1);
|
||||
}
|
||||
|
||||
for (var j = 0; j < nintervals; j++) {
|
||||
int a = readUnicode();
|
||||
int b = readUnicode();
|
||||
set.addRange(a, b);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
  /// Reads all serialized transitions, then derives the edges that are
  /// intentionally not serialized: rule-stop follow edges, block
  /// start/end links, and loop-back links.
  void readEdges(ATN atn, sets) {
    final nedges = readInt();
    for (var i = 0; i < nedges; i++) {
      // Each edge is six ints: source, target, transition type, and three
      // type-specific arguments (see edgeFactory).
      final src = readInt();
      final trg = readInt();
      final ttype = TransitionType.values[readInt()];
      final arg1 = readInt();
      final arg2 = readInt();
      final arg3 = readInt();
      final trans =
          edgeFactory(atn, ttype, src, trg, arg1, arg2, arg3, sets);
      // System.out.println("EDGE "+trans.getClass().getSimpleName()+" "+
      //                    src+"->"+trg+
      //                    " "+Transition.serializationNames[ttype]+
      //                    " "+arg1+","+arg2+","+arg3);
      final srcState = atn.states[src];
      srcState.addTransition(trans);
    }

    // edges for rule stop states can be derived, so they aren't serialized
    for (var state in atn.states) {
      for (var i = 0; i < state.numberOfTransitions; i++) {
        final t = state.transition(i);
        if (t is RuleTransition) {
          final ruleTransition = t;
          // Record the rule index when this invocation is the outermost
          // call (precedence 0) of a left-recursive rule; -1 otherwise.
          var outermostPrecedenceReturn = -1;
          if (atn.ruleToStartState[ruleTransition.target.ruleIndex]
              .isLeftRecursiveRule) {
            if (ruleTransition.precedence == 0) {
              outermostPrecedenceReturn = ruleTransition.target.ruleIndex;
            }
          }

          // Every rule invocation implies an epsilon edge from the called
          // rule's stop state back to the invocation's follow state.
          final returnTransition = EpsilonTransition(
              ruleTransition.followState, outermostPrecedenceReturn);
          atn.ruleToStopState[ruleTransition.target.ruleIndex]
              .addTransition(returnTransition);
        }
      }
    }

    for (var state in atn.states) {
      if (state is BlockStartState) {
        // we need to know the end state to set its start state
        if (state.endState == null) {
          throw StateError('');
        }

        // block end states can only be associated to a single block start state
        if (state.endState.startState != null) {
          throw StateError('');
        }

        state.endState.startState = state;
      }

      if (state is PlusLoopbackState) {
        // Wire plus-block start states back to their loop-back state.
        final loopbackState = state;
        for (var i = 0; i < loopbackState.numberOfTransitions; i++) {
          final target = loopbackState.transition(i).target;
          if (target is PlusBlockStartState) {
            target.loopBackState = loopbackState;
          }
        }
      } else if (state is StarLoopbackState) {
        // Wire star-loop entry states back to their loop-back state.
        final loopbackState = state;
        for (var i = 0; i < loopbackState.numberOfTransitions; i++) {
          final target = loopbackState.transition(i).target;
          if (target is StarLoopEntryState) {
            target.loopBackState = loopbackState;
          }
        }
      }
    }
  }
|
||||
|
||||
void readDecisions(ATN atn) {
|
||||
final ndecisions = readInt();
|
||||
for (var i = 1; i <= ndecisions; i++) {
|
||||
final s = readInt();
|
||||
DecisionState decState = atn.states[s];
|
||||
atn.decisionToState.add(decState);
|
||||
decState.decision = i - 1;
|
||||
}
|
||||
}
|
||||
|
||||
  /// Populates [ATN.lexerActions] for lexer ATNs.
  ///
  /// Modern serialized forms carry an explicit action table; legacy forms
  /// (pre-[ADDED_LEXER_ACTIONS]) are converted by rewriting each
  /// [ActionTransition] into a [LexerCustomAction] entry.
  void readLexerActions(ATN atn) {
    if (atn.grammarType == ATNType.LEXER) {
      if (isFeatureSupported(ADDED_LEXER_ACTIONS, uuid)) {
        atn.lexerActions = List<LexerAction>(readInt());
        for (var i = 0; i < atn.lexerActions.length; i++) {
          final actionType = LexerActionType.values[readInt()];
          // 0xFFFF is the serialized sentinel for -1 in both operands.
          var data1 = readInt();
          if (data1 == 0xFFFF) {
            data1 = -1;
          }

          var data2 = readInt();
          if (data2 == 0xFFFF) {
            data2 = -1;
          }
          final lexerAction =
              lexerActionFactory(actionType, data1, data2);

          atn.lexerActions[i] = lexerAction;
        }
      } else {
        // for compatibility with older serialized ATNs, convert the old
        // serialized action index for action transitions to the new
        // form, which is the index of a LexerCustomAction
        final legacyLexerActions = <LexerAction>[];
        for (var state in atn.states) {
          for (var i = 0; i < state.numberOfTransitions; i++) {
            final transition = state.transition(i);
            if (transition is ActionTransition) {
              final ruleIndex = transition.ruleIndex;
              final actionIndex = transition.actionIndex;
              final lexerAction =
                  LexerCustomAction(ruleIndex, actionIndex);
              // Replace the transition so its action index points into
              // legacyLexerActions (the slot the action is about to take).
              state.setTransition(
                  i,
                  ActionTransition(transition.target, ruleIndex,
                      legacyLexerActions.length, false));
              legacyLexerActions.add(lexerAction);
            }
          }
        }

        atn.lexerActions = legacyLexerActions;
      }
    }
  }
|
||||
|
||||
void generateRuleBypassTransitions(ATN atn) {
|
||||
for (var i = 0; i < atn.ruleToStartState.length; i++) {
|
||||
atn.ruleToTokenType[i] = atn.maxTokenType + i + 1;
|
||||
}
|
||||
for (var i = 0; i < atn.ruleToStartState.length; i++) {
|
||||
generateRuleBypassTransition(atn, i);
|
||||
}
|
||||
}
|
||||
|
||||
  /// Rewires rule [idx] so that, in addition to its real body, it can be
  /// matched by a single synthetic bypass token (assigned in
  /// [generateRuleBypassTransitions]): a new block start/end pair wraps
  /// the rule body, with an extra alternative matching the bypass token.
  void generateRuleBypassTransition(ATN atn, int idx) {
    final bypassStart = BasicBlockStartState();
    bypassStart.ruleIndex = idx;
    atn.addState(bypassStart);

    final bypassStop = BlockEndState();
    bypassStop.ruleIndex = idx;
    atn.addState(bypassStop);

    bypassStart.endState = bypassStop;
    atn.defineDecisionState(bypassStart);

    bypassStop.startState = bypassStart;

    ATNState endState;
    Transition excludeTransition;
    if (atn.ruleToStartState[idx].isLeftRecursiveRule) {
      // wrap from the beginning of the rule to the StarLoopEntryState
      endState = null;
      // Scan for the rule's StarLoopEntryState whose final transition
      // leads (via an epsilon-only LoopEndState) to the rule stop state.
      for (var state in atn.states) {
        if (state.ruleIndex != idx) {
          continue;
        }

        if (!(state is StarLoopEntryState)) {
          continue;
        }

        final maybeLoopEndState =
            state.transition(state.numberOfTransitions - 1).target;
        if (!(maybeLoopEndState is LoopEndState)) {
          continue;
        }

        if (maybeLoopEndState.epsilonOnlyTransitions &&
            maybeLoopEndState.transition(0).target is RuleStopState) {
          endState = state;
          break;
        }
      }

      if (endState == null) {
        throw UnsupportedError(
            "Couldn't identify final state of the precedence rule prefix section.");
      }

      // The recursion loop-back edge must not be redirected below.
      excludeTransition =
          (endState as StarLoopEntryState).loopBackState.transition(0);
    } else {
      endState = atn.ruleToStopState[idx];
    }

    // all non-excluded transitions that currently target end state need to target blockEnd instead
    for (var state in atn.states) {
      for (var transition in state.transitions) {
        if (transition == excludeTransition) {
          continue;
        }

        if (transition.target == endState) {
          transition.target = bypassStop;
        }
      }
    }

    // all transitions leaving the rule start state need to leave blockStart instead
    while (atn.ruleToStartState[idx].numberOfTransitions > 0) {
      final transition = atn.ruleToStartState[idx].removeTransition(
          atn.ruleToStartState[idx].numberOfTransitions - 1);
      bypassStart.addTransition(transition);
    }

    // link the new states
    atn.ruleToStartState[idx].addTransition(EpsilonTransition(bypassStart));
    bypassStop.addTransition(EpsilonTransition(endState));

    // The bypass alternative: a single state matching the rule's synthetic
    // bypass token.
    ATNState matchState = BasicState();
    atn.addState(matchState);
    matchState.addTransition(
        AtomTransition(bypassStop, atn.ruleToTokenType[idx]));
    bypassStart.addTransition(EpsilonTransition(matchState));
  }
|
||||
|
||||
/// Analyze the [StarLoopEntryState] states in the specified ATN to set
|
||||
/// the {@link StarLoopEntryState#isPrecedenceDecision} field to the
|
||||
/// correct value.
|
||||
///
|
||||
/// @param atn The ATN.
|
||||
void markPrecedenceDecisions(ATN atn) {
|
||||
for (var state in atn.states) {
|
||||
if (state is StarLoopEntryState) {
|
||||
/* We analyze the ATN to determine if this ATN decision state is the
|
||||
* decision for the closure block that determines whether a
|
||||
* precedence rule should continue or complete.
|
||||
*/
|
||||
if (atn.ruleToStartState[state.ruleIndex].isLeftRecursiveRule) {
|
||||
final maybeLoopEndState =
|
||||
state.transition(state.numberOfTransitions - 1).target;
|
||||
if (maybeLoopEndState is LoopEndState) {
|
||||
if (maybeLoopEndState.epsilonOnlyTransitions &&
|
||||
maybeLoopEndState.transition(0).target is RuleStopState) {
|
||||
state.isPrecedenceDecision = true;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
  /// Validates structural invariants of the deserialized ATN; each
  /// [checkCondition] call asserts one assumption about state wiring.
  void verifyATN(ATN atn) {
    // verify assumptions
    for (var state in atn.states) {
      // null placeholders stand in for invalid serialized states.
      if (state == null) {
        continue;
      }

      // A state with more than one outgoing transition must be
      // epsilon-only.
      checkCondition(state.onlyHasEpsilonTransitions() ||
          state.numberOfTransitions <= 1);

      if (state is PlusBlockStartState) {
        checkCondition(state.loopBackState != null);
      }

      if (state is StarLoopEntryState) {
        final starLoopEntryState = state;
        checkCondition(starLoopEntryState.loopBackState != null);
        checkCondition(starLoopEntryState.numberOfTransitions == 2);

        // A star-loop entry has exactly one block-start and one loop-end
        // successor; their order encodes greediness.
        if (starLoopEntryState.transition(0).target is StarBlockStartState) {
          checkCondition(
              starLoopEntryState.transition(1).target is LoopEndState);
          checkCondition(!starLoopEntryState.nonGreedy);
        } else if (starLoopEntryState.transition(0).target is LoopEndState) {
          checkCondition(
              starLoopEntryState.transition(1).target is StarBlockStartState);
          checkCondition(starLoopEntryState.nonGreedy);
        } else {
          throw StateError('');
        }
      }

      if (state is StarLoopbackState) {
        checkCondition(state.numberOfTransitions == 1);
        checkCondition(state.transition(0).target is StarLoopEntryState);
      }

      if (state is LoopEndState) {
        checkCondition(state.loopBackState != null);
      }

      if (state is RuleStartState) {
        checkCondition(state.stopState != null);
      }

      if (state is BlockStartState) {
        checkCondition(state.endState != null);
      }

      if (state is BlockEndState) {
        checkCondition(state.startState != null);
      }

      if (state is DecisionState) {
        final decisionState = state;
        checkCondition(decisionState.numberOfTransitions <= 1 ||
            decisionState.decision >= 0);
      } else {
        checkCondition(
            state.numberOfTransitions <= 1 || state is RuleStopState);
      }
    }
  }
|
||||
|
||||
void checkCondition(bool condition, [String message = '']) {
|
||||
if (!condition) {
|
||||
throw StateError(message);
|
||||
}
|
||||
}
|
||||
|
||||
int readInt() {
|
||||
return data[pos++];
|
||||
}
|
||||
|
||||
int readInt32() {
|
||||
final low = readInt();
|
||||
final high = readInt();
|
||||
return low | (high << 16);
|
||||
}
|
||||
|
||||
int readLong() {
|
||||
final low = readInt32();
|
||||
final high = readInt32();
|
||||
return (low & 0x00000000FFFFFFFF) | (high << 32);
|
||||
}
|
||||
|
||||
  // Lookup table mapping a byte value (0-255) to its two-digit uppercase
  // hex representation; used by readUUID().
  static final byteToHex = List.generate(256, (i) => i.toRadixString(16).padLeft(2, '0').toUpperCase());
|
||||
|
||||
String readUUID() {
|
||||
final bb = List<int>(16);
|
||||
for (var i = 7; i >= 0; i--) {
|
||||
final int = readInt();
|
||||
/* jshint bitwise: false */
|
||||
bb[(2 * i) + 1] = int & 0xFF;
|
||||
bb[2 * i] = (int >> 8) & 0xFF;
|
||||
}
|
||||
return byteToHex[bb[0]] + byteToHex[bb[1]] +
|
||||
byteToHex[bb[2]] + byteToHex[bb[3]] + '-' +
|
||||
byteToHex[bb[4]] + byteToHex[bb[5]] + '-' +
|
||||
byteToHex[bb[6]] + byteToHex[bb[7]] + '-' +
|
||||
byteToHex[bb[8]] + byteToHex[bb[9]] + '-' +
|
||||
byteToHex[bb[10]] + byteToHex[bb[11]] +
|
||||
byteToHex[bb[12]] + byteToHex[bb[13]] +
|
||||
byteToHex[bb[14]] + byteToHex[bb[15]];
|
||||
}
|
||||
|
||||
  /// Builds the [Transition] for one serialized edge from [src] to [trg].
  ///
  /// The meaning of [arg1]..[arg3] depends on [type]: for RANGE and ATOM,
  /// a non-zero [arg3] marks an EOF label; for SET/NOT_SET, [arg1] indexes
  /// into [sets]; for RULE, [arg1] is the invoked rule's start state.
  Transition edgeFactory(ATN atn, TransitionType type, int src, int trg,
      int arg1, int arg2, int arg3, List<IntervalSet> sets) {
    final target = atn.states[trg];
    switch (type) {
      case TransitionType.EPSILON:
        return EpsilonTransition(target);
      case TransitionType.RANGE:
        // arg3 != 0: range starts at EOF; otherwise [arg1, arg2].
        return arg3 != 0
            ? RangeTransition(target, Token.EOF, arg2)
            : RangeTransition(target, arg1, arg2);
      case TransitionType.RULE:
        // arg1 = rule start state number, arg2 = rule index,
        // arg3 = precedence.
        final rt =
            RuleTransition(atn.states[arg1], arg2, arg3, target);
        return rt;
      case TransitionType.PREDICATE:
        // arg1 = rule index, arg2 = predicate index,
        // arg3 != 0 = context-dependent.
        final pt =
            PredicateTransition(target, arg1, arg2, arg3 != 0);
        return pt;
      case TransitionType.PRECEDENCE:
        return PrecedencePredicateTransition(target, arg1);
      case TransitionType.ATOM:
        // arg3 != 0: the label is EOF; otherwise arg1 is the token type.
        return arg3 != 0
            ? AtomTransition(target, Token.EOF)
            : AtomTransition(target, arg1);
      case TransitionType.ACTION:
        final a =
            ActionTransition(target, arg1, arg2, arg3 != 0);
        return a;
      case TransitionType.SET:
        return SetTransition(target, sets[arg1]);
      case TransitionType.NOT_SET:
        return NotSetTransition(target, sets[arg1]);
      case TransitionType.WILDCARD:
        return WildcardTransition(target);
      case TransitionType.INVALID:
        throw ArgumentError.value(type, 'transition type', 'not valid.');
      default:
        // Defensive: same failure as INVALID for any unhandled value.
        throw ArgumentError.value(type, 'transition type', 'not valid.');
    }
  }
|
||||
|
||||
ATNState stateFactory(StateType type, int ruleIndex) {
|
||||
ATNState s;
|
||||
switch (type) {
|
||||
case StateType.INVALID_TYPE:
|
||||
return null;
|
||||
case StateType.BASIC:
|
||||
s = BasicState();
|
||||
break;
|
||||
case StateType.RULE_START:
|
||||
s = RuleStartState();
|
||||
break;
|
||||
case StateType.BLOCK_START:
|
||||
s = BasicBlockStartState();
|
||||
break;
|
||||
case StateType.PLUS_BLOCK_START:
|
||||
s = PlusBlockStartState();
|
||||
break;
|
||||
case StateType.STAR_BLOCK_START:
|
||||
s = StarBlockStartState();
|
||||
break;
|
||||
case StateType.TOKEN_START:
|
||||
s = TokensStartState();
|
||||
break;
|
||||
case StateType.RULE_STOP:
|
||||
s = RuleStopState();
|
||||
break;
|
||||
case StateType.BLOCK_END:
|
||||
s = BlockEndState();
|
||||
break;
|
||||
case StateType.STAR_LOOP_BACK:
|
||||
s = StarLoopbackState();
|
||||
break;
|
||||
case StateType.STAR_LOOP_ENTRY:
|
||||
s = StarLoopEntryState();
|
||||
break;
|
||||
case StateType.PLUS_LOOP_BACK:
|
||||
s = PlusLoopbackState();
|
||||
break;
|
||||
case StateType.LOOP_END:
|
||||
s = LoopEndState();
|
||||
break;
|
||||
default:
|
||||
throw ArgumentError.value(type, 'state type', 'not valid.');
|
||||
}
|
||||
|
||||
s.ruleIndex = ruleIndex;
|
||||
return s;
|
||||
}
|
||||
|
||||
  /// Builds the [LexerAction] for [type] with serialized operands [data1]
  /// and [data2].
  // NOTE(review): operand meanings are inferred from the constructor
  // parameter positions only — confirm against the LexerAction classes
  // (e.g. CUSTOM presumably takes rule index and action index).
  LexerAction lexerActionFactory(LexerActionType type, int data1, int data2) {
    switch (type) {
      case LexerActionType.CHANNEL:
        return LexerChannelAction(data1);

      case LexerActionType.CUSTOM:
        return LexerCustomAction(data1, data2);

      case LexerActionType.MODE:
        return LexerModeAction(data1);

      case LexerActionType.MORE:
        // Stateless actions are shared singletons.
        return LexerMoreAction.INSTANCE;

      case LexerActionType.POP_MODE:
        return LexerPopModeAction.INSTANCE;

      case LexerActionType.PUSH_MODE:
        return LexerPushModeAction(data1);

      case LexerActionType.SKIP:
        return LexerSkipAction.INSTANCE;

      case LexerActionType.TYPE:
        return LexerTypeAction(data1);
      default:
        throw ArgumentError.value(type, 'lexer action type', 'not valid.');
    }
  }
|
||||
}
|
|
@ -0,0 +1,95 @@
|
|||
/*
|
||||
* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
|
||||
* Use of this file is governed by the BSD 3-clause license that
|
||||
* can be found in the LICENSE.txt file in the project root.
|
||||
*/
|
||||
|
||||
import '../../dfa/dfa.dart';
|
||||
import '../../prediction_context.dart';
|
||||
import 'atn.dart';
|
||||
import 'atn_config_set.dart';
|
||||
|
||||
/// Abstract base for ATN interpreters; holds the ATN being simulated and
/// the shared prediction-context cache.
abstract class ATNSimulator {
  /// Must distinguish between missing edge and edge we know leads nowhere.
  static final DFAState ERROR =
      DFAState(stateNumber: 0x7FFFFFFF, configs: ATNConfigSet());

  /// The ATN this simulator interprets.
  final ATN atn;

  /// The context cache maps all PredictionContext objects that are equals()
  /// to a single cached copy. This cache is shared across all contexts
  /// in all ATNConfigs in all DFA states. We rebuild each ATNConfigSet
  /// to use only cached nodes/graphs in addDFAState(). We don't want to
  /// fill this during closure() since there are lots of contexts that
  /// pop up but are not used ever again. It also greatly slows down closure().
  ///
  /// <p>This cache makes a huge difference in memory and a little bit in speed.
  /// For the Java grammar on java.*, it dropped the memory requirements
  /// at the end from 25M to 16M. We don't store any of the full context
  /// graphs in the DFA because they are limited to local context only,
  /// but apparently there's a lot of repetition there as well. We optimize
  /// the config contexts before storing the config set in the DFA states
  /// by literally rebuilding them with cached subgraphs only.</p>
  ///
  /// <p>I tried a cache for use during closure operations, that was
  /// whacked after each adaptivePredict(). It cost a little bit
  /// more time I think and doesn't save on the overall footprint
  /// so it's not worth the complexity.</p>
  final PredictionContextCache sharedContextCache;

  ATNSimulator(this.atn, this.sharedContextCache);

  /// Resets simulator state; implemented by subclasses.
  void reset();

  /// Clear the DFA cache used by the current instance. Since the DFA cache may
  /// be shared by multiple ATN simulators, this method may affect the
  /// performance (but not accuracy) of other parsers which are being used
  /// concurrently.
  ///
  /// @throws UnsupportedOperationException if the current instance does not
  /// support clearing the DFA.
  ///
  /// @since 4.3
  void clearDFA() {
    throw UnsupportedError(
        'This ATN simulator does not support clearing the DFA.');
  }

  /// Returns the canonical cached form of [context], or [context] itself
  /// when no shared cache is configured.
  PredictionContext getCachedContext(PredictionContext context) {
    if (sharedContextCache == null) return context;

    final visited = <PredictionContext, PredictionContext>{};
    return PredictionContext.getCachedContext(
        context, sharedContextCache, visited);
  }
}
|
||||
|
||||
/// Used to cache [PredictionContext] objects. It backs the shared
/// context cache associated with contexts in DFA states, and can be used
/// for both lexers and parsers.
class PredictionContextCache {
  final cache = <PredictionContext, PredictionContext>{};

  /// Adds [ctx] to the cache and returns the canonical instance.
  ///
  /// If an equal context is already cached, that instance is returned and
  /// the cache is left unchanged. [PredictionContext.EMPTY] is never
  /// stored. Protect shared cache from unsafe thread access.
  PredictionContext add(PredictionContext ctx) {
    if (ctx == PredictionContext.EMPTY) return PredictionContext.EMPTY;
    // putIfAbsent returns the existing entry or inserts and returns ctx —
    // the same get-or-insert the original spelled out by hand.
    return cache.putIfAbsent(ctx, () => ctx);
  }

  /// Returns the cached instance equal to [ctx], or null if absent.
  PredictionContext operator [](PredictionContext ctx) => cache[ctx];

  /// The number of cached contexts.
  int get length => cache.length;
}
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue