Merge branch 'feature/templates' into feature/templates-org
|
@@ -62,5 +62,4 @@ tool/target
|
|||
runtime-testsuite/target
|
||||
tool-testsuite/target
|
||||
runtime/Cpp/demo/generated
|
||||
runtime/Cpp/demo/Mac/antlrcpp.xcodeproj/xcuserdata
|
||||
runtime/Cpp/demo/Mac/antlrcpp.xcodeproj/project.xcworkspace/xcuserdata
|
||||
xcuserdata
|
||||
|
|
|
@@ -0,0 +1,19 @@
|
|||
sudo: true
|
||||
language: java
|
||||
script:
|
||||
- mvn install
|
||||
jdk:
|
||||
- openjdk6
|
||||
- oraclejdk7
|
||||
- oraclejdk8
|
||||
before_install:
|
||||
- sudo apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys 3FA7E0328081BFF6A14DA29AA6A19B38D3D831EF
|
||||
- sudo add-apt-repository ppa:fkrull/deadsnakes -y
|
||||
- sudo add-apt-repository ppa:rwky/nodejs -y
|
||||
- sudo apt-get update -qq
|
||||
- sudo apt-get install -qq python3.5
|
||||
- sudo apt-get install -qq nodejs
|
||||
- echo "deb http://download.mono-project.com/repo/debian wheezy/snapshots/3.12.1 main" | sudo tee /etc/apt/sources.list.d/mono-xamarin.list
|
||||
- sudo apt-get install -qq mono-complete
|
||||
- python --version
|
||||
- python3 --version
|
CHANGES.txt
|
@@ -1,5 +1,45 @@
|
|||
****************************************************************************
|
||||
As of ANTLR 4.2.1, March 25 2014, we are no longer updating this file. Instead,
|
||||
we are using the github release mechanism. For example, here is
|
||||
4.2.1 release notes:
|
||||
|
||||
https://github.com/antlr/antlr4/releases/tag/4.2.1
|
||||
****************************************************************************
|
||||
|
||||
ANTLR v4 Honey Badger
|
||||
|
||||
January 15, 2014
|
||||
|
||||
* Unit tests for lexer actions from yesterday.
|
||||
* Refactored TreeView so we can refresh tree externally w/o creating new one.
|
||||
Needed for intellij plugin.
|
||||
|
||||
January 14, 2014
|
||||
|
||||
* Updated serialized ATN representation of lexer actions, allowing the lexer
|
||||
interpreter to execute the majority of lexer commands (#408)
|
||||
|
||||
January 12, 2014
|
||||
|
||||
* Support executing precedence predicates during the SLL phase of
|
||||
adaptivePredict (#401). The result is a massive performance boost for grammars
|
||||
containing direct left-recursion (improvements of 5% to 1000+% have been
|
||||
observed, depending on the grammar and input).
|
||||
|
||||
December 29, 2013
|
||||
|
||||
* Internal change: Tool.loadGrammar() -> parseGrammar(). Tool.load()->parse()
|
||||
|
||||
* Added Tool.loadGrammar(fileName) that completely parses, extracts implicit lexer,
|
||||
and processes into Grammar object. Does not generate code. Use
|
||||
Grammar.getImplicitLexer() to get the lexer created during processing of
|
||||
combined grammar.
|
||||
|
||||
* Added Grammar.load(fileName) that creates Tool object for you. loadGrammar()
|
||||
lets you create your own Tool for setting error handlers etc...
|
||||
|
||||
final Grammar g = Grammar.load("/tmp/MyGrammar.g4");
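A slightly fuller sketch of the same API, illustrative only; it assumes nothing beyond the
methods named above (Tool.loadGrammar(), Tool.addListener(), Grammar.getImplicitLexer())
plus a hypothetical grammar path:

    import org.antlr.v4.Tool;
    import org.antlr.v4.tool.ANTLRMessage;
    import org.antlr.v4.tool.ANTLRToolListener;
    import org.antlr.v4.tool.Grammar;

    public class LoadGrammarSketch {
        public static void main(String[] args) {
            Tool tool = new Tool();                       // your own Tool, so you control error handling
            tool.addListener(new ANTLRToolListener() {
                @Override public void info(String msg)           { System.out.println(msg); }
                @Override public void error(ANTLRMessage msg)    { System.err.println(msg); }
                @Override public void warning(ANTLRMessage msg)  { System.err.println(msg); }
            });
            Grammar g = tool.loadGrammar("/tmp/MyGrammar.g4");  // parses and processes; no code generation
            Grammar lexer = g.getImplicitLexer();               // lexer split out of a combined grammar (may be null)
            System.out.println("Loaded " + g.name + (lexer != null ? " (+ implicit lexer)" : ""));
        }
    }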
|
||||
|
||||
December 19, 2013
|
||||
|
||||
* Sam:
|
||||
|
@@ -14,19 +54,19 @@ November 24, 2013
|
|||
|
||||
* Ter adds tree pattern matching. Preferred interface:
|
||||
|
||||
ParseTree t = parser.expr();
|
||||
ParseTreePattern p = parser.compileParseTreePattern("<ID>+0", MyParser.RULE_expr);
|
||||
ParseTreeMatch m = p.match(t);
|
||||
String id = m.get("ID");
|
||||
ParseTree t = parser.expr();
|
||||
ParseTreePattern p = parser.compileParseTreePattern("<ID>+0", MyParser.RULE_expr);
|
||||
ParseTreeMatch m = p.match(t);
|
||||
String id = m.get("ID");
|
||||
|
||||
or
|
||||
|
||||
String xpath = "//blockStatement/*";
|
||||
String treePattern = "int <Identifier> = <expression>;";
|
||||
ParseTreePattern p =
|
||||
parser.compileParseTreePattern(treePattern,
|
||||
JavaParser.RULE_localVariableDeclarationStatement);
|
||||
List<ParseTreeMatch> matches = p.findAll(tree, xpath);
|
||||
String xpath = "//blockStatement/*";
|
||||
String treePattern = "int <Identifier> = <expression>;";
|
||||
ParseTreePattern p =
|
||||
parser.compileParseTreePattern(treePattern,
|
||||
JavaParser.RULE_localVariableDeclarationStatement);
|
||||
List<ParseTreeMatch> matches = p.findAll(tree, xpath);
|
||||
|
||||
November 20, 2013
|
||||
|
||||
|
|
|
@@ -1,5 +1,5 @@
|
|||
[The "BSD license"]
|
||||
Copyright (c) 2013 Terence Parr, Sam Harwell
|
||||
Copyright (c) 2015 Terence Parr, Sam Harwell
|
||||
All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
|
|
|
@@ -0,0 +1,47 @@
|
|||
# ANTLR v4
|
||||
|
||||
**ANTLR** (ANother Tool for Language Recognition) is a powerful parser generator for reading, processing, executing, or translating structured text or binary files. It's widely used to build languages, tools, and frameworks. From a grammar, ANTLR generates a parser that can build parse trees and also generates a listener interface (or visitor) that makes it easy to respond to the recognition of phrases of interest.
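For example, here is a minimal sketch of that workflow; the grammar name (Hello), its start rule r, and the generated class names (HelloLexer, HelloParser, HelloBaseListener) are assumptions used for illustration, matching the tiny Hello grammar used in ANTLR's getting-started material:

    import org.antlr.v4.runtime.ANTLRInputStream;
    import org.antlr.v4.runtime.CommonTokenStream;
    import org.antlr.v4.runtime.tree.ParseTree;
    import org.antlr.v4.runtime.tree.ParseTreeWalker;

    public class HelloMain {
        public static void main(String[] args) throws Exception {
            // Lexer and parser generated by ANTLR from a hypothetical Hello.g4 (start rule: r)
            HelloLexer lexer = new HelloLexer(new ANTLRInputStream(System.in));
            HelloParser parser = new HelloParser(new CommonTokenStream(lexer));
            ParseTree tree = parser.r();                  // build the parse tree for rule r
            // React to recognized phrases through the generated listener interface
            ParseTreeWalker.DEFAULT.walk(new HelloBaseListener() {
                @Override
                public void enterR(HelloParser.RContext ctx) {
                    System.out.println("greeting for " + ctx.ID().getText());
                }
            }, tree);
        }
    }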
|
||||
|
||||
*Given day-job constraints, my time working on this project is limited so I'll have to focus first on fixing bugs rather than changing/improving the feature set. Likely I'll do it in bursts every few months. Please do not be offended if your bug or pull request does not yield a response! --parrt*
|
||||
|
||||
## Authors and major contributors
|
||||
|
||||
* [Terence Parr](http://www.cs.usfca.edu/~parrt/), parrt@cs.usfca.edu
|
||||
ANTLR project lead and supreme dictator for life
|
||||
[University of San Francisco](http://www.usfca.edu/)
|
||||
* [Sam Harwell](http://tunnelvisionlabs.com/) (Tool co-author, Java and C# target)
|
||||
* Eric Vergnaud (Javascript, Python2, Python3 targets and significant work on C# target)
|
||||
|
||||
## Useful information
|
||||
|
||||
* [Release notes](https://github.com/antlr/antlr4/releases)
|
||||
* [Getting started with v4](https://raw.githubusercontent.com/antlr/antlr4/master/doc/getting-started.md)
|
||||
* [Official site](http://www.antlr.org/)
|
||||
* [Documentation](https://raw.githubusercontent.com/antlr/antlr4/master/doc/index.md)
|
||||
* [FAQ](https://raw.githubusercontent.com/antlr/antlr4/master/doc/faq/index.md)
|
||||
* [API](http://www.antlr.org/api/Java/index.html)
|
||||
* [ANTLR v3](http://www.antlr3.org/)
|
||||
* [v3 to v4 Migration, differences](https://raw.githubusercontent.com/antlr/antlr4/master/doc/faq/general.md)
|
||||
|
||||
You might also find the following pages useful, particularly if you want to mess around with the various target languages.
|
||||
|
||||
* [How to build ANTLR itself](https://raw.githubusercontent.com/antlr/antlr4/master/doc/building-antlr.md)
|
||||
* [How we create and deploy an ANTLR release](https://raw.githubusercontent.com/antlr/antlr4/master/doc/releasing-antlr.md)
|
||||
|
||||
## The Definitive ANTLR 4 Reference
|
||||
|
||||
Programmers run into parsing problems all the time. Whether it’s a data format like JSON, a network protocol like SMTP, a server configuration file for Apache, a PostScript/PDF file, or a simple spreadsheet macro language—ANTLR v4 and this book will demystify the process. ANTLR v4 has been rewritten from scratch to make it easier than ever to build parsers and the language applications built on top. This completely rewritten new edition of the bestselling Definitive ANTLR Reference shows you how to take advantage of these new features.
|
||||
|
||||
You can buy the book [The Definitive ANTLR 4 Reference](http://amzn.com/1934356999) at amazon or an [electronic version at the publisher's site](https://pragprog.com/book/tpantlr2/the-definitive-antlr-4-reference).
|
||||
|
||||
You will find the [Book source code](http://pragprog.com/titles/tpantlr2/source_code) useful.
|
||||
|
||||
## Additional grammars
|
||||
[This repository](https://github.com/antlr/grammars-v4) is a collection of grammars without actions where the
|
||||
root directory name is the all-lowercase name of the language parsed
|
||||
by the grammar. For example, java, cpp, csharp, c, etc...
|
||||
|
||||
Travis Status
|
||||
---------
|
||||
|
||||
<a href="https://travis-ci.org/antlr/antlr4"><img src="https://api.travis-ci.org/antlr/antlr4.png"></a>
|
README.txt
|
@@ -1,121 +0,0 @@
|
|||
ANTLR v4
|
||||
|
||||
Terence Parr, parrt@cs.usfca.edu
|
||||
ANTLR project lead and supreme dictator for life
|
||||
University of San Francisco
|
||||
|
||||
INTRODUCTION
|
||||
|
||||
Hi and welcome to the Honey Badger 4.1 release of ANTLR!
|
||||
|
||||
INSTALLATION
|
||||
|
||||
UNIX
|
||||
|
||||
0. Install Java (version 1.6 or higher)
|
||||
|
||||
1. Download
|
||||
|
||||
$ cd /usr/local/lib
|
||||
$ curl -O http://www.antlr4.org/download/antlr-4.1-complete.jar
|
||||
|
||||
Or just download in browser using URL:
|
||||
|
||||
http://www.antlr4.org/download/antlr-4.1-complete.jar
|
||||
|
||||
and put it somewhere rational like /usr/local/lib.
|
||||
|
||||
2. Add antlr-4.1-complete.jar to your CLASSPATH:
|
||||
|
||||
$ export CLASSPATH=".:/usr/local/lib/antlr-4.1-complete.jar:$CLASSPATH"
|
||||
|
||||
It is also a good idea to put this in your .bash_profile or whatever your
|
||||
startup script is.
|
||||
|
||||
3. Create aliases for the ANTLR Tool, and TestRig.
|
||||
|
||||
$ alias antlr4='java -jar /usr/local/lib/antlr-4.1-complete.jar'
|
||||
$ alias grun='java org.antlr.v4.runtime.misc.TestRig'
|
||||
|
||||
WINDOWS (Thanks to Graham Wideman)
|
||||
|
||||
0. Install Java (version 1.6 or higher)
|
||||
|
||||
1. Download http://antlr.org/download/antlr-4.1-complete.jar
|
||||
Save to your directory for 3rd party Java libraries, say C:\Javalib
|
||||
|
||||
2. Add antlr-4.1-complete.jar to CLASSPATH, either:
|
||||
|
||||
* Permanently: Using System Properties dialog > Environment variables >
|
||||
Create or append to CLASSPATH variable
|
||||
|
||||
* Temporarily, at command line:
|
||||
SET CLASSPATH=C:\Javalib\antlr-4.1-complete.jar;%CLASSPATH%
|
||||
|
||||
3. Create short convenient commands for the ANTLR Tool, and TestRig,
|
||||
using batch files or doskey commands:
|
||||
|
||||
* Batch files (in directory in system PATH)
|
||||
|
||||
antlr4.bat: java org.antlr.v4.Tool %*
|
||||
run.bat: java org.antlr.v4.runtime.misc.TestRig %*
|
||||
|
||||
* Or, use doskey commands:
|
||||
|
||||
doskey antlr4=java org.antlr.v4.Tool $*
|
||||
doskey grun =java org.antlr.v4.runtime.misc.TestRig $*
|
||||
|
||||
TESTING INSTALLATION
|
||||
|
||||
Either launch org.antlr.v4.Tool directly:
|
||||
|
||||
$ java org.antlr.v4.Tool
|
||||
ANTLR Parser Generator Version 4.1
|
||||
-o ___ specify output directory where all output is generated
|
||||
-lib ___ specify location of .tokens files
|
||||
...
|
||||
|
||||
or use -jar option on java:
|
||||
|
||||
$ java -jar /usr/local/lib/antlr-4.1-complete.jar
|
||||
ANTLR Parser Generator Version 4.1
|
||||
-o ___ specify output directory where all output is generated
|
||||
-lib ___ specify location of .tokens files
|
||||
...
|
||||
|
||||
|
||||
EXAMPLE
|
||||
|
||||
In a temporary directory, put the following grammar inside file Hello.g4:
|
||||
|
||||
// Define a grammar called Hello
|
||||
// match keyword hello followed by an identifier
|
||||
// match lower-case identifiers
|
||||
grammar Hello;
|
||||
r : 'hello' ID ;
|
||||
ID : [a-z]+ ;
|
||||
WS : [ \t\n]+ -> skip ; // skip spaces, tabs, newlines
|
||||
|
||||
Then run the ANTLR tool on it:
|
||||
|
||||
$ cd /tmp
|
||||
$ antlr4 Hello.g4
|
||||
$ javac Hello*.java
|
||||
|
||||
Now test it:
|
||||
|
||||
$ grun Hello r -tree
|
||||
hello parrt
|
||||
^D
|
||||
(r hello parrt)
|
||||
|
||||
(That ^D means EOF on unix; it's ^Z in Windows.) The -tree option prints
|
||||
the parse tree in LISP notation.
|
||||
|
||||
BOOK SOURCE CODE
|
||||
|
||||
http://pragprog.com/titles/tpantlr2/source_code
|
||||
|
||||
GRAMMARS
|
||||
|
||||
https://github.com/antlr/grammars-v4
|
|
@@ -1,7 +0,0 @@
|
|||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<classpath>
|
||||
<classpathentry kind="src" output="target/classes" path="src/main/java"/>
|
||||
<classpathentry kind="con" path="org.eclipse.jdt.launching.JRE_CONTAINER/org.eclipse.jdt.internal.debug.ui.launcher.StandardVMType/JavaSE-1.6"/>
|
||||
<classpathentry kind="con" path="org.eclipse.m2e.MAVEN2_CLASSPATH_CONTAINER"/>
|
||||
<classpathentry kind="output" path="target/classes"/>
|
||||
</classpath>
|
|
@@ -1,23 +0,0 @@
|
|||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<projectDescription>
|
||||
<name>antlr4-maven-plugin</name>
|
||||
<comment></comment>
|
||||
<projects>
|
||||
</projects>
|
||||
<buildSpec>
|
||||
<buildCommand>
|
||||
<name>org.eclipse.jdt.core.javabuilder</name>
|
||||
<arguments>
|
||||
</arguments>
|
||||
</buildCommand>
|
||||
<buildCommand>
|
||||
<name>org.eclipse.m2e.core.maven2Builder</name>
|
||||
<arguments>
|
||||
</arguments>
|
||||
</buildCommand>
|
||||
</buildSpec>
|
||||
<natures>
|
||||
<nature>org.eclipse.jdt.core.javanature</nature>
|
||||
<nature>org.eclipse.m2e.core.maven2Nature</nature>
|
||||
</natures>
|
||||
</projectDescription>
|
|
@@ -1,2 +0,0 @@
|
|||
eclipse.preferences.version=1
|
||||
encoding/<project>=UTF-8
|
|
@@ -1,5 +0,0 @@
|
|||
eclipse.preferences.version=1
|
||||
org.eclipse.jdt.core.compiler.codegen.targetPlatform=1.6
|
||||
org.eclipse.jdt.core.compiler.compliance=1.6
|
||||
org.eclipse.jdt.core.compiler.problem.forbiddenReference=warning
|
||||
org.eclipse.jdt.core.compiler.source=1.6
|
|
@@ -1,4 +0,0 @@
|
|||
activeProfiles=
|
||||
eclipse.preferences.version=1
|
||||
resolveWorkspaceProjects=true
|
||||
version=1
|
|
@@ -1,9 +1,8 @@
|
|||
<!--
|
||||
|
||||
[The "BSD license"]
|
||||
|
||||
ANTLR - Copyright (c) 2005-2010 Terence Parr
|
||||
Maven Plugin - Copyright (c) 2009 Jim Idle
|
||||
[The "BSD license"]
|
||||
ANTLR - Copyright (c) Terence Parr, Sam Harwell
|
||||
Maven Plugin - Copyright (c) Jim Idle
|
||||
|
||||
All rights reserved.
|
||||
|
||||
|
@@ -30,178 +29,132 @@
|
|||
THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
-->
|
||||
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
|
||||
<modelVersion>4.0.0</modelVersion>
|
||||
<parent>
|
||||
<groupId>org.antlr</groupId>
|
||||
<artifactId>antlr4-master</artifactId>
|
||||
<version>4.5.4-SNAPSHOT</version>
|
||||
</parent>
|
||||
<artifactId>antlr4-maven-plugin</artifactId>
|
||||
<packaging>maven-plugin</packaging>
|
||||
<name>ANTLR 4 Maven plugin</name>
|
||||
<description>Maven plugin for ANTLR 4 grammars</description>
|
||||
|
||||
<prerequisites>
|
||||
<maven>3.0</maven>
|
||||
</prerequisites>
|
||||
|
||||
<!-- Ancillary information for completeness -->
|
||||
<inceptionYear>2009</inceptionYear>
|
||||
|
||||
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
|
||||
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
|
||||
<!-- ============================================================================= -->
|
||||
|
||||
<modelVersion>4.0.0</modelVersion>
|
||||
<!-- What are we dependent on for the Mojos to execute? We need the plugin
|
||||
API itself and of course we need the ANTLR Tool and runtime and any of their
|
||||
dependencies, which we inherit. The Tool itself provides us with all the
|
||||
dependencies, so we need only name it here. -->
|
||||
<dependencies>
|
||||
|
||||
<parent>
|
||||
<groupId>org.antlr</groupId>
|
||||
<artifactId>antlr4-master</artifactId>
|
||||
<version>4.1.1-SNAPSHOT</version>
|
||||
</parent>
|
||||
<!-- The things we need to build the target language recognizer -->
|
||||
<dependency>
|
||||
<groupId>org.apache.maven</groupId>
|
||||
<artifactId>maven-plugin-api</artifactId>
|
||||
<version>3.0.5</version>
|
||||
<scope>compile</scope>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.apache.maven</groupId>
|
||||
<artifactId>maven-project</artifactId>
|
||||
<version>2.2.1</version>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.codehaus.plexus</groupId>
|
||||
<artifactId>plexus-compiler-api</artifactId>
|
||||
<version>2.2</version>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.sonatype.plexus</groupId>
|
||||
<artifactId>plexus-build-api</artifactId>
|
||||
<version>0.0.7</version>
|
||||
</dependency>
|
||||
<!-- The version of ANTLR tool that this version of the plugin controls.
|
||||
We have decided that this should be in lockstep with ANTLR itself, other
|
||||
than -1 -2 -3 etc patch releases. -->
|
||||
<dependency>
|
||||
<groupId>org.antlr</groupId>
|
||||
<artifactId>antlr4</artifactId>
|
||||
<version>${project.version}</version>
|
||||
</dependency>
|
||||
<!-- Testing requirements... -->
|
||||
<dependency>
|
||||
<groupId>junit</groupId>
|
||||
<artifactId>junit</artifactId>
|
||||
<version>4.11</version>
|
||||
<scope>test</scope>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.apache.maven.shared</groupId>
|
||||
<artifactId>maven-plugin-testing-harness</artifactId>
|
||||
<version>1.1</version>
|
||||
<scope>test</scope>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.apache.maven.plugin-tools</groupId>
|
||||
<artifactId>maven-plugin-annotations</artifactId>
|
||||
<version>3.2</version>
|
||||
<scope>provided</scope>
|
||||
</dependency>
|
||||
</dependencies>
|
||||
|
||||
<artifactId>antlr4-maven-plugin</artifactId>
|
||||
<packaging>maven-plugin</packaging>
|
||||
<build>
|
||||
<plugins>
|
||||
<plugin>
|
||||
<groupId>org.apache.maven.plugins</groupId>
|
||||
<artifactId>maven-plugin-plugin</artifactId>
|
||||
<version>3.3</version>
|
||||
<configuration>
|
||||
<!-- see http://jira.codehaus.org/browse/MNG-5346 -->
|
||||
<skipErrorNoDescriptorsFound>true</skipErrorNoDescriptorsFound>
|
||||
</configuration>
|
||||
<executions>
|
||||
<execution>
|
||||
<id>mojo-descriptor</id>
|
||||
<goals>
|
||||
<goal>descriptor</goal>
|
||||
</goals>
|
||||
</execution>
|
||||
<execution>
|
||||
<id>help-goal</id>
|
||||
<goals>
|
||||
<goal>helpmojo</goal>
|
||||
</goals>
|
||||
</execution>
|
||||
</executions>
|
||||
</plugin>
|
||||
</plugins>
|
||||
</build>
|
||||
|
||||
<name>ANTLR 4 Maven plugin</name>
|
||||
<description>Maven plugin for ANTLR 4 grammars</description>
|
||||
<url>http://www.antlr.org</url>
|
||||
|
||||
<prerequisites>
|
||||
<maven>3.0</maven>
|
||||
</prerequisites>
|
||||
|
||||
<!-- Ancillary information for completeness
|
||||
-->
|
||||
<inceptionYear>2009</inceptionYear>
|
||||
|
||||
<!-- ============================================================================= -->
|
||||
|
||||
<!--
|
||||
|
||||
What are we dependent on for the Mojos to execute? We need the
|
||||
plugin API itself and of course we need the ANTLR Tool and runtime
|
||||
and any of their dependencies, which we inherit. The Tool itself provides
|
||||
us with all the dependencies, so we need only name it here.
|
||||
-->
|
||||
<dependencies>
|
||||
|
||||
<!--
|
||||
The things we need to build the target language recognizer
|
||||
-->
|
||||
<dependency>
|
||||
<groupId>org.apache.maven</groupId>
|
||||
<artifactId>maven-plugin-api</artifactId>
|
||||
<version>3.0.5</version>
|
||||
<scope>compile</scope>
|
||||
</dependency>
|
||||
|
||||
<dependency>
|
||||
<groupId>org.apache.maven</groupId>
|
||||
<artifactId>maven-project</artifactId>
|
||||
<version>2.2.1</version>
|
||||
</dependency>
|
||||
|
||||
<dependency>
|
||||
<groupId>org.codehaus.plexus</groupId>
|
||||
<artifactId>plexus-compiler-api</artifactId>
|
||||
<version>2.2</version>
|
||||
</dependency>
|
||||
|
||||
<!--
|
||||
The version of ANTLR tool that this version of the plugin controls.
|
||||
We have decided that this should be in lockstep with ANTLR itself, other
|
||||
than -1 -2 -3 etc patch releases.
|
||||
-->
|
||||
<dependency>
|
||||
<groupId>org.antlr</groupId>
|
||||
<artifactId>antlr4</artifactId>
|
||||
<version>${project.version}</version>
|
||||
</dependency>
|
||||
|
||||
<!--
|
||||
Testing requirements...
|
||||
-->
|
||||
<dependency>
|
||||
|
||||
<groupId>junit</groupId>
|
||||
<artifactId>junit</artifactId>
|
||||
<version>4.11</version>
|
||||
<scope>test</scope>
|
||||
|
||||
</dependency>
|
||||
|
||||
<dependency>
|
||||
<groupId>org.apache.maven.shared</groupId>
|
||||
<artifactId>maven-plugin-testing-harness</artifactId>
|
||||
<version>1.1</version>
|
||||
<scope>test</scope>
|
||||
</dependency>
|
||||
|
||||
<dependency>
|
||||
<groupId>org.apache.maven.plugin-tools</groupId>
|
||||
<artifactId>maven-plugin-annotations</artifactId>
|
||||
<version>3.2</version>
|
||||
<scope>provided</scope>
|
||||
</dependency>
|
||||
|
||||
</dependencies>
|
||||
|
||||
<build>
|
||||
|
||||
<defaultGoal>install</defaultGoal>
|
||||
|
||||
<plugins>
|
||||
|
||||
<plugin>
|
||||
<groupId>org.apache.maven.plugins</groupId>
|
||||
<artifactId>maven-plugin-plugin</artifactId>
|
||||
<version>3.2</version>
|
||||
<configuration>
|
||||
<!-- see http://jira.codehaus.org/browse/MNG-5346 -->
|
||||
<skipErrorNoDescriptorsFound>true</skipErrorNoDescriptorsFound>
|
||||
</configuration>
|
||||
<executions>
|
||||
<execution>
|
||||
<id>mojo-descriptor</id>
|
||||
<goals>
|
||||
<goal>descriptor</goal>
|
||||
</goals>
|
||||
</execution>
|
||||
<execution>
|
||||
<id>help-goal</id>
|
||||
<goals>
|
||||
<goal>helpmojo</goal>
|
||||
</goals>
|
||||
</execution>
|
||||
</executions>
|
||||
</plugin>
|
||||
|
||||
<plugin>
|
||||
<groupId>org.apache.maven.plugins</groupId>
|
||||
<artifactId>maven-site-plugin</artifactId>
|
||||
<version>3.3</version>
|
||||
</plugin>
|
||||
|
||||
<plugin>
|
||||
<groupId>org.apache.maven.plugins</groupId>
|
||||
<artifactId>maven-project-info-reports-plugin</artifactId>
|
||||
<version>2.7</version>
|
||||
<configuration>
|
||||
<dependencyLocationsEnabled>false</dependencyLocationsEnabled>
|
||||
</configuration>
|
||||
</plugin>
|
||||
|
||||
</plugins>
|
||||
|
||||
</build>
|
||||
|
||||
<reporting>
|
||||
<plugins>
|
||||
<plugin>
|
||||
<groupId>org.apache.maven.plugins</groupId>
|
||||
<artifactId>maven-plugin-plugin</artifactId>
|
||||
<version>3.2</version>
|
||||
</plugin>
|
||||
|
||||
<plugin>
|
||||
<groupId>org.apache.maven.plugins</groupId>
|
||||
<artifactId>maven-javadoc-plugin</artifactId>
|
||||
<version>2.9</version>
|
||||
<configuration>
|
||||
<quiet>true</quiet>
|
||||
</configuration>
|
||||
</plugin>
|
||||
|
||||
<plugin>
|
||||
<groupId>org.apache.maven.plugins</groupId>
|
||||
<artifactId>maven-jxr-plugin</artifactId>
|
||||
<version>2.3</version>
|
||||
</plugin>
|
||||
</plugins>
|
||||
</reporting>
|
||||
<reporting>
|
||||
<plugins>
|
||||
<plugin>
|
||||
<groupId>org.apache.maven.plugins</groupId>
|
||||
<artifactId>maven-plugin-plugin</artifactId>
|
||||
<version>3.3</version>
|
||||
</plugin>
|
||||
<plugin>
|
||||
<groupId>org.apache.maven.plugins</groupId>
|
||||
<artifactId>maven-javadoc-plugin</artifactId>
|
||||
<version>2.9</version>
|
||||
<configuration>
|
||||
<quiet>true</quiet>
|
||||
</configuration>
|
||||
</plugin>
|
||||
<plugin>
|
||||
<groupId>org.apache.maven.plugins</groupId>
|
||||
<artifactId>maven-jxr-plugin</artifactId>
|
||||
<version>2.3</version>
|
||||
</plugin>
|
||||
</plugins>
|
||||
</reporting>
|
||||
</project>
|
||||
|
|
|
@@ -0,0 +1,18 @@
|
|||
<?xml version="1.0" encoding="utf-8"?>
|
||||
<lifecycleMappingMetadata>
|
||||
<pluginExecutions>
|
||||
<pluginExecution>
|
||||
<pluginExecutionFilter>
|
||||
<goals>
|
||||
<goal>antlr4</goal>
|
||||
</goals>
|
||||
</pluginExecutionFilter>
|
||||
<action>
|
||||
<execute>
|
||||
<runOnIncremental>true</runOnIncremental>
|
||||
<runOnConfiguration>true</runOnConfiguration>
|
||||
</execute>
|
||||
</action>
|
||||
</pluginExecution>
|
||||
</pluginExecutions>
|
||||
</lifecycleMappingMetadata>
|
|
@@ -28,10 +28,14 @@
|
|||
*/
|
||||
package org.antlr.mojo.antlr4;
|
||||
|
||||
import org.antlr.v4.runtime.misc.NotNull;
|
||||
import org.antlr.v4.Tool;
|
||||
import org.antlr.v4.tool.ANTLRMessage;
|
||||
import org.antlr.v4.tool.ANTLRToolListener;
|
||||
import org.apache.maven.plugin.logging.Log;
|
||||
import org.sonatype.plexus.build.incremental.BuildContext;
|
||||
import org.stringtemplate.v4.ST;
|
||||
|
||||
import java.io.File;
|
||||
|
||||
/**
|
||||
* This implementation of {@link ANTLRToolListener} reports messages to the
|
||||
|
@@ -41,6 +45,8 @@ import org.apache.maven.plugin.logging.Log;
|
|||
*/
|
||||
public class Antlr4ErrorLog implements ANTLRToolListener {
|
||||
|
||||
private final Tool tool;
|
||||
private final BuildContext buildContext;
|
||||
private final Log log;
|
||||
|
||||
/**
|
||||
|
@@ -48,43 +54,70 @@ public class Antlr4ErrorLog implements ANTLRToolListener {
|
|||
*
|
||||
* @param log The Maven log
|
||||
*/
|
||||
public Antlr4ErrorLog(@NotNull Log log) {
|
||||
public Antlr4ErrorLog(Tool tool, BuildContext buildContext, Log log) {
|
||||
this.tool = tool;
|
||||
this.buildContext = buildContext;
|
||||
this.log = log;
|
||||
}
|
||||
|
||||
/**
|
||||
* {@inheritDoc}
|
||||
* <p/>
|
||||
* <p>
|
||||
* This implementation passes the message to the Maven log.
|
||||
*
|
||||
* </p>
|
||||
* @param message The message to send to Maven
|
||||
*/
|
||||
@Override
|
||||
public void info(String message) {
|
||||
if (tool.errMgr.formatWantsSingleLineMessage()) {
|
||||
message = message.replace('\n', ' ');
|
||||
}
|
||||
log.info(message);
|
||||
}
|
||||
|
||||
/**
|
||||
* {@inheritDoc}
|
||||
* <p/>
|
||||
* <p>
|
||||
* This implementation passes the message to the Maven log.
|
||||
*
|
||||
* </p>
|
||||
* @param message The message to send to Maven.
|
||||
*/
|
||||
@Override
|
||||
public void error(ANTLRMessage message) {
|
||||
log.error(message.toString());
|
||||
ST msgST = tool.errMgr.getMessageTemplate(message);
|
||||
String outputMsg = msgST.render();
|
||||
if (tool.errMgr.formatWantsSingleLineMessage()) {
|
||||
outputMsg = outputMsg.replace('\n', ' ');
|
||||
}
|
||||
|
||||
log.error(outputMsg);
|
||||
|
||||
if (message.fileName != null) {
|
||||
String text = message.getMessageTemplate(false).render();
|
||||
buildContext.addMessage(new File(message.fileName), message.line, message.charPosition, text, BuildContext.SEVERITY_ERROR, message.getCause());
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* {@inheritDoc}
|
||||
* <p/>
|
||||
* <p>
|
||||
* This implementation passes the message to the Maven log.
|
||||
*
|
||||
* </p>
|
||||
* @param message
|
||||
*/
|
||||
@Override
|
||||
public void warning(ANTLRMessage message) {
|
||||
log.warn(message.toString());
|
||||
ST msgST = tool.errMgr.getMessageTemplate(message);
|
||||
String outputMsg = msgST.render();
|
||||
if (tool.errMgr.formatWantsSingleLineMessage()) {
|
||||
outputMsg = outputMsg.replace('\n', ' ');
|
||||
}
|
||||
|
||||
log.warn(outputMsg);
|
||||
|
||||
if (message.fileName != null) {
|
||||
String text = message.getMessageTemplate(false).render();
|
||||
buildContext.addMessage(new File(message.fileName), message.line, message.charPosition, text, BuildContext.SEVERITY_WARNING, message.getCause());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@@ -32,13 +32,13 @@ package org.antlr.mojo.antlr4;
|
|||
import org.antlr.v4.Tool;
|
||||
import org.antlr.v4.codegen.CodeGenerator;
|
||||
import org.antlr.v4.runtime.misc.MultiMap;
|
||||
import org.antlr.v4.runtime.misc.NotNull;
|
||||
import org.antlr.v4.runtime.misc.Utils;
|
||||
import org.antlr.v4.tool.Grammar;
|
||||
import org.apache.maven.plugin.AbstractMojo;
|
||||
import org.apache.maven.plugin.MojoExecutionException;
|
||||
import org.apache.maven.plugin.MojoFailureException;
|
||||
import org.apache.maven.plugin.logging.Log;
|
||||
import org.apache.maven.plugins.annotations.Component;
|
||||
import org.apache.maven.plugins.annotations.LifecyclePhase;
|
||||
import org.apache.maven.plugins.annotations.Mojo;
|
||||
import org.apache.maven.plugins.annotations.Parameter;
|
||||
|
@@ -49,11 +49,13 @@ import org.codehaus.plexus.compiler.util.scan.SimpleSourceInclusionScanner;
|
|||
import org.codehaus.plexus.compiler.util.scan.SourceInclusionScanner;
|
||||
import org.codehaus.plexus.compiler.util.scan.mapping.SourceMapping;
|
||||
import org.codehaus.plexus.compiler.util.scan.mapping.SuffixMapping;
|
||||
import org.sonatype.plexus.build.incremental.BuildContext;
|
||||
|
||||
import java.io.BufferedWriter;
|
||||
import java.io.File;
|
||||
import java.io.FileWriter;
|
||||
import java.io.IOException;
|
||||
import java.io.OutputStream;
|
||||
import java.io.OutputStreamWriter;
|
||||
import java.io.StringWriter;
|
||||
import java.io.Writer;
|
||||
import java.net.URI;
|
||||
|
@@ -143,10 +145,11 @@ public class Antlr4Mojo extends AbstractMojo {
|
|||
* the generate phase of the plugin. Note that the plugin is smart enough to
|
||||
* realize that imported grammars should be included but not acted upon
|
||||
* directly by the ANTLR Tool.
|
||||
* <p/>
|
||||
* <p>
|
||||
* A set of Ant-like inclusion patterns used to select files from the source
|
||||
* directory for processing. By default, the pattern
|
||||
* <code>**/*.g4</code> is used to select grammar files.
|
||||
* </p>
|
||||
*/
|
||||
@Parameter
|
||||
protected Set<String> includes = new HashSet<String>();
|
||||
|
@@ -181,6 +184,9 @@ public class Antlr4Mojo extends AbstractMojo {
|
|||
@Parameter(defaultValue = "${basedir}/src/main/antlr4/imports")
|
||||
private File libDirectory;
|
||||
|
||||
@Component
|
||||
private BuildContext buildContext;
|
||||
|
||||
public File getSourceDirectory() {
|
||||
return sourceDirectory;
|
||||
}
|
||||
|
@@ -206,9 +212,9 @@ public class Antlr4Mojo extends AbstractMojo {
|
|||
* The main entry point for this Mojo, it is responsible for converting
|
||||
* ANTLR 4.x grammars into the target language specified by the grammar.
|
||||
*
|
||||
* @throws MojoExecutionException if a configuration or grammar error causes
|
||||
* @exception MojoExecutionException if a configuration or grammar error causes
|
||||
* the code generation process to fail
|
||||
* @throws MojoFailureException if an instance of the ANTLR 4 {@link Tool}
|
||||
* @exception MojoFailureException if an instance of the ANTLR 4 {@link Tool}
|
||||
* cannot be created
|
||||
*/
|
||||
@Override
|
||||
|
@@ -347,9 +353,9 @@ public class Antlr4Mojo extends AbstractMojo {
|
|||
/**
|
||||
*
|
||||
* @param sourceDirectory
|
||||
* @throws InclusionScanException
|
||||
* @exception InclusionScanException
|
||||
*/
|
||||
@NotNull
|
||||
|
||||
private List<List<String>> processGrammarFiles(List<String> args, File sourceDirectory) throws InclusionScanException {
|
||||
// Which files under the source set should we be looking for as grammar files
|
||||
SourceMapping mapping = new SuffixMapping("g4", Collections.<String>emptySet());
|
||||
|
@@ -366,6 +372,22 @@ public class Antlr4Mojo extends AbstractMojo {
|
|||
scan.addSourceMapping(mapping);
|
||||
Set<File> grammarFiles = scan.getIncludedSources(sourceDirectory, null);
|
||||
|
||||
// We don't want the plugin to run for every grammar, regardless of whether
|
||||
// it's changed since the last compilation. Check the mtime of the tokens vs
|
||||
// the grammar file mtime to determine whether we even need to execute.
|
||||
Set<File> grammarFilesToProcess = new HashSet<File>();
|
||||
|
||||
for (File grammarFile : grammarFiles) {
|
||||
String tokensFileName = grammarFile.getName().split("\\.")[0] + ".tokens";
|
||||
File outputFile = new File(outputDirectory, tokensFileName);
|
||||
if ( (! outputFile.exists()) ||
|
||||
outputFile.lastModified() < grammarFile.lastModified() ) {
|
||||
grammarFilesToProcess.add(grammarFile);
|
||||
}
|
||||
}
|
||||
|
||||
grammarFiles = grammarFilesToProcess;
|
||||
|
||||
if (grammarFiles.isEmpty()) {
|
||||
getLog().info("No grammars to process");
|
||||
return Collections.emptyList();
|
||||
|
@@ -375,6 +397,12 @@ public class Antlr4Mojo extends AbstractMojo {
|
|||
// Iterate each grammar file we were given and add it into the tool's list of
|
||||
// grammars to process.
|
||||
for (File grammarFile : grammarFiles) {
|
||||
if (!buildContext.hasDelta(grammarFile)) {
|
||||
continue;
|
||||
}
|
||||
|
||||
buildContext.removeMessages(grammarFile);
|
||||
|
||||
getLog().debug("Grammar file '" + grammarFile.getPath() + "' detected.");
|
||||
|
||||
String relPathBase = findSourceSubdir(sourceDirectory, grammarFile.getPath());
|
||||
|
@@ -452,7 +480,7 @@ public class Antlr4Mojo extends AbstractMojo {
|
|||
|
||||
public CustomTool(String[] args) {
|
||||
super(args);
|
||||
addListener(new Antlr4ErrorLog(getLog()));
|
||||
addListener(new Antlr4ErrorLog(this, buildContext, getLog()));
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@@ -486,8 +514,8 @@ public class Antlr4Mojo extends AbstractMojo {
|
|||
|
||||
URI relativePath = project.getBasedir().toURI().relativize(outputFile.toURI());
|
||||
getLog().debug(" Writing file: " + relativePath);
|
||||
FileWriter fw = new FileWriter(outputFile);
|
||||
return new BufferedWriter(fw);
|
||||
OutputStream outputStream = buildContext.newFileOutputStream(outputFile);
|
||||
return new BufferedWriter(new OutputStreamWriter(outputStream));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
build.xml
|
@@ -1,279 +0,0 @@
|
|||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<project name="ANTLR4" default="distribute" basedir=".">
|
||||
<target name="basic-init">
|
||||
<property file="user.build.properties"/>
|
||||
<property name="dist.dir" value="${basedir}/dist" />
|
||||
<property name="build.dir" value="${basedir}/build" />
|
||||
<property name="lib.dir" value="${basedir}/lib" />
|
||||
</target>
|
||||
|
||||
<target name="antlr3-init" depends="basic-init">
|
||||
<property name="antlr3.version" value="3.5"/>
|
||||
<property name="antlr3.jar.name" value="antlr-${antlr3.version}-complete-no-st3.jar"/>
|
||||
<property name="antlr3.jar" value="${lib.dir}/${antlr3.jar.name}"/>
|
||||
<mkdir dir="${lib.dir}"/>
|
||||
<get src="http://antlr3.org/download/${antlr3.jar.name}" dest="${antlr3.jar}" skipexisting="true"/>
|
||||
<path id="cp.antlr3" path="${antlr3.jar}"/>
|
||||
|
||||
<property name="build.antlr3.dir" value="${build.dir}/generated-sources/antlr3" />
|
||||
<property name="antlr3.touch" value="${build.dir}/antlr3-${antlr3.version}.touch"/>
|
||||
</target>
|
||||
|
||||
<target name="antlr4-init" depends="basic-init">
|
||||
<property name="antlr4.version" value="4.1"/>
|
||||
<property name="antlr4.jar.name" value="antlr-${antlr4.version}-complete.jar"/>
|
||||
<property name="antlr4.jar" value="${lib.dir}/${antlr4.jar.name}"/>
|
||||
<mkdir dir="${lib.dir}"/>
|
||||
<get src="http://antlr.org/download/${antlr4.jar.name}" dest="${antlr4.jar}" skipexisting="true"/>
|
||||
<path id="cp.antlr4" path="${antlr4.jar}"/>
|
||||
|
||||
<property name="build.antlr4.dir" value="${build.dir}/generated-sources/antlr4" />
|
||||
<property name="antlr4.touch" value="${build.dir}/antlr4-${antlr4.version}.touch"/>
|
||||
</target>
|
||||
|
||||
<target name="build-init" depends="basic-init">
|
||||
<property name="version" value="4.1.1-dev"/>
|
||||
<property name="build.sysclasspath" value="ignore"/>
|
||||
<property name="install.root.dir" value="${dist.dir}/antlr-${version}" />
|
||||
<property name="jar.file" value="${dist.dir}/antlr-${version}-complete.jar" />
|
||||
</target>
|
||||
|
||||
<target name="clean" depends="basic-init">
|
||||
<delete dir="${build.dir}" includeemptydirs="true"/>
|
||||
<delete dir="${dist.dir}" includeemptydirs="true"/>
|
||||
</target>
|
||||
|
||||
<target name="clean-all" depends="clean">
|
||||
<delete dir="${lib.dir}" includeemptydirs="true"/>
|
||||
</target>
|
||||
|
||||
<target name="antlr3-up-to-date" depends="basic-init,antlr3-init">
|
||||
<uptodate targetfile="${antlr3.touch}" property="is.antlr3.uptodate">
|
||||
<srcfiles dir="${basedir}/tool/src">
|
||||
<include name="**/*.g"/>
|
||||
<include name="**/*.tokens"/>
|
||||
</srcfiles>
|
||||
<srcfiles file="${antlr3.jar}"/>
|
||||
</uptodate>
|
||||
</target>
|
||||
|
||||
<target name="antlr4-up-to-date" depends="basic-init,antlr4-init">
|
||||
<uptodate targetfile="${antlr4.touch}" property="is.antlr4.uptodate">
|
||||
<srcfiles dir="${basedir}/tool/src">
|
||||
<include name="**/*.g4"/>
|
||||
<include name="**/*.tokens"/>
|
||||
</srcfiles>
|
||||
<srcfiles file="${antlr4.jar}"/>
|
||||
</uptodate>
|
||||
</target>
|
||||
|
||||
<target name="up-to-date" depends="antlr3-up-to-date,antlr4-up-to-date,build-init">
|
||||
<uptodate targetfile="${jar.file}" property="is.source.uptodate">
|
||||
<srcfiles dir="${basedir}/tool/src">
|
||||
<include name="**/*.java"/>
|
||||
<include name="**/*.g"/>
|
||||
<include name="**/*.tokens"/>
|
||||
</srcfiles>
|
||||
<srcfiles dir="${basedir}/tool/resources">
|
||||
<include name="**/*.st"/>
|
||||
<include name="**/*.stg"/>
|
||||
</srcfiles>
|
||||
<srcfiles dir="${basedir}/runtime/Java/src/">
|
||||
<include name="**/*.java"/>
|
||||
<include name="**/*.g"/>
|
||||
<include name="**/*.st"/>
|
||||
<include name="**/*.stg"/>
|
||||
</srcfiles>
|
||||
<srcfiles dir="${build.antlr3.dir}"/>
|
||||
<srcfiles file="${basedir}/runtime/Java/lib/org.abego.treelayout.core.jar"/>
|
||||
<srcfiles file="${antlr3.jar}"/>
|
||||
</uptodate>
|
||||
|
||||
<condition property="is.jar.uptodate">
|
||||
<and>
|
||||
<istrue value="${is.source.uptodate}"/>
|
||||
<istrue value="${is.antlr3.uptodate}"/>
|
||||
</and>
|
||||
</condition>
|
||||
</target>
|
||||
|
||||
<macrodef name="antlr3">
|
||||
<attribute name="srcpath"/>
|
||||
<element name="args" optional="true"/>
|
||||
<sequential>
|
||||
<local name="path.antlr3.local"/>
|
||||
<local name="sources.antlr3.local"/>
|
||||
<path id="path.antlr3.local">
|
||||
<fileset dir="${basedir}/tool/src/@{srcpath}" includes="*.g"/>
|
||||
</path>
|
||||
<pathconvert pathsep=" " property="sources.antlr3.local" refid="path.antlr3.local">
|
||||
<map from="${basedir}/tool/src/@{srcpath}/" to=""/>
|
||||
</pathconvert>
|
||||
<mkdir dir="${build.antlr3.dir}/@{srcpath}"/>
|
||||
<java classname="org.antlr.Tool" fork="true" failonerror="true" maxmemory="300m"
|
||||
dir="${basedir}/tool/src/@{srcpath}">
|
||||
<arg value="-o"/>
|
||||
<arg value="${build.antlr3.dir}/@{srcpath}"/>
|
||||
<args/>
|
||||
<arg line="${sources.antlr3.local}"/>
|
||||
<classpath>
|
||||
<path refid="cp.antlr3"/>
|
||||
<pathelement location="${java.class.path}"/>
|
||||
</classpath>
|
||||
</java>
|
||||
</sequential>
|
||||
</macrodef>
|
||||
|
||||
<macrodef name="antlr4">
|
||||
<attribute name="srcpath"/>
|
||||
<element name="args" optional="true"/>
|
||||
<sequential>
|
||||
<local name="path.antlr4.local"/>
|
||||
<local name="sources.antlr4.local"/>
|
||||
<path id="path.antlr4.local">
|
||||
<fileset dir="${basedir}/runtime/Java/src/@{srcpath}" includes="*.g4"/>
|
||||
</path>
|
||||
<pathconvert pathsep=" " property="sources.antlr4.local" refid="path.antlr4.local">
|
||||
<map from="${basedir}/runtime/Java/src/@{srcpath}/" to=""/>
|
||||
</pathconvert>
|
||||
<mkdir dir="${build.antlr4.dir}/@{srcpath}"/>
|
||||
<java classname="org.antlr.v4.Tool" fork="true" failonerror="true" maxmemory="300m"
|
||||
dir="${basedir}/runtime/Java/src/@{srcpath}">
|
||||
<arg value="-o"/>
|
||||
<arg value="${build.antlr4.dir}/@{srcpath}"/>
|
||||
<args/>
|
||||
<arg line="${sources.antlr4.local}"/>
|
||||
<classpath>
|
||||
<path refid="cp.antlr4"/>
|
||||
<pathelement location="${java.class.path}"/>
|
||||
</classpath>
|
||||
</java>
|
||||
</sequential>
|
||||
</macrodef>
|
||||
|
||||
<target name="antlr3" depends="build-init,antlr3-init,antlr3-up-to-date" unless="is.antlr3.uptodate">
|
||||
<mkdir dir="${build.antlr3.dir}" />
|
||||
|
||||
<path id="sources.antlr3">
|
||||
<fileset dir="${basedir}/tool/src" includes="**/*.g"/>
|
||||
</path>
|
||||
<pathconvert pathsep="${line.separator} " property="echo.sources.antlr3" refid="sources.antlr3">
|
||||
<map from="${basedir}/tool/src/" to=""/>
|
||||
</pathconvert>
|
||||
<echo message="Generating ANTLR 3 grammars:${line.separator} ${echo.sources.antlr3}"/>
|
||||
|
||||
<antlr3 srcpath="org/antlr/v4/parse"/>
|
||||
|
||||
<antlr3 srcpath="org/antlr/v4/codegen">
|
||||
<args>
|
||||
<arg value="-lib"/>
|
||||
<arg value="${build.antlr3.dir}/org/antlr/v4/parse"/>
|
||||
</args>
|
||||
</antlr3>
|
||||
|
||||
<touch file="${antlr3.touch}" mkdirs="true"/>
|
||||
</target>
|
||||
|
||||
<target name="antlr4" depends="build-init,antlr4-init,antlr4-up-to-date" unless="is.antlr4.uptodate">
|
||||
<mkdir dir="${build.antlr4.dir}" />
|
||||
|
||||
<path id="sources.antlr4">
|
||||
<fileset dir="${basedir}/runtime/Java/src" includes="**/*.g4"/>
|
||||
</path>
|
||||
<pathconvert pathsep="${line.separator} " property="echo.sources.antlr4" refid="sources.antlr4">
|
||||
<map from="${basedir}/runtime/Java/src/" to=""/>
|
||||
</pathconvert>
|
||||
<echo message="Generating ANTLR 4 grammars:${line.separator} ${echo.sources.antlr4}"/>
|
||||
|
||||
<antlr4 srcpath="org/antlr/v4/runtime/tree/xpath">
|
||||
<args>
|
||||
<arg value="-package"/>
|
||||
<arg value="org.antlr.v4.runtime.tree.xpath"/>
|
||||
</args>
|
||||
</antlr4>
|
||||
|
||||
<touch file="${antlr4.touch}" mkdirs="true"/>
|
||||
</target>
|
||||
|
||||
<target name="compile" depends="build-init,antlr3,antlr4,up-to-date" description="Compile for generic OS" unless="is.jar.uptodate">
|
||||
<mkdir dir="${build.dir}/classes"/>
|
||||
<javac
|
||||
destdir="${build.dir}/classes"
|
||||
source="1.5"
|
||||
target="1.5"
|
||||
debug="true"
|
||||
excludes="org/antlr/v4/test/**">
|
||||
<compilerarg value="-Xlint"/>
|
||||
<compilerarg value="-Xlint:-serial"/>
|
||||
<classpath>
|
||||
<path refid="cp.antlr3"/>
|
||||
<pathelement location="${basedir}/runtime/Java/lib/org.abego.treelayout.core.jar"/>
|
||||
</classpath>
|
||||
<src path="${basedir}/tool/src:${basedir}/runtime/Java/src:${build.antlr3.dir}:${build.antlr4.dir}"/>
|
||||
</javac>
|
||||
</target>
|
||||
|
||||
<target name="build-jar" depends="up-to-date,compile" description="Build ANTLR 4.jar" unless="is.jar.uptodate">
|
||||
<mkdir dir="${dist.dir}"/>
|
||||
|
||||
<jar jarfile="${jar.file}">
|
||||
<fileset dir="${build.dir}/classes" includes="**/*.class"/>
|
||||
<fileset dir="${basedir}/tool/resources">
|
||||
<include name="**/*.st"/>
|
||||
<include name="**/*.stg"/>
|
||||
</fileset>
|
||||
<zipfileset includes="org/antlr/**, org/antlr/stringtemplate/**, org/stringtemplate/v4/**" src="${antlr3.jar}"/>
|
||||
<zipfileset includes="**/*.class" src="${basedir}/runtime/Java/lib/org.abego.treelayout.core.jar"/>
|
||||
<manifest>
|
||||
<attribute name="Version" value="${version}"/>
|
||||
<attribute name="Main-Class" value="org.antlr.v4.Tool"/>
|
||||
</manifest>
|
||||
</jar>
|
||||
</target>
|
||||
|
||||
<target name="zip-source" depends="compile">
|
||||
<mkdir dir="${install.root.dir}"/>
|
||||
|
||||
<mkdir dir="${install.root.dir}/src"/>
|
||||
<copy todir="${install.root.dir}/src">
|
||||
<fileset dir="${basedir}/tool/src/">
|
||||
<include name="**/*.java"/>
|
||||
<include name="**/*.g"/>
|
||||
<include name="**/*.st"/>
|
||||
<include name="**/*.stg"/>
|
||||
</fileset>
|
||||
<fileset dir="${basedir}/tool/resources/">
|
||||
<include name="**/*.st"/>
|
||||
<include name="**/*.stg"/>
|
||||
</fileset>
|
||||
<fileset dir="${basedir}/runtime/Java/src/">
|
||||
<include name="**/*.java"/>
|
||||
<include name="**/*.g"/>
|
||||
<include name="**/*.st"/>
|
||||
<include name="**/*.stg"/>
|
||||
</fileset>
|
||||
<fileset dir="${build.antlr3.dir}"/>
|
||||
</copy>
|
||||
|
||||
<copy todir="${install.root.dir}">
|
||||
<fileset dir=".">
|
||||
<include name="build.properties"/>
|
||||
<include name="build.xml"/>
|
||||
<include name="LICENSE.txt"/>
|
||||
<include name="README.txt"/>
|
||||
<include name="doxyfile"/>
|
||||
</fileset>
|
||||
</copy>
|
||||
|
||||
<copy todir="${install.root.dir}/lib" file="${antlr3.jar}">
|
||||
</copy>
|
||||
|
||||
<zip destfile="${dist.dir}/antlr-${version}-src.zip">
|
||||
<zipfileset dir="${install.root.dir}" prefix="antlr-${version}"/>
|
||||
</zip>
|
||||
</target>
|
||||
|
||||
<target name="distribute" depends="clean, zip-source, build-jar">
|
||||
</target>
|
||||
|
||||
</project>
|
|
@@ -0,0 +1,64 @@
|
|||
<?xml version="1.0" encoding="utf-8"?>
|
||||
<package xmlns="http://schemas.microsoft.com/packaging/2010/07/nuspec.xsd">
|
||||
<metadata>
|
||||
<id>Antlr4.Runtime</id>
|
||||
<version>0.0.0</version>
|
||||
<authors>Sam Harwell, Terence Parr</authors>
|
||||
<owners>Sam Harwell</owners>
|
||||
<description>The runtime library for parsers generated by the C# target of ANTLR 4. This package supports projects targeting .NET 2.0 or newer, and built using Visual Studio 2008 or newer.</description>
|
||||
<language>en-us</language>
|
||||
<projectUrl>https://github.com/sharwell/antlr4cs</projectUrl>
|
||||
<licenseUrl>https://raw.github.com/sharwell/antlr4cs/master/LICENSE.txt</licenseUrl>
|
||||
<iconUrl>https://raw.github.com/antlr/website-antlr4/master/images/icons/antlr.png</iconUrl>
|
||||
<copyright>Copyright © Sam Harwell 2014</copyright>
|
||||
<releaseNotes>https://github.com/sharwell/antlr4cs/releases/v$version$</releaseNotes>
|
||||
<requireLicenseAcceptance>true</requireLicenseAcceptance>
|
||||
<tags>antlr antlr4 parsing</tags>
|
||||
<title>ANTLR 4 Runtime</title>
|
||||
<summary>The runtime library for parsers generated by the C# target of ANTLR 4.</summary>
|
||||
</metadata>
|
||||
<files>
|
||||
<!-- Runtime Libraries -->
|
||||
|
||||
<file src="..\runtime\CSharp\Antlr4.Runtime\bin\net20\$Configuration$\Antlr4.Runtime.dll" target="lib\net20"/>
|
||||
<file src="..\runtime\CSharp\Antlr4.Runtime\bin\net20\$Configuration$\Antlr4.Runtime.pdb" target="lib\net20"/>
|
||||
<file src="..\runtime\CSharp\Antlr4.Runtime\bin\net20\$Configuration$\Antlr4.Runtime.xml" target="lib\net20"/>
|
||||
|
||||
<file src="..\runtime\CSharp\Antlr4.Runtime\bin\net30\$Configuration$\Antlr4.Runtime.dll" target="lib\net30"/>
|
||||
<file src="..\runtime\CSharp\Antlr4.Runtime\bin\net30\$Configuration$\Antlr4.Runtime.pdb" target="lib\net30"/>
|
||||
<file src="..\runtime\CSharp\Antlr4.Runtime\bin\net30\$Configuration$\Antlr4.Runtime.xml" target="lib\net30"/>
|
||||
|
||||
<file src="..\runtime\CSharp\Antlr4.Runtime\bin\net35-client\$Configuration$\Antlr4.Runtime.dll" target="lib\net35-client"/>
|
||||
<file src="..\runtime\CSharp\Antlr4.Runtime\bin\net35-client\$Configuration$\Antlr4.Runtime.pdb" target="lib\net35-client"/>
|
||||
<file src="..\runtime\CSharp\Antlr4.Runtime\bin\net35-client\$Configuration$\Antlr4.Runtime.xml" target="lib\net35-client"/>
|
||||
|
||||
<file src="..\runtime\CSharp\Antlr4.Runtime\bin\net40-client\$Configuration$\Antlr4.Runtime.dll" target="lib\net40-client"/>
|
||||
<file src="..\runtime\CSharp\Antlr4.Runtime\bin\net40-client\$Configuration$\Antlr4.Runtime.pdb" target="lib\net40-client"/>
|
||||
<file src="..\runtime\CSharp\Antlr4.Runtime\bin\net40-client\$Configuration$\Antlr4.Runtime.xml" target="lib\net40-client"/>
|
||||
|
||||
<file src="..\runtime\CSharp\Antlr4.Runtime\bin\net45\$Configuration$\Antlr4.Runtime.dll" target="lib\net45"/>
|
||||
<file src="..\runtime\CSharp\Antlr4.Runtime\bin\net45\$Configuration$\Antlr4.Runtime.pdb" target="lib\net45"/>
|
||||
<file src="..\runtime\CSharp\Antlr4.Runtime\bin\net45\$Configuration$\Antlr4.Runtime.xml" target="lib\net45"/>
|
||||
|
||||
<file src="..\runtime\CSharp\Antlr4.Runtime\bin\net35-cf\$Configuration$\Antlr4.Runtime.dll" target="lib\net35-cf"/>
|
||||
<file src="..\runtime\CSharp\Antlr4.Runtime\bin\net35-cf\$Configuration$\Antlr4.Runtime.pdb" target="lib\net35-cf"/>
|
||||
<file src="..\runtime\CSharp\Antlr4.Runtime\bin\net35-cf\$Configuration$\Antlr4.Runtime.xml" target="lib\net35-cf"/>
|
||||
|
||||
<file src="..\runtime\CSharp\Antlr4.Runtime\bin\portable-net40\$Configuration$\Antlr4.Runtime.dll" target="lib\portable-net4+sl5+netcore45+wpa81+wp8+MonoAndroid1+MonoTouch1"/>
|
||||
<file src="..\runtime\CSharp\Antlr4.Runtime\bin\portable-net40\$Configuration$\Antlr4.Runtime.pdb" target="lib\portable-net4+sl5+netcore45+wpa81+wp8+MonoAndroid1+MonoTouch1"/>
|
||||
<file src="..\runtime\CSharp\Antlr4.Runtime\bin\portable-net40\$Configuration$\Antlr4.Runtime.xml" target="lib\portable-net4+sl5+netcore45+wpa81+wp8+MonoAndroid1+MonoTouch1"/>
|
||||
|
||||
<file src="..\runtime\CSharp\Antlr4.Runtime\bin\portable-net45\$Configuration$\Antlr4.Runtime.dll" target="lib\portable-net45+netcore45+wpa81+wp8+MonoAndroid1+MonoTouch1"/>
|
||||
<file src="..\runtime\CSharp\Antlr4.Runtime\bin\portable-net45\$Configuration$\Antlr4.Runtime.pdb" target="lib\portable-net45+netcore45+wpa81+wp8+MonoAndroid1+MonoTouch1"/>
|
||||
<file src="..\runtime\CSharp\Antlr4.Runtime\bin\portable-net45\$Configuration$\Antlr4.Runtime.xml" target="lib\portable-net45+netcore45+wpa81+wp8+MonoAndroid1+MonoTouch1"/>
|
||||
|
||||
<file src="..\runtime\CSharp\Antlr4.Runtime\bin\netcore45\$Configuration$\Antlr4.Runtime.dll" target="lib\netcore45"/>
|
||||
<file src="..\runtime\CSharp\Antlr4.Runtime\bin\netcore45\$Configuration$\Antlr4.Runtime.pdb" target="lib\netcore45"/>
|
||||
<file src="..\runtime\CSharp\Antlr4.Runtime\bin\netcore45\$Configuration$\Antlr4.Runtime.xml" target="lib\netcore45"/>
|
||||
|
||||
<!-- Source Code -->
|
||||
|
||||
<file exclude="..\runtime\CSharp\Antlr4.Runtime\obj\**\*.cs" src="..\runtime\CSharp\Antlr4.Runtime\**\*.cs" target="src"/>
|
||||
<file src="..\runtime\CSharp\Antlr4.Runtime\**\$Configuration$\*Lexer.cs" target="src"/>
|
||||
</files>
|
||||
</package>
|
|
@@ -0,0 +1,35 @@
|
|||
<?xml version="1.0" encoding="utf-8"?>
|
||||
<package xmlns="http://schemas.microsoft.com/packaging/2010/07/nuspec.xsd">
|
||||
<metadata minClientVersion="2.7">
|
||||
<id>Antlr4.VS2008</id>
|
||||
<version>0.0.0</version>
|
||||
<authors>Sam Harwell, Terence Parr</authors>
|
||||
<owners>Sam Harwell</owners>
|
||||
<description>The C# target of the ANTLR 4 parser generator for Visual Studio 2008 projects. This package supports projects targeting .NET 2.0 or newer, and built using Visual Studio 2008.</description>
|
||||
<language>en-us</language>
|
||||
<projectUrl>https://github.com/sharwell/antlr4cs</projectUrl>
|
||||
<licenseUrl>https://raw.github.com/sharwell/antlr4cs/master/LICENSE.txt</licenseUrl>
|
||||
<iconUrl>https://raw.github.com/antlr/website-antlr4/master/images/icons/antlr.png</iconUrl>
|
||||
<copyright>Copyright © Sam Harwell 2014</copyright>
|
||||
<releaseNotes>https://github.com/sharwell/antlr4cs/releases/v$version$</releaseNotes>
|
||||
<requireLicenseAcceptance>true</requireLicenseAcceptance>
|
||||
<developmentDependency>true</developmentDependency>
|
||||
<tags>antlr antlr4 parsing</tags>
|
||||
<title>ANTLR 4 (Visual Studio 2008)</title>
|
||||
<summary>The C# target of the ANTLR 4 parser generator for Visual Studio 2008 projects.</summary>
|
||||
<dependencies>
|
||||
<dependency id="Antlr4.Runtime" version="$version$" />
|
||||
</dependencies>
|
||||
</metadata>
|
||||
<files>
|
||||
<!-- Tools -->
|
||||
|
||||
<file src="..\tool\target\antlr4-csharp-$CSharpToolVersion$-complete.jar" target="tools"/>
|
||||
|
||||
<!-- Build Configuration -->
|
||||
|
||||
<file src="..\runtime\CSharp\Antlr4BuildTasks\bin\net35\$Configuration$\Antlr4.net35.props" target="build\Antlr4.props"/>
|
||||
<file src="..\runtime\CSharp\Antlr4BuildTasks\bin\net35\$Configuration$\Antlr4.net35.targets" target="build\Antlr4.targets"/>
|
||||
<file src="..\runtime\CSharp\Antlr4BuildTasks\bin\net35\$Configuration$\Antlr4BuildTasks.net35.dll" target="build"/>
|
||||
</files>
|
||||
</package>
|
|
@@ -0,0 +1,35 @@
|
|||
<?xml version="1.0" encoding="utf-8"?>
|
||||
<package xmlns="http://schemas.microsoft.com/packaging/2010/07/nuspec.xsd">
|
||||
<metadata minClientVersion="2.7">
|
||||
<id>Antlr4</id>
|
||||
<version>0.0.0</version>
|
||||
<authors>Sam Harwell, Terence Parr</authors>
|
||||
<owners>Sam Harwell</owners>
|
||||
<description>The C# target of the ANTLR 4 parser generator for Visual Studio 2010+ projects. This package supports projects targeting .NET 2.0 or newer, and built using Visual Studio 2010 or newer.</description>
|
||||
<language>en-us</language>
|
||||
<projectUrl>https://github.com/sharwell/antlr4cs</projectUrl>
|
||||
<licenseUrl>https://raw.github.com/sharwell/antlr4cs/master/LICENSE.txt</licenseUrl>
|
||||
<iconUrl>https://raw.github.com/antlr/website-antlr4/master/images/icons/antlr.png</iconUrl>
|
||||
<copyright>Copyright © Sam Harwell 2014</copyright>
|
||||
<releaseNotes>https://github.com/sharwell/antlr4cs/releases/v$version$</releaseNotes>
|
||||
<requireLicenseAcceptance>true</requireLicenseAcceptance>
|
||||
<developmentDependency>true</developmentDependency>
|
||||
<tags>antlr antlr4 parsing</tags>
|
||||
<title>ANTLR 4</title>
|
||||
<summary>The C# target of the ANTLR 4 parser generator for Visual Studio 2010+ projects.</summary>
|
||||
<dependencies>
|
||||
<dependency id="Antlr4.Runtime" version="$version$" />
|
||||
</dependencies>
|
||||
</metadata>
|
||||
<files>
|
||||
<!-- Tools -->
|
||||
|
||||
<file src="..\tool\target\antlr4-csharp-$CSharpToolVersion$-complete.jar" target="tools"/>
|
||||
|
||||
<!-- Build Configuration -->
|
||||
|
||||
<file src="..\runtime\CSharp\Antlr4BuildTasks\bin\net40\$Configuration$\Antlr4.net40.props" target="build\Antlr4.props"/>
|
||||
<file src="..\runtime\CSharp\Antlr4BuildTasks\bin\net40\$Configuration$\Antlr4.net40.targets" target="build\Antlr4.targets"/>
|
||||
<file src="..\runtime\CSharp\Antlr4BuildTasks\bin\net40\$Configuration$\Antlr4BuildTasks.net40.dll" target="build"/>
|
||||
</files>
|
||||
</package>
|
|
@@ -0,0 +1,139 @@
|
|||
param (
|
||||
[switch]$Debug,
|
||||
[string]$VisualStudioVersion = "12.0",
|
||||
[switch]$NoClean,
|
||||
[string]$Java6Home,
|
||||
[string]$MavenHome,
|
||||
[string]$MavenRepo = "$($env:USERPROFILE)\.m2",
|
||||
[switch]$SkipMaven,
|
||||
[switch]$SkipKeyCheck
|
||||
)
|
||||
|
||||
# build the solutions
|
||||
$SolutionPath = "..\Runtime\CSharp\Antlr4.sln"
|
||||
$CF35SolutionPath = "..\Runtime\CSharp\Antlr4.VS2008.sln"
|
||||
|
||||
# make sure the script was run from the expected path
|
||||
if (!(Test-Path $SolutionPath)) {
|
||||
echo "The script was run from an invalid working directory."
|
||||
exit 1
|
||||
}
|
||||
|
||||
. .\version.ps1
|
||||
|
||||
If ($Debug) {
|
||||
$BuildConfig = 'Debug'
|
||||
} Else {
|
||||
$BuildConfig = 'Release'
|
||||
}
|
||||
|
||||
If ($NoClean) {
|
||||
$Target = 'build'
|
||||
} Else {
|
||||
$Target = 'rebuild'
|
||||
}
|
||||
|
||||
If (-not $MavenHome) {
|
||||
$MavenHome = $env:M2_HOME
|
||||
}
|
||||
|
||||
$Java6RegKey = 'HKLM:\SOFTWARE\JavaSoft\Java Runtime Environment\1.6'
|
||||
$Java6RegValue = 'JavaHome'
|
||||
If (-not $Java6Home -and (Test-Path $Java6RegKey)) {
|
||||
$JavaHomeKey = Get-Item -LiteralPath $Java6RegKey
|
||||
If ($JavaHomeKey.GetValue($Java6RegValue, $null) -ne $null) {
|
||||
$JavaHomeProperty = Get-ItemProperty $Java6RegKey $Java6RegValue
|
||||
$Java6Home = $JavaHomeProperty.$Java6RegValue
|
||||
}
|
||||
}
|
||||
|
||||
# this is configured here for path checking, but also in the .props and .targets files
|
||||
[xml]$pom = Get-Content "..\tool\pom.xml"
|
||||
$CSharpToolVersionNodeInfo = Select-Xml "/mvn:project/mvn:version" -Namespace @{mvn='http://maven.apache.org/POM/4.0.0'} $pom
|
||||
$CSharpToolVersion = $CSharpToolVersionNodeInfo.Node.InnerText.trim()
|
||||
|
||||
# build the main project
|
||||
$msbuild = "$env:windir\Microsoft.NET\Framework64\v4.0.30319\msbuild.exe"
|
||||
|
||||
&$msbuild '/nologo' '/m' '/nr:false' "/t:$Target" "/p:Configuration=$BuildConfig" "/p:VisualStudioVersion=$VisualStudioVersion" $SolutionPath
|
||||
if ($LASTEXITCODE -ne 0) {
|
||||
$host.ui.WriteErrorLine('Build failed, aborting!')
|
||||
exit $p.ExitCode
|
||||
}
|
||||
|
||||
# build the compact framework project
|
||||
$msbuild = "$env:windir\Microsoft.NET\Framework\v4.0.30319\msbuild.exe"
|
||||
|
||||
&$msbuild '/nologo' '/m' '/nr:false' '/t:rebuild' "/p:Configuration=$BuildConfig" $CF35SolutionPath
|
||||
if ($LASTEXITCODE -ne 0) {
|
||||
$host.ui.WriteErrorLine('.NET 3.5 Compact Framework Build failed, aborting!')
|
||||
exit $p.ExitCode
|
||||
}
|
||||
|
||||
if (-not (Test-Path 'nuget')) {
|
||||
mkdir "nuget"
|
||||
}
|
||||
|
||||
# Build the Java library using Maven
|
||||
If (-not $SkipMaven) {
|
||||
$OriginalPath = $PWD
|
||||
|
||||
cd '..\tool'
|
||||
$MavenPath = "$MavenHome\bin\mvn.bat"
|
||||
If (-not (Test-Path $MavenPath)) {
|
||||
$host.ui.WriteErrorLine("Couldn't locate Maven binary: $MavenPath")
|
||||
cd $OriginalPath
|
||||
exit 1
|
||||
}
|
||||
|
||||
If (-not (Test-Path $Java6Home)) {
|
||||
$host.ui.WriteErrorLine("Couldn't locate Java 6 installation: $Java6Home")
|
||||
cd $OriginalPath
|
||||
exit 1
|
||||
}
|
||||
|
||||
$MavenGoal = 'package'
|
||||
&$MavenPath '-DskipTests=true' '--errors' '-e' '-Dgpg.useagent=true' "-Djava6.home=$Java6Home" '-Psonatype-oss-release' $MavenGoal
|
||||
if ($LASTEXITCODE -ne 0) {
|
||||
$host.ui.WriteErrorLine('Maven build of the C# Target custom Tool failed, aborting!')
|
||||
cd $OriginalPath
|
||||
exit $LASTEXITCODE
|
||||
}
|
||||
|
||||
cd $OriginalPath
|
||||
}
|
||||
|
||||
$JarPath = "..\tool\target\antlr4-csharp-$CSharpToolVersion-complete.jar"
|
||||
if (!(Test-Path $JarPath)) {
|
||||
$host.ui.WriteErrorLine("Couldn't locate the complete jar used for building C# parsers: $JarPath")
|
||||
exit 1
|
||||
}
|
||||
|
||||
# By default, do not create a NuGet package unless the expected strong name key files were used
|
||||
if (-not $SkipKeyCheck) {
|
||||
. .\keys.ps1
|
||||
|
||||
foreach ($pair in $Keys.GetEnumerator()) {
|
||||
$assembly = Resolve-FullPath -Path "..\runtime\CSharp\Antlr4.Runtime\bin\$($pair.Key)\$BuildConfig\Antlr4.Runtime.dll"
|
||||
# Run the actual check in a separate process or the current process will keep the assembly file locked
|
||||
powershell -Command ".\check-key.ps1 -Assembly '$assembly' -ExpectedKey '$($pair.Value)' -Build '$($pair.Key)'"
|
||||
if ($LASTEXITCODE -ne 0) {
|
||||
Exit $LASTEXITCODE
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
$packages = @(
|
||||
'Antlr4.Runtime'
|
||||
'Antlr4'
|
||||
'Antlr4.VS2008')
|
||||
|
||||
$nuget = '..\runtime\CSharp\.nuget\NuGet.exe'
|
||||
ForEach ($package in $packages) {
|
||||
If (-not (Test-Path ".\$package.nuspec")) {
|
||||
$host.ui.WriteErrorLine("Couldn't locate NuGet package specification: $package")
|
||||
exit 1
|
||||
}
|
||||
|
||||
&$nuget 'pack' ".\$package.nuspec" '-OutputDirectory' 'nuget' '-Prop' "Configuration=$BuildConfig" '-Version' "$AntlrVersion" '-Prop' "M2_REPO=$M2_REPO" '-Prop' "CSharpToolVersion=$CSharpToolVersion" '-Symbols'
|
||||
}
|
|
@ -0,0 +1,31 @@
|
|||
param(
|
||||
[string]$Assembly,
|
||||
[string]$ExpectedKey,
|
||||
[string]$Build = $null
|
||||
)
|
||||
|
||||
function Get-PublicKeyToken() {
|
||||
param([string]$assembly = $null)
|
||||
if ($assembly) {
|
||||
$bytes = $null
|
||||
$bytes = [System.Reflection.Assembly]::ReflectionOnlyLoadFrom($assembly).GetName().GetPublicKeyToken()
|
||||
if ($bytes) {
|
||||
$key = ""
|
||||
for ($i=0; $i -lt $bytes.Length; $i++) {
|
||||
$key += "{0:x2}" -f $bytes[$i]
|
||||
}
|
||||
|
||||
$key
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (-not $Build) {
|
||||
$Build = $Assembly
|
||||
}
|
||||
|
||||
$actual = Get-PublicKeyToken -assembly $Assembly
|
||||
if ($actual -ne $ExpectedKey) {
|
||||
$host.ui.WriteErrorLine("Invalid publicKeyToken for '$Build'; expected '$ExpectedKey' but found '$actual'")
|
||||
exit 1
|
||||
}
|
|
@ -0,0 +1,17 @@
|
|||
# Note: these values may only change during minor release
|
||||
$Keys = @{
|
||||
'net20' = '7983ae52036899ac'
|
||||
'net30' = '7671200403f6656a'
|
||||
'net35-cf' = '770a97458f51159e'
|
||||
'net35-client' = '4307381ae04f9aa7'
|
||||
'net40-client' = 'bb1075973a9370c4'
|
||||
'net45' = 'edc21c04cf562012'
|
||||
'netcore45' = 'e4e9019902d0b6e2'
|
||||
'portable-net40' = '90bf14da8e1462b4'
|
||||
'portable-net45' = '3d23c8e77559f391'
|
||||
}
|
||||
|
||||
function Resolve-FullPath() {
|
||||
param([string]$Path)
|
||||
[System.IO.Path]::GetFullPath((Join-Path (pwd) $Path))
|
||||
}
|
|
@ -0,0 +1,29 @@
|
|||
. .\version.ps1
|
||||
|
||||
If ($AntlrVersion.EndsWith('-dev')) {
|
||||
$host.ui.WriteErrorLine("Cannot push development version '$AntlrVersion' to NuGet.")
|
||||
Exit 1
|
||||
}
|
||||
|
||||
$packages = @(
|
||||
'Antlr4.Runtime'
|
||||
'Antlr4'
|
||||
'Antlr4.VS2008')
|
||||
|
||||
# Make sure all packages exist before pushing any packages
|
||||
ForEach ($package in $packages) {
|
||||
If (-not (Test-Path ".\nuget\$package.$AntlrVersion.nupkg")) {
|
||||
$host.ui.WriteErrorLine("Couldn't locate NuGet package: $JarPath")
|
||||
exit 1
|
||||
}
|
||||
|
||||
If (-not (Test-Path ".\nuget\$package.$AntlrVersion.symbols.nupkg")) {
|
||||
$host.ui.WriteErrorLine("Couldn't locate NuGet symbols package: $JarPath")
|
||||
exit 1
|
||||
}
|
||||
}
|
||||
|
||||
$nuget = '..\runtime\CSharp\.nuget\NuGet.exe'
|
||||
ForEach ($package in $packages) {
|
||||
&$nuget 'push' ".\nuget\$package.$AntlrVersion.nupkg"
|
||||
}
|
|
@ -0,0 +1 @@
|
|||
$AntlrVersion = "4.5.1"
|
|
@ -55,3 +55,41 @@ YYYY/MM/DD, github id, Full name, email
|
|||
2013/01/29, metadave, Dave Parfitt, diparfitt@gmail.com
|
||||
2013/03/06, bkiers, Bart Kiers, bkiers@gmail.com
|
||||
2013/08/20, cayhorstmann, Cay Horstmann, cay@horstmann.com
|
||||
2014/03/18, aphyr, Kyle Kingsbury, aphyr@aphyr.com
|
||||
2014/06/07, ericvergnaud, Eric Vergnaud, eric.vergnaud@wanadoo.fr
|
||||
2014/07/04, jimidle, Jim Idle, jimi@Idle.ws
|
||||
2014/09/04, jeduden, Jan-Eric Duden, jeduden@gmail.com
|
||||
2014/09/27, petrbel, Petr Bělohlávek, antlr@petrbel.cz
|
||||
2014/10/18, sergiusignacius, Sérgio Silva, serge.a.silva@gmail.com
|
||||
2014/10/26, bdkearns, Brian Kearns, bdkearns@gmail.com
|
||||
2014/10/27, michaelpj, Michael Peyton Jones, michaelpj@gmail.com
|
||||
2015/01/29, TomLottermann, Thomas Lottermann, tomlottermann@gmail.com
|
||||
2015/02/15, pavlo, Pavlo Lysov, pavlikus@gmail.com
|
||||
2015/03/07, RedTailedHawk, Lawrence Parker, larry@answerrocket.com
|
||||
2015/04/03, rljacobson, Robert Jacobson, rljacobson@gmail.com
|
||||
2015/04/06, ojakubcik, Ondrej Jakubcik, ojakubcik@gmail.com
|
||||
2015/04/29, jszheng, Jinshan Zheng, zheng_js@hotmail.com
|
||||
2015/05/08, ViceIce, Michael Kriese, michael.kriese@gmx.de
|
||||
2015/05/09, lkraz, Luke Krasnoff, luke.krasnoff@gmail.com
|
||||
2015/05/12, Pursuit92, Josh Chase, jcjoshuachase@gmail.com
|
||||
2015/05/20, peturingi, Pétur Ingi Egilsson, petur@petur.eu
|
||||
2015/05/27, jcbrinfo, Jean-Christophe Beaupré, jcbrinfo@users.noreply.github.com
|
||||
2015/06/29, jvanzyl, Jason van Zyl, jason@takari.io
|
||||
2015/08/18, krzkaczor, Krzysztof Kaczor, krzysztof@kaczor.io
|
||||
2015/09/18, worsht, Rajiv Subrahmanyam, rajiv.public@gmail.com
|
||||
2015/09/24, HSorensen, Henrik Sorensen, henrik.b.sorensen@gmail.com
|
||||
2015/10/06, brwml, Bryan Wilhelm, bryan.wilhelm@microsoft.com
|
||||
2015/10/08, fedotovalex, Alex Fedotov, me@alexfedotov.com
|
||||
2015/10/12, KvanTTT, Ivan Kochurkin, ivan.kochurkin@gmail.com
|
||||
2015/10/21, martin-probst, Martin Probst, martin-probst@web.de
|
||||
2015/10/21, hkff, Walid Benghabrit, walid.benghabrit@mines-nantes.fr
|
||||
2015/11/12, cooperra, Robbie Cooper, cooperra@users.noreply.github.com
|
||||
2015/11/25, abego, Udo Borkowski, ub@abego.org
|
||||
2015/12/17, sebadur, Sebastian Badur, sebadur@users.noreply.github.com
|
||||
2015/12/23, pboyer, Peter Boyer, peter.b.boyer@gmail.com
|
||||
2015/12/24, dtymon, David Tymon, david.tymon@gmail.com
|
||||
2016/02/18, reitzig, Raphael Reitzig, reitzig[at]cs.uni-kl.de
|
||||
2016/03/10, mike-lischke, Mike Lischke, mike@lischke-online.de
|
||||
2016/03/27, beardlybread, Bradley Steinbacher, bradley.j.steinbacher@gmail.com
|
||||
2016/03/29, msteiger, Martin Steiger, antlr@martin-steiger.de
|
||||
2016/03/28, gagern, Martin von Gagern, gagern@ma.tum.de
|
||||
|
|
|
@ -0,0 +1,8 @@
|
|||
# Integrating ANTLR into Development Systems
|
||||
|
||||
The Java target is the reference implementation mirrored by other targets. The following pages help you integrate ANTLR into development environments and build systems appropriate for your target language. As of January 2015, we have Java, C#, Python 2, Python 3, and JavaScript targets.
|
||||
|
||||
The easiest thing is probably just to use an [ANTLR plug-in](http://www.antlr.org/tools.html) for your favorite development environment.
|
||||
|
||||
Java IDE Integration
|
||||
C# IDE Integration
|
|
@ -0,0 +1,247 @@
|
|||
# Integrating ANTLR JavaScript parsers with ACE editor
|
||||
|
||||
Having the ability to parse code other than JavaScript is great, but nowadays users expect to be able to edit code with nice edit features such as keyword highlighting, indentation and brace matching, and advanced ones such as syntax checking.
|
||||
|
||||
I have been through the process of integrating an ANTLR parser with ACE, the dominant code editor for web-based code editing. Information about ACE can be found on its web site.
|
||||
|
||||
This page describes my experience, and humbly aims to help you get started. It is not however a reference guide, and no support is provided.
|
||||
|
||||
## Architecture
|
||||
|
||||
The ACE editor is organized as follows
|
||||
|
||||
1. The editor itself is a `<div>` which, once initialized, comprises a number of elements. This UI element is responsible for the display and the generation of edit events.
|
||||
1. The editor relies on a Session, which manages events and configuration.
|
||||
1. The code itself is stored in a Document. Any insertion or deletion of text is reflected in the Document.
|
||||
1. Keyword highlighting, indentation and brace matching are delegated to a mode. There is no direct equivalent of an ACE mode in ANTLR. While keywords are the equivalent of ANTLR lexer tokens, indentation and brace matching are edit tasks, not parsing ones. A given ACE editor can only have one mode, which corresponds to the language being edited. There is no need for ANTLR integration to support keyword highlighting, indentation and brace matching.
|
||||
1. Syntax checking is delegated to a worker. This is where ANTLR integration is needed. If syntax checking is enabled, ACE asks the mode to create a worker. In JavaScript, workers run in complete isolation i.e. they don't share code or variables with other workers, or with the HTML page itself.
|
||||
1. The diagram below describes how the whole system works. In green are the components *you* need to provide. You'll notice that there is no need to load ANTLR in the HTML page itself. You'll also notice that ACE maintains a document in each thread. This is done through low-level events sent by the ACE session to the worker which describe the delta. Once applied to the worker document, a high-level event is triggered, which is easy to handle since at this point the worker document is a perfect copy of the UI document.
|
||||
|
||||
<img src=images/ACE-Architecture.001.png>
|
||||
|
||||
## Step-by-step guide
|
||||
|
||||
The first thing to do is to create an editor in your html page. This is thoroughly described in the ACE documentation, so we'll just sum it up here:
|
||||
|
||||
```xml
|
||||
<script src="../js/ace/ace.js" type="text/javascript" charset="utf-8"></script>
|
||||
<script>
|
||||
var editor = ace.edit("editor");
|
||||
</script>
|
||||
```
|
||||
|
||||
This should give you a working editor. You may want to control its sizing using CSS. I personally load the editor in an iframe and set its style to position: absolute, top: 0, left: 0 etc... but I'm sure you know better than me how to achieve results.
|
||||
|
||||
The second thing to do is to configure the ACE editor to use your mode, i.e., your language configuration. A good place to start is to inherit from the built-in TextMode. The following is a very simple example, which only caters for comments, literals, and a limited subset of separators and keywords:
|
||||
|
||||
```javascript
|
||||
ace.define('ace/mode/my-mode',["require","exports","module","ace/lib/oop","ace/mode/text","ace/mode/text_highlight_rules", "ace/worker/worker_client" ], function(require, exports, module) {
|
||||
var oop = require("ace/lib/oop");
|
||||
var TextMode = require("ace/mode/text").Mode;
|
||||
var TextHighlightRules = require("ace/mode/text_highlight_rules").TextHighlightRules;
|
||||
|
||||
var MyHighlightRules = function() {
|
||||
var keywordMapper = this.createKeywordMapper({
|
||||
"keyword.control": "if|then|else",
|
||||
"keyword.operator": "and|or|not",
|
||||
"keyword.other": "class",
|
||||
"storage.type": "int|float|text",
|
||||
"storage.modifier": "private|public",
|
||||
"support.function": "print|sort",
|
||||
"constant.language": "true|false"
|
||||
}, "identifier");
|
||||
this.$rules = {
|
||||
"start": [
|
||||
{ token : "comment", regex : "//" },
|
||||
{ token : "string", regex : '["](?:(?:\\\\.)|(?:[^"\\\\]))*?["]' },
|
||||
{ token : "constant.numeric", regex : "0[xX][0-9a-fA-F]+\\b" },
|
||||
{ token : "constant.numeric", regex: "[+-]?\\d+(?:(?:\\.\\d*)?(?:[eE][+-]?\\d+)?)?\\b" },
|
||||
{ token : "keyword.operator", regex : "!|%|\\\\|/|\\*|\\-|\\+|~=|==|<>|!=|<=|>=|=|<|>|&&|\\|\\|" },
|
||||
{ token : "punctuation.operator", regex : "\\?|\\:|\\,|\\;|\\." },
|
||||
{ token : "paren.lparen", regex : "[[({]" },
|
||||
{ token : "paren.rparen", regex : "[\\])}]" },
|
||||
{ token : "text", regex : "\\s+" },
|
||||
{ token: keywordMapper, regex: "[a-zA-Z_$][a-zA-Z0-9_$]*\\b" }
|
||||
]
|
||||
};
|
||||
};
|
||||
oop.inherits(MyHighlightRules, TextHighlightRules);
|
||||
|
||||
var MyMode = function() {
|
||||
this.HighlightRules = MyHighlightRules;
|
||||
};
|
||||
oop.inherits(MyMode, TextMode);
|
||||
|
||||
(function() {
|
||||
|
||||
this.$id = "ace/mode/my-mode";
|
||||
|
||||
}).call(MyMode.prototype);
|
||||
|
||||
exports.Mode = MyMode;
|
||||
});
|
||||
```
|
||||
|
||||
Now if you store the above in a file called "my-mode.js", setting the ACE Editor becomes straightforward:
|
||||
|
||||
```xml
|
||||
<script src="../js/ace/ace.js" type="text/javascript" charset="utf-8"></script>
|
||||
<script src="../js/my-mode.js" type="text/javascript" charset="utf-8"></script>
|
||||
<script>
|
||||
var editor = ace.edit("editor");
|
||||
editor.getSession().setMode("ace/mode/my-mode");
|
||||
</script>
|
||||
```
|
||||
|
||||
At this point you should have a working editor, able to highlight keywords. You may wonder why you need to set the tokens when you have already done so in your ANTLR lexer grammar. First, ACE expects a classification (control, operator, type...) which does not exist in ANTLR. Second, there is no need for ANTLR to achieve this, since ACE comes with its own lexer.
|
||||
|
||||
OK, now that we have a working editor, it's time to add syntax validation. This is where the worker comes into the picture.
|
||||
|
||||
Creating the worker is the responsibility of the mode you provide. So you need to enhance it with something like the following:
|
||||
|
||||
```javascript
|
||||
var WorkerClient = require("ace/worker/worker_client").WorkerClient;
|
||||
this.createWorker = function(session) {
|
||||
this.$worker = new WorkerClient(["ace"], "ace/worker/my-worker", "MyWorker", "../js/my-worker.js");
|
||||
this.$worker.attachToDocument(session.getDocument());
|
||||
|
||||
this.$worker.on("errors", function(e) {
|
||||
session.setAnnotations(e.data);
|
||||
});
|
||||
|
||||
this.$worker.on("annotate", function(e) {
|
||||
session.setAnnotations(e.data);
|
||||
});
|
||||
|
||||
this.$worker.on("terminate", function() {
|
||||
session.clearAnnotations();
|
||||
});
|
||||
|
||||
return this.$worker;
|
||||
|
||||
};
|
||||
```
|
||||
|
||||
The above code needs to be placed in the existing mode, after:
|
||||
|
||||
```javascript
|
||||
this.$id = "ace/mode/my-mode";
|
||||
```
|
||||
|
||||
Please note that the mode code runs on the UI side, not the worker side. The event handlers here are for events sent by the worker, not to the worker.
|
||||
|
||||
Obviously the above won't work out of the box, because you need to provide the "my-worker.js" file.
|
||||
|
||||
Creating a worker from scratch is not something I've tried. Simply put, your worker needs to handle all messages sent by ACE using the WorkerClient created by the mode. This is not a simple task, and is better delegated to existing ACE code, so we can focus on tasks specific to our language.
|
||||
|
||||
What I did is I started from "mode-json.js", a rather simple worker which comes with ACE, stripped all JSON validation related stuff out of it, and saved the remaining code in a file named "worker-base.js", which you can find [here](resources/worker-base.js). Once this was done, I was able to create a simple worker, as follows:
|
||||
|
||||
```javascript
|
||||
importScripts("worker-base.js");
|
||||
ace.define('ace/worker/my-worker',["require","exports","module","ace/lib/oop","ace/worker/mirror"], function(require, exports, module) {
|
||||
"use strict";
|
||||
|
||||
var oop = require("ace/lib/oop");
|
||||
var Mirror = require("ace/worker/mirror").Mirror;
|
||||
|
||||
var MyWorker = function(sender) {
|
||||
Mirror.call(this, sender);
|
||||
this.setTimeout(200);
|
||||
this.$dialect = null;
|
||||
};
|
||||
|
||||
oop.inherits(MyWorker, Mirror);
|
||||
|
||||
(function() {
|
||||
|
||||
this.onUpdate = function() {
|
||||
var value = this.doc.getValue();
|
||||
var annotations = validate(value);
|
||||
this.sender.emit("annotate", annotations);
|
||||
};
|
||||
|
||||
}).call(MyWorker.prototype);
|
||||
|
||||
exports.MyWorker = MyWorker;
|
||||
});
|
||||
|
||||
var validate = function(input) {
|
||||
return [ { row: 0, column: 0, text: "MyMode says Hello!", type: "error" } ];
|
||||
};
|
||||
```
|
||||
|
||||
At this point, you should have an editor which displays an error icon next to the first line. When you hover over the error icon, it should display: MyMode says Hello!. Is that not a friendly worker? Yum.
|
||||
|
||||
What remains to be done is to have our validate function actually validate the input. Finally, ANTLR comes into the picture!
|
||||
|
||||
To start with, let's load ANTLR and your parser, listener, etc. Easy, since you could write:
|
||||
|
||||
```js
|
||||
var antlr4 = require('antlr4/index');
|
||||
```
|
||||
|
||||
This may work, but it's actually unreliable. The reason is that the require function used by ANTLR, which exactly mimics the NodeJS require function, uses a different syntax than the require function that comes with ACE. So we need to bring in a require function that conforms to the NodeJS syntax. I personally use one that comes from Torben Haase's Honey project, which you can find here. But hey, now we're going to have two 'require' functions that are not compatible with each other! Indeed, this is why you need to take special care, as follows:
|
||||
|
||||
```js
|
||||
// load nodejs compatible require
|
||||
var ace_require = require;
|
||||
require = undefined;
|
||||
var Honey = { 'requirePath': ['..'] }; // walk up to js folder, see Honey docs
|
||||
importScripts("../lib/require.js");
|
||||
var antlr4_require = require;
|
||||
require = ace_require;
|
||||
```
|
||||
Now it's safe to load ANTLR, and the parsers generated for your language. Assuming that your language files (generated or hand-built) are in a folder with an index.js file that calls require for each file, your parser loading code can be as simple as follows:
|
||||
```js
|
||||
// load antlr4 and myLanguage
|
||||
var antlr4, mylanguage;
|
||||
try {
|
||||
require = antlr4_require;
|
||||
antlr4 = require('antlr4/index');
|
||||
mylanguage = require('mylanguage/index');
|
||||
} finally {
|
||||
require = ace_require;
|
||||
}
|
||||
```
|
||||
Please note the try-finally construct. ANTLR uses 'require' synchronously so it's perfectly safe to ignore the ACE 'require' while running ANTLR code. ACE itself does not guarantee synchronous execution, so you are much safer always switching 'require' back to 'ace_require'.
|
||||
Now detecting deep syntax errors in your code is a task for your ANTLR listener or visitor or whatever piece of code you've delegated this to. We're not going to describe this here, since it would require some knowledge of your language. However, detecting grammar syntax errors is something ANTLR does beautifully (isn't that why you went for ANTLR in the first place?). So what we will illustrate here is how to report grammar syntax errors. I have no doubt that from there, you will be able to extend the validator to suit your specific needs.
|
||||
Whenever ANTLR encounters an unexpected token, it fires an error. By default, the error is routed to an error listener which simply writes to the console.
|
||||
What we need to do is replace this listener with our own listener, so we can route errors to the ACE editor. First, let's create such a listener:
|
||||
```js
|
||||
// class for gathering errors and posting them to ACE editor
|
||||
var AnnotatingErrorListener = function(annotations) {
|
||||
antlr4.error.ErrorListener.call(this);
|
||||
this.annotations = annotations;
|
||||
return this;
|
||||
};
|
||||
|
||||
AnnotatingErrorListener.prototype = Object.create(antlr4.error.ErrorListener.prototype);
|
||||
AnnotatingErrorListener.prototype.constructor = AnnotatingErrorListener;
|
||||
|
||||
AnnotatingErrorListener.prototype.syntaxError = function(recognizer, offendingSymbol, line, column, msg, e) {
|
||||
this.annotations.push({
|
||||
row: line - 1,
|
||||
column: column,
|
||||
text: msg,
|
||||
type: "error"
|
||||
});
|
||||
};
|
||||
```
|
||||
With this, all that remains to be done is plug the listener in when we parse the code. Here is how I do it:
|
||||
```js
|
||||
var validate = function(input) {
|
||||
var stream = new antlr4.InputStream(input);
|
||||
var lexer = new mylanguage.MyLexer(stream);
|
||||
var tokens = new antlr4.CommonTokenStream(lexer);
|
||||
var parser = new mylanguage.MyParser(tokens);
|
||||
var annotations = [];
|
||||
var listener = new AnnotatingErrorListener(annotations);
|
||||
parser.removeErrorListeners();
|
||||
parser.addErrorListener(listener);
|
||||
parser.parseMyRule();
|
||||
return annotations;
|
||||
};
|
||||
```
|
||||
You know what? That's it! You now have an ACE editor that does syntax validation using ANTLR! I hope you find this useful, and simple enough to get started.
|
||||
What I did not address here is packaging, not something I'm an expert at. The good news is that it makes development simple, since I don't have to run any compilation process. I just edit my code, reload my editor page, and check how it goes.
|
||||
Now wait, hey! How do you debug this? Well, as usual, using Chrome, since neither Firefox nor Safari is able to debug worker code. What a shame...
|
|
@ -0,0 +1,204 @@
|
|||
# Actions and Attributes
|
||||
|
||||
In Chapter 10, Attributes and Actions, we learned how to embed actions within grammars and looked at the most common token and rule attributes. This section summarizes the important syntax and semantics from that chapter and provides a complete list of all available attributes. (You can learn more about actions in the grammar from the free excerpt on listeners and actions.)
|
||||
|
||||
Actions are blocks of text written in the target language and enclosed in curly braces. The recognizer triggers them according to their locations within the grammar. For example, the following rule emits "found a decl" after the parser has seen a valid declaration:
|
||||
|
||||
```
|
||||
decl: type ID ';' {System.out.println("found a decl");} ;
|
||||
type: 'int' | 'float' ;
|
||||
```
|
||||
|
||||
Most often, actions access the attributes of tokens and rule references:
|
||||
|
||||
```
|
||||
decl: type ID ';'
|
||||
{System.out.println("var "+$ID.text+":"+$type.text+";");}
|
||||
| t=ID id=ID ';'
|
||||
{System.out.println("var "+$id.text+":"+$t.text+";");}
|
||||
;
|
||||
```
|
||||
|
||||
## Token Attributes
|
||||
|
||||
All tokens have a collection of predefined, read-only attributes. The attributes include useful token properties such as the token type and text matched for a token. Actions can access these attributes via `$label.attribute`, where `label` labels a particular instance of a token reference (`a` and `b` in the example below are used in the action code as `$a` and `$b`). Often, a particular token is only referenced once in the rule, in which case the token name itself can be used unambiguously in the action code (token `INT` can be used as `$INT` in the action). The following example illustrates token attribute expression syntax:
|
||||
|
||||
```
|
||||
r : INT {int x = $INT.line;}
|
||||
( ID {if ($INT.line == $ID.line) ...;} )?
|
||||
a=FLOAT b=FLOAT {if ($a.line == $b.line) ...;}
|
||||
;
|
||||
```
|
||||
|
||||
The action within the `(...)?` subrule can see the `INT` token matched before it in the outer level.
|
||||
|
||||
Because there are two references to the `FLOAT` token, a reference to `$FLOAT` in an action is not unique; you must use labels to specify which token reference you’re interested in.
|
||||
|
||||
Token references within different alternatives are unique because only one of them can be matched for any invocation of the rule. For example, in the following rule, actions in both alternatives can reference $ID directly without using a label:
|
||||
|
||||
```
|
||||
r : ... ID {System.out.println($ID.text);}
|
||||
| ... ID {System.out.println($ID.text);}
|
||||
;
|
||||
```
|
||||
|
||||
To access the tokens matched for literals, you must use a label:
|
||||
|
||||
```
|
||||
stat: r='return' expr ';' {System.out.println("line="+$r.line);} ;
|
||||
```
|
||||
|
||||
Most of the time you access the attributes of the token, but sometimes it is useful to access the Token object itself because it aggregates all the attributes. Further, you can use it to test whether an optional subrule matched a token:
|
||||
|
||||
```
|
||||
stat: 'if' expr 'then' stat (el='else' stat)?
|
||||
{if ( $el!=null ) System.out.println("found an else");}
|
||||
| ...
|
||||
;
|
||||
```
|
||||
|
||||
`$T` and `$L` evaluate to `Token` objects for token name `T` and token label `L`. `$ll` evaluates to `List<Token>` for list label `ll`. `$T.attr` evaluates to the type and value specified in the following table for attribute `attr`:
|
||||
|
||||
|
||||
|Attribute|Type|Description|
|
||||
|---------|----|-----------|
|
||||
|text|String|The text matched for the token; translates to a call to getText. Example: $ID.text.|
|
||||
|type|int|The token type (nonzero positive integer) of the token such as INT; translates to a call to getType. Example: $ID.type.|
|
||||
|line|int|The line number on which the token occurs, counting from 1; translates to a call to getLine. Example: $ID.line.|
|
||||
|pos|int|The character position within the line at which the token’s first character occurs, counting from zero; translates to a call to getCharPositionInLine. Example: $ID.pos.|
|
||||
|index|int|The overall index of this token in the token stream, counting from zero; translates to a call to getTokenIndex. Example: $ID.index.|
|
||||
|channel|int|The token’s channel number. The parser tunes to only one channel, effectively ignoring off-channel tokens. The default channel is 0 (Token.DEFAULT_CHANNEL), and the default hidden channel is Token.HIDDEN_CHANNEL. Translates to a call to getChannel. Example: $ID.channel.|
|
||||
|int|int|The integer value of the text held by this token; it assumes that the text is a valid numeric string. Handy for building calculators and so on. Translates to Integer.valueOf(text-of-token). Example: $INT.int.|
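
As mentioned above, a token list label such as `ids+=ID` gathers all of the tokens it matches into a `List<Token>` that actions can iterate. The rule below is a minimal sketch (the rule name `idList` is made up for illustration); `$ids` refers to the generated token list:

```
idList : ids+=ID (',' ids+=ID)*
         {for (Token t : $ids) System.out.println(t.getText());}
       ;
```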
|
||||
|
||||
## Parser Rule Attributes
|
||||
|
||||
ANTLR predefines a number of read-only attributes associated with parser rule references that are available to actions. Actions can access rule attributes only for references that precede the action. The syntax is `$r.attr` for rule name `r` or a label assigned to a rule reference. For example, `$expr.text` returns the complete text matched by a preceding invocation of rule `expr`:
|
||||
|
||||
```
|
||||
returnStat : 'return' expr {System.out.println("matched "+$expr.text);} ;
|
||||
```
|
||||
|
||||
Using a rule label looks like this:
|
||||
|
||||
```
|
||||
returnStat : 'return' e=expr {System.out.println("matched "+e.text);} ;
|
||||
```
|
||||
|
||||
You can also use `$` followed by the name of the attribute to access the value associated with the currently executing rule. For example, `$start` is the starting token of the current rule.
|
||||
|
||||
```
|
||||
returnStat : 'return' expr {System.out.println("first token "+$start.getText());} ;
|
||||
```
|
||||
|
||||
`$r` and `$rl` evaluate to `ParserRuleContext` objects of type `RContext` for rule name `r` and rule label `rl`. `$rll` evaluates to `List<RContext>` for rule list label `rll`. `$r.attr` evaluates to the type and value specified in the following table for attribute `attr`:
|
||||
|
||||
|Attribute|Type|Description|
|
||||
|---------|----|-----------|
|
||||
|text|String|The text matched for a rule or the text matched from the start of the rule up until the point of the `$text` expression evaluation. Note that this includes the text for all tokens including those on hidden channels, which is what you want because usually that has all the whitespace and comments. When referring to the current rule, this attribute is available in any action including any exception actions.|
|
||||
|start|Token|The first token to be potentially matched by the rule that is on the main token channel; in other words, this attribute is never a hidden token. For rules that end up matching no tokens, this attribute points at the first token that could have been matched by this rule. When referring to the current rule, this attribute is available to any action within the rule.|
|
||||
|stop|Token|The last nonhidden channel token to be matched by the rule. When referring to the current rule, this attribute is available only to the after and finally actions.|
|
||||
|ctx|ParserRuleContext|The rule context object associated with a rule invocation. All of the other attributes are available through this attribute. For example, `$ctx.start` accesses the start field within the current rules context object. It’s the same as `$start`.|
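
Similarly, a rule list label gathers the context objects of all the rule references it matches. The rule below is a minimal sketch (the rule names `args` and `expr` are made up for illustration); `$es` refers to the generated `List<ExprContext>`:

```
args : es+=expr (',' es+=expr)*
       {System.out.println("matched "+$es.size()+" expressions");}
     ;
```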
|
||||
|
||||
## Dynamically-Scoped Attributes
|
||||
|
||||
You can pass information to and from rules using parameters and return values, just like functions in a general-purpose programming language. Programming languages don’t allow functions to access the local variables or parameters of invoking functions, however. For example, the following reference to local variable `x` from a nested method call is illegal in Java:
|
||||
|
||||
```java
|
||||
void f() {
|
||||
int x = 0;
|
||||
g();
|
||||
}
|
||||
void g() {
|
||||
h();
|
||||
}
|
||||
void h() {
|
||||
int y = x; // INVALID reference to f's local variable x
|
||||
}
|
||||
```
|
||||
|
||||
Variable x is available only within the scope of f, which is the text lexically delimited by curly brackets. For this reason, Java is said to use lexical scoping. Lexical scoping is the norm for most programming languages. Languages that allow methods further down in the call chain to access local variables defined earlier are said to use dynamic scoping. The term dynamic refers to the fact that a compiler cannot statically determine the set of visible variables. This is because the set of variables visible to a method changes depending on who calls that method.
|
||||
|
||||
It turns out that, in the grammar realm, distant rules sometimes need to communicate with each other, mostly to provide context information to rules matched below in the rule invocation chain. (Naturally, this assumes that you are using actions directly in the grammar instead of the parse-tree listener event mechanism.) ANTLR allows dynamic scoping in that actions can access attributes from invoking rules using syntax `$r::x` where `r` is a rule name and `x` is an attribute within that rule. It is up to the programmer to ensure that `r` is in fact an invoking rule of the current rule. A runtime exception occurs if `r` is not in the current call chain when you access `$r::x`.
|
||||
|
||||
To illustrate the use of dynamic scoping, consider the real problem of defining variables and ensuring that variables in expressions are defined. The following grammar defines the symbols attribute where it belongs in the block rule but adds variable names to it in rule `decl`. Rule `stat` then consults the list to see whether variables have been defined.
|
||||
|
||||
```
|
||||
grammar DynScope;
|
||||
|
||||
prog: block ;
|
||||
|
||||
block
|
||||
/* List of symbols defined within this block */
|
||||
locals [
|
||||
List<String> symbols = new ArrayList<String>()
|
||||
]
|
||||
: '{' decl* stat+ '}'
|
||||
// print out all symbols found in block
|
||||
// $block::symbols evaluates to a List as defined in scope
|
||||
{System.out.println("symbols="+$symbols);}
|
||||
;
|
||||
|
||||
/** Match a declaration and add identifier name to list of symbols */
|
||||
decl: 'int' ID {$block::symbols.add($ID.text);} ';' ;
|
||||
|
||||
/** Match an assignment then test list of symbols to verify
|
||||
* that it contains the variable on the left side of the assignment.
|
||||
* Method contains() is List.contains() because $block::symbols
|
||||
* is a List.
|
||||
*/
|
||||
stat: ID '=' INT ';'
|
||||
{
|
||||
if ( !$block::symbols.contains($ID.text) ) {
|
||||
System.err.println("undefined variable: "+$ID.text);
|
||||
}
|
||||
}
|
||||
| block
|
||||
;
|
||||
|
||||
ID : [a-z]+ ;
|
||||
INT : [0-9]+ ;
|
||||
WS : [ \t\r\n]+ -> skip ;
|
||||
```
|
||||
|
||||
Here’s a simple build and test sequence:
|
||||
|
||||
```bash
|
||||
$ antlr4 DynScope.g4
|
||||
$ javac DynScope*.java
|
||||
$ grun DynScope prog
|
||||
=> {
|
||||
=> int i;
|
||||
=> i = 0;
|
||||
=> j = 3;
|
||||
=> }
|
||||
=> EOF
|
||||
<= undefined variable: j
|
||||
symbols=[i]
|
||||
```
|
||||
|
||||
There’s an important difference between a simple field declaration in a `@members` action and dynamic scoping. `symbols` is a local variable, and so there is a copy for each invocation of rule `block`. That’s exactly what we want for nested blocks so that we can reuse the same input variable name in an inner block. For example, the following nested code block redefines `i` in the inner scope. This new definition must hide the definition in the outer scope.
|
||||
|
||||
```
|
||||
{
|
||||
int i;
|
||||
int j;
|
||||
i = 0;
|
||||
{
|
||||
int i;
|
||||
int x;
|
||||
x = 5;
|
||||
}
|
||||
x = 3;
|
||||
}
|
||||
```
|
||||
|
||||
Here’s the output generated for that input by DynScope:
|
||||
|
||||
```bash
|
||||
$ grun DynScope prog nested-input
|
||||
symbols=[i, x]
|
||||
undefined variable: x
|
||||
symbols=[i, j]
|
||||
```
|
||||
|
||||
Referencing `$block::symbols` accesses the `symbols` field of the most recently invoked `block`’s rule context object. If you need access to a symbols instance from a rule invocation farther up the call chain, you can walk backwards starting at the current context, `$ctx`. Use `getParent` to walk up the chain.
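
As a rough sketch (assuming the `DynScope` grammar above, for which ANTLR generates a `BlockContext` class for rule `block`), an embedded action could walk up from `$ctx` to the nearest enclosing `block` invocation like this:

```java
// Embedded action code (Java): walk parent contexts until an enclosing block is found.
ParserRuleContext p = $ctx;
while ( p!=null && !(p instanceof BlockContext) ) {
    p = p.getParent();
}
if ( p!=null ) {
    List<String> enclosingSymbols = ((BlockContext)p).symbols;
    System.out.println("enclosing symbols="+enclosingSymbols);
}
```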
|
|
@ -0,0 +1,119 @@
|
|||
# Adding unit tests
|
||||
|
||||
## Generating Runtime Tests
|
||||
|
||||
Because ANTLR supports multiple target languages, the unit tests are broken into two groups: the unit tests that test the tool itself (in `tool-testsuite`) and the unit tests that test the parser runtimes (in `antlr4/runtime-testsuite`). To avoid a lot of cut-and-paste, we generate all **runtime** tests from a set of templates using [runtime-testsuite/src/org/antlr/v4/testgen/TestGenerator.java](../runtime-testsuite/src/org/antlr/v4/testgen/TestGenerator.java). The `mvn` command is simple to use:
|
||||
|
||||
```
|
||||
$ cd ~/antlr/code/antlr4/runtime-testsuite
|
||||
$ mvn -Pgen generate-test-sources
|
||||
...
|
||||
rootDir = /Users/parrt/antlr/code/antlr4/runtime-testsuite
|
||||
outputDir = /Users/parrt/antlr/code/antlr4/runtime-testsuite/test
|
||||
templates = /Users/parrt/antlr/code/antlr4/runtime-testsuite/resources/org/antlr/v4/test/runtime/templates
|
||||
target = ALL
|
||||
browsers = false
|
||||
viz = false
|
||||
```
|
||||
|
||||
It basically runs the Java program:
|
||||
|
||||
```bash
|
||||
$ java org.antlr.v4.testgen.TestGenerator \
|
||||
-root ~/antlr/code/antlr4/runtime-testsuite \
|
||||
-outdir ~/antlr/code/antlr4/runtime-testsuite/test \
|
||||
-templates ~/antlr/code/antlr4/runtime-testsuite/resources/org/antlr/v4/test/runtime/templates
|
||||
```
|
||||
|
||||
## Adding a runtime test
|
||||
|
||||
For each target, you will find an `Index.stg` file with a dictionary of all test groups. E.g., `runtime-testsuite/resources/org/antlr/v4/test/runtime/templates/Index.stg` looks like:
|
||||
|
||||
```
|
||||
TestFolders ::= [
|
||||
"CompositeLexers": [],
|
||||
"CompositeParsers": [],
|
||||
"FullContextParsing": [],
|
||||
"LeftRecursion": [],
|
||||
"LexerErrors": [],
|
||||
"LexerExec": [],
|
||||
"Listeners": [],
|
||||
"ParserErrors": [],
|
||||
"ParserExec": [],
|
||||
"ParseTrees": [],
|
||||
"Performance": [],
|
||||
"SemPredEvalLexer": [],
|
||||
"SemPredEvalParser": [],
|
||||
"Sets": []
|
||||
]
|
||||
```
|
||||
|
||||
Then each group has a subdirectory with another index. E.g., `Sets/Index.stg` looks like:
|
||||
|
||||
```
|
||||
TestTemplates ::= [
|
||||
"SeqDoesNotBecomeSet": [],
|
||||
"ParserSet": [],
|
||||
"ParserNotSet": [],
|
||||
"ParserNotToken": [],
|
||||
"ParserNotTokenWithLabel": [],
|
||||
"RuleAsSet": [],
|
||||
"NotChar": [],
|
||||
"OptionalSingleElement": [],
|
||||
...
|
||||
```
|
||||
|
||||
For every name mentioned, you will find a `.stg` file with the actual test. E.g., `Sets/StarSet.stg`:
|
||||
|
||||
```
|
||||
TestType() ::= "Parser"
|
||||
|
||||
Options ::= [
|
||||
"Debug": false
|
||||
]
|
||||
|
||||
Grammar ::= [
|
||||
"T": {<grammar("T")>}
|
||||
]
|
||||
|
||||
Input() ::= "abaac"
|
||||
|
||||
Rule() ::= "a"
|
||||
|
||||
Output() ::= <<
|
||||
abaac<\n>
|
||||
>>
|
||||
|
||||
Errors() ::= ""
|
||||
|
||||
grammar(grammarName) ::= <<
|
||||
grammar <grammarName>;
|
||||
a : ('a'|'b')* 'c' {<InputText():writeln()>} ;
|
||||
>>
|
||||
```
|
||||
|
||||
### Cross-language actions embedded within grammars
|
||||
|
||||
To get:
|
||||
|
||||
```
|
||||
System.out.println($set.stop);
|
||||
```
|
||||
|
||||
Use instead the language-neutral:
|
||||
|
||||
```
|
||||
<writeln("$set.stop")>
|
||||
```
|
||||
|
||||
File `runtime-testsuite/resources/org/antlr/v4/test/runtime/java/Java.test.stg` has templates like:
|
||||
|
||||
```
|
||||
writeln(s) ::= <<System.out.println(<s>);>>
|
||||
```
|
||||
|
||||
## Adding an ANTLR tool unit test
|
||||
|
||||
Just go into the appropriate Java test class in dir `antlr4/tool-testsuite/test/org/antlr/v4/test/tool` and add your unit test.
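
For example, a minimal tool unit test might look like the following sketch (the class name `TestMyFeature` and the specific check are made up for illustration; real tests in that directory typically extend the existing base test classes and use their helpers):

```java
package org.antlr.v4.test.tool;

import org.antlr.v4.tool.Grammar;
import org.junit.Test;
import static org.junit.Assert.assertTrue;

public class TestMyFeature {
    @Test
    public void testGrammarDefinesRule() throws Exception {
        // Build a Grammar object in memory and verify that rule 'a' was defined.
        Grammar g = new Grammar("grammar T;\n" + "a : 'x' ;\n");
        assertTrue(g.rules.containsKey("a"));
    }
}
```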
|
||||
|
||||
|
|
@ -0,0 +1,125 @@
|
|||
# Building ANTLR
|
||||
|
||||
Most programmers do not need the information on this page because they will simply download the appropriate jar(s) or use ANTLR through maven (via ANTLR's antlr4-maven-plugin). If you would like to fork the project and fix bugs or tweak the runtime code generation, then you will almost certainly need to build ANTLR itself. There are two components:
|
||||
|
||||
1. the tool that compiles grammars down into parsers and lexers in one of the target languages
|
||||
1. the runtime used by those generated parsers and lexers.
|
||||
|
||||
I will assume that the root directory is `/tmp` for the purposes of explaining how to build ANTLR in this document.
|
||||
|
||||
# Get the source
|
||||
|
||||
The first step is to get the Java source code from the ANTLR 4 repository at GitHub. You can download the repository from GitHub, but the easiest thing to do is simply clone the repository on your local disk:
|
||||
|
||||
```bash
|
||||
$ cd /tmp
|
||||
/tmp $ git clone git@github.com:antlr/antlr4.git
|
||||
Cloning into 'antlr4'...
|
||||
remote: Counting objects: 43273, done.
|
||||
remote: Compressing objects: 100% (57/57), done.
|
||||
remote: Total 43273 (delta 26), reused 0 (delta 0)
|
||||
Receiving objects: 100% (43273/43273), 18.76 MiB | 1.60 MiB/s, done.
|
||||
Resolving deltas: 100% (22419/22419), done.
|
||||
Checking connectivity... done.
|
||||
```
|
||||
|
||||
# Compile
|
||||
|
||||
```bash
|
||||
$ cd /tmp
|
||||
$ git clone git@github.com:antlr/antlr4.git
|
||||
Cloning into 'antlr4'...
|
||||
remote: Counting objects: 59858, done.
|
||||
remote: Compressing objects: 100% (57/57), done.
|
||||
remote: Total 59858 (delta 28), reused 9 (delta 9), pack-reused 59786
|
||||
Receiving objects: 100% (59858/59858), 31.10 MiB | 819.00 KiB/s, done.
|
||||
Resolving deltas: 100% (31898/31898), done.
|
||||
Checking connectivity... done.
|
||||
$ cd antlr4
|
||||
$ mvn compile
|
||||
..
|
||||
[INFO] Reactor Summary:
|
||||
[INFO]
|
||||
[INFO] ANTLR 4 ............................................ SUCCESS [ 0.447 s]
|
||||
[INFO] ANTLR 4 Runtime .................................... SUCCESS [ 3.113 s]
|
||||
[INFO] ANTLR 4 Tool ....................................... SUCCESS [ 14.408 s]
|
||||
[INFO] ANTLR 4 Maven plugin ............................... SUCCESS [ 1.276 s]
|
||||
[INFO] ANTLR 4 Runtime Test Generator ..................... SUCCESS [ 0.773 s]
|
||||
[INFO] ANTLR 4 Tool Tests ................................. SUCCESS [ 6.920 s]
|
||||
[INFO] ------------------------------------------------------------------------
|
||||
[INFO] BUILD SUCCESS
|
||||
...
|
||||
```
|
||||
|
||||
# Testing tool and targets
|
||||
|
||||
In order to perform the tests on all target languages, make sure that you have `mono` and `nodejs` installed. For example, on OS X:
|
||||
|
||||
```bash
|
||||
$ brew install mono
|
||||
$ brew install node
|
||||
```
|
||||
|
||||
To run the tests and **install into local repository** `~/.m2/repository/org/antlr`, do this:
|
||||
|
||||
```bash
|
||||
$ mvn install
|
||||
...
|
||||
-------------------------------------------------------
|
||||
T E S T S
|
||||
-------------------------------------------------------
|
||||
Running org.antlr.v4.test.runtime.csharp.TestCompositeLexers
|
||||
dir /var/folders/s1/h3qgww1x0ks3pb30l8t1wgd80000gn/T/TestCompositeLexers-1446068612451
|
||||
Starting build /usr/bin/xbuild /p:Configuration=Release /var/folders/s1/h3qgww1x0ks3pb30l8t1wgd80000gn/T/TestCompositeLexers-1446068612451/Antlr4.Test.mono.csproj
|
||||
dir /var/folders/s1/h3qgww1x0ks3pb30l8t1wgd80000gn/T/TestCompositeLexers-1446068615081
|
||||
Starting build /usr/bin/xbuild /p:Configuration=Release /var/folders/s1/h3qgww1x0ks3pb30l8t1wgd80000gn/T/TestCompositeLexers-1446068615081/Antlr4.Test.mono.csproj
|
||||
Tests run: 2, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 3.451 sec
|
||||
Running org.antlr.v4.test.runtime.csharp.TestCompositeParsers
|
||||
dir /var/folders/s1/h3qgww1x0ks3pb30l8t1wgd80000gn/T/TestCompositeParsers-1446068615864
|
||||
antlr reports warnings from [-visitor, -Dlanguage=CSharp, -o, /var/folders/s1/h3qgww1x0ks3pb30l8t1wgd80000gn/T/TestCompositeParsers-1446068615864, -lib, /var/folders/s1/h3qgww1x0ks3pb30l8t1wgd80000gn/T/TestCompositeParsers-1446068615864, -encoding, UTF-8, /var/folders/s1/h3qgww1x0ks3pb30l8t1wgd80000gn/T/TestCompositeParsers-1446068615864/M.g4]
|
||||
...
|
||||
[INFO] ------------------------------------------------------------------------
|
||||
[INFO] Reactor Summary:
|
||||
[INFO]
|
||||
[INFO] ANTLR 4 ............................................ SUCCESS [ 0.462 s]
|
||||
[INFO] ANTLR 4 Runtime .................................... SUCCESS [ 9.163 s]
|
||||
[INFO] ANTLR 4 Tool ....................................... SUCCESS [ 3.683 s]
|
||||
[INFO] ANTLR 4 Maven plugin ............................... SUCCESS [ 1.897 s]
|
||||
[INFO] ANTLR 4 Runtime Test Generator ..................... SUCCESS [07:11 min]
|
||||
[INFO] ANTLR 4 Tool Tests ................................. SUCCESS [ 16.694 s]
|
||||
[INFO] ------------------------------------------------------------------------
|
||||
[INFO] BUILD SUCCESS
|
||||
[INFO] ------------------------------------------------------------------------
|
||||
[INFO] Total time: 07:43 min
|
||||
...
|
||||
```
|
||||
|
||||
You should see these jars (building 4.5.2-SNAPSHOT):
|
||||
|
||||
```bash
|
||||
/Users/parrt/.m2/repository/org/antlr $ find antlr4* -name '*.jar'
|
||||
antlr4/4.5/antlr4-4.5.jar
|
||||
antlr4/4.5.2-SNAPSHOT/antlr4-4.5.2-SNAPSHOT-tests.jar
|
||||
antlr4/4.5.2-SNAPSHOT/antlr4-4.5.2-SNAPSHOT.jar
|
||||
antlr4-maven-plugin/4.5/antlr4-maven-plugin-4.5.jar
|
||||
antlr4-maven-plugin/4.5.2-SNAPSHOT/antlr4-maven-plugin-4.5.2-SNAPSHOT.jar
|
||||
antlr4-runtime/4.5/antlr4-runtime-4.5.jar
|
||||
antlr4-runtime/4.5.2-SNAPSHOT/antlr4-runtime-4.5.2-SNAPSHOT.jar
|
||||
antlr4-runtime-testsuite/4.5.2-SNAPSHOT/antlr4-runtime-testsuite-4.5.2-SNAPSHOT-tests.jar
|
||||
antlr4-runtime-testsuite/4.5.2-SNAPSHOT/antlr4-runtime-testsuite-4.5.2-SNAPSHOT.jar
|
||||
antlr4-tool-testsuite/4.5.2-SNAPSHOT/antlr4-tool-testsuite-4.5.2-SNAPSHOT.jar
|
||||
```
|
||||
|
||||
Note that ANTLR is written in itself, which is why Maven downloads antlr4-4.5.jar for bootstrapping 4.5.2-SNAPSHOT purposes.
|
||||
|
||||
To build without running the tests (saves about 8 minutes), do this:
|
||||
|
||||
```bash
|
||||
mvn -DskipTests install
|
||||
```
|
||||
|
||||
## Building ANTLR in Intellij IDE
|
||||
|
||||
After downloading the ANTLR source, just "import project from existing sources" and click on the "Maven Projects" tab in the right gutter of the IDE. It should build in the background automatically and look like this:
|
||||
|
||||
<img src=images/intellij-maven.png width=200>
|
|
@ -0,0 +1,22 @@
|
|||
# Creating an ANTLR Language Target
|
||||
|
||||
This document describes how to make ANTLR generate parsers in a new language, *X*.
|
||||
|
||||
## Overview
|
||||
|
||||
Creating a new target involves the following key elements:
|
||||
|
||||
1. For the tool, create class *X*Target as a subclass of class `Target` in package `org.antlr.v4.codegen.target`. This class describes language-specific details about escape characters and strings and so on. There is typically very little to do here.
|
||||
1. Create *X*.stg in directory tool/resources/org/antlr/v4/tool/templates/codegen/*X*/*X*.stg. This is a [StringTemplate](http://www.stringtemplate.org/) group file (`.stg`) that tells ANTLR how to express all of the parsing elements needed to generate code. You will see templates called `ParserFile`, `Parser`, `Lexer`, `CodeBlockForAlt`, `AltBlock`, etc... Each of these templates must describe how to build the indicated chunk of code. Your best bet is to find the closest existing target, copy that template file, and tweak it to suit.
|
||||
1. Create a runtime library to support the parsers generated by ANTLR. Under directory runtime/*X*, you are in complete control of the directory structure as dictated by common usage of that target language. For example, Java has: `runtime/Java/lib` and `runtime/Java/src` directories. Under `src`, you will find a directory structure for package `org.antlr.v4.runtime` and below.
|
||||
1. Create a template file for runtime tests. All you have to do is provide a few simple templates that indicate how to print values and declare variables. Our runtime test mechanism in dir `runtime-testsuite` will automatically generate code in a new target and check the results. All it needs to know is how to generate a test rig (i.e., a `main` program), how to define various class fields, compare members and so on. You must create an *X* directory underneath `runtime-testsuite/resources/org/antlr/v4/test/runtime`. Again, your best bet is to copy the templates from the closest language to your target and tweak them to suit.
|
||||
|
||||
## Getting started
|
||||
|
||||
1. Fork the `antlr/antlr4` repository at github to your own user so that you have repository `username/antlr4`.
|
||||
2. Clone your forked repository, `username/antlr4`, to your local disk. Your remote `origin` will be the forked repository on GitHub. Add a remote `upstream` to the original `antlr/antlr4` repository (URL `https://github.com/antlr/antlr4.git`). Changes that you would like to contribute back to the project are done with [pull requests](https://help.github.com/articles/using-pull-requests/).
|
||||
3. Try to build it before doing anything
|
||||
```bash
|
||||
$ mvn compile
|
||||
```
|
||||
That should proceed with success. See [Building ANTLR](building-antlr.md) for more details. (That link does not currently work as I have that documentation in a branch. See https://github.com/parrt/antlr4/blob/move-doc-to-repo/doc/building-antlr.md for now.)
|
|
@ -0,0 +1,99 @@
|
|||
# C♯
|
||||
|
||||
See also [Sam Harwell's Alternative C# target](https://github.com/tunnelvisionlabs/antlr4cs)
|
||||
|
||||
### Which frameworks are supported?
|
||||
|
||||
The C# runtime is CLS-compliant, and only requires a .NET 3.5 (or equivalent) framework.
|
||||
|
||||
In practice, the runtime has been extensively tested against:
|
||||
|
||||
* Microsoft .Net 3.5 framework
|
||||
* Mono .Net 3.5 framework
|
||||
|
||||
No issues were found, so the runtime should work with pretty much any recent .NET framework.
|
||||
|
||||
### How do I get started?
|
||||
|
||||
You will find full instructions on the GitHub page for the ANTLR C# runtime.
|
||||
|
||||
### How do I use the runtime from my project?
|
||||
|
||||
(i.e., How do I run the generated lexer and/or parser?)
|
||||
|
||||
Let's suppose that your grammar is named, as above, "MyGrammar".
|
||||
|
||||
Let's suppose this parser comprises a rule named "StartRule".
|
||||
|
||||
The tool will have generated for you the following files:
|
||||
|
||||
* MyGrammarLexer.cs
|
||||
* MyGrammarParser.cs
|
||||
* MyGrammarListener.cs (if you have not activated the -no-listener option)
|
||||
* MyGrammarBaseListener.cs (if you have not activated the -no-listener option)
|
||||
* MyGrammarVisitor.cs (if you have activated the -visitor option)
|
||||
* MyGrammarBaseVisitor.cs (if you have activated the -visitor option)
|
||||
|
||||
Now a fully functioning code might look like the following:
|
||||
|
||||
```
|
||||
using Antlr4.Runtime;
using Antlr4.Runtime.Tree;
|
||||
|
||||
public void MyParseMethod() {
|
||||
string input = "your text to parse here";
|
||||
AntlrInputStream stream = new AntlrInputStream(input);
|
||||
ITokenSource lexer = new MyGrammarLexer(stream);
|
||||
ITokenStream tokens = new CommonTokenStream(lexer);
|
||||
MyGrammarParser parser = new MyGrammarParser(tokens);
|
||||
parser.BuildParseTree = true;
|
||||
IParseTree tree = parser.StartRule();
|
||||
}
|
||||
```
|
||||
|
||||
This program will work. But it won't be useful unless you do one of the following:
|
||||
|
||||
* you visit the parse tree using a custom listener
|
||||
* you visit the parse tree using a custom visitor
|
||||
* your grammar comprises production code (like ANTLR3)
|
||||
|
||||
(please note that production code is target-specific, so you can't have multi-target grammars that include production code)
|
||||
|
||||
### How do I create and run a custom listener?
|
||||
|
||||
Let's suppose your MyGrammar grammar comprises 2 rules: "key" and "value".
|
||||
|
||||
The antlr4 tool will have generated the following listener (only partial code shown here):
|
||||
|
||||
```
|
||||
interface IMyGrammarParserListener : IParseTreeListener {
|
||||
void EnterKey (MyGrammarParser.KeyContext context);
|
||||
void ExitKey (MyGrammarParser.KeyContext context);
|
||||
void EnterValue (MyGrammarParser.ValueContext context);
|
||||
void ExitValue (MyGrammarParser.ValueContext context);
|
||||
}
|
||||
```
|
||||
|
||||
In order to provide custom behavior, you might want to create the following class:
|
||||
|
||||
```
|
||||
class KeyPrinter : MyGrammarBaseListener {
|
||||
// override default listener behavior
|
||||
public override void ExitKey (MyGrammarParser.KeyContext context) {
|
||||
Console.WriteLine("Oh, a key!");
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
In order to execute this listener, you would simply add the following lines to the above code:
|
||||
|
||||
|
||||
```
|
||||
...
|
||||
IParseTree tree = parser.StartRule(); // only repeated here for reference
|
||||
KeyPrinter printer = new KeyPrinter();
|
||||
ParseTreeWalker.Default.Walk(printer, tree);
|
||||
```
|
||||
|
||||
Further information can be found in The Definitive ANTLR Reference book.
|
||||
|
||||
The C# implementation of ANTLR is as close as possible to the Java one, so you shouldn't find it difficult to adapt the examples for C#.
|
|
@ -0,0 +1,11 @@
|
|||
# Actions and semantic predicates
|
||||
|
||||
## How do I test if an optional rule was matched?
|
||||
|
||||
For optional rule references such as the initialization clause in the following
|
||||
|
||||
```
|
||||
decl : 'var' ID (EQUALS expr)? ;
|
||||
```
|
||||
|
||||
testing to see if that clause was matched can be done using `$EQUALS!=null` or `$expr.ctx!=null` where `$expr.ctx` points to the context or parse tree created for that reference to rule expr.
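
For example (a minimal sketch reusing the rule above, with a Java action added for illustration):

```
decl : 'var' ID (EQUALS expr)?
       {
       if ( $EQUALS!=null ) System.out.println("initialized: "+$expr.text);
       else System.out.println("declared without initializer");
       }
     ;
```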
|
|
@ -0,0 +1,5 @@
|
|||
# Error handling
|
||||
|
||||
## How do I perform semantic checking with ANTLR?
|
||||
|
||||
See [How to implement error handling in ANTLR4](http://stackoverflow.com/questions/21613421/how-to-implement-error-handling-in-antlr4/21615751#21615751).
|
|
@ -0,0 +1,100 @@
|
|||
# General
|
||||
|
||||
## Why do we need ANTLR v4?
|
||||
|
||||
*Oliver Zeigermann asked me some questions about v4. Here is our conversation.*
|
||||
|
||||
*See the [preface from the book](http://media.pragprog.com/titles/tpantlr2/preface.pdf)*
|
||||
|
||||
**Q: Why is the new version of ANTLR also called “honey badger”?**
|
||||
|
||||
ANTLR v4 is called the honey badger release after the fearless hero of the YouTube sensation, The Crazy Nastyass Honey Badger.
|
||||
|
||||
**Q: Why did you create a new version of ANTLR?**
|
||||
|
||||
Well, I started creating a new version because v3 had gotten very messy on the inside and also relied on grammars written in ANTLR v2. Unfortunately, v2's open-source license was unclear and so projects such as Eclipse could not include v3 because of its dependency on v2. In the end, Sam Harwell converted all of the v2 grammars into v3 so that v3 was written in itself. Because v3 has a very clean BSD license, the Eclipse project okayed its inclusion in the summer of 2011.
|
||||
|
||||
As I was rewriting ANTLR, I wanted to experiment with a new variation of the LL(\*) parsing algorithm. As luck would have it, I came up with a cool new version called adaptive LL(\*) that pushes all of the grammar analysis effort to runtime. The parser warms up like Java does with its JIT on-the-fly compiler; the code gets faster and faster the longer it runs. The benefit is that the adaptive algorithm is much stronger than the static LL(\*) grammar analysis algorithm in v3. Honey Badger takes any grammar that you give it; it just doesn't give a damn. (v4 accepts even left recursive grammars, except for indirectly left recursive grammars where x calls y which calls x).
|
||||
|
||||
v4 is the culmination of 25 years of research into parsers and parser generators. I think I finally know what I want to build. :)
|
||||
|
||||
**Q: What makes you excited about ANTLR4?**
|
||||
|
||||
The biggest thing is the new adaptive parsing strategy, which lets us accept any grammar we care to write. That gives us a huge productivity boost because we can now write much more natural expression rules (which occur in almost every grammar). For example, bottom-up parser generators such as yacc let you write very natural grammars like this:
|
||||
|
||||
```
|
||||
e : e '*' e
|
||||
| e '+' e
|
||||
| INT
|
||||
;
|
||||
```
|
||||
|
||||
ANTLR v4 will also take that grammar now, translating it secretly to a non-left recursive version.
|
||||
|
||||
Another big thing with v4 is that my goal has shifted from performance to ease-of-use. For example, ANTLR automatically can build parse trees for you and generate listeners and visitors. This is not only a huge productivity win, but also an important step forward in building grammars that don't depend on embedded actions. Those embedded actions (raw Java code or whatever) locked the grammar into use with only one language. If we keep all of the actions out of the grammar and put them into external visitors, we can reuse the same grammar to generate code in any language for which we have an ANTLR target.
|
||||
|
||||
**Q: What do you think are the things people had problems with in ANTLR3?**
|
||||
|
||||
The biggest problem was figuring out why ANTLR did not like their grammar. The static analysis often could not figure out how to generate a parser for the grammar. This problem totally goes away with the honey badger because it will take just about anything you give it without a whimper.
|
||||
|
||||
**Q: And what with other compiler generator tools?**
|
||||
|
||||
The biggest problem for the average practitioner is that most parser generators do not produce code you can load into a debugger and step through. This immediately removes bottom-up parser generators and the really powerful GLR parser generators from consideration by the average programmer. There are a few other tools that generate source code like ANTLR does, but they don't have v4's adaptive LL(\*) parsers. You will be stuck with contorting your grammar to fit the needs of the tool's weaker, say, LL(k) parsing strategy. PEG-based tools have a number of weaknesses, but to mention one, they have essentially no error recovery because they cannot report an error until they have parsed the entire input.
|
||||
|
||||
**Q: What are the main design decisions in ANTLR4?**
|
||||
|
||||
Ease-of-use over performance. I will worry about performance later. Simplicity over complexity. For example, I have taken out explicit/manual AST construction facilities and the tree grammar facilities. For 20 years I've been trying to get people to go that direction, but I've since decided that it was a mistake. It's much better to give people a parser generator that can automatically build trees and then let them use pure code to do whatever tree walking they want. People are extremely familiar and comfortable with visitors, for example.
|
||||
|
||||
**Q: What do you think people will like most on ANTLR4?**
|
||||
|
||||
The lack of errors when you run your grammar through ANTLR. The automatic tree construction and listener/visitor generation.
|
||||
|
||||
**Q: What do you think are the problems people will try to solve with ANTLR4?**
|
||||
|
||||
In my experience, almost no one uses parser generators to build commercial compilers. So, people are using ANTLR for their everyday work, building everything from configuration files to little scripting languages.
|
||||
|
||||
In response to a question about this entry from stackoverflow.com: I believe that compiler developers are very concerned with parsing speed, error reporting, and error recovery. For that, they want absolute control over their parser. Also, some languages are so complicated, such as C++, that parser generators might build parsers slower than compiler developers want. The compiler developers also like the control of a recursive-descent parser for predicating the parse to handle context-sensitive constructs such as `T(i)` in C++.
|
||||
|
||||
There is also likely a sense that parsing is the easy part of building a compiler, so they don't automatically jump to parser generators. I think this is also a function of previous-generation parser generators. McPeak's Elkhound GLR-based parser generator is powerful enough and fast enough, in the hands of someone who knows what they're doing, to be suitable for compilers. I can also attest to the fact that ANTLR v4 is now powerful enough and fast enough to compete well with hand-built parsers. E.g., after warm-up, it's now taking just 1s to parse the entire JDK java/\* library.
|
||||
|
||||
## What is the difference between ANTLR 3 and 4?
|
||||
|
||||
The biggest difference between ANTLR 3 and 4 is that ANTLR 4 takes any grammar you give it unless the grammar uses indirect left recursion. That means we don't need syntactic predicates or backtracking, so ANTLR 4 does not support that syntax; you will get a warning for using it. ANTLR 4 allows direct left recursion, so expressing things like arithmetic expression syntax is very easy and natural:
|
||||
|
||||
```
|
||||
expr : expr '*' expr
|
||||
| expr '+' expr
|
||||
| INT
|
||||
;
|
||||
```
|
||||
|
||||
ANTLR 4 automatically constructs parse trees for you, and abstract syntax tree (AST) construction is no longer an option. See also *What if I need ASTs not parse trees for a compiler, for example?*
|
||||
|
||||
Another big difference is that we discourage the use of actions directly within the grammar because ANTLR 4 automatically generates [listeners and visitors](https://raw.githubusercontent.com/antlr/antlr4/master/doc/listeners.md) for you to use that trigger method calls when some phrases of interest are recognized during a tree walk after parsing. See also [Parse Tree Matching and XPath](https://raw.githubusercontent.com/antlr/antlr4/master/doc/tree-matching.md).
|
||||
|
||||
Semantic predicates are still allowed in both parser and lexer rules, as are actions. For efficiency's sake, keep semantic predicates at the right edge of lexical rules.
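For example, a lexer rule that keeps its predicate at the right edge might look like this (an illustrative rule, not from the ANTLR distribution; the boolean `java5` would be something you define yourself, e.g. in `@lexer::members`):

```
ENUM : 'enum' {java5}? ; // the predicate sits at the right edge, after 'enum' has matched
```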
|
||||
|
||||
There are no tree grammars because we use listeners and visitors instead.
|
||||
|
||||
## Why is my expression parser slow?
|
||||
|
||||
Make sure to use two-stage parsing. See example in [bug report](https://github.com/antlr/antlr4/issues/374).
|
||||
|
||||
```Java
|
||||
|
||||
CharStream input = new ANTLRFileStream(args[0]);
|
||||
ExprLexer lexer = new ExprLexer(input);
|
||||
CommonTokenStream tokens = new CommonTokenStream(lexer);
|
||||
ExprParser parser = new ExprParser(tokens);
|
||||
parser.getInterpreter().setPredictionMode(PredictionMode.SLL);
|
||||
try {
|
||||
parser.stat(); // STAGE 1
|
||||
}
|
||||
catch (Exception ex) {
|
||||
tokens.reset(); // rewind input stream
|
||||
parser.reset();
|
||||
parser.getInterpreter().setPredictionMode(PredictionMode.LL);
|
||||
parser.stat(); // STAGE 2
|
||||
// if we parse ok, it's LL not SLL
|
||||
}
|
||||
```
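A common refinement, not shown in the snippet above, is to combine stage 1 with `BailErrorStrategy` so the first syntax error aborts the SLL attempt immediately instead of trying to recover. The following is a sketch, not the only way to do it; it reuses the `parser` and `tokens` objects from the code above and additionally needs `BailErrorStrategy`, `DefaultErrorStrategy`, `ConsoleErrorListener`, and `org.antlr.v4.runtime.misc.ParseCancellationException` from the runtime:

```java
parser.getInterpreter().setPredictionMode(PredictionMode.SLL);
parser.removeErrorListeners();                    // stay silent during the fast SLL try
parser.setErrorHandler(new BailErrorStrategy());  // throw instead of recovering
try {
    parser.stat();                                // STAGE 1
}
catch (ParseCancellationException ex) {           // thrown by BailErrorStrategy
    tokens.seek(0);                               // rewind the token stream
    parser.reset();
    parser.addErrorListener(ConsoleErrorListener.INSTANCE);
    parser.setErrorHandler(new DefaultErrorStrategy());
    parser.getInterpreter().setPredictionMode(PredictionMode.LL);
    parser.stat();                                // STAGE 2: full LL with normal errors
}
```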
|
|
@ -0,0 +1,11 @@
|
|||
# Getting started
|
||||
|
||||
## How do I install and run a simple grammar?
|
||||
|
||||
See [Getting Started with ANTLR v4](https://raw.githubusercontent.com/antlr/antlr4/master/doc/getting-started.md).
|
||||
|
||||
## Why does my parser test program hang?
|
||||
|
||||
Your test program is likely not hanging but simply waiting for you to type some input for standard input. Don't forget that you need to type the end of file character, generally on a line by itself, at the end of the input. On a Mac or Linux machine it is ctrl-D, as gawd intended, or ctrl-Z on a Windows machine.
|
||||
|
||||
See [Getting Started with ANTLR v4](https://raw.githubusercontent.com/antlr/antlr4/master/doc/getting-started.md).
|
|
@ -0,0 +1,50 @@
|
|||
# Frequently-Asked Questions (FAQ)
|
||||
|
||||
This is the main landing page for the ANTLR 4 FAQ. The links below will take you to the appropriate file containing all answers for that subcategory.
|
||||
|
||||
*To add to or improve this FAQ, [fork](https://help.github.com/articles/fork-a-repo/) the [antlr/antlr4 repo](https://github.com/antlr/antlr4) then update this `doc/faq/index.md` or file(s) in that directory. Submit a [pull request](https://help.github.com/articles/creating-a-pull-request/) to get your changes incorporated into the main repository. Do not mix code and FAQ updates in the same pull request.* **You must sign the contributors.txt certificate of origin with your pull request if you've not done so before.**
|
||||
|
||||
## Getting Started
|
||||
|
||||
* [How do I install and run a simple grammar?](getting-started.md)
|
||||
* [Why does my parser test program hang?](getting-started.md)
|
||||
|
||||
## Installation
|
||||
|
||||
* [Why can't ANTLR (grun) find my lexer or parser?](installation.md)
|
||||
* [Why can't I run the ANTLR tool?](installation.md)
|
||||
* [Why doesn't my parser compile?](installation.md)
|
||||
|
||||
## General
|
||||
|
||||
* [Why do we need ANTLR v4?](general.md)
|
||||
* [What is the difference between ANTLR 3 and 4?](general.md)
|
||||
* [Why is my expression parser slow?](general.md)
|
||||
|
||||
## Grammar syntax
|
||||
|
||||
## Lexical analysis
|
||||
|
||||
* [How can I parse non-ASCII text and use characters in token rules?](lexical.md)
|
||||
* [How do I replace escape characters in string tokens?](lexical.md)
|
||||
* [Why are my keywords treated as identifiers?](lexical.md)
|
||||
* [Why are there no whitespace tokens in the token stream?](lexical.md)
|
||||
|
||||
## Parse Trees
|
||||
|
||||
* [How do I get the input text for a parse-tree subtree?](parse-trees.md)
|
||||
* [What if I need ASTs not parse trees for a compiler, for example?](parse-trees.md)
|
||||
* [When do I use listener/visitor vs XPath vs Tree pattern matching?](parse-trees.md)
|
||||
|
||||
## Translation
|
||||
|
||||
* [ASTs vs parse trees](parse-trees.md)
|
||||
* [Decoupling input walking from output generation](parse-trees.md)
|
||||
|
||||
## Actions and semantic predicates
|
||||
|
||||
* [How do I test if an optional rule was matched?](actions-preds.md)
|
||||
|
||||
## Error handling
|
||||
|
||||
* [How do I perform semantic checking with ANTLR?](error-handling.md)
|
|
@ -0,0 +1,60 @@
|
|||
# Installation
|
||||
|
||||
Please read carefully: [Getting Started with ANTLR v4](https://raw.githubusercontent.com/antlr/antlr4/master/doc/getting-started.md).
|
||||
|
||||
## Why can't ANTLR (grun) find my lexer or parser?
|
||||
|
||||
If you see "Can't load Hello as lexer or parser", it's because you don't have '.' (current directory) in your CLASSPATH.
|
||||
|
||||
```bash
|
||||
$ alias antlr4='java -jar /usr/local/lib/antlr-4.2.2-complete.jar'
|
||||
$ alias grun='java org.antlr.v4.runtime.misc.TestRig'
|
||||
$ export CLASSPATH="/usr/local/lib/antlr-4.2.2-complete.jar"
|
||||
$ antlr4 Hello.g4
|
||||
$ javac Hello*.java
|
||||
$ grun Hello r -tree
|
||||
Can't load Hello as lexer or parser
|
||||
$
|
||||
```
|
||||
|
||||
For mac/linux, use:
|
||||
|
||||
```bash
|
||||
export CLASSPATH=".:/usr/local/lib/antlr-4.2.2-complete.jar:$CLASSPATH"
|
||||
```
|
||||
|
||||
or for Windows:
|
||||
|
||||
```
|
||||
SET CLASSPATH=.;C:\Javalib\antlr4-complete.jar;%CLASSPATH%
|
||||
```
|
||||
|
||||
**See the dot at the beginning?** It's critical.
|
||||
|
||||
## Why can't I run the ANTLR tool?
|
||||
|
||||
If you get a no class definition found error, you are missing the ANTLR jar in your `CLASSPATH` (or you might only have the runtime jar):
|
||||
|
||||
```bash
|
||||
/tmp $ java org.antlr.v4.Tool Hello.g4
|
||||
Exception in thread "main" java.lang.NoClassDefFoundError: org/antlr/v4/Tool
|
||||
Caused by: java.lang.ClassNotFoundException: org.antlr.v4.Tool
|
||||
at java.net.URLClassLoader$1.run(URLClassLoader.java:202)
|
||||
at java.security.AccessController.doPrivileged(Native Method)
|
||||
at java.net.URLClassLoader.findClass(URLClassLoader.java:190)
|
||||
at java.lang.ClassLoader.loadClass(ClassLoader.java:306)
|
||||
at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:301)
|
||||
at java.lang.ClassLoader.loadClass(ClassLoader.java:247)
|
||||
```
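The fix is to put the complete jar on the classpath when invoking the tool, for example (using the same jar location as in the aliases above):

```bash
$ java -cp ".:/usr/local/lib/antlr-4.2.2-complete.jar" org.antlr.v4.Tool Hello.g4
```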
|
||||
|
||||
## Why doesn't my parser compile?
|
||||
|
||||
If you see these kinds of errors, it's because you don't have the runtime or complete ANTLR library in your CLASSPATH.
|
||||
|
||||
```bash
|
||||
/tmp $ javac Hello*.java
|
||||
HelloBaseListener.java:3: package org.antlr.v4.runtime does not exist
|
||||
import org.antlr.v4.runtime.ParserRuleContext;
|
||||
^
|
||||
...
|
||||
```
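The fix, again assuming the jar location used above, is to put the complete (or runtime) jar on the `javac` classpath:

```bash
$ javac -cp ".:/usr/local/lib/antlr-4.2.2-complete.jar" Hello*.java
```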
|
|
@ -0,0 +1,63 @@
|
|||
# Lexical analysis
|
||||
|
||||
## How can I parse non-ASCII text and use characters in token rules?
|
||||
|
||||
See [Using non-ASCII characters in token rules](http://stackoverflow.com/questions/28126507/antlr4-using-non-ascii-characters-in-token-rules/28129510#28129510).
|
||||
|
||||
## How do I replace escape characters in string tokens?
|
||||
|
||||
Unfortunately, manipulating the text of the token matched by a lexical rule is cumbersome (as of 4.2). You have to build up a buffer and then set the text at the end. Actions in the lexer execute at the associated position in the input just like they do in the parser. Here's an example that does escape character replacement in strings. It's not pretty but it works.
|
||||
|
||||
```
|
||||
grammar Foo;
|
||||
|
||||
@members {
|
||||
StringBuilder buf = new StringBuilder(); // can't make locals in lexer rules
|
||||
}
|
||||
|
||||
STR : '"'
|
||||
( '\\'
|
||||
( 'r' {buf.append('\r');}
|
||||
| 'n' {buf.append('\n');}
|
||||
| 't' {buf.append('\t');}
|
||||
| '\\' {buf.append('\\');}
|
||||
| '\"' {buf.append('"');}
|
||||
)
|
||||
| ~('\\'|'"') {buf.append((char)_input.LA(-1));}
|
||||
)*
|
||||
'"'
|
||||
{setText(buf.toString()); buf.setLength(0); System.out.println(getText());}
|
||||
;
|
||||
```
|
||||
|
||||
It's easier and more efficient to return the original input string and then use a small function to rewrite the string later, during a parse-tree walk or wherever. But, here's how to do it from within the lexer.
|
||||
|
||||
Note that lexer actions don't work in the interpreter, which includes XPath and tree pattern matching.
|
||||
|
||||
For more on the argument against doing complicated things in the lexer, see the [related lexer-action issue at github](https://github.com/antlr/antlr4/issues/483#issuecomment-37326067).
|
||||
|
||||
## Why are my keywords treated as identifiers?
|
||||
|
||||
Keywords such as `begin` are also valid identifiers lexically and so that input is ambiguous. To resolve ambiguities, ANTLR gives precedence to the lexical rules specified first. That implies that you must put the identifier rule after all of your keywords:
|
||||
|
||||
```
|
||||
grammar T;
|
||||
|
||||
decl : DEF 'int' ID ';' ;
|
||||
|
||||
DEF : 'def' ; // ambiguous with ID as is 'int'
|
||||
ID : [a-z]+ ;
|
||||
```
|
||||
|
||||
Notice that literal `'int'` is also physically before the ID rule and will also get precedence.
|
||||
|
||||
## Why are there no whitespace tokens in the token stream?
|
||||
|
||||
The lexer is not sending whitespace to the parser, which means that the rewrite stream doesn't have access to those tokens either. This is because of the `skip` lexer command:
|
||||
|
||||
```
|
||||
WS : [ \t\r\n\u000C]+ -> skip
|
||||
;
|
||||
```
|
||||
|
||||
You have to change all of those to `-> channel(HIDDEN)`, which puts those tokens on a different channel, making them available in the token stream but invisible to the parser.
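For example, the whitespace rule above becomes:

```
WS : [ \t\r\n\u000C]+ -> channel(HIDDEN)
   ;
```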
|
|
@ -0,0 +1,73 @@
|
|||
# Parse Trees
|
||||
|
||||
## How do I get the input text for a parse-tree subtree?
|
||||
|
||||
In ParseTree, you have this method:
|
||||
|
||||
```java
|
||||
/** Return the combined text of all leaf nodes. Does not get any
|
||||
* off-channel tokens (if any) so won't return whitespace and
|
||||
* comments if they are sent to parser on hidden channel.
|
||||
*/
|
||||
String getText();
|
||||
```
|
||||
|
||||
But, you probably want this method from TokenStream:
|
||||
|
||||
```java
|
||||
/**
|
||||
* Return the text of all tokens in the source interval of the specified
|
||||
* context. This method behaves like the following code, including potential
|
||||
* exceptions from the call to {@link #getText(Interval)}, but may be
|
||||
* optimized by the specific implementation.
|
||||
*
|
||||
* <p>If {@code ctx.getSourceInterval()} does not return a valid interval of
|
||||
* tokens provided by this stream, the behavior is unspecified.</p>
|
||||
*
|
||||
* <pre>
|
||||
* TokenStream stream = ...;
|
||||
* String text = stream.getText(ctx.getSourceInterval());
|
||||
* </pre>
|
||||
*
|
||||
* @param ctx The context providing the source interval of tokens to get
|
||||
* text for.
|
||||
* @return The text of all tokens within the source interval of {@code ctx}.
|
||||
*/
|
||||
public String getText(RuleContext ctx);
|
||||
```
|
||||
|
||||
That is, do this:
|
||||
|
||||
```
|
||||
mytokens.getText(mySubTree);
|
||||
```
|
||||
|
||||
## What if I need ASTs not parse trees for a compiler, for example?
|
||||
|
||||
For writing a compiler, either generate [LLVM-type static-single-assignment](http://llvm.org/docs/LangRef.html) form or construct an AST from the parse tree using a listener or visitor. Or, use actions in the grammar, turning off auto-parse-tree construction.
|
||||
|
||||
## When do I use listener/visitor vs XPath vs Tree pattern matching?
|
||||
|
||||
### XPath
|
||||
|
||||
XPath works great when you need to find specific nodes, possibly in certain contexts. The context is limited to the parents on the way to the root of the tree. For example, if you want to find all ID nodes, use path `//ID`. If you want all variable declarations, you might use path `//vardecl`. If you only want field declarations, then you can use some context information via path `/classdef/vardecl`, which would only find vardecls that are children of class definitions. You can merge the results of multiple XPath `findAll()`s, simulating a set union. The only caveat is that the order from the original tree is not preserved when you union multiple `findAll()` sets.
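As a sketch in Java, the two queries from this paragraph could be run and merged like this (it assumes your grammar actually has `ID`, `vardecl`, and `classdef` rules):

```java
import java.util.ArrayList;
import java.util.Collection;

import org.antlr.v4.runtime.Parser;
import org.antlr.v4.runtime.tree.ParseTree;
import org.antlr.v4.runtime.tree.xpath.XPath;

class XPathUnionExample {
    /** Union of two XPath queries; original tree order is not preserved. */
    static Collection<ParseTree> idsAndFieldDecls(ParseTree tree, Parser parser) {
        Collection<ParseTree> result =
            new ArrayList<ParseTree>(XPath.findAll(tree, "//ID", parser));
        result.addAll(XPath.findAll(tree, "/classdef/vardecl", parser));
        return result;
    }
}
```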
|
||||
|
||||
### Tree pattern matching
|
||||
|
||||
Use tree pattern matching when you want to find specific subtree structures such as all assignments to 0 using pattern `x = 0;`. (Recall that these are very convenient because you specify the tree structure in the concrete syntax of the language described by the grammar.) If you want to find all assignments of any kind, you can use pattern `x = <expr>;` where `<expr>` will find any expression. This works great for matching particular substructures and therefore gives you a bit more ability to specify context. I.e., instead of just finding all identifiers, you can find all identifiers on the left hand side of an expression.
|
||||
|
||||
### Listeners/Visitors
|
||||
|
||||
Using the listener or visitor interfaces gives you the most power but requires implementing more methods. It might be more challenging to discover the emergent behavior of the listener than that of a simple tree pattern matcher that says *go find me X under node Y*.
|
||||
|
||||
Listeners are great when you want to visit many nodes in a tree.
|
||||
|
||||
Listeners allow you to compute and save context information necessary for processing at various nodes. For example, when building a symbol table manager for a compiler or translator, you need to compute symbol scopes such as globals, class, function, and code block. When you enter a class or function, you push a new scope and then pop it when you exit that class or function. When you see a symbol, you need to define it or look it up in the proper scope. By having enter/exit listener functions push and pop scopes, listener functions for defining variables simply say something like:
|
||||
|
||||
```java
|
||||
scopeStack.peek().define(new VariableSymbol("foo"))
|
||||
```
|
||||
|
||||
That way each listener function does not have to compute its appropriate scope.
|
||||
|
||||
Examples: [DefScopesAndSymbols.java](https://github.com/mantra/compiler/blob/master/src/java/mantra/semantics/DefScopesAndSymbols.java) and [SetScopeListener.java](https://github.com/mantra/compiler/blob/master/src/java/mantra/semantics/SetScopeListener.java) and [VerifyListener.java](https://github.com/mantra/compiler/blob/master/src/java/mantra/semantics/VerifyListener.java)
|
|
@ -0,0 +1,9 @@
|
|||
# Translation
|
||||
|
||||
## ASTs vs parse trees
|
||||
|
||||
I used to do specialized AST (**abstract** syntax tree) nodes rather than (concrete) parse trees because I used to think more about compilation and generating bytecode/assembly code. When I started thinking more about translation, I started using parse trees. For v4, I realized that I did mostly translation. I guess what I'm saying is that maybe parse trees are not as good as ASTs for generating bytecodes. Personally, I would rather see `(+ 3 4)` than `(expr 3 + 4)` for generating bytecodes, but it's not the end of the world. (*Can someone fill this in?*)
|
||||
|
||||
## Decoupling input walking from output generation
|
||||
|
||||
I suggest creating an intermediate model that represents your output. You walk the parse tree to collect information and create your model. Then, you could almost certainly automatically walk this internal model to generate output based upon stringtemplates that match the class names of the internal model. In other words, define a special `IFStatement` object that has all of the fields you want and then create them as you walk the parse tree. This decoupling of the input from the output is very powerful. Just because we have a parse tree listener doesn't mean that the parse tree itself is necessarily the best data structure to hold all information necessary to generate code. Imagine a situation where the output is the exact reverse of the input. In that case, you really want to walk the input just to collect data. Generating output should be driven by the internal model not the way it was represented in the input.
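A minimal sketch of this idea in Java, with made-up model class names (`IFStatement`, `OutputModel`): the listener fills the model while walking the input, and all output text comes from the model's `render()` methods, not from the parse tree.

```java
import java.util.ArrayList;
import java.util.List;

/** One "if" statement in the *output* language (hypothetical model class). */
class IFStatement {
    String condition;
    List<String> thenStatements = new ArrayList<String>();

    /** Rendering could just as well be a StringTemplate keyed by the class name. */
    String render() {
        StringBuilder buf = new StringBuilder("if (" + condition + ") {\n");
        for (String s : thenStatements) buf.append("    ").append(s).append("\n");
        return buf.append("}").toString();
    }
}

/** The whole translated unit: output generation walks this model, not the parse tree. */
class OutputModel {
    List<IFStatement> ifs = new ArrayList<IFStatement>();

    String render() {
        StringBuilder buf = new StringBuilder();
        for (IFStatement s : ifs) buf.append(s.render()).append('\n');
        return buf.toString();
    }
}
```

A parse-tree listener would create and fill these objects as it walks the input (for example, in the exit method for your if-statement rule); since only the model's `render()` methods produce output text, changing the output format never touches the tree walk.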
|
|
@ -0,0 +1,131 @@
|
|||
# Getting Started with ANTLR v4
|
||||
|
||||
Hi and welcome to the version 4 release of ANTLR! It's named after the fearless hero of the [Crazy Nasty-Ass Honey Badger](http://www.youtube.com/watch?v=4r7wHMg5Yjg) since ANTLR v4 takes whatever you give it--it just doesn't give a crap! See [Why do we need ANTLR v4?](faq/general.md) and the [preface of the ANTLR v4 book](http://media.pragprog.com/titles/tpantlr2/preface.pdf).
|
||||
|
||||
## Installation
|
||||
|
||||
ANTLR is really two things: a tool that translates your grammar to a parser/lexer in Java (or other target language) and the runtime needed by the generated parsers/lexers. Even if you are using the ANTLR Intellij plug-in or ANTLRWorks to run the ANTLR tool, the generated code will still need the runtime library.
|
||||
|
||||
The first thing you should do is probably download and install a development tool plug-in. Even if you only use such tools for editing, they are great. Then, follow the instructions below to get the runtime environment available to your system to run generated parsers/lexers. In what follows, I talk about antlr-4.5-complete.jar, which has the tool and the runtime and any other support libraries (e.g., ANTLR v4 is written in v3).
|
||||
|
||||
If you are going to integrate ANTLR into your existing build system using mvn, ant, or want to get ANTLR into your IDE such as eclipse or intellij, see Integrating ANTLR into Development Systems.
|
||||
|
||||
### UNIX
|
||||
|
||||
0. Install Java (version 1.6 or higher)
|
||||
1. Download
|
||||
```
|
||||
$ cd /usr/local/lib
|
||||
$ curl -O http://www.antlr.org/download/antlr-4.5-complete.jar
|
||||
```
|
||||
Or just download in browser from website:
|
||||
[http://www.antlr.org/download.html](http://www.antlr.org/download.html)
|
||||
and put it somewhere rational like `/usr/local/lib`.
|
||||
2. Add `antlr-4.5-complete.jar` to your `CLASSPATH`:
|
||||
```
|
||||
$ export CLASSPATH=".:/usr/local/lib/antlr-4.5-complete.jar:$CLASSPATH"
|
||||
```
|
||||
It's also a good idea to put this in your `.bash_profile` or whatever your startup script is.
|
||||
3. Create aliases for the ANTLR Tool, and `TestRig`.
|
||||
```
|
||||
$ alias antlr4='java -Xmx500M -cp "/usr/local/lib/antlr-4.5-complete.jar:$CLASSPATH" org.antlr.v4.Tool'
|
||||
$ alias grun='java org.antlr.v4.runtime.misc.TestRig'
|
||||
```
|
||||
|
||||
### WINDOWS
|
||||
|
||||
(*Thanks to Graham Wideman*)
|
||||
|
||||
0. Install Java (version 1.6 or higher)
|
||||
1. Download antlr-4.5-complete.jar (or whatever version) from [http://www.antlr.org/download/](http://www.antlr.org/download/)
|
||||
Save to your directory for 3rd party Java libraries, say `C:\Javalib`
|
||||
2. Add `antlr-4.5-complete.jar` to CLASSPATH, either:
|
||||
* Permanently: Using System Properties dialog > Environment variables > Create or append to `CLASSPATH` variable
|
||||
* Temporarily, at command line:
|
||||
```
|
||||
SET CLASSPATH=.;C:\Javalib\antlr-4.5-complete.jar;%CLASSPATH%
|
||||
```
|
||||
3. Create short convenient commands for the ANTLR Tool, and TestRig, using batch files or doskey commands:
|
||||
* Batch files (in directory in system PATH) antlr4.bat and grun.bat
|
||||
```
|
||||
java org.antlr.v4.Tool %*
|
||||
```
|
||||
```
|
||||
java org.antlr.v4.runtime.misc.TestRig %*
|
||||
```
|
||||
* Or, use doskey commands:
|
||||
```
|
||||
doskey antlr4=java org.antlr.v4.Tool $*
|
||||
doskey grun =java org.antlr.v4.runtime.misc.TestRig $*
|
||||
```
|
||||
|
||||
### Testing the installation
|
||||
|
||||
Either launch org.antlr.v4.Tool directly:
|
||||
|
||||
```
|
||||
$ java org.antlr.v4.Tool
|
||||
ANTLR Parser Generator Version 4.5
|
||||
-o ___ specify output directory where all output is generated
|
||||
-lib ___ specify location of .tokens files
|
||||
...
|
||||
```
|
||||
|
||||
or use the `-jar` option on java:
|
||||
|
||||
```
|
||||
$ java -jar /usr/local/lib/antlr-4.5-complete.jar
|
||||
ANTLR Parser Generator Version 4.5
|
||||
-o ___ specify output directory where all output is generated
|
||||
-lib ___ specify location of .tokens files
|
||||
...
|
||||
```
|
||||
|
||||
## A First Example
|
||||
|
||||
In a temporary directory, put the following grammar inside file Hello.g4:
|
||||
Hello.g4
|
||||
|
||||
```
|
||||
// Define a grammar called Hello
|
||||
grammar Hello;
|
||||
r : 'hello' ID ; // match keyword hello followed by an identifier
|
||||
ID : [a-z]+ ; // match lower-case identifiers
|
||||
WS : [ \t\r\n]+ -> skip ; // skip spaces, tabs, newlines
|
||||
```
|
||||
|
||||
Then run the ANTLR tool on it:
|
||||
|
||||
```
|
||||
$ cd /tmp
|
||||
$ antlr4 Hello.g4
|
||||
$ javac Hello*.java
|
||||
```
|
||||
|
||||
Now test it:
|
||||
|
||||
```
|
||||
$ grun Hello r -tree
|
||||
hello parrt
|
||||
^D
|
||||
(r hello parrt)
```
|
||||
(That ^D means EOF on unix; it's ^Z in Windows.) The -tree option prints the parse tree in LISP notation.
|
||||
It's nicer to look at parse trees visually.
|
||||
```
$ grun Hello r -gui
|
||||
hello parrt
|
||||
^D
|
||||
```
|
||||
|
||||
That pops up a dialog box showing that rule `r` matched keyword `hello` followed by identifier `parrt`.
|
||||
|
||||
![](images/hello-parrt.png)
|
||||
|
||||
## Book source code
|
||||
|
||||
The book has lots and lots of examples that should be useful to you. You can download them here for free:
|
||||
|
||||
[http://pragprog.com/titles/tpantlr2/source_code](http://pragprog.com/titles/tpantlr2/source_code)
|
||||
|
||||
Also, there is a large collection of grammars for v4 at github:
|
||||
|
||||
[https://github.com/antlr/grammars-v4](https://github.com/antlr/grammars-v4)
|
|
@ -0,0 +1,184 @@
|
|||
# Grammar Structure
|
||||
|
||||
A grammar is essentially a grammar declaration followed by a list of rules, but has the general form:
|
||||
|
||||
```
|
||||
/** Optional javadoc style comment */
|
||||
grammar Name; ①
|
||||
options {...}
|
||||
import ... ;
|
||||
|
||||
tokens {...}
|
||||
channels {...} // lexer only
|
||||
@actionName {...}
|
||||
|
||||
rule1 // parser and lexer rules, possibly intermingled
|
||||
...
|
||||
ruleN
|
||||
```
|
||||
|
||||
The file name containing grammar `X` must be called `X.g4`. You can specify options, imports, token specifications, and actions in any order. There can be at most one each of options, imports, and token specifications. All of those elements are optional except for the header ① and at least one rule. Rules take the basic form:
|
||||
|
||||
```
|
||||
ruleName : alternative1 | ... | alternativeN ;
|
||||
```
|
||||
|
||||
Parser rule names must start with a lowercase letter and lexer rules must start with a capital letter.
|
||||
|
||||
Grammars defined without a prefix on the `grammar` header are combined grammars that can contain both lexical and parser rules. To make a parser grammar that only allows parser rules, use the following header.
|
||||
|
||||
```
|
||||
parser grammar Name;
|
||||
...
|
||||
```
|
||||
|
||||
And, naturally, a pure lexer grammar looks like this:
|
||||
|
||||
```
|
||||
lexer grammar Name;
|
||||
...
|
||||
```
|
||||
|
||||
Only lexer grammars can contain `mode` specifications.
|
||||
|
||||
Only lexer grammars can contain custom `channels` specifications:
|
||||
|
||||
```
|
||||
channels {
|
||||
WHITESPACE_CHANNEL,
|
||||
COMMENTS_CHANNEL
|
||||
}
|
||||
```
|
||||
|
||||
Those channels can then be used like enums within lexer rules:
|
||||
|
||||
```
|
||||
WS : [ \r\t\n]+ -> channel(WHITESPACE_CHANNEL) ;
|
||||
```
|
||||
|
||||
Section 15.5, [Lexer Rules](http://pragprog.com/book/tpantlr2/the-definitive-antlr-4-reference), and Section 15.3, [Parser Rules](http://pragprog.com/book/tpantlr2/the-definitive-antlr-4-reference), contain details on rule syntax. Section 15.8, Options, describes grammar options and Section 15.4, Actions and Attributes, has information on grammar-level actions.
|
||||
|
||||
## Grammar Imports
|
||||
|
||||
Grammar `imports` let you break up a grammar into logical and reusable chunks, as we saw in [Importing Grammars](http://pragprog.com/book/tpantlr2/the-definitive-antlr-4-reference). ANTLR treats imported grammars very much like object-oriented programming languages treat superclasses. A grammar inherits all of the rules, token specifications, and named actions from the imported grammar. Rules in the “main grammar” override rules from imported grammars to implement inheritance.
|
||||
|
||||
Think of `import` as more like a smart include statement (which does not include rules that are already defined). The result of all imports is a single combined grammar; the ANTLR code generator sees a complete grammar and has no idea there were imported grammars.
|
||||
|
||||
To process a main grammar, the ANTLR tool loads all of the imported grammars into subordinate grammar objects. It then merges the rules, token types, and named actions from the imported grammars into the main grammar. In the diagram below, the grammar on the right illustrates the effect of grammar `MyELang` importing grammar `ELang`.
|
||||
|
||||
<img src=images/combined.png width=400>
|
||||
|
||||
`MyELang` inherits rules `stat`, `WS`, and `ID`, but overrides rule `expr` and adds `INT`. Here’s a sample build and test run that shows `MyELang` can recognize integer expressions whereas the original `ELang` can’t. The third, erroneous input statement triggers an error message that also demonstrates the parser was looking for `MyELang`’s expr not `ELang`’s.
|
||||
|
||||
```
|
||||
$ antlr4 MyELang.g4
|
||||
$ javac MyELang*.java
|
||||
$ grun MyELang stat
|
||||
=> 34;
|
||||
=> a;
|
||||
=> ;
|
||||
=> EOF
|
||||
<= line 3:0 extraneous input ';' expecting {INT, ID}
|
||||
```
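For reference, a minimal pair of grammars consistent with the description and sample run above might look like this (a sketch, not the exact grammars from the book):

```
// ELang.g4
grammar ELang;
stat : expr ';' ;
expr : ID ;
ID   : [a-z]+ ;
WS   : [ \t\r\n]+ -> skip ;
```

```
// MyELang.g4 -- inherits stat, WS, and ID; overrides expr and adds INT
grammar MyELang;
import ELang;
expr : INT | ID ;
INT  : [0-9]+ ;
```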
|
||||
|
||||
If there were any `tokens` specifications, the main grammar would merge the token sets. Any named actions such as `@members` would be merged. In general, you should avoid named actions and actions within rules in imported grammars since that limits their reuse. ANTLR also ignores any options in imported grammars.
|
||||
|
||||
Imported grammars can also import other grammars. ANTLR pursues all imported grammars in a depth-first fashion. If two or more imported grammars define rule `r`, ANTLR chooses the first version of `r` it finds. In the following diagram, ANTLR examines grammars in the following order `Nested`, `G1`, `G3`, `G2`.
|
||||
|
||||
<img src=images/nested.png width=350>
|
||||
|
||||
`Nested` includes the `r` rule from `G3` because it sees that version before the `r` in `G2`.
|
||||
|
||||
Not every kind of grammar can import every other kind of grammar:
|
||||
|
||||
* Lexer grammars can import lexers.
|
||||
* Parsers can import parsers.
|
||||
* Combined grammars can import lexers or parsers.
|
||||
|
||||
ANTLR adds imported rules to the end of the rule list in a main lexer grammar. That means lexer rules in the main grammar get precedence over imported rules. For example, if a main grammar defines rule `IF : ’if’ ;` and an imported grammar defines rule `ID : [a-z]+ ;` (which also recognizes `if`), the imported `ID` won’t hide the main grammar’s `IF` token definition.
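A minimal illustration, with made-up grammar names:

```
// CommonLexer.g4 (imported) -- its ID rule would also match 'if'
lexer grammar CommonLexer;
ID : [a-z]+ ;
```

```
// MyLexer.g4 (main) -- IF is listed before the imported ID, so 'if' becomes an IF token
lexer grammar MyLexer;
import CommonLexer;
IF : 'if' ;
```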
|
||||
|
||||
## Tokens Section
|
||||
|
||||
The purpose of the `tokens` section is to define token types needed by a grammar for which there is no associated lexical rule. The basic syntax is:
|
||||
|
||||
```
|
||||
tokens { Token1, ..., TokenN }
|
||||
```
|
||||
|
||||
Most of the time, the tokens section is used to define token types needed by actions in the grammar as shown in Section 10.3, [Recognizing Languages whose Keywords Aren’t Fixed](http://pragprog.com/book/tpantlr2/the-definitive-antlr-4-reference):
|
||||
|
||||
```
|
||||
// explicitly define keyword token types to avoid implicit definition warnings
|
||||
tokens { BEGIN, END, IF, THEN, WHILE }
|
||||
|
||||
@lexer::members { // keywords map used in lexer to assign token types
|
||||
Map<String,Integer> keywords = new HashMap<String,Integer>() {{
|
||||
put("begin", KeywordsParser.BEGIN);
|
||||
put("end", KeywordsParser.END);
|
||||
...
|
||||
}};
|
||||
}
|
||||
```
|
||||
|
||||
The `tokens` section really just defines a set of tokens to add to the overall set.
|
||||
|
||||
```
|
||||
$ cat Tok.g4
|
||||
grammar Tok;
|
||||
tokens { A, B, C }
|
||||
a : X ;
|
||||
$ antlr4 Tok.g4
|
||||
warning(125): Tok.g4:3:4: implicit definition of token X in parser
|
||||
$ cat Tok.tokens
|
||||
A=1
|
||||
B=2
|
||||
C=3
|
||||
X=4
|
||||
```
|
||||
|
||||
## Actions at the Grammar Level
|
||||
|
||||
Currently there are only two defined named actions (for the Java target) used outside of grammar rules: `header` and `members`. The former injects code into the generated recognizer class file, before the recognizer class definition, and the latter injects code into the recognizer class definition, as fields and methods.
|
||||
|
||||
For combined grammars, ANTLR injects the actions into both the parser and the lexer. To restrict an action to the generated parser or lexer, use `@parser::name` or `@lexer::name`.
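For instance (a small made-up illustration, separate from the larger example below):

```
@parser::members { int parseCalls = 0; } // injected into the generated parser only
@lexer::members  { int tokenCount = 0; } // injected into the generated lexer only
```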
|
||||
|
||||
Here’s an example where the grammar specifies a package for the generated code:
|
||||
|
||||
```
|
||||
grammar Count;
|
||||
|
||||
@header {
|
||||
package foo;
|
||||
}
|
||||
|
||||
@members {
|
||||
int count = 0;
|
||||
}
|
||||
|
||||
list
|
||||
@after {System.out.println(count+" ints");}
|
||||
: INT {count++;} (',' INT {count++;} )*
|
||||
;
|
||||
|
||||
INT : [0-9]+ ;
|
||||
WS : [ \r\t\n]+ -> skip ;
|
||||
```
|
||||
|
||||
The grammar itself then should be in directory `foo` so that ANTLR generates code in that same `foo` directory (at least when not using the `-o` ANTLR tool option):
|
||||
|
||||
```
|
||||
$ cd foo
|
||||
$ antlr4 Count.g4 # generates code in the current directory (foo)
|
||||
$ ls
|
||||
Count.g4 CountLexer.java CountParser.java
|
||||
Count.tokens CountLexer.tokens
|
||||
CountBaseListener.java CountListener.java
|
||||
$ javac *.java
|
||||
$ cd ..
|
||||
$ grun foo.Count list
|
||||
=> 9, 10, 11
|
||||
=> EOF
|
||||
<= 3 ints
|
||||
```
|
||||
|
||||
The Java compiler expects classes in package `foo` to be in directory `foo`.
|
After Width: | Height: | Size: 87 KiB |
After Width: | Height: | Size: 3.4 KiB |
After Width: | Height: | Size: 719 B |
After Width: | Height: | Size: 11 KiB |
After Width: | Height: | Size: 20 KiB |
After Width: | Height: | Size: 17 KiB |
After Width: | Height: | Size: 79 KiB |
After Width: | Height: | Size: 92 KiB |
After Width: | Height: | Size: 4.1 KiB |
After Width: | Height: | Size: 499 B |
After Width: | Height: | Size: 62 KiB |
After Width: | Height: | Size: 48 KiB |
After Width: | Height: | Size: 73 KiB |
After Width: | Height: | Size: 72 KiB |
After Width: | Height: | Size: 48 KiB |
After Width: | Height: | Size: 86 KiB |
After Width: | Height: | Size: 1.1 KiB |
After Width: | Height: | Size: 1.1 KiB |
After Width: | Height: | Size: 1.2 KiB |
After Width: | Height: | Size: 1.4 KiB |
|
@ -0,0 +1,66 @@
|
|||
# ANTLR 4 Documentation
|
||||
|
||||
Please check the [Frequently asked questions (FAQ)](faq/index.md) before asking questions on stackoverflow or the antlr-discussion list.
|
||||
|
||||
Notes:
|
||||
<ul>
|
||||
<li>To add to or improve this documentation, <a href=https://help.github.com/articles/fork-a-repo>fork</a> the <a href=https://github.com/antlr/antlr4>antlr/antlr4 repo</a> then update this `doc/index.md` or file(s) in that directory. Submit a <a href=https://help.github.com/articles/creating-a-pull-request>pull request</a> to get your changes incorporated into the main repository. Do not mix code and documentation updates in the same pull request. <b>You must sign the contributors.txt certificate of origin with your pull request if you've not done so before.</b></li>
|
||||
|
||||
<li>Copyright © 2012, The Pragmatic Bookshelf. Pragmatic Bookshelf grants a nonexclusive, irrevocable, royalty-free, worldwide license to reproduce, distribute, prepare derivative works, and otherwise use this contribution as part of the ANTLR project and associated documentation.</li>
|
||||
|
||||
<li>This text was copied with permission from the <a href=http://pragprog.com/book/tpantlr2/the-definitive-antlr-4-reference>The Definitive ANTLR 4 Reference</a>, though it is being morphed over time as the tool changes.</li>
|
||||
</ul>
|
||||
|
||||
Links in the documentation refer to various sections of the book but have been redirected to the general book page on the publisher's site. There are two excerpts on the publisher's website that might be useful to you without having to purchase the book: [Let's get Meta](http://media.pragprog.com/titles/tpantlr2/picture.pdf) and [Building a Translator with a Listener](http://media.pragprog.com/titles/tpantlr2/listener.pdf). You should also consider reading the following books (the vid describes the reference book):
|
||||
|
||||
<a href=""><img src=images/tpantlr2.png width=120></a>
|
||||
<a href=""><img src=images/tpdsl.png width=120></a>
|
||||
<a href="https://www.youtube.com/watch?v=OAoA3E-cyug"><img src=images/teronbook.png width=250></a>
|
||||
|
||||
This documentation is a reference and summarizes grammar syntax and the key semantics of ANTLR grammars. The source code for all examples in the book, not just this chapter, is free at the publisher's website. The following video is a general tour of ANTLR 4 and includes a description of how to use parse tree listeners to process Java files easily:
|
||||
|
||||
<a href="https://vimeo.com/59285751"><img src=images/tertalk.png width=200></a>
|
||||
|
||||
## Sections
|
||||
|
||||
* [Getting Started with ANTLR v4](getting-started.md)
|
||||
|
||||
* [Grammar Lexicon](lexicon.md)
|
||||
|
||||
* [Grammar Structure](grammars.md)
|
||||
|
||||
* [Parser Rules](parser-rules.md)
|
||||
|
||||
* [Left-recursive rules](left-recursion.md)
|
||||
|
||||
* [Actions and Attributes](actions.md)
|
||||
|
||||
* [Lexer Rules](lexer-rules.md)
|
||||
|
||||
* [Wildcard Operator and Nongreedy Subrules](wildcard.md)
|
||||
|
||||
* [Parse Tree Listeners](listeners.md)
|
||||
|
||||
* [Parse Tree Matching and XPath](tree-matching.md)
|
||||
|
||||
* [Semantic Predicates](predicates.md)
|
||||
|
||||
* [Options](options.md)
|
||||
|
||||
* [ANTLR Tool Command Line Options](tool-options.md)
|
||||
|
||||
* [Runtime Libraries and Code Generation Targets](targets.md)
|
||||
|
||||
* [Parser and lexer interpreters](interpreters.md)
|
||||
|
||||
* [Resources](resources.md)
|
||||
|
||||
# Building / releasing ANTLR itself
|
||||
|
||||
* [Building ANTLR itself](building-antlr.md)
|
||||
|
||||
* [Cutting an ANTLR Release](releasing-antlr.md)
|
||||
|
||||
* [Adding ANTLR unit tests](adding-tests.md)
|
||||
|
||||
* [Creating an ANTLR Language Target](creating-a-language-target.md)
|
|
@ -0,0 +1,79 @@
|
|||
# Parser and lexer interpreters
|
||||
|
||||
*Since ANTLR 4.2*
|
||||
|
||||
For small parsing tasks it is sometimes convenient to use ANTLR in interpreted mode, rather than generating a parser in a particular target, compiling it and running it as part of your application. Here's some sample code that creates lexer and parser Grammar objects and then creates interpreters. Once we have a ParserInterpreter, we can use it to parse starting in any rule we like, given a rule index (which the Grammar can provide).
|
||||
|
||||
```java
|
||||
LexerGrammar lg = new LexerGrammar(
|
||||
"lexer grammar L;\n" +
|
||||
"A : 'a' ;\n" +
|
||||
"B : 'b' ;\n" +
|
||||
"C : 'c' ;\n");
|
||||
Grammar g = new Grammar(
|
||||
"parser grammar T;\n" +
|
||||
"s : (A|B)* C ;\n",
|
||||
lg);
|
||||
LexerInterpreter lexEngine =
|
||||
lg.createLexerInterpreter(new ANTLRInputStream(input));
|
||||
CommonTokenStream tokens = new CommonTokenStream(lexEngine);
|
||||
ParserInterpreter parser = g.createParserInterpreter(tokens);
|
||||
ParseTree t = parser.parse(g.rules.get(startRule).index);
|
||||
```
|
||||
|
||||
You can also load combined grammars from a file:
|
||||
|
||||
```java
|
||||
public static ParseTree parse(String fileName,
|
||||
String combinedGrammarFileName,
|
||||
String startRule)
|
||||
throws IOException
|
||||
{
|
||||
final Grammar g = Grammar.load(combinedGrammarFileName);
|
||||
LexerInterpreter lexEngine = g.createLexerInterpreter(new ANTLRFileStream(fileName));
|
||||
CommonTokenStream tokens = new CommonTokenStream(lexEngine);
|
||||
ParserInterpreter parser = g.createParserInterpreter(tokens);
|
||||
ParseTree t = parser.parse(g.getRule(startRule).index);
|
||||
System.out.println("parse tree: "+t.toStringTree(parser));
|
||||
return t;
|
||||
}
|
||||
```
|
||||
|
||||
Then:
|
||||
|
||||
```java
|
||||
ParseTree t = parse("T.om",
|
||||
MantraGrammar,
|
||||
"compilationUnit");
|
||||
```
|
||||
|
||||
To load separate lexer/parser grammars, do this:
|
||||
|
||||
```java
|
||||
public static ParseTree parse(String fileNameToParse,
|
||||
String lexerGrammarFileName,
|
||||
String parserGrammarFileName,
|
||||
String startRule)
|
||||
throws IOException
|
||||
{
|
||||
final LexerGrammar lg = (LexerGrammar) Grammar.load(lexerGrammarFileName);
|
||||
final Grammar pg = Grammar.load(parserGrammarFileName, lg);
|
||||
ANTLRFileStream input = new ANTLRFileStream(fileNameToParse);
|
||||
LexerInterpreter lexEngine = lg.createLexerInterpreter(input);
|
||||
CommonTokenStream tokens = new CommonTokenStream(lexEngine);
|
||||
ParserInterpreter parser = pg.createParserInterpreter(tokens);
|
||||
ParseTree t = parser.parse(pg.getRule(startRule).index);
|
||||
System.out.println("parse tree: " + t.toStringTree(parser));
|
||||
return t;
|
||||
}
|
||||
```
|
||||
|
||||
Then:
|
||||
|
||||
```java
|
||||
ParseTree t = parse(fileName, XMLLexerGrammar, XMLParserGrammar, "document");
|
||||
```
|
||||
|
||||
This is also how we will integrate instantaneous parsing into ANTLRWorks2 and development environment plug-ins.
|
||||
|
||||
See [TestParserInterpreter.java](https://github.com/antlr/antlr4/blob/master/tool-testsuite/test/org/antlr/v4/test/tool/TestParserInterpreter.java).
|
|
@ -0,0 +1,244 @@
|
|||
# Java
|
||||
|
||||
## Development environments
|
||||
|
||||
### Intellij
|
||||
|
||||
There is a very complete and useful plug-in for Intellij 12-14, which you can grab at the [download page](https://plugins.jetbrains.com/plugin/7358?pr=). Check the [plugin readme](https://github.com/antlr/intellij-plugin-v4) for the feature set. Just go to the preferences and click on the "Install plug-in from disk..." button from this dialog box:
|
||||
|
||||
<img src="images/idea-prefs.png">
|
||||
|
||||
Select the intellij-plugin-1.x.zip (or whatever version) file and hit okay or apply. It will ask you to restart the IDE. If you look at the plug-ins again, you will see:
|
||||
|
||||
<img src="images/idea-prefs-after-install.png">
|
||||
|
||||
Also, I have prepared a [video](https://youtu.be/eW4WFgRtFeY) that will help you generate grammars and so on using ANTLR v4 in Intellij (w/o the plugin).
|
||||
|
||||
### Eclipse
|
||||
|
||||
Edgar Espina has created an [eclipse plugin for ANTLR v4](https://youtu.be/eW4WFgRtFeY). Features: Advanced Syntax Highlighting, Automatic Code Generation (on save), Manual Code Generation (through External Tools menu), Code Formatter (Ctrl+Shift+F), Syntax Diagrams, Advanced Rule Navigation between files (F3), Quick fixes.
|
||||
|
||||
### NetBeans
|
||||
|
||||
Sam Harwell's [ANTLRWorks2](http://tunnelvisionlabs.com/products/demo/antlrworks) works also as a plug-in, not just a stand-alone tool built on top of NetBeans.
|
||||
|
||||
## Build systems
|
||||
|
||||
### ant
|
||||
|
||||
### mvn
|
||||
|
||||
*Maven Plugin Reference*
|
||||
|
||||
The reference pages for the latest version of the Maven plugin for ANTLR 4 can be found here:
|
||||
|
||||
[http://www.antlr.org/api/maven-plugin/latest/index.html](http://www.antlr.org/api/maven-plugin/latest/index.html)
|
||||
|
||||
*Walkthrough*
|
||||
|
||||
This section describes how to create a simple ANTLR 4 project and build it using Maven. We are going to use the ArrayInit.g4 example from chapter 3 of the book and bring it under Maven. We will need to rename files and modify them. We will conclude by building a portable stand-alone application.
|
||||
|
||||
Generate the skeleton. To generate the maven skeleton, type these commands:
|
||||
|
||||
```bash
|
||||
mkdir SimpleAntlrMavenProject
|
||||
cd SimpleAntlrMavenProject
|
||||
mvn archetype:generate -DgroupId=org.abcd.examples -DartifactId=array-example -Dpackage=org.abcd.examples.ArrayInit -Dversion=1.0
|
||||
# Accept all the default values
|
||||
cd array-example
|
||||
```
|
||||
|
||||
Maven will ask a series of questions; simply accept the default answers by hitting enter.
|
||||
|
||||
If the previous step did not already leave you there, move into the directory created by maven:
|
||||
|
||||
```bash
|
||||
cd array-example
|
||||
```
|
||||
|
||||
We can use the find command to see the files created by maven:
|
||||
|
||||
```bash
|
||||
$ find . -type f
|
||||
./pom.xml
|
||||
./src/test/java/org/abcd/examples/ArrayInit/AppTest.java
|
||||
./src/main/java/org/abcd/examples/ArrayInit/App.java
|
||||
```
|
||||
|
||||
We need to edit the pom.xml file extensively. The App.java will be renamed to ArrayInit.java and will contain the main ANTLR java program which we will download from the book examples. The AppTest.java file will be renamed ArrayInitTest.java but will remain the empty test as created by maven. We will also be adding the grammar file ArrayInit.g4 from the book examples in there.
|
||||
|
||||
Get the examples for the book and put them in the Downloads folder. To obtain the ArrayInit.g4 grammar from the book, simply download it:
|
||||
|
||||
```bash
|
||||
pushd ~/Downloads
|
||||
wget http://media.pragprog.com/titles/tpantlr2/code/tpantlr2-code.tgz
|
||||
tar xvfz tpantlr2-code.tgz
|
||||
popd
|
||||
```
|
||||
|
||||
Copy the grammar to the maven project. The grammar file goes into a special folder under the src/ directory. The folder name must match the maven package name org.abcd.examples.ArrayInit.
|
||||
|
||||
```bash
|
||||
mkdir -p src/main/antlr4/org/abcd/examples/ArrayInit
|
||||
cp ~/Downloads/code/starter/ArrayInit.g4 src/main/antlr4/org/abcd/examples/ArrayInit
|
||||
```
|
||||
|
||||
Copy the main program to the maven project. We replace the maven App.java file with the main java program from the book. In the book, that main program is called Test.java; we rename it to ArrayInit.java:
|
||||
|
||||
```bash
|
||||
# Remove the maven file
|
||||
rm ./src/main/java/org/abcd/examples/ArrayInit/App.java
|
||||
# Copy and rename the example from the book
|
||||
cp ~/Downloads/code/starter/Test.java ./src/main/java/org/abcd/examples/ArrayInit/ArrayInit.java
|
||||
```
|
||||
|
||||
Spend a few minutes to read the main program. Notice that it reads the standard input stream. We need to remember this when we run the application.
|
||||
|
||||
Edit the ArrayInit.java file. We need to add a package declaration and to rename the class. Edit the file ./src/main/java/org/abcd/examples/ArrayInit/ArrayInit.java in your favorite editor. The head of the file should look like this when you are done:
|
||||
|
||||
```java
|
||||
package org.abcd.examples.ArrayInit;
|
||||
import org.antlr.v4.runtime.*;
|
||||
import org.antlr.v4.runtime.tree.*;
|
||||
|
||||
public class ArrayInit {
|
||||
...
|
||||
```
|
||||
|
||||
Edit the ArrayInitTest.java file. Maven creates a test file called AppTest.java; we need to rename it to match the name of our application:
|
||||
|
||||
```bash
|
||||
pushd src/test/java/org/abcd/examples/ArrayInit
|
||||
mv AppTest.java ArrayInitTest.java
|
||||
sed 's/App/ArrayInit/g' ArrayInitTest.java >ArrayInitTest.java.tmp
|
||||
mv ArrayInitTest.java.tmp ArrayInitTest.java
|
||||
popd
|
||||
```
|
||||
|
||||
Edit the pom.xml file. Now we need to extensively modify the pom.xml file. The final product looks like this:
|
||||
|
||||
```xml
|
||||
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
|
||||
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
|
||||
<modelVersion>4.0.0</modelVersion>
|
||||
<groupId>org.abcd.examples</groupId>
|
||||
<artifactId>array-init</artifactId>
|
||||
<version>1.0</version>
|
||||
<packaging>jar</packaging>
|
||||
<name>array-init</name>
|
||||
<url>http://maven.apache.org</url>
|
||||
<properties>
|
||||
<project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
|
||||
</properties>
|
||||
<dependencies>
|
||||
<dependency>
|
||||
<groupId>org.antlr</groupId>
|
||||
<artifactId>antlr4-runtime</artifactId>
|
||||
<version>4.5</version>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>junit</groupId>
|
||||
<artifactId>junit</artifactId>
|
||||
<version>3.8.1</version>
|
||||
</dependency>
|
||||
</dependencies>
|
||||
<build>
|
||||
<plugins>
|
||||
<!-- This plugin sets up maven to use Java 7 -->
|
||||
<plugin>
|
||||
<groupId>org.apache.maven.plugins</groupId>
|
||||
<artifactId>maven-compiler-plugin</artifactId>
|
||||
<version>3.1</version>
|
||||
<configuration>
|
||||
<source>1.7</source>
|
||||
<target>1.7</target>
|
||||
</configuration>
|
||||
</plugin>
|
||||
<!-- Plugin to compile the g4 files ahead of the java files
|
||||
See https://github.com/antlr/antlr4/blob/master/antlr4-maven-plugin/src/site/apt/examples/simple.apt.vm
|
||||
Except that the grammar does not need to contain the package declaration as stated in the documentation (I do not know why)
|
||||
To use this plugin, type:
|
||||
mvn antlr4:antlr4
|
||||
In any case, Maven will invoke this plugin before the Java source is compiled
|
||||
-->
|
||||
<plugin>
|
||||
<groupId>org.antlr</groupId>
|
||||
<artifactId>antlr4-maven-plugin</artifactId>
|
||||
<version>4.5</version>
|
||||
<executions>
|
||||
<execution>
|
||||
<goals>
|
||||
<goal>antlr4</goal>
|
||||
</goals>
|
||||
</execution>
|
||||
</executions>
|
||||
</plugin>
|
||||
<!-- plugin to create a self-contained portable package
|
||||
This allows us to execute our application like this:
|
||||
java -cp target/array-init-1.0-jar-with-dependencies.jar org.abcd.examples.ArrayInit.ArrayInit
|
||||
-->
|
||||
<plugin>
|
||||
<artifactId>maven-assembly-plugin</artifactId>
|
||||
<configuration>
|
||||
<descriptorRefs>
|
||||
<descriptorRef>jar-with-dependencies</descriptorRef>
|
||||
</descriptorRefs>
|
||||
</configuration>
|
||||
<executions>
|
||||
<execution>
|
||||
<id>simple-command</id>
|
||||
<phase>package</phase>
|
||||
<goals>
|
||||
<goal>attached</goal>
|
||||
</goals>
|
||||
</execution>
|
||||
</executions>
|
||||
</plugin>
|
||||
</plugins>
|
||||
</build>
|
||||
</project>
|
||||
```
|
||||
|
||||
This concludes the changes we had to make. We can look at the list of files we have with the find command:
|
||||
|
||||
```bash
|
||||
$ find . -type f
|
||||
./pom.xml
|
||||
./src/test/java/org/abcd/examples/ArrayInit/ArrayInitTest.java
|
||||
./src/main/antlr4/org/abcd/examples/ArrayInit/ArrayInit.g4
|
||||
./src/main/java/org/abcd/examples/ArrayInit/ArrayInit.java
|
||||
```
|
||||
|
||||
Building a stand-alone application. With all the files now in place, we can ask maven to create a stand-alone application. The following command does this:
|
||||
|
||||
```bash
|
||||
mvn package
|
||||
```
|
||||
|
||||
Maven creates a self-contained jar file called target/array-init-1.0-jar-with-dependencies.jar. We can execute the jar file, but remember that it reads from the standard input stream, so the command will appear to hang until we feed it some input:
|
||||
|
||||
```bash
|
||||
java -cp target/array-init-1.0-jar-with-dependencies.jar org.abcd.examples.ArrayInit.ArrayInit
|
||||
```
|
||||
|
||||
And let's feed it the following input:
|
||||
|
||||
```bash
|
||||
{1,2,3}
|
||||
^D
|
||||
```
|
||||
|
||||
The ^D signals the end of the input to the standard input stream and gets the rest of the application going. You should see the following output:
|
||||
|
||||
```bash
|
||||
(init { (value 1) , (value 2) , (value 3) })
|
||||
```
|
||||
|
||||
You can also build a jar file without the dependencies, and execute it with a maven command instead:
|
||||
|
||||
```bash
|
||||
mvn install
|
||||
mvn exec:java -Dexec.mainClass=org.abcd.examples.ArrayInit.ArrayInit
|
||||
{1,2,3}
|
||||
^D
|
||||
```
|
|
@ -0,0 +1,157 @@
|
|||
# JavaScript
|
||||
|
||||
## Which browsers are supported?
|
||||
|
||||
In theory, all browsers supporting ECMAScript 5.1.
|
||||
|
||||
In practice, this target has been extensively tested against:
|
||||
|
||||
* Firefox 34.0.5
|
||||
* Safari 8.0.2
|
||||
* Chrome 39.0.2171
|
||||
* Explorer 11.0.3
|
||||
|
||||
The tests were conducted using Selenium. No issue was found, so you should find that the runtime works pretty much against any recent JavaScript engine.
|
||||
|
||||
## Is NodeJS supported?
|
||||
|
||||
The runtime has also been extensively tested against Node.js 0.10.33. No issue was found.
|
||||
|
||||
## How to create a JavaScript lexer or parser?
|
||||
|
||||
This is pretty much the same as creating a Java lexer or parser, except you need to specify the language target, for example:
|
||||
|
||||
```bash
|
||||
$ antlr4 -Dlanguage=JavaScript MyGrammar.g4
|
||||
```
|
||||
|
||||
For a full list of antlr4 tool options, please visit the [tool documentation page](tool-options.md).
|
||||
|
||||
## Where can I get the runtime?
|
||||
|
||||
Once you've generated the lexer and/or parser code, you need to download the runtime.
|
||||
|
||||
The JavaScript runtime is available from the ANTLR web site [download section](http://www.antlr.org/download/index.html). The runtime is provided in the form of source code, so no additional installation is required.
|
||||
|
||||
We will not document here how to refer to the runtime from your project, since this would differ a lot depending on your project type and IDE.
|
||||
|
||||
## How do I get the runtime in my browser?
|
||||
|
||||
The runtime is quite big and is currently maintained in the form of around 50 scripts, which follow the same structure as the runtimes for other targets (Java, C#, Python...).
|
||||
|
||||
This structure is key in keeping code maintainable and consistent across targets.
|
||||
|
||||
However, this is a bit of a problem when it comes to getting the runtime into a browser. Nobody wants to write 50 times:
|
||||
|
||||
```
|
||||
<script src='lib/myscript.js'>
|
||||
```
|
||||
|
||||
In order to avoid having to do this, and also to have the exact same code for browsers and Node.js, we rely on a script which provides the equivalent of the Node.js 'require' function.
|
||||
|
||||
This script is provided by Torben Haase and is NOT part of the ANTLR JavaScript runtime, although the runtime heavily relies on it. Please note that the syntax for 'require' in Node.js is different from the one implemented by RequireJS and similar frameworks.
|
||||
|
||||
So, in short, assuming you have both the 'antlr4' directory and a 'lib' directory with 'require.js' inside it at the root of your web site, all you need to put in your HTML header is the following:
|
||||
|
||||
```xml
|
||||
<script src='lib/require.js'></script>
|
||||
<script>
|
||||
var antlr4 = require('antlr4/index');
|
||||
</script>
|
||||
```
|
||||
|
||||
This will load the runtime asynchronously.
|
||||
|
||||
## How do I get the runtime in Node.js?
|
||||
|
||||
Right now, there is no npm package available, so you need to register a link instead. This can be done by running the following command from the antlr4 directory:
|
||||
|
||||
```bash
|
||||
$ npm link antlr4
|
||||
```
|
||||
|
||||
This will install antlr4 using the package.json descriptor that comes with the script.
|
||||
|
||||
## How do I run the generated lexer and/or parser?
|
||||
|
||||
Let's suppose that your grammar is named, as above, "MyGrammar". Let's suppose this parser comprises a rule named "StartRule". The tool will have generated for you the following files:
|
||||
|
||||
* MyGrammarLexer.js
|
||||
* MyGrammarParser.js
|
||||
* MyGrammarListener.js (if you have not activated the -no-listener option)
|
||||
* MyGrammarVisitor.js (if you have activated the -visitor option)
|
||||
|
||||
(Developers used to Java/C# ANTLR will notice that there is no base listener or visitor generated; because JavaScript has no support for interfaces, the generated listener and visitor are fully fledged classes.)
|
||||
|
||||
Now a fully functioning script might look like the following:
|
||||
|
||||
```javascript
|
||||
var input = "your text to parse here"
|
||||
var chars = new antlr4.InputStream(input);
|
||||
var lexer = new MyGrammarLexer.MyGrammarLexer(chars);
|
||||
var tokens = new antlr4.CommonTokenStream(lexer);
|
||||
var parser = new MyGrammarParser.MyGrammarParser(tokens);
|
||||
parser.buildParseTrees = true;
|
||||
var tree = parser.StartRule();
|
||||
```
|
||||
|
||||
This program will work. But it won't be useful unless you do one of the following:
|
||||
|
||||
* you visit the parse tree using a custom listener
|
||||
* you visit the parse tree using a custom visitor
|
||||
* your grammar comprises production code (as in ANTLR 3)
|
||||
|
||||
(please note that production code is target specific, so you can't have multi-target grammars that include production code)
|
||||
|
||||
## How do I create and run a custom listener?
|
||||
|
||||
Let's suppose your MyGrammar grammar comprises 2 rules: "key" and "value". The antlr4 tool will have generated the following listener:
|
||||
|
||||
```javascript
|
||||
MyGrammarListener = function(ParseTreeListener) {
|
||||
// some code here
|
||||
}
|
||||
// some code here
|
||||
MyGrammarListener.prototype.enterKey = function(ctx) {};
|
||||
MyGrammarListener.prototype.exitKey = function(ctx) {};
|
||||
MyGrammarListener.prototype.enterValue = function(ctx) {};
|
||||
MyGrammarListener.prototype.exitValue = function(ctx) {};
|
||||
```
|
||||
|
||||
In order to provide custom behavior, you might want to create the following class:
|
||||
|
||||
```javascript
|
||||
KeyPrinter = function() {
|
||||
MyGrammarListener.call(this); // inherit default listener
|
||||
return this;
|
||||
};
|
||||
|
||||
// inherit default listener
|
||||
KeyPrinter.prototype = Object.create(MyGrammarListener.prototype);
|
||||
KeyPrinter.prototype.constructor = KeyPrinter;
|
||||
|
||||
// override default listener behavior
|
||||
KeyPrinter.prototype.exitKey = function(ctx) {
|
||||
console.log("Oh, a key!");
|
||||
};
|
||||
```
|
||||
|
||||
In order to execute this listener, you would simply add the following lines to the above code:
|
||||
|
||||
```javascript
|
||||
...
|
||||
tree = parser.StartRule(); // only repeated here for reference
|
||||
var printer = new KeyPrinter();
|
||||
antlr4.tree.ParseTreeWalker.DEFAULT.walk(printer, tree);
|
||||
```
|
||||
|
||||
## How do I integrate my parser with ACE editor?
|
||||
|
||||
This specific task is described in this [dedicated page](ace-javascript-target.md).
|
||||
|
||||
## How can I learn more about ANTLR?
|
||||
|
||||
|
||||
Further information can be found in "The Definitive ANTLR 4 Reference" book.
|
||||
|
||||
The JavaScript implementation of ANTLR is as close as possible to the Java one, so you shouldn't find it difficult to adapt the book's examples to JavaScript.
|
|
@ -0,0 +1,50 @@
|
|||
# Left-recursive rules
|
||||
|
||||
The most natural expression of some common language constructs, such as C declarators and arithmetic expressions, is left recursive. Unfortunately, left recursive specifications of arithmetic expressions are typically ambiguous but much easier to write out than the multiple levels required in a typical top-down grammar. Here is a sample ANTLR 4 grammar with a left recursive expression rule:
|
||||
|
||||
```
|
||||
stat: expr '=' expr ';' // e.g., x=y; or x=f(x);
|
||||
| expr ';' // e.g., f(x); or f(g(x));
|
||||
;
|
||||
expr: expr '*' expr
|
||||
| expr '+' expr
|
||||
| expr '(' expr ')' // f(x)
|
||||
| id
|
||||
;
|
||||
```
|
||||
|
||||
In straight context-free grammars, such a rule is ambiguous because, for input like `1+2*3`, either operator can be interpreted as occurring first. ANTLR rewrites the rule to be non-left recursive and unambiguous using semantic predicates:
|
||||
|
||||
```
|
||||
expr[int pr] : id
|
||||
( {4 >= $pr}? '*' expr[5]
|
||||
| {3 >= $pr}? '+' expr[4]
|
||||
| {2 >= $pr}? '(' expr[0] ')'
|
||||
)*
|
||||
;
|
||||
```
|
||||
|
||||
The predicates resolve ambiguities by comparing the precedence of the current operator against the precedence of the previous operator. An expansion of expr[pr] can match only those subexpressions whose precedence meets or exceeds pr.
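To make the comparison concrete, here is a small hand-written Java sketch of the same precedence-climbing idea. The class, token handling, and string building are purely illustrative and are not ANTLR-generated code; only the `>= pr` tests mirror the predicates above.

```java
import java.util.Arrays;
import java.util.List;

// Mirrors expr[int pr] above: each loop iteration is guarded by the same
// "operator precedence >= pr" test that the generated predicates perform.
public class PrecDemo {
    private final List<String> toks;
    private int pos = 0;

    PrecDemo(String... toks) { this.toks = Arrays.asList(toks); }

    String expr(int pr) {
        String left = toks.get(pos++);                      // id
        while (pos < toks.size()) {
            String op = toks.get(pos);
            if (op.equals("*") && 4 >= pr)      { pos++; left = "(" + left + " * " + expr(5) + ")"; }
            else if (op.equals("+") && 3 >= pr) { pos++; left = "(" + left + " + " + expr(4) + ")"; }
            else break;                                     // predicate fails; an enclosing call matches op
        }
        return left;
    }

    public static void main(String[] args) {
        // prints (1 + (2 * 3)): '*' binds tighter because expr[4] rejects '+' but accepts '*'
        System.out.println(new PrecDemo("1", "+", "2", "*", "3").expr(0));
    }
}
```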
|
||||
|
||||
## Formal rules
|
||||
|
||||
The formal 4.0, 4.1 ANTLR left-recursion elimination rules were changed (simplified) for 4.2 and are laid out in the [ALL(*) tech report](http://www.antlr.org/papers/allstar-techreport.pdf):
|
||||
|
||||
* Binary expressions are expressions which contain a recursive invocation of the rule as the first and last element of the alternative.
|
||||
* Suffix expressions contain a recursive invocation of the rule as the first element of the alternative, but not as the last element.
|
||||
* Prefix expressions contain a recursive invocation of the rule as the last element of the alternative, but not as the first element.
|
||||
|
||||
There is no such thing as a "ternary" expression--they are just binary expressions in disguise.
|
||||
|
||||
The right-associativity specifiers used to be attached to individual tokens, but associativity is really a property of an alternative, so the option is now specified on the individual alternative; e.g.,
|
||||
|
||||
```
|
||||
e : e '*' e
|
||||
| e '+' e
|
||||
|<assoc=right> e '?' e ':' e
|
||||
|<assoc=right> e '=' e
|
||||
| INT
|
||||
;
|
||||
```
|
||||
|
||||
If your 4.0 or 4.1 grammar uses a right-associative ternary operator, you will need to update your grammar to include `<assoc=right>` on the alternative operator. To smooth the transition, `<assoc=right>` is still allowed on token references but it is ignored.
|
|
@ -0,0 +1,283 @@
|
|||
# Lexer Rules
|
||||
|
||||
A lexer grammar is composed of lexer rules, optionally broken into multiple modes. Lexical modes allow us to split a single lexer grammar into multiple sublexers. The lexer can only return tokens matched by rules from the current mode.
|
||||
|
||||
Lexer rules specify token definitions and more or less follow the syntax of parser rules except that lexer rules cannot have arguments, return values, or local variables. Lexer rule names must begin with an uppercase letter, which distinguishes them from parser rule names:
|
||||
|
||||
```
|
||||
/** Optional document comment */
|
||||
TokenName : alternative1 | ... | alternativeN ;
|
||||
```
|
||||
|
||||
You can also define rules that are not tokens but rather aid in the recognition of tokens. These fragment rules do not result in tokens visible to the parser:
|
||||
|
||||
```
|
||||
fragment
|
||||
HelperTokenRule : alternative1 | ... | alternativeN ;
|
||||
```
|
||||
|
||||
For example, `DIGIT` is a pretty common fragment rule:
|
||||
|
||||
```
|
||||
INT : DIGIT+ ; // references the DIGIT helper rule
|
||||
fragment DIGIT : [0-9] ; // not a token by itself
|
||||
```
|
||||
|
||||
## Lexical Modes
|
||||
|
||||
Modes allow you to group lexical rules by context, such as inside and outside of XML tags. It’s like having multiple sublexers, one for each context. The lexer can only return tokens matched by entering a rule in the current mode. Lexers start out in the so-called default mode. All rules are considered to be within the default mode unless you specify a mode command. Modes are not allowed within combined grammars, just lexer grammars. (See grammar `XMLLexer` from [Tokenizing XML](http://pragprog.com/book/tpantlr2/the-definitive-antlr-4-reference).)
|
||||
|
||||
```
|
||||
rules in default mode
|
||||
...
|
||||
mode MODE1;
|
||||
rules in MODE1
|
||||
...
|
||||
mode MODEN;
|
||||
rules in MODEN
|
||||
...
|
||||
```
|
||||
|
||||
## Lexer Rule Elements
|
||||
|
||||
Lexer rules allow two constructs that are unavailable to parser rules: the .. range operator and the character set notation enclosed in square brackets, [characters]. Don’t confuse character sets with arguments to parser rules. [characters] only means character set in a lexer. Here’s a summary of all lexer rule elements:
|
||||
|
||||
<table>
|
||||
<tr>
|
||||
<th>Syntax</th><th>Description</th>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>T</td><td>
|
||||
Match token T at the current input position. Tokens always begin with a capital letter.</td>
|
||||
</tr>
|
||||
|
||||
<tr>
|
||||
<td>’literal’</td><td>
|
||||
Match that character or sequence of characters. E.g., ’while’ or ’=’.</td>
|
||||
</tr>
|
||||
|
||||
<tr>
|
||||
<td>[char set]</td><td>
|
||||
Match one of the characters specified in the character set. Interpret x-y as set of characters between range x and y, inclusively. The following escaped characters are interpreted as single special characters: \n, \r, \b, \t, and \f. To get ], \, or - you must escape them with \. You can also use Unicode character specifications: \uXXXX. Here are a few examples:
|
||||
|
||||
<pre>
|
||||
WS : [ \n\u000D] -> skip ; // same as [ \n\r]
|
||||
|
||||
ID : [a-zA-Z] [a-zA-Z0-9]* ; // match usual identifier spec
|
||||
|
||||
DASHBRACK : [\-\]]+ ; // match - or ] one or more times
|
||||
</pre>
|
||||
</td>
|
||||
</tr>
|
||||
|
||||
<tr>
|
||||
<td>’x’..’y’</td><td>
|
||||
Match any single character between range x and y, inclusively. E.g., ’a’..’z’. ’a’..’z’ is identical to [a-z].</td>
|
||||
</tr>
|
||||
|
||||
<tr>
|
||||
<td>T</td><td>
|
||||
Invoke lexer rule T; recursion is allowed in general, but not left recursion. T can be a regular token or fragment rule.
|
||||
|
||||
<pre>
|
||||
ID : LETTER (LETTER|'0'..'9')* ;
|
||||
|
||||
fragment
|
||||
LETTER : [a-zA-Z\u0080-\u00FF_] ;
|
||||
</pre>
|
||||
</td>
|
||||
</tr>
|
||||
|
||||
<tr>
|
||||
<td>.</td><td>
|
||||
The dot is a single-character wildcard that matches any single character. Example:
|
||||
<pre>
|
||||
ESC : '\\' . ; // match any escaped \x character
|
||||
</pre>
|
||||
</td>
|
||||
</tr>
|
||||
|
||||
<tr>
|
||||
<td>{«action»}</td><td>
|
||||
Lexer actions can appear anywhere as of 4.2, not just at the end of the outermost alternative. The lexer executes the actions at the appropriate input position, according to the placement of the action within the rule. To execute a single action for a rule that has multiple alternatives, you can enclose the alts in parentheses and put the action afterwards:
|
||||
|
||||
<pre>
|
||||
END : ('endif'|'end') {System.out.println("found an end");} ;
|
||||
</pre>
|
||||
|
||||
<p>The action conforms to the syntax of the target language. ANTLR copies the action’s contents into the generated code verbatim; there is no translation of expressions like $x.y as there is in parser actions.</p>
|
||||
<p>
|
||||
Only actions within the outermost token rule are executed. In other words, if STRING calls ESC_CHAR and ESC_CHAR has an action, that action is not executed when the lexer starts matching in STRING.</p></td>
|
||||
</tr>
|
||||
|
||||
<tr>
|
||||
<td>{«p»}?</td><td>
|
||||
Evaluate semantic predicate «p». If «p» evaluates to false at runtime, the surrounding rule becomes “invisible” (nonviable). Expression «p» conforms to the target language syntax. While semantic predicates can appear anywhere within a lexer rule, it is most efficient to have them at the end of the rule. The one caveat is that semantic predicates must precede lexer actions. See Predicates in Lexer Rules.</td>
|
||||
</tr>
|
||||
|
||||
<tr>
|
||||
<td>~x</td><td>
|
||||
Match any single character not in the set described by x. Set x can be a single character literal, a range, or a subrule set like ~('x'|'y'|'z') or ~[xyz]. Here is a rule that uses ~[\r\n]* to match any character other than a carriage return or newline:
|
||||
<pre>
|
||||
COMMENT : '#' ~[\r\n]* '\r'? '\n' -> skip ;
|
||||
</pre>
|
||||
</td>
|
||||
</tr>
|
||||
</table>
|
||||
|
||||
Just as with parser rules, lexer rules allow subrules in parentheses and EBNF operators: `?`, `*`, `+`. The `COMMENT` rule illustrates the `*` and `?` operators. A common use of `+` is `[0-9]+` to match integers. Lexer subrules can also use the nongreedy `?` suffix on those EBNF operators.
|
||||
|
||||
## Recursive Lexer Rules
|
||||
|
||||
ANTLR lexer rules can be recursive, unlike most lexical grammar tools. This comes in really handy when you want to match nested tokens like nested action blocks: `{...{...}...}`.
|
||||
|
||||
```
|
||||
lexer grammar Recur;
|
||||
|
||||
ACTION : '{' ( ACTION | ~[{}] )* '}' ;
|
||||
|
||||
WS : [ \r\t\n]+ -> skip ;
|
||||
```
|
||||
|
||||
## Redundant String Literals
|
||||
|
||||
Be careful that you don’t specify the same string literal on the right-hand side of multiple lexer rules. Such literals are ambiguous and could match multiple token types. ANTLR makes this literal unavailable to the parser. The same is true for rules across modes. For example, the following lexer grammar defines two tokens with the same character sequence:
|
||||
|
||||
```
|
||||
lexer grammar L;
|
||||
AND : '&' ;
|
||||
mode STR;
|
||||
MASK : '&' ;
|
||||
```
|
||||
|
||||
A parser grammar cannot reference the literal '&', but it can reference the tokens by name:
|
||||
|
||||
```
|
||||
parser grammar P;
|
||||
options { tokenVocab=L; }
|
||||
a : '&' // results in a tool error: no such token
|
||||
AND // no problem
|
||||
MASK // no problem
|
||||
;
|
||||
```
|
||||
|
||||
Here’s a build and test sequence:
|
||||
|
||||
```bash
|
||||
$ antlr4 L.g4 # yields L.tokens file needed by tokenVocab option in P.g4
|
||||
$ antlr4 P.g4
|
||||
error(126): P.g4:3:4: cannot create implicit token for string literal '&' in non-combined grammar
|
||||
```
|
||||
|
||||
## Lexer Rule Actions
|
||||
|
||||
An ANTLR lexer creates a Token object after matching a lexical rule. Each request for a token starts in `Lexer.nextToken`, which calls `emit` once it has identified a token. `emit` collects information from the current state of the lexer to build the token. It accesses fields `_type`, `_text`, `_channel`, `_tokenStartCharIndex`, `_tokenStartLine`, and `_tokenStartCharPositionInLine`. You can set the state of these with the various setter methods such as `setType`. For example, the following rule turns `enum` into an identifier if `enumIsKeyword` is false.
|
||||
|
||||
```
|
||||
ENUM : 'enum' {if (!enumIsKeyword) setType(Identifier);} ;
|
||||
```
|
||||
|
||||
ANTLR does no special `$x` attribute translations in lexer actions (unlike v3).
|
||||
|
||||
There can be at most a single action for a lexical rule, regardless of how many alternatives there are in that rule.
|
||||
|
||||
## Lexer Commands
|
||||
|
||||
To avoid tying a grammar to a particular target language, ANTLR supports lexer commands. Unlike arbitrary embedded actions, these commands follow specific syntax and are limited to a few common commands. Lexer commands appear at the end of the outermost alternative of a lexer rule definition. Like arbitrary actions, there can only be one per token rule. A lexer command consists of the `->` operator followed by one or more command names that can optionally take parameters:
|
||||
|
||||
```
|
||||
TokenName : «alternative» -> command-name
|
||||
TokenName : «alternative» -> command-name («identifier or integer»)
|
||||
```
|
||||
|
||||
An alternative can have more than one command separated by commas. Here are the valid command names:
|
||||
|
||||
* skip
|
||||
* more
|
||||
* popMode
|
||||
* mode( x )
|
||||
* pushMode( x )
|
||||
* type( x )
|
||||
* channel( x )
|
||||
|
||||
See the book source code for usage, some examples of which are shown here:
|
||||
|
||||
### skip
|
||||
|
||||
A 'skip' command tells the lexer to get another token and throw out the current text.
|
||||
|
||||
```
|
||||
ID : [a-zA-Z]+ ; // match identifiers
|
||||
INT : [0-9]+ ; // match integers
|
||||
NEWLINE:'\r'? '\n' ; // return newlines to parser (is end-statement signal)
|
||||
WS : [ \t]+ -> skip ; // toss out whitespace
|
||||
```
|
||||
|
||||
### mode(), pushMode(), popMode, and more
|
||||
|
||||
The mode commands alter the mode stack and hence the mode of the lexer. The 'more' command forces the lexer to get another token but without throwing out the current text. The token type will be that of the "final" rule matched (i.e., the one without a more or skip command).
|
||||
|
||||
```
|
||||
// Default "mode": Everything OUTSIDE of a tag
|
||||
COMMENT : '<!--' .*? '-->' ;
|
||||
CDATA : '<![CDATA[' .*? ']]>' ;
OPEN : '<' -> pushMode(INSIDE) ;
|
||||
...
|
||||
XMLDeclOpen : '<?xml' S -> pushMode(INSIDE) ;
|
||||
SPECIAL_OPEN: '<?' Name -> more, pushMode(PROC_INSTR) ;
|
||||
// ----------------- Everything INSIDE of a tag ---------------------
|
||||
mode INSIDE;
|
||||
CLOSE : '>' -> popMode ;
|
||||
SPECIAL_CLOSE: '?>' -> popMode ; // close <?xml...?>
|
||||
SLASH_CLOSE : '/>' -> popMode ;
|
||||
```
|
||||
|
||||
Also check out:
|
||||
|
||||
```
|
||||
lexer grammar Strings;
|
||||
LQUOTE : '"' -> more, mode(STR) ;
|
||||
WS : [ \r\t\n]+ -> skip ;
|
||||
mode STR;
|
||||
STRING : '"' -> mode(DEFAULT_MODE) ; // token we want parser to see
|
||||
TEXT : . -> more ; // collect more text for string
|
||||
```
|
||||
|
||||
Popping the bottom layer of a mode stack will result in an exception. Switching modes with `mode` changes the current stack top. More than one `more` is the same as just one and the position does not matter.
|
||||
|
||||
### type()
|
||||
|
||||
```
|
||||
lexer grammar SetType;
|
||||
tokens { STRING }
|
||||
DOUBLE : '"' .*? '"' -> type(STRING) ;
|
||||
SINGLE : '\'' .*? '\'' -> type(STRING) ;
|
||||
WS : [ \r\t\n]+ -> skip ;
|
||||
```
|
||||
|
||||
For multiple 'type()' commands, only the rightmost has an effect.
|
||||
|
||||
### channel()
|
||||
|
||||
```
|
||||
BLOCK_COMMENT
|
||||
: '/*' .*? '*/' -> channel(HIDDEN)
|
||||
;
|
||||
LINE_COMMENT
|
||||
: '//' ~[\r\n]* -> channel(HIDDEN)
|
||||
;
|
||||
...
|
||||
// ----------
|
||||
// Whitespace
|
||||
//
|
||||
// Characters and character constructs that are of no import
|
||||
// to the parser and are used to make the grammar easier to read
|
||||
// for humans.
|
||||
//
|
||||
WS : [ \t\r\n\f]+ -> channel(HIDDEN) ;
|
||||
```
|
||||
|
||||
As of 4.5, you can also define channel names like enumerations with the following construct above the lexer rules:
|
||||
|
||||
```
|
||||
channels { WSCHANNEL, MYHIDDEN }
|
||||
```
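A host application can later fish those tokens back out of the token buffer. The sketch below is hypothetical: it assumes a generated lexer called `MyLexer` whose grammar declares the channels above and routes whitespace to `WSCHANNEL` (the generated lexer is assumed to expose the channel name as an `int` constant).

```java
import org.antlr.v4.runtime.ANTLRInputStream;
import org.antlr.v4.runtime.CommonTokenStream;
import org.antlr.v4.runtime.Token;

public class ChannelDemo {
    public static void main(String[] args) {
        // MyLexer and MyLexer.WSCHANNEL are assumed to come from a lexer grammar
        // that declares channels { WSCHANNEL, MYHIDDEN } and sends WS there.
        MyLexer lexer = new MyLexer(new ANTLRInputStream("a  b"));
        CommonTokenStream tokens = new CommonTokenStream(lexer);
        tokens.fill(); // run the lexer to EOF so every token is buffered
        for (Token t : tokens.getTokens()) {
            if (t.getChannel() == MyLexer.WSCHANNEL) {
                System.out.println("off-channel token: '" + t.getText() + "'");
            }
        }
    }
}
```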
|
|
@ -0,0 +1,110 @@
|
|||
# Grammar Lexicon
|
||||
|
||||
The lexicon of ANTLR is familiar to most programmers because it follows the syntax of C and its derivatives with some extensions for grammatical descriptions.
|
||||
|
||||
## Comments
|
||||
|
||||
There are single-line, multiline, and Javadoc-style comments:
|
||||
|
||||
```
|
||||
/** This grammar is an example illustrating the three kinds
|
||||
* of comments.
|
||||
*/
|
||||
grammar T;
|
||||
/* a multi-line
|
||||
comment
|
||||
*/
|
||||
|
||||
/** This rule matches a declarator for my language */
|
||||
decl : ID ; // match a variable name
|
||||
```
|
||||
|
||||
The Javadoc comments are hidden from the parser and are ignored at the moment. They are intended to be used only at the start of the grammar and any rule.
|
||||
|
||||
## Identifiers
|
||||
|
||||
Token names always start with a capital letter and so do lexer rules as defined by Java’s `Character.isUpperCase` method. Parser rule names always start with a lowercase letter (those that fail `Character.isUpperCase`). The initial character can be followed by uppercase and lowercase letters, digits, and underscores. Here are some sample names:
|
||||
|
||||
```
|
||||
ID, LPAREN, RIGHT_CURLY // token names/rules
|
||||
expr, simpleDeclarator, d2, header_file // rule names
|
||||
```
|
||||
|
||||
Like Java, ANTLR accepts Unicode characters in ANTLR names:
|
||||
|
||||
<img src=images/nonascii.png width=100>
|
||||
|
||||
To support Unicode parser and lexer rule names, ANTLR uses the following rule:
|
||||
|
||||
```
|
||||
ID : a=NameStartChar NameChar*
|
||||
{
|
||||
if ( Character.isUpperCase(getText().charAt(0)) ) setType(TOKEN_REF);
|
||||
else setType(RULE_REF);
|
||||
}
|
||||
;
|
||||
```
|
||||
|
||||
Rule `NameChar` identifies the valid identifier characters:
|
||||
|
||||
```
|
||||
fragment
|
||||
NameChar
|
||||
: NameStartChar
|
||||
| '0'..'9'
|
||||
| '_'
|
||||
| '\u00B7'
|
||||
| '\u0300'..'\u036F'
|
||||
| '\u203F'..'\u2040'
|
||||
;
|
||||
fragment
|
||||
NameStartChar
|
||||
: 'A'..'Z' | 'a'..'z'
|
||||
| '\u00C0'..'\u00D6'
|
||||
| '\u00D8'..'\u00F6'
|
||||
| '\u00F8'..'\u02FF'
|
||||
| '\u0370'..'\u037D'
|
||||
| '\u037F'..'\u1FFF'
|
||||
| '\u200C'..'\u200D'
|
||||
| '\u2070'..'\u218F'
|
||||
| '\u2C00'..'\u2FEF'
|
||||
| '\u3001'..'\uD7FF'
|
||||
| '\uF900'..'\uFDCF'
|
||||
| '\uFDF0'..'\uFFFD'
|
||||
;
|
||||
```
|
||||
|
||||
Rule `NameStartChar` is the list of characters that can start an identifier (rule, token, or label name).
|
||||
These more or less correspond to `isJavaIdentifierPart` and `isJavaIdentifierStart` in Java’s Character class. Make sure to use the `-encoding` option on the ANTLR tool if your grammar file is not in UTF-8 format, so that ANTLR reads characters properly.
|
||||
|
||||
## Literals
|
||||
|
||||
ANTLR does not distinguish between character and string literals as most languages do. All literal strings one or more characters in length are enclosed in single quotes such as `';'`, `'if'`, `'>='`, and `'\''` (the one-character string containing the single quote character). Literals never contain regular expressions.
|
||||
|
||||
Literals can contain Unicode escape sequences of the form `\uXXXX`, where XXXX is the hexadecimal Unicode character value. For example, `'\u00E8'` is the French letter with a grave accent: `'è'`. ANTLR also understands the usual special escape sequences: `'\n'` (newline), `'\r'` (carriage return), `'\t'` (tab), `'\b'` (backspace), and `'\f'` (form feed). You can use Unicode characters directly within literals or use the Unicode escape sequences:
|
||||
|
||||
```
|
||||
grammar Foreign;
|
||||
a : '外' ;
|
||||
```
|
||||
|
||||
The recognizers that ANTLR generates assume a character vocabulary containing all Unicode characters. The input file encoding assumed by the runtime library depends on the target language. For the Java target, the runtime library assumes files are in UTF-8. Using the constructors, you can specify a different encoding. See, for example, ANTLR’s `ANTLRFileStream`.
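For example, here is a minimal sketch for the Java target. The file name `foreign.txt`, its encoding, and the `ForeignLexer`/`ForeignParser` classes are assumptions (the classes would be generated from the `Foreign` grammar above).

```java
import org.antlr.v4.runtime.ANTLRFileStream;
import org.antlr.v4.runtime.CommonTokenStream;

public class EncodingDemo {
    public static void main(String[] args) throws Exception {
        // The two-argument constructor names the file encoding explicitly.
        ANTLRFileStream input = new ANTLRFileStream("foreign.txt", "UTF-16LE");
        ForeignLexer lexer = new ForeignLexer(input);
        CommonTokenStream tokens = new CommonTokenStream(lexer);
        ForeignParser parser = new ForeignParser(tokens);
        parser.a(); // start rule from the grammar above
    }
}
```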
|
||||
|
||||
## Actions
|
||||
|
||||
Actions are code blocks written in the target language. You can use actions in a number of places within a grammar, but the syntax is always the same: arbitrary text surrounded by curly braces. You don’t need to escape a closing curly character if it’s in a string or comment: `"}"` or `/*}*/`. If the curlies are balanced, you also don’t need to escape }: `{...}`. Otherwise, escape extra curlies with a backslash: `\{` or `\}`. The action text should conform to the target language as specified with the `language` option.
|
||||
|
||||
Embedded code can appear in: `@header` and `@members` named actions, parser and lexer rules, exception catching specifications, attribute sections for parser rules (return values, arguments, and locals), and some rule element options (currently predicates).
|
||||
|
||||
The only interpretation ANTLR does inside actions relates to grammar attributes; see [Token Attributes](http://pragprog.com/book/tpantlr2/the-definitive-antlr-4-reference) and Chapter 10, [Attributes and Actions](http://pragprog.com/book/tpantlr2/the-definitive-antlr-4-reference). Actions embedded within lexer rules are emitted without any interpretation or translation into generated lexers.
|
||||
|
||||
## Keywords
|
||||
|
||||
Here’s a list of the reserved words in ANTLR grammars:
|
||||
|
||||
```
|
||||
import, fragment, lexer, parser, grammar, returns,
|
||||
locals, throws, catch, finally, mode, options, tokens
|
||||
```
|
||||
|
||||
Also, although it is not a keyword, do not use the word `rule` as a rule name. Further, do not use any keyword of the target language as a token, label, or rule name. For example, rule `if` would result in a generated function called `if`. That obviously would not compile.
|
|
@ -0,0 +1,38 @@
|
|||
# Parse Tree Listeners
|
||||
|
||||
*Partially taken from the publicly visible [excerpt from the ANTLR 4 book](http://media.pragprog.com/titles/tpantlr2/picture.pdf)*
|
||||
|
||||
By default, ANTLR-generated parsers build a data structure called a parse tree or syntax tree that records how the parser recognized the structure of the input sentence and component phrases.
|
||||
|
||||
<img src=images/process.png>
|
||||
|
||||
The interior nodes of the parse tree are phrase names that group and identify their children. The root node is the most abstract phrase name, in this case `stat` (short for statement). The leaves of a parse tree are always the input tokens. Parse trees sit between a language recognizer and an interpreter or translator implementation. They are extremely effective data structures because they contain all of the input and complete knowledge of how the parser grouped the symbols into phrases. Better yet, they are easy to understand and the parser generates them automatically (unless you turn them off with `parser.setBuildParseTree(false)`).
|
||||
|
||||
Because we specify phrase structure with a set of rules, parse tree subtree roots correspond to grammar rule names. ANTLR has a ParseTreeWalker that knows how to walk these parse trees and trigger events in listener implementation objects that you can create. The ANTLR tool generates listener interfaces for you also, unless you turn that off with a commandline option. You can also have it generate visitors. For example from a Java.g4 grammar, ANTLR generates:
|
||||
|
||||
```java
|
||||
public interface JavaListener extends ParseTreeListener<Token> {
|
||||
void enterClassDeclaration(JavaParser.ClassDeclarationContext ctx);
|
||||
void exitClassDeclaration(JavaParser.ClassDeclarationContext ctx);
|
||||
void enterMethodDeclaration(JavaParser.MethodDeclarationContext ctx);
|
||||
...
|
||||
}
|
||||
```
|
||||
|
||||
where there is an enter and exit method for each rule in the parser grammar. ANTLR also generates a base listener with default, empty implementations of all listener interface methods, in this case called JavaBaseListener. You can build your listener by subclassing this base and overriding the methods of interest.
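A minimal sketch of such a subclass is shown below; the line-number printing and the `parser` field are illustrative only (the field is kept because the walking example that follows passes the parser to the constructor).

```java
public class MyListener extends JavaBaseListener {
    private final JavaParser parser;

    public MyListener(JavaParser parser) { this.parser = parser; }

    @Override
    public void enterClassDeclaration(JavaParser.ClassDeclarationContext ctx) {
        // getStart() is available on every rule context node
        System.out.println("class declaration starting at line " + ctx.getStart().getLine());
    }
}
```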
|
||||
|
||||
Assuming you've created a listener object called `MyListener`, here is how to call the Java parser and walk the parse tree:
|
||||
|
||||
```java
|
||||
JavaLexer lexer = new JavaLexer(input);
|
||||
CommonTokenStream tokens = new CommonTokenStream(lexer);
|
||||
JavaParser parser = new JavaParser(tokens);
|
||||
JavaParser.CompilationUnitContext tree = parser.compilationUnit(); // parse a compilationUnit
|
||||
|
||||
MyListener extractor = new MyListener(parser);
|
||||
ParseTreeWalker.DEFAULT.walk(extractor, tree); // walk the tree with the listener, using the default walker
|
||||
```
|
||||
|
||||
Listeners and visitors are great because they keep application-specific code out of grammars, making grammars easier to read and preventing them from getting entangled with a particular application.
|
||||
|
||||
See the book for more information on listeners and to learn how to use visitors. (The biggest difference between the listener and visitor mechanisms is that listener methods are called independently by an ANTLR-provided walker object, whereas visitor methods must walk their children with explicit visit calls. Forgetting to invoke visitor methods on a node’s children means those subtrees don’t get visited.)
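For contrast, here is a hedged sketch of the visitor style, assuming the `-visitor` option was used so that `JavaBaseVisitor` was generated; the class name and printed message are illustrative.

```java
public class ClassPrinter extends JavaBaseVisitor<Void> {
    @Override
    public Void visitClassDeclaration(JavaParser.ClassDeclarationContext ctx) {
        System.out.println("found a class declaration");
        // forget this call and nested class declarations are silently skipped
        return visitChildren(ctx);
    }
}
// usage: new ClassPrinter().visit(tree);
```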
|
|
@ -0,0 +1,101 @@
|
|||
# Options
|
||||
|
||||
There are a number of options that you can specify at the grammar and rule element level. (There are currently no rule options.) These change how ANTLR generates code from your grammar. The general syntax is:
|
||||
|
||||
```
|
||||
options { name1=value1; ... nameN=valueN; } // ANTLR not target language syntax
|
||||
```
|
||||
|
||||
where a value can be an identifier, a qualified identifier (for example, a.b.c), a string, a multi-line string in curly braces `{...}`, or an integer.
|
||||
|
||||
## Grammar Options
|
||||
|
||||
All grammars can use the following options. In combined grammars, all options except language pertain only to the generated parser. Options may be set either within the grammar file using the options syntax (described above) or when invoking ANTLR on the command line, using the `-D` option. (see Section 15.9, [ANTLR Tool Command Line Options](tool-options.md).) The following examples demonstrate both mechanisms; note that `-D` overrides options within the grammar.
|
||||
|
||||
* `superClass`. Set the superclass of the generated parser or lexer. For combined grammars, it sets the superclass of the parser.
|
||||
```
|
||||
$ cat Hi.g4
|
||||
grammar Hi;
|
||||
a : 'hi' ;
|
||||
$ antlr4 -DsuperClass=XX Hi.g4
|
||||
$ grep 'public class' HiParser.java
|
||||
public class HiParser extends XX {
|
||||
$ grep 'public class' HiLexer.java
|
||||
public class HiLexer extends Lexer {
|
||||
```
|
||||
* `language` Generate code in the indicated language, if ANTLR is able to do so. Otherwise, you will see an error message like this:
|
||||
```
|
||||
$ antlr4 -Dlanguage=C MyGrammar.g4
|
||||
error(31): ANTLR cannot generate C code as of version 4.0
|
||||
```
|
||||
* `tokenVocab` ANTLR assigns token type numbers to the tokens as it encounters them in a file. To use different token type values, such as with a separate lexer, use this option to have ANTLR pull in the `tokens` file. ANTLR generates a `tokens` file from each grammar.
|
||||
```
|
||||
$ cat SomeLexer.g4
|
||||
lexer grammar SomeLexer;
|
||||
ID : [a-z]+ ;
|
||||
$ cat R.g4
|
||||
parser grammar R;
|
||||
options {tokenVocab=SomeLexer;}
|
||||
tokens {A,B,C} // normally, these would be token types 1, 2, 3
|
||||
a : ID ;
|
||||
$ antlr4 SomeLexer.g4
|
||||
$ cat SomeLexer.tokens
|
||||
ID=1
|
||||
$ antlr4 R.g4
|
||||
$ cat R.tokens
|
||||
A=2
|
||||
B=3
|
||||
C=4
|
||||
ID=1
|
||||
```
|
||||
* `TokenLabelType` ANTLR normally uses type `Token` when it generates variables referencing tokens. If you have passed a `TokenFactory` to your parser and lexer so that they create custom tokens, you should set this option to your specific type. This ensures that the context objects know your type for fields and method return values.
|
||||
```
|
||||
$ cat T2.g4
|
||||
grammar T2;
|
||||
options {TokenLabelType=MyToken;}
|
||||
a : x=ID ;
|
||||
$ antlr4 T2.g4
|
||||
$ grep MyToken T2Parser.java
|
||||
public MyToken x;
|
||||
```
|
||||
* `contextSuperClass`. Specify the superclass of parse tree internal nodes. The default is `ParserRuleContext`; the class should ultimately derive from `RuleContext` at minimum.
|
||||
The Java target can use `contextSuperClass=org.antlr.v4.runtime.RuleContextWithAltNum` for convenience. It adds a backing field for `altNumber`, the alternative matched for the associated rule node.
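If you use that option, application code can read the matched alternative number off any node. A hedged sketch follows; it assumes a runtime recent enough to expose `getAltNumber()` on rule contexts, and the helper name is hypothetical.

```java
import org.antlr.v4.runtime.ParserRuleContext;

// Hypothetical helper: with contextSuperClass=RuleContextWithAltNum, getAltNumber()
// returns the alternative the rule matched instead of the default invalid value.
static void printMatchedAlt(ParserRuleContext ctx) {
    System.out.println("rule index " + ctx.getRuleIndex() + " matched alt " + ctx.getAltNumber());
}
```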
|
||||
|
||||
## Rule Options
|
||||
|
||||
There are currently no valid rule-level options, but the tool still supports the following syntax for future use:
|
||||
|
||||
```
|
||||
rulename
|
||||
options {...}
|
||||
: ...
|
||||
;
|
||||
```
|
||||
|
||||
## Rule Element Options
|
||||
|
||||
Token options have the form `T<name=value>` as we saw in Section 5.4, [Dealing with Precedence, Left Recursion, and Associativity](http://pragprog.com/book/tpantlr2/the-definitive-antlr-4-reference). The only token option is `assoc`, and it accepts values `left` and `right`. Here’s a sample grammar with a left-recursive expression rule that specifies a token option on the `^` exponent operator token:
|
||||
|
||||
```
|
||||
grammar ExprLR;
|
||||
|
||||
expr : expr '^'<assoc=right> expr
|
||||
| expr '*' expr // match subexpressions joined with '*' operator
|
||||
| expr '+' expr // match subexpressions joined with '+' operator
|
||||
| INT // matches simple integer atom
|
||||
;
|
||||
|
||||
INT : '0'..'9'+ ;
|
||||
WS : [ \n]+ -> skip ;
|
||||
```
|
||||
|
||||
Semantic predicates also accept an option, per [Catching failed semantic predicates](http://pragprog.com/book/tpantlr2/the-definitive-antlr-4-reference). The only valid option is the `fail` option, which takes either a string literal in double-quotes or an action that evaluates to a string. The string literal or string result from the action should be the message to emit upon predicate failure.
|
||||
|
||||
```
|
||||
ints[int max]
|
||||
locals [int i=1]
|
||||
: INT ( ',' {$i++;} {$i<=$max}?<fail={"exceeded max "+$max}> INT )*
|
||||
;
|
||||
```
|
||||
|
||||
The action can execute a function as well as compute a string when a predicate fails: `{...}?<fail={doSomethingAndReturnAString()}>`
|
|
@ -0,0 +1,480 @@
|
|||
# Parser Rules
|
||||
|
||||
Parsers consist of a set of parser rules either in a parser or a combined grammar. A Java application launches a parser by invoking the rule function, generated by ANTLR, associated with the desired start rule. The most basic rule is just a rule name followed by a single alternative terminated with a semicolon:
|
||||
|
||||
```
|
||||
/** Javadoc comment can precede rule */
|
||||
retstat : 'return' expr ';' ;
|
||||
```
|
||||
|
||||
Rules can also have alternatives separated by the `|` operator:
|
||||
|
||||
```
|
||||
|
||||
stat: retstat
|
||||
| 'break' ';'
|
||||
| 'continue' ';'
|
||||
;
|
||||
```
|
||||
|
||||
Alternatives are either a list of rule elements or empty. For example, here’s a rule with an empty alternative that makes the entire rule optional:
|
||||
|
||||
```
|
||||
superClass
|
||||
: 'extends' ID
|
||||
| // empty means other alternative(s) are optional
|
||||
;
|
||||
```
|
||||
|
||||
## Alternative Labels
|
||||
|
||||
As we saw in Section 7.4, Labeling Rule Alternatives for Precise Event Methods, we can get more precise parse-tree listener events by labeling the outermost alternatives of a rule using the # operator. All alternatives within a rule must be labeled, or none of them. Here are two rules with labeled alternatives.
|
||||
|
||||
```
|
||||
grammar T;
|
||||
stat: 'return' e ';' # Return
|
||||
| 'break' ';' # Break
|
||||
;
|
||||
e : e '*' e # Mult
|
||||
| e '+' e # Add
|
||||
| INT # Int
|
||||
;
|
||||
```
|
||||
|
||||
Alternative labels do not have to be at the end of the line and there does not have to be a space after the # symbol.
|
||||
ANTLR generates a rule context class definition for each label. For example, here is the listener that ANTLR generates:
|
||||
|
||||
```java
|
||||
public interface AListener extends ParseTreeListener {
|
||||
void enterReturn(AParser.ReturnContext ctx);
|
||||
void exitReturn(AParser.ReturnContext ctx);
|
||||
void enterBreak(AParser.BreakContext ctx);
|
||||
void exitBreak(AParser.BreakContext ctx);
|
||||
void enterMult(AParser.MultContext ctx);
|
||||
void exitMult(AParser.MultContext ctx);
|
||||
void enterAdd(AParser.AddContext ctx);
|
||||
void exitAdd(AParser.AddContext ctx);
|
||||
void enterInt(AParser.IntContext ctx);
|
||||
void exitInt(AParser.IntContext ctx);
|
||||
}
|
||||
```
|
||||
|
||||
There are enter and exit methods associated with each labeled alternative. The parameters to those methods are specific to alternatives.
|
||||
|
||||
You can reuse the same label on multiple alternatives to indicate that the parse tree walker should trigger the same event for those alternatives. For example, here’s a variation on rule e from grammar A above:
|
||||
|
||||
```
|
||||
e : e '*' e # BinaryOp
|
||||
| e '+' e # BinaryOp
|
||||
| INT # Int
|
||||
;
|
||||
```
|
||||
|
||||
ANTLR would generate the following listener methods for e:
|
||||
|
||||
```java
|
||||
void enterBinaryOp(AParser.BinaryOpContext ctx);
|
||||
void exitBinaryOp(AParser.BinaryOpContext ctx);
|
||||
void enterInt(AParser.IntContext ctx);
|
||||
void exitInt(AParser.IntContext ctx);
|
||||
```
|
||||
|
||||
ANTLR gives errors if an alternative name conflicts with a rule name. Here’s another rewrite of rule e where two
|
||||
alternative labels conflict with rule names:
|
||||
|
||||
```
|
||||
e : e '*' e # e
|
||||
| e '+' e # Stat
|
||||
| INT # Int
|
||||
;
|
||||
```
|
||||
|
||||
The context objects generated from rule names and labels get capitalized and so label Stat conflicts with rule stat:
|
||||
|
||||
```bash
|
||||
$ antlr4 A.g4
|
||||
error(124): A.g4:5:23: rule alt label e conflicts with rule e
|
||||
error(124): A.g4:6:23: rule alt label Stat conflicts with rule stat
|
||||
warning(125): A.g4:2:13: implicit definition of token INT in parser
|
||||
```
|
||||
|
||||
## Rule Context Objects
|
||||
|
||||
ANTLR generates methods to access the rule context objects (parse tree nodes) associated with each rule reference. For rules with a single rule reference, ANTLR generates a method with no arguments. Consider the following rule.
|
||||
|
||||
```
|
||||
inc : e '++' ;
|
||||
```
|
||||
|
||||
ANTLR generates this context class:
|
||||
|
||||
```java
|
||||
public static class IncContext extends ParserRuleContext {
|
||||
public EContext e() { ... } // return context object associated with e
|
||||
...
|
||||
}
|
||||
```
|
||||
|
||||
ANTLR also provides support for accessing context objects when there is more than a single reference to a rule:
|
||||
|
||||
```
|
||||
field : e '.' e ;
|
||||
```
|
||||
|
||||
ANTLR generates a method with an index to access the ith element as well as a method to get context for all references to that rule:
|
||||
|
||||
```java
|
||||
public static class FieldContext extends ParserRuleContext {
|
||||
public EContext e(int i) { ... } // get ith e context
|
||||
public List<EContext> e() { ... } // return ALL e contexts
|
||||
...
|
||||
}
|
||||
```
|
||||
|
||||
If we had another rule, s, that references field, an embedded action could access the list of e rule matches performed by field:
|
||||
|
||||
```
|
||||
s : field
|
||||
{
|
||||
List<EContext> x = $field.ctx.e();
|
||||
...
|
||||
}
|
||||
;
|
||||
```
|
||||
|
||||
A listener or visitor could do the same thing. Given a pointer to a `FieldContext` object `f`, `f.e()` would return a `List<EContext>`.
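For instance, a listener sketch along these lines would work, assuming the rules above live in a grammar named `T` so that ANTLR generated `TBaseListener` and `TParser` (names chosen only for illustration).

```java
import java.util.List;

public class FieldListener extends TBaseListener {
    @Override
    public void exitField(TParser.FieldContext ctx) {
        List<TParser.EContext> subExprs = ctx.e(); // every e subtree matched by this field
        System.out.println("field reference with " + subExprs.size() + " expression children");
    }
}
```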
|
||||
|
||||
## Rule Element Labels
|
||||
|
||||
You can label rule elements using the = operator to add fields to the rule context objects:
|
||||
|
||||
```
|
||||
stat: 'return' value=e ';' # Return
|
||||
| 'break' ';' # Break
|
||||
;
|
||||
```
|
||||
|
||||
Here value is the label for the return value of rule e, which is defined elsewhere.
|
||||
Labels become fields in the appropriate parse tree node class. In this case, label value becomes a field in ReturnContext because of the Return alternative label:
|
||||
|
||||
```java
|
||||
public static class ReturnContext extends StatContext {
|
||||
public EContext value;
|
||||
...
|
||||
}
|
||||
```
|
||||
|
||||
It’s often handy to track a number of tokens, which you can do with the += “list label” operator. For example, the following rule creates a list of the Token objects matched for a simple array construct:
|
||||
|
||||
```
|
||||
array : '{' el+=INT (',' el+=INT)* '}' ;
|
||||
```
|
||||
|
||||
ANTLR generates a List field in the appropriate rule context class:
|
||||
|
||||
```
|
||||
public static class ArrayContext extends ParserRuleContext {
|
||||
public List<Token> el = new ArrayList<Token>();
|
||||
...
|
||||
}
|
||||
```
|
||||
|
||||
These list labels also work for rule references:
|
||||
|
||||
```
|
||||
elist : exprs+=e (',' exprs+=e)* ;
|
||||
```
|
||||
|
||||
ANTLR generates a field holding the list of context objects:
|
||||
|
||||
```
|
||||
public static class ElistContext extends ParserRuleContext {
|
||||
public List<EContext> exprs = new ArrayList<EContext>();
|
||||
...
|
||||
}
|
||||
```
|
||||
|
||||
## Rule Elements
|
||||
|
||||
Rule elements specify what the parser should do at a given moment, just like statements in a programming language. The elements can be rule, token, or string literal references, such as expression, ID, or 'return'. Here’s a complete list of the rule elements (we’ll look at actions and predicates in more detail later):
|
||||
|
||||
<table>
|
||||
<tr>
|
||||
<th>Syntax</th><th>Description</th>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>T</td><td>
|
||||
Match token T at the current input position. Tokens always begin with a capital letter.</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>’literal’</td><td>
|
||||
Match the string literal at the current input position. A string literal is simply a token with a fixed string.</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>r</td><td>
|
||||
Match rule r at current input position, which amounts to invoking the rule just like a function call. Parser rule names always begin with a lowercase letter.</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>r [«args»]</td><td>
|
||||
Match rule r at current input position, passing in a list of arguments just like a function call. The arguments inside the square brackets are in the syntax of the target language and are usually a comma-separated list of expressions.</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>{«action»}</td><td>
|
||||
Execute an action immediately after the preceding alternative element and immediately before the following alternative element. The action conforms to the syntax of the target language. ANTLR copies the action code to the generated class verbatim, except for substituting attribute and token references such as $x and $x.y.</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>{«p»}?</td><td>
|
||||
Evaluate semantic predicate «p». Do not continue parsing past a predicate if «p» evaluates to false at runtime. Predicates encountered during prediction, when ANTLR distinguishes between alternatives, enable or disable the alternative(s) surrounding the predicate(s).</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>.</td><td>
|
||||
Match any single token except for the end of file token. The “dot” operator is called the wildcard.</td>
|
||||
</tr>
|
||||
</table>
|
||||
|
||||
When you want to match everything but a particular token or set of tokens, use the `~` “not” operator. This operator is rarely used in the parser but is available. `~INT` matches any token except the `INT` token. `~’,’` matches any token except the comma. `~(INT|ID)` matches any token except an INT or an ID.
|
||||
|
||||
Token, string literal, and semantic predicate rule elements can take options. See Rule Element Options.
|
||||
|
||||
## Subrules
|
||||
|
||||
A rule can contain alternative blocks called subrules (as allowed in Extended BNF Notation: EBNF). A subrule is like a rule that lacks a name and is enclosed in parentheses. Subrules can have one or more alternatives inside the parentheses. Subrules cannot define attributes with locals and returns like rules can. There are four kinds of subrules (x, y, and z represent grammar fragments):
|
||||
|
||||
<table>
|
||||
<tr>
|
||||
<th>Syntax</th><th>Description</th>
|
||||
</tr>
|
||||
<tr>
|
||||
<td><img src=images/xyz.png></td><td>(x|y|z).
|
||||
Match any alternative within the subrule exactly once. Example:
|
||||
<br>
|
||||
<tt>
|
||||
returnType : (type | 'void') ;
|
||||
</tt>
|
||||
</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td><img src=images/xyz_opt.png></td><td>(x|y|z)?
|
||||
Match nothing or any alternative within subrule. Example:
|
||||
<br>
|
||||
<tt>
|
||||
classDeclaration
|
||||
: 'class' ID (typeParameters)? ('extends' type)?
|
||||
('implements' typeList)?
|
||||
classBody
|
||||
;
|
||||
</tt>
</td>
</tr>
|
||||
<tr>
|
||||
<td><img src=images/xyz_star.png></td><td>(x|y|z)*
|
||||
Match an alternative within subrule zero or more times. Example:
|
||||
<br>
|
||||
<tt>
|
||||
annotationName : ID ('.' ID)* ;
|
||||
</tt>
|
||||
</tr>
|
||||
<tr>
|
||||
<td><img src=images/xyz_plus.png></td><td>(x|y|z)+
|
||||
Match an alternative within subrule one or more times. Example:
|
||||
<br>
|
||||
<tt>
|
||||
annotations : (annotation)+ ;
|
||||
</tt>
|
||||
</td>
|
||||
</tr>
|
||||
</table>
|
||||
|
||||
You can suffix the `?`, `*`, and `+` subrule operators with the nongreedy operator, which is also a question mark: `??`, `*?`, and `+?`. See Section 15.6, Wildcard Operator and Nongreedy Subrules.
|
||||
|
||||
As a shorthand, you can omit the parentheses for subrules composed of a single alternative with a single rule element reference. For example, `annotation+` is the same as `(annotation)+` and `ID+` is the same as `(ID)+`. Labels also work with the shorthand. `ids+=INT+` makes a list of `INT` token objects.
|
||||
|
||||
## Catching Exceptions
|
||||
|
||||
When a syntax error occurs within a rule, ANTLR catches the exception, reports the error, attempts to recover (possibly by consuming more tokens), and then returns from the rule. Every rule is wrapped in a `try/catch/finally` statement:
|
||||
|
||||
```
|
||||
void r() throws RecognitionException {
|
||||
try {
|
||||
rule-body
|
||||
}
|
||||
catch (RecognitionException re) {
|
||||
_errHandler.reportError(this, re);
|
||||
_errHandler.recover(this, re);
|
||||
}
|
||||
finally {
|
||||
exitRule();
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
In Section 9.5, Altering ANTLR’s Error Handling Strategy, we saw how to use a strategy object to alter ANTLR’s error handling. Replacing the strategy changes the strategy for all rules, however. To alter the exception handling for a single rule, specify an exception after the rule definition:
|
||||
|
||||
```
|
||||
r : ...
|
||||
;
|
||||
catch[RecognitionException e] { throw e; }
|
||||
```
|
||||
|
||||
That example shows how to avoid default error reporting and recovery. Rule `r` rethrows the exception, which is useful when it makes more sense for a higher-level rule to report the error. Specifying any exception clause prevents ANTLR from generating a clause to handle `RecognitionException`.
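By contrast, the whole-parser route mentioned above is a single call. A minimal sketch using the runtime's `BailErrorStrategy`, which abandons recovery at the first syntax error (the helper wrapper is just for illustration):

```java
import org.antlr.v4.runtime.BailErrorStrategy;
import org.antlr.v4.runtime.Parser;

static void useBailStrategy(Parser parser) {
    // applies to every rule of this parser instance, not just r
    parser.setErrorHandler(new BailErrorStrategy());
}
```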
|
||||
|
||||
You can specify other exceptions as well:
|
||||
|
||||
```
|
||||
r : ...
|
||||
;
|
||||
catch[FailedPredicateException fpe] { ... }
|
||||
catch[RecognitionException e] { ... }
|
||||
```
|
||||
|
||||
The code snippets inside curly braces and the exception “argument” actions must be written in the target language; Java, in this case.
|
||||
When you need to execute an action even if an exception occurs, put it into the `finally` clause:
|
||||
|
||||
```
|
||||
r : ...
|
||||
;
|
||||
// catch blocks go first
|
||||
finally { System.out.println("exit rule r"); }
|
||||
```
|
||||
|
||||
The finally clause executes right before the rule triggers `exitRule`, just before returning. If you want to execute an action after the rule finishes matching the alternatives but before it does its cleanup work, use an `after` action.
|
||||
|
||||
Here’s a complete list of exceptions:
|
||||
|
||||
<table>
|
||||
<tr>
|
||||
<th>Exception name</th><th>Description</th>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>RecognitionException</td><td>
|
||||
The superclass of all exceptions thrown by an ANTLR-generated recognizer. It’s a subclass of RuntimeException to avoid the hassles of checked exceptions. This exception records where the recognizer (lexer or parser) was in the input, where it was in the ATN (internal graph data structure representing the grammar), the rule invocation stack, and what kind of problem occurred.</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>NoViableAltException</td><td>
|
||||
Indicates that the parser could not decide which of two or more paths to take by looking at the remaining input. This exception tracks the starting token of the offending input and also knows where the parser was in the various paths when the error occurred.</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>LexerNoViableAltException</td><td>
|
||||
The equivalent of NoViableAltException but for lexers only.</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>InputMismatchException</td><td>
|
||||
The current input Token does not match what the parser expected.</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>FailedPredicateException</td><td>
|
||||
A semantic predicate that evaluates to false during prediction renders the surrounding alternative nonviable. Prediction occurs when a rule is deciding which alternative to take. If all viable paths disappear, the parser throws NoViableAltException. This exception is thrown by the parser when a semantic predicate evaluates to false outside of prediction, during the normal parsing process of matching tokens and calling rules.</td>
|
||||
</tr>
|
||||
</table>
|
||||
|
||||
## Rule Attribute Definitions
|
||||
|
||||
There are a number of action-related syntax elements associated with rules to be aware of. Rules can have arguments, return values, and local variables just like functions in a programming language. (Rules can have actions embedded among the rule elements, as we’ll see in Section 15.4, Actions and Attributes.) ANTLR collects all of the variables you define and stores them in the rule context object. These variables are usually called attributes. Here’s the general syntax showing all possible attribute definition locations:
|
||||
|
||||
```
|
||||
rulename[args] returns [retvals] locals [localvars] : ... ;
|
||||
```
|
||||
|
||||
The attributes defined within those [...] can be used like any other variable. Here is a sample rule that copies parameters to return values:
|
||||
|
||||
```
|
||||
// Return the argument plus the integer value of the INT token
|
||||
add[int x] returns [int result] : '+=' INT {$result = $x + $INT.int;} ;
|
||||
```
|
||||
|
||||
As with the grammar level, you can specify rule-level named actions. For rules, the valid names are init and after. As the names imply, parsers execute init actions immediately before trying to match the associated rule and execute after actions immediately after matching the rule. ANTLR after actions do not execute as part of the finally code block of the generated rule function. Use the ANTLR finally action to place code in the generated rule function finally code block.
|
||||
The actions come after any argument, return value, or local attribute definition actions. The row rule preamble from Section 10.2, Accessing Token and Rule Attributes illustrates the syntax nicely:
|
||||
actions/CSV.g4
|
||||
|
||||
```
|
||||
/** Derived from rule "row : field (',' field)* '\r'? '\n' ;" */
|
||||
row[String[] columns]
|
||||
returns [Map<String,String> values]
|
||||
locals [int col=0]
|
||||
@init {
|
||||
$values = new HashMap<String,String>();
|
||||
}
|
||||
@after {
|
||||
if ($values!=null && $values.size()>0) {
|
||||
System.out.println("values = "+$values);
|
||||
}
|
||||
}
|
||||
: ...
|
||||
;
|
||||
```
|
||||
|
||||
Rule row takes argument columns, returns values, and defines local variable col. The “actions” in square brackets are copied directly into the generated code:
|
||||
|
||||
```java
|
||||
public class CSVParser extends Parser {
|
||||
...
|
||||
public static class RowContext extends ParserRuleContext {
|
||||
public String [] columns;
|
||||
public Map<String,String> values;
|
||||
public int col=0;
|
||||
...
|
||||
}
|
||||
...
|
||||
}
|
||||
```
|
||||
|
||||
The generated rule functions also specify the rule arguments as function arguments, but they are quickly copied into the local RowContext object:
|
||||
|
||||
```java
|
||||
public class CSVParser extends Parser {
|
||||
...
|
||||
public final RowContext row(String [] columns) throws RecognitionException {
|
||||
RowContext _localctx = new RowContext(_ctx, 4, columns);
|
||||
enterRule(_localctx, RULE_row);
|
||||
...
|
||||
}
|
||||
...
|
||||
}
|
||||
```
|
||||
|
||||
ANTLR tracks nested `[...]` within the action so that `String[]` columns is parsed properly. It also tracks angle brackets so that commas within generic type parameters do not signify the start of another attribute. `Map<String,String>` values is one attribute definition.
|
||||
|
||||
There can be multiple attributes in each action, even for return values. Use a comma to separate attributes within the same action:
|
||||
|
||||
```
|
||||
a[Map<String,String> x, int y] : ... ;
|
||||
```
|
||||
|
||||
ANTLR interprets that action to define two arguments, x and y:
|
||||
|
||||
```java
|
||||
public final AContext a(Map<String,String> x, int y)
|
||||
throws RecognitionException
|
||||
{
|
||||
AContext _localctx = new AContext(_ctx, 0, x, y);
|
||||
enterRule(_localctx, RULE_a);
|
||||
...
|
||||
}
|
||||
```
|
||||
|
||||
## Start Rules and EOF
|
||||
|
||||
A start rule is the rule engaged first by the parser; it’s the rule function called by the language application. For example, a language application that parsed Java code might call `parser.compilationUnit()` for a `JavaParser` object called `parser`. Any rule in the grammar can act as a start rule.
|
||||
|
||||
Start rules don’t necessarily consume all of the input. They consume only as much input as needed to match an alternative of the rule. For example, consider the following rule that matches one, two, or three tokens, depending on the input.
|
||||
|
||||
```
|
||||
s : ID
|
||||
| ID '+'
|
||||
| ID '+' INT
|
||||
;
|
||||
```
|
||||
|
||||
Upon `a+3`, rule `s` matches the third alternative. Upon `a+b`, it matches the second alternative and ignores the final `b` token. Upon `a b`, it matches the first alternative, ignoring the `b` token. The parser does not consume the complete input in the latter two cases because rule `s` doesn’t explicitly say that end of file must occur after matching an alternative of the rule.
|
||||
|
||||
This default functionality is very useful for building things like IDEs. Imagine the IDE wanting to parse a method somewhere in the middle of a big Java file. Calling rule `methodDeclaration` should try to match just a method and ignore whatever comes next.
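If you rely on that behavior but still want to know whether anything was left over, you can inspect the next unconsumed token after the call returns. The sketch below uses hypothetical `JavaParser`/`methodDeclaration` names to match the IDE example above.

```java
import org.antlr.v4.runtime.Token;

static void parseOneMethod(JavaParser parser) {
    JavaParser.MethodDeclarationContext tree = parser.methodDeclaration();
    Token next = parser.getCurrentToken();      // first token the rule did not consume
    if (next.getType() != Token.EOF) {
        System.out.println("parsing stopped before line " + next.getLine());
    }
}
```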
|
||||
|
||||
On the other hand, rules that describe entire input files should reference the special predefined token `EOF`. If they don’t, you might scratch your head for a while wondering why the start rule doesn’t report errors for any input no matter what you give it. Here’s a rule that’s part of a grammar for reading configuration files:
|
||||
|
||||
```
|
||||
config : element*; // can "match" even with invalid input.
|
||||
```
|
||||
|
||||
Invalid input would cause `config` to return immediately without matching any input and without reporting an error. Here’s the proper specification:
|
||||
|
||||
```
|
||||
file : element* EOF; // don't stop early. must match all input
|
||||
```
|
|
@ -0,0 +1,164 @@
|
|||
# Semantic Predicates
|
||||
|
||||
Semantic predicates, `{...}?`, are boolean expressions written in the target language that indicate the validity of continuing the parse along the path "guarded" by the predicate. Predicates can appear anywhere within a parser rule just like actions can, but only those appearing on the left edge of alternatives can affect prediction (choosing between alternatives). This section provides all of the fine print regarding the use of semantic predicates in parser and lexer rules. Let's start out by digging deeper into how the parser incorporates predicates into parsing decisions.
|
||||
|
||||
## Making Predicated Parsing Decisions
|
||||
|
||||
ANTLR's general decision-making strategy is to find all viable alternatives and then ignore the alternatives guarded with predicates that currently evaluate to false. (A viable alternative is one that matches the current input.) If more than one viable alternative remains, the parser chooses the alternative specified first in the decision.
|
||||
|
||||
Consider a variant of C++ where array references also use parentheses instead of square brackets. If we only predicate one of the alternatives, we still have an ambiguous decision in expr:
|
||||
|
||||
```
|
||||
expr: ID '(' expr ')' // array reference (ANTLR picks this one)
|
||||
| {istype()}? ID '(' expr ')' // ctor-style typecast
|
||||
| ID '(' expr ')' // function call
|
||||
;
|
||||
```
|
||||
|
||||
In this case, all three alternatives are viable for input `x(i)`. When `x` is not a type name, the predicate evaluates to false, leaving only the first and third alternatives as possible matches for expr. ANTLR automatically chooses the first alternative matching the array reference to resolve the ambiguity. Leaving ANTLR with more than one viable alternative because of too few predicates is probably not a good idea. It's best to cover n viable alternatives with at least n-1 predicates. In other words, don't build rules like expr with too few predicates.
|
||||
|
||||
Sometimes, the parser finds multiple visible predicates associated with a single choice. No worries. ANTLR just combines the predicates with appropriate logical operators to conjure up a single meta-predicate on-the-fly.
|
||||
|
||||
For example, the decision in rule `stat` joins the predicates from both alternatives of expr with the `||` operator to guard the second stat alternative:
|
||||
|
||||
```
|
||||
stat: decl | expr ;
|
||||
decl: ID ID ;
|
||||
expr: {istype()}? ID '(' expr ')' // ctor-style typecast
|
||||
| {isfunc()}? ID '(' expr ')' // function call
|
||||
;
|
||||
```
|
||||
|
||||
The parser will only predict an expr from stat when `istype()||isfunc()` evaluates to true. This makes sense because the parser should only choose to match an expression if the upcoming `ID` is a type name or function name. It wouldn't make sense to just test one of the predicates in this case. Note that, when the parser gets to `expr` itself, the parsing decision tests the predicates individually, one for each alternative.
|
||||
|
||||
If multiple predicates occur in a sequence, the parser joins them with the `&&` operator. For example, consider changing `stat` to include a predicate before the call to `expr`:
|
||||
|
||||
```
|
||||
stat: decl | {java5}? expr ;
|
||||
```
|
||||
|
||||
Now, the parser would only predict the second alternative if `java5&&(istype()||isfunc())` evaluated to true.
|
||||
|
||||
Turning to the code inside the predicates themselves now, keep in mind the following guidelines.
|
||||
|
||||
Even when the parser isn't making decisions, predicates can deactivate alternatives, causing rules to fail. This happens when a rule only has a single alternative. There is no choice to make, but ANTLR evaluates the predicate as part of the normal parsing process, just like it does for actions. That means that the following rule always fails to match.
|
||||
|
||||
```
|
||||
prog: {false}? 'return' INT ; // throws FailedPredicateException
|
||||
```
|
||||
|
||||
ANTLR converts `{false}?` in the grammar to a conditional in the generated parser:
|
||||
|
||||
```
|
||||
if ( !false ) throw new FailedPredicateException(...);
|
||||
```
|
||||
|
||||
So far, all of the predicates we've seen have been visible and available to the prediction process, but that's not always the case.
|
||||
|
||||
## Finding Visible Predicates
|
||||
|
||||
During prediction, the parser will not evaluate predicates that occur after an action or a token reference. Let's think about the relationship between actions and predicates first.
|
||||
|
||||
ANTLR has no idea what's inside the raw code of an action and so it must assume any predicate could depend on side effects of that action. Imagine an action that computed value `x` and a predicate that tested `x`. Evaluating that predicate before the action executed to create `x` would violate the implied order of operations within the grammar.
|
||||
|
||||
More importantly, the parser can't execute actions until it has decided which alternative to match. That's because actions have side effects and we can't undo things like print statements. For example, in the following rule, the parser can't execute the action in front of the `{java5}?` predicate before committing to that alternative.
|
||||
|
||||
```
|
||||
@members {boolean allowgoto=false;}
|
||||
stat: {System.out.println("goto"); allowgoto=true;} {java5}? 'goto' ID ';'
|
||||
| ...
|
||||
;
|
||||
```
|
||||
|
||||
If we can't execute the action during prediction, we shouldn't evaluate the `{java5}?` predicate because it depends on that action.
|
||||
|
||||
The prediction process also can't see through token references. Token references have the side effect of advancing the input one symbol. A predicate that tested the current input symbol would find itself out of sync if the parser shifted it over the token reference. For example, in the following grammar, the predicates expect `getCurrentToken` to return an `ID` token.
|
||||
|
||||
```
|
||||
stat: '{' decl '}'
|
||||
    | '{' expr '}'
|
||||
;
|
||||
decl: {istype(getCurrentToken().getText())}? ID ID ';' ;
|
||||
expr: {isvar(getCurrentToken().getText())}? ID ;
|
||||
```
|
||||
|
||||
The decision in stat can't test those predicates because, at the start of stat, the current token is a left curly. To preserve the semantics, ANTLR won't test the predicates in that decision.
|
||||
|
||||
Visible predicates are those that prediction encounters before encountering an action or token. The prediction process ignores nonvisible predicates, treating them as if they don't exist.
|
||||
|
||||
In rare cases, the parser won't be able to use a predicate, even if it's visible to a particular decision. That brings us to our next fine print topic.
|
||||
|
||||
## Using Context-Dependent Predicates
|
||||
|
||||
A predicate that depends on a parameter or local variable of the surrounding rule is considered a context-dependent predicate. Clearly, we can only evaluate such predicates within the rules in which they're defined. For example, it makes no sense for the decision in prog below to test context-dependent predicate `{$i<=5}?`. That `$i` local variable is not even defined in `prog`.
|
||||
|
||||
```
|
||||
prog: vec5
|
||||
| ...
|
||||
;
|
||||
vec5
|
||||
locals [int i=1]
|
||||
: ( {$i<=5}? INT {$i++;} )* // match 5 INTs
|
||||
;
|
||||
```
|
||||
|
||||
ANTLR ignores context-dependent predicates that it can't evaluate in the proper context. Normally the proper context is simply the rule defining the predicate, but sometimes the parser can't even evaluate a context-dependent predicate from within the same rule! Detecting these cases is done on-the-fly at runtime during adaptive LL(*) prediction.
|
||||
|
||||
For example, prediction for the optional branch of the else subrule in stat below "falls off" the end of stat and continues looking for symbols in the invoking prog rule.
|
||||
|
||||
```
|
||||
prog: stat+ ; // stat can follow stat
|
||||
stat
|
||||
locals [int i=0]
|
||||
: {$i==0}? 'if' expr 'then' stat {$i=5;} ('else' stat)?
|
||||
| 'break' ';'
|
||||
;
|
||||
```
|
||||
|
||||
The prediction process is trying to figure out what can follow an if statement other than an else clause. Since the input can have multiple stats in a row, the prediction for the optional branch of the else subrule reenters stat. This time, of course, it gets a new copy of `$i` with a value of 0, not 5. ANTLR ignores context-dependent predicate `{$i==0}?` because it knows that the parser isn't in the original stat call. The predicate would test a different version of `$i` so the parser can't evaluate it.
|
||||
|
||||
The fine print for predicates in the lexer more or less follows these same guidelines, except of course lexer rules can't have parameters and local variables. Let's look at all of the lexer-specific guidelines in the next section.
|
||||
|
||||
## Predicates in Lexer Rules
|
||||
|
||||
In parser rules, predicates must appear on the left edge of alternatives to aid in alternative prediction. Lexers, on the other hand, prefer predicates on the right edge of lexer rules because they choose rules after seeing a token's entire text. Predicates in lexer rules can technically be anywhere within the rule. Some positions might be more or less efficient than others; ANTLR makes no guarantees about the optimal spot. A predicate in a lexer rule might be executed multiple times even during a single token match. You can embed multiple predicates per lexer rule and they are evaluated as the lexer reaches them during matching.
|
||||
|
||||
Loosely speaking, the lexer's goal is to choose the rule that matches the most input characters. At each character, the lexer decides which rules are still viable. Eventually, only a single rule will still be viable. At that point, the lexer creates a token object according to the rule's token type and matched text.
|
||||
|
||||
Sometimes the lexer is faced with more than a single viable matching rule. For example, input `enum` would match an `ENUM` rule and an `ID` rule. If the next character after `enum` is a space, neither rule can continue. The lexer resolves the ambiguity by choosing the viable rule specified first in the grammar. That's why we have to place keyword rules before an identifier rule like this:
|
||||
|
||||
```
|
||||
ENUM : 'enum' ;
|
||||
ID : [a-z]+ ;
|
||||
```
|
||||
|
||||
If, on the other hand, the next character after input `enum` is a letter, then only `ID` is viable.
|
||||
|
||||
Predicates come into play by pruning the set of viable lexer rules. When the lexer encounters a false predicate, it deactivates that rule just like parsers deactivate alternatives with false predicates.
|
||||
|
||||
Like parser predicates, lexer predicates can't depend on side effects from lexer actions. That's because actions can only execute after the lexer positively identifies the rule to match. Since predicates are part of the rule selection process, they can't rely on action side effects. Lexer actions must appear after predicates in lexer rules. As an example, here's another way to match enum as a keyword in the lexer:
|
||||
|
||||
```
|
||||
ENUM: [a-z]+ {getText().equals("enum")}?
|
||||
{System.out.println("enum!");}
|
||||
;
|
||||
ID : [a-z]+ {System.out.println("ID "+getText());} ;
|
||||
```
|
||||
|
||||
The print action in `ENUM` appears last and executes only if the current input matches `[a-z]+` and the predicate is true. Let's build and test `Enum3` to see if it distinguishes between enum and an identifier:
|
||||
|
||||
```bash
|
||||
$ antlr4 Enum3.g4
|
||||
$ javac Enum3.java
|
||||
$ grun Enum3 tokens
|
||||
=> enum abc
|
||||
=> EOF
|
||||
<= enum!
|
||||
ID abc
|
||||
```
|
||||
|
||||
That works great, but it's really just for instructional purposes. It's easier to understand and more efficient to match enum keywords with a simple rule like this:
|
||||
|
||||
```
|
||||
ENUM : 'enum' ;
|
||||
```
|
|
@ -0,0 +1,128 @@
|
|||
# Python (2 and 3)
|
||||
|
||||
The examples from the ANTLR 4 book converted to Python are [here](https://github.com/jszheng/py3antlr4book).
|
||||
|
||||
There are 2 Python targets: `Python2` and `Python3`. This is because there is only limited compatibility between those 2 versions of the language. Please refer to the [Python documentation](https://wiki.python.org/moin/Python2orPython3) for full details.
|
||||
|
||||
How to create a Python lexer or parser?
|
||||
This is pretty much the same as creating a Java lexer or parser, except you need to specify the language target, for example:
|
||||
|
||||
```
|
||||
$ antlr4 -Dlanguage=Python2 MyGrammar.g4
|
||||
```
|
||||
|
||||
or
|
||||
|
||||
```
|
||||
$ antlr4 -Dlanguage=Python3 MyGrammar.g4
|
||||
```
|
||||
|
||||
For a full list of antlr4 tool options, please visit the tool documentation page.
|
||||
|
||||
## Where can I get the runtime?
|
||||
|
||||
Once you've generated the lexer and/or parser code, you need to download the runtime. The Python runtimes are available from PyPI:
|
||||
|
||||
* https://pypi.python.org/pypi/antlr4-python2-runtime/
|
||||
* https://pypi.python.org/pypi/antlr4-python3-runtime/
|
||||
|
||||
The runtimes are provided in the form of source code, so no additional installation is required.
|
||||
|
||||
We will not document here how to refer to the runtime from your Python project, since this would differ a lot depending on your project type and IDE.
|
||||
|
||||
## How do I run the generated lexer and/or parser?
|
||||
|
||||
Let's suppose that your grammar is named, as above, "MyGrammar". Let's suppose this parser comprises a rule named "StartRule". The tool will have generated for you the following files:
|
||||
|
||||
* MyGrammarLexer.py
|
||||
* MyGrammarParser.py
|
||||
* MyGrammarListener.py (if you have not activated the -no-listener option)
|
||||
* MyGrammarVisitor.py (if you have activated the -visitor option)
|
||||
|
||||
(Developers used to the Java/C# ANTLR targets will notice that there is no base listener or visitor generated; because Python has no support for interfaces, the generated listener and visitor are fully fledged classes.)
|
||||
|
||||
Now a fully functioning script might look like the following:
|
||||
|
||||
```python
|
||||
import sys
from antlr4 import *
|
||||
from MyGrammarLexer import MyGrammarLexer
|
||||
from MyGrammarParser import MyGrammarParser
|
||||
|
||||
def main(argv):
|
||||
input = FileStream(argv[1])
|
||||
lexer = MyGrammarLexer(input)
|
||||
stream = CommonTokenStream(lexer)
|
||||
parser = MyGrammarParser(stream)
|
||||
tree = parser.StartRule()
|
||||
|
||||
if __name__ == '__main__':
|
||||
main(sys.argv)
|
||||
```
|
||||
|
||||
This program will work. But it won't be useful unless you do one of the following:
|
||||
|
||||
* you visit the parse tree using a custom listener
|
||||
* you visit the parse tree using a custom visitor
|
||||
* your grammar comprises production code (like ANTLR3)
|
||||
|
||||
(please note that production code is target specific, so you can't have multi target grammars that include production code, except for very limited use cases, see below)
|
||||
|
||||
## How do I create and run a custom listener?
|
||||
|
||||
Let's suppose your MyGrammar grammar comprises 2 rules: "key" and "value". The antlr4 tool will have generated the following listener:
|
||||
|
||||
```python
|
||||
class MyGrammarListener(ParseTreeListener):
|
||||
def enterKey(self, ctx):
|
||||
pass
|
||||
def exitKey(self, ctx):
|
||||
pass
|
||||
def enterValue(self, ctx):
|
||||
pass
|
||||
def exitValue(self, ctx):
|
||||
pass
|
||||
```
|
||||
|
||||
In order to provide custom behavior, you might want to create the following class:
|
||||
|
||||
```python
|
||||
class KeyPrinter(MyGrammarListener):
|
||||
def exitKey(self, ctx):
|
||||
print("Oh, a key!")
|
||||
```
|
||||
|
||||
In order to execute this listener, you would simply add the following lines to the above code:
|
||||
|
||||
```
|
||||
...
|
||||
tree = parser.StartRule()  # only repeated here for reference
|
||||
printer = KeyPrinter()
|
||||
walker = ParseTreeWalker()
|
||||
walker.walk(printer, tree)
|
||||
```
|
||||
|
||||
Further information can be found in the ANTLR 4 definitive guide.
|
||||
|
||||
The Python implementation of ANTLR is as close as possible to the Java one, so you shouldn't find it difficult to adapt the examples for Python.
|
||||
|
||||
## Target agnostic grammars
|
||||
|
||||
If your grammar is targeted to Python only, you may ignore the following. But if your goal is to get your Java parser to also run in Python, then you might find it useful.
|
||||
|
||||
1. Do not embed production code inside your grammar. This is not portable and will not be. Move all your code to listeners or visitors.
|
||||
1. The only production code absolutely required to sit with the grammar should be semantic predicates, like:
|
||||
```
|
||||
ID {$text.equals("test")}?
|
||||
```
|
||||
|
||||
Unfortunately, this is not portable, but you can work around it. The trick involves:
|
||||
|
||||
* deriving your parser from a parser you provide, such as BaseParser
|
||||
* implementing utility methods in this BaseParser, such as "isEqualText"
|
||||
* adding a "self" field to the Java/C# BaseParser, and initializing it with "this" (see the sketch below)
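As a hedged sketch only (class and method names simply follow the bullets above and are not part of any shipped API), the Java side of this trick might look like:

```java
// Hypothetical Java base parser for a target-agnostic grammar; the grammar
// would reference it via: options { superClass=BaseParser; }
import org.antlr.v4.runtime.Parser;
import org.antlr.v4.runtime.TokenStream;

public abstract class BaseParser extends Parser {
    /** Lets grammar predicates reference $self.isEqualText(...) portably: the
     *  Python runtime resolves 'self' natively, while this field supplies it in Java. */
    public final BaseParser self = this;

    public BaseParser(TokenStream input) { super(input); }

    /** Utility method referenced from semantic predicates. */
    public boolean isEqualText(String a, String b) { return a.equals(b); }
}
```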
|
||||
|
||||
Thanks to the above, you should be able to rewrite the above semantic predicate as follows:
|
||||
|
||||
```
|
||||
ID {$self.isEqualText($text,"test")}?
|
||||
```
|
|
@ -0,0 +1,268 @@
|
|||
# Cutting an ANTLR Release
|
||||
|
||||
## Github
|
||||
|
||||
Create a pre-release or full release at github; [Example 4.5-rc-1](https://github.com/antlr/antlr4/releases/tag/4.5-rc-1).
|
||||
|
||||
Whack any existing tag, as mvn will create one and it fails if it's already there.
|
||||
|
||||
```
|
||||
$ git tag -d 4.5.2
|
||||
$ git push origin :refs/tags/4.5.2
|
||||
$ git push upstream :refs/tags/4.5.2
|
||||
```
|
||||
|
||||
## Bump version
|
||||
|
||||
Edit the repository looking for 4.5 or whatever and update it. Bump version in the following files:
|
||||
|
||||
* runtime/Java/src/org/antlr/v4/runtime/RuntimeMetaData.java
|
||||
* runtime/Python2/setup.py
|
||||
* runtime/Python2/src/antlr4/Recognizer.py
|
||||
* runtime/Python3/setup.py
|
||||
* runtime/Python3/src/antlr4/Recognizer.py
|
||||
* runtime/CSharp/runtime/CSharp/Antlr4.Runtime/Properties/AssemblyInfo.cs
|
||||
* runtime/JavaScript/src/antlr4/package.json
|
||||
* runtime/JavaScript/src/antlr4/Recognizer.js
|
||||
* tool/src/org/antlr/v4/codegen/target/CSharpTarget.java
|
||||
* tool/src/org/antlr/v4/codegen/target/JavaScriptTarget.java
|
||||
* tool/src/org/antlr/v4/codegen/target/Python2Target.java
|
||||
* tool/src/org/antlr/v4/codegen/target/Python3Target.java
|
||||
|
||||
Here is a simple command to list any of the critical files that still contain, say, `4.5`:
|
||||
|
||||
```bash
|
||||
find /tmp/antlr4 -type f -exec grep -l '4\.5' {} \;
|
||||
```
|
||||
|
||||
Commit to repository.
|
||||
|
||||
## Maven Repository Settings
|
||||
|
||||
First, make sure you have maven set up to communicate with staging servers etc... Create file `~/.m2/settings.xml` with appropriate username/password for staging server and gpg.keyname/passphrase for signing. Make sure it has strict visibility privileges to just you. On unix, it looks like:
|
||||
|
||||
```bash
|
||||
beast:~/.m2 $ ls -l settings.xml
|
||||
-rw------- 1 parrt staff 914 Jul 15 14:42 settings.xml
|
||||
```
|
||||
|
||||
Here is the file template
|
||||
|
||||
```xml
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<!--
|
||||
User-specific configuration for maven. Includes things that should not
|
||||
be distributed with the pom.xml file, such as developer identity, along with
|
||||
local settings, like proxy information.
|
||||
-->
|
||||
<settings>
|
||||
<servers>
|
||||
<server>
|
||||
<id>sonatype-nexus-staging</id>
|
||||
<username>sonatype-username</username>
|
||||
<password>XXX</password>
|
||||
</server>
|
||||
<server>
|
||||
<id>sonatype-nexus-snapshots</id>
|
||||
<username>sonatype-username</username>
|
||||
<password>XXX</password>
|
||||
</server>
|
||||
</servers>
|
||||
<profiles>
|
||||
<profile>
|
||||
<activation>
|
||||
<activeByDefault>false</activeByDefault>
|
||||
</activation>
|
||||
<properties>
|
||||
<gpg.keyname>UUU</gpg.keyname>
|
||||
<gpg.passphrase>XXX</gpg.passphrase>
|
||||
</properties>
|
||||
</profile>
|
||||
</profiles>
|
||||
</settings>
|
||||
```
|
||||
|
||||
## Maven release
|
||||
|
||||
The maven deploy lifecycle phase deploys the artifacts and the poms for the ANTLR project to the [sonatype remote staging server](https://oss.sonatype.org/content/repositories/snapshots/).
|
||||
|
||||
```bash
|
||||
mvn deploy -DskipTests
|
||||
```
|
||||
|
||||
With JDK 1.7 (not 6 or 8), do this:
|
||||
|
||||
```bash
|
||||
mvn release:prepare -Darguments="-DskipTests"
|
||||
```
|
||||
|
||||
It will start out by asking you the version number:
|
||||
|
||||
```
|
||||
...
|
||||
What is the release version for "ANTLR 4"? (org.antlr:antlr4-master) 4.5.2: : 4.5.2
|
||||
What is the release version for "ANTLR 4 Runtime"? (org.antlr:antlr4-runtime) 4.5.2: :
|
||||
What is the release version for "ANTLR 4 Tool"? (org.antlr:antlr4) 4.5.2: :
|
||||
What is the release version for "ANTLR 4 Maven plugin"? (org.antlr:antlr4-maven-plugin) 4.5.2: :
|
||||
What is the release version for "ANTLR 4 Runtime Test Generator"? (org.antlr:antlr4-runtime-testsuite) 4.5.2: :
|
||||
What is the release version for "ANTLR 4 Tool Tests"? (org.antlr:antlr4-tool-testsuite) 4.5.2: :
|
||||
What is SCM release tag or label for "ANTLR 4"? (org.antlr:antlr4-master) antlr4-master-4.5.2: : 4.5.2
|
||||
What is the new development version for "ANTLR 4"? (org.antlr:antlr4-master) 4.5.3-SNAPSHOT:
|
||||
...
|
||||
```
|
||||
|
||||
Maven will go through your pom.xml files to update versions from 4.5.2-SNAPSHOT to 4.5.2 for release and then to 4.5.3-SNAPSHOT after release, which is done with:
|
||||
|
||||
```bash
|
||||
mvn release:perform -Darguments="-DskipTests"
|
||||
```
|
||||
|
||||
Maven will use git to push pom.xml changes. (big smile)
|
||||
|
||||
Now, go here:
|
||||
|
||||
[https://oss.sonatype.org/#welcome](https://oss.sonatype.org/#welcome)
|
||||
|
||||
and on the left click "Staging Repositories". You click the staging repo and close it, then you refresh, click it and release it. It's done when you see it here:
|
||||
|
||||
[http://repo1.maven.org/maven2/org/antlr/antlr4-runtime/](http://repo1.maven.org/maven2/org/antlr/antlr4-runtime/)
|
||||
|
||||
Copy the jars to antlr.org site and update download/index.html
|
||||
|
||||
```bash
|
||||
cp ~/.m2/repository/org/antlr/antlr4-runtime/4.5.2/antlr4-runtime-4.5.2.jar ~/antlr/sites/website-antlr4/download/antlr-runtime-4.5.2.jar
|
||||
cp ~/.m2/repository/org/antlr/antlr4/4.5.2/antlr4-4.5.2.jar ~/antlr/sites/website-antlr4/download/antlr-4.5.2-complete.jar
|
||||
cd ~/antlr/sites/website-antlr4/download
|
||||
git add antlr-4.5.2-complete.jar
|
||||
git add antlr-runtime-4.5.2.jar
|
||||
```
|
||||
|
||||
Update on site:
|
||||
|
||||
* download.html
|
||||
* index.html
|
||||
* api/index.html
|
||||
* download/index.html
|
||||
* scripts/topnav.js
|
||||
|
||||
```
|
||||
git commit -a -m 'add 4.5.2 jars'
|
||||
git push origin gh-pages
|
||||
```
|
||||
|
||||
## Deploying Targets
|
||||
|
||||
### JavaScript
|
||||
|
||||
```bash
|
||||
cd runtime/JavaScript/src
|
||||
zip -r /tmp/antlr-javascript-runtime-4.5.2.zip antlr4
|
||||
cp /tmp/antlr-javascript-runtime-4.5.2.zip ~/antlr/sites/website-antlr4/download
|
||||
# git add, commit, push
|
||||
```
|
||||
|
||||
Move target to website
|
||||
|
||||
```bash
|
||||
pushd ~/antlr/sites/website-antlr4/download
|
||||
git add antlr-javascript-runtime-4.5.2.zip
|
||||
git commit -a -m 'update JS runtime'
|
||||
git push origin gh-pages
|
||||
popd
|
||||
```
|
||||
|
||||
### CSharp
|
||||
|
||||
```bash
|
||||
cd ~/antlr/code/antlr4/runtime/CSharp/runtime/CSharp
|
||||
# kill previous ones manually as "xbuild /t:Clean" didn't seem to do it
|
||||
rm Antlr4.Runtime/bin/net20/Release/Antlr4.Runtime.dll
|
||||
rm Antlr4.Runtime/obj/net20/Release/Antlr4.Runtime.dll
|
||||
# build
|
||||
xbuild /p:Configuration=Release Antlr4.Runtime/Antlr4.Runtime.mono.csproj
|
||||
# zip it up to get a version number on zip filename
|
||||
zip --junk-paths /tmp/antlr-csharp-runtime-4.5.2.zip Antlr4.Runtime/bin/net35/Release/Antlr4.Runtime.dll
|
||||
cp /tmp/antlr-csharp-runtime-4.5.2.zip ~/antlr/sites/website-antlr4/download
|
||||
```
|
||||
|
||||
Move target to website
|
||||
|
||||
```bash
|
||||
pushd ~/antlr/sites/website-antlr4/download
|
||||
git add antlr-csharp-runtime-4.5.2.zip
|
||||
git commit -a -m 'update C# runtime'
|
||||
git push origin gh-pages
|
||||
popd
|
||||
```
|
||||
|
||||
### Python
|
||||
|
||||
The Python targets get deployed with `setup.py`. First, set up `~/.pypirc` with tight privileges:
|
||||
|
||||
```bash
|
||||
beast:~ $ ls -l ~/.pypirc
|
||||
-rw------- 1 parrt staff 267 Jul 15 17:02 /Users/parrt/.pypirc
|
||||
```
|
||||
|
||||
```
|
||||
[distutils] # this tells distutils what package indexes you can push to
|
||||
index-servers =
|
||||
pypi
|
||||
pypitest
|
||||
|
||||
[pypi]
|
||||
repository: https://pypi.python.org/pypi
|
||||
username: parrt
|
||||
password: XXX
|
||||
|
||||
[pypitest]
|
||||
repository: https://testpypi.python.org/pypi
|
||||
username: parrt
|
||||
```
|
||||
|
||||
Then run the usual python set up stuff:
|
||||
|
||||
```bash
|
||||
cd ~/antlr/code/antlr4/runtime/Python2
|
||||
# assume you have ~/.pypirc set up
|
||||
python setup.py register -r pypi
|
||||
python setup.py sdist bdist_wininst upload -r pypi
|
||||
```
|
||||
|
||||
and do again for Python 3 target
|
||||
|
||||
```bash
|
||||
cd ~/antlr/code/antlr4/runtime/Python3
|
||||
# assume you have ~/.pypirc set up
|
||||
python setup.py register -r pypi
|
||||
python setup.py sdist bdist_wininst upload -r pypi
|
||||
```
|
||||
|
||||
Add links to the artifacts from download.html
|
||||
|
||||
## Update javadoc for runtime and tool
|
||||
|
||||
First gen javadoc:
|
||||
|
||||
```bash
|
||||
$ cd antlr4
|
||||
$ mvn -DskipTests javadoc:jar install
|
||||
```
|
||||
|
||||
Then copy to website:
|
||||
|
||||
```bash
|
||||
cd ~/antlr/sites/website-antlr4/api
|
||||
git checkout gh-pages
|
||||
git pull origin gh-pages
|
||||
cd Java
|
||||
jar xvf ~/.m2/repository/org/antlr/antlr4-runtime/4.5.2/antlr4-runtime-4.5.2-javadoc.jar
|
||||
cd ../JavaTool
|
||||
jar xvf ~/.m2/repository/org/antlr/antlr4/4.5.2/antlr4-4.5.2-javadoc.jar
|
||||
git commit -a -m 'freshen api doc'
|
||||
git push origin gh-pages
|
||||
```
|
||||
|
||||
## Update Intellij plug-in
|
||||
|
||||
Rebuild antlr plugin with new antlr jar.
|
|
@ -0,0 +1,33 @@
|
|||
# Articles and Resources
|
||||
|
||||
## Books
|
||||
|
||||
<a href=""><img src=images/tpantlr2.png width=120></a>
|
||||
<a href=""><img src=images/tpdsl.png width=120></a>
|
||||
|
||||
<a href="https://www.youtube.com/watch?v=OAoA3E-cyug"><img src=images/teronbook.png width=250></a>
|
||||
|
||||
## Articles
|
||||
|
||||
* [Playing with ANTLR4, Primefaces extensions for Code Mirror and web-based DSLs](http://leonotepad.blogspot.com.br/2014/01/playing-with-antlr4-primefaces.html)
|
||||
* [A Tale of Two Grammars](https://dexvis.wordpress.com/2012/11/22/a-tale-of-two-grammars/)
|
||||
* [ANTLR 4: using the lexer, parser and listener with example grammar](http://www.theendian.com/blog/antlr-4-lexer-parser-and-listener-with-example-grammar/)
|
||||
* [Creating External DSLs using ANTLR and Java](http://java.dzone.com/articles/creating-external-dsls-using)
|
||||
|
||||
## Presentations
|
||||
|
||||
* [Introduction to ANTLR 4 by Oliver Zeigermann](https://docs.google.com/presentation/d/1XS_VIdicCQVonPK6AGYkWTp-3VeHfGuD2l8yNMpAfuQ/edit#slide=id.p)
|
||||
|
||||
## Videos
|
||||
|
||||
<a href="https://vimeo.com/59285751"><img src=images/tertalk.png width=200></a>
|
||||
|
||||
## Resources
|
||||
|
||||
* [Stack overflow ANTLR4 tag](http://stackoverflow.com/questions/tagged/antlr4)
|
||||
* [Antlr 4 with C# and Visual Studio 2012](http://programming-pages.com/2013/12/14/antlr-4-with-c-and-visual-studio-2012/)
|
||||
* [ANTLR Language Support in VisualStudio](http://visualstudiogallery.msdn.microsoft.com/25b991db-befd-441b-b23b-bb5f8d07ee9f)
|
||||
* [Upgrading to ANTLR 4 with C#](http://andrevdm.blogspot.com/2013/08/upgrading-to-antlr-4-with-c.html)
|
||||
* [Generate parsers with Antlr4 via Maven](http://ljelonek.wordpress.com/2014/01/03/generate-parsers-with-antlr4-via-maven/)
|
||||
* [Exploring ANTLR v4](http://johnsquibb.like97.com/blog/read/exploring-antlr-v4)
|
||||
* [antlr4dart](http://pub.dartlang.org/packages/antlr4dart)
|
|
@ -0,0 +1,23 @@
|
|||
# Runtime Libraries and Code Generation Targets
|
||||
|
||||
This page lists the available and upcoming ANTLR runtimes. Please note that you won't find language-specific code generators here. This is because there is only one tool, written in Java, which is able to generate lexer and parser code for all targets, through command line options. The tool can be invoked from the command line, or from any integration plugin for popular IDEs and build systems: Eclipse, IntelliJ, Visual Studio, Maven. So whatever your environment and target are, you should be able to run the tool and produce code in the targeted language. As of this writing, the available targets are the following:
|
||||
|
||||
* [Java](java-target.md)<br>
|
||||
The [ANTLR v4 book](http://pragprog.com/book/tpantlr2/the-definitive-antlr-4-reference) has a decent summary of the runtime library. We have added a useful XPath feature since the book was printed that lets you select bits of parse trees.
|
||||
<br>[Runtime API](http://www.antlr.org/api/Java/index.html)
|
||||
<br>See [Getting Started with ANTLR v4](getting-started.md)
|
||||
|
||||
* [C#](csharp-target.md)
|
||||
* [Python](python-target.md) (2 and 3)
|
||||
* [JavaScript](javascript-target.md)
|
||||
* Swift (not yet available)
|
||||
* C++ (not yet available)
|
||||
|
||||
## Target feature parity
|
||||
|
||||
New features generally appear in the Java target and then migrate to the other targets, but these other targets don't always get updated in the same overall tool release. This section tries to identify features added to Java that have not been added to the other targets.
|
||||
|
||||
|Feature|Java|C♯|JavaScript|Python2|Python3|Swift|C++|
|
||||
|---|---|---|---|---|---|---|---|
|
||||
|Ambiguous tree construction|4.5.1|-|-|-|-|-|-|
|
||||
|
|
@ -0,0 +1,161 @@
|
|||
# ANTLR Tool Command Line Options
|
||||
|
||||
If you invoke the ANTLR tool without command line arguments, you’ll get a help message:
|
||||
|
||||
```bash
|
||||
$ antlr4
|
||||
ANTLR Parser Generator Version 4.5
|
||||
-o ___ specify output directory where all output is generated
|
||||
-lib ___ specify location of grammars, tokens files
|
||||
-atn generate rule augmented transition network diagrams
|
||||
-encoding ___ specify grammar file encoding; e.g., euc-jp
|
||||
-message-format ___ specify output style for messages in antlr, gnu, vs2005
|
||||
-long-messages show exception details when available for errors and warnings
|
||||
-listener generate parse tree listener (default)
|
||||
-no-listener don't generate parse tree listener
|
||||
-visitor generate parse tree visitor
|
||||
-no-visitor don't generate parse tree visitor (default)
|
||||
-package ___ specify a package/namespace for the generated code
|
||||
-depend generate file dependencies
|
||||
-D<option>=value set/override a grammar-level option
|
||||
-Werror treat warnings as errors
|
||||
-XdbgST launch StringTemplate visualizer on generated code
|
||||
-XdbgSTWait wait for STViz to close before continuing
|
||||
-Xforce-atn use the ATN simulator for all predictions
|
||||
-Xlog dump lots of logging info to antlr-timestamp.log
|
||||
```
|
||||
|
||||
Here are more details on the options:
|
||||
|
||||
## `-o outdir`
|
||||
|
||||
ANTLR generates output files in the current directory by default. This option specifies the output directory where ANTLR should generate parsers, listeners, visitors, and tokens files.
|
||||
|
||||
```bash
|
||||
$ antlr4 -o /tmp T.g4
|
||||
$ ls /tmp/T*
|
||||
/tmp/T.tokens /tmp/TListener.java
|
||||
/tmp/TBaseListener.java /tmp/TParser.java
|
||||
```
|
||||
|
||||
## `-lib libdir`
|
||||
|
||||
When looking for tokens files and imported grammars, ANTLR normally looks in the current directory. This option specifies which directory to look in instead. It is only used for resolving grammar references for the import statement and the tokenVocab option. The path to the primary grammar must always be fully specified.
|
||||
|
||||
```bash
$ cat /tmp/B.g4
parser grammar B;
x : ID ;

$ cat A.g4
grammar A;
import B;
s : x ;
ID : [a-z]+ ;

$ antlr4 -lib /tmp A.g4
```
|
||||
|
||||
## `-atn`
|
||||
|
||||
Generate DOT graph files that represent the internal ATN (augmented transition network) data structures that ANTLR uses to represent grammars. The files come out as `Grammar.rule.dot`. If the grammar is a combined grammar, the lexer rules are named `GrammarLexer.rule.dot`.
|
||||
|
||||
```bash
$ cat A.g4
grammar A;
s : b ;
b : ID ;
ID : [a-z]+ ;

$ antlr4 -atn A.g4
$ ls *.dot
A.b.dot  A.s.dot  ALexer.ID.dot
```
|
||||
|
||||
## `-encoding encodingname`
|
||||
|
||||
By default ANTLR loads grammar files using the UTF-8 encoding, which is a very common character file encoding that degenerates to ASCII for characters that fit in one byte. There are many character file encodings from around the world. If your grammar file is not in UTF-8, you need this option so that ANTLR can properly interpret the grammar files. This does not affect the input to the generated parsers, just the encoding of the grammars themselves.
|
||||
|
||||
## `-message-format format`
|
||||
|
||||
ANTLR generates warning and error messages using templates from directory tool/resources/org/antlr/v4/tool/templates/messages/formats. By default, ANTLR uses the antlr.stg (StringTemplate group) file. You can change this to gnu or vs2005 to have ANTLR generate messages appropriate for Emacs or Visual Studio. To make your own format called X, create resource org/antlr/v4/tool/templates/messages/formats/X and place it in the CLASSPATH.
|
||||
|
||||
## `-listener`
|
||||
|
||||
This option tells ANTLR to generate a parse tree listener and is the default.
|
||||
|
||||
## `-no-listener`
|
||||
|
||||
This option tells ANTLR not to generate a parse tree listener.
|
||||
|
||||
## `-visitor`
|
||||
|
||||
ANTLR does not generate parse tree visitors by default. This option turns that feature on. ANTLR can generate both parse tree listeners and visitors; this option and -listener aren’t mutually exclusive.
|
||||
|
||||
## `-no-visitor`
|
||||
|
||||
Tell ANTLR not to generate a parse tree visitor; this is the default.
|
||||
|
||||
## `-package`
|
||||
|
||||
Use this option to specify a package or namespace for ANTLR-generated files. Alternatively, you can add a @header {...} action, but that ties the grammar to a specific language. If you use this option and @header, make sure that the header action does not contain a package specification; otherwise, the generated code will have two of them.
|
||||
|
||||
## `-depend`
|
||||
|
||||
Instead of generating a parser and/or lexer, generate a list of file dependencies, one per line. The output shows what each grammar depends on and what it generates. This is useful for build tools that need to know ANTLR grammar dependencies. Here’s an example:
|
||||
|
||||
```bash
|
||||
$ antlr4 -depend T.g
|
||||
T.g: A.tokens
|
||||
TParser.java : T.g
|
||||
T.tokens : T.g
|
||||
TLexer.java : T.g
|
||||
TListener.java : T.g
|
||||
TBaseListener.java : T.g
|
||||
```
|
||||
|
||||
If you use -lib libdir with -depend and grammar option tokenVocab=A, then the dependencies include the library path as well: T.g: libdir/A.tokens. The output is also sensitive to the -o outdir option: outdir/TParser.java : T.g.
|
||||
|
||||
## `-D<option>=value`
|
||||
|
||||
Use this option to override or set a grammar-level option in the specified grammar or grammars. This option is useful for generating parsers in different languages without altering the grammar itself. (I expect to have other targets in the near future.)
|
||||
|
||||
```bash
|
||||
$ antlr4 -Dlanguage=Java T.g4 # default
|
||||
$ antlr4 -Dlanguage=C T.g4
|
||||
error(31): ANTLR cannot generate C code as of version 4.0b3
|
||||
```
|
||||
|
||||
## `-Werror`
|
||||
|
||||
As part of a large build, ANTLR warning messages could go unnoticed. Turn on this option to have warnings treated as errors, causing the ANTLR tool to report failure back to the invoking commandline shell.
|
||||
There are also some extended options that are useful mainly for debugging ANTLR itself:
|
||||
|
||||
## `-Xsave-lexer`
|
||||
|
||||
ANTLR generates both a parser and a lexer from a combined grammar. To create the lexer, ANTLR extracts a lexer grammar from the combined grammar. This option tells ANTLR to save that extracted lexer grammar to a file, which is useful when it's not clear what token rules ANTLR is creating. It does not affect the generated parsers or lexers.
|
||||
|
||||
## `-XdbgST`
|
||||
|
||||
For those building a code generation target, this option brings up a window showing the generated code and the templates used to generate that code. It invokes the StringTemplate inspector window.
|
||||
|
||||
## `-Xforce-atn`
|
||||
|
||||
ANTLR normally builds traditional “switch on token type” decisions where possible (one token of lookahead is sufficient to distinguish between all alternatives in a decision). To force even these simple decisions into the adaptive LL(*) mechanism, use this option.
|
||||
|
||||
## `-Xlog`
|
||||
|
||||
This option creates a log file containing lots of information messages from ANTLR as it processes your grammar. If you would like to see how ANTLR translates your left-recursive rules, turn on this option and look in the resulting log file.
|
||||
|
||||
```bash
|
||||
$ antlr4 -Xlog T.g4
|
||||
wrote ./antlr-2012-09-06-17.56.19.log
|
||||
```
|
|
@ -0,0 +1,156 @@
|
|||
# Parse Tree Matching and XPath
|
||||
|
||||
*Since ANTLR 4.2*
|
||||
|
||||
ANTLR 4 introduced a visitor and listener mechanism that lets you implement DOM visiting or SAX-analogous event processing of tree nodes. This works great. For example, if all you care about is looking at Java method declarations, grab the `Java.g4` file and then override methodDeclaration in `JavaBaseListener`. From there, a `ParseTreeWalker` can trigger calls to your overridden method as it walks the tree. Easy things are easy.
|
||||
|
||||
This mechanism works more or less on a node-level basis. In other words, for every method declaration subtree root, your `methodDeclaration()` would get called. There are many situations where we care more about subtrees, not just nodes. We might want to:
|
||||
|
||||
* Collect method declarations within a particular context (i.e., nested within another method) or methods with specific structure or specific types (e.g., `void <ID>() { }`). We'll combine `XPath` and tree pattern matching for this.
|
||||
* Group translation operations by patterns in the tree rather than spreading operations across listener event methods.
|
||||
* Get a list of all assignments anywhere in the tree. It's much easier to say *go find me all "... = ... ;" subtrees* rather than creating a class just to get a listener method for rule assignment and then passing the listener to the parse tree walker.
|
||||
|
||||
The other important idea here is that, since we're talking about parse trees not abstract syntax trees, we can use concrete patterns instead of tree syntax. For example, we can say `x = 0;` instead of AST `(= x 0)` where the `;` would probably be stripped before it went into the AST.
|
||||
|
||||
## Parse tree patterns
|
||||
|
||||
To test a subtree to see if it has a particular structure, we use a tree pattern. We also often want to extract descendants from the subtree based upon the structure. A very simple example is checking to see if a subtree matches an assignment statement. The pattern might look like the following in your language:
|
||||
|
||||
```
|
||||
<ID> = <expr>;
|
||||
```
|
||||
|
||||
where "tags" in angle brackets represent either token or rule references in the associated grammar. ANTLR converts that string to a parse tree with special nodes that represent any token `ID` and rule `expr` subtree. To create this parse tree, the pattern matching compiler needs to know which rule in the grammar the pattern conforms to. In this case it might be statement. Here is how we could test a tree, `t`, to see if it matches that pattern:
|
||||
|
||||
```java
|
||||
ParseTree t = ...; // assume t is a statement
|
||||
ParseTreePattern p = parser.compileParseTreePattern("<ID> = <expr>;", MyParser.RULE_statement);
|
||||
ParseTreeMatch m = p.match(t);
|
||||
if ( m.succeeded() ) {...}
|
||||
```
|
||||
|
||||
We can also test for specific expressions or token values. For example, the following checks to see if `t` is an expression consisting of an identifier added to 0:
|
||||
|
||||
```java
|
||||
ParseTree t = ...; // assume t is an expression
|
||||
ParseTreePattern p = parser.compileParseTreePattern("<ID>+0", MyParser.RULE_expr);
|
||||
ParseTreeMatch m = p.match(t);
|
||||
```
|
||||
|
||||
We can also ask the `ParseTreeMatch` result to pull out the token matched to the `<ID>` tag:
|
||||
|
||||
```java
|
||||
String id = m.get("ID");
|
||||
```
|
||||
|
||||
You can change the tag delimiters using a method on the pattern matcher:
|
||||
|
||||
```java
|
||||
ParseTreePatternMatcher m = new ParseTreePatternMatcher();
|
||||
m.setDelimiters("<<", ">>", "$"); // $ is the escape character
|
||||
```
|
||||
|
||||
This would allow pattern `<<ID>> = <<expr>> ;$<< ick $>>` to be interpreted as elements: `ID`, ` = `, `expr`, and ` ;<< ick >>`.

As a preview of combining XPath with tree pattern matching (covered in detail below), the following finds `int` variable declarations among the block statements of a Java parse tree:
|
||||
|
||||
```java
|
||||
String xpath = "//blockStatement/*";
|
||||
String treePattern = "int <Identifier> = <expression>;";
|
||||
ParseTreePattern p =
|
||||
parser.compileParseTreePattern(treePattern,
|
||||
JavaParser.RULE_localVariableDeclarationStatement);
|
||||
List<ParseTreeMatch> matches = p.findAll(tree, xpath);
|
||||
```
|
||||
|
||||
### Pattern labels
|
||||
|
||||
The tree pattern matcher tracks the nodes in the tree that it matches against the tags in a tree pattern. That way we can use the `get()` and `getAll()` methods to retrieve components of the matched subtree. For example, for pattern `<ID>`, `get("ID")` returns the node matched for that `ID`. If more than one node matched the specified token or rule tag, only the first match is returned. If there is no node associated with the label, this returns null.
|
||||
|
||||
You can also label the tags with identifiers. If the label is the name of a parser rule or token in the grammar, the resulting list from `getAll()` (or node from `get()`) will contain both the parse trees matching rule or tags explicitly labeled with the label and the complete set of parse trees matching the labeled and unlabeled tags in the pattern for the parser rule or token. For example, if label is `foo`, the result will contain all of the following.
|
||||
|
||||
* Parse tree nodes matching tags of the form `<foo:anyRuleName>` and `<foo:AnyTokenName>`.
|
||||
* Parse tree nodes matching tags of the form `<anyLabel:foo>`.
|
||||
* Parse tree nodes matching tags of the form `<foo>`.
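For example, here is a hedged sketch (following the earlier examples; `MyParser`, `RULE_statement`, and the tree `t` are illustrative) that labels the left-hand side `ID` tag as `lhs` and then retrieves both the labeled node and every `ID` node:

```java
ParseTree t = ...; // assume t is a statement
ParseTreePattern p =
    parser.compileParseTreePattern("<lhs:ID> = <expr>;", MyParser.RULE_statement);
ParseTreeMatch m = p.match(t);
if ( m.succeeded() ) {
    ParseTree lhs = m.get("lhs");         // node matched by the <lhs:ID> tag
    List<ParseTree> ids = m.getAll("ID"); // every node matched by an ID tag, labeled or not
}
```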
|
||||
|
||||
### Creating parse trees with the pattern matcher
|
||||
|
||||
You can use the parse tree pattern compiler to create parse trees for partial input fragments. Just use method `ParseTreePattern.getPatternTree()`.
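For example, a hedged sketch (again with illustrative `MyParser`/`RULE_statement` names) that compiles a pattern and prints the parse tree ANTLR built for it:

```java
ParseTreePattern p =
    parser.compileParseTreePattern("<ID> = <expr>;", MyParser.RULE_statement);
ParseTree patternTree = p.getPatternTree();
System.out.println(patternTree.toStringTree(parser)); // inspect the tree built from the pattern
```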
|
||||
|
||||
See [TestParseTreeMatch.java](https://github.com/antlr/antlr4/blob/master/tool-testsuite/test/org/antlr/v4/test/tool/TestParseTreeMatcher.java).
|
||||
|
||||
## Using XPath to identify parse tree node sets
|
||||
|
||||
XPath paths are strings representing nodes or subtrees you would like to select within a parse tree. It's useful to collect subsets of the parse tree to process. For example you might want to know where all assignments are in a method or all variable declarations that are initialized.
|
||||
|
||||
A path is a series of node names with the following separators.
|
||||
|
||||
| Expression |Description|
|
||||
|---------|-----------|
|
||||
|nodename| Nodes with the token or rule name nodename
|
||||
|/| The root node but `/X` is the same as `X` since the tree you pass to xpath is assumed to be the root. Because it looks better, start all of your patterns with `/` (or `//` below).|
|
||||
|//| All nodes in the tree that match the next element in the path. E.g., `//ID` finds all `ID` token nodes in the tree.|
|
||||
|!| Any node except for the next element in the path. E.g., `/classdef/!field` should find all children of `classdef` root node that are not `field` subtrees.|
|
||||
|
||||
Examples:
|
||||
|
||||
```
|
||||
/prog/func, -> all funcs under prog at root
|
||||
/prog/*, -> all children of prog at root
|
||||
/*/func, -> all func kids of any root node
|
||||
prog, -> prog must be root node
|
||||
/prog, -> prog must be root node
|
||||
/*, -> any root
|
||||
*, -> any root
|
||||
//ID, -> any ID in tree
|
||||
//expr/primary/ID, -> any ID child of a primary under any expr
|
||||
//body//ID, -> any ID under a body
|
||||
//'return', -> any 'return' literal in tree
|
||||
//primary/*, -> all kids of any primary
|
||||
//func/*/stat, -> all stat nodes grandkids of any func node
|
||||
/prog/func/'def', -> all def literal kids of func kid of prog
|
||||
//stat/';', -> all ';' under any stat node
|
||||
//expr/primary/!ID, -> anything but ID under primary under any expr node
|
||||
//expr/!primary, -> anything but primary under any expr node
|
||||
//!*, -> nothing anywhere
|
||||
/!*, -> nothing at root
|
||||
```
|
||||
|
||||
Given a parse tree, the typical mechanism for visiting those nodes is the following loop:
|
||||
|
||||
```java
|
||||
for (ParseTree t : XPath.findAll(tree, xpath, parser) ) {
|
||||
... process t ...
|
||||
}
|
||||
```
|
||||
|
||||
E.g., here is a general formula for making a list of the text associated with every node identified by a path specification:
|
||||
|
||||
```java
|
||||
List<String> nodes = new ArrayList<String>();
|
||||
for (ParseTree t : XPath.findAll(tree, xpath, parser) ) {
|
||||
if ( t instanceof RuleContext) {
|
||||
RuleContext r = (RuleContext)t;
|
||||
nodes.add(parser.getRuleNames()[r.getRuleIndex()]); }
|
||||
else {
|
||||
TerminalNode token = (TerminalNode)t;
|
||||
nodes.add(token.getText());
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Combining XPath and tree pattern matching
|
||||
|
||||
Naturally you can combine the use of XPath to find a set of root nodes and then use tree pattern matching to identify a certain subset of those and extract component nodes.
|
||||
|
||||
```java
|
||||
// assume we are parsing Java
|
||||
ParserRuleContext tree = parser.compilationUnit();
|
||||
String xpath = "//blockStatement/*"; // get children of blockStatement
|
||||
String treePattern = "int <Identifier> = <expression>;";
|
||||
ParseTreePattern p =
|
||||
parser.compileParseTreePattern(treePattern,
|
||||
ExprParser.RULE_localVariableDeclarationStatement);
|
||||
List<ParseTreeMatch> matches = p.findAll(tree, xpath);
|
||||
System.out.println(matches);
|
||||
```
|
||||
|
||||
See [TestXPath.java](https://github.com/antlr/antlr4/blob/master/tool-testsuite/test/org/antlr/v4/test/tool/TestXPath.java).
|
|
@ -0,0 +1,228 @@
|
|||
# Wildcard Operator and Nongreedy Subrules
|
||||
|
||||
EBNF subrules like `(...)?`, `(...)*` and `(...)+` are greedy: they consume as much input as possible, but sometimes that's not what's needed. Constructs like `.*` consume until the end of the input in the lexer and sometimes in the parser. We want that loop to be nongreedy, so we need different syntax: `.*?`, borrowed from regular expression notation. We can make any subrule that has a `?`, `*`, or `+` suffix nongreedy by adding another `?` suffix. Such nongreedy subrules are allowed in both the parser and the lexer, but they are used much more frequently in the lexer.
|
||||
|
||||
## Nongreedy Lexer Subrules
|
||||
|
||||
Here’s the very common C-style comment lexer rule that consumes any characters until it sees the trailing `*/`:
|
||||
|
||||
```
|
||||
COMMENT : '/*' .*? '*/' -> skip ; // .*? matches anything until the first */
|
||||
```
|
||||
|
||||
Here’s another example that matches strings that allow \" as an escaped quote character:
|
||||
|
||||
```
|
||||
grammar Nongreedy;
|
||||
s : STRING+ ;
|
||||
STRING : '"' ( '\\"' | . )*? '"' ; // match "foo", "\"", "x\"\"y", ...
|
||||
WS : [ \r\t\n]+ -> skip ;
|
||||
```
|
||||
|
||||
```bash
|
||||
$ antlr4 Nongreedy.g4
|
||||
$ javac Nongreedy*.java
|
||||
$ grun Nongreedy s -tokens
|
||||
=> "quote:\""
|
||||
=> EOF
|
||||
<= [@0,0:9='"quote:\""',<1>,1:0]
|
||||
[@1,11:10='<EOF>',<-1>,2:0]
|
||||
```
|
||||
|
||||
Nongreedy subrules should be used sparingly because they complicate the recognition problem and sometimes make it tricky to decipher how the lexer will match text. Here is how the lexer chooses token rules:
|
||||
|
||||
<ol>
|
||||
<li>The primary goal is to match the lexer rule that recognizes the most input characters.
|
||||
|
||||
```
|
||||
INT : [0-9]+ ;
|
||||
DOT : '.' ; // match period
|
||||
FLOAT : [0-9]+ '.' ; // match FLOAT upon '34.' not INT then DOT
|
||||
```
|
||||
</li>
|
||||
<li>
|
||||
If more than one lexer rule matches the same input sequence, the priority goes to the rule occurring first in the grammar file.
|
||||
|
||||
```
|
||||
DOC : '/**' .*? '*/' ; // both rules match /** foo */, resolve to DOC
|
||||
CMT : '/*' .*? '*/' ;
|
||||
```
|
||||
</li>
|
||||
<li>
|
||||
Nongreedy subrules match the fewest number of characters that still allows the surrounding lexical rule to match.
|
||||
|
||||
```
|
||||
/** Match anything except \n inside of double angle brackets */
|
||||
STRING : '<<' ~'\n'*? '>>' ; // Input '<<foo>>>>' matches STRING then END
|
||||
END : '>>' ;
|
||||
```
|
||||
</li>
|
||||
<li>
|
||||
<p>After crossing through a nongreedy subrule within a lexical rule, all decision-making from then on is "first match wins."
|
||||
</p>
|
||||
<p>
|
||||
For example, literal `ab` in rule right-hand side (grammar fragment) `.*? ('a'|'ab')` is dead code and can never be matched. If the input is `ab`, the first alternative, `'a'`, matches the first character and therefore succeeds. `('a'|'ab')` by itself on the right-hand side of a rule properly matches the second alternative for input `ab`. This quirk arises from a nongreedy design decision that's too complicated to go into here.</p>
|
||||
</li>
|
||||
</ol>
|
||||
|
||||
To illustrate the different ways to use loops within lexer rules, consider the following grammar, which has three different action-like tokens (using different delimiters so that they all fit within one example grammar).
|
||||
|
||||
```
|
||||
ACTION1 : '{' ( STRING | . )*? '}' ; // Allows {"foo}
|
||||
ACTION2 : '[' ( STRING | ~'"' )*? ']' ; // Doesn't allow ["foo]; nongreedy *?
|
||||
ACTION3 : '<' ( STRING | ~[">] )* '>' ; // Doesn't allow <"foo>; greedy *
|
||||
STRING : '"' ( '\\"' | . )*? '"' ;
|
||||
```
|
||||
|
||||
Rule `ACTION1` allows unterminated strings, such as `"foo`, because input `"foo` matches to the wildcard part of the loop. It doesn't have to go into rule `STRING` to match a quote. To fix that, rule `ACTION2` uses `~'"'` to match any character but the quote. Expression `~'"'` is still ambiguous with the `']'` that ends the rule, but the fact that the subrule is nongreedy means that the lexer will exit the loop upon a right square bracket. To avoid a nongreedy subrule, make the alternatives explicit. Expression `~[">]` matches anything but the quote and right angle bracket. Here's a sample run:
|
||||
|
||||
```bash
|
||||
$ antlr4 Actions.g4
|
||||
$ javac Actions*.java
|
||||
$ grun Actions tokens -tokens
|
||||
=> {"foo}
|
||||
=> EOF
|
||||
<= [@0,0:5='{"foo}',<1>,1:0]
|
||||
[@1,7:6='<EOF>',<-1>,2:0]
|
||||
=> $ grun Actions tokens -tokens
|
||||
=> ["foo]
|
||||
=> EOF
|
||||
<= line 1:0 token recognition error at: '["foo]
|
||||
'
|
||||
[@0,7:6='<EOF>',<-1>,2:0]
|
||||
=> $ grun Actions tokens -tokens
|
||||
=> <"foo>
|
||||
=> EOF
|
||||
<= line 1:0 token recognition error at: '<"foo>
|
||||
'
|
||||
[@0,7:6='<EOF>',<-1>,2:0]
|
||||
```
|
||||
|
||||
## Nongreedy Parser Subrules
|
||||
|
||||
Nongreedy subrules and wildcard are also useful within parsers to do *fuzzy parsing* where the goal is to extract information from an input file without having to specify the full grammar. In contrast to nongreedy lexer decision-making, parsers always make globally correct decisions. A parser never makes a decision that will ultimately cause valid input to fail later on during the parse. Here is the central idea: Nongreedy parser subrules match the shortest sequence of tokens that preserves a successful parse for a valid input sentence.
|
||||
|
||||
For example, here are the key rules that demonstrate how to pull integer constants out of an arbitrary Java file:
|
||||
|
||||
```
|
||||
grammar FuzzyJava;
|
||||
|
||||
/** Match anything in between constant rule matches */
|
||||
file : .*? (constant .*?)+ ;
|
||||
|
||||
/** Faster alternate version (Gets an ANTLR tool warning about
|
||||
* a subrule like .* in parser that you can ignore.)
|
||||
*/
|
||||
altfile : (constant | .)* ; // match a constant or any token, 0-or-more times
|
||||
|
||||
/** Match things like "public static final SIZE" followed by anything */
|
||||
constant
|
||||
: 'public' 'static' 'final' 'int' Identifier
|
||||
{System.out.println("constant: "+$Identifier.text);}
|
||||
;
|
||||
|
||||
Identifier : [a-zA-Z_$] [a-zA-Z_$0-9]* ; // simplified
|
||||
```
|
||||
|
||||
The grammar contains a greatly simplified set of lexer rules from a real Java lexer; the whole file is about 60 lines. The recognizer still needs to handle string and character constants as well as comments so it doesn't get out of sync, trying to match a constant inside of a string for example. The only unusual lexer rule performs "match any character not matched by another lexer rule" functionality:
|
||||
|
||||
```
|
||||
OTHER : . -> skip ;
|
||||
```
|
||||
|
||||
This catchall lexer rule and the `.*?` subrule in the parser are the critical ingredients for fuzzy parsing.
|
||||
|
||||
Here’s a sample file that we can run into the fuzzy parser:
|
||||
|
||||
```java
|
||||
import java.util.*;
|
||||
public class C {
|
||||
public static final int A = 1;
|
||||
public static final int B = 1;
|
||||
public void foo() { }
|
||||
public static final int C = 1;
|
||||
}
|
||||
```
|
||||
|
||||
And here’s the build and test sequence:
|
||||
|
||||
```bash
|
||||
$ antlr4 FuzzyJava.g4
|
||||
$ javac FuzzyJava*.java
|
||||
$ grun FuzzyJava file C.java
|
||||
constant: A
|
||||
constant: B
|
||||
constant: C
|
||||
```
|
||||
|
||||
Notice that it totally ignores everything except for the `public static final int` declarations. This all happens with only two parser rules.
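If you'd rather drive the fuzzy parser from Java than from `grun`, a hedged sketch (generated class names follow the `FuzzyJava` grammar above; the input file name is illustrative) looks like this:

```java
// Minimal driver for the fuzzy parser; the embedded action in rule 'constant'
// prints each constant name as the parse proceeds.
import org.antlr.v4.runtime.ANTLRFileStream;
import org.antlr.v4.runtime.CommonTokenStream;

public class RunFuzzyJava {
    public static void main(String[] args) throws Exception {
        FuzzyJavaLexer lexer = new FuzzyJavaLexer(new ANTLRFileStream("C.java"));
        CommonTokenStream tokens = new CommonTokenStream(lexer);
        FuzzyJavaParser parser = new FuzzyJavaParser(tokens);
        parser.file();
    }
}
```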
|
||||
|
||||
Now let's try matching some simple class defs w/o having to build parser rules for the junk inside. Here we want to catch just `A` and `B`:
|
||||
|
||||
```
|
||||
class A {
|
||||
String name = "parrt";
|
||||
}
|
||||
|
||||
class B {
|
||||
int x;
|
||||
int getDubX() {
|
||||
return 2*x;
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
This grammar does it.
|
||||
|
||||
```
|
||||
grammar Island;
|
||||
file : clazz* ;
|
||||
clazz : 'class' ID '{' ignore '}' ;
|
||||
ignore : (method|.)*? ;
|
||||
method : type ID '()' block ;
|
||||
type : 'int' | 'void' ;
|
||||
block : '{' (block | .)*? '}' ;
|
||||
ID : [a-zA-Z] [a-zA-Z0-9]* ;
|
||||
WS : [ \r\t\n]+ -> skip ;
|
||||
ANY : . ;
|
||||
```
|
||||
|
||||
You get:
|
||||
|
||||
<img src=images/nonnested-fuzzy.png width=450>
|
||||
|
||||
Now let's try some nested classes
|
||||
|
||||
```
|
||||
class A {
|
||||
String name = "parrt";
|
||||
class Nested {
|
||||
any filthy shite we want in here { }}}}}}
|
||||
}
|
||||
}
|
||||
|
||||
class B {
|
||||
int x;
|
||||
int getDubX() {
|
||||
return 2*x;
|
||||
}
|
||||
}
|
||||
|
||||
```
|
||||
|
||||
```
|
||||
grammar Island;
|
||||
file : clazz* ;
|
||||
clazz : 'class' ID '{' ignore '}' ;
|
||||
ignore : (method|clazz|.)*? ; // <- only change is to add clazz alt here
|
||||
method : type ID '()' block ;
|
||||
type : 'int' | 'void' ;
|
||||
block : '{' (block | .)*? '}' ;
|
||||
ID : [a-zA-Z] [a-zA-Z0-9]* ;
|
||||
WS : [ \r\t\n]+ -> skip ;
|
||||
ANY : . ;
|
||||
```
|
||||
|
||||
You get:
|
||||
|
||||
<img src=images/nested-fuzzy.png width=600>
|
321
pom.xml
|
@ -1,212 +1,131 @@
|
|||
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
|
||||
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
|
||||
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
|
||||
<modelVersion>4.0.0</modelVersion>
|
||||
<parent>
|
||||
<groupId>org.sonatype.oss</groupId>
|
||||
<artifactId>oss-parent</artifactId>
|
||||
<version>9</version>
|
||||
</parent>
|
||||
<groupId>org.antlr</groupId>
|
||||
<artifactId>antlr4-master</artifactId>
|
||||
<version>4.5.4-SNAPSHOT</version>
|
||||
<packaging>pom</packaging>
|
||||
|
||||
<parent>
|
||||
<groupId>org.sonatype.oss</groupId>
|
||||
<artifactId>oss-parent</artifactId>
|
||||
<version>7</version>
|
||||
</parent>
|
||||
|
||||
<modelVersion>4.0.0</modelVersion>
|
||||
<groupId>org.antlr</groupId>
|
||||
<artifactId>antlr4-master</artifactId>
|
||||
<version>4.1.1-SNAPSHOT</version>
|
||||
<packaging>pom</packaging>
|
||||
|
||||
<name>ANTLR 4</name>
|
||||
<description>ANTLR 4 Master Build POM</description>
|
||||
<name>ANTLR 4</name>
|
||||
<description>ANTLR 4 Master Build POM</description>
|
||||
<url>http://www.antlr.org</url>
|
||||
<inceptionYear>1992</inceptionYear>
|
||||
<organization>
|
||||
<name>ANTLR</name>
|
||||
<url>http://www.antlr.org</url>
|
||||
<inceptionYear>1992</inceptionYear>
|
||||
<organization>
|
||||
<name>ANTLR</name>
|
||||
<url>http://www.antlr.org</url>
|
||||
</organization>
|
||||
</organization>
|
||||
|
||||
<licenses>
|
||||
<license>
|
||||
<name>The BSD License</name>
|
||||
<url>http://www.antlr.org/license.html</url>
|
||||
<distribution>repo</distribution>
|
||||
</license>
|
||||
</licenses>
|
||||
<licenses>
|
||||
<license>
|
||||
<name>The BSD License</name>
|
||||
<url>http://www.antlr.org/license.html</url>
|
||||
<distribution>repo</distribution>
|
||||
</license>
|
||||
</licenses>
|
||||
|
||||
<developers>
|
||||
<developers>
|
||||
<developer>
|
||||
<name>Terence Parr</name>
|
||||
<url>http://antlr.org/wiki/display/~admin/Home</url>
|
||||
<roles>
|
||||
<role>Project lead - ANTLR</role>
|
||||
</roles>
|
||||
</developer>
|
||||
<developer>
|
||||
<name>Sam Harwell</name>
|
||||
<url>http://tunnelvisionlabs.com</url>
|
||||
<roles>
|
||||
<role>Developer</role>
|
||||
</roles>
|
||||
</developer>
|
||||
<developer>
|
||||
<name>Eric Vergnaud</name>
|
||||
<roles>
|
||||
<role>Developer - JavaScript, C#, Python 2, Python 3</role>
|
||||
</roles>
|
||||
</developer>
|
||||
<developer>
|
||||
<name>Jim Idle</name>
|
||||
<email>jimi@idle.ws</email>
|
||||
<url>http://www.linkedin.com/in/jimidle</url>
|
||||
<roles>
|
||||
<role>Developer - Maven Plugin</role>
|
||||
</roles>
|
||||
</developer>
|
||||
<developer>
|
||||
<name>Mike Lischke</name>
|
||||
<roles>
|
||||
<role>Developer - C++ Target</role>
|
||||
</roles>
|
||||
</developer>
|
||||
</developers>
|
||||
|
||||
<developer>
|
||||
<name>Terence Parr</name>
|
||||
<url>http://antlr.org/wiki/display/~admin/Home</url>
|
||||
<roles>
|
||||
<role>Project lead - ANTLR</role>
|
||||
</roles>
|
||||
</developer>
|
||||
<modules>
|
||||
<module>runtime/Java</module>
|
||||
<module>tool</module>
|
||||
<module>antlr4-maven-plugin</module>
|
||||
<module>tool-testsuite</module>
|
||||
<module>runtime-testsuite</module>
|
||||
</modules>
|
||||
|
||||
<developer>
|
||||
<name>Sam Harwell</name>
|
||||
<url>http://tunnelvisionlabs.com</url>
|
||||
<roles>
|
||||
<role>Developer</role>
|
||||
</roles>
|
||||
</developer>
|
||||
<properties>
|
||||
<project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
|
||||
<project.reporting.outputEncoding>UTF-8</project.reporting.outputEncoding>
|
||||
<antlr.testinprocess>true</antlr.testinprocess>
|
||||
<maven.compiler.source>1.6</maven.compiler.source>
|
||||
<maven.compiler.target>1.6</maven.compiler.target>
|
||||
</properties>
|
||||
|
||||
<developer>
|
||||
<name>Jim Idle</name>
|
||||
<email>jimi@idle.ws</email>
|
||||
<url>http://www.linkedin.com/in/jimidle</url>
|
||||
<roles>
|
||||
<role>Developer - Maven Plugin</role>
|
||||
</roles>
|
||||
</developer>
|
||||
<mailingLists>
|
||||
<mailingList>
|
||||
<name>antlr-discussion</name>
|
||||
<archive>https://groups.google.com/forum/?fromgroups#!forum/antlr-discussion</archive>
|
||||
</mailingList>
|
||||
</mailingLists>
|
||||
|
||||
</developers>
|
||||
<issueManagement>
|
||||
<system>GitHub Issues</system>
|
||||
<url>https://github.com/antlr/antlr4/issues</url>
|
||||
</issueManagement>
|
||||
|
||||
<modules>
|
||||
<module>runtime/Java</module>
|
||||
<module>tool</module>
|
||||
<module>antlr4-maven-plugin</module>
|
||||
</modules>
|
||||
<scm>
|
||||
<url>https://github.com/antlr/antlr4/tree/master</url>
|
||||
<connection>scm:git:git://github.com/antlr/antlr4.git</connection>
|
||||
<developerConnection>scm:git:git@github.com:antlr/antlr4.git</developerConnection>
|
||||
<tag>HEAD</tag>
|
||||
</scm>
|
||||
|
||||
<properties>
|
||||
<project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
|
||||
<project.reporting.outputEncoding>UTF-8</project.reporting.outputEncoding>
|
||||
<java5.home>${env.JAVA5_HOME}</java5.home>
|
||||
<java6.home>${env.JAVA6_HOME}</java6.home>
|
||||
<bootclasspath.java5>${java5.home}/lib/rt.jar</bootclasspath.java5>
|
||||
<bootclasspath.java6>${java6.home}/lib/rt.jar</bootclasspath.java6>
|
||||
<bootclasspath.compile>${bootclasspath.java6}</bootclasspath.compile>
|
||||
<bootclasspath.testCompile>${bootclasspath.java6}</bootclasspath.testCompile>
|
||||
<antlr.testinprocess>true</antlr.testinprocess>
|
||||
</properties>
|
||||
|
||||
<mailingLists>
|
||||
<mailingList>
|
||||
<name>antlr-discussion</name>
|
||||
<archive>https://groups.google.com/forum/?fromgroups#!forum/antlr-discussion</archive>
|
||||
</mailingList>
|
||||
</mailingLists>
|
||||
|
||||
<issueManagement>
|
||||
<system>GitHub Issues</system>
|
||||
<url>https://github.com/antlr/antlr4/issues</url>
|
||||
</issueManagement>
|
||||
|
||||
<scm>
|
||||
<url>https://github.com/antlr/antlr4/tree/master</url>
|
||||
<connection>scm:git:git://github.com/antlr/antlr4.git</connection>
|
||||
<developerConnection>scm:git:git@github.com:antlr/antlr4.git</developerConnection>
|
||||
</scm>
|
||||
|
||||
<profiles>
|
||||
<profile>
|
||||
<id>sonatype-oss-release</id>
|
||||
<build>
|
||||
<plugins>
|
||||
<plugin>
|
||||
<groupId>org.apache.maven.plugins</groupId>
|
||||
<artifactId>maven-compiler-plugin</artifactId>
|
||||
<executions>
|
||||
<execution>
|
||||
<id>default-compile</id>
|
||||
<configuration>
|
||||
<compilerArguments>
|
||||
<bootclasspath>${bootclasspath.compile}</bootclasspath>
|
||||
</compilerArguments>
|
||||
</configuration>
|
||||
</execution>
|
||||
<execution>
|
||||
<id>default-testCompile</id>
|
||||
<configuration>
|
||||
<compilerArguments>
|
||||
<bootclasspath>${bootclasspath.testCompile}</bootclasspath>
|
||||
</compilerArguments>
|
||||
</configuration>
|
||||
</execution>
|
||||
</executions>
|
||||
</plugin>
|
||||
</plugins>
|
||||
</build>
|
||||
</profile>
|
||||
</profiles>
|
||||
|
||||
<build>
|
||||
<plugins>
|
||||
<plugin>
|
||||
<groupId>org.apache.maven.plugins</groupId>
|
||||
<artifactId>maven-compiler-plugin</artifactId>
|
||||
<version>3.1</version>
|
||||
<configuration>
|
||||
<sourceDirectory>src</sourceDirectory>
|
||||
<showWarnings>true</showWarnings>
|
||||
<showDeprecation>true</showDeprecation>
|
||||
</configuration>
|
||||
|
||||
<executions>
|
||||
<execution>
|
||||
<id>default-compile</id>
|
||||
<configuration>
|
||||
<source>1.6</source>
|
||||
<target>1.6</target>
|
||||
<compilerArgument>-Xlint:-serial</compilerArgument>
|
||||
<compilerArguments>
|
||||
<Xlint/>
|
||||
</compilerArguments>
|
||||
</configuration>
|
||||
</execution>
|
||||
<execution>
|
||||
<id>default-testCompile</id>
|
||||
<configuration>
|
||||
<source>1.6</source>
|
||||
<target>1.6</target>
|
||||
<compilerArgument>-Xlint:-serial</compilerArgument>
|
||||
<compilerArguments>
|
||||
<Xlint/>
|
||||
</compilerArguments>
|
||||
</configuration>
|
||||
</execution>
|
||||
</executions>
|
||||
</plugin>
|
||||
|
||||
<plugin>
|
||||
<groupId>org.apache.maven.plugins</groupId>
|
||||
<artifactId>maven-jar-plugin</artifactId>
|
||||
<version>2.4</version>
|
||||
<configuration>
|
||||
<archive>
|
||||
<manifest>
|
||||
<addDefaultImplementationEntries>true</addDefaultImplementationEntries>
|
||||
</manifest>
|
||||
</archive>
|
||||
</configuration>
|
||||
</plugin>
|
||||
|
||||
<plugin>
|
||||
<groupId>org.apache.maven.plugins</groupId>
|
||||
<artifactId>maven-surefire-plugin</artifactId>
|
||||
<version>2.15</version>
|
||||
</plugin>
|
||||
|
||||
<plugin>
|
||||
<groupId>org.apache.maven.plugins</groupId>
|
||||
<artifactId>maven-source-plugin</artifactId>
|
||||
<!-- override the version inherited from the parent -->
|
||||
<version>2.2.1</version>
|
||||
</plugin>
|
||||
|
||||
<plugin>
|
||||
<groupId>org.apache.maven.plugins</groupId>
|
||||
<artifactId>maven-javadoc-plugin</artifactId>
|
||||
<!-- override the version inherited from the parent -->
|
||||
<version>2.9</version>
|
||||
<configuration>
|
||||
<quiet>true</quiet>
|
||||
</configuration>
|
||||
</plugin>
|
||||
|
||||
<plugin>
|
||||
<groupId>org.apache.maven.plugins</groupId>
|
||||
<artifactId>maven-gpg-plugin</artifactId>
|
||||
<!-- override the version inherited from the parent -->
|
||||
<version>1.4</version>
|
||||
</plugin>
|
||||
</plugins>
|
||||
</build>
|
||||
<build>
|
||||
<resources>
|
||||
<resource>
|
||||
<directory>resources</directory>
|
||||
</resource>
|
||||
</resources>
|
||||
<testSourceDirectory>test</testSourceDirectory>
|
||||
<testResources>
|
||||
<testResource>
|
||||
<directory>test</directory>
|
||||
</testResource>
|
||||
</testResources>
|
||||
<pluginManagement>
|
||||
<plugins>
|
||||
<plugin>
|
||||
<groupId>org.apache.maven.plugins</groupId>
|
||||
<artifactId>maven-compiler-plugin</artifactId>
|
||||
<configuration>
|
||||
<source>${maven.compiler.source}</source>
|
||||
<target>${maven.compiler.target}</target>
|
||||
</configuration>
|
||||
</plugin>
|
||||
<plugin>
|
||||
<groupId>org.apache.maven.plugins</groupId>
|
||||
<artifactId>maven-javadoc-plugin</artifactId>
|
||||
</plugin>
|
||||
</plugins>
|
||||
</pluginManagement>
|
||||
</build>
|
||||
</project>
|
||||
|
|
|
@ -0,0 +1,9 @@
|
|||
# Runtime Test Suite

If you are tweaking the runtime test suite generator, you can regenerate the tests with the following command:

```
mvn -Pgen generate-test-sources
```

This will generate the runtime test harness classes into the `test` directory, where they can be checked in.
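For reference, the `gen` profile simply runs the test generator through the exec plugin; a rough Java equivalent of that invocation is sketched below (the wrapper class and relative paths are illustrative and mirror the exec-maven-plugin configuration in the runtime-testsuite pom that follows):

```
// Hand-rolled equivalent of what `mvn -Pgen generate-test-sources` triggers:
// exec-maven-plugin invokes org.antlr.v4.testgen.TestGenerator with -root/-outdir/-templates.
// Relative paths are illustrative and assume the runtime-testsuite directory as the working dir.
public class RegenerateRuntimeTests {
    public static void main(String[] args) throws Exception {
        org.antlr.v4.testgen.TestGenerator.main(new String[] {
            "-root", ".",                                                    // ${basedir}
            "-outdir", "./test",                                             // ${basedir}/test
            "-templates", "./resources/org/antlr/v4/test/runtime/templates"  // test templates
        });
    }
}
```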
|
|
@ -0,0 +1,145 @@
|
|||
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
|
||||
|
||||
<modelVersion>4.0.0</modelVersion>
|
||||
<parent>
|
||||
<groupId>org.antlr</groupId>
|
||||
<artifactId>antlr4-master</artifactId>
|
||||
<version>4.5.4-SNAPSHOT</version>
|
||||
</parent>
|
||||
<artifactId>antlr4-runtime-testsuite</artifactId>
|
||||
<name>ANTLR 4 Runtime Test Generator</name>
|
||||
<description>A collection of tests for ANTLR 4 Runtime libraries.</description>
|
||||
|
||||
<prerequisites>
|
||||
<maven>3.0</maven>
|
||||
</prerequisites>
|
||||
|
||||
<inceptionYear>2009</inceptionYear>
|
||||
|
||||
<dependencies>
|
||||
<dependency>
|
||||
<groupId>org.antlr</groupId>
|
||||
<artifactId>ST4</artifactId>
|
||||
<version>4.0.8</version>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.antlr</groupId>
|
||||
<artifactId>antlr4</artifactId>
|
||||
<version>${project.version}</version>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.antlr</groupId>
|
||||
<artifactId>antlr4-runtime</artifactId>
|
||||
<version>${project.version}</version>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>junit</groupId>
|
||||
<artifactId>junit</artifactId>
|
||||
<version>4.11</version>
|
||||
<scope>test</scope>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.seleniumhq.selenium</groupId>
|
||||
<artifactId>selenium-java</artifactId>
|
||||
<version>2.46.0</version>
|
||||
<scope>test</scope>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.eclipse.jetty</groupId>
|
||||
<artifactId>jetty-server</artifactId>
|
||||
<version>8.1.16.v20140903</version>
|
||||
<scope>test</scope>
|
||||
</dependency>
|
||||
</dependencies>
|
||||
|
||||
<build>
|
||||
<sourceDirectory>src</sourceDirectory>
|
||||
<testSourceDirectory>test</testSourceDirectory>
|
||||
<resources>
|
||||
<resource>
|
||||
<directory>resources</directory>
|
||||
</resource>
|
||||
<resource>
|
||||
<directory>../runtime</directory>
|
||||
</resource>
|
||||
</resources>
|
||||
<plugins>
|
||||
<plugin>
|
||||
<groupId>org.apache.maven.plugins</groupId>
|
||||
<artifactId>maven-surefire-plugin</artifactId>
|
||||
<version>2.12.4</version>
|
||||
<configuration>
|
||||
<includes>
|
||||
<include>**/csharp/Test*.java</include>
|
||||
<include>**/java/Test*.java</include>
|
||||
<include>**/javascript/node/Test*.java</include>
|
||||
<include>**/python2/Test*.java</include>
|
||||
<include>**/python3/Test*.java</include>
|
||||
</includes>
|
||||
</configuration>
|
||||
</plugin>
|
||||
<plugin>
|
||||
<groupId>org.apache.maven.plugins</groupId>
|
||||
<artifactId>maven-jar-plugin</artifactId>
|
||||
<version>2.4</version>
|
||||
<executions>
|
||||
<execution>
|
||||
<goals>
|
||||
<goal>test-jar</goal>
|
||||
</goals>
|
||||
</execution>
|
||||
</executions>
|
||||
</plugin>
|
||||
</plugins>
|
||||
</build>
|
||||
|
||||
<profiles>
|
||||
<profile>
|
||||
<id>gen</id>
|
||||
<build>
|
||||
<plugins>
|
||||
<plugin>
|
||||
<groupId>org.codehaus.mojo</groupId>
|
||||
<artifactId>exec-maven-plugin</artifactId>
|
||||
<version>1.4.0</version>
|
||||
<executions>
|
||||
<execution>
|
||||
<phase>generate-test-sources</phase>
|
||||
<goals>
|
||||
<goal>java</goal>
|
||||
</goals>
|
||||
<configuration>
|
||||
<mainClass>org.antlr.v4.testgen.TestGenerator</mainClass>
|
||||
<arguments>
|
||||
<argument>-root</argument>
|
||||
<argument>${basedir}</argument>
|
||||
<argument>-outdir</argument>
|
||||
<argument>${basedir}/test</argument>
|
||||
<argument>-templates</argument>
|
||||
<argument>${basedir}/resources/org/antlr/v4/test/runtime/templates</argument>
|
||||
</arguments>
|
||||
</configuration>
|
||||
</execution>
|
||||
</executions>
|
||||
</plugin>
|
||||
</plugins>
|
||||
</build>
|
||||
</profile>
|
||||
<profile>
|
||||
<id>tests</id>
|
||||
<build>
|
||||
<plugins>
|
||||
<plugin>
|
||||
<groupId>org.apache.maven.plugins</groupId>
|
||||
<artifactId>maven-surefire-plugin</artifactId>
|
||||
<configuration>
|
||||
<includes>
|
||||
<include>**/Test*.java</include>
|
||||
</includes>
|
||||
</configuration>
|
||||
</plugin>
|
||||
</plugins>
|
||||
</build>
|
||||
</profile>
|
||||
</profiles>
|
||||
</project>
|
|
@ -0,0 +1,51 @@
|
|||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<Project xmlns="http://schemas.microsoft.com/developer/msbuild/2003" DefaultTargets="Build" ToolsVersion="4.0">
|
||||
<PropertyGroup>
|
||||
<Configuration Condition=" '$(Configuration)' == '' ">Debug</Configuration>
|
||||
<Platform Condition=" '$(Platform)' == '' ">AnyCPU</Platform>
|
||||
<ProjectGuid>{EDC70A11-C4C1-4209-93A6-CCE2B19E8E95}</ProjectGuid>
|
||||
<OutputType>Exe</OutputType>
|
||||
<RootNamespace>Antlr4.Test.mono</RootNamespace>
|
||||
<AssemblyName>Test</AssemblyName>
|
||||
<StartupObject>Test</StartupObject>
|
||||
<TargetFrameworkVersion>v3.5</TargetFrameworkVersion>
|
||||
</PropertyGroup>
|
||||
<PropertyGroup Condition=" '$(Configuration)|$(Platform)' == 'Debug|AnyCPU' ">
|
||||
<DebugSymbols>true</DebugSymbols>
|
||||
<DebugType>full</DebugType>
|
||||
<Optimize>false</Optimize>
|
||||
<OutputPath>bin\Debug</OutputPath>
|
||||
<DefineConstants>DEBUG;</DefineConstants>
|
||||
<ErrorReport>prompt</ErrorReport>
|
||||
<WarningLevel>4</WarningLevel>
|
||||
<Externalconsole>true</Externalconsole>
|
||||
</PropertyGroup>
|
||||
<PropertyGroup Condition=" '$(Configuration)|$(Platform)' == 'Release|AnyCPU' ">
|
||||
<Optimize>true</Optimize>
|
||||
<OutputPath>bin\Release</OutputPath>
|
||||
<ErrorReport>prompt</ErrorReport>
|
||||
<WarningLevel>4</WarningLevel>
|
||||
<Externalconsole>true</Externalconsole>
|
||||
</PropertyGroup>
|
||||
<ItemGroup>
|
||||
<Reference Include="System" />
|
||||
</ItemGroup>
|
||||
<ItemGroup>
|
||||
<ProjectReference Include="Antlr4.Runtime.mono.csproj">
|
||||
<Project>{E1A46D9D-66CB-46E8-93B0-7FC87299ABEF}</Project>
|
||||
<Name>Antlr4.Runtime.mono</Name>
|
||||
</ProjectReference>
|
||||
</ItemGroup>
|
||||
<ItemGroup>
|
||||
<Compile Include="AssemblyInfo.cs" />
|
||||
<Compile Include="Test.cs" />
|
||||
<Compile Include="L.cs" />
|
||||
</ItemGroup>
|
||||
<Import Project="$(MSBuildBinPath)\Microsoft.CSharp.targets" />
|
||||
<ItemGroup>
|
||||
<None Include="App.config">
|
||||
<LogicalName>Test.exe.config</LogicalName>
|
||||
<CopyToOutputDirectory>Always</CopyToOutputDirectory>
|
||||
</None>
|
||||
</ItemGroup>
|
||||
</Project>
|
|
@ -0,0 +1,44 @@
|
|||
<?xml version="1.0" encoding="UTF-8"?><Project xmlns="http://schemas.microsoft.com/developer/msbuild/2003" DefaultTargets="Build" ToolsVersion="4.0">
|
||||
<PropertyGroup>
|
||||
<Configuration Condition=" '$(Configuration)' == '' ">Debug</Configuration>
|
||||
<Platform Condition=" '$(Platform)' == '' ">AnyCPU</Platform>
|
||||
<ProjectGuid>{EDC70A11-C4C1-4209-93A6-CCE2B19E8E95}</ProjectGuid>
|
||||
<OutputType>Exe</OutputType>
|
||||
<RootNamespace>Antlr4.Test.mono</RootNamespace>
|
||||
<AssemblyName>Test</AssemblyName>
|
||||
<StartupObject>Test</StartupObject>
|
||||
<TargetFrameworkVersion>v3.5</TargetFrameworkVersion>
|
||||
</PropertyGroup>
|
||||
<PropertyGroup Condition=" '$(Configuration)|$(Platform)' == 'Debug|AnyCPU' ">
|
||||
<DebugSymbols>true</DebugSymbols>
|
||||
<DebugType>full</DebugType>
|
||||
<Optimize>false</Optimize>
|
||||
<OutputPath>bin\Debug</OutputPath>
|
||||
<DefineConstants>DEBUG;</DefineConstants>
|
||||
<ErrorReport>prompt</ErrorReport>
|
||||
<WarningLevel>4</WarningLevel>
|
||||
<Externalconsole>true</Externalconsole>
|
||||
</PropertyGroup>
|
||||
<PropertyGroup Condition=" '$(Configuration)|$(Platform)' == 'Release|AnyCPU' ">
|
||||
<Optimize>true</Optimize>
|
||||
<OutputPath>bin\Release</OutputPath>
|
||||
<ErrorReport>prompt</ErrorReport>
|
||||
<WarningLevel>4</WarningLevel>
|
||||
<Externalconsole>true</Externalconsole>
|
||||
</PropertyGroup>
|
||||
<ItemGroup>
|
||||
<Reference Include="System"/>
|
||||
</ItemGroup>
|
||||
<ItemGroup>
|
||||
<Compile Include="AssemblyInfo.cs"/>
|
||||
<Compile Include="Test.cs"/>
|
||||
<Compile Include="L.cs"/>
|
||||
</ItemGroup>
|
||||
<Import Project="$(MSBuildBinPath)\Microsoft.CSharp.targets"/>
|
||||
<ItemGroup>
|
||||
<ProjectReference Include="Antlr4.Runtime.vs2013.csproj">
|
||||
<Project>{E1A46D9D-66CB-46E8-93B0-7FC87299ABEF}</Project>
|
||||
<Name>Antlr4.Runtime.vs2013</Name>
|
||||
</ProjectReference>
|
||||
</ItemGroup>
|
||||
</Project>
|
|
@ -0,0 +1,7 @@
|
|||
<?xml version="1.0" encoding="UTF-8" ?>
|
||||
<configuration>
|
||||
<startup>
|
||||
<supportedRuntime version="v2.0.50727"/>
|
||||
<supportedRuntime version="v4.0"/>
|
||||
</startup>
|
||||
</configuration>
|
|
@ -0,0 +1,28 @@
|
|||
using System;
|
||||
using System.Reflection;
|
||||
using System.Runtime.CompilerServices;
|
||||
|
||||
// Information about this assembly is defined by the following attributes.
|
||||
// Change them to the values specific to your project.
|
||||
|
||||
[assembly: AssemblyTitle ("Antlr4.Test.mono")]
|
||||
[assembly: AssemblyDescription ("")]
|
||||
[assembly: AssemblyConfiguration ("")]
|
||||
[assembly: AssemblyCompany ("")]
|
||||
[assembly: AssemblyProduct ("")]
|
||||
[assembly: AssemblyCopyright ("ericvergnaud")]
|
||||
[assembly: AssemblyTrademark ("")]
|
||||
[assembly: AssemblyCulture ("")]
|
||||
[assembly: CLSCompliant (true)]
|
||||
// The assembly version has the format "{Major}.{Minor}.{Build}.{Revision}".
|
||||
// The form "{Major}.{Minor}.*" will automatically update the build and revision,
|
||||
// and "{Major}.{Minor}.{Build}.*" will update just the revision.
|
||||
|
||||
[assembly: AssemblyVersion ("1.0.*")]
|
||||
|
||||
// The following attributes are used to specify the signing key for the assembly,
|
||||
// if desired. See the Mono documentation for more information about signing.
|
||||
|
||||
//[assembly: AssemblyDelaySign(false)]
|
||||
//[assembly: AssemblyKeyFile("")]
|
||||
|
|
@ -0,0 +1,423 @@
|
|||
IgnoredTests ::= [
|
||||
default: false
|
||||
]
|
||||
|
||||
TestFile(file) ::= <<
|
||||
/* This file is generated by TestGenerator, any edits will be overwritten by the next generation. */
|
||||
package org.antlr.v4.test.runtime.csharp;
|
||||
|
||||
import org.junit.Test;
|
||||
import org.junit.Ignore;
|
||||
|
||||
<if(file.Options.("ImportErrorQueue"))>
|
||||
import org.antlr.v4.test.runtime.java.ErrorQueue;
|
||||
<endif>
|
||||
<if(file.Options.("ImportGrammar"))>
|
||||
import org.antlr.v4.tool.Grammar;
|
||||
<endif>
|
||||
|
||||
@SuppressWarnings("unused")
|
||||
public class Test<file.name> extends BaseTest {
|
||||
|
||||
<file.tests:{test | <test>}; separator="\n", wrap, anchor>
|
||||
|
||||
}<\n>
|
||||
>>
|
||||
|
||||
LexerTestMethod(test) ::= <<
|
||||
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
|
||||
<testAnnotations(test)>
|
||||
public void test<test.name>() throws Exception {
|
||||
mkdir(tmpdir);
|
||||
<test.SlaveGrammars:{grammar |
|
||||
String slave_<grammar> =<writeStringLiteral(test.SlaveGrammars.(grammar))>;
|
||||
writeFile(tmpdir, "<grammar>.g4", slave_<grammar>);
|
||||
}; separator="\n">
|
||||
|
||||
<test.Grammar:{grammar |
|
||||
<buildStringLiteral(test.Grammar.(grammar), "grammar")>
|
||||
<test.afterGrammar>
|
||||
String input =<writeStringLiteral(test.Input)>;
|
||||
String found = execLexer("<grammar>.g4", grammar, "<grammar><if(test.Options.("CombinedGrammar"))>Lexer<endif>", input, <writeBoolean(test.Options.("ShowDFA"))>);
|
||||
assertEquals(<writeStringLiteral(test.Output)>, found);
|
||||
<if(!isEmpty.(test.Errors))>
|
||||
assertEquals(<writeStringLiteral(test.Errors)>, this.stderrDuringParse);
|
||||
<else>
|
||||
assertNull(this.stderrDuringParse);
|
||||
<endif>
|
||||
}>
|
||||
}
|
||||
>>
|
||||
|
||||
CompositeLexerTestMethod(test) ::= <<
|
||||
<LexerTestMethod(test)>
|
||||
>>
|
||||
|
||||
ParserTestMethod(test) ::= <<
|
||||
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
|
||||
<testAnnotations(test)>
|
||||
public void test<test.name>() throws Exception {
|
||||
mkdir(tmpdir);
|
||||
<test.SlaveGrammars:{grammar |
|
||||
String slave_<grammar> =<writeStringLiteral(test.SlaveGrammars.(grammar))>;
|
||||
<if(test.Options.("SlaveIsLexer"))>
|
||||
rawGenerateAndBuildRecognizer("<grammar>.g4", slave_<grammar>, null, "<grammar>");
|
||||
<else>
|
||||
writeFile(tmpdir, "<grammar>.g4", slave_<grammar>);
|
||||
<endif>
|
||||
}; separator="\n">
|
||||
<test.Grammar:{grammar |
|
||||
<buildStringLiteral(test.Grammar.(grammar), "grammar")>
|
||||
<test.afterGrammar>
|
||||
String input =<writeStringLiteral(test.Input)>;
|
||||
String found = execParser("<grammar>.g4", grammar, "<grammar>Parser", "<grammar>Lexer", "<test.Rule>", input, <writeBoolean(test.Options.("Debug"))>);
|
||||
assertEquals(<writeStringLiteral(test.Output)>, found);
|
||||
<if(!isEmpty.(test.Errors))>
|
||||
assertEquals(<writeStringLiteral(test.Errors)>, this.stderrDuringParse);
|
||||
<else>
|
||||
assertNull(this.stderrDuringParse);
|
||||
<endif>
|
||||
}>
|
||||
}
|
||||
>>
|
||||
|
||||
CompositeParserTestMethod(test) ::= <<
|
||||
<ParserTestMethod(test)>
|
||||
>>
|
||||
|
||||
AbstractParserTestMethod(test) ::= <<
|
||||
/* this file and method are generated, any edit will be overwritten by the next generation */
|
||||
String test<test.name>(String input) throws Exception {
|
||||
String grammar = <test.grammar.lines:{ line | "<line>};separator="\\n\" +\n", wrap, anchor>";
|
||||
return execParser("<test.grammar.grammarName>.g4", grammar, "<test.grammar.grammarName>Parser", "<test.grammar.grammarName>Lexer", "<test.startRule>", input, <test.debug>);
|
||||
}
|
||||
|
||||
>>
|
||||
|
||||
ConcreteParserTestMethod(test) ::= <<
|
||||
/* this file and method are generated, any edit will be overwritten by the next generation */
|
||||
@Test
|
||||
public void test<test.name>() throws Exception {
|
||||
String found = test<test.baseName>("<test.input>");
|
||||
assertEquals("<test.expectedOutput>", found);
|
||||
<if(test.expectedErrors)>
|
||||
assertEquals("<test.expectedErrors>", this.stderrDuringParse);
|
||||
<else>
|
||||
assertNull(this.stderrDuringParse);
|
||||
<endif>
|
||||
}
|
||||
|
||||
>>
|
||||
|
||||
testAnnotations(test) ::= <%
|
||||
@Test
|
||||
<if(test.Options.("Ignore"))>
|
||||
<\n>@Ignore(<writeStringLiteral(test.Options.("Ignore"))>)
|
||||
<elseif(IgnoredTests.(({<file.name>.<test.name>})))>
|
||||
<\n>@Ignore(<writeStringLiteral(IgnoredTests.(({<file.name>.<test.name>})))>)
|
||||
<endif>
|
||||
%>
|
||||
|
||||
buildStringLiteral(text, variable) ::= <<
|
||||
StringBuilder <variable>Builder = new StringBuilder(<strlen.(text)>);
|
||||
<lines.(text):{line|<variable>Builder.append("<escape.(line)>");}; separator="\n">
|
||||
String <variable> = <variable>Builder.toString();
|
||||
>>
|
||||
|
||||
writeStringLiteral(text) ::= <%
|
||||
<if(isEmpty.(text))>
|
||||
""
|
||||
<else>
|
||||
<writeLines(lines.(text))>
|
||||
<endif>
|
||||
%>
|
||||
|
||||
writeLines(textLines) ::= <%
|
||||
<if(rest(textLines))>
|
||||
<textLines:{line|
|
||||
<\n> "<escape.(line)>}; separator="\" +">"
|
||||
<else>
|
||||
"<escape.(first(textLines))>"
|
||||
<endif>
|
||||
%>
|
||||
|
||||
string(text) ::= <<
|
||||
"<escape.(text)>"
|
||||
>>
|
||||
|
||||
writeBoolean(o) ::= "<if(o && !isEmpty.(o))>true<else>false<endif>"
|
||||
|
||||
writeln(s) ::= <<Console.WriteLine(<s>);>>
|
||||
|
||||
write(s) ::= <<Console.Write(<s>);>>
|
||||
|
||||
False() ::= "false"
|
||||
|
||||
True() ::= "true"
|
||||
|
||||
Not(v) ::= "!<v>"
|
||||
|
||||
Assert(s) ::= <<Debug.Assert(<s>);>>
|
||||
|
||||
Cast(t,v) ::= "((<t>)<v>)"
|
||||
|
||||
Append(a,b) ::= "<a> + <b>"
|
||||
|
||||
Concat(a,b) ::= "<a><b>"
|
||||
|
||||
DeclareLocal(s,v) ::= "Object <s> = <v>;"
|
||||
|
||||
AssertIsList(v) ::= "System.Collections.IList __ttt__ = <v>;" // just use static type system
|
||||
|
||||
AssignLocal(s,v) ::= "<s> = <v>;"
|
||||
|
||||
InitIntMember(n,v) ::= <%int <n> = <v>;%>
|
||||
|
||||
InitBooleanMember(n,v) ::= <%bool <n> = <v>;%>
|
||||
|
||||
GetMember(n) ::= <%this.<n>%>
|
||||
|
||||
SetMember(n,v) ::= <%this.<n> = <v>;%>
|
||||
|
||||
AddMember(n,v) ::= <%this.<n> += <v>;%>
|
||||
|
||||
PlusMember(v,n) ::= <%<v> + this.<n>%>
|
||||
|
||||
MemberEquals(n,v) ::= <%this.<n> == <v>%>
|
||||
|
||||
ModMemberEquals(n,m,v) ::= <%this.<n> % <m> == <v>%>
|
||||
|
||||
ModMemberNotEquals(n,m,v) ::= <%this.<n> % <m> != <v>%>
|
||||
|
||||
DumpDFA() ::= "this.DumpDFA();"
|
||||
|
||||
Pass() ::= ""
|
||||
|
||||
StringList() ::= "List\<String>"
|
||||
|
||||
BuildParseTrees() ::= "this.BuildParseTree = true;"
|
||||
|
||||
BailErrorStrategy() ::= <%ErrorHandler = new BailErrorStrategy();%>
|
||||
|
||||
ToStringTree(s) ::= <%<s>.ToStringTree(this)%>
|
||||
|
||||
Column() ::= "this.Column"
|
||||
|
||||
Text() ::= "this.Text"
|
||||
|
||||
ValEquals(a,b) ::= <%<a>==<b>%>
|
||||
|
||||
TextEquals(a) ::= <%this.Text.Equals("<a>")%>
|
||||
|
||||
PlusText(a) ::= <%"<a>" + this.Text%>
|
||||
|
||||
InputText() ::= "this.TokenStream.GetText()"
|
||||
|
||||
LTEquals(i, v) ::= <%this.TokenStream.Lt(<i>).Text.Equals(<v>)%>
|
||||
|
||||
LANotEquals(i, v) ::= <%this.InputStream.La(<i>)!=<v>%>
|
||||
|
||||
TokenStartColumnEquals(i) ::= <%this.TokenStartColumn==<i>%>
|
||||
|
||||
ImportListener(X) ::= ""
|
||||
|
||||
GetExpectedTokenNames() ::= "this.GetExpectedTokens().ToString(this.Vocabulary)"
|
||||
|
||||
RuleInvocationStack() ::= "GetRuleInvocationStackAsString()"
|
||||
|
||||
LL_EXACT_AMBIG_DETECTION() ::= <<Interpreter.PredictionMode = PredictionMode.LlExactAmbigDetection;>>
|
||||
|
||||
ParserPropertyMember() ::= <<
|
||||
@members {
|
||||
bool Property() {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
>>
|
||||
|
||||
PositionAdjustingLexer() ::= <<
|
||||
|
||||
public override IToken NextToken() {
|
||||
if (!(Interpreter is PositionAdjustingLexerATNSimulator)) {
|
||||
Interpreter = new PositionAdjustingLexerATNSimulator(this, _ATN);
|
||||
}
|
||||
|
||||
return base.NextToken();
|
||||
}
|
||||
|
||||
public override IToken Emit() {
|
||||
switch (Type) {
|
||||
case TOKENS:
|
||||
HandleAcceptPositionForKeyword("tokens");
|
||||
break;
|
||||
|
||||
case LABEL:
|
||||
HandleAcceptPositionForIdentifier();
|
||||
break;
|
||||
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
||||
return base.Emit();
|
||||
}
|
||||
|
||||
private bool HandleAcceptPositionForIdentifier() {
|
||||
string tokenText = this.Text;
|
||||
int identifierLength = 0;
|
||||
while (identifierLength \< tokenText.Length && IsIdentifierChar(tokenText[identifierLength])) {
|
||||
identifierLength++;
|
||||
}
|
||||
|
||||
if (InputStream.Index > TokenStartCharIndex + identifierLength) {
|
||||
int offset = identifierLength - 1;
|
||||
getInterpreter().ResetAcceptPosition((ICharStream)InputStream, TokenStartCharIndex + offset, TokenStartLine, TokenStartColumn + offset);
|
||||
return true;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
private bool HandleAcceptPositionForKeyword(string keyword) {
|
||||
if (InputStream.Index > TokenStartCharIndex + keyword.Length) {
|
||||
int offset = keyword.Length - 1;
|
||||
getInterpreter().ResetAcceptPosition((ICharStream)InputStream, TokenStartCharIndex + offset, TokenStartLine, TokenStartColumn + offset);
|
||||
return true;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
public PositionAdjustingLexerATNSimulator getInterpreter() {
|
||||
return (PositionAdjustingLexerATNSimulator)base.Interpreter;
|
||||
}
|
||||
|
||||
private static bool IsIdentifierChar(char c) {
|
||||
return Char.IsLetterOrDigit(c) || c == '_';
|
||||
}
|
||||
|
||||
public class PositionAdjustingLexerATNSimulator : LexerATNSimulator {
|
||||
|
||||
public PositionAdjustingLexerATNSimulator(Lexer recog, ATN atn)
|
||||
: base(recog, atn)
|
||||
{
|
||||
}
|
||||
|
||||
public void ResetAcceptPosition(ICharStream input, int index, int line, int column) {
|
||||
input.Seek(index);
|
||||
this.Line = line;
|
||||
this.Column = column;
|
||||
Consume(input);
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
>>
|
||||
|
||||
BasicListener(X) ::= <<
|
||||
public class LeafListener : TBaseListener {
|
||||
public override void VisitTerminal(ITerminalNode node) {
|
||||
Console.WriteLine(node.Symbol.Text);
|
||||
}
|
||||
}
|
||||
>>
|
||||
|
||||
WalkListener(s) ::= <<
|
||||
ParseTreeWalker walker = new ParseTreeWalker();
|
||||
walker.Walk(new LeafListener(), <s>);
|
||||
>>
|
||||
|
||||
TreeNodeWithAltNumField(X) ::= <<
|
||||
@parser::members {
|
||||
public class MyRuleNode : ParserRuleContext {
|
||||
public int altNum;
|
||||
public MyRuleNode(ParserRuleContext parent, int invokingStateNumber): base(parent, invokingStateNumber)
|
||||
{
|
||||
}
|
||||
public override int getAltNumber() { return altNum; }
|
||||
public override void setAltNumber(int altNum) { this.altNum = altNum; }
|
||||
}
|
||||
}
|
||||
>>
|
||||
|
||||
TokenGetterListener(X) ::= <<
|
||||
public class LeafListener : TBaseListener {
|
||||
public override void ExitA(TParser.AContext ctx) {
|
||||
if (ctx.ChildCount==2)
|
||||
{
|
||||
StringBuilder sb = new StringBuilder ("[");
|
||||
foreach (ITerminalNode node in ctx.INT ()) {
|
||||
sb.Append (node.ToString ());
|
||||
sb.Append (", ");
|
||||
}
|
||||
sb.Length = sb.Length - 2;
|
||||
sb.Append ("]");
|
||||
Console.Write ("{0} {1} {2}", ctx.INT (0).Symbol.Text,
|
||||
ctx.INT (1).Symbol.Text, sb.ToString());
|
||||
}
|
||||
else
|
||||
Console.WriteLine(ctx.ID().Symbol);
|
||||
}
|
||||
}
|
||||
>>
|
||||
|
||||
RuleGetterListener(X) ::= <<
|
||||
public class LeafListener : TBaseListener {
|
||||
public override void ExitA(TParser.AContext ctx) {
|
||||
if (ctx.ChildCount==2) {
|
||||
Console.Write("{0} {1} {2}",ctx.b(0).Start.Text,
|
||||
ctx.b(1).Start.Text,ctx.b()[0].Start.Text);
|
||||
} else
|
||||
Console.WriteLine(ctx.b(0).Start.Text);
|
||||
}
|
||||
}
|
||||
>>
|
||||
|
||||
|
||||
LRListener(X) ::= <<
|
||||
public class LeafListener : TBaseListener {
|
||||
public override void ExitE(TParser.EContext ctx) {
|
||||
if (ctx.ChildCount==3) {
|
||||
Console.Write("{0} {1} {2}\n",ctx.e(0).Start.Text,
|
||||
ctx.e(1).Start.Text, ctx.e()[0].Start.Text);
|
||||
} else
|
||||
Console.WriteLine(ctx.INT().Symbol.Text);
|
||||
}
|
||||
}
|
||||
>>
|
||||
|
||||
LRWithLabelsListener(X) ::= <<
|
||||
public class LeafListener : TBaseListener {
|
||||
public override void ExitCall(TParser.CallContext ctx) {
|
||||
Console.Write("{0} {1}",ctx.e().Start.Text,ctx.eList());
|
||||
}
|
||||
public override void ExitInt(TParser.IntContext ctx) {
|
||||
Console.WriteLine(ctx.INT().Symbol.Text);
|
||||
}
|
||||
}
|
||||
>>
|
||||
|
||||
DeclareContextListGettersFunction() ::= <<
|
||||
void foo() {
|
||||
SContext s = null;
|
||||
AContext[] a = s.a();
|
||||
BContext[] b = s.b();
|
||||
}
|
||||
>>
|
||||
|
||||
Declare_foo() ::= <<public void foo() {Console.WriteLine("foo");}>>
|
||||
|
||||
Invoke_foo() ::= "this.foo();"
|
||||
|
||||
Declare_pred() ::= <<bool pred(bool v) {
|
||||
Console.WriteLine("eval="+v.ToString().ToLower());
|
||||
return v;
|
||||
}
|
||||
>>
|
||||
|
||||
Invoke_pred(v) ::= <<this.pred(<v>)>>
|
||||
|
||||
isEmpty ::= [
|
||||
"": true,
|
||||
default: false
|
||||
]
|
|
@ -0,0 +1,428 @@
|
|||
TestFile(file) ::= <<
|
||||
/* This file is generated by TestGenerator, any edits will be overwritten by the next generation. */
|
||||
package org.antlr.v4.test.runtime.java;
|
||||
|
||||
import org.junit.Ignore;
|
||||
import org.junit.Test;
|
||||
|
||||
import static org.junit.Assert.*;
|
||||
|
||||
<if(file.Options.("ImportGrammar"))>
|
||||
import org.antlr.v4.tool.Grammar;
|
||||
<endif>
|
||||
|
||||
public class Test<file.name> extends BaseTest {
|
||||
|
||||
<file.tests:{test | <test>}; separator="\n", wrap, anchor>
|
||||
|
||||
}<\n>
|
||||
>>
|
||||
|
||||
LexerTestMethod(test) ::= <<
|
||||
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
|
||||
<testAnnotations(test)>
|
||||
public void test<test.name>() throws Exception {
|
||||
mkdir(tmpdir);
|
||||
|
||||
<test.SlaveGrammars:{grammar |
|
||||
String slave_<grammar> =<writeStringLiteral(test.SlaveGrammars.(grammar))>;
|
||||
writeFile(tmpdir, "<grammar>.g4", slave_<grammar>);
|
||||
}; separator="\n">
|
||||
<test.Grammar:{grammar |
|
||||
<buildStringLiteral(test.Grammar.(grammar), "grammar")>
|
||||
|
||||
<if(test.AfterGrammar)>
|
||||
<test.AfterGrammar>
|
||||
<endif>
|
||||
String input =<writeStringLiteral(test.Input)>;
|
||||
String found = execLexer("<grammar>.g4", grammar, "<grammar><if(test.Options.("CombinedGrammar"))>Lexer<endif>", input, <writeBoolean(test.Options.("ShowDFA"))>);
|
||||
assertEquals(<writeStringLiteral(test.Output)>, found);
|
||||
<if(!isEmpty.(test.Errors))>
|
||||
assertEquals(<writeStringLiteral(test.Errors)>, this.stderrDuringParse);
|
||||
<else>
|
||||
assertNull(this.stderrDuringParse);
|
||||
<endif>
|
||||
}>
|
||||
}
|
||||
|
||||
>>
|
||||
|
||||
CompositeLexerTestMethod(test) ::= <<
|
||||
<LexerTestMethod(test)>
|
||||
>>
|
||||
|
||||
ParserTestMethod(test) ::= <<
|
||||
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
|
||||
<testAnnotations(test)>
|
||||
public void test<test.name>() throws Exception {
|
||||
mkdir(tmpdir);
|
||||
|
||||
<test.SlaveGrammars:{grammar |
|
||||
String slave_<grammar> =<writeStringLiteral(test.SlaveGrammars.(grammar))>;
|
||||
<if(test.Options.("SlaveIsLexer"))>
|
||||
rawGenerateAndBuildRecognizer("<grammar>.g4", slave_<grammar>, null, "<grammar>");
|
||||
<else>
|
||||
writeFile(tmpdir, "<grammar>.g4", slave_<grammar>);
|
||||
<endif>
|
||||
}; separator="\n">
|
||||
<test.Grammar:{grammar |
|
||||
<buildStringLiteral(test.Grammar.(grammar), "grammar")>
|
||||
|
||||
<test.AfterGrammar>
|
||||
|
||||
String input =<writeStringLiteral(test.Input)>;
|
||||
String found = execParser("<grammar>.g4", grammar, "<grammar>Parser", "<grammar>Lexer", "<test.Rule>", input, <writeBoolean(test.Options.("Debug"))>);
|
||||
assertEquals(<writeStringLiteral(test.Output)>, found);
|
||||
<if(!isEmpty.(test.Errors))>
|
||||
assertEquals(<writeStringLiteral(test.Errors)>, this.stderrDuringParse);
|
||||
<else>
|
||||
assertNull(this.stderrDuringParse);
|
||||
<endif>
|
||||
}>
|
||||
}
|
||||
|
||||
>>
|
||||
|
||||
CompositeParserTestMethod(test) ::= <<
|
||||
<ParserTestMethod(test)>
|
||||
>>
|
||||
|
||||
AbstractParserTestMethod(test) ::= <<
|
||||
String test<test.name>(String input) throws Exception {
|
||||
String grammar = <test.grammar.lines:{ line | "<line>};separator="\\n\" +\n", wrap, anchor>";
|
||||
return execParser("<test.grammar.grammarName>.g4", grammar, "<test.grammar.grammarName>Parser", "<test.grammar.grammarName>Lexer", "<test.startRule>", input, <test.debug>);
|
||||
}
|
||||
|
||||
>>
|
||||
|
||||
ConcreteParserTestMethod(test) ::= <<
|
||||
<testAnnotations(test)>
|
||||
public void test<test.name>() throws Exception {
|
||||
String found = test<test.baseName>("<test.input>");
|
||||
assertEquals("<test.expectedOutput>", found);
|
||||
<if(test.expectedErrors)>
|
||||
assertEquals("<test.expectedErrors>", this.stderrDuringParse);
|
||||
<else>
|
||||
assertNull(this.stderrDuringParse);
|
||||
<endif>
|
||||
}
|
||||
|
||||
>>
|
||||
|
||||
testAnnotations(test) ::= <%
|
||||
@Test
|
||||
<if(test.Options.("Timeout"))>
|
||||
(timeout = <test.Options.("Timeout")>)
|
||||
<endif>
|
||||
<if(test.Options.("Ignore"))>
|
||||
<\n>@Ignore(<writeStringLiteral(test.Options.("Ignore"))>)
|
||||
<elseif(IgnoredTests.(({<file.name>.<test.name>})))>
|
||||
<\n>@Ignore(<writeStringLiteral(IgnoredTests.(({<file.name>.<test.name>})))>)
|
||||
<endif>
|
||||
%>
|
||||
|
||||
buildStringLiteral(text, variable) ::= <<
|
||||
StringBuilder <variable>Builder = new StringBuilder(<strlen.(text)>);
|
||||
<lines.(text):{line|<variable>Builder.append("<escape.(line)>");}; separator="\n">
|
||||
String <variable> = <variable>Builder.toString();
|
||||
>>
|
||||
|
||||
writeStringLiteral(text) ::= <%
|
||||
<if(isEmpty.(text))>
|
||||
""
|
||||
<else>
|
||||
<writeLines(lines.(text))>
|
||||
<endif>
|
||||
%>
|
||||
|
||||
writeLines(textLines) ::= <%
|
||||
<if(rest(textLines))>
|
||||
<textLines:{line|
|
||||
<\n> "<escape.(line)>}; separator="\" +">"
|
||||
<else>
|
||||
"<escape.(first(textLines))>"
|
||||
<endif>
|
||||
%>
|
||||
|
||||
string(text) ::= <<
|
||||
"<escape.(text)>"
|
||||
>>
|
||||
|
||||
writeBoolean(o) ::= "<if(o && !isEmpty.(o))>true<else>false<endif>"
|
||||
|
||||
writeln(s) ::= <<System.out.println(<s>);>>
|
||||
|
||||
write(s) ::= <<System.out.print(<s>);>>
|
||||
|
||||
False() ::= "false"
|
||||
|
||||
True() ::= "true"
|
||||
|
||||
Not(v) ::= "!<v>"
|
||||
|
||||
Assert(s) ::= <<assert(<s>);>>
|
||||
|
||||
Cast(t,v) ::= "((<t>)<v>)"
|
||||
|
||||
Append(a,b) ::= "<a> + <b>"
|
||||
|
||||
Concat(a,b) ::= "<a><b>"
|
||||
|
||||
DeclareLocal(s,v) ::= "Object <s> = <v>;"
|
||||
|
||||
AssertIsList(v) ::= "List\<?> __ttt__ = <v>;" // just use static type system
|
||||
|
||||
AssignLocal(s,v) ::= "<s> = <v>;"
|
||||
|
||||
InitIntMember(n,v) ::= <%int <n> = <v>;%>
|
||||
|
||||
InitBooleanMember(n,v) ::= <%boolean <n> = <v>;%>
|
||||
|
||||
GetMember(n) ::= <%this.<n>%>
|
||||
|
||||
SetMember(n,v) ::= <%this.<n> = <v>;%>
|
||||
|
||||
AddMember(n,v) ::= <%this.<n> += <v>;%>
|
||||
|
||||
PlusMember(v,n) ::= <%<v> + this.<n>%>
|
||||
|
||||
MemberEquals(n,v) ::= <%this.<n> == <v>%>
|
||||
|
||||
ModMemberEquals(n,m,v) ::= <%this.<n> % <m> == <v>%>
|
||||
|
||||
ModMemberNotEquals(n,m,v) ::= <%this.<n> % <m> != <v>%>
|
||||
|
||||
DumpDFA() ::= "this.dumpDFA();"
|
||||
|
||||
Pass() ::= ""
|
||||
|
||||
StringList() ::= "List\<String>"
|
||||
|
||||
BuildParseTrees() ::= "setBuildParseTree(true);"
|
||||
|
||||
BailErrorStrategy() ::= <%setErrorHandler(new BailErrorStrategy());%>
|
||||
|
||||
ToStringTree(s) ::= <%<s>.toStringTree(this)%>
|
||||
|
||||
Column() ::= "this.getCharPositionInLine()"
|
||||
|
||||
Text() ::= "this.getText()"
|
||||
|
||||
ValEquals(a,b) ::= <%<a>==<b>%>
|
||||
|
||||
TextEquals(a) ::= <%this.getText().equals("<a>")%>
|
||||
|
||||
PlusText(a) ::= <%"<a>" + this.getText()%>
|
||||
|
||||
InputText() ::= "this._input.getText()"
|
||||
|
||||
LTEquals(i, v) ::= <%this._input.LT(<i>).getText().equals(<v>)%>
|
||||
|
||||
LANotEquals(i, v) ::= <%this._input.LA(<i>)!=<v>%>
|
||||
|
||||
TokenStartColumnEquals(i) ::= <%this._tokenStartCharPositionInLine==<i>%>
|
||||
|
||||
ImportListener(X) ::= ""
|
||||
|
||||
GetExpectedTokenNames() ::= "this.getExpectedTokens().toString(this.tokenNames)"
|
||||
|
||||
RuleInvocationStack() ::= "getRuleInvocationStack()"
|
||||
|
||||
LL_EXACT_AMBIG_DETECTION() ::= <<_interp.setPredictionMode(PredictionMode.LL_EXACT_AMBIG_DETECTION);>>
|
||||
|
||||
ParserPropertyMember() ::= <<
|
||||
@members {
|
||||
boolean Property() {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
>>
|
||||
|
||||
PositionAdjustingLexer() ::= <<
|
||||
|
||||
@Override
|
||||
public Token nextToken() {
|
||||
if (!(_interp instanceof PositionAdjustingLexerATNSimulator)) {
|
||||
_interp = new PositionAdjustingLexerATNSimulator(this, _ATN, _decisionToDFA, _sharedContextCache);
|
||||
}
|
||||
|
||||
return super.nextToken();
|
||||
}
|
||||
|
||||
@Override
|
||||
public Token emit() {
|
||||
switch (_type) {
|
||||
case TOKENS:
|
||||
handleAcceptPositionForKeyword("tokens");
|
||||
break;
|
||||
|
||||
case LABEL:
|
||||
handleAcceptPositionForIdentifier();
|
||||
break;
|
||||
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
||||
return super.emit();
|
||||
}
|
||||
|
||||
private boolean handleAcceptPositionForIdentifier() {
|
||||
String tokenText = getText();
|
||||
int identifierLength = 0;
|
||||
while (identifierLength \< tokenText.length() && isIdentifierChar(tokenText.charAt(identifierLength))) {
|
||||
identifierLength++;
|
||||
}
|
||||
|
||||
if (getInputStream().index() > _tokenStartCharIndex + identifierLength) {
|
||||
int offset = identifierLength - 1;
|
||||
getInterpreter().resetAcceptPosition(getInputStream(), _tokenStartCharIndex + offset, _tokenStartLine, _tokenStartCharPositionInLine + offset);
|
||||
return true;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
private boolean handleAcceptPositionForKeyword(String keyword) {
|
||||
if (getInputStream().index() > _tokenStartCharIndex + keyword.length()) {
|
||||
int offset = keyword.length() - 1;
|
||||
getInterpreter().resetAcceptPosition(getInputStream(), _tokenStartCharIndex + offset, _tokenStartLine, _tokenStartCharPositionInLine + offset);
|
||||
return true;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
@Override
|
||||
public PositionAdjustingLexerATNSimulator getInterpreter() {
|
||||
return (PositionAdjustingLexerATNSimulator)super.getInterpreter();
|
||||
}
|
||||
|
||||
private static boolean isIdentifierChar(char c) {
|
||||
return Character.isLetterOrDigit(c) || c == '_';
|
||||
}
|
||||
|
||||
protected static class PositionAdjustingLexerATNSimulator extends LexerATNSimulator {
|
||||
|
||||
public PositionAdjustingLexerATNSimulator(Lexer recog, ATN atn,
|
||||
DFA[] decisionToDFA,
|
||||
PredictionContextCache sharedContextCache)
|
||||
{
|
||||
super(recog, atn, decisionToDFA, sharedContextCache);
|
||||
}
|
||||
|
||||
protected void resetAcceptPosition(CharStream input, int index, int line, int charPositionInLine) {
|
||||
input.seek(index);
|
||||
this.line = line;
|
||||
this.charPositionInLine = charPositionInLine;
|
||||
consume(input);
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
>>
|
||||
|
||||
BasicListener(X) ::= <<
|
||||
public static class LeafListener extends TBaseListener {
|
||||
public void visitTerminal(TerminalNode node) {
|
||||
System.out.println(node.getSymbol().getText());
|
||||
}
|
||||
}
|
||||
>>
|
||||
|
||||
WalkListener(s) ::= <<
|
||||
ParseTreeWalker walker = new ParseTreeWalker();
|
||||
walker.walk(new LeafListener(), <s>);
|
||||
>>
|
||||
|
||||
TreeNodeWithAltNumField(X) ::= <<
|
||||
@parser::members {
|
||||
public static class MyRuleNode extends ParserRuleContext {
|
||||
public int altNum;
|
||||
public MyRuleNode(ParserRuleContext parent, int invokingStateNumber) {
|
||||
super(parent, invokingStateNumber);
|
||||
}
|
||||
@Override public int getAltNumber() { return altNum; }
|
||||
@Override public void setAltNumber(int altNum) { this.altNum = altNum; }
|
||||
}
|
||||
}
|
||||
>>
|
||||
|
||||
TokenGetterListener(X) ::= <<
|
||||
public static class LeafListener extends TBaseListener {
|
||||
public void exitA(TParser.AContext ctx) {
|
||||
if (ctx.getChildCount()==2)
|
||||
System.out.printf("%s %s %s",ctx.INT(0).getSymbol().getText(),
|
||||
ctx.INT(1).getSymbol().getText(),ctx.INT());
|
||||
else
|
||||
System.out.println(ctx.ID().getSymbol());
|
||||
}
|
||||
}
|
||||
>>
|
||||
|
||||
RuleGetterListener(X) ::= <<
|
||||
public static class LeafListener extends TBaseListener {
|
||||
public void exitA(TParser.AContext ctx) {
|
||||
if (ctx.getChildCount()==2) {
|
||||
System.out.printf("%s %s %s",ctx.b(0).start.getText(),
|
||||
ctx.b(1).start.getText(),ctx.b().get(0).start.getText());
|
||||
} else
|
||||
System.out.println(ctx.b(0).start.getText());
|
||||
}
|
||||
}
|
||||
>>
|
||||
|
||||
|
||||
LRListener(X) ::= <<
|
||||
public static class LeafListener extends TBaseListener {
|
||||
public void exitE(TParser.EContext ctx) {
|
||||
if (ctx.getChildCount()==3) {
|
||||
System.out.printf("%s %s %s\n",ctx.e(0).start.getText(),
|
||||
ctx.e(1).start.getText(), ctx.e().get(0).start.getText());
|
||||
} else
|
||||
System.out.println(ctx.INT().getSymbol().getText());
|
||||
}
|
||||
}
|
||||
>>
|
||||
|
||||
LRWithLabelsListener(X) ::= <<
|
||||
public static class LeafListener extends TBaseListener {
|
||||
public void exitCall(TParser.CallContext ctx) {
|
||||
System.out.printf("%s %s",ctx.e().start.getText(),ctx.eList());
|
||||
}
|
||||
public void exitInt(TParser.IntContext ctx) {
|
||||
System.out.println(ctx.INT().getSymbol().getText());
|
||||
}
|
||||
}
|
||||
>>
|
||||
|
||||
DeclareContextListGettersFunction() ::= <<
|
||||
void foo() {
|
||||
SContext s = null;
|
||||
List\<? extends AContext> a = s.a();
|
||||
List\<? extends BContext> b = s.b();
|
||||
}
|
||||
>>
|
||||
|
||||
Declare_foo() ::= <<
|
||||
public void foo() {System.out.println("foo");}
|
||||
>>
|
||||
|
||||
Invoke_foo() ::= "foo();"
|
||||
|
||||
Declare_pred() ::= <<boolean pred(boolean v) {
|
||||
System.out.println("eval="+v);
|
||||
return v;
|
||||
}
|
||||
>>
|
||||
|
||||
Invoke_pred(v) ::= <<this.pred(<v>)>>
|
||||
|
||||
IgnoredTests ::= [
|
||||
default: false
|
||||
]
|
||||
|
||||
isEmpty ::= [
|
||||
"": true,
|
||||
default: false
|
||||
]
|
|
@ -0,0 +1,434 @@
|
|||
IgnoredTests ::= [
|
||||
default: false
|
||||
]
|
||||
|
||||
TestFile(file) ::= <<
|
||||
/* This file is generated by TestGenerator, any edits will be overwritten by the next generation. */
|
||||
package org.antlr.v4.test.runtime.javascript.chrome;
|
||||
|
||||
import org.junit.Ignore;
|
||||
import org.junit.Test;
|
||||
|
||||
import static org.junit.Assert.*;
|
||||
|
||||
<if(file.Options.("ImportErrorQueue"))>
|
||||
import org.antlr.v4.test.runtime.java.ErrorQueue;
|
||||
<endif>
|
||||
<if(file.Options.("ImportGrammar"))>
|
||||
import org.antlr.v4.tool.Grammar;
|
||||
<endif>
|
||||
|
||||
@SuppressWarnings("unused")
|
||||
public class Test<file.name> extends BaseTest {
|
||||
|
||||
<file.tests:{test | <test>}; separator="\n", wrap, anchor>
|
||||
|
||||
}<\n>
|
||||
>>
|
||||
|
||||
LexerTestMethod(test) ::= <<
|
||||
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
|
||||
<testAnnotations(test)>
|
||||
public void test<test.name>() throws Exception {
|
||||
mkdir(tmpdir);
|
||||
<test.SlaveGrammars:{grammar |
|
||||
String slave_<grammar> =<writeStringLiteral(test.SlaveGrammars.(grammar))>;
|
||||
writeFile(tmpdir, "<grammar>.g4", slave_<grammar>);
|
||||
}; separator="\n">
|
||||
|
||||
<test.Grammar:{grammar |
|
||||
<buildStringLiteral(test.Grammar.(grammar), "grammar")>
|
||||
<test.afterGrammar>
|
||||
String input =<writeStringLiteral(test.Input)>;
|
||||
String found = execLexer("<grammar>.g4", grammar, "<grammar><if(test.Options.("CombinedGrammar"))>Lexer<endif>", input, <writeBoolean(test.Options.("ShowDFA"))>);
|
||||
assertEquals(<writeStringLiteral(test.Output)>, found);
|
||||
<if(!isEmpty.(test.Errors))>
|
||||
assertEquals(<writeStringLiteral(test.Errors)>, this.stderrDuringParse);
|
||||
<else>
|
||||
assertNull(this.stderrDuringParse);
|
||||
<endif>
|
||||
}>
|
||||
}
|
||||
>>
|
||||
|
||||
CompositeLexerTestMethod(test) ::= <<
|
||||
<LexerTestMethod(test)>
|
||||
>>
|
||||
|
||||
ParserTestMethod(test) ::= <<
|
||||
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
|
||||
<testAnnotations(test)>
|
||||
public void test<test.name>() throws Exception {
|
||||
mkdir(tmpdir);
|
||||
<test.SlaveGrammars:{grammar |
|
||||
String slave_<grammar> =<writeStringLiteral(test.SlaveGrammars.(grammar))>;
|
||||
<if(test.Options.("SlaveIsLexer"))>
|
||||
rawGenerateAndBuildRecognizer("<grammar>.g4", slave_<grammar>, null, "<grammar>");
|
||||
<else>
|
||||
writeFile(tmpdir, "<grammar>.g4", slave_<grammar>);
|
||||
<endif>
|
||||
}; separator="\n">
|
||||
<test.Grammar:{grammar |
|
||||
<buildStringLiteral(test.Grammar.(grammar), "grammar")>
|
||||
<test.afterGrammar>
|
||||
String input =<writeStringLiteral(test.Input)>;
|
||||
String found = execParser("<grammar>.g4", grammar, "<grammar>Parser", "<grammar>Lexer",
|
||||
"<grammar>Listener", "<grammar>Visitor",
|
||||
"<test.Rule>", input, <writeBoolean(test.Options.("Debug"))>);
|
||||
assertEquals(<writeStringLiteral(test.Output)>, found);
|
||||
<if(!isEmpty.(test.Errors))>
|
||||
assertEquals(<writeStringLiteral(test.Errors)>, this.stderrDuringParse);
|
||||
<else>
|
||||
assertNull(this.stderrDuringParse);
|
||||
<endif>
|
||||
}>
|
||||
}
|
||||
>>
|
||||
|
||||
CompositeParserTestMethod(test) ::= <<
|
||||
<ParserTestMethod(test)>
|
||||
>>
|
||||
|
||||
AbstractParserTestMethod(test) ::= <<
|
||||
/* this file and method are generated, any edit will be overwritten by the next generation */
|
||||
String test<test.name>(String input) throws Exception {
|
||||
String grammar = <test.grammar.lines:{ line | "<line>};separator="\\n\" +\n", wrap, anchor>";
|
||||
return execParser("<test.grammar.grammarName>.g4", grammar, "<test.grammar.grammarName>Parser",
|
||||
"<test.grammar.grammarName>Listener", "<test.grammar.grammarName>Visitor",
|
||||
"<test.grammar.grammarName>Lexer", "<test.startRule>", input, <test.debug>);
|
||||
}
|
||||
|
||||
>>
|
||||
|
||||
ConcreteParserTestMethod(test) ::= <<
|
||||
/* this file and method are generated, any edit will be overwritten by the next generation */
|
||||
@Test
|
||||
public void test<test.name>() throws Exception {
|
||||
String found = test<test.baseName>("<test.input>");
|
||||
assertEquals("<test.expectedOutput>", found);
|
||||
<if(test.expectedErrors)>
|
||||
assertEquals("<test.expectedErrors>", this.stderrDuringParse);
|
||||
<else>
|
||||
assertNull(this.stderrDuringParse);
|
||||
<endif>
|
||||
}
|
||||
|
||||
>>
|
||||
|
||||
testAnnotations(test) ::= <%
|
||||
@Test
|
||||
<if(test.Options.("Ignore"))>
|
||||
<\n>@Ignore(<writeStringLiteral(test.Options.("Ignore"))>)
|
||||
<elseif(IgnoredTests.(({<file.name>.<test.name>})))>
|
||||
<\n>@Ignore(<writeStringLiteral(IgnoredTests.(({<file.name>.<test.name>})))>)
|
||||
<endif>
|
||||
%>
|
||||
|
||||
buildStringLiteral(text, variable) ::= <<
|
||||
StringBuilder <variable>Builder = new StringBuilder(<strlen.(text)>);
|
||||
<lines.(text):{line|<variable>Builder.append("<escape.(line)>");}; separator="\n">
|
||||
String <variable> = <variable>Builder.toString();
|
||||
>>
|
||||
|
||||
writeStringLiteral(text) ::= <%
|
||||
<if(isEmpty.(text))>
|
||||
""
|
||||
<else>
|
||||
<writeLines(lines.(text))>
|
||||
<endif>
|
||||
%>
|
||||
|
||||
writeLines(textLines) ::= <%
|
||||
<if(rest(textLines))>
|
||||
<textLines:{line|
|
||||
<\n> "<escape.(line)>}; separator="\" +">"
|
||||
<else>
|
||||
"<escape.(first(textLines))>"
|
||||
<endif>
|
||||
%>
|
||||
|
||||
string(text) ::= <<
|
||||
"<escape.(text)>"
|
||||
>>
|
||||
|
||||
writeBoolean(o) ::= "<if(o && !isEmpty.(o))>true<else>false<endif>"
|
||||
|
||||
writeln(s) ::= <<document.getElementById('output').value += <s> + '\\n';>>
|
||||
|
||||
write(s) ::= <<document.getElementById('output').value += <s>;>>
|
||||
|
||||
False() ::= "false"
|
||||
|
||||
True() ::= "true"
|
||||
|
||||
Not(v) ::= "!<v>"
|
||||
|
||||
Assert(s) ::= ""
|
||||
|
||||
Cast(t,v) ::= "<v>"
|
||||
|
||||
Append(a,b) ::= "<a> + <b>"
|
||||
|
||||
Concat(a,b) ::= "<a><b>"
|
||||
|
||||
DeclareLocal(s,v) ::= "var <s> = <v>;"
|
||||
|
||||
AssertIsList(v) ::= <<if ( !(v instanceof Array) ) {throw "value is not an array";}>>
|
||||
|
||||
AssignLocal(s,v) ::= "<s> = <v>;"
|
||||
|
||||
InitIntMember(n,v) ::= <%this.<n> = <v>;%>
|
||||
|
||||
InitBooleanMember(n,v) ::= <%this.<n> = <v>;%>
|
||||
|
||||
GetMember(n) ::= <%this.<n>%>
|
||||
|
||||
SetMember(n,v) ::= <%this.<n> = <v>;%>
|
||||
|
||||
AddMember(n,v) ::= <%this.<n> += <v>;%>
|
||||
|
||||
PlusMember(v,n) ::= <%<v> + this.<n>%>
|
||||
|
||||
MemberEquals(n,v) ::= <%this.<n> === <v>%>
|
||||
|
||||
ModMemberEquals(n,m,v) ::= <%this.<n> % <m> === <v>%>
|
||||
|
||||
ModMemberNotEquals(n,m,v) ::= <%this.<n> % <m> != <v>%>
|
||||
|
||||
DumpDFA() ::= "this.dumpDFA();"
|
||||
|
||||
Pass() ::= ""
|
||||
|
||||
StringList() ::= "list"
|
||||
|
||||
BuildParseTrees() ::= "this.buildParseTrees = true;"
|
||||
|
||||
BailErrorStrategy() ::= <%this._errHandler = new antlr4.error.BailErrorStrategy();%>
|
||||
|
||||
ToStringTree(s) ::= <%<s>.toStringTree(null, this)%>
|
||||
|
||||
Column() ::= "this.column"
|
||||
|
||||
Text() ::= "this.text"
|
||||
|
||||
ValEquals(a,b) ::= <%<a>===<b>%>
|
||||
|
||||
TextEquals(a) ::= <%this.text==="<a>"%>
|
||||
|
||||
PlusText(a) ::= <%"<a>" + this.text%>
|
||||
|
||||
InputText() ::= "this._input.getText()"
|
||||
|
||||
LTEquals(i, v) ::= <%this._input.LT(<i>).text===<v>%>
|
||||
|
||||
LANotEquals(i, v) ::= <%this._input.LA(<i>)!=<v>%>
|
||||
|
||||
TokenStartColumnEquals(i) ::= <%this._tokenStartColumn===<i>%>
|
||||
|
||||
ImportListener(X) ::= <<var <X>Listener = require('./<X>Listener').<X>Listener;>>
|
||||
|
||||
GetExpectedTokenNames() ::= "this.getExpectedTokens().toString(this.literalNames)"
|
||||
|
||||
RuleInvocationStack() ::= "antlr4.Utils.arrayToString(this.getRuleInvocationStack())"
|
||||
|
||||
LL_EXACT_AMBIG_DETECTION() ::= <<this._interp.predictionMode = antlr4.atn.PredictionMode.LL_EXACT_AMBIG_DETECTION;>>
|
||||
|
||||
ParserPropertyMember() ::= <<
|
||||
@members {
|
||||
this.Property = function() {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
>>
|
||||
|
||||
PositionAdjustingLexer() ::= <<
|
||||
|
||||
PositionAdjustingLexer.prototype.resetAcceptPosition = function(index, line, column) {
|
||||
this._input.seek(index);
|
||||
this.line = line;
|
||||
this.column = column;
|
||||
this._interp.consume(this._input);
|
||||
};
|
||||
|
||||
PositionAdjustingLexer.prototype.nextToken = function() {
|
||||
if (!("resetAcceptPosition" in this._interp)) {
|
||||
var lexer = this;
|
||||
this._interp.resetAcceptPosition = function(index, line, column) { lexer.resetAcceptPosition(index, line, column); };
|
||||
}
|
||||
return antlr4.Lexer.prototype.nextToken.call(this);
|
||||
};
|
||||
|
||||
PositionAdjustingLexer.prototype.emit = function() {
|
||||
switch(this._type) {
|
||||
case PositionAdjustingLexer.TOKENS:
|
||||
this.handleAcceptPositionForKeyword("tokens");
|
||||
break;
|
||||
case PositionAdjustingLexer.LABEL:
|
||||
this.handleAcceptPositionForIdentifier();
|
||||
break;
|
||||
}
|
||||
return antlr4.Lexer.prototype.emit.call(this);
|
||||
};
|
||||
|
||||
PositionAdjustingLexer.prototype.handleAcceptPositionForIdentifier = function() {
|
||||
var tokenText = this.text;
|
||||
var identifierLength = 0;
|
||||
while (identifierLength \< tokenText.length &&
|
||||
PositionAdjustingLexer.isIdentifierChar(tokenText[identifierLength])
|
||||
) {
|
||||
identifierLength += 1;
|
||||
}
|
||||
if (this._input.index > this._tokenStartCharIndex + identifierLength) {
|
||||
var offset = identifierLength - 1;
|
||||
this._interp.resetAcceptPosition(this._tokenStartCharIndex + offset,
|
||||
this._tokenStartLine, this._tokenStartColumn + offset);
|
||||
return true;
|
||||
} else {
|
||||
return false;
|
||||
}
|
||||
};
|
||||
|
||||
PositionAdjustingLexer.prototype.handleAcceptPositionForKeyword = function(keyword) {
|
||||
if (this._input.index > this._tokenStartCharIndex + keyword.length) {
|
||||
var offset = keyword.length - 1;
|
||||
this._interp.resetAcceptPosition(this._tokenStartCharIndex + offset,
|
||||
this._tokenStartLine, this._tokenStartColumn + offset);
|
||||
return true;
|
||||
} else {
|
||||
return false;
|
||||
}
|
||||
};
|
||||
|
||||
PositionAdjustingLexer.isIdentifierChar = function(c) {
|
||||
return c.match(/^[0-9a-zA-Z_]+$/);
|
||||
}
|
||||
|
||||
>>
|
||||
|
||||
BasicListener(X) ::= <<
|
||||
this.LeafListener = function() {
|
||||
this.visitTerminal = function(node) {
|
||||
document.getElementById('output').value += node.symbol.text + '\\n';
|
||||
};
|
||||
return this;
|
||||
};
|
||||
this.LeafListener.prototype = Object.create(<X>Listener.prototype);
|
||||
this.LeafListener.prototype.constructor = this.LeafListener;
|
||||
|
||||
>>
|
||||
|
||||
WalkListener(s) ::= <<
|
||||
var walker = new antlr4.tree.ParseTreeWalker();
|
||||
walker.walk(new this.LeafListener(), <s>);
|
||||
>>
|
||||
|
||||
TreeNodeWithAltNumField(X) ::= <<
|
||||
|
||||
@parser::header {
|
||||
MyRuleNode = function(parent, invokingState) {
|
||||
antlr4.ParserRuleContext.call(this, parent, invokingState);
|
||||
this.altNum = 0;
|
||||
return this;
|
||||
};
|
||||
|
||||
MyRuleNode.prototype = Object.create(antlr4.ParserRuleContext.prototype);
|
||||
MyRuleNode.prototype.constructor = MyRuleNode;
|
||||
}
|
||||
>>
|
||||
|
||||
TokenGetterListener(X) ::= <<
|
||||
this.LeafListener = function() {
|
||||
this.exitA = function(ctx) {
|
||||
var str;
|
||||
if(ctx.getChildCount()===2) {
|
||||
str = ctx.INT(0).symbol.text + ' ' + ctx.INT(1).symbol.text + ' ' + antlr4.Utils.arrayToString(ctx.INT());
|
||||
} else {
|
||||
str = ctx.ID().symbol.toString();
|
||||
}
|
||||
document.getElementById('output').value += str + '\\n';
|
||||
};
|
||||
return this;
|
||||
};
|
||||
this.LeafListener.prototype = Object.create(<X>Listener.prototype);
|
||||
this.LeafListener.prototype.constructor = this.LeafListener;
|
||||
|
||||
>>
|
||||
|
||||
RuleGetterListener(X) ::= <<
|
||||
this.LeafListener = function() {
|
||||
this.exitA = function(ctx) {
|
||||
var str;
|
||||
if(ctx.getChildCount()===2) {
|
||||
str = ctx.b(0).start.text + ' ' + ctx.b(1).start.text + ' ' + ctx.b()[0].start.text;
|
||||
} else {
|
||||
str = ctx.b(0).start.text;
|
||||
}
|
||||
document.getElementById('output').value += str + '\\n';
|
||||
};
|
||||
return this;
|
||||
};
|
||||
this.LeafListener.prototype = Object.create(<X>Listener.prototype);
|
||||
this.LeafListener.prototype.constructor = this.LeafListener;
|
||||
|
||||
>>
|
||||
|
||||
|
||||
LRListener(X) ::= <<
|
||||
this.LeafListener = function() {
|
||||
this.exitE = function(ctx) {
|
||||
var str;
|
||||
if(ctx.getChildCount()===3) {
|
||||
str = ctx.e(0).start.text + ' ' + ctx.e(1).start.text + ' ' + ctx.e()[0].start.text;
|
||||
} else {
|
||||
str = ctx.INT().symbol.text;
|
||||
}
|
||||
document.getElementById('output').value += str + '\\n';
|
||||
};
|
||||
return this;
|
||||
};
|
||||
this.LeafListener.prototype = Object.create(<X>Listener.prototype);
|
||||
this.LeafListener.prototype.constructor = this.LeafListener;
|
||||
|
||||
>>
|
||||
|
||||
LRWithLabelsListener(X) ::= <<
|
||||
this.LeafListener = function() {
|
||||
this.exitCall = function(ctx) {
|
||||
var str = ctx.e().start.text + ' ' + ctx.eList();
|
||||
document.getElementById('output').value += str + '\\n';
|
||||
};
|
||||
this.exitInt = function(ctx) {
|
||||
var str = ctx.INT().symbol.text;
|
||||
document.getElementById('output').value += str + '\\n';
|
||||
};
|
||||
return this;
|
||||
};
|
||||
this.LeafListener.prototype = Object.create(<X>Listener.prototype);
|
||||
this.LeafListener.prototype.constructor = this.LeafListener;
|
||||
|
||||
>>
|
||||
|
||||
DeclareContextListGettersFunction() ::= <<
|
||||
function foo() {
|
||||
var s = new SContext();
|
||||
var a = s.a();
|
||||
var b = s.b();
|
||||
};
|
||||
>>
|
||||
|
||||
Declare_foo() ::= "this.foo = function() {document.getElementById('output').value += 'foo' + '\\n';};"
|
||||
|
||||
Invoke_foo() ::= "this.foo();"
|
||||
|
||||
Declare_pred() ::= <<this.pred = function(v) {
|
||||
document.getElementById('output').value += 'eval=' + v.toString() + '\\n';
|
||||
return v;
|
||||
};
|
||||
>>
|
||||
|
||||
Invoke_pred(v) ::= <<this.pred(<v>)>>
|
||||
|
||||
isEmpty ::= [
|
||||
"": true,
|
||||
default: false
|
||||
]
|
|
@@ -0,0 +1,434 @@
|
|||
IgnoredTests ::= [
|
||||
default: false
|
||||
]
|
||||
|
||||
TestFile(file) ::= <<
|
||||
/* This file is generated by TestGenerator, any edits will be overwritten by the next generation. */
|
||||
package org.antlr.v4.test.runtime.javascript.explorer;
|
||||
|
||||
import org.junit.Ignore;
|
||||
import org.junit.Test;
|
||||
|
||||
import static org.junit.Assert.*;
|
||||
|
||||
<if(file.Options.("ImportErrorQueue"))>
|
||||
import org.antlr.v4.test.runtime.java.ErrorQueue;
|
||||
<endif>
|
||||
<if(file.Options.("ImportGrammar"))>
|
||||
import org.antlr.v4.tool.Grammar;
|
||||
<endif>
|
||||
|
||||
@SuppressWarnings("unused")
|
||||
public class Test<file.name> extends BaseTest {
|
||||
|
||||
<file.tests:{test | <test>}; separator="\n", wrap, anchor>
|
||||
|
||||
}<\n>
|
||||
>>
|
||||
|
||||
LexerTestMethod(test) ::= <<
|
||||
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
|
||||
<testAnnotations(test)>
|
||||
public void test<test.name>() throws Exception {
|
||||
mkdir(tmpdir);
|
||||
<test.SlaveGrammars:{grammar |
|
||||
String slave_<grammar> =<writeStringLiteral(test.SlaveGrammars.(grammar))>;
|
||||
writeFile(tmpdir, "<grammar>.g4", slave_<grammar>);
|
||||
}; separator="\n">
|
||||
|
||||
<test.Grammar:{grammar |
|
||||
<buildStringLiteral(test.Grammar.(grammar), "grammar")>
|
||||
<test.afterGrammar>
|
||||
String input =<writeStringLiteral(test.Input)>;
|
||||
String found = execLexer("<grammar>.g4", grammar, "<grammar><if(test.Options.("CombinedGrammar"))>Lexer<endif>", input, <writeBoolean(test.Options.("ShowDFA"))>);
|
||||
assertEquals(<writeStringLiteral(test.Output)>, found);
|
||||
<if(!isEmpty.(test.Errors))>
|
||||
assertEquals(<writeStringLiteral(test.Errors)>, this.stderrDuringParse);
|
||||
<else>
|
||||
assertNull(this.stderrDuringParse);
|
||||
<endif>
|
||||
}>
|
||||
}
|
||||
>>
|
||||
|
||||
CompositeLexerTestMethod(test) ::= <<
|
||||
<LexerTestMethod(test)>
|
||||
>>
|
||||
|
||||
ParserTestMethod(test) ::= <<
|
||||
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
|
||||
<testAnnotations(test)>
|
||||
public void test<test.name>() throws Exception {
|
||||
mkdir(tmpdir);
|
||||
<test.SlaveGrammars:{grammar |
|
||||
String slave_<grammar> =<writeStringLiteral(test.SlaveGrammars.(grammar))>;
|
||||
<if(test.Options.("SlaveIsLexer"))>
|
||||
rawGenerateAndBuildRecognizer("<grammar>.g4", slave_<grammar>, null, "<grammar>");
|
||||
<else>
|
||||
writeFile(tmpdir, "<grammar>.g4", slave_<grammar>);
|
||||
<endif>
|
||||
}; separator="\n">
|
||||
<test.Grammar:{grammar |
|
||||
<buildStringLiteral(test.Grammar.(grammar), "grammar")>
|
||||
<test.afterGrammar>
|
||||
String input =<writeStringLiteral(test.Input)>;
|
||||
String found = execParser("<grammar>.g4", grammar, "<grammar>Parser", "<grammar>Lexer",
|
||||
"<grammar>Listener", "<grammar>Visitor",
|
||||
"<test.Rule>", input, <writeBoolean(test.Options.("Debug"))>);
|
||||
assertEquals(<writeStringLiteral(test.Output)>, found);
|
||||
<if(!isEmpty.(test.Errors))>
|
||||
assertEquals(<writeStringLiteral(test.Errors)>, this.stderrDuringParse);
|
||||
<else>
|
||||
assertNull(this.stderrDuringParse);
|
||||
<endif>
|
||||
}>
|
||||
}
|
||||
>>
|
||||
|
||||
CompositeParserTestMethod(test) ::= <<
|
||||
<ParserTestMethod(test)>
|
||||
>>
|
||||
|
||||
AbstractParserTestMethod(test) ::= <<
|
||||
/* this file and method are generated, any edit will be overwritten by the next generation */
|
||||
String test<test.name>(String input) throws Exception {
|
||||
String grammar = <test.grammar.lines:{ line | "<line>};separator="\\n\" +\n", wrap, anchor>";
|
||||
return execParser("<test.grammar.grammarName>.g4", grammar, "<test.grammar.grammarName>Parser",
|
||||
"<test.grammar.grammarName>Listener", "<test.grammar.grammarName>Visitor",
|
||||
"<test.grammar.grammarName>Lexer", "<test.startRule>", input, <test.debug>);
|
||||
}
|
||||
|
||||
>>
|
||||
|
||||
ConcreteParserTestMethod(test) ::= <<
|
||||
/* this file and method are generated, any edit will be overwritten by the next generation */
|
||||
@Test
|
||||
public void test<test.name>() throws Exception {
|
||||
String found = test<test.baseName>("<test.input>");
|
||||
assertEquals("<test.expectedOutput>", found);
|
||||
<if(test.expectedErrors)>
|
||||
assertEquals("<test.expectedErrors>", this.stderrDuringParse);
|
||||
<else>
|
||||
assertNull(this.stderrDuringParse);
|
||||
<endif>
|
||||
}
|
||||
|
||||
>>
|
||||
|
||||
testAnnotations(test) ::= <%
|
||||
@Test
|
||||
<if(test.Options.("Ignore"))>
|
||||
<\n>@Ignore(<writeStringLiteral(test.Options.("Ignore"))>)
|
||||
<elseif(IgnoredTests.(({<file.name>.<test.name>})))>
|
||||
<\n>@Ignore(<writeStringLiteral(IgnoredTests.(({<file.name>.<test.name>})))>)
|
||||
<endif>
|
||||
%>
|
||||
|
||||
buildStringLiteral(text, variable) ::= <<
|
||||
StringBuilder <variable>Builder = new StringBuilder(<strlen.(text)>);
|
||||
<lines.(text):{line|<variable>Builder.append("<escape.(line)>");}; separator="\n">
|
||||
String <variable> = <variable>Builder.toString();
|
||||
>>
|
||||
|
||||
writeStringLiteral(text) ::= <%
|
||||
<if(isEmpty.(text))>
|
||||
""
|
||||
<else>
|
||||
<writeLines(lines.(text))>
|
||||
<endif>
|
||||
%>
|
||||
|
||||
writeLines(textLines) ::= <%
|
||||
<if(rest(textLines))>
|
||||
<textLines:{line|
|
||||
<\n> "<escape.(line)>}; separator="\" +">"
|
||||
<else>
|
||||
"<escape.(first(textLines))>"
|
||||
<endif>
|
||||
%>
|
||||
|
||||
string(text) ::= <<
|
||||
"<escape.(text)>"
|
||||
>>
|
||||
|
||||
writeBoolean(o) ::= "<if(o && !isEmpty.(o))>true<else>false<endif>"
|
||||
|
||||
writeln(s) ::= <<document.getElementById('output').value += <s> + '\\n';>>
|
||||
|
||||
write(s) ::= <<document.getElementById('output').value += <s>;>>
|
||||
|
||||
False() ::= "false"
|
||||
|
||||
True() ::= "true"
|
||||
|
||||
Not(v) ::= "!<v>"
|
||||
|
||||
Assert(s) ::= ""
|
||||
|
||||
Cast(t,v) ::= "<v>"
|
||||
|
||||
Append(a,b) ::= "<a> + <b>"
|
||||
|
||||
Concat(a,b) ::= "<a><b>"
|
||||
|
||||
DeclareLocal(s,v) ::= "var <s> = <v>;"
|
||||
|
||||
AssertIsList(v) ::= <<if ( !(v instanceof Array) ) {throw "value is not an array";}>>
|
||||
|
||||
AssignLocal(s,v) ::= "<s> = <v>;"
|
||||
|
||||
InitIntMember(n,v) ::= <%this.<n> = <v>;%>
|
||||
|
||||
InitBooleanMember(n,v) ::= <%this.<n> = <v>;%>
|
||||
|
||||
GetMember(n) ::= <%this.<n>%>
|
||||
|
||||
SetMember(n,v) ::= <%this.<n> = <v>;%>
|
||||
|
||||
AddMember(n,v) ::= <%this.<n> += <v>;%>
|
||||
|
||||
PlusMember(v,n) ::= <%<v> + this.<n>%>
|
||||
|
||||
MemberEquals(n,v) ::= <%this.<n> === <v>%>
|
||||
|
||||
ModMemberEquals(n,m,v) ::= <%this.<n> % <m> === <v>%>
|
||||
|
||||
ModMemberNotEquals(n,m,v) ::= <%this.<n> % <m> != <v>%>
|
||||
|
||||
DumpDFA() ::= "this.dumpDFA();"
|
||||
|
||||
Pass() ::= ""
|
||||
|
||||
StringList() ::= "list"
|
||||
|
||||
BuildParseTrees() ::= "this.buildParseTrees = true;"
|
||||
|
||||
BailErrorStrategy() ::= <%this._errHandler = new antlr4.error.BailErrorStrategy();%>
|
||||
|
||||
ToStringTree(s) ::= <%<s>.toStringTree(null, this)%>
|
||||
|
||||
Column() ::= "this.column"
|
||||
|
||||
Text() ::= "this.text"
|
||||
|
||||
ValEquals(a,b) ::= <%<a>===<b>%>
|
||||
|
||||
TextEquals(a) ::= <%this.text==="<a>"%>
|
||||
|
||||
PlusText(a) ::= <%"<a>" + this.text%>
|
||||
|
||||
InputText() ::= "this._input.getText()"
|
||||
|
||||
LTEquals(i, v) ::= <%this._input.LT(<i>).text===<v>%>
|
||||
|
||||
LANotEquals(i, v) ::= <%this._input.LA(<i>)!=<v>%>
|
||||
|
||||
TokenStartColumnEquals(i) ::= <%this._tokenStartColumn===<i>%>
|
||||
|
||||
ImportListener(X) ::= <<var <X>Listener = require('./<X>Listener').<X>Listener;>>
|
||||
|
||||
GetExpectedTokenNames() ::= "this.getExpectedTokens().toString(this.literalNames)"
|
||||
|
||||
RuleInvocationStack() ::= "antlr4.Utils.arrayToString(this.getRuleInvocationStack())"
|
||||
|
||||
LL_EXACT_AMBIG_DETECTION() ::= <<this._interp.predictionMode = antlr4.atn.PredictionMode.LL_EXACT_AMBIG_DETECTION;>>
|
||||
|
||||
ParserPropertyMember() ::= <<
|
||||
@members {
|
||||
this.Property = function() {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
>>
|
||||
|
||||
PositionAdjustingLexer() ::= <<
|
||||
|
||||
PositionAdjustingLexer.prototype.resetAcceptPosition = function(index, line, column) {
|
||||
this._input.seek(index);
|
||||
this.line = line;
|
||||
this.column = column;
|
||||
this._interp.consume(this._input);
|
||||
};
|
||||
|
||||
PositionAdjustingLexer.prototype.nextToken = function() {
|
||||
if (!("resetAcceptPosition" in this._interp)) {
|
||||
var lexer = this;
|
||||
this._interp.resetAcceptPosition = function(index, line, column) { lexer.resetAcceptPosition(index, line, column); };
|
||||
}
|
||||
return antlr4.Lexer.prototype.nextToken.call(this);
|
||||
};
|
||||
|
||||
PositionAdjustingLexer.prototype.emit = function() {
|
||||
switch(this._type) {
|
||||
case PositionAdjustingLexer.TOKENS:
|
||||
this.handleAcceptPositionForKeyword("tokens");
|
||||
break;
|
||||
case PositionAdjustingLexer.LABEL:
|
||||
this.handleAcceptPositionForIdentifier();
|
||||
break;
|
||||
}
|
||||
return antlr4.Lexer.prototype.emit.call(this);
|
||||
};
|
||||
|
||||
PositionAdjustingLexer.prototype.handleAcceptPositionForIdentifier = function() {
|
||||
var tokenText = this.text;
|
||||
var identifierLength = 0;
|
||||
while (identifierLength \< tokenText.length &&
|
||||
PositionAdjustingLexer.isIdentifierChar(tokenText[identifierLength])
|
||||
) {
|
||||
identifierLength += 1;
|
||||
}
|
||||
if (this._input.index > this._tokenStartCharIndex + identifierLength) {
|
||||
var offset = identifierLength - 1;
|
||||
this._interp.resetAcceptPosition(this._tokenStartCharIndex + offset,
|
||||
this._tokenStartLine, this._tokenStartColumn + offset);
|
||||
return true;
|
||||
} else {
|
||||
return false;
|
||||
}
|
||||
};
|
||||
|
||||
PositionAdjustingLexer.prototype.handleAcceptPositionForKeyword = function(keyword) {
|
||||
if (this._input.index > this._tokenStartCharIndex + keyword.length) {
|
||||
var offset = keyword.length - 1;
|
||||
this._interp.resetAcceptPosition(this._tokenStartCharIndex + offset,
|
||||
this._tokenStartLine, this._tokenStartColumn + offset);
|
||||
return true;
|
||||
} else {
|
||||
return false;
|
||||
}
|
||||
};
|
||||
|
||||
PositionAdjustingLexer.isIdentifierChar = function(c) {
|
||||
return c.match(/^[0-9a-zA-Z_]+$/);
|
||||
}
|
||||
|
||||
>>
|
||||
|
||||
BasicListener(X) ::= <<
|
||||
this.LeafListener = function() {
|
||||
this.visitTerminal = function(node) {
|
||||
document.getElementById('output').value += node.symbol.text + '\\n';
|
||||
};
|
||||
return this;
|
||||
};
|
||||
this.LeafListener.prototype = Object.create(<X>Listener.prototype);
|
||||
this.LeafListener.prototype.constructor = this.LeafListener;
|
||||
|
||||
>>
|
||||
|
||||
WalkListener(s) ::= <<
|
||||
var walker = new antlr4.tree.ParseTreeWalker();
|
||||
walker.walk(new this.LeafListener(), <s>);
|
||||
>>
|
||||
|
||||
TreeNodeWithAltNumField(X) ::= <<
|
||||
|
||||
@parser::header {
|
||||
MyRuleNode = function(parent, invokingState) {
|
||||
antlr4.ParserRuleContext.call(this, parent, invokingState);
|
||||
this.altNum = 0;
|
||||
return this;
|
||||
};
|
||||
|
||||
MyRuleNode.prototype = Object.create(antlr4.ParserRuleContext.prototype);
|
||||
MyRuleNode.prototype.constructor = MyRuleNode;
|
||||
}
|
||||
>>
|
||||
|
||||
TokenGetterListener(X) ::= <<
|
||||
this.LeafListener = function() {
|
||||
this.exitA = function(ctx) {
|
||||
var str;
|
||||
if(ctx.getChildCount()===2) {
|
||||
str = ctx.INT(0).symbol.text + ' ' + ctx.INT(1).symbol.text + ' ' + antlr4.Utils.arrayToString(ctx.INT());
|
||||
} else {
|
||||
str = ctx.ID().symbol.toString();
|
||||
}
|
||||
document.getElementById('output').value += str + '\\n';
|
||||
};
|
||||
return this;
|
||||
};
|
||||
this.LeafListener.prototype = Object.create(<X>Listener.prototype);
|
||||
this.LeafListener.prototype.constructor = this.LeafListener;
|
||||
|
||||
>>
|
||||
|
||||
RuleGetterListener(X) ::= <<
|
||||
this.LeafListener = function() {
|
||||
this.exitA = function(ctx) {
|
||||
var str;
|
||||
if(ctx.getChildCount()===2) {
|
||||
str = ctx.b(0).start.text + ' ' + ctx.b(1).start.text + ' ' + ctx.b()[0].start.text;
|
||||
} else {
|
||||
str = ctx.b(0).start.text;
|
||||
}
|
||||
document.getElementById('output').value += str + '\\n';
|
||||
};
|
||||
return this;
|
||||
};
|
||||
this.LeafListener.prototype = Object.create(<X>Listener.prototype);
|
||||
this.LeafListener.prototype.constructor = this.LeafListener;
|
||||
|
||||
>>
|
||||
|
||||
|
||||
LRListener(X) ::= <<
|
||||
this.LeafListener = function() {
|
||||
this.exitE = function(ctx) {
|
||||
var str;
|
||||
if(ctx.getChildCount()===3) {
|
||||
str = ctx.e(0).start.text + ' ' + ctx.e(1).start.text + ' ' + ctx.e()[0].start.text;
|
||||
} else {
|
||||
str = ctx.INT().symbol.text;
|
||||
}
|
||||
document.getElementById('output').value += str + '\\n';
|
||||
};
|
||||
return this;
|
||||
};
|
||||
this.LeafListener.prototype = Object.create(<X>Listener.prototype);
|
||||
this.LeafListener.prototype.constructor = this.LeafListener;
|
||||
|
||||
>>
|
||||
|
||||
LRWithLabelsListener(X) ::= <<
|
||||
this.LeafListener = function() {
|
||||
this.exitCall = function(ctx) {
|
||||
var str = ctx.e().start.text + ' ' + ctx.eList();
|
||||
document.getElementById('output').value += str + '\\n';
|
||||
};
|
||||
this.exitInt = function(ctx) {
|
||||
var str = ctx.INT().symbol.text;
|
||||
document.getElementById('output').value += str + '\\n';
|
||||
};
|
||||
return this;
|
||||
};
|
||||
this.LeafListener.prototype = Object.create(<X>Listener.prototype);
|
||||
this.LeafListener.prototype.constructor = this.LeafListener;
|
||||
|
||||
>>
|
||||
|
||||
DeclareContextListGettersFunction() ::= <<
|
||||
function foo() {
|
||||
var s = new SContext();
|
||||
var a = s.a();
|
||||
var b = s.b();
|
||||
};
|
||||
>>
|
||||
|
||||
Declare_foo() ::= "this.foo = function() {document.getElementById('output').value += 'foo' + '\\n';};"
|
||||
|
||||
Invoke_foo() ::= "this.foo();"
|
||||
|
||||
Declare_pred() ::= <<this.pred = function(v) {
|
||||
document.getElementById('output').value += 'eval=' + v.toString() + '\\n';
|
||||
return v;
|
||||
};
|
||||
>>
|
||||
|
||||
Invoke_pred(v) ::= <<this.pred(<v>)>>
|
||||
|
||||
isEmpty ::= [
|
||||
"": true,
|
||||
default: false
|
||||
]
|
|
@@ -0,0 +1,436 @@
|
|||
IgnoredTests ::= [
|
||||
default: false
|
||||
]
|
||||
|
||||
TestFile(file) ::= <<
|
||||
/* This file is generated by TestGenerator, any edits will be overwritten by the next generation. */
|
||||
package org.antlr.v4.test.runtime.javascript.firefox;
|
||||
|
||||
import org.junit.Ignore;
|
||||
import org.junit.Test;
|
||||
|
||||
import static org.junit.Assert.*;
|
||||
|
||||
<if(file.Options.("ImportErrorQueue"))>
|
||||
import org.antlr.v4.test.runtime.java.ErrorQueue;
|
||||
<endif>
|
||||
<if(file.Options.("ImportGrammar"))>
|
||||
import org.antlr.v4.tool.Grammar;
|
||||
<endif>
|
||||
|
||||
@SuppressWarnings("unused")
|
||||
public class Test<file.name> extends BaseTest {
|
||||
|
||||
<file.tests:{test | <test>}; separator="\n", wrap, anchor>
|
||||
|
||||
}<\n>
|
||||
>>
|
||||
|
||||
LexerTestMethod(test) ::= <<
|
||||
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
|
||||
<testAnnotations(test)>
|
||||
public void test<test.name>() throws Exception {
|
||||
mkdir(tmpdir);
|
||||
<test.SlaveGrammars:{grammar |
|
||||
String slave_<grammar> =<writeStringLiteral(test.SlaveGrammars.(grammar))>;
|
||||
writeFile(tmpdir, "<grammar>.g4", slave_<grammar>);
|
||||
}; separator="\n">
|
||||
|
||||
<test.Grammar:{grammar |
|
||||
<buildStringLiteral(test.Grammar.(grammar), "grammar")>
|
||||
<test.afterGrammar>
|
||||
String input =<writeStringLiteral(test.Input)>;
|
||||
String found = execLexer("<grammar>.g4", grammar, "<grammar><if(test.Options.("CombinedGrammar"))>Lexer<endif>", input, <writeBoolean(test.Options.("ShowDFA"))>);
|
||||
assertEquals(<writeStringLiteral(test.Output)>, found);
|
||||
<if(!isEmpty.(test.Errors))>
|
||||
assertEquals(<writeStringLiteral(test.Errors)>, this.stderrDuringParse);
|
||||
<else>
|
||||
assertNull(this.stderrDuringParse);
|
||||
<endif>
|
||||
}>
|
||||
}
|
||||
>>
|
||||
|
||||
CompositeLexerTestMethod(test) ::= <<
|
||||
<LexerTestMethod(test)>
|
||||
>>
|
||||
|
||||
ParserTestMethod(test) ::= <<
|
||||
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
|
||||
<testAnnotations(test)>
|
||||
public void test<test.name>() throws Exception {
|
||||
mkdir(tmpdir);
|
||||
<test.SlaveGrammars:{grammar |
|
||||
String slave_<grammar> =<writeStringLiteral(test.SlaveGrammars.(grammar))>;
|
||||
<if(test.Options.("SlaveIsLexer"))>
|
||||
rawGenerateAndBuildRecognizer("<grammar>.g4", slave_<grammar>, null, "<grammar>");
|
||||
<else>
|
||||
writeFile(tmpdir, "<grammar>.g4", slave_<grammar>);
|
||||
<endif>
|
||||
}; separator="\n">
|
||||
<test.Grammar:{grammar |
|
||||
<buildStringLiteral(test.Grammar.(grammar), "grammar")>
|
||||
<test.afterGrammar>
|
||||
String input =<writeStringLiteral(test.Input)>;
|
||||
String found = execParser("<grammar>.g4", grammar, "<grammar>Parser", "<grammar>Lexer",
|
||||
"<grammar>Listener", "<grammar>Visitor",
|
||||
"<test.Rule>", input, <writeBoolean(test.Options.("Debug"))>);
|
||||
assertEquals(<writeStringLiteral(test.Output)>, found);
|
||||
<if(!isEmpty.(test.Errors))>
|
||||
assertEquals(<writeStringLiteral(test.Errors)>, this.stderrDuringParse);
|
||||
<else>
|
||||
assertNull(this.stderrDuringParse);
|
||||
<endif>
|
||||
}>
|
||||
}
|
||||
>>
|
||||
|
||||
CompositeParserTestMethod(test) ::= <<
|
||||
<ParserTestMethod(test)>
|
||||
>>
|
||||
|
||||
AbstractParserTestMethod(test) ::= <<
|
||||
/* this file and method are generated, any edit will be overwritten by the next generation */
|
||||
String test<test.name>(String input) throws Exception {
|
||||
String grammar = <test.grammar.lines:{ line | "<line>};separator="\\n\" +\n", wrap, anchor>";
|
||||
return execParser("<test.grammar.grammarName>.g4", grammar, "<test.grammar.grammarName>Parser",
|
||||
"<test.grammar.grammarName>Listener", "<test.grammar.grammarName>Visitor",
|
||||
"<test.grammar.grammarName>Lexer", "<test.startRule>", input, <test.debug>);
|
||||
}
|
||||
|
||||
>>
|
||||
|
||||
ConcreteParserTestMethod(test) ::= <<
|
||||
/* this file and method are generated, any edit will be overwritten by the next generation */
|
||||
@Test
|
||||
public void test<test.name>() throws Exception {
|
||||
String found = test<test.baseName>("<test.input>");
|
||||
assertEquals("<test.expectedOutput>", found);
|
||||
<if(test.expectedErrors)>
|
||||
assertEquals("<test.expectedErrors>", this.stderrDuringParse);
|
||||
<else>
|
||||
assertNull(this.stderrDuringParse);
|
||||
<endif>
|
||||
}
|
||||
|
||||
>>
|
||||
|
||||
testAnnotations(test) ::= <%
|
||||
@Test
|
||||
<if(test.Options.("Ignore"))>
|
||||
<\n>@Ignore(<writeStringLiteral(test.Options.("Ignore"))>)
|
||||
<elseif(IgnoredTests.(({<file.name>.<test.name>})))>
|
||||
<\n>@Ignore(<writeStringLiteral(IgnoredTests.(({<file.name>.<test.name>})))>)
|
||||
<endif>
|
||||
%>
|
||||
|
||||
buildStringLiteral(text, variable) ::= <<
|
||||
StringBuilder <variable>Builder = new StringBuilder(<strlen.(text)>);
|
||||
<lines.(text):{line|<variable>Builder.append("<escape.(line)>");}; separator="\n">
|
||||
String <variable> = <variable>Builder.toString();
|
||||
>>
|
||||
|
||||
writeStringLiteral(text) ::= <%
|
||||
<if(isEmpty.(text))>
|
||||
""
|
||||
<else>
|
||||
<writeLines(lines.(text))>
|
||||
<endif>
|
||||
%>
|
||||
|
||||
writeLines(textLines) ::= <%
|
||||
<if(rest(textLines))>
|
||||
<textLines:{line|
|
||||
<\n> "<escape.(line)>}; separator="\" +">"
|
||||
<else>
|
||||
"<escape.(first(textLines))>"
|
||||
<endif>
|
||||
%>
|
||||
|
||||
string(text) ::= <<
|
||||
"<escape.(text)>"
|
||||
>>
|
||||
|
||||
writeBoolean(o) ::= "<if(o && !isEmpty.(o))>true<else>false<endif>"
|
||||
|
||||
writeln(s) ::= <<document.getElementById('output').value += <s> + '\\n';>>
|
||||
|
||||
write(s) ::= <<document.getElementById('output').value += <s>;>>
|
||||
|
||||
False() ::= "false"
|
||||
|
||||
True() ::= "true"
|
||||
|
||||
Not(v) ::= "!<v>"
|
||||
|
||||
Assert(s) ::= ""
|
||||
|
||||
Cast(t,v) ::= "<v>"
|
||||
|
||||
Append(a,b) ::= "<a> + <b>"
|
||||
|
||||
Concat(a,b) ::= "<a><b>"
|
||||
|
||||
DeclareLocal(s,v) ::= "var <s> = <v>;"
|
||||
|
||||
AssertIsList(v) ::= <<if ( !(v instanceof Array) ) {throw "value is not an array";}>>
|
||||
|
||||
AssignLocal(s,v) ::= "<s> = <v>;"
|
||||
|
||||
InitIntMember(n,v) ::= <%this.<n> = <v>;%>
|
||||
|
||||
InitBooleanMember(n,v) ::= <%this.<n> = <v>;%>
|
||||
|
||||
GetMember(n) ::= <%this.<n>%>
|
||||
|
||||
SetMember(n,v) ::= <%this.<n> = <v>;%>
|
||||
|
||||
AddMember(n,v) ::= <%this.<n> += <v>;%>
|
||||
|
||||
PlusMember(v,n) ::= <%<v> + this.<n>%>
|
||||
|
||||
MemberEquals(n,v) ::= <%this.<n> === <v>%>
|
||||
|
||||
ModMemberEquals(n,m,v) ::= <%this.<n> % <m> === <v>%>
|
||||
|
||||
ModMemberNotEquals(n,m,v) ::= <%this.<n> % <m> != <v>%>
|
||||
|
||||
CheckVectorContext(s,v) ::= "<s> = [].concat(<v>);"
|
||||
|
||||
DumpDFA() ::= "this.dumpDFA();"
|
||||
|
||||
Pass() ::= ""
|
||||
|
||||
StringList() ::= "list"
|
||||
|
||||
BuildParseTrees() ::= "this.buildParseTrees = true;"
|
||||
|
||||
BailErrorStrategy() ::= <%this._errHandler = new antlr4.error.BailErrorStrategy();%>
|
||||
|
||||
ToStringTree(s) ::= <%<s>.toStringTree(null, this)%>
|
||||
|
||||
Column() ::= "this.column"
|
||||
|
||||
Text() ::= "this.text"
|
||||
|
||||
ValEquals(a,b) ::= <%<a>===<b>%>
|
||||
|
||||
TextEquals(a) ::= <%this.text==="<a>"%>
|
||||
|
||||
PlusText(a) ::= <%"<a>" + this.text%>
|
||||
|
||||
InputText() ::= "this._input.getText()"
|
||||
|
||||
LTEquals(i, v) ::= <%this._input.LT(<i>).text===<v>%>
|
||||
|
||||
LANotEquals(i, v) ::= <%this._input.LA(<i>)!=<v>%>
|
||||
|
||||
TokenStartColumnEquals(i) ::= <%this._tokenStartColumn===<i>%>
|
||||
|
||||
ImportListener(X) ::= <<var <X>Listener = require('./<X>Listener').<X>Listener;>>
|
||||
|
||||
GetExpectedTokenNames() ::= "this.getExpectedTokens().toString(this.literalNames)"
|
||||
|
||||
RuleInvocationStack() ::= "antlr4.Utils.arrayToString(this.getRuleInvocationStack())"
|
||||
|
||||
LL_EXACT_AMBIG_DETECTION() ::= <<this._interp.predictionMode = antlr4.atn.PredictionMode.LL_EXACT_AMBIG_DETECTION;>>
|
||||
|
||||
ParserPropertyMember() ::= <<
|
||||
@members {
|
||||
this.Property = function() {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
>>
|
||||
|
||||
PositionAdjustingLexer() ::= <<
|
||||
|
||||
PositionAdjustingLexer.prototype.resetAcceptPosition = function(index, line, column) {
|
||||
this._input.seek(index);
|
||||
this.line = line;
|
||||
this.column = column;
|
||||
this._interp.consume(this._input);
|
||||
};
|
||||
|
||||
PositionAdjustingLexer.prototype.nextToken = function() {
|
||||
if (!("resetAcceptPosition" in this._interp)) {
|
||||
var lexer = this;
|
||||
this._interp.resetAcceptPosition = function(index, line, column) { lexer.resetAcceptPosition(index, line, column); };
|
||||
}
|
||||
return antlr4.Lexer.prototype.nextToken.call(this);
|
||||
};
|
||||
|
||||
PositionAdjustingLexer.prototype.emit = function() {
|
||||
switch(this._type) {
|
||||
case PositionAdjustingLexer.TOKENS:
|
||||
this.handleAcceptPositionForKeyword("tokens");
|
||||
break;
|
||||
case PositionAdjustingLexer.LABEL:
|
||||
this.handleAcceptPositionForIdentifier();
|
||||
break;
|
||||
}
|
||||
return antlr4.Lexer.prototype.emit.call(this);
|
||||
};
|
||||
|
||||
PositionAdjustingLexer.prototype.handleAcceptPositionForIdentifier = function() {
|
||||
var tokenText = this.text;
|
||||
var identifierLength = 0;
|
||||
while (identifierLength \< tokenText.length &&
|
||||
PositionAdjustingLexer.isIdentifierChar(tokenText[identifierLength])
|
||||
) {
|
||||
identifierLength += 1;
|
||||
}
|
||||
if (this._input.index > this._tokenStartCharIndex + identifierLength) {
|
||||
var offset = identifierLength - 1;
|
||||
this._interp.resetAcceptPosition(this._tokenStartCharIndex + offset,
|
||||
this._tokenStartLine, this._tokenStartColumn + offset);
|
||||
return true;
|
||||
} else {
|
||||
return false;
|
||||
}
|
||||
};
|
||||
|
||||
PositionAdjustingLexer.prototype.handleAcceptPositionForKeyword = function(keyword) {
|
||||
if (this._input.index > this._tokenStartCharIndex + keyword.length) {
|
||||
var offset = keyword.length - 1;
|
||||
this._interp.resetAcceptPosition(this._tokenStartCharIndex + offset,
|
||||
this._tokenStartLine, this._tokenStartColumn + offset);
|
||||
return true;
|
||||
} else {
|
||||
return false;
|
||||
}
|
||||
};
|
||||
|
||||
PositionAdjustingLexer.isIdentifierChar = function(c) {
|
||||
return c.match(/^[0-9a-zA-Z_]+$/);
|
||||
}
|
||||
|
||||
>>
|
||||
|
||||
BasicListener(X) ::= <<
|
||||
this.LeafListener = function() {
|
||||
this.visitTerminal = function(node) {
|
||||
document.getElementById('output').value += node.symbol.text + '\\n';
|
||||
};
|
||||
return this;
|
||||
};
|
||||
this.LeafListener.prototype = Object.create(<X>Listener.prototype);
|
||||
this.LeafListener.prototype.constructor = this.LeafListener;
|
||||
|
||||
>>
|
||||
|
||||
WalkListener(s) ::= <<
|
||||
var walker = new antlr4.tree.ParseTreeWalker();
|
||||
walker.walk(new this.LeafListener(), <s>);
|
||||
>>
|
||||
|
||||
TreeNodeWithAltNumField(X) ::= <<
|
||||
|
||||
@parser::header {
|
||||
MyRuleNode = function(parent, invokingState) {
|
||||
antlr4.ParserRuleContext.call(this, parent, invokingState);
|
||||
this.altNum = 0;
|
||||
return this;
|
||||
};
|
||||
|
||||
MyRuleNode.prototype = Object.create(antlr4.ParserRuleContext.prototype);
|
||||
MyRuleNode.prototype.constructor = MyRuleNode;
|
||||
}
|
||||
>>
|
||||
|
||||
TokenGetterListener(X) ::= <<
|
||||
this.LeafListener = function() {
|
||||
this.exitA = function(ctx) {
|
||||
var str;
|
||||
if(ctx.getChildCount()===2) {
|
||||
str = ctx.INT(0).symbol.text + ' ' + ctx.INT(1).symbol.text + ' ' + antlr4.Utils.arrayToString(ctx.INT());
|
||||
} else {
|
||||
str = ctx.ID().symbol.toString();
|
||||
}
|
||||
document.getElementById('output').value += str + '\\n';
|
||||
};
|
||||
return this;
|
||||
};
|
||||
this.LeafListener.prototype = Object.create(<X>Listener.prototype);
|
||||
this.LeafListener.prototype.constructor = this.LeafListener;
|
||||
|
||||
>>
|
||||
|
||||
RuleGetterListener(X) ::= <<
|
||||
this.LeafListener = function() {
|
||||
this.exitA = function(ctx) {
|
||||
var str;
|
||||
if(ctx.getChildCount()===2) {
|
||||
str = ctx.b(0).start.text + ' ' + ctx.b(1).start.text + ' ' + ctx.b()[0].start.text;
|
||||
} else {
|
||||
str = ctx.b(0).start.text;
|
||||
}
|
||||
document.getElementById('output').value += str + '\\n';
|
||||
};
|
||||
return this;
|
||||
};
|
||||
this.LeafListener.prototype = Object.create(<X>Listener.prototype);
|
||||
this.LeafListener.prototype.constructor = this.LeafListener;
|
||||
|
||||
>>
|
||||
|
||||
|
||||
LRListener(X) ::= <<
|
||||
this.LeafListener = function() {
|
||||
this.exitE = function(ctx) {
|
||||
var str;
|
||||
if(ctx.getChildCount()===3) {
|
||||
str = ctx.e(0).start.text + ' ' + ctx.e(1).start.text + ' ' + ctx.e()[0].start.text;
|
||||
} else {
|
||||
str = ctx.INT().symbol.text;
|
||||
}
|
||||
document.getElementById('output').value += str + '\\n';
|
||||
};
|
||||
return this;
|
||||
};
|
||||
this.LeafListener.prototype = Object.create(<X>Listener.prototype);
|
||||
this.LeafListener.prototype.constructor = this.LeafListener;
|
||||
|
||||
>>
|
||||
|
||||
LRWithLabelsListener(X) ::= <<
|
||||
this.LeafListener = function() {
|
||||
this.exitCall = function(ctx) {
|
||||
var str = ctx.e().start.text + ' ' + ctx.eList();
|
||||
document.getElementById('output').value += str + '\\n';
|
||||
};
|
||||
this.exitInt = function(ctx) {
|
||||
var str = ctx.INT().symbol.text;
|
||||
document.getElementById('output').value += str + '\\n';
|
||||
};
|
||||
return this;
|
||||
};
|
||||
this.LeafListener.prototype = Object.create(<X>Listener.prototype);
|
||||
this.LeafListener.prototype.constructor = this.LeafListener;
|
||||
|
||||
>>
|
||||
|
||||
DeclareContextListGettersFunction() ::= <<
|
||||
function foo() {
|
||||
var s = new SContext();
|
||||
var a = s.a();
|
||||
var b = s.b();
|
||||
};
|
||||
>>
|
||||
|
||||
Declare_foo() ::= "this.foo = function() {document.getElementById('output').value += 'foo' + '\\n';};"
|
||||
|
||||
Invoke_foo() ::= "this.foo();"
|
||||
|
||||
Declare_pred() ::= <<this.pred = function(v) {
|
||||
document.getElementById('output').value += 'eval=' + v.toString() + '\\n';
|
||||
return v;
|
||||
};
|
||||
>>
|
||||
|
||||
Invoke_pred(v) ::= <<this.pred(<v>)>>
|
||||
|
||||
isEmpty ::= [
|
||||
"": true,
|
||||
default: false
|
||||
]
|
|
@@ -0,0 +1,438 @@
|
|||
IgnoredTests ::= [
|
||||
default: false
|
||||
]
|
||||
|
||||
TestFile(file) ::= <<
|
||||
/* This file is generated by TestGenerator, any edits will be overwritten by the next generation. */
|
||||
package org.antlr.v4.test.runtime.javascript.node;
|
||||
|
||||
import org.junit.Ignore;
|
||||
import org.junit.Test;
|
||||
|
||||
import static org.junit.Assert.*;
|
||||
|
||||
<if(file.Options.("ImportErrorQueue"))>
|
||||
import org.antlr.v4.test.runtime.java.ErrorQueue;
|
||||
<endif>
|
||||
<if(file.Options.("ImportGrammar"))>
|
||||
import org.antlr.v4.tool.Grammar;
|
||||
<endif>
|
||||
|
||||
@SuppressWarnings("unused")
|
||||
public class Test<file.name> extends BaseTest {
|
||||
|
||||
<file.tests:{test | <test>}; separator="\n", wrap, anchor>
|
||||
|
||||
}<\n>
|
||||
>>
|
||||
|
||||
LexerTestMethod(test) ::= <<
|
||||
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
|
||||
<testAnnotations(test)>
|
||||
public void test<test.name>() throws Exception {
|
||||
mkdir(tmpdir);
|
||||
<test.SlaveGrammars:{grammar |
|
||||
String slave_<grammar> =<writeStringLiteral(test.SlaveGrammars.(grammar))>;
|
||||
writeFile(tmpdir, "<grammar>.g4", slave_<grammar>);
|
||||
}; separator="\n">
|
||||
|
||||
<test.Grammar:{grammar |
|
||||
<buildStringLiteral(test.Grammar.(grammar), "grammar")>
|
||||
<test.afterGrammar>
|
||||
String input =<writeStringLiteral(test.Input)>;
|
||||
String found = execLexer("<grammar>.g4", grammar, "<grammar><if(test.Options.("CombinedGrammar"))>Lexer<endif>", input, <writeBoolean(test.Options.("ShowDFA"))>);
|
||||
assertEquals(<writeStringLiteral(test.Output)>, found);
|
||||
<if(!isEmpty.(test.Errors))>
|
||||
assertEquals(<writeStringLiteral(test.Errors)>, this.stderrDuringParse);
|
||||
<else>
|
||||
assertNull(this.stderrDuringParse);
|
||||
<endif>
|
||||
}>
|
||||
}
|
||||
>>
|
||||
|
||||
CompositeLexerTestMethod(test) ::= <<
|
||||
<LexerTestMethod(test)>
|
||||
>>
|
||||
|
||||
ParserTestMethod(test) ::= <<
|
||||
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
|
||||
<testAnnotations(test)>
|
||||
public void test<test.name>() throws Exception {
|
||||
mkdir(tmpdir);
|
||||
<test.SlaveGrammars:{grammar |
|
||||
String slave_<grammar> =<writeStringLiteral(test.SlaveGrammars.(grammar))>;
|
||||
<if(test.Options.("SlaveIsLexer"))>
|
||||
rawGenerateAndBuildRecognizer("<grammar>.g4", slave_<grammar>, null, "<grammar>");
|
||||
<else>
|
||||
writeFile(tmpdir, "<grammar>.g4", slave_<grammar>);
|
||||
<endif>
|
||||
}; separator="\n">
|
||||
<test.Grammar:{grammar |
|
||||
<buildStringLiteral(test.Grammar.(grammar), "grammar")>
|
||||
<test.afterGrammar>
|
||||
String input =<writeStringLiteral(test.Input)>;
|
||||
String found = execParser("<grammar>.g4", grammar, "<grammar>Parser", "<grammar>Lexer",
|
||||
"<grammar>Listener", "<grammar>Visitor",
|
||||
"<test.Rule>", input, <writeBoolean(test.Options.("Debug"))>);
|
||||
assertEquals(<writeStringLiteral(test.Output)>, found);
|
||||
<if(!isEmpty.(test.Errors))>
|
||||
assertEquals(<writeStringLiteral(test.Errors)>, this.stderrDuringParse);
|
||||
<else>
|
||||
assertNull(this.stderrDuringParse);
|
||||
<endif>
|
||||
}>
|
||||
}
|
||||
>>
|
||||
|
||||
CompositeParserTestMethod(test) ::= <<
|
||||
<ParserTestMethod(test)>
|
||||
>>
|
||||
|
||||
AbstractParserTestMethod(test) ::= <<
|
||||
/* this file and method are generated, any edit will be overwritten by the next generation */
|
||||
String test<test.name>(String input) throws Exception {
|
||||
String grammar = <test.grammar.lines:{ line | "<line>};separator="\\n\" +\n", wrap, anchor>";
|
||||
return execParser("<test.grammar.grammarName>.g4", grammar, "<test.grammar.grammarName>Parser",
|
||||
"<test.grammar.grammarName>Listener", "<test.grammar.grammarName>Visitor",
|
||||
"<test.grammar.grammarName>Lexer", "<test.startRule>", input, <test.debug>);
|
||||
}
|
||||
|
||||
>>
|
||||
|
||||
ConcreteParserTestMethod(test) ::= <<
|
||||
/* this file and method are generated, any edit will be overwritten by the next generation */
|
||||
@Test
|
||||
public void test<test.name>() throws Exception {
|
||||
String found = test<test.baseName>("<test.input>");
|
||||
assertEquals("<test.expectedOutput>", found);
|
||||
<if(test.expectedErrors)>
|
||||
assertEquals("<test.expectedErrors>", this.stderrDuringParse);
|
||||
<else>
|
||||
assertNull(this.stderrDuringParse);
|
||||
<endif>
|
||||
}
|
||||
|
||||
>>
|
||||
|
||||
testAnnotations(test) ::= <%
|
||||
@Test
|
||||
<if(test.Options.("Ignore"))>
|
||||
<\n>@Ignore(<writeStringLiteral(test.Options.("Ignore"))>)
|
||||
<elseif(IgnoredTests.(({<file.name>.<test.name>})))>
|
||||
<\n>@Ignore(<writeStringLiteral(IgnoredTests.(({<file.name>.<test.name>})))>)
|
||||
<endif>
|
||||
%>
|
||||
|
||||
buildStringLiteral(text, variable) ::= <<
|
||||
StringBuilder <variable>Builder = new StringBuilder(<strlen.(text)>);
|
||||
<lines.(text):{line|<variable>Builder.append("<escape.(line)>");}; separator="\n">
|
||||
String <variable> = <variable>Builder.toString();
|
||||
>>
|
||||
|
||||
writeStringLiteral(text) ::= <%
|
||||
<if(isEmpty.(text))>
|
||||
""
|
||||
<else>
|
||||
<writeLines(lines.(text))>
|
||||
<endif>
|
||||
%>
|
||||
|
||||
writeLines(textLines) ::= <%
|
||||
<if(rest(textLines))>
|
||||
<textLines:{line|
|
||||
<\n> "<escape.(line)>}; separator="\" +">"
|
||||
<else>
|
||||
"<escape.(first(textLines))>"
|
||||
<endif>
|
||||
%>
|
||||
|
||||
string(text) ::= <<
|
||||
"<escape.(text)>"
|
||||
>>
|
||||
|
||||
writeBoolean(o) ::= "<if(o && !isEmpty.(o))>true<else>false<endif>"
|
||||
|
||||
writeln(s) ::= <<console.log(<s>);>>
|
||||
|
||||
write(s) ::= <<process.stdout.write(<s>);>>
|
||||
|
||||
False() ::= "false"
|
||||
|
||||
True() ::= "true"
|
||||
|
||||
Not(v) ::= "!<v>"
|
||||
|
||||
Assert(s) ::= <<console.assert(<s>);>>
|
||||
|
||||
Cast(t,v) ::= "<v>"
|
||||
|
||||
Append(a,b) ::= "<a> + <b>"
|
||||
|
||||
Concat(a,b) ::= "<a><b>"
|
||||
|
||||
DeclareLocal(s,v) ::= "var <s> = <v>;"
|
||||
|
||||
AssertIsList(v) ::= <<if ( !(v instanceof Array) ) {throw "value is not an array";}>>
|
||||
|
||||
AssignLocal(s,v) ::= "<s> = <v>;"
|
||||
|
||||
InitIntMember(n,v) ::= <%this.<n> = <v>;%>
|
||||
|
||||
InitBooleanMember(n,v) ::= <%this.<n> = <v>;%>
|
||||
|
||||
GetMember(n) ::= <%this.<n>%>
|
||||
|
||||
SetMember(n,v) ::= <%this.<n> = <v>;%>
|
||||
|
||||
AddMember(n,v) ::= <%this.<n> += <v>;%>
|
||||
|
||||
PlusMember(v,n) ::= <%<v> + this.<n>%>
|
||||
|
||||
MemberEquals(n,v) ::= <%this.<n> === <v>%>
|
||||
|
||||
ModMemberEquals(n,m,v) ::= <%this.<n> % <m> === <v>%>
|
||||
|
||||
ModMemberNotEquals(n,m,v) ::= <%this.<n> % <m> != <v>%>
|
||||
|
||||
DumpDFA() ::= "this.dumpDFA();"
|
||||
|
||||
Pass() ::= ""
|
||||
|
||||
StringList() ::= "list"
|
||||
|
||||
BuildParseTrees() ::= "this.buildParseTrees = true;"
|
||||
|
||||
BailErrorStrategy() ::= <%this._errHandler = new antlr4.error.BailErrorStrategy();%>
|
||||
|
||||
ToStringTree(s) ::= <%<s>.toStringTree(null, this)%>
|
||||
|
||||
Column() ::= "this.column"
|
||||
|
||||
Text() ::= "this.text"
|
||||
|
||||
ValEquals(a,b) ::= <%<a>===<b>%>
|
||||
|
||||
TextEquals(a) ::= <%this.text==="<a>"%>
|
||||
|
||||
PlusText(a) ::= <%"<a>" + this.text%>
|
||||
|
||||
InputText() ::= "this._input.getText()"
|
||||
|
||||
LTEquals(i, v) ::= <%this._input.LT(<i>).text===<v>%>
|
||||
|
||||
LANotEquals(i, v) ::= <%this._input.LA(<i>)!=<v>%>
|
||||
|
||||
TokenStartColumnEquals(i) ::= <%this._tokenStartColumn===<i>%>
|
||||
|
||||
ImportListener(X) ::= <<var <X>Listener = require('./<X>Listener').<X>Listener;>>
|
||||
|
||||
GetExpectedTokenNames() ::= "this.getExpectedTokens().toString(this.literalNames)"
|
||||
|
||||
RuleInvocationStack() ::= "antlr4.Utils.arrayToString(this.getRuleInvocationStack())"
|
||||
|
||||
LL_EXACT_AMBIG_DETECTION() ::= <<this._interp.predictionMode = antlr4.atn.PredictionMode.LL_EXACT_AMBIG_DETECTION;>>
|
||||
|
||||
ParserPropertyMember() ::= <<
|
||||
@members {
|
||||
this.Property = function() {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
>>
|
||||
|
||||
PositionAdjustingLexer() ::= <<
|
||||
|
||||
PositionAdjustingLexer.prototype.resetAcceptPosition = function(index, line, column) {
|
||||
this._input.seek(index);
|
||||
this.line = line;
|
||||
this.column = column;
|
||||
this._interp.consume(this._input);
|
||||
};
|
||||
|
||||
PositionAdjustingLexer.prototype.nextToken = function() {
|
||||
if (!("resetAcceptPosition" in this._interp)) {
|
||||
var lexer = this;
|
||||
this._interp.resetAcceptPosition = function(index, line, column) { lexer.resetAcceptPosition(index, line, column); };
|
||||
}
|
||||
return antlr4.Lexer.prototype.nextToken.call(this);
|
||||
};
|
||||
|
||||
PositionAdjustingLexer.prototype.emit = function() {
|
||||
switch(this._type) {
|
||||
case PositionAdjustingLexer.TOKENS:
|
||||
this.handleAcceptPositionForKeyword("tokens");
|
||||
break;
|
||||
case PositionAdjustingLexer.LABEL:
|
||||
this.handleAcceptPositionForIdentifier();
|
||||
break;
|
||||
}
|
||||
return antlr4.Lexer.prototype.emit.call(this);
|
||||
};
|
||||
|
||||
PositionAdjustingLexer.prototype.handleAcceptPositionForIdentifier = function() {
|
||||
var tokenText = this.text;
|
||||
var identifierLength = 0;
|
||||
while (identifierLength \< tokenText.length &&
|
||||
PositionAdjustingLexer.isIdentifierChar(tokenText[identifierLength])
|
||||
) {
|
||||
identifierLength += 1;
|
||||
}
|
||||
if (this._input.index > this._tokenStartCharIndex + identifierLength) {
|
||||
var offset = identifierLength - 1;
|
||||
this._interp.resetAcceptPosition(this._tokenStartCharIndex + offset,
|
||||
this._tokenStartLine, this._tokenStartColumn + offset);
|
||||
return true;
|
||||
} else {
|
||||
return false;
|
||||
}
|
||||
};
|
||||
|
||||
PositionAdjustingLexer.prototype.handleAcceptPositionForKeyword = function(keyword) {
|
||||
if (this._input.index > this._tokenStartCharIndex + keyword.length) {
|
||||
var offset = keyword.length - 1;
|
||||
this._interp.resetAcceptPosition(this._tokenStartCharIndex + offset,
|
||||
this._tokenStartLine, this._tokenStartColumn + offset);
|
||||
return true;
|
||||
} else {
|
||||
return false;
|
||||
}
|
||||
};
|
||||
|
||||
PositionAdjustingLexer.isIdentifierChar = function(c) {
|
||||
return c.match(/^[0-9a-zA-Z_]+$/);
|
||||
}
|
||||
|
||||
>>
|
||||
|
||||
BasicListener(X) ::= <<
|
||||
this.LeafListener = function() {
|
||||
this.visitTerminal = function(node) {
|
||||
console.log(node.symbol.text);
|
||||
};
|
||||
return this;
|
||||
};
|
||||
this.LeafListener.prototype = Object.create(<X>Listener.prototype);
|
||||
this.LeafListener.prototype.constructor = this.LeafListener;
|
||||
|
||||
>>
|
||||
|
||||
WalkListener(s) ::= <<
|
||||
var walker = new antlr4.tree.ParseTreeWalker();
|
||||
walker.walk(new this.LeafListener(), <s>);
|
||||
>>
|
||||
|
||||
TreeNodeWithAltNumField(X) ::= <<
|
||||
|
||||
@parser::header {
|
||||
MyRuleNode = function(parent, invokingState) {
|
||||
antlr4.ParserRuleContext.call(this, parent, invokingState);
|
||||
|
||||
this.altNum = 0;
|
||||
return this;
|
||||
};
|
||||
|
||||
MyRuleNode.prototype = Object.create(antlr4.ParserRuleContext.prototype);
|
||||
MyRuleNode.prototype.constructor = MyRuleNode;
|
||||
MyRuleNode.prototype.getAltNumber = function() { return this.altNum; }
|
||||
MyRuleNode.prototype.setAltNumber = function(altNumber) { this.altNum = altNumber; }
|
||||
|
||||
}
|
||||
>>
|
||||
|
||||
TokenGetterListener(X) ::= <<
|
||||
this.LeafListener = function() {
|
||||
this.exitA = function(ctx) {
|
||||
var str;
|
||||
if(ctx.getChildCount()===2) {
|
||||
str = ctx.INT(0).symbol.text + ' ' + ctx.INT(1).symbol.text + ' ' + antlr4.Utils.arrayToString(ctx.INT());
|
||||
} else {
|
||||
str = ctx.ID().symbol.toString();
|
||||
}
|
||||
console.log(str);
|
||||
};
|
||||
return this;
|
||||
};
|
||||
this.LeafListener.prototype = Object.create(<X>Listener.prototype);
|
||||
this.LeafListener.prototype.constructor = this.LeafListener;
|
||||
|
||||
>>
|
||||
|
||||
RuleGetterListener(X) ::= <<
|
||||
this.LeafListener = function() {
|
||||
this.exitA = function(ctx) {
|
||||
var str;
|
||||
if(ctx.getChildCount()===2) {
|
||||
str = ctx.b(0).start.text + ' ' + ctx.b(1).start.text + ' ' + ctx.b()[0].start.text;
|
||||
} else {
|
||||
str = ctx.b(0).start.text;
|
||||
}
|
||||
console.log(str);
|
||||
};
|
||||
return this;
|
||||
};
|
||||
this.LeafListener.prototype = Object.create(<X>Listener.prototype);
|
||||
this.LeafListener.prototype.constructor = this.LeafListener;
|
||||
|
||||
>>
|
||||
|
||||
|
||||
LRListener(X) ::= <<
|
||||
this.LeafListener = function() {
|
||||
this.exitE = function(ctx) {
|
||||
var str;
|
||||
if(ctx.getChildCount()===3) {
|
||||
str = ctx.e(0).start.text + ' ' + ctx.e(1).start.text + ' ' + ctx.e()[0].start.text;
|
||||
} else {
|
||||
str = ctx.INT().symbol.text;
|
||||
}
|
||||
console.log(str);
|
||||
};
|
||||
return this;
|
||||
};
|
||||
this.LeafListener.prototype = Object.create(<X>Listener.prototype);
|
||||
this.LeafListener.prototype.constructor = this.LeafListener;
|
||||
|
||||
>>
|
||||
|
||||
LRWithLabelsListener(X) ::= <<
|
||||
this.LeafListener = function() {
|
||||
this.exitCall = function(ctx) {
|
||||
var str = ctx.e().start.text + ' ' + ctx.eList();
|
||||
console.log(str);
|
||||
};
|
||||
this.exitInt = function(ctx) {
|
||||
var str = ctx.INT().symbol.text;
|
||||
console.log(str);
|
||||
};
|
||||
return this;
|
||||
};
|
||||
this.LeafListener.prototype = Object.create(<X>Listener.prototype);
|
||||
this.LeafListener.prototype.constructor = this.LeafListener;
|
||||
|
||||
>>
|
||||
|
||||
DeclareContextListGettersFunction() ::= <<
|
||||
function foo() {
|
||||
var s = new SContext();
|
||||
var a = s.a();
|
||||
var b = s.b();
|
||||
};
|
||||
>>
|
||||
|
||||
Declare_foo() ::= "this.foo = function() {console.log('foo');};"
|
||||
|
||||
Invoke_foo() ::= "this.foo();"
|
||||
|
||||
Declare_pred() ::= <<this.pred = function(v) {
|
||||
console.log("eval=" + v.toString());
|
||||
return v;
|
||||
};
|
||||
>>
|
||||
|
||||
Invoke_pred(v) ::= <<this.pred(<v>)>>
|
||||
|
||||
isEmpty ::= [
|
||||
"": true,
|
||||
default: false
|
||||
]
|
|
@@ -0,0 +1,435 @@
|
|||
IgnoredTests ::= [
|
||||
default: false
|
||||
]
|
||||
|
||||
TestFile(file) ::= <<
|
||||
/* This file is generated by TestGenerator, any edits will be overwritten by the next generation. */
|
||||
package org.antlr.v4.test.runtime.javascript.safari;
|
||||
|
||||
import org.junit.Ignore;
|
||||
import org.junit.Test;
|
||||
|
||||
import static org.junit.Assert.*;
|
||||
|
||||
<if(file.Options.("ImportErrorQueue"))>
|
||||
import org.antlr.v4.test.runtime.java.ErrorQueue;
|
||||
<endif>
|
||||
<if(file.Options.("ImportGrammar"))>
|
||||
import org.antlr.v4.tool.Grammar;
|
||||
<endif>
|
||||
|
||||
@SuppressWarnings("unused")
|
||||
public class Test<file.name> extends BaseTest {
|
||||
|
||||
<file.tests:{test | <test>}; separator="\n", wrap, anchor>
|
||||
|
||||
}<\n>
|
||||
>>
|
||||
|
||||
LexerTestMethod(test) ::= <<
|
||||
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
|
||||
<testAnnotations(test)>
|
||||
public void test<test.name>() throws Exception {
|
||||
mkdir(tmpdir);
|
||||
<test.SlaveGrammars:{grammar |
|
||||
String slave_<grammar> =<writeStringLiteral(test.SlaveGrammars.(grammar))>;
|
||||
writeFile(tmpdir, "<grammar>.g4", slave_<grammar>);
|
||||
}; separator="\n">
|
||||
|
||||
<test.Grammar:{grammar |
|
||||
<buildStringLiteral(test.Grammar.(grammar), "grammar")>
|
||||
<test.afterGrammar>
|
||||
String input =<writeStringLiteral(test.Input)>;
|
||||
String found = execLexer("<grammar>.g4", grammar, "<grammar><if(test.Options.("CombinedGrammar"))>Lexer<endif>", input, <writeBoolean(test.Options.("ShowDFA"))>);
|
||||
assertEquals(<writeStringLiteral(test.Output)>, found);
|
||||
<if(!isEmpty.(test.Errors))>
|
||||
assertEquals(<writeStringLiteral(test.Errors)>, this.stderrDuringParse);
|
||||
<else>
|
||||
assertNull(this.stderrDuringParse);
|
||||
<endif>
|
||||
}>
|
||||
}
|
||||
>>
|
||||
|
||||
CompositeLexerTestMethod(test) ::= <<
|
||||
<LexerTestMethod(test)>
|
||||
>>
|
||||
|
||||
ParserTestMethod(test) ::= <<
|
||||
/* This file and method are generated by TestGenerator, any edits will be overwritten by the next generation. */
|
||||
<testAnnotations(test)>
|
||||
public void test<test.name>() throws Exception {
|
||||
mkdir(tmpdir);
|
||||
<test.SlaveGrammars:{grammar |
|
||||
String slave_<grammar> =<writeStringLiteral(test.SlaveGrammars.(grammar))>;
|
||||
<if(test.Options.("SlaveIsLexer"))>
|
||||
rawGenerateAndBuildRecognizer("<grammar>.g4", slave_<grammar>, null, "<grammar>");
|
||||
<else>
|
||||
writeFile(tmpdir, "<grammar>.g4", slave_<grammar>);
|
||||
<endif>
|
||||
}; separator="\n">
|
||||
<test.Grammar:{grammar |
|
||||
<buildStringLiteral(test.Grammar.(grammar), "grammar")>
|
||||
<test.afterGrammar>
|
||||
String input =<writeStringLiteral(test.Input)>;
|
||||
String found = execParser("<grammar>.g4", grammar, "<grammar>Parser", "<grammar>Lexer",
|
||||
"<grammar>Listener", "<grammar>Visitor",
|
||||
"<test.Rule>", input, <writeBoolean(test.Options.("Debug"))>);
|
||||
assertEquals(<writeStringLiteral(test.Output)>, found);
|
||||
<if(!isEmpty.(test.Errors))>
|
||||
assertEquals(<writeStringLiteral(test.Errors)>, this.stderrDuringParse);
|
||||
<else>
|
||||
assertNull(this.stderrDuringParse);
|
||||
<endif>
|
||||
}>
|
||||
}
|
||||
>>
|
||||
|
||||
CompositeParserTestMethod(test) ::= <<
|
||||
<ParserTestMethod(test)>
|
||||
>>
|
||||
|
||||
AbstractParserTestMethod(test) ::= <<
|
||||
/* this file and method are generated, any edit will be overwritten by the next generation */
|
||||
String test<test.name>(String input) throws Exception {
|
||||
String grammar = <test.grammar.lines:{ line | "<line>};separator="\\n\" +\n", wrap, anchor>";
|
||||
return execParser("<test.grammar.grammarName>.g4", grammar, "<test.grammar.grammarName>Parser",
|
||||
"<test.grammar.grammarName>Listener", "<test.grammar.grammarName>Visitor",
|
||||
"<test.grammar.grammarName>Lexer", "<test.startRule>", input, <test.debug>);
|
||||
}
|
||||
|
||||
>>
|
||||
|
||||
ConcreteParserTestMethod(test) ::= <<
|
||||
/* this file and method are generated, any edit will be overwritten by the next generation */
|
||||
@Test
|
||||
public void test<test.name>() throws Exception {
|
||||
String found = test<test.baseName>("<test.input>");
|
||||
assertEquals("<test.expectedOutput>", found);
|
||||
<if(test.expectedErrors)>
|
||||
assertEquals("<test.expectedErrors>", this.stderrDuringParse);
|
||||
<else>
|
||||
assertNull(this.stderrDuringParse);
|
||||
<endif>
|
||||
}
|
||||
|
||||
>>
|
||||
|
||||
testAnnotations(test) ::= <%
|
||||
@Test
|
||||
<if(test.Options.("Ignore"))>
|
||||
<\n>@Ignore(<writeStringLiteral(test.Options.("Ignore"))>)
|
||||
<elseif(IgnoredTests.(({<file.name>.<test.name>})))>
|
||||
<\n>@Ignore(<writeStringLiteral(IgnoredTests.(({<file.name>.<test.name>})))>)
|
||||
<endif>
|
||||
%>
|
||||
|
||||
buildStringLiteral(text, variable) ::= <<
|
||||
StringBuilder <variable>Builder = new StringBuilder(<strlen.(text)>);
|
||||
<lines.(text):{line|<variable>Builder.append("<escape.(line)>");}; separator="\n">
|
||||
String <variable> = <variable>Builder.toString();
|
||||
>>
|
||||
|
||||
writeStringLiteral(text) ::= <%
|
||||
<if(isEmpty.(text))>
|
||||
""
|
||||
<else>
|
||||
<writeLines(lines.(text))>
|
||||
<endif>
|
||||
%>
|
||||
|
||||
writeLines(textLines) ::= <%
|
||||
<if(rest(textLines))>
|
||||
<textLines:{line|
|
||||
<\n> "<escape.(line)>}; separator="\" +">"
|
||||
<else>
|
||||
"<escape.(first(textLines))>"
|
||||
<endif>
|
||||
%>
|
||||
|
||||
string(text) ::= <<
|
||||
"<escape.(text)>"
|
||||
>>
|
||||
|
||||
writeBoolean(o) ::= "<if(o && !isEmpty.(o))>true<else>false<endif>"
|
||||
|
||||
writeln(s) ::= <<document.getElementById('output').value += <s> + '\\n';>>
|
||||
|
||||
write(s) ::= <<document.getElementById('output').value += <s>;>>
|
||||
|
||||
False() ::= "false"
|
||||
|
||||
True() ::= "true"
|
||||
|
||||
Not(v) ::= "!<v>"
|
||||
|
||||
Assert(s) ::= ""
|
||||
|
||||
Cast(t,v) ::= "<v>"
|
||||
|
||||
Append(a,b) ::= "<a> + <b>"
|
||||
|
||||
Concat(a,b) ::= "<a><b>"
|
||||
|
||||
DeclareLocal(s,v) ::= "var <s> = <v>;"
|
||||
|
||||
AssertIsList(v) ::= <<if ( !(v instanceof Array) ) {throw "value is not an array";}>>
|
||||
|
||||
AssignLocal(s,v) ::= "<s> = <v>;"
|
||||
|
||||
InitIntMember(n,v) ::= <%this.<n> = <v>;%>
|
||||
|
||||
InitBooleanMember(n,v) ::= <%this.<n> = <v>;%>
|
||||
|
||||
GetMember(n) ::= <%this.<n>%>
|
||||
|
||||
SetMember(n,v) ::= <%this.<n> = <v>;%>
|
||||
|
||||
AddMember(n,v) ::= <%this.<n> += <v>;%>
|
||||
|
||||
PlusMember(v,n) ::= <%<v> + this.<n>%>
|
||||
|
||||
MemberEquals(n,v) ::= <%this.<n> === <v>%>
|
||||
|
||||
ModMemberEquals(n,m,v) ::= <%this.<n> % <m> === <v>%>
|
||||
|
||||
ModMemberNotEquals(n,m,v) ::= <%this.<n> % <m> != <v>%>
|
||||
|
||||
DumpDFA() ::= "this.dumpDFA();"
|
||||
|
||||
Pass() ::= ""
|
||||
|
||||
StringList() ::= "list"
|
||||
|
||||
BuildParseTrees() ::= "this.buildParseTrees = true;"
|
||||
|
||||
BailErrorStrategy() ::= <%this._errHandler = new antlr4.error.BailErrorStrategy();%>
|
||||
|
||||
ToStringTree(s) ::= <%<s>.toStringTree(null, this)%>
|
||||
|
||||
Column() ::= "this.column"
|
||||
|
||||
Text() ::= "this.text"
|
||||
|
||||
ValEquals(a,b) ::= <%<a>===<b>%>
|
||||
|
||||
TextEquals(a) ::= <%this.text==="<a>"%>
|
||||
|
||||
PlusText(a) ::= <%"<a>" + this.text%>
|
||||
|
||||
InputText() ::= "this._input.getText()"
|
||||
|
||||
LTEquals(i, v) ::= <%this._input.LT(<i>).text===<v>%>
|
||||
|
||||
LANotEquals(i, v) ::= <%this._input.LA(<i>)!=<v>%>
|
||||
|
||||
TokenStartColumnEquals(i) ::= <%this._tokenStartColumn===<i>%>
|
||||
|
||||
ImportListener(X) ::= <<var <X>Listener = require('./<X>Listener').<X>Listener;>>
|
||||
|
||||
GetExpectedTokenNames() ::= "this.getExpectedTokens().toString(this.literalNames)"
|
||||
|
||||
RuleInvocationStack() ::= "antlr4.Utils.arrayToString(this.getRuleInvocationStack())"
|
||||
|
||||
LL_EXACT_AMBIG_DETECTION() ::= <<this._interp.predictionMode = antlr4.atn.PredictionMode.LL_EXACT_AMBIG_DETECTION;>>
|
||||
|
||||
ParserPropertyMember() ::= <<
|
||||
@members {
|
||||
this.Property = function() {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
>>
|
||||
|
||||
PositionAdjustingLexer() ::= <<
|
||||
|
||||
PositionAdjustingLexer.prototype.resetAcceptPosition = function(index, line, column) {
|
||||
this._input.seek(index);
|
||||
this.line = line;
|
||||
this.column = column;
|
||||
this._interp.consume(this._input);
|
||||
};
|
||||
|
||||
PositionAdjustingLexer.prototype.nextToken = function() {
|
||||
if (!("resetAcceptPosition" in this._interp)) {
|
||||
var lexer = this;
|
||||
this._interp.resetAcceptPosition = function(index, line, column) { lexer.resetAcceptPosition(index, line, column); };
|
||||
}
|
||||
return antlr4.Lexer.prototype.nextToken.call(this);
|
||||
};
|
||||
|
||||
PositionAdjustingLexer.prototype.emit = function() {
|
||||
switch(this._type) {
|
||||
case PositionAdjustingLexer.TOKENS:
|
||||
this.handleAcceptPositionForKeyword("tokens");
|
||||
break;
|
||||
case PositionAdjustingLexer.LABEL:
|
||||
this.handleAcceptPositionForIdentifier();
|
||||
break;
|
||||
}
|
||||
return antlr4.Lexer.prototype.emit.call(this);
|
||||
};
|
||||
|
||||
PositionAdjustingLexer.prototype.handleAcceptPositionForIdentifier = function() {
|
||||
var tokenText = this.text;
|
||||
var identifierLength = 0;
|
||||
while (identifierLength \< tokenText.length &&
|
||||
PositionAdjustingLexer.isIdentifierChar(tokenText[identifierLength])
|
||||
) {
|
||||
identifierLength += 1;
|
||||
}
|
||||
if (this._input.index > this._tokenStartCharIndex + identifierLength) {
|
||||
var offset = identifierLength - 1;
|
||||
this._interp.resetAcceptPosition(this._tokenStartCharIndex + offset,
|
||||
this._tokenStartLine, this._tokenStartColumn + offset);
|
||||
return true;
|
||||
} else {
|
||||
return false;
|
||||
}
|
||||
};
|
||||
|
||||
PositionAdjustingLexer.prototype.handleAcceptPositionForKeyword = function(keyword) {
|
||||
if (this._input.index > this._tokenStartCharIndex + keyword.length) {
|
||||
var offset = keyword.length - 1;
|
||||
this._interp.resetAcceptPosition(this._tokenStartCharIndex + offset,
|
||||
this._tokenStartLine, this._tokenStartColumn + offset);
|
||||
return true;
|
||||
} else {
|
||||
return false;
|
||||
}
|
||||
};
|
||||
|
||||
PositionAdjustingLexer.isIdentifierChar = function(c) {
|
||||
return c.match(/^[0-9a-zA-Z_]+$/);
|
||||
};
|
||||
|
||||
>>
|
||||
|
||||
BasicListener(X) ::= <<
|
||||
this.LeafListener = function() {
|
||||
this.visitTerminal = function(node) {
|
||||
document.getElementById('output').value += node.symbol.text + '\\n';
|
||||
};
|
||||
return this;
|
||||
};
|
||||
this.LeafListener.prototype = Object.create(<X>Listener.prototype);
|
||||
this.LeafListener.prototype.constructor = this.LeafListener;
|
||||
|
||||
>>
|
||||
|
||||
WalkListener(s) ::= <<
|
||||
var walker = new antlr4.tree.ParseTreeWalker();
|
||||
walker.walk(new this.LeafListener(), <s>);
|
||||
>>
|
||||
|
||||
TreeNodeWithAltNumField(X) ::= <<
|
||||
|
||||
@parser::header {
|
||||
MyRuleNode = function(parent, invokingState) {
|
||||
antlr4.ParserRuleContext.call(this, parent, invokingState);
|
||||
this.altNum = 0;
|
||||
return this;
|
||||
};
|
||||
|
||||
MyRuleNode.prototype = Object.create(antlr4.ParserRuleContext.prototype);
|
||||
MyRuleNode.prototype.constructor = MyRuleNode;
|
||||
}
|
||||
>>
|
||||
|
||||
|
||||
TokenGetterListener(X) ::= <<
|
||||
this.LeafListener = function() {
|
||||
this.exitA = function(ctx) {
|
||||
var str;
|
||||
if(ctx.getChildCount()===2) {
|
||||
str = ctx.INT(0).symbol.text + ' ' + ctx.INT(1).symbol.text + ' ' + antlr4.Utils.arrayToString(ctx.INT());
|
||||
} else {
|
||||
str = ctx.ID().symbol.toString();
|
||||
}
|
||||
document.getElementById('output').value += str + '\\n';
|
||||
};
|
||||
return this;
|
||||
};
|
||||
this.LeafListener.prototype = Object.create(<X>Listener.prototype);
|
||||
this.LeafListener.prototype.constructor = this.LeafListener;
|
||||
|
||||
>>
|
||||
|
||||
RuleGetterListener(X) ::= <<
|
||||
this.LeafListener = function() {
|
||||
this.exitA = function(ctx) {
|
||||
var str;
|
||||
if(ctx.getChildCount()===2) {
|
||||
str = ctx.b(0).start.text + ' ' + ctx.b(1).start.text + ' ' + ctx.b()[0].start.text;
|
||||
} else {
|
||||
str = ctx.b(0).start.text;
|
||||
}
|
||||
document.getElementById('output').value += str + '\\n';
|
||||
};
|
||||
return this;
|
||||
};
|
||||
this.LeafListener.prototype = Object.create(<X>Listener.prototype);
|
||||
this.LeafListener.prototype.constructor = this.LeafListener;
|
||||
|
||||
>>
|
||||
|
||||
|
||||
LRListener(X) ::= <<
|
||||
this.LeafListener = function() {
|
||||
this.exitE = function(ctx) {
|
||||
var str;
|
||||
if(ctx.getChildCount()===3) {
|
||||
str = ctx.e(0).start.text + ' ' + ctx.e(1).start.text + ' ' + ctx.e()[0].start.text;
|
||||
} else {
|
||||
str = ctx.INT().symbol.text;
|
||||
}
|
||||
document.getElementById('output').value += str + '\\n';
|
||||
};
|
||||
return this;
|
||||
};
|
||||
this.LeafListener.prototype = Object.create(<X>Listener.prototype);
|
||||
this.LeafListener.prototype.constructor = this.LeafListener;
|
||||
|
||||
>>
|
||||
|
||||
LRWithLabelsListener(X) ::= <<
|
||||
this.LeafListener = function() {
|
||||
this.exitCall = function(ctx) {
|
||||
var str = ctx.e().start.text + ' ' + ctx.eList();
|
||||
document.getElementById('output').value += str + '\\n';
|
||||
};
|
||||
this.exitInt = function(ctx) {
|
||||
var str = ctx.INT().symbol.text;
|
||||
document.getElementById('output').value += str + '\\n';
|
||||
};
|
||||
return this;
|
||||
};
|
||||
this.LeafListener.prototype = Object.create(<X>Listener.prototype);
|
||||
this.LeafListener.prototype.constructor = this.LeafListener;
|
||||
|
||||
>>
|
||||
|
||||
DeclareContextListGettersFunction() ::= <<
|
||||
function foo() {
|
||||
var s = new SContext();
|
||||
var a = s.a();
|
||||
var b = s.b();
|
||||
};
|
||||
>>
|
||||
|
||||
Declare_foo() ::= "this.foo = function() {document.getElementById('output').value += 'foo' + '\\n';};"
|
||||
|
||||
Invoke_foo() ::= "this.foo();"
|
||||
|
||||
Declare_pred() ::= <<this.pred = function(v) {
|
||||
document.getElementById('output').value += 'eval=' + v.toString() + '\\n';
|
||||
return v;
|
||||
};
|
||||
>>
|
||||
|
||||
Invoke_pred(v) ::= <<this.pred(<v>)>>
|
||||
|
||||
isEmpty ::= [
|
||||
"": true,
|
||||
default: false
|
||||
]
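
The group above is a set of ordinary StringTemplate 4 definitions, so any of them can be rendered directly from the Java test generator. A minimal sketch, assuming the group is saved as JavaScript.test.stg (that file name is an assumption; it is not visible in this hunk):

    import org.stringtemplate.v4.ST;
    import org.stringtemplate.v4.STGroup;
    import org.stringtemplate.v4.STGroupFile;

    public class RenderTemplateDemo {
        public static void main(String[] args) {
            // Load the group and render TextEquals(a), defined above.
            STGroup group = new STGroupFile("JavaScript.test.stg");
            ST st = group.getInstanceOf("TextEquals");
            st.add("a", "start");
            System.out.println(st.render()); // prints: this.text==="start"
        }
    }

The isEmpty map that closes the group is how these templates test for an empty string: looking up any non-empty key falls through to the default entry and yields false, which is what conditionals such as <if(!isEmpty.(test.Errors))> in the test-method templates rely on.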
|
|
@@ -0,0 +1,412 @@
|
|||
IgnoredTests ::= [
|
||||
default: false
|
||||
]
|
||||
|
||||
TestFile(file) ::= <<
|
||||
/* This file is generated by TestGenerator; any edits will be overwritten by the next generation. */
|
||||
package org.antlr.v4.test.runtime.python2;
|
||||
|
||||
import org.junit.Ignore;
|
||||
import org.junit.Test;
|
||||
import static org.junit.Assert.*;
|
||||
|
||||
<if(file.Options.("ImportErrorQueue"))>
|
||||
import org.antlr.v4.test.runtime.java.ErrorQueue;
|
||||
<endif>
|
||||
<if(file.Options.("ImportGrammar"))>
|
||||
import org.antlr.v4.tool.Grammar;
|
||||
<endif>
|
||||
|
||||
@SuppressWarnings("unused")
|
||||
public class Test<file.name> extends BasePython2Test {
|
||||
|
||||
<file.tests:{test | <test>}; separator="\n", wrap, anchor>
|
||||
|
||||
}
|
||||
>>
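
Rendered against a test-descriptor index, TestFile expands to a plain JUnit class whose body is filled in by the method templates that follow. A hand-written sketch of the shape of that output (the class and method names here are illustrative, not taken from a real descriptor):

    package org.antlr.v4.test.runtime.python2;

    import org.junit.Ignore;
    import org.junit.Test;
    import static org.junit.Assert.*;

    @SuppressWarnings("unused")
    public class TestCompositeLexers extends BasePython2Test {

        @Test
        public void testLexerDelegatorInvokesDelegateRule() throws Exception {
            // body emitted by LexerTestMethod / ParserTestMethod below
        }
    }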
|
||||
|
||||
LexerTestMethod(test) ::= <<
|
||||
/* This file and method are generated by TestGenerator; any edits will be overwritten by the next generation. */
|
||||
<testAnnotations(test)>
|
||||
public void test<test.name>() throws Exception {
|
||||
mkdir(tmpdir);
|
||||
|
||||
<test.SlaveGrammars:{grammar |
|
||||
String slave_<grammar> =<writeStringLiteral(test.SlaveGrammars.(grammar))>;
|
||||
writeFile(tmpdir, "<grammar>.g4", slave_<grammar>);
|
||||
}; separator="\n">
|
||||
<test.Grammar:{grammar |
|
||||
<buildStringLiteral(test.Grammar.(grammar), "grammar")>
|
||||
|
||||
<if(test.AfterGrammar)>
|
||||
<test.AfterGrammar>
|
||||
<endif>
|
||||
String input =<writeStringLiteral(test.Input)>;
|
||||
String found = execLexer("<grammar>.g4", grammar, "<grammar><if(test.Options.("CombinedGrammar"))>Lexer<endif>", input, <writeBoolean(test.Options.("ShowDFA"))>);
|
||||
assertEquals(<writeStringLiteral(test.Output)>, found);
|
||||
<if(!isEmpty.(test.Errors))>
|
||||
assertEquals(<writeStringLiteral(test.Errors)>, this.stderrDuringParse);
|
||||
<else>
|
||||
assertNull(this.stderrDuringParse);
|
||||
<endif>
|
||||
}>
|
||||
}
|
||||
|
||||
>>
|
||||
|
||||
CompositeLexerTestMethod(test) ::= <<
|
||||
<LexerTestMethod(test)>
|
||||
>>
|
||||
|
||||
ParserTestMethod(test) ::= <<
|
||||
/* This file and method are generated by TestGenerator; any edits will be overwritten by the next generation. */
|
||||
<testAnnotations(test)>
|
||||
public void test<test.name>() throws Exception {
|
||||
mkdir(tmpdir);
|
||||
|
||||
<test.SlaveGrammars:{grammar |
|
||||
String slave_<grammar> =<writeStringLiteral(test.SlaveGrammars.(grammar))>;
|
||||
<if(test.Options.("SlaveIsLexer"))>
|
||||
rawGenerateAndBuildRecognizer("<grammar>.g4", slave_<grammar>, null, "<grammar>");
|
||||
<else>
|
||||
writeFile(tmpdir, "<grammar>.g4", slave_<grammar>);
|
||||
<endif>
|
||||
}; separator="\n">
|
||||
<test.Grammar:{grammar |
|
||||
<buildStringLiteral(test.Grammar.(grammar), "grammar")>
|
||||
|
||||
<test.AfterGrammar>
|
||||
|
||||
String input =<writeStringLiteral(test.Input)>;
|
||||
String found = execParser("<grammar>.g4", grammar, "<grammar><if(!test.slaveIsLexer)>Parser<endif>", "<if(test.slaveIsLexer)><first(test.slaveGrammars).grammarName><else><grammar>Lexer<endif>", "<grammar>Listener", "<grammar>Visitor", "<test.Rule>", input, <writeBoolean(test.Options.("Debug"))>);
|
||||
|
||||
assertEquals(<writeStringLiteral(test.Output)>, found);
|
||||
<if(!isEmpty.(test.Errors))>
|
||||
assertEquals(<writeStringLiteral(test.Errors)>, this.stderrDuringParse);
|
||||
<else>
|
||||
assertNull(this.stderrDuringParse);
|
||||
<endif>
|
||||
}>
|
||||
}
|
||||
|
||||
>>
|
||||
|
||||
CompositeParserTestMethod(test) ::= <<
|
||||
<ParserTestMethod(test)>
|
||||
>>
|
||||
|
||||
AbstractParserTestMethod(test) ::= <<
|
||||
/* this file and method are generated; any edit will be overwritten by the next generation */
|
||||
String test<test.name>(String input) throws Exception {
|
||||
String grammar = <test.grammar.lines:{ line | "<line>};separator="\\n\" +\n", wrap, anchor>";
|
||||
return execParser("<test.grammar.grammarName>.g4", grammar, "<test.grammar.grammarName>Parser", "<test.grammar.grammarName>Lexer", "<test.startRule>", input, <test.debug>);
|
||||
}
|
||||
|
||||
>>
|
||||
|
||||
ConcreteParserTestMethod(test) ::= <<
|
||||
/* this file and method are generated; any edit will be overwritten by the next generation */
|
||||
@Test
|
||||
public void test<test.name>() throws Exception {
|
||||
String found = test<test.baseName>("<test.input>");
|
||||
assertEquals("<test.expectedOutput>", found);
|
||||
<if(test.expectedErrors)>
|
||||
assertEquals("<test.expectedErrors>", this.stderrDuringParse);
|
||||
<else>
|
||||
assertNull(this.stderrDuringParse);
|
||||
<endif>
|
||||
}
|
||||
|
||||
>>
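
AbstractParserTestMethod and ConcreteParserTestMethod work as a pair: the abstract form emits one helper parameterized by the input string, and each concrete form emits an @Test method that calls it with a specific input/expected-output pair. A rough sketch of the rendered pair for a hypothetical descriptor named Expr (the grammar, input, and expected values below are made up for illustration):

    String testExpr(String input) throws Exception {
        String grammar = "grammar Expr;\n" +
            "s : INT '+' INT EOF ;\n" +
            "INT : [0-9]+ ;";
        return execParser("Expr.g4", grammar, "ExprParser", "ExprLexer", "s", input, false);
    }

    @Test
    public void testExpr_1() throws Exception {
        String found = testExpr("1+2");
        assertEquals("", found);
        assertNull(this.stderrDuringParse);
    }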
|
||||
|
||||
testAnnotations(test) ::= <%
|
||||
@Test
|
||||
<if(test.Options.("Ignore"))>
|
||||
<\n>@Ignore(<writeStringLiteral(test.Options.("Ignore"))>)
|
||||
<elseif(IgnoredTests.(({<file.name>.<test.name>})))>
|
||||
<\n>@Ignore(<writeStringLiteral(IgnoredTests.(({<file.name>.<test.name>})))>)
|
||||
<endif>
|
||||
%>
|
||||
|
||||
buildStringLiteral(text, variable) ::= <<
|
||||
StringBuilder <variable>Builder = new StringBuilder(<strlen.(text)>);
|
||||
<lines.(text):{line|<variable>Builder.append("<escape.(line)>");}; separator="\n">
|
||||
String <variable> = <variable>Builder.toString();
|
||||
>>
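
For reference, buildStringLiteral is what turns a descriptor's grammar text into the generated test: each line becomes one StringBuilder append. For a two-line grammar the rendered statements look roughly like this (assuming the escape map, defined elsewhere in the group, escapes quotes and turns line breaks into \n; the capacity argument comes from strlen.(text)):

    StringBuilder grammarBuilder = new StringBuilder(24);
    grammarBuilder.append("grammar T;\n");
    grammarBuilder.append("s : ID EOF ;\n");
    String grammar = grammarBuilder.toString();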
|
||||
|
||||
writeStringLiteral(text) ::= <%
|
||||
<if(isEmpty.(text))>
|
||||
""
|
||||
<else>
|
||||
<writeLines(lines.(text))>
|
||||
<endif>
|
||||
%>
|
||||
|
||||
writeLines(textLines) ::= <%
|
||||
<if(rest(textLines))>
|
||||
<textLines:{line|
|
||||
<\n> "<escape.(line)>}; separator="\" +">"
|
||||
<else>
|
||||
"<escape.(first(textLines))>"
|
||||
<endif>
|
||||
%>
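
writeStringLiteral and writeLines render the same kind of text inline instead: a single line becomes one quoted literal, while multi-line text becomes a + concatenation with one literal per line, which keeps long expected-output strings readable in the generated assertEquals calls. Roughly:

    assertEquals("1\n" +
        "2\n" +
        "3\n", found);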
|
||||
|
||||
string(text) ::= <<
|
||||
"<escape.(text)>"
|
||||
>>
|
||||
|
||||
writeBoolean(o) ::= "<if(o && !isEmpty.(o))>true<else>false<endif>"
|
||||
|
||||
writeln(s) ::= <<print(<s>)>>
|
||||
|
||||
write(s) ::= <<print(<s>,end='')>>
|
||||
|
||||
False() ::= "False"
|
||||
|
||||
True() ::= "True"
|
||||
|
||||
Not(v) ::= "not <v>"
|
||||
|
||||
Assert(s) ::= ""
|
||||
|
||||
Cast(t,v) ::= "<v>"
|
||||
|
||||
Append(a,b) ::= "<a> + str(<b>)"
|
||||
|
||||
Concat(a,b) ::= "<a><b>"
|
||||
|
||||
DeclareLocal(s,v) ::= "<s> = <v>"
|
||||
|
||||
AssertIsList(v) ::= "assert isinstance(v, (list, tuple))"
|
||||
|
||||
AssignLocal(s,v) ::= "<s> = <v>"
|
||||
|
||||
InitIntMember(n,v) ::= <%<n> = <v>%>
|
||||
|
||||
InitBooleanMember(n,v) ::= <%<n> = <v>%>
|
||||
|
||||
GetMember(n) ::= <%self.<n>%>
|
||||
|
||||
SetMember(n,v) ::= <%self.<n> = <v>%>
|
||||
|
||||
AddMember(n,v) ::= <%self.<n> += <v>%>
|
||||
|
||||
PlusMember(v,n) ::= <%<v> + str(self.<n>)%>
|
||||
|
||||
MemberEquals(n,v) ::= <%self.<n> == <v>%>
|
||||
|
||||
ModMemberEquals(n,m,v) ::= <%self.<n> % <m> == <v>%>
|
||||
|
||||
ModMemberNotEquals(n,m,v) ::= <%self.<n> % <m> != <v>%>
|
||||
|
||||
DumpDFA() ::= "self.dumpDFA()"
|
||||
|
||||
Pass() ::= "pass"
|
||||
|
||||
StringList() ::= ""
|
||||
|
||||
BuildParseTrees() ::= "self._buildParseTrees = True"
|
||||
|
||||
BailErrorStrategy() ::= <%self._errHandler = BailErrorStrategy()%>
|
||||
|
||||
ToStringTree(s) ::= <%<s>.toStringTree(recog=self)%>
|
||||
|
||||
Column() ::= "self.column"
|
||||
|
||||
Text() ::= "self.text"
|
||||
|
||||
ValEquals(a,b) ::= <%<a>==<b>%>
|
||||
|
||||
TextEquals(a) ::= <%self.text=="<a>"%>
|
||||
|
||||
PlusText(a) ::= <%"<a>" + self.text%>
|
||||
|
||||
InputText() ::= "self._input.getText()"
|
||||
|
||||
LTEquals(i, v) ::= <%self._input.LT(<i>).text==<v>%>
|
||||
|
||||
LANotEquals(i, v) ::= <%self._input.LA(<i>)!=<v>%>
|
||||
|
||||
TokenStartColumnEquals(i) ::= <%self._tokenStartColumn==<i>%>
|
||||
|
||||
ImportListener(X) ::= ""
|
||||
|
||||
GetExpectedTokenNames() ::= "self.getExpectedTokens().toString(self.literalNames, self.symbolicNames)"
|
||||
|
||||
RuleInvocationStack() ::= "str_list(self.getRuleInvocationStack())"
|
||||
|
||||
LL_EXACT_AMBIG_DETECTION() ::= <<self._interp.predictionMode = PredictionMode.LL_EXACT_AMBIG_DETECTION>>
|
||||
|
||||
ParserPropertyMember() ::= <<
|
||||
@members {
|
||||
def Property(self):
|
||||
return True
|
||||
|
||||
}
|
||||
>>
|
||||
|
||||
PositionAdjustingLexer() ::= <<
|
||||
|
||||
def resetAcceptPosition(self, index, line, column):
|
||||
self._input.seek(index)
|
||||
self.line = line
|
||||
self.column = column
|
||||
self._interp.consume(self._input)
|
||||
|
||||
def nextToken(self):
|
||||
if self._interp.__dict__.get("resetAcceptPosition", None) is None:
|
||||
self._interp.__dict__["resetAcceptPosition"] = self.resetAcceptPosition
|
||||
return super(type(self),self).nextToken()
|
||||
|
||||
def emit(self):
|
||||
if self._type==PositionAdjustingLexer.TOKENS:
|
||||
self.handleAcceptPositionForKeyword("tokens")
|
||||
elif self._type==PositionAdjustingLexer.LABEL:
|
||||
self.handleAcceptPositionForIdentifier()
|
||||
return super(type(self),self).emit()
|
||||
|
||||
def handleAcceptPositionForIdentifier(self):
|
||||
tokenText = self.text
|
||||
identifierLength = 0
|
||||
while identifierLength \< len(tokenText) and self.isIdentifierChar(tokenText[identifierLength]):
|
||||
identifierLength += 1
|
||||
|
||||
if self._input.index > self._tokenStartCharIndex + identifierLength:
|
||||
offset = identifierLength - 1
|
||||
self._interp.resetAcceptPosition(self._tokenStartCharIndex + offset,
|
||||
self._tokenStartLine, self._tokenStartColumn + offset)
|
||||
return True
|
||||
else:
|
||||
return False
|
||||
|
||||
|
||||
def handleAcceptPositionForKeyword(self, keyword):
|
||||
if self._input.index > self._tokenStartCharIndex + len(keyword):
|
||||
offset = len(keyword) - 1
|
||||
self._interp.resetAcceptPosition(self._tokenStartCharIndex + offset,
|
||||
self._tokenStartLine, self._tokenStartColumn + offset)
|
||||
return True
|
||||
else:
|
||||
return False
|
||||
|
||||
@staticmethod
|
||||
def isIdentifierChar(c):
|
||||
return c.isalnum() or c == '_'
|
||||
|
||||
>>
|
||||
|
||||
BasicListener(X) ::= <<
|
||||
if __name__ is not None and "." in __name__:
|
||||
from .<X>Listener import <X>Listener
|
||||
else:
|
||||
from <X>Listener import <X>Listener
|
||||
|
||||
class LeafListener(TListener):
|
||||
def visitTerminal(self, node):
|
||||
print(node.symbol.text)
|
||||
|
||||
>>
|
||||
|
||||
WalkListener(s) ::= <<
|
||||
walker = ParseTreeWalker()
|
||||
walker.walk(TParser.LeafListener(), <s>)
|
||||
>>
|
||||
|
||||
TreeNodeWithAltNumField(X) ::= <<
|
||||
@parser::members {
|
||||
class MyRuleNode(ParserRuleContext):
|
||||
def __init__(self, parent = None, invokingStateNumber = None ):
|
||||
super(<X>Parser.MyRuleNode, self).__init__(parent, invokingStateNumber)
|
||||
self.altNum = 0;
|
||||
def getAltNumber(self):
|
||||
return self.altNum
|
||||
def setAltNumber(self, altNum):
|
||||
self.altNum = altNum
|
||||
}
|
||||
>>
|
||||
|
||||
TokenGetterListener(X) ::= <<
|
||||
if __name__ is not None and "." in __name__:
|
||||
from .<X>Listener import <X>Listener
|
||||
else:
|
||||
from <X>Listener import <X>Listener
|
||||
|
||||
class LeafListener(TListener):
|
||||
def exitA(self, ctx):
|
||||
if ctx.getChildCount()==2:
|
||||
print(ctx.INT(0).symbol.text + ' ' + ctx.INT(1).symbol.text + ' ' + str_list(ctx.INT()))
|
||||
else:
|
||||
print(str(ctx.ID().symbol))
|
||||
|
||||
>>
|
||||
|
||||
RuleGetterListener(X) ::= <<
|
||||
if __name__ is not None and "." in __name__:
|
||||
from .<X>Listener import <X>Listener
|
||||
else:
|
||||
from <X>Listener import <X>Listener
|
||||
|
||||
class LeafListener(TListener):
|
||||
def exitA(self, ctx):
|
||||
if ctx.getChildCount()==2:
|
||||
print(ctx.b(0).start.text + ' ' + ctx.b(1).start.text + ' ' + ctx.b()[0].start.text)
|
||||
else:
|
||||
print(ctx.b(0).start.text)
|
||||
|
||||
>>
|
||||
|
||||
|
||||
LRListener(X) ::= <<
|
||||
if __name__ is not None and "." in __name__:
|
||||
from .<X>Listener import <X>Listener
|
||||
else:
|
||||
from <X>Listener import <X>Listener
|
||||
|
||||
class LeafListener(TListener):
|
||||
def exitE(self, ctx):
|
||||
if ctx.getChildCount()==3:
|
||||
print(ctx.e(0).start.text + ' ' + ctx.e(1).start.text + ' ' + ctx.e()[0].start.text)
|
||||
else:
|
||||
print(ctx.INT().symbol.text)
|
||||
|
||||
>>
|
||||
|
||||
LRWithLabelsListener(X) ::= <<
|
||||
if __name__ is not None and "." in __name__:
|
||||
from .<X>Listener import <X>Listener
|
||||
else:
|
||||
from <X>Listener import <X>Listener
|
||||
|
||||
class LeafListener(TListener):
|
||||
def exitCall(self, ctx):
|
||||
print(ctx.e().start.text + ' ' + str(ctx.eList()))
|
||||
def exitInt(self, ctx):
|
||||
print(ctx.INT().symbol.text)
|
||||
|
||||
>>
|
||||
|
||||
DeclareContextListGettersFunction() ::= <<
|
||||
def foo():
|
||||
s = SContext()
|
||||
a = s.a()
|
||||
b = s.b()
|
||||
>>
|
||||
|
||||
Declare_foo() ::= <<def foo(self):
|
||||
print('foo')
|
||||
>>
|
||||
|
||||
Invoke_foo() ::= "self.foo()"
|
||||
|
||||
Declare_pred() ::= <<def pred(self, v):
|
||||
print('eval=' + str(v).lower())
|
||||
return v
|
||||
|
||||
>>
|
||||
|
||||
Invoke_pred(v) ::= <<self.pred(<v>)>>
|
||||
|
||||
isEmpty ::= [
|
||||
"": true,
|
||||
default: false
|
||||
]
|
|
@@ -0,0 +1,394 @@
|
|||
IgnoredTests ::= [
|
||||
default: false
|
||||
]
|
||||
|
||||
TestFile(file) ::= <<
|
||||
/* This file is generated by TestGenerator; any edits will be overwritten by the next generation. */
|
||||
package org.antlr.v4.test.runtime.python3;
|
||||
|
||||
import org.junit.Ignore;
|
||||
import org.junit.Test;
|
||||
import static org.junit.Assert.*;
|
||||
|
||||
<if(file.Options.("ImportErrorQueue"))>
|
||||
import org.antlr.v4.test.runtime.java.ErrorQueue;
|
||||
<endif>
|
||||
<if(file.Options.("ImportGrammar"))>
|
||||
import org.antlr.v4.tool.Grammar;
|
||||
<endif>
|
||||
|
||||
@SuppressWarnings("unused")
|
||||
public class Test<file.name> extends BasePython3Test {
|
||||
|
||||
<file.tests:{test | <test>}; separator="\n", wrap, anchor>
|
||||
|
||||
}
|
||||
>>
|
||||
|
||||
LexerTestMethod(test) ::= <<
|
||||
/* This file and method are generated by TestGenerator; any edits will be overwritten by the next generation. */
|
||||
<testAnnotations(test)>
|
||||
public void test<test.name>() throws Exception {
|
||||
mkdir(tmpdir);
|
||||
|
||||
<test.SlaveGrammars:{grammar |
|
||||
String slave_<grammar> =<writeStringLiteral(test.SlaveGrammars.(grammar))>;
|
||||
writeFile(tmpdir, "<grammar>.g4", slave_<grammar>);
|
||||
}; separator="\n">
|
||||
<test.Grammar:{grammar |
|
||||
<buildStringLiteral(test.Grammar.(grammar), "grammar")>
|
||||
|
||||
<if(test.AfterGrammar)>
|
||||
<test.AfterGrammar>
|
||||
<endif>
|
||||
String input =<writeStringLiteral(test.Input)>;
|
||||
String found = execLexer("<grammar>.g4", grammar, "<grammar><if(test.Options.("CombinedGrammar"))>Lexer<endif>", input, <writeBoolean(test.Options.("ShowDFA"))>);
|
||||
assertEquals(<writeStringLiteral(test.Output)>, found);
|
||||
<if(!isEmpty.(test.Errors))>
|
||||
assertEquals(<writeStringLiteral(test.Errors)>, this.stderrDuringParse);
|
||||
<else>
|
||||
assertNull(this.stderrDuringParse);
|
||||
<endif>
|
||||
}>
|
||||
}
|
||||
|
||||
>>
|
||||
|
||||
CompositeLexerTestMethod(test) ::= <<
|
||||
<LexerTestMethod(test)>
|
||||
>>
|
||||
|
||||
ParserTestMethod(test) ::= <<
|
||||
/* This file and method are generated by TestGenerator; any edits will be overwritten by the next generation. */
|
||||
<testAnnotations(test)>
|
||||
public void test<test.name>() throws Exception {
|
||||
mkdir(tmpdir);
|
||||
|
||||
<test.SlaveGrammars:{grammar |
|
||||
String slave_<grammar> =<writeStringLiteral(test.SlaveGrammars.(grammar))>;
|
||||
<if(test.Options.("SlaveIsLexer"))>
|
||||
rawGenerateAndBuildRecognizer("<grammar>.g4", slave_<grammar>, null, "<grammar>");
|
||||
<else>
|
||||
writeFile(tmpdir, "<grammar>.g4", slave_<grammar>);
|
||||
<endif>
|
||||
}; separator="\n">
|
||||
<test.Grammar:{grammar |
|
||||
<buildStringLiteral(test.Grammar.(grammar), "grammar")>
|
||||
|
||||
<test.AfterGrammar>
|
||||
|
||||
String input =<writeStringLiteral(test.Input)>;
|
||||
String found = execParser("<grammar>.g4", grammar, "<grammar><if(!test.slaveIsLexer)>Parser<endif>", "<if(test.slaveIsLexer)><first(test.slaveGrammars).grammarName><else><grammar>Lexer<endif>", "<grammar>Listener", "<grammar>Visitor", "<test.Rule>", input, <writeBoolean(test.Options.("Debug"))>);
|
||||
|
||||
assertEquals(<writeStringLiteral(test.Output)>, found);
|
||||
<if(!isEmpty.(test.Errors))>
|
||||
assertEquals(<writeStringLiteral(test.Errors)>, this.stderrDuringParse);
|
||||
<else>
|
||||
assertNull(this.stderrDuringParse);
|
||||
<endif>
|
||||
}>
|
||||
}
|
||||
|
||||
>>
|
||||
|
||||
CompositeParserTestMethod(test) ::= <<
|
||||
<ParserTestMethod(test)>
|
||||
>>
|
||||
|
||||
AbstractParserTestMethod(test) ::= <<
|
||||
/* this file and method are generated; any edit will be overwritten by the next generation */
|
||||
String test<test.name>(String input) throws Exception {
|
||||
String grammar = <test.grammar.lines:{ line | "<line>};separator="\\n\" +\n", wrap, anchor>";
|
||||
return execParser("<test.grammar.grammarName>.g4", grammar, "<test.grammar.grammarName>Parser", "<test.grammar.grammarName>Lexer", "<test.startRule>", input, <test.debug>);
|
||||
}
|
||||
|
||||
>>
|
||||
|
||||
ConcreteParserTestMethod(test) ::= <<
|
||||
/* this file and method are generated; any edit will be overwritten by the next generation */
|
||||
@Test
|
||||
public void test<test.name>() throws Exception {
|
||||
String found = test<test.baseName>("<test.input>");
|
||||
assertEquals("<test.expectedOutput>", found);
|
||||
<if(test.expectedErrors)>
|
||||
assertEquals("<test.expectedErrors>", this.stderrDuringParse);
|
||||
<else>
|
||||
assertNull(this.stderrDuringParse);
|
||||
<endif>
|
||||
}
|
||||
|
||||
>>
|
||||
|
||||
testAnnotations(test) ::= <%
|
||||
@Test
|
||||
<if(test.Options.("Ignore"))>
|
||||
<\n>@Ignore(<writeStringLiteral(test.Options.("Ignore"))>)
|
||||
<elseif(IgnoredTests.(({<file.name>.<test.name>})))>
|
||||
<\n>@Ignore(<writeStringLiteral(IgnoredTests.(({<file.name>.<test.name>})))>)
|
||||
<endif>
|
||||
%>
|
||||
|
||||
buildStringLiteral(text, variable) ::= <<
|
||||
StringBuilder <variable>Builder = new StringBuilder(<strlen.(text)>);
|
||||
<lines.(text):{line|<variable>Builder.append("<escape.(line)>");}; separator="\n">
|
||||
String <variable> = <variable>Builder.toString();
|
||||
>>
|
||||
|
||||
writeStringLiteral(text) ::= <%
|
||||
<if(isEmpty.(text))>
|
||||
""
|
||||
<else>
|
||||
<writeLines(lines.(text))>
|
||||
<endif>
|
||||
%>
|
||||
|
||||
writeLines(textLines) ::= <%
|
||||
<if(rest(textLines))>
|
||||
<textLines:{line|
|
||||
<\n> "<escape.(line)>}; separator="\" +">"
|
||||
<else>
|
||||
"<escape.(first(textLines))>"
|
||||
<endif>
|
||||
%>
|
||||
|
||||
string(text) ::= <<
|
||||
"<escape.(text)>"
|
||||
>>
|
||||
|
||||
writeBoolean(o) ::= "<if(o && !isEmpty.(o))>true<else>false<endif>"
|
||||
|
||||
writeln(s) ::= <<print(<s>)>>
|
||||
|
||||
write(s) ::= <<print(<s>,end='')>>
|
||||
|
||||
False() ::= "False"
|
||||
|
||||
True() ::= "True"
|
||||
|
||||
Not(v) ::= "not <v>"
|
||||
|
||||
Assert(s) ::= ""
|
||||
|
||||
Cast(t,v) ::= "<v>"
|
||||
|
||||
Append(a,b) ::= "<a> + str(<b>)"
|
||||
|
||||
Concat(a,b) ::= "<a><b>"
|
||||
|
||||
DeclareLocal(s,v) ::= "<s> = <v>"
|
||||
|
||||
AssertIsList(v) ::= "assert isinstance(v, (list, tuple))"
|
||||
|
||||
AssignLocal(s,v) ::= "<s> = <v>"
|
||||
|
||||
InitIntMember(n,v) ::= <%<n> = <v>%>
|
||||
|
||||
InitBooleanMember(n,v) ::= <%<n> = <v>%>
|
||||
|
||||
GetMember(n) ::= <%self.<n>%>
|
||||
|
||||
SetMember(n,v) ::= <%self.<n> = <v>%>
|
||||
|
||||
AddMember(n,v) ::= <%self.<n> += <v>%>
|
||||
|
||||
PlusMember(v,n) ::= <%<v> + str(self.<n>)%>
|
||||
|
||||
MemberEquals(n,v) ::= <%self.<n> == <v>%>
|
||||
|
||||
ModMemberEquals(n,m,v) ::= <%self.<n> % <m> == <v>%>
|
||||
|
||||
ModMemberNotEquals(n,m,v) ::= <%self.<n> % <m> != <v>%>
|
||||
|
||||
DumpDFA() ::= "self.dumpDFA()"
|
||||
|
||||
Pass() ::= "pass"
|
||||
|
||||
StringList() ::= ""
|
||||
|
||||
BuildParseTrees() ::= "self._buildParseTrees = True"
|
||||
|
||||
BailErrorStrategy() ::= <%self._errHandler = BailErrorStrategy()%>
|
||||
|
||||
ToStringTree(s) ::= <%<s>.toStringTree(recog=self)%>
|
||||
|
||||
Column() ::= "self.column"
|
||||
|
||||
Text() ::= "self.text"
|
||||
|
||||
ValEquals(a,b) ::= <%<a>==<b>%>
|
||||
|
||||
TextEquals(a) ::= <%self.text=="<a>"%>
|
||||
|
||||
PlusText(a) ::= <%"<a>" + self.text%>
|
||||
|
||||
InputText() ::= "self._input.getText()"
|
||||
|
||||
LTEquals(i, v) ::= <%self._input.LT(<i>).text==<v>%>
|
||||
|
||||
LANotEquals(i, v) ::= <%self._input.LA(<i>)!=<v>%>
|
||||
|
||||
TokenStartColumnEquals(i) ::= <%self._tokenStartColumn==<i>%>
|
||||
|
||||
ImportListener(X) ::= <<class MockListener:
|
||||
pass
|
||||
>>
|
||||
|
||||
GetExpectedTokenNames() ::= "self.getExpectedTokens().toString(self.literalNames, self.symbolicNames)"
|
||||
|
||||
RuleInvocationStack() ::= "str_list(self.getRuleInvocationStack())"
|
||||
|
||||
LL_EXACT_AMBIG_DETECTION() ::= <<self._interp.predictionMode = PredictionMode.LL_EXACT_AMBIG_DETECTION>>
|
||||
|
||||
ParserPropertyMember() ::= <<
|
||||
@members {
|
||||
def Property(self):
|
||||
return True
|
||||
|
||||
}
|
||||
>>
|
||||
|
||||
PositionAdjustingLexer() ::= <<
|
||||
|
||||
def resetAcceptPosition(self, index, line, column):
|
||||
self._input.seek(index)
|
||||
self.line = line
|
||||
self.column = column
|
||||
self._interp.consume(self._input)
|
||||
|
||||
def nextToken(self):
|
||||
if self._interp.__dict__.get("resetAcceptPosition", None) is None:
|
||||
self._interp.__dict__["resetAcceptPosition"] = self.resetAcceptPosition
|
||||
return super(type(self),self).nextToken()
|
||||
|
||||
def emit(self):
|
||||
if self._type==PositionAdjustingLexer.TOKENS:
|
||||
self.handleAcceptPositionForKeyword("tokens")
|
||||
elif self._type==PositionAdjustingLexer.LABEL:
|
||||
self.handleAcceptPositionForIdentifier()
|
||||
return super(type(self),self).emit()
|
||||
|
||||
def handleAcceptPositionForIdentifier(self):
|
||||
tokenText = self.text
|
||||
identifierLength = 0
|
||||
while identifierLength \< len(tokenText) and self.isIdentifierChar(tokenText[identifierLength]):
|
||||
identifierLength += 1
|
||||
|
||||
if self._input.index > self._tokenStartCharIndex + identifierLength:
|
||||
offset = identifierLength - 1
|
||||
self._interp.resetAcceptPosition(self._tokenStartCharIndex + offset,
|
||||
self._tokenStartLine, self._tokenStartColumn + offset)
|
||||
return True
|
||||
else:
|
||||
return False
|
||||
|
||||
|
||||
def handleAcceptPositionForKeyword(self, keyword):
|
||||
if self._input.index > self._tokenStartCharIndex + len(keyword):
|
||||
offset = len(keyword) - 1
|
||||
self._interp.resetAcceptPosition(self._tokenStartCharIndex + offset,
|
||||
self._tokenStartLine, self._tokenStartColumn + offset)
|
||||
return True
|
||||
else:
|
||||
return False
|
||||
|
||||
@staticmethod
|
||||
def isIdentifierChar(c):
|
||||
return c.isalnum() or c == '_'
|
||||
|
||||
>>
|
||||
|
||||
BasicListener(X) ::= <<
|
||||
class LeafListener(MockListener):
|
||||
def visitTerminal(self, node):
|
||||
print(node.symbol.text)
|
||||
|
||||
>>
|
||||
|
||||
WalkListener(s) ::= <<
|
||||
if __name__ is not None and "." in __name__:
|
||||
from .TListener import TListener
|
||||
else:
|
||||
from TListener import TListener
|
||||
TParser.LeafListener.__bases__ = (TListener,)
|
||||
walker = ParseTreeWalker()
|
||||
walker.walk(TParser.LeafListener(), <s>)
|
||||
>>
|
||||
|
||||
TreeNodeWithAltNumField(X) ::= <<
|
||||
@parser::members {
|
||||
class MyRuleNode(ParserRuleContext):
|
||||
def __init__(self, parent:ParserRuleContext = None, invokingStateNumber:int = None ):
|
||||
super(<X>Parser.MyRuleNode, self).__init__(parent, invokingStateNumber)
|
||||
self.altNum = 0;
|
||||
def getAltNumber(self):
|
||||
return self.altNum
|
||||
def setAltNumber(self, altNum):
|
||||
self.altNum = altNum
|
||||
}
|
||||
>>
|
||||
|
||||
TokenGetterListener(X) ::= <<
|
||||
class LeafListener(MockListener):
|
||||
def exitA(self, ctx):
|
||||
if ctx.getChildCount()==2:
|
||||
print(ctx.INT(0).symbol.text + ' ' + ctx.INT(1).symbol.text + ' ' + str_list(ctx.INT()))
|
||||
else:
|
||||
print(str(ctx.ID().symbol))
|
||||
|
||||
>>
|
||||
|
||||
RuleGetterListener(X) ::= <<
|
||||
class LeafListener(MockListener):
|
||||
def exitA(self, ctx):
|
||||
if ctx.getChildCount()==2:
|
||||
print(ctx.b(0).start.text + ' ' + ctx.b(1).start.text + ' ' + ctx.b()[0].start.text)
|
||||
else:
|
||||
print(ctx.b(0).start.text)
|
||||
|
||||
>>
|
||||
|
||||
|
||||
LRListener(X) ::= <<
|
||||
class LeafListener(MockListener):
|
||||
def exitE(self, ctx):
|
||||
if ctx.getChildCount()==3:
|
||||
print(ctx.e(0).start.text + ' ' + ctx.e(1).start.text + ' ' + ctx.e()[0].start.text)
|
||||
else:
|
||||
print(ctx.INT().symbol.text)
|
||||
|
||||
>>
|
||||
|
||||
LRWithLabelsListener(X) ::= <<
|
||||
class LeafListener(MockListener):
|
||||
def exitCall(self, ctx):
|
||||
print(ctx.e().start.text + ' ' + str(ctx.eList()))
|
||||
def exitInt(self, ctx):
|
||||
print(ctx.INT().symbol.text)
|
||||
|
||||
>>
|
||||
|
||||
DeclareContextListGettersFunction() ::= <<
|
||||
def foo():
|
||||
s = SContext()
|
||||
a = s.a()
|
||||
b = s.b()
|
||||
>>
|
||||
|
||||
Declare_foo() ::= <<def foo(self):
|
||||
print('foo')
|
||||
>>
|
||||
|
||||
Invoke_foo() ::= "self.foo()"
|
||||
|
||||
Declare_pred() ::= <<def pred(self, v):
|
||||
print('eval=' + str(v).lower())
|
||||
return v
|
||||
|
||||
>>
|
||||
|
||||
Invoke_pred(v) ::= <<self.pred(<v>)>>
|
||||
|
||||
isEmpty ::= [
|
||||
"": true,
|
||||
default: false
|
||||
]
|
|
@@ -0,0 +1,4 @@
|
|||
TestTemplates ::= [
|
||||
"LexerDelegatorInvokesDelegateRule": [],
|
||||
"LexerDelegatorRuleOverridesDelegate": []
|
||||
]
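
This small index maps each test name in the CompositeLexers group to its template arguments (both entries map to an empty argument list, since the descriptors take none). A minimal sketch of how a generator could read it with the ST4 dictionary API; the file name and the use of rawGetDictionary are assumptions about the generator, not something shown in this diff:

    import java.util.Map;
    import org.stringtemplate.v4.STGroup;
    import org.stringtemplate.v4.STGroupFile;

    public class ListCompositeLexerTests {
        public static void main(String[] args) {
            STGroup index = new STGroupFile("CompositeLexers/Index.stg"); // assumed name
            Map<String, Object> tests = index.rawGetDictionary("TestTemplates");
            for (String name : tests.keySet()) {
                if ("default".equals(name)) continue; // skip the map's default entry
                System.out.println("would generate test" + name);
            }
        }
    }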
|
|
@@ -0,0 +1,34 @@
|
|||
TestType() ::= "CompositeLexer"
|
||||
|
||||
Grammar ::= [
|
||||
"M": {<masterGrammar("M", "S")>}
|
||||
]
|
||||
|
||||
SlaveGrammars ::= [
|
||||
"S": {<slaveGrammar("S")>}
|
||||
]
|
||||
|
||||
Input() ::= "abc"
|
||||
|
||||
Output() ::= <<
|
||||
S.A
|
||||
[@0,0:0='a',\<3>,1:0]
|
||||
[@1,1:1='b',\<1>,1:1]
|
||||
[@2,2:2='c',\<4>,1:2]
|
||||
[@3,3:2='\<EOF>',\<-1>,1:3]<\n>
|
||||
>>
|
||||
|
||||
Errors() ::= ""
|
||||
|
||||
masterGrammar(grammarName, slaveGrammarName) ::= <<
|
||||
lexer grammar <grammarName>;
|
||||
import <slaveGrammarName>;
|
||||
B : 'b';
|
||||
WS : (' '|'\n') -> skip ;
|
||||
>>
|
||||
|
||||
slaveGrammar(grammarName) ::= <<
|
||||
lexer grammar <grammarName>;
|
||||
A : 'a' {<writeln("\"S.A\"")>};
|
||||
C : 'c' ;
|
||||
>>
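
Fed through CompositeLexerTestMethod/LexerTestMethod above, this descriptor turns into a JUnit method along the following lines. This is a hand-written sketch rather than generator output: the method name assumes this is the LexerDelegatorInvokesDelegateRule entry from the index (the descriptor's file name is not visible in this hunk), and the action in S.g4 is shown as rendered by the Python groups' writeln. The expected token lines use ANTLR's token dump format, [@tokenIndex,start:stop='text',<type>,line:column].

    @Test
    public void testLexerDelegatorInvokesDelegateRule() throws Exception {
        mkdir(tmpdir);

        String slave_S =
            "lexer grammar S;\n" +
            "A : 'a' {print(\"S.A\")};\n" +
            "C : 'c' ;\n";
        writeFile(tmpdir, "S.g4", slave_S);

        StringBuilder grammarBuilder = new StringBuilder(61); // capacity comes from strlen.(text)
        grammarBuilder.append("lexer grammar M;\n");
        grammarBuilder.append("import S;\n");
        grammarBuilder.append("B : 'b';\n");
        grammarBuilder.append("WS : (' '|'\\n') -> skip ;\n");
        String grammar = grammarBuilder.toString();

        String input = "abc";
        String found = execLexer("M.g4", grammar, "M", input, false);
        assertEquals(
            "S.A\n" +
            "[@0,0:0='a',<3>,1:0]\n" +
            "[@1,1:1='b',<1>,1:1]\n" +
            "[@2,2:2='c',<4>,1:2]\n" +
            "[@3,3:2='<EOF>',<-1>,1:3]\n", found);
        assertNull(this.stderrDuringParse);
    }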
|