forked from jasder/antlr
merged master branch into add-maven-install, but it really is just accepting all of Eric's changes
commit 1a62ba75e5
@@ -1,5 +1,5 @@
 [The "BSD license"]
-Copyright (c) 2013 Terence Parr, Sam Harwell
+Copyright (c) 2014 Terence Parr, Sam Harwell
 All rights reserved.
 
 Redistribution and use in source and binary forms, with or without
@@ -26,10 +26,12 @@ Programmers run into parsing problems all the time. Whether it’s a data format
 root directory name is the all-lowercase name of the language parsed
 by the grammar. For example, java, cpp, csharp, c, etc...
 
-## Authors
+## Authors and major contributors
 
 [Terence Parr](http://www.cs.usfca.edu/~parrt/), parrt@cs.usfca.edu
 ANTLR project lead and supreme dictator for life
 [University of San Francisco](http://www.usfca.edu/)
 
 [Sam Harwell](http://tunnelvisionlabs.com/)
+
+Eric Vergnaud (Python2, Python3 targets)
@@ -150,7 +150,7 @@
 <plugin>
     <groupId>org.apache.maven.plugins</groupId>
     <artifactId>maven-plugin-plugin</artifactId>
-    <version>3.2</version>
+    <version>3.3</version>
     <configuration>
        <!-- see http://jira.codehaus.org/browse/MNG-5346 -->
        <skipErrorNoDescriptorsFound>true</skipErrorNoDescriptorsFound>
@@ -195,7 +195,7 @@
 <plugin>
     <groupId>org.apache.maven.plugins</groupId>
     <artifactId>maven-plugin-plugin</artifactId>
-    <version>3.2</version>
+    <version>3.3</version>
 </plugin>
 
 <plugin>
@@ -63,9 +63,9 @@ public class Antlr4ErrorLog implements ANTLRToolListener {
 
    /**
     * {@inheritDoc}
-    * <p/>
+    * <p>
     * This implementation passes the message to the Maven log.
-    *
+    * </p>
     * @param message The message to send to Maven
     */
    @Override
@@ -78,9 +78,9 @@ public class Antlr4ErrorLog implements ANTLRToolListener {
 
    /**
     * {@inheritDoc}
-    * <p/>
+    * <p>
     * This implementation passes the message to the Maven log.
-    *
+    * </p>
     * @param message The message to send to Maven.
     */
    @Override
@@ -101,9 +101,9 @@ public class Antlr4ErrorLog implements ANTLRToolListener {
 
    /**
     * {@inheritDoc}
-    * <p/>
+    * <p>
     * This implementation passes the message to the Maven log.
-    *
+    * </p>
     * @param message
     */
    @Override
@@ -146,10 +146,11 @@ public class Antlr4Mojo extends AbstractMojo {
     * the generate phase of the plugin. Note that the plugin is smart enough to
     * realize that imported grammars should be included but not acted upon
     * directly by the ANTLR Tool.
-    * <p/>
+    * <p>
     * A set of Ant-like inclusion patterns used to select files from the source
     * directory for processing. By default, the pattern
     * <code>**/*.g4</code> is used to select grammar files.
+    * </p>
     */
    @Parameter
    protected Set<String> includes = new HashSet<String>();
bild.py (8 changes)
@@ -3,12 +3,15 @@ import os
 import string
 
 """
-This script use my experimental build tool http://www.bildtool.org
+This script uses my experimental build tool http://www.bildtool.org
 
 In order to build the complete ANTLR4 product with Java, Python 2, and Python 3
 targets, do the following from a UNIX command line. Windows build using this script
 is not yet supported. Please use the mvn build or ant build.
 
+!!!You must set path values in test_properties dictionary below to ensure Python
+tests run.!!!
+
 mkdir -p /usr/local/antlr # somewhere appropriate where you want to install stuff
 cd /usr/local/antlr
 git clone git@github.com:parrt/antlr4.git
@@ -59,7 +62,6 @@ def parsers():
           args=["-lib", uniformpath("gen3/org/antlr/v4/parse")])
     antlr4("runtime/Java/src/org/antlr/v4/runtime/tree/xpath", "gen4", package="org.antlr.v4.runtime.tree.xpath")
 
-
 def compile():
     require(parsers)
     cp = uniformpath("out") + os.pathsep + \
@@ -125,7 +127,6 @@ def mkjar_runtime():
     jar(jarfile, srcdir="out/runtime", manifest=manifest)
     print "Generated " + jarfile
 
-
 def mkjar():
     mkjar_complete()
     # put it in JARCARCHE too so bild can find it during antlr4()
@@ -155,7 +156,6 @@ def tests():
         javac(TARGETS[t] + "/tool/test", "out/test/" + t, version="1.6", cp=cp, args=args)
         junit("out/test/" + t, cp=cp, verbose=False, args=properties)
 
-
 def all():
     clean(True)
     mkjar()
@@ -20,7 +20,7 @@
 </target>
 
 <target name="antlr4-init" depends="basic-init">
-    <property name="antlr4.version" value="4.2.2"/>
+    <property name="antlr4.version" value="4.3"/>
     <property name="antlr4.jar.name" value="antlr-${antlr4.version}-complete.jar"/>
     <property name="antlr4.jar" value="${lib.dir}/${antlr4.jar.name}"/>
     <mkdir dir="${lib.dir}"/>
@@ -32,7 +32,7 @@
 </target>
 
 <target name="build-init" depends="basic-init">
-    <property name="version" value="4.4-SNAPSHOT"/>
+    <property name="version" value="4.4"/>
     <property name="build.sysclasspath" value="ignore"/>
     <property name="install.root.dir" value="${dist.dir}/antlr-${version}" />
     <property name="jar.file" value="${dist.dir}/antlr-${version}-complete.jar" />
@@ -58,4 +58,8 @@ YYYY/MM/DD, github id, Full name, email
 2014/03/18, aphyr, Kyle Kingsbury, aphyr@aphyr.com
 2014/06/07, ericvergnaud, Eric Vergnaud, eric.vergnaud@wanadoo.fr
 2014/07/04, jimidle, Jim Idle, jimi@Idle.ws
-
+2014/09/04. jeduden, Jan-Eric Duden, jeduden@gmail.com
+2014/09/27, petrbel, Petr Bělohlávek, antlr@petrbel.cz
+2014/10/18, sergiusignacius, Sérgio Silva, serge.a.silva@gmail.com
+2014/10/26, bdkearns, Brian Kearns, bdkearns@gmail.com
+2014/10/27, michaelpj, Michael Peyton Jones, michaelpj@gmail.com
@@ -29,6 +29,8 @@
  */
 package org.antlr.v4.runtime;
 
+import org.antlr.v4.runtime.misc.NotNull;
+import org.antlr.v4.runtime.misc.Nullable;
 import org.antlr.v4.runtime.misc.Utils;
 
 import java.io.IOException;
@@ -40,16 +42,16 @@ import java.io.IOException;
 public class ANTLRFileStream extends ANTLRInputStream {
     protected String fileName;
 
-    public ANTLRFileStream(String fileName) throws IOException {
+    public ANTLRFileStream(@NotNull String fileName) throws IOException {
        this(fileName, null);
     }
 
-    public ANTLRFileStream(String fileName, String encoding) throws IOException {
+    public ANTLRFileStream(@NotNull String fileName, String encoding) throws IOException {
        this.fileName = fileName;
        load(fileName, encoding);
     }
 
-    public void load(String fileName, String encoding)
+    public void load(@NotNull String fileName, @Nullable String encoding)
        throws IOException
     {
        data = Utils.readFile(fileName, encoding);
@@ -238,6 +238,10 @@ public class ANTLRInputStream implements CharStream {
 
     @Override
     public String getSourceName() {
+        if (name == null || name.isEmpty()) {
+            return UNKNOWN_SOURCE_NAME;
+        }
+
        return name;
     }
 
@@ -323,7 +323,7 @@ public class DefaultErrorStrategy implements ANTLRErrorStrategy {
                                      @NotNull InputMismatchException e)
     {
        String msg = "mismatched input "+getTokenErrorDisplay(e.getOffendingToken())+
-            " expecting "+e.getExpectedTokens().toString(recognizer.getTokenNames());
+            " expecting "+e.getExpectedTokens().toString(recognizer.getVocabulary());
        recognizer.notifyErrorListeners(e.getOffendingToken(), msg, e);
     }
 
@@ -373,7 +373,7 @@ public class DefaultErrorStrategy implements ANTLRErrorStrategy {
        String tokenName = getTokenErrorDisplay(t);
        IntervalSet expecting = getExpectedTokens(recognizer);
        String msg = "extraneous input "+tokenName+" expecting "+
-            expecting.toString(recognizer.getTokenNames());
+            expecting.toString(recognizer.getVocabulary());
        recognizer.notifyErrorListeners(t, msg, null);
     }
 
@@ -403,7 +403,7 @@ public class DefaultErrorStrategy implements ANTLRErrorStrategy {
 
        Token t = recognizer.getCurrentToken();
        IntervalSet expecting = getExpectedTokens(recognizer);
-        String msg = "missing "+expecting.toString(recognizer.getTokenNames())+
+        String msg = "missing "+expecting.toString(recognizer.getVocabulary())+
            " at "+getTokenErrorDisplay(t);
 
        recognizer.notifyErrorListeners(t, msg, null);
@@ -581,7 +581,7 @@ public class DefaultErrorStrategy implements ANTLRErrorStrategy {
        int expectedTokenType = expecting.getMinElement(); // get any element
        String tokenText;
        if ( expectedTokenType== Token.EOF ) tokenText = "<missing EOF>";
-        else tokenText = "<missing "+recognizer.getTokenNames()[expectedTokenType]+">";
+        else tokenText = "<missing "+recognizer.getVocabulary().getDisplayName(expectedTokenType)+">";
        Token current = currentSymbol;
        Token lookback = recognizer.getInputStream().LT(-1);
        if ( current.getType() == Token.EOF && lookback!=null ) {
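All four DefaultErrorStrategy hunks make the same substitution: expected-token sets and missing-token names are now rendered through the new Vocabulary instead of the deprecated getTokenNames() array. A rough sketch of the difference from application code (ExprParser and its ID token are hypothetical, purely for illustration):

    // Hypothetical generated parser; names are illustrative only.
    ExprParser parser = new ExprParser(tokens);

    // Deprecated after this commit: raw tokenNames entries mixed literal and
    // symbolic spellings and could contain null slots.
    String[] names = parser.getTokenNames();

    // Preferred: the vocabulary chooses a display name per token type,
    // falling back from literal name to symbolic name to the numeric value.
    Vocabulary vocab = parser.getVocabulary();
    String shown = vocab.getDisplayName(ExprParser.ID);   // e.g. "ID"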
@@ -274,14 +274,9 @@ public abstract class Lexer extends Recognizer<Integer, LexerATNSimulator>
 
     public Token emitEOF() {
        int cpos = getCharPositionInLine();
-        // The character position for EOF is one beyond the position of
-        // the previous token's last character
-        if ( _token !=null ) {
-            int n = _token.getStopIndex() - _token.getStartIndex() + 1;
-            cpos = _token.getCharPositionInLine()+n;
-        }
+        int line = getLine();
        Token eof = _factory.create(_tokenFactorySourcePair, Token.EOF, null, Token.DEFAULT_CHANNEL, _input.index(), _input.index()-1,
-                                    getLine(), cpos);
+                                    line, cpos);
        emit(eof);
        return eof;
     }
@@ -358,6 +353,7 @@ public abstract class Lexer extends Recognizer<Integer, LexerATNSimulator>
     * that overrides this to point to their String[] tokenNames.
     */
    @Override
+   @Deprecated
    public String[] getTokenNames() {
        return null;
    }
@@ -35,6 +35,7 @@ import org.antlr.v4.runtime.atn.ATNType;
 import org.antlr.v4.runtime.atn.LexerATNSimulator;
 import org.antlr.v4.runtime.atn.PredictionContextCache;
 import org.antlr.v4.runtime.dfa.DFA;
+import org.antlr.v4.runtime.misc.NotNull;
 
 import java.util.Collection;
 
@@ -42,15 +43,24 @@ public class LexerInterpreter extends Lexer {
     protected final String grammarFileName;
     protected final ATN atn;
 
+    @Deprecated
     protected final String[] tokenNames;
     protected final String[] ruleNames;
     protected final String[] modeNames;
 
+    @NotNull
+    private final Vocabulary vocabulary;
+
     protected final DFA[] _decisionToDFA;
     protected final PredictionContextCache _sharedContextCache =
        new PredictionContextCache();
 
+    @Deprecated
     public LexerInterpreter(String grammarFileName, Collection<String> tokenNames, Collection<String> ruleNames, Collection<String> modeNames, ATN atn, CharStream input) {
+        this(grammarFileName, VocabularyImpl.fromTokenNames(tokenNames.toArray(new String[tokenNames.size()])), ruleNames, modeNames, atn, input);
+    }
+
+    public LexerInterpreter(String grammarFileName, @NotNull Vocabulary vocabulary, Collection<String> ruleNames, Collection<String> modeNames, ATN atn, CharStream input) {
        super(input);
 
        if (atn.grammarType != ATNType.LEXER) {
@@ -59,9 +69,14 @@ public class LexerInterpreter extends Lexer {
 
        this.grammarFileName = grammarFileName;
        this.atn = atn;
-        this.tokenNames = tokenNames.toArray(new String[tokenNames.size()]);
+        this.tokenNames = new String[atn.maxTokenType];
+        for (int i = 0; i < tokenNames.length; i++) {
+            tokenNames[i] = vocabulary.getDisplayName(i);
+        }
+
        this.ruleNames = ruleNames.toArray(new String[ruleNames.size()]);
        this.modeNames = modeNames.toArray(new String[modeNames.size()]);
+        this.vocabulary = vocabulary;
 
        this._decisionToDFA = new DFA[atn.getNumberOfDecisions()];
        for (int i = 0; i < _decisionToDFA.length; i++) {
@@ -81,6 +96,7 @@ public class LexerInterpreter extends Lexer {
     }
 
     @Override
+    @Deprecated
     public String[] getTokenNames() {
        return tokenNames;
     }
@@ -94,4 +110,13 @@ public class LexerInterpreter extends Lexer {
     public String[] getModeNames() {
        return modeNames;
     }
+
+    @Override
+    public Vocabulary getVocabulary() {
+        if (vocabulary != null) {
+            return vocabulary;
+        }
+
+        return super.getVocabulary();
+    }
 }
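The two-constructor pattern above keeps old callers compiling while steering new code toward Vocabulary. A sketch of the new entry point (the name arrays and the ATN are assumed to exist, e.g. from a deserialized lexer ATN; nothing here is prescribed by the commit beyond the constructor signature itself):

    // Sketch only: literalNames/symbolicNames/ruleNames/modeNames/atn are assumed.
    Vocabulary vocabulary = new VocabularyImpl(literalNames, symbolicNames);
    LexerInterpreter lexer = new LexerInterpreter(
            "Expr.g4", vocabulary, ruleNames, modeNames, atn,
            new ANTLRInputStream("1+2*3"));
    CommonTokenStream tokens = new CommonTokenStream(lexer);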
@@ -857,7 +857,7 @@ public abstract class Parser extends Recognizer<Token, ParserATNSimulator> {
        List<String> s = new ArrayList<String>();
        for (int d = 0; d < _interp.decisionToDFA.length; d++) {
            DFA dfa = _interp.decisionToDFA[d];
-            s.add( dfa.toString(getTokenNames()) );
+            s.add( dfa.toString(getVocabulary()) );
        }
        return s;
     }
@@ -872,7 +872,7 @@ public abstract class Parser extends Recognizer<Token, ParserATNSimulator> {
            if ( !dfa.states.isEmpty() ) {
                if ( seenOne ) System.out.println();
                System.out.println("Decision " + dfa.decision + ":");
-                System.out.print(dfa.toString(getTokenNames()));
+                System.out.print(dfa.toString(getVocabulary()));
                seenOne = true;
            }
        }
@@ -921,4 +921,14 @@ public abstract class Parser extends Recognizer<Token, ParserATNSimulator> {
            addParseListener(_tracer);
        }
     }
+
+    /**
+     * Gets whether a {@link TraceListener} is registered as a parse listener
+     * for the parser.
+     *
+     * @see #setTrace(boolean)
+     */
+    public boolean isTrace() {
+        return _tracer != null;
+    }
 }
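The new isTrace() simply reports whether setTrace(boolean) left a TraceListener installed; a minimal usage sketch:

    parser.setTrace(true);
    assert parser.isTrace();    // a TraceListener is registered
    parser.setTrace(false);
    assert !parser.isTrace();   // the listener was removed again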
@@ -45,6 +45,7 @@ import org.antlr.v4.runtime.atn.RuleTransition;
 import org.antlr.v4.runtime.atn.StarLoopEntryState;
 import org.antlr.v4.runtime.atn.Transition;
 import org.antlr.v4.runtime.dfa.DFA;
+import org.antlr.v4.runtime.misc.NotNull;
 import org.antlr.v4.runtime.misc.Pair;
 
 import java.util.ArrayDeque;
@@ -74,19 +75,36 @@ public class ParserInterpreter extends Parser {
     protected final PredictionContextCache sharedContextCache =
        new PredictionContextCache();
 
+    @Deprecated
     protected final String[] tokenNames;
     protected final String[] ruleNames;
+
+    @NotNull
+    private final Vocabulary vocabulary;
 
     protected final Deque<Pair<ParserRuleContext, Integer>> _parentContextStack = new ArrayDeque<Pair<ParserRuleContext, Integer>>();
 
+    /**
+     * @deprecated Use {@link #ParserInterpreter(String, Vocabulary, Collection, ATN, TokenStream)} instead.
+     */
+    @Deprecated
     public ParserInterpreter(String grammarFileName, Collection<String> tokenNames,
                             Collection<String> ruleNames, ATN atn, TokenStream input) {
+        this(grammarFileName, VocabularyImpl.fromTokenNames(tokenNames.toArray(new String[tokenNames.size()])), ruleNames, atn, input);
+    }
+
+    public ParserInterpreter(String grammarFileName, @NotNull Vocabulary vocabulary,
+                            Collection<String> ruleNames, ATN atn, TokenStream input)
+    {
        super(input);
        this.grammarFileName = grammarFileName;
        this.atn = atn;
-        this.tokenNames = tokenNames.toArray(new String[tokenNames.size()]);
+        this.tokenNames = new String[atn.maxTokenType];
+        for (int i = 0; i < tokenNames.length; i++) {
+            tokenNames[i] = vocabulary.getDisplayName(i);
+        }
+
        this.ruleNames = ruleNames.toArray(new String[ruleNames.size()]);
+        this.vocabulary = vocabulary;
        this.decisionToDFA = new DFA[atn.getNumberOfDecisions()];
        for (int i = 0; i < decisionToDFA.length; i++) {
            decisionToDFA[i] = new DFA(atn.getDecisionState(i), i);
@@ -116,10 +134,16 @@ public class ParserInterpreter extends Parser {
     }
 
     @Override
+    @Deprecated
     public String[] getTokenNames() {
        return tokenNames;
     }
 
+    @Override
+    public Vocabulary getVocabulary() {
+        return vocabulary;
+    }
+
     @Override
     public String[] getRuleNames() {
        return ruleNames;
@@ -38,6 +38,7 @@ import org.antlr.v4.runtime.misc.Nullable;
 import org.antlr.v4.runtime.misc.Utils;
 
+import java.util.Collections;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.WeakHashMap;
@@ -46,8 +47,8 @@ import java.util.concurrent.CopyOnWriteArrayList;
 public abstract class Recognizer<Symbol, ATNInterpreter extends ATNSimulator> {
     public static final int EOF=-1;
 
-    private static final Map<String[], Map<String, Integer>> tokenTypeMapCache =
-        new WeakHashMap<String[], Map<String, Integer>>();
+    private static final Map<Vocabulary, Map<String, Integer>> tokenTypeMapCache =
+        new WeakHashMap<Vocabulary, Map<String, Integer>>();
     private static final Map<String[], Map<String, Integer>> ruleIndexMapCache =
        new WeakHashMap<String[], Map<String, Integer>>();
 
@@ -64,11 +65,26 @@ public abstract class Recognizer<Symbol, ATNInterpreter extends ATNSimulator> {
     /** Used to print out token names like ID during debugging and
     *  error reporting. The generated parsers implement a method
     *  that overrides this to point to their String[] tokenNames.
+     *
+     * @deprecated Use {@link #getVocabulary()} instead.
     */
+    @Deprecated
     public abstract String[] getTokenNames();
 
     public abstract String[] getRuleNames();
 
+    /**
+     * Get the vocabulary used by the recognizer.
+     *
+     * @return A {@link Vocabulary} instance providing information about the
+     * vocabulary used by the grammar.
+     */
+    @NotNull
+    @SuppressWarnings("deprecation")
+    public Vocabulary getVocabulary() {
+        return VocabularyImpl.fromTokenNames(getTokenNames());
+    }
+
     /**
     * Get a map from token names to token types.
     *
@@ -76,18 +92,26 @@ public abstract class Recognizer<Symbol, ATNInterpreter extends ATNSimulator> {
     */
    @NotNull
    public Map<String, Integer> getTokenTypeMap() {
-        String[] tokenNames = getTokenNames();
-        if (tokenNames == null) {
-            throw new UnsupportedOperationException("The current recognizer does not provide a list of token names.");
-        }
-
+        Vocabulary vocabulary = getVocabulary();
        synchronized (tokenTypeMapCache) {
-            Map<String, Integer> result = tokenTypeMapCache.get(tokenNames);
+            Map<String, Integer> result = tokenTypeMapCache.get(vocabulary);
            if (result == null) {
-                result = Utils.toMap(tokenNames);
+                result = new HashMap<String, Integer>();
+                for (int i = 0; i < getATN().maxTokenType; i++) {
+                    String literalName = vocabulary.getLiteralName(i);
+                    if (literalName != null) {
+                        result.put(literalName, i);
+                    }
+
+                    String symbolicName = vocabulary.getSymbolicName(i);
+                    if (symbolicName != null) {
+                        result.put(symbolicName, i);
+                    }
+                }
+
                result.put("EOF", Token.EOF);
                result = Collections.unmodifiableMap(result);
-                tokenTypeMapCache.put(tokenNames, result);
+                tokenTypeMapCache.put(vocabulary, result);
            }
 
            return result;
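With the rewrite above, getTokenTypeMap() is keyed by the vocabulary and contains both spellings of a token type when both exist. A hedged sketch of what callers see (the MUL rule is hypothetical):

    Map<String, Integer> types = recognizer.getTokenTypeMap();
    // For a hypothetical lexer rule   MUL : '*' ;   both names resolve:
    Integer byLiteral = types.get("'*'");   // literal name
    Integer bySymbol  = types.get("MUL");   // symbolic name
    Integer eof       = types.get("EOF");   // always present, == Token.EOF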
@@ -74,7 +74,7 @@ public class RuntimeMetaData {
     * libraries to include a literal reference to the version of the ANTLR 4
     * runtime library the code was compiled against.</p>
     */
-    public static final String VERSION = "4.4";
+    public static final String VERSION = "4.4.1-dev";
 
     /**
     * This class provides detailed information about a mismatch between the
@@ -57,6 +57,19 @@ public interface Token {
     */
    public static final int HIDDEN_CHANNEL = 1;
 
+    /**
+     * This is the minimum constant value which can be assigned to a
+     * user-defined token channel.
+     *
+     * <p>
+     * The non-negative numbers less than {@link #MIN_USER_CHANNEL_VALUE} are
+     * assigned to the predefined channels {@link #DEFAULT_CHANNEL} and
+     * {@link #HIDDEN_CHANNEL}.</p>
+     *
+     * @see Token#getChannel()
+     */
+    public static final int MIN_USER_CHANNEL_VALUE = 2;
+
    /**
     * Get the text of the token.
     */
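Channels 0 and 1 stay reserved for DEFAULT_CHANNEL and HIDDEN_CHANNEL; applications defining their own channels should start at the new constant. A small sketch (the comments channel is an invented example):

    // Invented example: route comment tokens to a custom channel.
    public static final int COMMENTS_CHANNEL = Token.MIN_USER_CHANNEL_VALUE; // == 2

    // e.g. inside a lexer action: setChannel(COMMENTS_CHANNEL);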
@@ -91,6 +91,7 @@ public interface TokenSource {
     * non-null, non-empty string. If such a name is not known, this method
     * returns {@link IntStream#UNKNOWN_SOURCE_NAME}.
     */
+    @NotNull
    public String getSourceName();
 
    /**
@@ -115,16 +115,16 @@ import java.util.Map;
  */
 public class TokenStreamRewriter {
     public static final String DEFAULT_PROGRAM_NAME = "default";
     public static final int PROGRAM_INIT_SIZE = 100;
     public static final int MIN_TOKEN_INDEX = 0;
 
     // Define the rewrite operation hierarchy
 
     public class RewriteOperation {
        /** What index into rewrites List are we? */
        protected int instructionIndex;
        /** Token buffer index. */
        protected int index;
        protected Object text;
 
        protected RewriteOperation(int index) {
@@ -148,7 +148,7 @@ public class TokenStreamRewriter {
            int $index = opName.indexOf('$');
            opName = opName.substring($index+1, opName.length());
            return "<"+opName+"@"+tokens.get(index)+
                    ":\""+text+"\">";
        }
     }
 
@@ -187,10 +187,10 @@ public class TokenStreamRewriter {
        public String toString() {
            if ( text==null ) {
                return "<DeleteOp@"+tokens.get(index)+
                        ".."+tokens.get(lastIndex)+">";
            }
            return "<ReplaceOp@"+tokens.get(index)+
                    ".."+tokens.get(lastIndex)+":\""+text+"\">";
        }
     }
 
@@ -224,7 +224,7 @@ public class TokenStreamRewriter {
 
     /** Rollback the instruction stream for a program so that
     *  the indicated instruction (via instructionIndex) is no
     *  longer in the stream. UNTESTED!
     */
     public void rollback(String programName, int instructionIndex) {
        List<RewriteOperation> is = programs.get(programName);
@@ -274,8 +274,8 @@ public class TokenStreamRewriter {
     public void insertBefore(String programName, int index, Object text) {
        RewriteOperation op = new InsertBeforeOp(index,text);
        List<RewriteOperation> rewrites = getProgram(programName);
        op.instructionIndex = rewrites.size();
        rewrites.add(op);
     }
 
     public void replace(int index, Object text) {
@@ -300,8 +300,8 @@ public class TokenStreamRewriter {
        }
        RewriteOperation op = new ReplaceOp(from, to, text);
        List<RewriteOperation> rewrites = getProgram(programName);
        op.instructionIndex = rewrites.size();
        rewrites.add(op);
     }
 
     public void replace(String programName, Token from, Token to, @Nullable Object text) {
@@ -390,11 +390,11 @@ public class TokenStreamRewriter {
        int start = interval.a;
        int stop = interval.b;
 
        // ensure start/end are in range
        if ( stop>tokens.size()-1 ) stop = tokens.size()-1;
        if ( start<0 ) start = 0;
 
        if ( rewrites==null || rewrites.isEmpty() ) {
            return tokens.getText(interval); // no instructions to execute
        }
        StringBuilder buf = new StringBuilder();
@@ -402,9 +402,9 @@ public class TokenStreamRewriter {
        // First, optimize instruction stream
        Map<Integer, RewriteOperation> indexToOp = reduceToSingleOperationPerIndex(rewrites);
 
        // Walk buffer, executing instructions and emitting tokens
        int i = start;
        while ( i <= stop && i < tokens.size() ) {
            RewriteOperation op = indexToOp.get(i);
            indexToOp.remove(i); // remove so any left have index size-1
            Token t = tokens.get(i);
@@ -418,22 +418,22 @@ public class TokenStreamRewriter {
            }
        }
 
        // include stuff after end if it's last index in buffer
        // So, if they did an insertAfter(lastValidIndex, "foo"), include
        // foo if end==lastValidIndex.
        if ( stop==tokens.size()-1 ) {
            // Scan any remaining operations after last token
            // should be included (they will be inserts).
            for (RewriteOperation op : indexToOp.values()) {
                if ( op.index >= tokens.size()-1 ) buf.append(op.text);
            }
        }
        return buf.toString();
     }
 
     /** We need to combine operations and report invalid operations (like
     *  overlapping replaces that are not completed nested). Inserts to
     *  same index need to be combined etc... Here are the cases:
     *
     *  I.i.u I.j.v        leave alone, nonoverlapping
     *  I.i.u I.i.v        combine: Iivu
@@ -456,25 +456,25 @@ public class TokenStreamRewriter {
     *  I.i.u = insert u before op @ index i
     *  R.x-y.u = replace x-y indexed tokens with u
     *
     *  First we need to examine replaces. For any replace op:
     *
     *      1. wipe out any insertions before op within that range.
     *      2. Drop any replace op before that is contained completely within
     *         that range.
     *      3. Throw exception upon boundary overlap with any previous replace.
     *
     *  Then we can deal with inserts:
     *
     *      1. for any inserts to same index, combine even if not adjacent.
     *      2. for any prior replace with same left boundary, combine this
     *         insert with replace and delete this replace.
     *      3. throw exception if index in same range as previous replace
     *
     *  Don't actually delete; make op null in list. Easier to walk list.
     *  Later we can throw as we add to index → op map.
     *
     *  Note that I.2 R.2-2 will wipe out I.2 even though, technically, the
     *  inserted stuff would be before the replace range. But, if you
     *  add tokens in front of a method body '{' and then delete the method
     *  body, I think the stuff before the '{' you added should disappear too.
     *
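The combination rules spelled out above decide what getText() emits when several instructions touch the same tokens. A short usage sketch (token stream setup elided; indexes are illustrative):

    TokenStreamRewriter rewriter = new TokenStreamRewriter(tokens);
    rewriter.insertBefore(0, "// generated\n");   // inserts at the same index
    rewriter.insertBefore(0, "/* header */ ");    // are combined, not dropped
    rewriter.replace(2, 4, "x");                  // wipes out inserts inside 2..4
    String rewritten = rewriter.getText();        // original stream is untouched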
@@ -499,16 +499,16 @@ public class TokenStreamRewriter {
                rop.text = iop.text.toString() + (rop.text!=null?rop.text.toString():"");
            }
            else if ( iop.index > rop.index && iop.index <= rop.lastIndex ) {
                // delete insert as it's a no-op.
                rewrites.set(iop.instructionIndex, null);
            }
        }
        // Drop any prior replaces contained within
        List<? extends ReplaceOp> prevReplaces = getKindOfOps(rewrites, ReplaceOp.class, i);
        for (ReplaceOp prevRop : prevReplaces) {
            if ( prevRop.index>=rop.index && prevRop.lastIndex <= rop.lastIndex ) {
                // delete replace as it's a no-op.
                rewrites.set(prevRop.instructionIndex, null);
                continue;
            }
            // throw exception unless disjoint or identical
@@ -526,8 +526,7 @@ public class TokenStreamRewriter {
                System.out.println("new rop "+rop);
            }
            else if ( !disjoint && !same ) {
-                throw new IllegalArgumentException("replace op boundaries of "+rop+
-                        " overlap with previous "+prevRop);
+                throw new IllegalArgumentException("replace op boundaries of "+rop+" overlap with previous "+prevRop);
            }
        }
     }
@@ -545,8 +544,8 @@ public class TokenStreamRewriter {
                // convert to strings...we're in process of toString'ing
                // whole token buffer so no lazy eval issue with any templates
                iop.text = catOpText(iop.text,prevIop.text);
                // delete redundant prior insert
                rewrites.set(prevIop.instructionIndex, null);
            }
        }
        // look for replaces where iop.index is in range; error
@@ -554,12 +553,11 @@ public class TokenStreamRewriter {
        for (ReplaceOp rop : prevReplaces) {
            if ( iop.index == rop.index ) {
                rop.text = catOpText(iop.text,rop.text);
                rewrites.set(i, null); // delete current insert
                continue;
            }
            if ( iop.index >= rop.index && iop.index <= rop.lastIndex ) {
-                throw new IllegalArgumentException("insert op "+iop+
-                        " within boundaries of previous "+rop);
+                throw new IllegalArgumentException("insert op "+iop+" within boundaries of previous "+rop);
            }
        }
     }
@@ -585,8 +583,8 @@ public class TokenStreamRewriter {
        return x+y;
     }
 
     /** Get all operations before an index of a particular kind */
     protected <T extends RewriteOperation> List<? extends T> getKindOfOps(List<? extends RewriteOperation> rewrites, Class<T> kind, int before) {
        List<T> ops = new ArrayList<T>();
        for (int i=0; i<before && i<rewrites.size(); i++) {
            RewriteOperation op = rewrites.get(i);
|
|||
}
|
||||
return ops;
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
@@ -297,6 +297,10 @@ public class UnbufferedCharStream implements CharStream {
 
     @Override
     public String getSourceName() {
+        if (name == null || name.isEmpty()) {
+            return UNKNOWN_SOURCE_NAME;
+        }
+
        return name;
     }
 
@@ -0,0 +1,152 @@
+/*
+ * [The "BSD license"]
+ *  Copyright (c) 2014 Terence Parr
+ *  Copyright (c) 2014 Sam Harwell
+ *  All rights reserved.
+ *
+ *  Redistribution and use in source and binary forms, with or without
+ *  modification, are permitted provided that the following conditions
+ *  are met:
+ *
+ *  1. Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ *  2. Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in the
+ *     documentation and/or other materials provided with the distribution.
+ *  3. The name of the author may not be used to endorse or promote products
+ *     derived from this software without specific prior written permission.
+ *
+ *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package org.antlr.v4.runtime;
+
+import org.antlr.v4.runtime.misc.NotNull;
+import org.antlr.v4.runtime.misc.Nullable;
+
+/**
+ * This interface provides information about the vocabulary used by a
+ * recognizer.
+ *
+ * @see Recognizer#getVocabulary()
+ * @author Sam Harwell
+ */
+public interface Vocabulary {
+
+    /**
+     * Gets the string literal associated with a token type. The string returned
+     * by this method, when not {@code null}, can be used unaltered in a parser
+     * grammar to represent this token type.
+     *
+     * <p>The following table shows examples of lexer rules and the literal
+     * names assigned to the corresponding token types.</p>
+     *
+     * <table>
+     *  <tr>
+     *   <th>Rule</th>
+     *   <th>Literal Name</th>
+     *   <th>Java String Literal</th>
+     *  </tr>
+     *  <tr>
+     *   <td>{@code THIS : 'this';}</td>
+     *   <td>{@code 'this'}</td>
+     *   <td>{@code "'this'"}</td>
+     *  </tr>
+     *  <tr>
+     *   <td>{@code SQUOTE : '\'';}</td>
+     *   <td>{@code '\''}</td>
+     *   <td>{@code "'\\''"}</td>
+     *  </tr>
+     *  <tr>
+     *   <td>{@code ID : [A-Z]+;}</td>
+     *   <td>n/a</td>
+     *   <td>{@code null}</td>
+     *  </tr>
+     * </table>
+     *
+     * @param tokenType The token type.
+     *
+     * @return The string literal associated with the specified token type, or
+     * {@code null} if no string literal is associated with the type.
+     */
+    @Nullable
+    String getLiteralName(int tokenType);
+
+    /**
+     * Gets the symbolic name associated with a token type. The string returned
+     * by this method, when not {@code null}, can be used unaltered in a parser
+     * grammar to represent this token type.
+     *
+     * <p>This method supports token types defined by any of the following
+     * methods:</p>
+     *
+     * <ul>
+     *  <li>Tokens created by lexer rules.</li>
+     *  <li>Tokens defined in a {@code tokens{}} block in a lexer or parser
+     *  grammar.</li>
+     *  <li>The implicitly defined {@code EOF} token, which has the token type
+     *  {@link Token#EOF}.</li>
+     * </ul>
+     *
+     * <p>The following table shows examples of lexer rules and the literal
+     * names assigned to the corresponding token types.</p>
+     *
+     * <table>
+     *  <tr>
+     *   <th>Rule</th>
+     *   <th>Symbolic Name</th>
+     *  </tr>
+     *  <tr>
+     *   <td>{@code THIS : 'this';}</td>
+     *   <td>{@code THIS}</td>
+     *  </tr>
+     *  <tr>
+     *   <td>{@code SQUOTE : '\'';}</td>
+     *   <td>{@code SQUOTE}</td>
+     *  </tr>
+     *  <tr>
+     *   <td>{@code ID : [A-Z]+;}</td>
+     *   <td>{@code ID}</td>
+     *  </tr>
+     * </table>
+     *
+     * @param tokenType The token type.
+     *
+     * @return The symbolic name associated with the specified token type, or
+     * {@code null} if no symbolic name is associated with the type.
+     */
+    @Nullable
+    String getSymbolicName(int tokenType);
+
+    /**
+     * Gets the display name of a token type.
+     *
+     * <p>ANTLR provides a default implementation of this method, but
+     * applications are free to override the behavior in any manner which makes
+     * sense for the application. The default implementation returns the first
+     * result from the following list which produces a non-{@code null}
+     * result.</p>
+     *
+     * <ol>
+     *  <li>The result of {@link #getLiteralName}</li>
+     *  <li>The result of {@link #getSymbolicName}</li>
+     *  <li>The result of {@link Integer#toString}</li>
+     * </ol>
+     *
+     * @param tokenType The token type.
+     *
+     * @return The display name of the token type, for use in error reporting or
+     * other user-visible messages which reference specific token types.
+     */
+    @NotNull
+    String getDisplayName(int tokenType);
+
+}
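Taken together, the three methods give the display-name fallback documented above. A quick sketch against VocabularyImpl, the default implementation added in the next file (the arrays are invented):

    Vocabulary v = new VocabularyImpl(
            new String[] { null, "'this'", null },   // literal names by type
            new String[] { null, "THIS", "ID" });    // symbolic names by type

    v.getDisplayName(1);   // "'this'" – literal name wins
    v.getDisplayName(2);   // "ID"     – no literal, symbolic is next
    v.getDisplayName(9);   // "9"      – unknown type falls back to the number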
@@ -0,0 +1,196 @@
+/*
+ * [The "BSD license"]
+ *  Copyright (c) 2014 Terence Parr
+ *  Copyright (c) 2014 Sam Harwell
+ *  All rights reserved.
+ *
+ *  Redistribution and use in source and binary forms, with or without
+ *  modification, are permitted provided that the following conditions
+ *  are met:
+ *
+ *  1. Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ *  2. Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in the
+ *     documentation and/or other materials provided with the distribution.
+ *  3. The name of the author may not be used to endorse or promote products
+ *     derived from this software without specific prior written permission.
+ *
+ *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package org.antlr.v4.runtime;
+
+import org.antlr.v4.runtime.misc.NotNull;
+import org.antlr.v4.runtime.misc.Nullable;
+
+import java.util.Arrays;
+
+/**
+ * This class provides a default implementation of the {@link Vocabulary}
+ * interface.
+ *
+ * @author Sam Harwell
+ */
+public class VocabularyImpl implements Vocabulary {
+    private static final String[] EMPTY_NAMES = new String[0];
+
+    /**
+     * Gets an empty {@link Vocabulary} instance.
+     *
+     * <p>
+     * No literal or symbol names are assigned to token types, so
+     * {@link #getDisplayName(int)} returns the numeric value for all tokens
+     * except {@link Token#EOF}.</p>
+     */
+    @NotNull
+    public static final VocabularyImpl EMPTY_VOCABULARY = new VocabularyImpl(EMPTY_NAMES, EMPTY_NAMES, EMPTY_NAMES);
+
+    @NotNull
+    private final String[] literalNames;
+    @NotNull
+    private final String[] symbolicNames;
+    @NotNull
+    private final String[] displayNames;
+
+    /**
+     * Constructs a new instance of {@link VocabularyImpl} from the specified
+     * literal and symbolic token names.
+     *
+     * @param literalNames The literal names assigned to tokens, or {@code null}
+     * if no literal names are assigned.
+     * @param symbolicNames The symbolic names assigned to tokens, or
+     * {@code null} if no symbolic names are assigned.
+     *
+     * @see #getLiteralName(int)
+     * @see #getSymbolicName(int)
+     */
+    public VocabularyImpl(@Nullable String[] literalNames, @Nullable String[] symbolicNames) {
+        this(literalNames, symbolicNames, null);
+    }
+
+    /**
+     * Constructs a new instance of {@link VocabularyImpl} from the specified
+     * literal, symbolic, and display token names.
+     *
+     * @param literalNames The literal names assigned to tokens, or {@code null}
+     * if no literal names are assigned.
+     * @param symbolicNames The symbolic names assigned to tokens, or
+     * {@code null} if no symbolic names are assigned.
+     * @param displayNames The display names assigned to tokens, or {@code null}
+     * to use the values in {@code literalNames} and {@code symbolicNames} as
+     * the source of display names, as described in
+     * {@link #getDisplayName(int)}.
+     *
+     * @see #getLiteralName(int)
+     * @see #getSymbolicName(int)
+     * @see #getDisplayName(int)
+     */
+    public VocabularyImpl(@Nullable String[] literalNames, @Nullable String[] symbolicNames, @Nullable String[] displayNames) {
+        this.literalNames = literalNames != null ? literalNames : EMPTY_NAMES;
+        this.symbolicNames = symbolicNames != null ? symbolicNames : EMPTY_NAMES;
+        this.displayNames = displayNames != null ? displayNames : EMPTY_NAMES;
+    }
+
+    /**
+     * Returns a {@link VocabularyImpl} instance from the specified set of token
+     * names. This method acts as a compatibility layer for the single
+     * {@code tokenNames} array generated by previous releases of ANTLR.
+     *
+     * <p>The resulting vocabulary instance returns {@code null} for
+     * {@link #getLiteralName(int)} and {@link #getSymbolicName(int)}, and the
+     * value from {@code tokenNames} for the display names.</p>
+     *
+     * @param tokenNames The token names, or {@code null} if no token names are
+     * available.
+     * @return A {@link Vocabulary} instance which uses {@code tokenNames} for
+     * the display names of tokens.
+     */
+    public static Vocabulary fromTokenNames(@Nullable String[] tokenNames) {
+        if (tokenNames == null || tokenNames.length == 0) {
+            return EMPTY_VOCABULARY;
+        }
+
+        String[] literalNames = Arrays.copyOf(tokenNames, tokenNames.length);
+        String[] symbolicNames = Arrays.copyOf(tokenNames, tokenNames.length);
+        for (int i = 0; i < tokenNames.length; i++) {
+            String tokenName = tokenNames[i];
+            if (tokenName == null) {
+                continue;
+            }
+
+            if (!tokenName.isEmpty()) {
+                char firstChar = tokenName.charAt(0);
+                if (firstChar == '\'') {
+                    symbolicNames[i] = null;
+                    continue;
+                }
+                else if (Character.isUpperCase(firstChar)) {
+                    literalNames[i] = null;
+                    continue;
+                }
+            }
+
+            // wasn't a literal or symbolic name
+            literalNames[i] = null;
+            symbolicNames[i] = null;
+        }
+
+        return new VocabularyImpl(literalNames, symbolicNames, tokenNames);
+    }
+
+    @Override
+    @Nullable
+    public String getLiteralName(int tokenType) {
+        if (tokenType >= 0 && tokenType < literalNames.length) {
+            return literalNames[tokenType];
+        }
+
+        return null;
+    }
+
+    @Override
+    @Nullable
+    public String getSymbolicName(int tokenType) {
+        if (tokenType >= 0 && tokenType < symbolicNames.length) {
+            return symbolicNames[tokenType];
+        }
+
+        if (tokenType == Token.EOF) {
+            return "EOF";
+        }
+
+        return null;
+    }
+
+    @Override
+    @NotNull
+    public String getDisplayName(int tokenType) {
+        if (tokenType >= 0 && tokenType < displayNames.length) {
+            String displayName = displayNames[tokenType];
+            if (displayName != null) {
+                return displayName;
+            }
+        }
+
+        String literalName = getLiteralName(tokenType);
+        if (literalName != null) {
+            return literalName;
+        }
+
+        String symbolicName = getSymbolicName(tokenType);
+        if (symbolicName != null) {
+            return symbolicName;
+        }
+
+        return Integer.toString(tokenType);
+    }
+}
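fromTokenNames() classifies each legacy entry by its first character: a leading quote marks a literal name, a leading uppercase letter a symbolic name, and anything else survives only as a display name. A sketch of that behavior (the array is invented):

    String[] legacy = { "<INVALID>", "'*'", "MUL", "expr" };
    Vocabulary v = VocabularyImpl.fromTokenNames(legacy);

    v.getLiteralName(1);    // "'*'"  – starts with a quote
    v.getSymbolicName(1);   // null
    v.getSymbolicName(2);   // "MUL"  – starts uppercase
    v.getLiteralName(2);    // null
    v.getDisplayName(3);    // "expr" – neither form, display name only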
@@ -43,6 +43,13 @@ import org.antlr.v4.runtime.misc.Nullable;
  * an ATN state.
  */
 public class ATNConfig {
+    /**
+     * This field stores the bit mask for implementing the
+     * {@link #isPrecedenceFilterSuppressed} property as a bit within the
+     * existing {@link #reachesIntoOuterContext} field.
+     */
+    private static final int SUPPRESS_PRECEDENCE_FILTER = 0x40000000;
+
     /** The ATN state associated with this configuration */
     @NotNull
     public final ATNState state;
@@ -64,9 +71,21 @@ public class ATNConfig {
     *  dependent predicates unless we are in the rule that initially
     *  invokes the ATN simulator.
     *
-     *  closure() tracks the depth of how far we dip into the
-     *  outer context: depth > 0.  Note that it may not be totally
-     *  accurate depth since I don't ever decrement. TODO: make it a boolean then
+     * <p>
+     * closure() tracks the depth of how far we dip into the outer context:
+     * depth > 0. Note that it may not be totally accurate depth since I
+     * don't ever decrement. TODO: make it a boolean then</p>
+     *
+     * <p>
+     * For memory efficiency, the {@link #isPrecedenceFilterSuppressed} method
+     * is also backed by this field. Since the field is publicly accessible, the
+     * highest bit which would not cause the value to become negative is used to
+     * store this field. This choice minimizes the risk that code which only
+     * compares this value to 0 would be affected by the new purpose of the
+     * flag. It also ensures the performance of the existing {@link ATNConfig}
+     * constructors as well as certain operations like
+     * {@link ATNConfigSet#add(ATNConfig, DoubleKeyMap)} method are
+     * <em>completely</em> unaffected by the change.</p>
     */
    public int reachesIntoOuterContext;
 
@@ -132,6 +151,28 @@ public class ATNConfig {
        this.reachesIntoOuterContext = c.reachesIntoOuterContext;
     }
 
+    /**
+     * This method gets the value of the {@link #reachesIntoOuterContext} field
+     * as it existed prior to the introduction of the
+     * {@link #isPrecedenceFilterSuppressed} method.
+     */
+    public final int getOuterContextDepth() {
+        return reachesIntoOuterContext & ~SUPPRESS_PRECEDENCE_FILTER;
+    }
+
+    public final boolean isPrecedenceFilterSuppressed() {
+        return (reachesIntoOuterContext & SUPPRESS_PRECEDENCE_FILTER) != 0;
+    }
+
+    public final void setPrecedenceFilterSuppressed(boolean value) {
+        if (value) {
+            this.reachesIntoOuterContext |= 0x40000000;
+        }
+        else {
+            this.reachesIntoOuterContext &= ~SUPPRESS_PRECEDENCE_FILTER;
+        }
+    }
+
     /** An ATN configuration is equal to another if both have
     *  the same state, they predict the same alternative, and
     *  syntactic/semantic contexts are the same.
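The accessors above pack a boolean into bit 30 of the public depth counter, so legacy reachesIntoOuterContext > 0 checks keep working. A standalone sketch of the bit trick, independent of ATNConfig:

    static final int SUPPRESS_PRECEDENCE_FILTER = 0x40000000;

    int reachesIntoOuterContext = 3;                        // plain depth
    reachesIntoOuterContext |= SUPPRESS_PRECEDENCE_FILTER;  // set the flag

    int depth = reachesIntoOuterContext & ~SUPPRESS_PRECEDENCE_FILTER;   // 3
    boolean suppressed =
            (reachesIntoOuterContext & SUPPRESS_PRECEDENCE_FILTER) != 0; // true
    boolean legacyCheck = reachesIntoOuterContext > 0;                   // still true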
@@ -155,7 +196,8 @@ public class ATNConfig {
        return this.state.stateNumber==other.state.stateNumber
            && this.alt==other.alt
            && (this.context==other.context || (this.context != null && this.context.equals(other.context)))
-            && this.semanticContext.equals(other.semanticContext);
+            && this.semanticContext.equals(other.semanticContext)
+            && this.isPrecedenceFilterSuppressed() == other.isPrecedenceFilterSuppressed();
     }
 
     @Override
@@ -195,8 +237,8 @@ public class ATNConfig {
            buf.append(",");
            buf.append(semanticContext);
        }
-        if ( reachesIntoOuterContext>0 ) {
-            buf.append(",up=").append(reachesIntoOuterContext);
+        if ( getOuterContextDepth()>0 ) {
+            buf.append(",up=").append(getOuterContextDepth());
        }
        buf.append(')');
        return buf.toString();
@@ -161,7 +161,7 @@ public class ATNConfigSet implements Set<ATNConfig> {
        if ( config.semanticContext!=SemanticContext.NONE ) {
            hasSemanticContext = true;
        }
-        if (config.reachesIntoOuterContext > 0) {
+        if (config.getOuterContextDepth() > 0) {
            dipsIntoOuterContext = true;
        }
        ATNConfig existing = configLookup.getOrAdd(config);
@@ -179,6 +179,12 @@ public class ATNConfigSet implements Set<ATNConfig> {
        // cache at both places.
        existing.reachesIntoOuterContext =
            Math.max(existing.reachesIntoOuterContext, config.reachesIntoOuterContext);
+
+        // make sure to preserve the precedence filter suppression during the merge
+        if (config.isPrecedenceFilterSuppressed()) {
+            existing.setPrecedenceFilterSuppressed(true);
+        }
+
        existing.context = merged; // replace context; no need to alt mapping
        return true;
     }
@@ -320,7 +320,15 @@ public class ATNDeserializer {
            }
 
            RuleTransition ruleTransition = (RuleTransition)t;
-            atn.ruleToStopState[ruleTransition.target.ruleIndex].addTransition(new EpsilonTransition(ruleTransition.followState));
+            int outermostPrecedenceReturn = -1;
+            if (atn.ruleToStartState[ruleTransition.target.ruleIndex].isPrecedenceRule) {
+                if (ruleTransition.precedence == 0) {
+                    outermostPrecedenceReturn = ruleTransition.target.ruleIndex;
+                }
+            }
+
+            EpsilonTransition returnTransition = new EpsilonTransition(ruleTransition.followState, outermostPrecedenceReturn);
+            atn.ruleToStopState[ruleTransition.target.ruleIndex].addTransition(returnTransition);
        }
     }
 
@@ -33,7 +33,29 @@ package org.antlr.v4.runtime.atn;
 import org.antlr.v4.runtime.misc.NotNull;
 
 public final class EpsilonTransition extends Transition {
-    public EpsilonTransition(@NotNull ATNState target) { super(target); }
+
+    private final int outermostPrecedenceReturn;
+
+    public EpsilonTransition(@NotNull ATNState target) {
+        this(target, -1);
+    }
+
+    public EpsilonTransition(@NotNull ATNState target, int outermostPrecedenceReturn) {
+        super(target);
+        this.outermostPrecedenceReturn = outermostPrecedenceReturn;
+    }
+
+    /**
+     * @return the rule index of a precedence rule for which this transition is
+     * returning from, where the precedence value is 0; otherwise, -1.
+     *
+     * @see ATNConfig#isPrecedenceFilterSuppressed()
+     * @see ParserATNSimulator#applyPrecedenceFilter(ATNConfigSet)
+     * @since 4.4.1
+     */
+    public int outermostPrecedenceReturn() {
+        return outermostPrecedenceReturn;
+    }
 
     @Override
     public int getSerializationType() {
@@ -197,6 +197,11 @@ public class LexerATNSimulator extends ATNSimulator {
            System.out.format(Locale.getDefault(), "start state closure=%s\n", ds0.configs);
        }
 
+        if (ds0.isAcceptState) {
+            // allow zero-length tokens
+            captureSimState(prevAccept, input, ds0);
+        }
+
        int t = input.LA(1);
        @NotNull
        DFAState s = ds0; // s is current/from DFA state
@@ -232,6 +237,14 @@ public class LexerATNSimulator extends ATNSimulator {
                break;
            }
 
+            // If this is a consumable input element, make sure to consume before
+            // capturing the accept state so the input index, line, and char
+            // position accurately reflect the state of the interpreter at the
+            // end of the token.
+            if (t != IntStream.EOF) {
+                consume(input);
+            }
+
            if (target.isAcceptState) {
                captureSimState(prevAccept, input, target);
                if (t == IntStream.EOF) {
@@ -239,11 +252,7 @@ public class LexerATNSimulator extends ATNSimulator {
                }
            }
 
-            if (t != IntStream.EOF) {
-                consume(input);
-                t = input.LA(1);
-            }
-
+            t = input.LA(1);
            s = target; // flip; current DFA target becomes new src/from state
        }
 
@@ -381,9 +390,6 @@ public class LexerATNSimulator extends ATNSimulator {
        input.seek(index);
        this.line = line;
        this.charPositionInLine = charPos;
-        if (input.LA(1) != IntStream.EOF) {
-            consume(input);
-        }
 
        if (lexerActionExecutor != null && recog != null) {
            lexerActionExecutor.execute(recog, input, startIndex);
@@ -31,7 +31,6 @@
 package org.antlr.v4.runtime.atn;
 
-import org.antlr.v4.runtime.BailErrorStrategy;
 import org.antlr.v4.runtime.CommonTokenStream;
 import org.antlr.v4.runtime.FailedPredicateException;
 import org.antlr.v4.runtime.IntStream;
 import org.antlr.v4.runtime.NoViableAltException;
@@ -40,6 +39,8 @@ import org.antlr.v4.runtime.ParserRuleContext;
 import org.antlr.v4.runtime.RuleContext;
 import org.antlr.v4.runtime.Token;
 import org.antlr.v4.runtime.TokenStream;
+import org.antlr.v4.runtime.Vocabulary;
+import org.antlr.v4.runtime.VocabularyImpl;
 import org.antlr.v4.runtime.dfa.DFA;
 import org.antlr.v4.runtime.dfa.DFAState;
 import org.antlr.v4.runtime.misc.DoubleKeyMap;
@@ -316,6 +317,7 @@ public class ParserATNSimulator extends ATNSimulator {
     protected TokenStream _input;
     protected int _startIndex;
     protected ParserRuleContext _outerContext;
+    protected DFA _dfa;
 
     /** Testing only! */
     public ParserATNSimulator(@NotNull ATN atn, @NotNull DFA[] decisionToDFA,
@@ -360,6 +362,7 @@ public class ParserATNSimulator extends ATNSimulator {
        _startIndex = input.index();
        _outerContext = outerContext;
        DFA dfa = decisionToDFA[decision];
+        _dfa = dfa;
 
        int m = input.mark();
        int index = _startIndex;
@@ -421,11 +424,12 @@ public class ParserATNSimulator extends ATNSimulator {
            }
 
            int alt = execATN(dfa, s0, input, index, outerContext);
-            if ( debug ) System.out.println("DFA after predictATN: "+ dfa.toString(parser.getTokenNames()));
+            if ( debug ) System.out.println("DFA after predictATN: "+ dfa.toString(parser.getVocabulary()));
            return alt;
        }
        finally {
            mergeCache = null; // wack cache after each prediction
+            _dfa = null;
            input.seek(index);
            input.release(m);
        }
@ -989,6 +993,113 @@ public class ParserATNSimulator extends ATNSimulator {
|
|||
return configs;
|
||||
}
|
||||
|
||||
/* parrt internal source braindump that doesn't mess up
 * external API spec.

applyPrecedenceFilter is an optimization to avoid highly
nonlinear prediction of expressions and other left recursive
rules. The precedence predicates such as {3>=prec}? are highly
context-sensitive in that they can only be properly evaluated
in the context of the proper prec argument. Without pruning,
these predicates are normal predicates evaluated when we reach
conflict state (or unique prediction). As we cannot evaluate
these predicates out of context, the resulting conflict leads
to full LL evaluation and nonlinear prediction which shows up
very clearly with fairly large expressions.

Example grammar:

e : e '*' e
  | e '+' e
  | INT
  ;

We convert that to the following:

e[int prec]
    : INT
      ( {3>=prec}? '*' e[4]
      | {2>=prec}? '+' e[3]
      )*
    ;

The (..)* loop has a decision for the inner block as well as
an enter or exit decision, which is what concerns us here. At
the 1st + of input 1+2+3, the loop entry sees both predicates
and the loop exit also sees both predicates by falling off the
edge of e. This is because we have no stack information with
SLL and find the follow of e, which will hit the return states
inside the loop after e[4] and e[3], which brings it back to
the enter or exit decision. In this case, we know that we
cannot evaluate those predicates because we have fallen off
the edge of the stack and will in general not know which prec
parameter is the right one to use in the predicate.

Because we have special information, that these are precedence
predicates, we can resolve them without failing over to full
LL despite their context-sensitive nature. We make an
assumption that prec[-1] <= prec[0], meaning that the current
precedence level is greater than or equal to the precedence
level of recursive invocations above us in the stack. For
example, if predicate {3>=prec}? is true of the current prec,
then one option is to enter the loop to match it now. The
other option is to exit the loop and the left recursive rule
to match the current operator in rule invocation further up
the stack. But, we know that all of those prec are lower or
the same value and so we can decide to enter the loop instead
of matching it later. That means we can strip out the other
configuration for the exit branch.

So imagine we have (14,1,$,{2>=prec}?) and then
(14,2,$-dipsIntoOuterContext,{2>=prec}?). The optimization
allows us to collapse these two configurations. We know that
if {2>=prec}? is true for the current prec parameter, it will
also be true for any prec from an invoking e call, indicated
by dipsIntoOuterContext. As the predicates are both true, we
have the option to evaluate them early in the decision start
state. We do this by stripping both predicates and choosing to
enter the loop as it is consistent with the notion of operator
precedence. It's also how the full LL conflict resolution
would work.

The solution requires a different DFA start state for each
precedence level.

The basic filter mechanism is to remove configurations of the
form (p, 2, pi) if (p, 1, pi) exists for the same p and pi. In
other words, for the same ATN state and predicate context,
remove any configuration associated with an exit branch if
there is a configuration associated with the enter branch.

It's also the case that the filter evaluates precedence
predicates and resolves conflicts according to precedence
levels. For example, for input 1+2+3 at the first +, we see
prediction filtering

[(11,1,[$],{3>=prec}?), (14,1,[$],{2>=prec}?), (5,2,[$],up=1),
 (11,2,[$],up=1), (14,2,[$],up=1)],hasSemanticContext=true,dipsIntoOuterContext

to

[(11,1,[$]), (14,1,[$]), (5,2,[$],up=1)],dipsIntoOuterContext

This filters because {3>=prec}? evals to true and collapses
(11,1,[$],{3>=prec}?) and (11,2,[$],up=1) since early conflict
resolution based upon rules of operator precedence fits with
our usual match first alt upon conflict.

We noticed a problem where a recursive call resets precedence
to 0. Sam's fix: each config has a flag indicating if it has
returned from an expr[0] call. Then we just don't filter any
config with that flag set. The flag is carried along in
closure(), so to avoid adding a field, we set the bit just under
the sign bit of dipsIntoOuterContext (SUPPRESS_PRECEDENCE_FILTER).
With the change you filter "unless (p, 2, pi) was reached
after leaving the rule stop state of the LR rule containing
state p, corresponding to a rule invocation with precedence
level 0"
*/
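
To make the filter above concrete, here is a minimal, self-contained Java sketch of the rule it describes: keep an enter-branch (alt 1) configuration when its precedence predicate holds, and drop an exit-branch (alt > 1) configuration when an enter-branch configuration survived for the same state and context, unless the configuration is marked suppressed. The Cfg type and its fields are hypothetical stand-ins for ATNConfig, not the runtime's actual API.

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

// Hypothetical stand-in for ATNConfig: (state, alt, context) plus two flags.
final class Cfg {
    final int state;
    final int alt;
    final String context;                  // stands in for PredictionContext
    final boolean precedencePredicateTrue; // result of evaluating {n>=prec}?
    final boolean filterSuppressed;        // isPrecedenceFilterSuppressed()

    Cfg(int state, int alt, String context, boolean predTrue, boolean suppressed) {
        this.state = state; this.alt = alt; this.context = context;
        this.precedencePredicateTrue = predTrue; this.filterSuppressed = suppressed;
    }
}

final class PrecedenceFilterSketch {
    // Pass 1 keeps alt-1 (enter-loop) configs whose predicate evaluates true;
    // pass 2 drops an alt>1 (exit) config whose (state, context) pair matched
    // a surviving alt-1 config, which is the (p, 2, pi) vs (p, 1, pi) rule
    // described above.
    static List<Cfg> filter(List<Cfg> configs) {
        Map<Integer, String> statesFromAlt1 = new HashMap<Integer, String>();
        List<Cfg> result = new ArrayList<Cfg>();
        for (Cfg c : configs) {
            if (c.alt == 1 && c.precedencePredicateTrue) {
                statesFromAlt1.put(c.state, c.context);
                result.add(c);
            }
        }
        for (Cfg c : configs) {
            if (c.alt == 1) {
                continue; // handled in the first pass
            }
            if (!c.filterSuppressed && c.context.equals(statesFromAlt1.get(c.state))) {
                continue; // eliminated: the enter branch wins by operator precedence
            }
            result.add(c);
        }
        return result;
    }
}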

/**
 * This method transforms the start state computed by
 * {@link #computeStartState} to the special start state used by a
@@ -999,8 +1110,9 @@ public class ParserATNSimulator extends ATNSimulator {
 * <ol>
 * <li>Evaluate the precedence predicates for each configuration using
 * {@link SemanticContext#evalPrecedence}.</li>
 * <li>Remove all configurations which predict an alternative greater than
 * 1, for which another configuration that predicts alternative 1 is in the
 * <li>When {@link ATNConfig#isPrecedenceFilterSuppressed} is {@code false},
 * remove all configurations which predict an alternative greater than 1,
 * for which another configuration that predicts alternative 1 is in the
 * same ATN state with the same prediction context. This transformation is
 * valid for the following reasons:
 * <ul>

@@ -1012,7 +1124,10 @@ public class ParserATNSimulator extends ATNSimulator {
 * epsilon transition, so the only way an alternative other than 1 can exist
 * in a state that is also reachable via alternative 1 is by nesting calls
 * to the left-recursive rule, with the outer calls not being at the
 * preferred precedence level.</li>
 * preferred precedence level. The
 * {@link ATNConfig#isPrecedenceFilterSuppressed} property marks ATN
 * configurations which do not meet this condition, and therefore are not
 * eligible for elimination during the filtering process.</li>
 * </ul>
 * </li>
 * </ol>

@@ -1076,14 +1191,16 @@ public class ParserATNSimulator extends ATNSimulator {
continue;
}

/* In the future, this elimination step could be updated to also
 * filter the prediction context for alternatives predicting alt>1
 * (basically a graph subtraction algorithm).
 */
PredictionContext context = statesFromAlt1.get(config.state.stateNumber);
if (context != null && context.equals(config.context)) {
// eliminated
continue;
if (!config.isPrecedenceFilterSuppressed()) {
/* In the future, this elimination step could be updated to also
 * filter the prediction context for alternatives predicting alt>1
 * (basically a graph subtraction algorithm).
 */
PredictionContext context = statesFromAlt1.get(config.state.stateNumber);
if (context != null && context.equals(config.context)) {
// eliminated
continue;
}
}

configSet.add(config, mergeCache);

@@ -1240,7 +1357,7 @@ public class ParserATNSimulator extends ATNSimulator {
protected int getAltThatFinishedDecisionEntryRule(ATNConfigSet configs) {
IntervalSet alts = new IntervalSet();
for (ATNConfig c : configs) {
if ( c.reachesIntoOuterContext>0 || (c.state instanceof RuleStopState && c.context.hasEmptyPath()) ) {
if ( c.getOuterContextDepth()>0 || (c.state instanceof RuleStopState && c.context.hasEmptyPath()) ) {
alts.add(c.alt);
}
}

@@ -1409,6 +1526,10 @@ public class ParserATNSimulator extends ATNSimulator {
// While we have context to pop back from, we may have
// gotten that context AFTER having fallen off a rule.
// Make sure we track that we are now out of context.
//
// This assignment also propagates the
// isPrecedenceFilterSuppressed() value to the new
// configuration.
c.reachesIntoOuterContext = config.reachesIntoOuterContext;
assert depth > Integer.MIN_VALUE;
closureCheckingStopState(c, configs, closureBusy, collectPredicates,

@@ -1476,6 +1597,13 @@ public class ParserATNSimulator extends ATNSimulator {
continue;
}

if (_dfa != null && _dfa.isPrecedenceDfa()) {
int outermostPrecedenceReturn = ((EpsilonTransition)t).outermostPrecedenceReturn();
if (outermostPrecedenceReturn == _dfa.atnStartState.ruleIndex) {
c.setPrecedenceFilterSuppressed(true);
}
}

c.reachesIntoOuterContext++;
configs.dipsIntoOuterContext = true; // TODO: can remove? only care when we add to set per middle of this method
assert newDepth > Integer.MIN_VALUE;

@@ -1725,18 +1853,17 @@ public class ParserATNSimulator extends ATNSimulator {

@NotNull
public String getTokenName(int t) {
if ( t==Token.EOF ) return "EOF";
if ( parser!=null && parser.getTokenNames()!=null ) {
String[] tokensNames = parser.getTokenNames();
if ( t>=tokensNames.length ) {
System.err.println(t+" ttype out of range: "+ Arrays.toString(tokensNames));
System.err.println(((CommonTokenStream)parser.getInputStream()).getTokens());
}
else {
return tokensNames[t]+"<"+t+">";
}
if (t == Token.EOF) {
return "EOF";
}
return String.valueOf(t);

Vocabulary vocabulary = parser != null ? parser.getVocabulary() : VocabularyImpl.EMPTY_VOCABULARY;
String displayName = vocabulary.getDisplayName(t);
if (displayName.equals(Integer.toString(t))) {
return displayName;
}

return displayName + "<" + t + ">";
}

public String getLookaheadName(TokenStream input) {

@@ -1839,7 +1966,7 @@ public class ParserATNSimulator extends ATNSimulator {
}

if ( debug ) {
System.out.println("DFA=\n"+dfa.toString(parser!=null?parser.getTokenNames():null));
System.out.println("DFA=\n"+dfa.toString(parser!=null?parser.getVocabulary():VocabularyImpl.EMPTY_VOCABULARY));
}

return to;

@@ -29,11 +29,13 @@
 */
package org.antlr.v4.runtime.dfa;

import org.antlr.v4.runtime.Parser;
import org.antlr.v4.runtime.Vocabulary;
import org.antlr.v4.runtime.VocabularyImpl;
import org.antlr.v4.runtime.atn.ATNConfigSet;
import org.antlr.v4.runtime.atn.DecisionState;
import org.antlr.v4.runtime.misc.NotNull;
import org.antlr.v4.runtime.misc.Nullable;
import org.antlr.v4.runtime.Parser;

import java.util.ArrayList;
import java.util.Arrays;

@@ -198,14 +200,27 @@ public class DFA {
}

@Override
public String toString() { return toString(null); }
public String toString() { return toString(VocabularyImpl.EMPTY_VOCABULARY); }

/**
 * @deprecated Use {@link #toString(Vocabulary)} instead.
 */
@Deprecated
public String toString(@Nullable String[] tokenNames) {
if ( s0==null ) return "";
DFASerializer serializer = new DFASerializer(this,tokenNames);
return serializer.toString();
}

public String toString(@NotNull Vocabulary vocabulary) {
if (s0 == null) {
return "";
}

DFASerializer serializer = new DFASerializer(this, vocabulary);
return serializer.toString();
}

public String toLexerString() {
if ( s0==null ) return "";
DFASerializer serializer = new LexerDFASerializer(this);

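The migration pattern above recurs throughout this commit: the String[]-based overload survives as a deprecated shim that routes through VocabularyImpl.fromTokenNames(), while new code passes the recognizer's Vocabulary directly. A short usage sketch, assuming parser and dfa variables in scope:

String before = dfa.toString(parser.getTokenNames());  // deprecated overload
String after = dfa.toString(parser.getVocabulary());   // preferred overload
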
@@ -30,6 +30,8 @@

package org.antlr.v4.runtime.dfa;

import org.antlr.v4.runtime.Vocabulary;
import org.antlr.v4.runtime.VocabularyImpl;
import org.antlr.v4.runtime.misc.NotNull;
import org.antlr.v4.runtime.misc.Nullable;

@@ -39,13 +41,21 @@ import java.util.List;
/** A DFA walker that knows how to dump them to serialized strings. */
public class DFASerializer {
@NotNull
final DFA dfa;
@Nullable
final String[] tokenNames;
private final DFA dfa;
@NotNull
private final Vocabulary vocabulary;

/**
 * @deprecated Use {@link #DFASerializer(DFA, Vocabulary)} instead.
 */
@Deprecated
public DFASerializer(@NotNull DFA dfa, @Nullable String[] tokenNames) {
this(dfa, VocabularyImpl.fromTokenNames(tokenNames));
}

public DFASerializer(@NotNull DFA dfa, @NotNull Vocabulary vocabulary) {
this.dfa = dfa;
this.tokenNames = tokenNames;
this.vocabulary = vocabulary;
}

@Override

@@ -73,11 +83,7 @@ public class DFASerializer {
}

protected String getEdgeLabel(int i) {
String label;
if ( i==0 ) return "EOF";
if ( tokenNames!=null ) label = tokenNames[i-1];
else label = String.valueOf(i-1);
return label;
return vocabulary.getDisplayName(i - 1);
}

@NotNull

@@ -30,11 +30,12 @@

package org.antlr.v4.runtime.dfa;

import org.antlr.v4.runtime.VocabularyImpl;
import org.antlr.v4.runtime.misc.NotNull;

public class LexerDFASerializer extends DFASerializer {
public LexerDFASerializer(@NotNull DFA dfa) {
super(dfa, null);
super(dfa, VocabularyImpl.EMPTY_VOCABULARY);
}

@Override

@@ -31,6 +31,8 @@ package org.antlr.v4.runtime.misc;

import org.antlr.v4.runtime.Lexer;
import org.antlr.v4.runtime.Token;
import org.antlr.v4.runtime.Vocabulary;
import org.antlr.v4.runtime.VocabularyImpl;

import java.util.ArrayList;
import java.util.HashSet;

@@ -544,7 +546,15 @@ public class IntervalSet implements IntSet {
return buf.toString();
}

/**
 * @deprecated Use {@link #toString(Vocabulary)} instead.
 */
@Deprecated
public String toString(String[] tokenNames) {
return toString(VocabularyImpl.fromTokenNames(tokenNames));
}

public String toString(@NotNull Vocabulary vocabulary) {
StringBuilder buf = new StringBuilder();
if ( this.intervals==null || this.intervals.isEmpty() ) {
return "{}";

@@ -558,12 +568,12 @@ public class IntervalSet implements IntSet {
int a = I.a;
int b = I.b;
if ( a==b ) {
buf.append(elementName(tokenNames, a));
buf.append(elementName(vocabulary, a));
}
else {
for (int i=a; i<=b; i++) {
if ( i>a ) buf.append(", ");
buf.append(elementName(tokenNames, i));
buf.append(elementName(vocabulary, i));
}
}
if ( iter.hasNext() ) {

@@ -576,12 +586,26 @@ public class IntervalSet implements IntSet {
return buf.toString();
}

protected String elementName(String[] tokenNames, int a) {
if ( a==Token.EOF ) return "<EOF>";
else if ( a==Token.EPSILON ) return "<EPSILON>";
else return tokenNames[a];
/**
 * @deprecated Use {@link #elementName(Vocabulary, int)} instead.
 */
@Deprecated
protected String elementName(String[] tokenNames, int a) {
return elementName(VocabularyImpl.fromTokenNames(tokenNames), a);
}

}

@NotNull
protected String elementName(@NotNull Vocabulary vocabulary, int a) {
if (a == Token.EOF) {
return "<EOF>";
}
else if (a == Token.EPSILON) {
return "<EPSILON>";
}
else {
return vocabulary.getDisplayName(a);
}
}

@Override
public int size() {

@@ -30,7 +30,9 @@

package org.antlr.v4.runtime.misc;

public class Pair<A,B> {
import java.io.Serializable;

public class Pair<A,B> implements Serializable {
public final A a;
public final B b;

@@ -96,11 +96,11 @@ public class Utils {
return buf.toString();
}

public static void writeFile(String fileName, String content) throws IOException {
public static void writeFile(@NotNull String fileName, @NotNull String content) throws IOException {
writeFile(fileName, content, null);
}

public static void writeFile(String fileName, String content, String encoding) throws IOException {
public static void writeFile(@NotNull String fileName, @NotNull String content, @Nullable String encoding) throws IOException {
File f = new File(fileName);
FileOutputStream fos = new FileOutputStream(f);
OutputStreamWriter osw;

@@ -119,14 +119,13 @@ public class Utils {
}
}

public static char[] readFile(String fileName) throws IOException {
@NotNull
public static char[] readFile(@NotNull String fileName) throws IOException {
return readFile(fileName, null);
}

public static char[] readFile(String fileName, String encoding) throws IOException {
if ( fileName==null ) {
return null;
}
@NotNull
public static char[] readFile(@NotNull String fileName, @Nullable String encoding) throws IOException {
File f = new File(fileName);
int size = (int)f.length();
InputStreamReader isr;

@@ -220,7 +220,7 @@ public class ParseTreePatternMatcher {
CommonTokenStream tokens = new CommonTokenStream(tokenSrc);

ParserInterpreter parserInterp = new ParserInterpreter(parser.getGrammarFileName(),
Arrays.asList(parser.getTokenNames()),
parser.getVocabulary(),
Arrays.asList(parser.getRuleNames()),
parser.getATNWithBypassAlts(),
tokens);

@@ -33,7 +33,6 @@ package org.antlr.v4.runtime.misc;
import javax.annotation.processing.AbstractProcessor;
import javax.annotation.processing.RoundEnvironment;
import javax.annotation.processing.SupportedAnnotationTypes;
import javax.annotation.processing.SupportedSourceVersion;
import javax.lang.model.SourceVersion;
import javax.lang.model.element.AnnotationMirror;
import javax.lang.model.element.Element;

@@ -83,7 +82,6 @@ import java.util.Set;
 * @author Sam Harwell
 */
@SupportedAnnotationTypes({NullUsageProcessor.NotNullClassName, NullUsageProcessor.NullableClassName})
@SupportedSourceVersion(SourceVersion.RELEASE_6)
public class NullUsageProcessor extends AbstractProcessor {
public static final String NotNullClassName = "org.antlr.v4.runtime.misc.NotNull";
public static final String NullableClassName = "org.antlr.v4.runtime.misc.Nullable";

@@ -94,6 +92,22 @@ public class NullUsageProcessor extends AbstractProcessor {
public NullUsageProcessor() {
}

@Override
public SourceVersion getSupportedSourceVersion() {
SourceVersion latestSupported = SourceVersion.latestSupported();

if (latestSupported.ordinal() <= 6) {
return SourceVersion.RELEASE_6;
}
else if (latestSupported.ordinal() <= 8) {
return latestSupported;
}
else {
// this annotation processor is tested through Java 8
return SourceVersion.values()[8];
}
}

@Override
public boolean process(Set<? extends TypeElement> annotations, RoundEnvironment roundEnv) {
if (!checkClassNameConstants()) {

tool/pom.xml
@@ -20,6 +20,18 @@
<version>4.11</version>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.seleniumhq.selenium</groupId>
<artifactId>selenium-java</artifactId>
<version>2.43.1</version>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.eclipse.jetty</groupId>
<artifactId>jetty-server</artifactId>
<version>8.1.16.v20140903</version>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.antlr</groupId>
<artifactId>antlr4-runtime</artifactId>

@@ -142,6 +154,17 @@
</executions>
</plugin>

<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-jar-plugin</artifactId>
<executions>
<execution>
<goals>
<goal>test-jar</goal>
</goals>
</execution>
</executions>
</plugin>
</plugins>
</build>

@ -238,20 +238,16 @@ public class <parser.name> extends <superClass; null="Parser"> {
|
|||
public static final int
|
||||
<parser.tokens:{k | <k>=<parser.tokens.(k)>}; separator=", ", wrap, anchor>;
|
||||
<endif>
|
||||
public static final String[] tokenNames = {
|
||||
<parser.tokenNames:{t | <t>}; null="\"\<INVALID>\"", separator=", ", wrap, anchor>
|
||||
};
|
||||
public static final int
|
||||
<parser.rules:{r | RULE_<r.name> = <r.index>}; separator=", ", wrap, anchor>;
|
||||
public static final String[] ruleNames = {
|
||||
<parser.ruleNames:{r | "<r>"}; separator=", ", wrap, anchor>
|
||||
};
|
||||
|
||||
@Override
|
||||
public String getGrammarFileName() { return "<parser.grammarFileName; format="java-escape">"; }
|
||||
<vocabulary(parser.literalNames, parser.symbolicNames)>
|
||||
|
||||
@Override
|
||||
public String[] getTokenNames() { return tokenNames; }
|
||||
public String getGrammarFileName() { return "<parser.grammarFileName; format="java-escape">"; }
|
||||
|
||||
@Override
|
||||
public String[] getRuleNames() { return ruleNames; }
|
||||
|
@ -270,7 +266,8 @@ public class <parser.name> extends <superClass; null="Parser"> {
|
|||
public boolean sempred(RuleContext _localctx, int ruleIndex, int predIndex) {
|
||||
switch (ruleIndex) {
|
||||
<parser.sempredFuncs.values:{f|
|
||||
case <f.ruleIndex>: return <f.name>_sempred((<f.ctxType>)_localctx, predIndex);}; separator="\n">
|
||||
case <f.ruleIndex>:
|
||||
return <f.name>_sempred((<f.ctxType>)_localctx, predIndex);}; separator="\n">
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
@ -281,13 +278,56 @@ case <f.ruleIndex>: return <f.name>_sempred((<f.ctxType>)_localctx, predIndex);}
|
|||
}
|
||||
>>
|
||||
|
||||
vocabulary(literalNames, symbolicNames) ::= <<
|
||||
private static final String[] _LITERAL_NAMES = {
|
||||
<literalNames:{t | <t>}; null="null", separator=", ", wrap, anchor>
|
||||
};
|
||||
private static final String[] _SYMBOLIC_NAMES = {
|
||||
<symbolicNames:{t | <t>}; null="null", separator=", ", wrap, anchor>
|
||||
};
|
||||
public static final Vocabulary VOCABULARY = new VocabularyImpl(_LITERAL_NAMES, _SYMBOLIC_NAMES);
|
||||
|
||||
/**
|
||||
* @deprecated Use {@link #VOCABULARY} instead.
|
||||
*/
|
||||
@Deprecated
|
||||
public static final String[] tokenNames;
|
||||
static {
|
||||
tokenNames = new String[_SYMBOLIC_NAMES.length];
|
||||
for (int i = 0; i \< tokenNames.length; i++) {
|
||||
tokenNames[i] = VOCABULARY.getLiteralName(i);
|
||||
if (tokenNames[i] == null) {
|
||||
tokenNames[i] = VOCABULARY.getSymbolicName(i);
|
||||
}
|
||||
|
||||
if (tokenNames[i] == null) {
|
||||
tokenNames[i] = "\<INVALID>";
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
@Deprecated
|
||||
public String[] getTokenNames() {
|
||||
return tokenNames;
|
||||
}
|
||||
|
||||
@Override
|
||||
@NotNull
|
||||
public Vocabulary getVocabulary() {
|
||||
return VOCABULARY;
|
||||
}
|
||||
>>
|
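
As an illustration, for a hypothetical grammar defining a '+' literal (token type 1, symbolic name PLUS) and an INT token (type 2), the vocabulary template above would expand to roughly the following generated members (a sketch, not verbatim tool output):

private static final String[] _LITERAL_NAMES = { null, "'+'", null };
private static final String[] _SYMBOLIC_NAMES = { null, "PLUS", "INT" };
public static final Vocabulary VOCABULARY = new VocabularyImpl(_LITERAL_NAMES, _SYMBOLIC_NAMES);
// The deprecated tokenNames array is then rebuilt from VOCABULARY in a static
// initializer, preferring literal names ("'+'") over symbolic ones ("INT")
// and falling back to "<INVALID>" when neither exists.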

dumpActions(recog, argFuncs, actionFuncs, sempredFuncs) ::= <<
<if(actionFuncs)>
@Override
public void action(RuleContext _localctx, int ruleIndex, int actionIndex) {
switch (ruleIndex) {
<recog.actionFuncs.values:{f|
case <f.ruleIndex>: <f.name>_action((<f.ctxType>)_localctx, actionIndex); break;}; separator="\n">
<recog.actionFuncs.values:{f|
case <f.ruleIndex>:
<f.name>_action((<f.ctxType>)_localctx, actionIndex);
break;}; separator="\n">
}
}
<actionFuncs.values; separator="\n">

@@ -297,7 +337,8 @@ case <f.ruleIndex>: <f.name>_action((<f.ctxType>)_localctx, actionIndex); break;
public boolean sempred(RuleContext _localctx, int ruleIndex, int predIndex) {
switch (ruleIndex) {
<recog.sempredFuncs.values:{f|
case <f.ruleIndex>: return <f.name>_sempred((<f.ctxType>)_localctx, predIndex);}; separator="\n">
case <f.ruleIndex>:
return <f.name>_sempred((<f.ctxType>)_localctx, predIndex);}; separator="\n">
}
return true;
}

@@ -319,7 +360,9 @@ RuleActionFunction(r, actions) ::= <<
private void <r.name>_action(<r.ctxType> _localctx, int actionIndex) {
switch (actionIndex) {
<actions:{index|
case <index>: <actions.(index)> break;}; separator="\n">
case <index>:
<actions.(index)>
break;}; separator="\n">
}
}
>>

@@ -331,7 +374,8 @@ RuleSempredFunction(r, actions) ::= <<
private boolean <r.name>_sempred(<r.ctxType> _localctx, int predIndex) {
switch (predIndex) {
<actions:{index|
case <index>: return <actions.(index)>;}; separator="\n">
case <index>:
return <actions.(index)>;}; separator="\n">
}
return true;
}

@@ -584,11 +628,13 @@ cases(ttypes) ::= <<
>>

InvokeRule(r, argExprsChunks) ::= <<
setState(<r.stateNumber>); <if(r.labels)><r.labels:{l | <labelref(l)> = }><endif><r.name>(<if(r.ast.options.p)><r.ast.options.p><if(argExprsChunks)>,<endif><endif><argExprsChunks>);
setState(<r.stateNumber>);
<if(r.labels)><r.labels:{l | <labelref(l)> = }><endif><r.name>(<if(r.ast.options.p)><r.ast.options.p><if(argExprsChunks)>,<endif><endif><argExprsChunks>);
>>

MatchToken(m) ::= <<
setState(<m.stateNumber>); <if(m.labels)><m.labels:{l | <labelref(l)> = }><endif>match(<m.name>);
setState(<m.stateNumber>);
<if(m.labels)><m.labels:{l | <labelref(l)> = }><endif>match(<m.name>);
>>

MatchSet(m, expr, capture) ::= "<CommonSetStuff(m, expr, capture, false)>"

@@ -770,7 +816,7 @@ public \<T> T accept(ParseTreeVisitor\<? extends T> visitor) {
}
>>

AttributeDecl(d) ::= "<d.type> <d.name>"
AttributeDecl(d) ::= "<d.type> <d.name><if(d.initValue)> = <d.initValue><endif>"

/** If we don't know location of label def x, use this template */
labelref(x) ::= "<if(!x.isLocal)>((<x.ctx.name>)_localctx).<endif><x.name>"

@@ -783,15 +829,28 @@ recRuleAltPredicate(ruleName,opPrec) ::= "precpred(_ctx, <opPrec>)"
recRuleSetReturnAction(src,name) ::= "$<name>=$<src>.<name>;"
recRuleSetStopToken() ::= "_ctx.stop = _input.LT(-1);"

recRuleAltStartAction(ruleName, ctxName, label) ::= <<
recRuleAltStartAction(ruleName, ctxName, label, isListLabel) ::= <<
_localctx = new <ctxName>Context(_parentctx, _parentState);
<if(label)>
<if(isListLabel)>
_localctx.<label>.add(_prevctx);
<else>
_localctx.<label> = _prevctx;
<endif>
<endif>
<if(label)>_localctx.<label> = _prevctx;<endif>
pushNewRecursionContext(_localctx, _startState, RULE_<ruleName>);
>>

recRuleLabeledAltStartAction(ruleName, currentAltLabel, label) ::= <<
recRuleLabeledAltStartAction(ruleName, currentAltLabel, label, isListLabel) ::= <<
_localctx = new <currentAltLabel; format="cap">Context(new <ruleName; format="cap">Context(_parentctx, _parentState));
<if(label)>((<currentAltLabel; format="cap">Context)_localctx).<label> = _prevctx;<endif>
<if(label)>
<if(isListLabel)>
((<currentAltLabel; format="cap">Context)_localctx).<label>.add(_prevctx);
<else>
((<currentAltLabel; format="cap">Context)_localctx).<label> = _prevctx;
<endif>
<endif>
pushNewRecursionContext(_localctx, _startState, RULE_<ruleName>);
>>
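
The new isListLabel branches exist because a += label on the recursive rule reference collects contexts into a list rather than overwriting a single field, so the generated start action must append instead of assign. An illustrative sketch with a hypothetical label name lhs:

// Plain label, as in  e : lhs=e '+' e | INT ;  (field holds one context):
//     _localctx.lhs = _prevctx;
// List label, as in   e : lhs+=e '+' e | INT ; (field is a List, so append):
//     _localctx.lhs.add(_prevctx);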

@@ -835,18 +894,21 @@ public class <lexer.name> extends <superClass; null="Lexer"> {
new PredictionContextCache();
public static final int
<lexer.tokens:{k | <k>=<lexer.tokens.(k)>}; separator=", ", wrap, anchor>;
<if(lexer.channels)>
public static final int
<lexer.channels:{k | <k>=<lexer.channels.(k)>}; separator=", ", wrap, anchor>;
<endif>
<rest(lexer.modes):{m| public static final int <m> = <i>;}; separator="\n">
public static String[] modeNames = {
<lexer.modes:{m| "<m>"}; separator=", ", wrap, anchor>
};

public static final String[] tokenNames = {
<lexer.tokenNames:{t | <t>}; null="\"\<INVALID>\"", separator=", ", wrap, anchor>
};
public static final String[] ruleNames = {
<lexer.ruleNames:{r | "<r>"}; separator=", ", wrap, anchor>
};

<vocabulary(lexer.literalNames, lexer.symbolicNames)>

<namedActions.members>

public <lexer.name>(CharStream input) {

@@ -857,9 +919,6 @@ public class <lexer.name> extends <superClass; null="Lexer"> {
@Override
public String getGrammarFileName() { return "<lexer.grammarFileName>"; }

@Override
public String[] getTokenNames() { return tokenNames; }

@Override
public String[] getRuleNames() { return ruleNames; }

@@ -608,6 +608,8 @@ public class Tool {
return g;
}

private final Map<String, Grammar> importedGrammars = new HashMap<String, Grammar>();

/**
 * Try current dir then dir of g then lib dir
 * @param g

@@ -615,27 +617,34 @@ public class Tool {
 */
public Grammar loadImportedGrammar(Grammar g, GrammarAST nameNode) throws IOException {
String name = nameNode.getText();
g.tool.log("grammar", "load " + name + " from " + g.fileName);
File importedFile = null;
for (String extension : ALL_GRAMMAR_EXTENSIONS) {
importedFile = getImportedGrammarFile(g, name + extension);
if (importedFile != null) {
break;
Grammar imported = importedGrammars.get(name);
if (imported == null) {
g.tool.log("grammar", "load " + name + " from " + g.fileName);
File importedFile = null;
for (String extension : ALL_GRAMMAR_EXTENSIONS) {
importedFile = getImportedGrammarFile(g, name + extension);
if (importedFile != null) {
break;
}
}

if ( importedFile==null ) {
errMgr.grammarError(ErrorType.CANNOT_FIND_IMPORTED_GRAMMAR, g.fileName, nameNode.getToken(), name);
return null;
}

String absolutePath = importedFile.getAbsolutePath();
ANTLRFileStream in = new ANTLRFileStream(absolutePath, grammarEncoding);
GrammarRootAST root = parse(g.fileName, in);
if (root == null) {
return null;
}

imported = createGrammar(root);
imported.fileName = absolutePath;
importedGrammars.put(root.getGrammarName(), imported);
}

if ( importedFile==null ) {
errMgr.grammarError(ErrorType.CANNOT_FIND_IMPORTED_GRAMMAR, g.fileName, nameNode.getToken(), name);
return null;
}

ANTLRFileStream in = new ANTLRFileStream(importedFile.getAbsolutePath(), grammarEncoding);
GrammarRootAST root = parse(g.fileName, in);
if ( root==null ) {
return null;
}
Grammar imported = createGrammar(root);
imported.fileName = importedFile.getAbsolutePath();
return imported;
}

@@ -36,24 +36,27 @@ public class LeftRecursiveRuleAltInfo {
public int altNum; // original alt index (from 1)
public String leftRecursiveRuleRefLabel;
public String altLabel;
public final boolean isListLabel;
public String altText;
public AltAST altAST; // transformed ALT
public AltAST originalAltAST;
public int nextPrec;

public LeftRecursiveRuleAltInfo(int altNum, String altText) {
this(altNum, altText, null, null, null);
this(altNum, altText, null, null, false, null);
}

public LeftRecursiveRuleAltInfo(int altNum, String altText,
String leftRecursiveRuleRefLabel,
String altLabel,
boolean isListLabel,
AltAST originalAltAST)
{
this.altNum = altNum;
this.altText = altText;
this.leftRecursiveRuleRefLabel = leftRecursiveRuleRefLabel;
this.altLabel = altLabel;
this.isListLabel = isListLabel;
this.originalAltAST = originalAltAST;
}
}

@@ -137,7 +137,7 @@ public class LeftRecursiveRuleAnalyzer extends LeftRecursiveRuleWalker {
}

if ( altAssociativity.get(alt)!=null && altAssociativity.get(alt)!=assoc ) {
tool.errMgr.toolError(ErrorType.ALL_OPS_NEED_SAME_ASSOC, alt);
tool.errMgr.toolError(ErrorType.INTERNAL_ERROR, "all operators of alt " + alt + " of left-recursive rule must have same associativity");
}
altAssociativity.put(alt, assoc);

@@ -149,9 +149,12 @@ public class LeftRecursiveRuleAnalyzer extends LeftRecursiveRuleWalker {
AltAST altTree = (AltAST)originalAltTree.dupTree();
String altLabel = altTree.altLabel!=null ? altTree.altLabel.getText() : null;

String label = null;
boolean isListLabel = false;
GrammarAST lrlabel = stripLeftRecursion(altTree);
String label = lrlabel != null ? lrlabel.getText() : null;
if ( lrlabel!=null ) {
label = lrlabel.getText();
isListLabel = lrlabel.getParent().getType() == PLUS_ASSIGN;
leftRecursiveRuleRefLabels.add(new Pair<GrammarAST,String>(lrlabel,altLabel));
}

@@ -165,7 +168,7 @@ public class LeftRecursiveRuleAnalyzer extends LeftRecursiveRuleWalker {
String altText = text(altTree);
altText = altText.trim();
LeftRecursiveRuleAltInfo a =
new LeftRecursiveRuleAltInfo(alt, altText, label, altLabel, originalAltTree);
new LeftRecursiveRuleAltInfo(alt, altText, label, altLabel, isListLabel, originalAltTree);
a.nextPrec = nextPrec;
binaryAlts.put(alt, a);
//System.out.println("binaryAlt " + alt + ": " + altText + ", rewrite=" + rewriteText);

@@ -183,7 +186,7 @@ public class LeftRecursiveRuleAnalyzer extends LeftRecursiveRuleWalker {
altText = altText.trim();
String altLabel = altTree.altLabel!=null ? altTree.altLabel.getText() : null;
LeftRecursiveRuleAltInfo a =
new LeftRecursiveRuleAltInfo(alt, altText, null, altLabel, originalAltTree);
new LeftRecursiveRuleAltInfo(alt, altText, null, altLabel, false, originalAltTree);
a.nextPrec = nextPrec;
prefixAlts.add(a);
//System.out.println("prefixAlt " + alt + ": " + altText + ", rewrite=" + rewriteText);

@@ -194,16 +197,19 @@ public class LeftRecursiveRuleAnalyzer extends LeftRecursiveRuleWalker {
AltAST altTree = (AltAST)originalAltTree.dupTree();
String altLabel = altTree.altLabel!=null ? altTree.altLabel.getText() : null;

String label = null;
boolean isListLabel = false;
GrammarAST lrlabel = stripLeftRecursion(altTree);
String label = lrlabel != null ? lrlabel.getText() : null;
if ( lrlabel!=null ) {
label = lrlabel.getText();
isListLabel = lrlabel.getParent().getType() == PLUS_ASSIGN;
leftRecursiveRuleRefLabels.add(new Pair<GrammarAST,String>(lrlabel,altLabel));
}
stripAltLabel(altTree);
String altText = text(altTree);
altText = altText.trim();
LeftRecursiveRuleAltInfo a =
new LeftRecursiveRuleAltInfo(alt, altText, label, altLabel, originalAltTree);
new LeftRecursiveRuleAltInfo(alt, altText, label, altLabel, isListLabel, originalAltTree);
suffixAlts.put(alt, a);
// System.out.println("suffixAlt " + alt + ": " + altText + ", rewrite=" + rewriteText);
}

@@ -215,7 +221,7 @@ public class LeftRecursiveRuleAnalyzer extends LeftRecursiveRuleWalker {
String altText = text(altTree);
String altLabel = altTree.altLabel!=null ? altTree.altLabel.getText() : null;
LeftRecursiveRuleAltInfo a =
new LeftRecursiveRuleAltInfo(alt, altText, null, altLabel, originalAltTree);
new LeftRecursiveRuleAltInfo(alt, altText, null, altLabel, false, originalAltTree);
otherAlts.add(a);
// System.out.println("otherAlt " + alt + ": " + altText);
}

@@ -313,7 +319,7 @@ public class LeftRecursiveRuleAnalyzer extends LeftRecursiveRuleWalker {
if ( (first.getType()==RULE_REF && first.getText().equals(ruleName)) ||
(rref!=null && rref.getType()==RULE_REF && rref.getText().equals(ruleName)) )
{
if ( first.getType()==ASSIGN ) lrlabel = (GrammarAST)first.getChild(0);
if ( first.getType()==ASSIGN || first.getType()==PLUS_ASSIGN ) lrlabel = (GrammarAST)first.getChild(0);
// remove rule ref (first child unless options present)
altAST.deleteChild(leftRecurRuleIndex);
// reset index so it prints properly (sets token range of

@@ -106,7 +106,7 @@ public class ATNPrinter {
buf.append("-").append(not?"~":"").append(st.toString()).append("->").append(getStateString(t.target)).append('\n');
}
else {
buf.append("-").append(not?"~":"").append(st.label().toString(g.getTokenDisplayNames())).append("->").append(getStateString(t.target)).append('\n');
buf.append("-").append(not?"~":"").append(st.label().toString(g.getVocabulary())).append("->").append(getStateString(t.target)).append('\n');
}
}
else if ( t instanceof AtomTransition ) {

@@ -464,6 +464,11 @@ public class LexerATNFactory extends ParserATNFactory {
return tokenType;
}

int channelValue = g.getChannelValue(name);
if (channelValue >= org.antlr.v4.runtime.Token.MIN_USER_CHANNEL_VALUE) {
return channelValue;
}

List<String> modeNames = new ArrayList<String>(((LexerGrammar)g).modes.keySet());
int mode = modeNames.indexOf(name);
if (mode >= 0) {

@@ -557,7 +557,7 @@ public class ParserATNFactory implements ATNFactory {
blkStart.loopBackState = loop;
end.loopBackState = loop;

plusAST.atnState = blkStart;
plusAST.atnState = loop;
epsilon(blkEnd, loop); // blk can see loop back

BlockAST blkAST = (BlockAST)plusAST.getChild(0);

@@ -64,23 +64,44 @@ public class CodeGenPipeline {
}
}

// all templates are generated in memory to report the most complete
// error information possible, but actually writing output files stops
// after the first error is reported
int errorCount = g.tool.errMgr.getNumErrors();

if ( g.isLexer() ) {
ST lexer = gen.generateLexer();
writeRecognizer(lexer, gen);
if (g.tool.errMgr.getNumErrors() == errorCount) {
writeRecognizer(lexer, gen);
}
}
else {
ST parser = gen.generateParser();
writeRecognizer(parser, gen);
if (g.tool.errMgr.getNumErrors() == errorCount) {
writeRecognizer(parser, gen);
}
if ( g.tool.gen_listener ) {
gen.writeListener(gen.generateListener());
ST listener = gen.generateListener();
if (g.tool.errMgr.getNumErrors() == errorCount) {
gen.writeListener(listener);
}
if (gen.getTarget().wantsBaseListener()) {
gen.writeBaseListener(gen.generateBaseListener());
ST baseListener = gen.generateBaseListener();
if (g.tool.errMgr.getNumErrors() == errorCount) {
gen.writeBaseListener(baseListener);
}
}
}
if ( g.tool.gen_visitor ) {
gen.writeVisitor(gen.generateVisitor());
ST visitor = gen.generateVisitor();
if (g.tool.errMgr.getNumErrors() == errorCount) {
gen.writeVisitor(visitor);
}
if (gen.getTarget().wantsBaseVisitor()) {
gen.writeBaseVisitor(gen.generateBaseVisitor());
ST baseVisitor = gen.generateBaseVisitor();
if (g.tool.errMgr.getNumErrors() == errorCount) {
gen.writeBaseVisitor(baseVisitor);
}
}
}
gen.writeHeaderFile();
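
The gating pattern above, stated in isolation: snapshot the error count, run a generation step (which may report errors), and write the artifact only when the count is unchanged. A minimal sketch using the calls visible in this diff:

int errorCount = g.tool.errMgr.getNumErrors();
ST parser = gen.generateParser();               // always generated, so every error surfaces
if (g.tool.errMgr.getNumErrors() == errorCount) {
    writeRecognizer(parser, gen);               // skipped once an error has been reported
}

The same guard is applied before each of the listener, base listener, visitor, and base visitor writes.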
@@ -40,13 +40,12 @@ import org.antlr.v4.tool.Grammar;
import org.stringtemplate.v4.AutoIndentWriter;
import org.stringtemplate.v4.ST;
import org.stringtemplate.v4.STGroup;
import org.stringtemplate.v4.STGroupFile;
import org.stringtemplate.v4.STWriter;

import java.io.IOException;
import java.io.Writer;
import java.lang.reflect.Constructor;
import java.util.HashMap;
import java.util.LinkedHashMap;
import java.util.Map;

/** General controller for code gen. Can instantiate sub generator(s).

@@ -158,7 +157,7 @@ public class CodeGenerator {
 */
ST getTokenVocabOutput() {
ST vocabFileST = new ST(vocabFilePattern);
Map<String,Integer> tokens = new HashMap<String,Integer>();
Map<String,Integer> tokens = new LinkedHashMap<String,Integer>();
// make constants for the token names
for (String t : g.tokenNameToTypeMap.keySet()) {
int tokenType = g.tokenNameToTypeMap.get(t);

@@ -169,7 +168,7 @@ public class CodeGenerator {
vocabFileST.add("tokens", tokens);

// now dump the strings
Map<String,Integer> literals = new HashMap<String,Integer>();
Map<String,Integer> literals = new LinkedHashMap<String,Integer>();
for (String literal : g.stringLiteralToTypeMap.keySet()) {
int tokenType = g.stringLiteralToTypeMap.get(literal);
if ( tokenType>=Token.MIN_USER_TOKEN_TYPE) {

@@ -42,6 +42,11 @@ import java.util.Set;

public class JavaTarget extends Target {

/**
 * The Java target can cache the code generation templates.
 */
private static final ThreadLocal<STGroup> targetTemplates = new ThreadLocal<STGroup>();

protected static final String[] javaKeywords = {
"abstract", "assert", "boolean", "break", "byte", "case", "catch",
"char", "class", "const", "continue", "default", "do", "double", "else",

@@ -93,8 +98,13 @@ public class JavaTarget extends Target {

@Override
protected STGroup loadTemplates() {
STGroup result = super.loadTemplates();
result.registerRenderer(String.class, new JavaStringRenderer(), true);
STGroup result = targetTemplates.get();
if (result == null) {
result = super.loadTemplates();
result.registerRenderer(String.class, new JavaStringRenderer(), true);
targetTemplates.set(result);
}

return result;
}

@@ -58,6 +58,7 @@ import org.antlr.v4.misc.Utils;
import org.antlr.v4.parse.ANTLRParser;
import org.antlr.v4.parse.GrammarASTAdaptor;
import org.antlr.v4.tool.Alternative;
import org.antlr.v4.tool.ErrorType;
import org.antlr.v4.tool.Grammar;
import org.antlr.v4.tool.LeftRecursiveRule;
import org.antlr.v4.tool.Rule;

@@ -266,17 +267,26 @@ public class OutputModelController {
for (int i = 0; i < opAltsCode.size(); i++) {
ST altActionST;
LeftRecursiveRuleAltInfo altInfo = r.recOpAlts.getElement(i);
String templateName;
if ( altInfo.altLabel!=null ) {
altActionST = codegenTemplates.getInstanceOf("recRuleLabeledAltStartAction");
templateName = "recRuleLabeledAltStartAction";
altActionST = codegenTemplates.getInstanceOf(templateName);
altActionST.add("currentAltLabel", altInfo.altLabel);
}
else {
altActionST = codegenTemplates.getInstanceOf("recRuleAltStartAction");
templateName = "recRuleAltStartAction";
altActionST = codegenTemplates.getInstanceOf(templateName);
altActionST.add("ctxName", Utils.capitalize(r.name));
}
altActionST.add("ruleName", r.name);
// add label of any lr ref we deleted
altActionST.add("label", altInfo.leftRecursiveRuleRefLabel);
if (altActionST.impl.formalArguments.containsKey("isListLabel")) {
altActionST.add("isListLabel", altInfo.isListLabel);
}
else if (altInfo.isListLabel) {
delegate.getGenerator().tool.errMgr.toolError(ErrorType.CODE_TEMPLATE_ARG_ISSUE, templateName, "isListLabel");
}
Action altAction =
new Action(delegate, function.altLabelCtxs.get(altInfo.altLabel), altActionST);
CodeBlockForAlt alt = opAltsCode.get(i);

@@ -41,9 +41,9 @@ import org.stringtemplate.v4.compiler.FormalArgument;
import java.lang.reflect.Field;
import java.util.Arrays;
import java.util.Collection;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.Set;

@@ -143,7 +143,7 @@ public class OutputModelWalker {
}
else if ( o instanceof Map ) {
Map<?, ?> nestedOmoMap = (Map<?, ?>)o;
Map<Object, ST> m = new HashMap<Object, ST>();
Map<Object, ST> m = new LinkedHashMap<Object, ST>();
for (Map.Entry<?, ?> entry : nestedOmoMap.entrySet()) {
ST nestedST = walk((OutputModelObject)entry.getValue());
// System.out.println("set ModelElement "+fieldName+"="+nestedST+" in "+templateName);

@@ -63,7 +63,7 @@ import org.antlr.v4.codegen.model.decl.TokenDecl;
import org.antlr.v4.codegen.model.decl.TokenListDecl;
import org.antlr.v4.parse.ANTLRParser;
import org.antlr.v4.runtime.atn.DecisionState;
import org.antlr.v4.runtime.atn.PlusBlockStartState;
import org.antlr.v4.runtime.atn.PlusLoopbackState;
import org.antlr.v4.runtime.atn.StarLoopEntryState;
import org.antlr.v4.runtime.misc.IntervalSet;
import org.antlr.v4.tool.Alternative;

@@ -246,7 +246,7 @@ public class ParserFactory extends DefaultOutputModelFactory {
if (!g.tool.force_atn) {
int decision;
if ( ebnfRoot.getType()==ANTLRParser.POSITIVE_CLOSURE ) {
decision = ((PlusBlockStartState)ebnfRoot.atnState).loopBackState.decision;
decision = ((PlusLoopbackState)ebnfRoot.atnState).decision;
}
else if ( ebnfRoot.getType()==ANTLRParser.CLOSURE ) {
decision = ((StarLoopEntryState)ebnfRoot.atnState).decision;

@@ -32,17 +32,21 @@ package org.antlr.v4.codegen.model;

import org.antlr.v4.codegen.OutputModelFactory;
import org.antlr.v4.runtime.atn.PlusBlockStartState;
import org.antlr.v4.runtime.atn.PlusLoopbackState;
import org.antlr.v4.runtime.misc.IntervalSet;
import org.antlr.v4.tool.ast.BlockAST;
import org.antlr.v4.tool.ast.GrammarAST;

import java.util.List;

/** */
public class LL1PlusBlockSingleAlt extends LL1Loop {
public LL1PlusBlockSingleAlt(OutputModelFactory factory, GrammarAST blkAST, List<CodeBlockForAlt> alts) {
super(factory, blkAST, alts);
public LL1PlusBlockSingleAlt(OutputModelFactory factory, GrammarAST plusRoot, List<CodeBlockForAlt> alts) {
super(factory, plusRoot, alts);

BlockAST blkAST = (BlockAST)plusRoot.getChild(0);
PlusBlockStartState blkStart = (PlusBlockStartState)blkAST.atnState;

stateNumber = blkStart.loopBackState.stateNumber;
blockStartStateNumber = blkStart.stateNumber;
PlusBlockStartState plus = (PlusBlockStartState)blkAST.atnState;

@@ -33,6 +33,7 @@ package org.antlr.v4.codegen.model;
import org.antlr.v4.codegen.CodeGenerator;
import org.antlr.v4.codegen.OutputModelFactory;
import org.antlr.v4.codegen.model.decl.RuleContextDecl;
import org.antlr.v4.codegen.model.decl.RuleContextListDecl;
import org.antlr.v4.codegen.model.decl.StructDecl;
import org.antlr.v4.parse.ANTLRParser;
import org.antlr.v4.runtime.misc.Pair;

@@ -55,7 +56,14 @@ public class LeftRecursiveRuleFunction extends RuleFunction {
if ( rrefAST.getType() == ANTLRParser.RULE_REF ) {
Rule targetRule = factory.getGrammar().getRule(rrefAST.getText());
String ctxName = gen.getTarget().getRuleFunctionContextStructName(targetRule);
RuleContextDecl d = new RuleContextDecl(factory,label,ctxName);
RuleContextDecl d;
if (idAST.getParent().getType() == ANTLRParser.ASSIGN) {
d = new RuleContextDecl(factory, label, ctxName);
}
else {
d = new RuleContextListDecl(factory, label, ctxName);
}

StructDecl struct = ruleCtx;
if ( altLabelCtxs!=null ) {
StructDecl s = altLabelCtxs.get(altLabel);

@@ -30,72 +30,29 @@

package org.antlr.v4.codegen.model;

import org.antlr.v4.codegen.CodeGenerator;
import org.antlr.v4.codegen.OutputModelFactory;
import org.antlr.v4.codegen.model.chunk.ActionChunk;
import org.antlr.v4.codegen.model.chunk.ActionText;
import org.antlr.v4.tool.Grammar;
import org.antlr.v4.tool.LexerGrammar;
import org.antlr.v4.tool.Rule;

import java.io.File;
import java.util.Collection;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.Set;

public class Lexer extends OutputModelObject {
public String name;
public String grammarFileName;
public Map<String,Integer> tokens;
public class Lexer extends Recognizer {
public Map<String,Integer> channels;
public LexerFile file;
public String[] tokenNames;
public Set<String> ruleNames;
public Collection<String> modes;
@ModelElement public ActionChunk superClass;

@ModelElement public SerializedATN atn;
@ModelElement public LinkedHashMap<Rule, RuleActionFunction> actionFuncs =
new LinkedHashMap<Rule, RuleActionFunction>();
@ModelElement public LinkedHashMap<Rule, RuleSempredFunction> sempredFuncs =
new LinkedHashMap<Rule, RuleSempredFunction>();

public Lexer(OutputModelFactory factory, LexerFile file) {
this.factory = factory;
super(factory);
this.file = file; // who contains us?

Grammar g = factory.getGrammar();
grammarFileName = new File(g.fileName).getName();
name = g.getRecognizerName();
tokens = new LinkedHashMap<String,Integer>();
LexerGrammar lg = (LexerGrammar)g;
atn = new SerializedATN(factory, lg.atn);
modes = lg.modes.keySet();

for (String t : g.tokenNameToTypeMap.keySet()) {
Integer ttype = g.tokenNameToTypeMap.get(t);
if ( ttype>0 ) tokens.put(t, ttype);
}

tokenNames = g.getTokenDisplayNames();
for (int i = 0; i < tokenNames.length; i++) {
if ( tokenNames[i]==null ) continue;
CodeGenerator gen = factory.getGenerator();
if ( tokenNames[i].charAt(0)=='\'' ) {
boolean addQuotes = false;
tokenNames[i] =
gen.getTarget().getTargetStringLiteralFromANTLRStringLiteral(gen,
tokenNames[i],
addQuotes);
tokenNames[i] = "\"'"+tokenNames[i]+"'\"";
}
else {
tokenNames[i] = gen.getTarget().getTargetStringLiteralFromString(tokenNames[i], true);
}
}
ruleNames = g.rules.keySet();

if (g.getOptionString("superClass") != null) {
superClass = new ActionText(null, g.getOptionString("superClass"));
}
channels = new LinkedHashMap<String, Integer>(g.channelNameToValueMap);
modes = ((LexerGrammar)g).modes.keySet();
}
}

@@ -36,8 +36,8 @@ import org.antlr.v4.tool.Rule;
import org.antlr.v4.tool.ast.ActionAST;
import org.antlr.v4.tool.ast.AltAST;

import java.util.HashMap;
import java.util.HashSet;
import java.util.LinkedHashMap;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;

@@ -52,13 +52,13 @@ public class ListenerFile extends OutputFile {
/**
 * The names of all listener contexts.
 */
public Set<String> listenerNames = new HashSet<String>();
public Set<String> listenerNames = new LinkedHashSet<String>();
/**
 * For listener contexts created for a labeled outer alternative, maps from
 * a listener context name to the name of the rule which defines the
 * context.
 */
public Map<String, String> listenerLabelRuleNames = new HashMap<String, String>();
public Map<String, String> listenerLabelRuleNames = new LinkedHashMap<String, String>();

@ModelElement public Action header;

@@ -30,70 +30,18 @@

package org.antlr.v4.codegen.model;

import org.antlr.v4.codegen.CodeGenerator;
import org.antlr.v4.codegen.OutputModelFactory;
import org.antlr.v4.codegen.model.chunk.ActionChunk;
import org.antlr.v4.codegen.model.chunk.ActionText;
import org.antlr.v4.tool.Grammar;
import org.antlr.v4.tool.Rule;

import java.io.File;
import java.util.ArrayList;
import java.util.Collection;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;

public class Parser extends OutputModelObject {
public String name;
public String grammarFileName;
public String grammarName;
@ModelElement public ActionChunk superClass;
public Map<String,Integer> tokens;
public String[] tokenNames;
public Set<String> ruleNames;
public Collection<Rule> rules;
public class Parser extends Recognizer {
public ParserFile file;

@ModelElement public List<RuleFunction> funcs = new ArrayList<RuleFunction>();
@ModelElement public SerializedATN atn;
@ModelElement public LinkedHashMap<Rule, RuleSempredFunction> sempredFuncs =
new LinkedHashMap<Rule, RuleSempredFunction>();

public Parser(OutputModelFactory factory, ParserFile file) {
this.factory = factory;
super(factory);
this.file = file; // who contains us?
Grammar g = factory.getGrammar();
grammarFileName = new File(g.fileName).getName();
grammarName = g.name;
name = g.getRecognizerName();
tokens = new LinkedHashMap<String,Integer>();
for (String t : g.tokenNameToTypeMap.keySet()) {
Integer ttype = g.tokenNameToTypeMap.get(t);
if ( ttype>0 ) tokens.put(t, ttype);
}
tokenNames = g.getTokenDisplayNames();
for (int i = 0; i < tokenNames.length; i++) {
if ( tokenNames[i]==null ) continue;
CodeGenerator gen = factory.getGenerator();
if ( tokenNames[i].charAt(0)=='\'' ) {
boolean addQuotes = false;
tokenNames[i] =
gen.getTarget().getTargetStringLiteralFromANTLRStringLiteral(gen,
tokenNames[i],
addQuotes);
tokenNames[i] = "\"'"+tokenNames[i]+"'\"";
}
else {
tokenNames[i] = gen.getTarget().getTargetStringLiteralFromString(tokenNames[i], true);
}
}
ruleNames = g.rules.keySet();
rules = g.rules.values();
atn = new SerializedATN(factory, g.atn);
if (g.getOptionString("superClass") != null) {
superClass = new ActionText(null, g.getOptionString("superClass"));
}
}
}

@@ -33,6 +33,7 @@ package org.antlr.v4.codegen.model;
import org.antlr.v4.codegen.OutputModelFactory;
import org.antlr.v4.runtime.atn.PlusBlockStartState;
import org.antlr.v4.runtime.atn.PlusLoopbackState;
import org.antlr.v4.tool.ast.BlockAST;
import org.antlr.v4.tool.ast.GrammarAST;

import java.util.List;

@@ -45,9 +46,9 @@ public class PlusBlock extends Loop {
List<CodeBlockForAlt> alts)
{
super(factory, plusRoot, alts);

PlusBlockStartState blkStart = (PlusBlockStartState)plusRoot.atnState;
PlusLoopbackState loop = ((PlusBlockStartState)plusRoot.atnState).loopBackState;
BlockAST blkAST = (BlockAST)plusRoot.getChild(0);
PlusBlockStartState blkStart = (PlusBlockStartState)blkAST.atnState;
PlusLoopbackState loop = blkStart.loopBackState;
stateNumber = blkStart.loopBackState.stateNumber;
blockStartStateNumber = blkStart.stateNumber;
loopBackStateNumber = loop.stateNumber;

@@ -0,0 +1,135 @@
/*
 * [The "BSD license"]
 * Copyright (c) 2014 Terence Parr
 * Copyright (c) 2014 Sam Harwell
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
package org.antlr.v4.codegen.model;

import org.antlr.v4.codegen.CodeGenerator;
import org.antlr.v4.codegen.OutputModelFactory;
import org.antlr.v4.codegen.model.chunk.ActionChunk;
import org.antlr.v4.codegen.model.chunk.ActionText;
import org.antlr.v4.tool.Grammar;
import org.antlr.v4.tool.Rule;

import java.io.File;
import java.util.Arrays;
import java.util.Collection;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.Set;

public abstract class Recognizer extends OutputModelObject {
    public String name;
    public String grammarName;
    public String grammarFileName;
    public Map<String,Integer> tokens;

    /**
     * @deprecated This field is provided only for compatibility with code
     * generation targets which have not yet been updated to use
     * {@link #literalNames} and {@link #symbolicNames}.
     */
    @Deprecated
    public String[] tokenNames;

    public String[] literalNames;
    public String[] symbolicNames;
    public Set<String> ruleNames;
    public Collection<Rule> rules;
    @ModelElement public ActionChunk superClass;

    @ModelElement public SerializedATN atn;
    @ModelElement public LinkedHashMap<Rule, RuleSempredFunction> sempredFuncs =
        new LinkedHashMap<Rule, RuleSempredFunction>();

    public Recognizer(OutputModelFactory factory) {
        super(factory);

        Grammar g = factory.getGrammar();
        grammarFileName = new File(g.fileName).getName();
        grammarName = g.name;
        name = g.getRecognizerName();
        tokens = new LinkedHashMap<String,Integer>();
        for (Map.Entry<String, Integer> entry : g.tokenNameToTypeMap.entrySet()) {
            Integer ttype = entry.getValue();
            if ( ttype>0 ) {
                tokens.put(entry.getKey(), ttype);
            }
        }

        ruleNames = g.rules.keySet();
        rules = g.rules.values();
        atn = new SerializedATN(factory, g.atn);
        if (g.getOptionString("superClass") != null) {
            superClass = new ActionText(null, g.getOptionString("superClass"));
        }
        else {
            superClass = null;
        }

        CodeGenerator gen = factory.getGenerator();
        tokenNames = translateTokenStringsToTarget(g.getTokenDisplayNames(), gen);
        literalNames = translateTokenStringsToTarget(g.getTokenLiteralNames(), gen);
        symbolicNames = translateTokenStringsToTarget(g.getTokenSymbolicNames(), gen);
    }

    protected static String[] translateTokenStringsToTarget(String[] tokenStrings, CodeGenerator gen) {
        String[] result = tokenStrings.clone();
        for (int i = 0; i < tokenStrings.length; i++) {
            result[i] = translateTokenStringToTarget(tokenStrings[i], gen);
        }

        int lastTrueEntry = result.length - 1;
        while (lastTrueEntry >= 0 && result[lastTrueEntry] == null) {
            lastTrueEntry --;
        }

        if (lastTrueEntry < result.length - 1) {
            result = Arrays.copyOf(result, lastTrueEntry + 1);
        }

        return result;
    }

    protected static String translateTokenStringToTarget(String tokenName, CodeGenerator gen) {
        if (tokenName == null) {
            return null;
        }

        if (tokenName.charAt(0) == '\'') {
            boolean addQuotes = false;
            String targetString =
                gen.getTarget().getTargetStringLiteralFromANTLRStringLiteral(gen, tokenName, addQuotes);
            return "\"'" + targetString + "'\"";
        }
        else {
            return gen.getTarget().getTargetStringLiteralFromString(tokenName, true);
        }
    }

}

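The trailing-null trimming in translateTokenStringsToTarget above is the easy part to get wrong off by one, so here is a minimal standalone sketch of just that step, assuming nothing beyond the JDK (the class and method names are invented for illustration, not part of the commit):

import java.util.Arrays;

public class TrimTrailingNulls {
    // Drop trailing null entries, mirroring the loop in Recognizer above.
    static String[] trim(String[] names) {
        int last = names.length - 1;
        while (last >= 0 && names[last] == null) {
            last--;
        }
        return last < names.length - 1 ? Arrays.copyOf(names, last + 1) : names;
    }

    public static void main(String[] args) {
        String[] names = { null, "'if'", "ID", null, null };
        // Prints [null, 'if', ID]: interior nulls survive, only trailing ones go.
        System.out.println(Arrays.toString(trim(names)));
    }
}
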
@@ -68,6 +68,8 @@ import java.util.Set;
import static org.antlr.v4.parse.ANTLRParser.RULE_REF;
import static org.antlr.v4.parse.ANTLRParser.TOKEN_REF;

import java.util.LinkedHashSet;

/** */
public class RuleFunction extends OutputModelObject {
    public String name;

@@ -207,7 +209,7 @@ public class RuleFunction extends OutputModelObject {
            }
        }
    }
    Set<Decl> decls = new HashSet<Decl>();
    Set<Decl> decls = new LinkedHashSet<Decl>();
    for (GrammarAST t : allRefs) {
        String refLabelName = t.getText();
        List<Decl> d = getDeclForAltElement(t,

@@ -36,8 +36,8 @@ import org.antlr.v4.tool.Rule;
import org.antlr.v4.tool.ast.ActionAST;
import org.antlr.v4.tool.ast.AltAST;

import java.util.HashMap;
import java.util.HashSet;
import java.util.LinkedHashMap;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;

@@ -49,13 +49,13 @@ public class VisitorFile extends OutputFile {
    /**
     * The names of all rule contexts which may need to be visited.
     */
    public Set<String> visitorNames = new HashSet<String>();
    public Set<String> visitorNames = new LinkedHashSet<String>();
    /**
     * For rule contexts created for a labeled outer alternative, maps from
     * a listener context name to the name of the rule which defines the
     * context.
     */
    public Map<String, String> visitorLabelRuleNames = new HashMap<String, String>();
    public Map<String, String> visitorLabelRuleNames = new LinkedHashMap<String, String>();

    @ModelElement public Action header;

@@ -36,8 +36,10 @@ import org.antlr.v4.tool.Attribute;
/** */
public class AttributeDecl extends Decl {
    public String type;
    public String initValue;
    public AttributeDecl(OutputModelFactory factory, Attribute a) {
        super(factory, a.name, a.decl);
        this.type = a.type;
        this.initValue = a.initValue;
    }
}

@@ -32,7 +32,7 @@ package org.antlr.v4.misc;
import org.antlr.v4.runtime.misc.OrderedHashSet;

import java.util.ArrayList;
import java.util.HashMap;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;

@@ -59,7 +59,7 @@ public class Graph<T> {
    }

    /** Map from node payload to node containing it */
    protected Map<T,Node<T>> nodes = new HashMap<T,Node<T>>();
    protected Map<T,Node<T>> nodes = new LinkedHashMap<T,Node<T>>();

    public void addEdge(T a, T b) {
        //System.out.println("add edge "+a+" to "+b);

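This HashMap-to-LinkedHashMap swap repeats throughout the commit; the point is deterministic iteration order, so generated code and test expectations stop depending on hash-bucket layout, which is unspecified and can change between JDK versions. A small illustrative sketch, not taken from the commit itself:

import java.util.HashMap;
import java.util.LinkedHashMap;
import java.util.Map;

public class IterationOrderDemo {
    public static void main(String[] args) {
        Map<String, Integer> hashed = new HashMap<String, Integer>();
        Map<String, Integer> linked = new LinkedHashMap<String, Integer>();
        for (String key : new String[] {"expr", "stat", "atom"}) {
            hashed.put(key, key.length());
            linked.put(key, key.length());
        }
        // HashMap reports keys in an order determined by their hash buckets;
        // LinkedHashMap always reports them in insertion order: expr, stat, atom.
        System.out.println("HashMap:       " + hashed.keySet());
        System.out.println("LinkedHashMap: " + linked.keySet());
    }
}
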
@@ -150,6 +150,8 @@ import org.antlr.v4.tool.*;

@members {
    public static final int COMMENTS_CHANNEL = 2;

    public CommonTokenStream tokens; // track stream we push to; need for context info
    public boolean isLexerRule = false;

@@ -261,15 +263,11 @@ COMMENT
         }
      )
      {
         // Unless we had a documentation comment, then we do not wish to
         // pass the comments in to the parser. If you are writing a formatter
         // then you will want to preserve the comments off channel, but could
         // just skip and save token space if not.
         // We do not wish to pass the comments in to the parser. If you are
         // writing a formatter then you will want to preserve the comments off
         // channel, but could just skip and save token space if not.
         //
         if ($type != DOC_COMMENT) {
             $channel=2; // Comments are on channel 2
         }
         $channel=COMMENTS_CHANNEL;
      }
   ;

@@ -434,12 +432,13 @@ NESTED_ACTION
// keywords used to specify ANTLR v3 grammars. Keywords may not be used as
// labels for rules or in any other context where they would be ambiguous
// with the keyword vs some other identifier
// OPTIONS and TOKENS must also consume the opening brace that captures
// their option block, as this is teh easiest way to parse it separate
// to an ACTION block, despite it usingthe same {} delimiters.
// OPTIONS, TOKENS, and CHANNELS must also consume the opening brace that captures
// their option block, as this is the easiest way to parse it separate
// to an ACTION block, despite it using the same {} delimiters.
//
OPTIONS      : 'options'  WSNLCHARS* '{'  ;
TOKENS_SPEC  : 'tokens'   WSNLCHARS* '{'  ;
OPTIONS      : 'options'  WSNLCHARS* '{'  ;
TOKENS_SPEC  : 'tokens'   WSNLCHARS* '{'  ;
CHANNELS     : 'channels' WSNLCHARS* '{'  ;

IMPORT       : 'import'   ;
FRAGMENT     : 'fragment' ;

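The new CHANNELS keyword above, together with the channelsSpec rule added to the parser below, is what lets a grammar declare its own channels. A hedged sketch of what such a grammar could look like once this lands, written in the style of this repo's tests (the channel and rule names are made up for illustration):

public class ChannelsExample {
    public static void main(String[] args) {
        // A lexer grammar using the new channels{} block. COMMENTS_CH and WS_CH
        // are hypothetical channel names; the tool assigns them constants
        // starting at Token.MIN_USER_CHANNEL_VALUE.
        String grammar =
            "lexer grammar ChannelsDemo;\n" +
            "channels { COMMENTS_CH, WS_CH }\n" +
            "ID      : [a-zA-Z]+ ;\n" +
            "COMMENT : '//' ~[\\r\\n]* -> channel(COMMENTS_CH) ;\n" +
            "WS      : [ \\t\\r\\n]+ -> channel(WS_CH) ;\n";
        System.out.println(grammar);
    }
}
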
@@ -150,13 +150,7 @@ if ( options!=null ) {
    Grammar.setNodeOptions($tree, options);
  }
}
    :   // The grammar itself can have a documenation comment, which is the
        // first terminal in the file.
        //
        DOC_COMMENT?

        // Next we should see the type and name of the grammar file that
    :   // First we should see the type and name of the grammar file that
        // we are about to parse.
        //
        grammarType id SEMI

@@ -195,7 +189,6 @@ if ( options!=null ) {

      -> ^(grammarType       // The grammar type is our root AST node
             id              // We need to identify the grammar of course
             DOC_COMMENT?    // We may or may not have a global documentation comment for the file
             prequelConstruct* // The set of declarations we accumulated
             rules           // And of course, we need the set of rules we discovered
             modeSpec*

@@ -237,6 +230,9 @@ prequelConstruct
      // {tree} parser.
      tokensSpec

    | // A list of custom channels used by the grammar
      channelsSpec

    | // A declaration of language target implemented constructs. All such
      // action sections start with '@' and are given to the language target's
      // StringTemplate group. For instance @parser::header and @lexer::header

@@ -301,6 +297,10 @@ v3tokenSpec
      SEMI
    ;

channelsSpec
    : CHANNELS^ id (COMMA! id)* RBRACE!
    ;

// A declaration of a language target specifc section,
// such as @header, @includes and so on. We do not verify these
// sections, they are just passed on to the language target.

@@ -364,10 +364,7 @@ parserRule
    Grammar.setNodeOptions($tree, options);
  }
}
    : // A rule may start with an optional documentation comment
      DOC_COMMENT?

      // Next comes the rule name. Here we do not distinguish between
    : // Start with the rule name. Here we do not distinguish between
      // parser or lexer rules, the semantic verification phase will
      // reject any rules that make no sense, such as lexer rules in
      // a pure parser or tree parser.

@@ -412,7 +409,7 @@ parserRule

      exceptionGroup

      -> ^( RULE<RuleAST> RULE_REF DOC_COMMENT? ARG_ACTION<ActionAST>?
      -> ^( RULE<RuleAST> RULE_REF ARG_ACTION<ActionAST>?
            ruleReturns? throwsSpec? localsSpec? rulePrequels? ruleBlock exceptionGroup*
          )
    ;

@@ -522,9 +519,9 @@ lexerRule
@after {
  paraphrases.pop();
}
    : DOC_COMMENT? FRAGMENT?
    : FRAGMENT?
      TOKEN_REF COLON lexerRuleBlock SEMI
      -> ^( RULE<RuleAST> TOKEN_REF DOC_COMMENT?
      -> ^( RULE<RuleAST> TOKEN_REF
            ^(RULEMODIFIERS FRAGMENT)? lexerRuleBlock
          )
    ;

@@ -547,11 +544,11 @@ lexerAlt
      ( lexerCommands -> ^(LEXER_ALT_ACTION<AltAST> lexerElements lexerCommands)
      |               -> lexerElements
      )
    |                 -> ^(ALT<AltAST> EPSILON) // empty alt
    ;

lexerElements
    : lexerElement+ -> ^(ALT<AltAST> lexerElement+)
    : lexerElement+ -> ^(ALT<AltAST> lexerElement+)
    |               -> ^(ALT<AltAST> EPSILON) // empty alt
    ;

lexerElement

|
@ -125,6 +125,7 @@ public void grammarOption(GrammarAST ID, GrammarAST valueAST) { }
|
|||
public void ruleOption(GrammarAST ID, GrammarAST valueAST) { }
|
||||
public void blockOption(GrammarAST ID, GrammarAST valueAST) { }
|
||||
public void defineToken(GrammarAST ID) { }
|
||||
public void defineChannel(GrammarAST ID) { }
|
||||
public void globalNamedAction(GrammarAST scope, GrammarAST ID, ActionAST action) { }
|
||||
public void importGrammar(GrammarAST label, GrammarAST ID) { }
|
||||
|
||||
|
@ -189,6 +190,12 @@ protected void exitTokensSpec(GrammarAST tree) { }
|
|||
protected void enterTokenSpec(GrammarAST tree) { }
|
||||
protected void exitTokenSpec(GrammarAST tree) { }
|
||||
|
||||
protected void enterChannelsSpec(GrammarAST tree) { }
|
||||
protected void exitChannelsSpec(GrammarAST tree) { }
|
||||
|
||||
protected void enterChannelSpec(GrammarAST tree) { }
|
||||
protected void exitChannelSpec(GrammarAST tree) { }
|
||||
|
||||
protected void enterAction(GrammarAST tree) { }
|
||||
protected void exitAction(GrammarAST tree) { }
|
||||
|
||||
|
@ -336,7 +343,7 @@ grammarSpec
|
|||
@after {
|
||||
exitGrammarSpec($start);
|
||||
}
|
||||
: ^( GRAMMAR ID {grammarName=$ID.text;} DOC_COMMENT?
|
||||
: ^( GRAMMAR ID {grammarName=$ID.text;}
|
||||
{discoverGrammar((GrammarRootAST)$GRAMMAR, $ID);}
|
||||
prequelConstructs
|
||||
{finishPrequels($prequelConstructs.firstOne);}
|
||||
|
@ -366,6 +373,7 @@ prequelConstruct
|
|||
: optionsSpec
|
||||
| delegateGrammars
|
||||
| tokensSpec
|
||||
| channelsSpec
|
||||
| action
|
||||
;
|
||||
|
||||
|
@ -450,6 +458,26 @@ tokenSpec
|
|||
: ID {defineToken($ID);}
|
||||
;
|
||||
|
||||
channelsSpec
|
||||
@init {
|
||||
enterChannelsSpec($start);
|
||||
}
|
||||
@after {
|
||||
exitChannelsSpec($start);
|
||||
}
|
||||
: ^(CHANNELS channelSpec+)
|
||||
;
|
||||
|
||||
channelSpec
|
||||
@init {
|
||||
enterChannelSpec($start);
|
||||
}
|
||||
@after {
|
||||
exitChannelSpec($start);
|
||||
}
|
||||
: ID {defineChannel($ID);}
|
||||
;
|
||||
|
||||
action
|
||||
@init {
|
||||
enterAction($start);
|
||||
|
@ -491,7 +519,7 @@ lexerRule
|
|||
}
|
||||
: ^( RULE TOKEN_REF
|
||||
{currentRuleName=$TOKEN_REF.text; currentRuleAST=$RULE;}
|
||||
DOC_COMMENT? (^(RULEMODIFIERS m=FRAGMENT {mods.add($m);}))?
|
||||
(^(RULEMODIFIERS m=FRAGMENT {mods.add($m);}))?
|
||||
{discoverLexerRule((RuleAST)$RULE, $TOKEN_REF, mods, (GrammarAST)input.LT(1));}
|
||||
lexerRuleBlock
|
||||
{
|
||||
|
@ -512,7 +540,7 @@ rule
|
|||
exitRule($start);
|
||||
}
|
||||
: ^( RULE RULE_REF {currentRuleName=$RULE_REF.text; currentRuleAST=$RULE;}
|
||||
DOC_COMMENT? (^(RULEMODIFIERS (m=ruleModifier{mods.add($m.start);})+))?
|
||||
(^(RULEMODIFIERS (m=ruleModifier{mods.add($m.start);})+))?
|
||||
ARG_ACTION?
|
||||
ret=ruleReturns?
|
||||
thr=throwsSpec?
|
||||
|
|
|
@@ -67,7 +67,7 @@ rec_rule returns [boolean isLeftRec]
    currentOuterAltNumber = 1;
}
    : ^( r=RULE id=RULE_REF {ruleName=$id.getText();}
        DOC_COMMENT? ruleModifier?
        ruleModifier?
        // (ARG_ACTION)? shouldn't allow args, right?
        (^(RETURNS a=ARG_ACTION {setReturnValues($a);}))?
        // ( ^(THROWS .+) )? don't allow

@@ -121,7 +121,7 @@ outerAlternative returns [boolean isLeftRec]
    ;

binary
    : ^( ALT elementOptions? recurse element+ recurse epsilonElement* )
    : ^( ALT elementOptions? recurse element* recurse epsilonElement* )
        {setAltAssoc((AltAST)$ALT,currentOuterAltNumber);}
    ;

@@ -144,6 +144,7 @@ nonLeftRecur

recurse
    : ^(ASSIGN ID recurseNoLabel)
    | ^(PLUS_ASSIGN ID recurseNoLabel)
    | recurseNoLabel
    ;

@@ -255,6 +255,21 @@ public class BasicSemanticChecks extends GrammarTreeVisitor {
        checkTokenDefinition(ID.token);
    }

    @Override
    protected void enterChannelsSpec(GrammarAST tree) {
        if (g.isParser()) {
            g.tool.errMgr.grammarError(ErrorType.CHANNELS_BLOCK_IN_PARSER_GRAMMAR, g.fileName, tree.token);
        }
        else if (g.isCombined()) {
            g.tool.errMgr.grammarError(ErrorType.CHANNELS_BLOCK_IN_COMBINED_GRAMMAR, g.fileName, tree.token);
        }
    }

    @Override
    public void defineChannel(GrammarAST ID) {
        checkChannelDefinition(ID.token);
    }

    @Override
    public void elementOption(GrammarASTWithOptions elem, GrammarAST ID, GrammarAST valueAST) {
        String v = null;

@@ -394,6 +409,9 @@ public class BasicSemanticChecks extends GrammarTreeVisitor {
        }
    }

    void checkChannelDefinition(Token tokenID) {
    }

    @Override
    protected void enterLexerElement(GrammarAST tree) {
    }

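enterChannelsSpec rejects channels{} anywhere it cannot take effect. A hedged illustration of grammars that would trip the two new errors, again in the repo's grammar-as-string test style (the grammar text is invented; the error numbers are the ones added to ErrorType further down):

public class ChannelsErrorDemo {
    public static void main(String[] args) {
        // channels{} only makes sense in a lexer grammar; in a parser or
        // combined grammar the checks above should report errors 163/164.
        String parserGrammar =
            "parser grammar P;\n" +
            "channels { FOO }\n" +   // -> CHANNELS_BLOCK_IN_PARSER_GRAMMAR (163)
            "s : A ;\n";
        String combinedGrammar =
            "grammar C;\n" +
            "channels { FOO }\n" +   // -> CHANNELS_BLOCK_IN_COMBINED_GRAMMAR (164)
            "s : 'a' ;\n";
        System.out.println(parserGrammar + "\n" + combinedGrammar);
    }
}
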
@@ -37,6 +37,7 @@ import org.antlr.v4.runtime.misc.NotNull;
import org.antlr.v4.runtime.misc.Pair;
import org.antlr.v4.tool.ErrorType;
import org.antlr.v4.tool.Grammar;
import org.antlr.v4.tool.LexerGrammar;
import org.antlr.v4.tool.Rule;
import org.antlr.v4.tool.ast.GrammarAST;

@@ -127,6 +128,8 @@ public class SemanticPipeline {
            collector.tokenIDRefs, collector.terminals);
    }

    assignChannelTypes(g, collector.channelDefs);

    // CHECK RULE REFS NOW (that we've defined rules in grammar)
    symcheck.checkRuleArgs(g, collector.rulerefs);
    identifyStartRules(collector);

@@ -186,7 +189,10 @@ public class SemanticPipeline {
    for (String lit : conflictingLiterals) {
        // Remove literal if repeated across rules so it's not
        // found by parser grammar.
        G.stringLiteralToTypeMap.remove(lit);
        Integer value = G.stringLiteralToTypeMap.remove(lit);
        if (value != null && value > 0 && value < G.typeToStringLiteralList.size() && lit.equals(G.typeToStringLiteralList.get(value))) {
            G.typeToStringLiteralList.set(value, null);
        }
    }
}

@@ -257,4 +263,38 @@ public class SemanticPipeline {
        g.tool.log("semantics", "tokens="+g.tokenNameToTypeMap);
        g.tool.log("semantics", "strings="+g.stringLiteralToTypeMap);
    }

    /**
     * Assign constant values to custom channels defined in a grammar.
     *
     * @param g The grammar.
     * @param channelDefs A collection of AST nodes defining individual channels
     * within a {@code channels{}} block in the grammar.
     */
    void assignChannelTypes(Grammar g, List<GrammarAST> channelDefs) {
        Grammar outermost = g.getOutermostGrammar();
        for (GrammarAST channel : channelDefs) {
            String channelName = channel.getText();

            // Channel names can't alias tokens or modes, because constant
            // values are also assigned to them and the ->channel(NAME) lexer
            // command does not distinguish between the various ways a constant
            // can be declared. This method does not verify that channels do not
            // alias rules, because rule names are not associated with constant
            // values in ANTLR grammar semantics.

            if (g.getTokenType(channelName) != Token.INVALID_TYPE) {
                g.tool.errMgr.grammarError(ErrorType.CHANNEL_CONFLICTS_WITH_TOKEN, g.fileName, channel.token, channelName);
            }

            if (outermost instanceof LexerGrammar) {
                LexerGrammar lexerGrammar = (LexerGrammar)outermost;
                if (lexerGrammar.modes.containsKey(channelName)) {
                    g.tool.errMgr.grammarError(ErrorType.CHANNEL_CONFLICTS_WITH_MODE, g.fileName, channel.token, channelName);
                }
            }

            outermost.defineChannelName(channel.getText());
        }
    }
}

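assignChannelTypes funnels every declared channel through Grammar.defineChannelName, which hands out constants via getNewChannelNumber. A minimal sketch of the resulting numbering, with the runtime constant restated locally as an assumption rather than imported:

public class ChannelNumberingDemo {
    // Token.MIN_USER_CHANNEL_VALUE is 2 in the runtime (DEFAULT_CHANNEL=0 and
    // HIDDEN_CHANNEL=1 are predefined); restated here so the sketch is self-contained.
    static final int MIN_USER_CHANNEL_VALUE = 2;
    static int maxChannelType = MIN_USER_CHANNEL_VALUE - 1;

    // Mirrors Grammar.getNewChannelNumber(): pre-increment per defined channel.
    static int getNewChannelNumber() {
        maxChannelType++;
        return maxChannelType;
    }

    public static void main(String[] args) {
        // channels { COMMENTS_CH, WS_CH } would yield COMMENTS_CH=2, WS_CH=3.
        System.out.println("COMMENTS_CH=" + getNewChannelNumber());
        System.out.println("WS_CH=" + getNewChannelNumber());
    }
}
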
@@ -65,6 +65,7 @@ public class SymbolCollector extends GrammarTreeVisitor {
    public List<GrammarAST> tokenIDRefs = new ArrayList<GrammarAST>();
    public Set<String> strings = new HashSet<String>();
    public List<GrammarAST> tokensDefs = new ArrayList<GrammarAST>();
    public List<GrammarAST> channelDefs = new ArrayList<GrammarAST>();

    /** Track action name node in @parser::members {...} or @members {...} */
    List<GrammarAST> namedActions = new ArrayList<GrammarAST>();

@@ -97,6 +98,11 @@ public class SymbolCollector extends GrammarTreeVisitor {
        tokensDefs.add(ID);
    }

    @Override
    public void defineChannel(GrammarAST ID) {
        channelDefs.add(ID);
    }

    @Override
    public void discoverRule(RuleAST rule, GrammarAST ID,
                             List<GrammarAST> modifiers, ActionAST arg,

@@ -43,6 +43,7 @@ import org.antlr.v4.runtime.atn.NotSetTransition;
import org.antlr.v4.runtime.atn.PlusBlockStartState;
import org.antlr.v4.runtime.atn.PlusLoopbackState;
import org.antlr.v4.runtime.atn.RangeTransition;
import org.antlr.v4.runtime.atn.RuleStartState;
import org.antlr.v4.runtime.atn.RuleStopState;
import org.antlr.v4.runtime.atn.RuleTransition;
import org.antlr.v4.runtime.atn.SetTransition;

@@ -243,7 +244,14 @@ public class DOTGenerator {
                RuleTransition rr = ((RuleTransition)edge);
                // don't jump to other rules, but display edge to follow node
                edgeST = stlib.getInstanceOf("edge");
                edgeST.add("label", "<"+ruleNames[rr.ruleIndex]+">");

                String label = "<" + ruleNames[rr.ruleIndex];
                if (((RuleStartState)rr.target).isPrecedenceRule) {
                    label += "[" + rr.precedence + "]";
                }
                label += ">";

                edgeST.add("label", label);
                edgeST.add("src", "s"+s.stateNumber);
                edgeST.add("target", "s"+rr.followState.stateNumber);
                edgeST.add("arrowhead", arrowhead);

@@ -284,7 +292,7 @@ public class DOTGenerator {
                SetTransition set = (SetTransition)edge;
                String label = set.label().toString();
                if ( isLexer ) label = set.label().toString(true);
                else if ( grammar!=null ) label = set.label().toString(grammar.getTokenDisplayNames());
                else if ( grammar!=null ) label = set.label().toString(grammar.getVocabulary());
                if ( edge instanceof NotSetTransition ) label = "~"+label;
                edgeST.add("label", getEdgeLabel(label));
            }

@@ -293,7 +301,7 @@ public class DOTGenerator {
                RangeTransition range = (RangeTransition)edge;
                String label = range.label().toString();
                if ( isLexer ) label = range.toString();
                else if ( grammar!=null ) label = range.label().toString(grammar.getTokenDisplayNames());
                else if ( grammar!=null ) label = range.label().toString(grammar.getVocabulary());
                edgeST.add("label", getEdgeLabel(label));
            }
            else {

@@ -222,14 +222,10 @@ public enum ErrorType {
     * Compiler Error 56.
     *
     * <p>reference to undefined rule: <em>rule</em></p>
     *
     * @see #PARSER_RULE_REF_IN_LEXER_RULE
     */
    UNDEFINED_RULE_REF(56, "reference to undefined rule: <arg>", ErrorSeverity.ERROR),
    /**
     * Compiler Error 160.
     *
     * <p>reference to undefined rule: <em>rule</em></p>
     */
    PARSER_RULE_REF_IN_LEXER_RULE(160, "reference to parser rule <arg> in lexer rule <arg2>", ErrorSeverity.ERROR),
    /**
     * Compiler Error 57.
     *

@@ -433,7 +429,10 @@ public enum ErrorType {
     * <p>
     * all operators of alt <em>alt</em> of left-recursive rule must have same
     * associativity</p>
     *
     * @deprecated This warning is no longer applicable with the current syntax for specifying associativity.
     */
    @Deprecated
    ALL_OPS_NEED_SAME_ASSOC(118, "all operators of alt <arg> of left-recursive rule must have same associativity", ErrorSeverity.WARNING),
    /**
     * Compiler Error 119.
     *

@@ -924,6 +923,38 @@ public enum ErrorType {
     * @since 4.2.1
     */
    RESERVED_RULE_NAME(159, "cannot declare a rule with reserved name <arg>", ErrorSeverity.ERROR),
    /**
     * Compiler Error 160.
     *
     * <p>reference to parser rule <em>rule</em> in lexer rule <em>name</em></p>
     *
     * @see #UNDEFINED_RULE_REF
     */
    PARSER_RULE_REF_IN_LEXER_RULE(160, "reference to parser rule <arg> in lexer rule <arg2>", ErrorSeverity.ERROR),
    /**
     * Compiler Error 161.
     *
     * <p>channel <em>name</em> conflicts with token with same name</p>
     */
    CHANNEL_CONFLICTS_WITH_TOKEN(161, "channel <arg> conflicts with token with same name", ErrorSeverity.ERROR),
    /**
     * Compiler Error 162.
     *
     * <p>channel <em>name</em> conflicts with mode with same name</p>
     */
    CHANNEL_CONFLICTS_WITH_MODE(162, "channel <arg> conflicts with mode with same name", ErrorSeverity.ERROR),
    /**
     * Compiler Error 163.
     *
     * <p>custom channels are not supported in parser grammars</p>
     */
    CHANNELS_BLOCK_IN_PARSER_GRAMMAR(163, "custom channels are not supported in parser grammars", ErrorSeverity.ERROR),
    /**
     * Compiler Error 164.
     *
     * <p>custom channels are not supported in combined grammars</p>
     */
    CHANNELS_BLOCK_IN_COMBINED_GRAMMAR(164, "custom channels are not supported in combined grammars", ErrorSeverity.ERROR),

    /*
     * Backward incompatibility errors

@@ -45,6 +45,8 @@ import org.antlr.v4.runtime.LexerInterpreter;
import org.antlr.v4.runtime.ParserInterpreter;
import org.antlr.v4.runtime.Token;
import org.antlr.v4.runtime.TokenStream;
import org.antlr.v4.runtime.Vocabulary;
import org.antlr.v4.runtime.VocabularyImpl;
import org.antlr.v4.runtime.atn.ATN;
import org.antlr.v4.runtime.atn.ATNDeserializer;
import org.antlr.v4.runtime.atn.ATNSerializer;

@@ -70,6 +72,7 @@ import java.util.Arrays;
import java.util.HashMap;
import java.util.HashSet;
import java.util.LinkedHashMap;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;

@@ -231,6 +234,26 @@ public class Grammar implements AttributeResolver {
     */
    public final List<String> typeToTokenList = new ArrayList<String>();

    /**
     * The maximum channel value which is assigned by this grammar. Values below
     * {@link Token#MIN_USER_CHANNEL_VALUE} are assumed to be predefined.
     */
    int maxChannelType = Token.MIN_USER_CHANNEL_VALUE - 1;

    /**
     * Map channel like {@code COMMENTS_CHANNEL} to its constant channel value.
     * Only user-defined channels are defined in this map.
     */
    public final Map<String, Integer> channelNameToValueMap = new LinkedHashMap<String, Integer>();

    /**
     * Map a constant channel value to its name. Indexed with raw channel value.
     * The predefined channels {@link Token#DEFAULT_CHANNEL} and
     * {@link Token#HIDDEN_CHANNEL} are not stored in this list, so the values
     * at the corresponding indexes is {@code null}.
     */
    public final List<String> channelValueToNameList = new ArrayList<String>();

    /** Map a name to an action.
     *  The code generator will use this to fill holes in the output files.
     *  I track the AST node for the action in case I need the line number

@@ -357,11 +380,9 @@ public class Grammar implements AttributeResolver {
        if ( t.getType()==ANTLRParser.ASSIGN ) {
            t = (GrammarAST)t.getChild(1);
            importedGrammarName = t.getText();
            tool.log("grammar", "import "+ importedGrammarName);
        }
        else if ( t.getType()==ANTLRParser.ID ) {
            importedGrammarName = t.getText();
            tool.log("grammar", "import " + t.getText());
        }
        Grammar g;
        try {

@@ -494,16 +515,24 @@ public class Grammar implements AttributeResolver {
     *  The grammars are in import tree preorder. Don't include ourselves
     *  in list as we're not a delegate of ourselves.
     */
    public List<Grammar> getAllImportedGrammars() {
        if ( importedGrammars==null ) return null;
        List<Grammar> delegates = new ArrayList<Grammar>();
        for (Grammar d : importedGrammars) {
            delegates.add(d);
            List<Grammar> ds = d.getAllImportedGrammars();
            if (ds != null) delegates.addAll(ds);
    public List<Grammar> getAllImportedGrammars() {
        if (importedGrammars == null) {
            return null;
        }
        return delegates;
    }

        LinkedHashMap<String, Grammar> delegates = new LinkedHashMap<String, Grammar>();
        for (Grammar d : importedGrammars) {
            delegates.put(d.fileName, d);
            List<Grammar> ds = d.getAllImportedGrammars();
            if (ds != null) {
                for (Grammar imported : ds) {
                    delegates.put(imported.fileName, imported);
                }
            }
        }

        return new ArrayList<Grammar>(delegates.values());
    }

    public List<Grammar> getImportedGrammars() { return importedGrammars; }

@@ -665,6 +694,26 @@ public class Grammar implements AttributeResolver {
        return INVALID_TOKEN_NAME;
    }

    /**
     * Gets the constant channel value for a user-defined channel.
     *
     * <p>
     * This method only returns channel values for user-defined channels. All
     * other channels, including the predefined channels
     * {@link Token#DEFAULT_CHANNEL} and {@link Token#HIDDEN_CHANNEL} along with
     * any channel defined in code (e.g. in a {@code @members{}} block), are
     * ignored.</p>
     *
     * @param channel The channel name.
     * @return The channel value, if {@code channel} is the name of a known
     * user-defined token channel; otherwise, -1.
     */
    public int getChannelValue(String channel) {
        Integer I = channelNameToValueMap.get(channel);
        int i = (I != null) ? I : -1;
        return i;
    }

    /**
     * Gets an array of rule names for rules defined or imported by the
     * grammar. The array index is the rule index, and the value is the name of

@@ -721,6 +770,53 @@ public class Grammar implements AttributeResolver {
        return tokenNames;
    }

    /**
     * Gets the literal names assigned to tokens in the grammar.
     */
    @NotNull
    public String[] getTokenLiteralNames() {
        int numTokens = getMaxTokenType();
        String[] literalNames = new String[numTokens+1];
        for (int i = 0; i < Math.min(literalNames.length, typeToStringLiteralList.size()); i++) {
            literalNames[i] = typeToStringLiteralList.get(i);
        }

        for (Map.Entry<String, Integer> entry : stringLiteralToTypeMap.entrySet()) {
            if (entry.getValue() >= 0 && entry.getValue() < literalNames.length && literalNames[entry.getValue()] == null) {
                literalNames[entry.getValue()] = entry.getKey();
            }
        }

        return literalNames;
    }

    /**
     * Gets the symbolic names assigned to tokens in the grammar.
     */
    @NotNull
    public String[] getTokenSymbolicNames() {
        int numTokens = getMaxTokenType();
        String[] symbolicNames = new String[numTokens+1];
        for (int i = 0; i < Math.min(symbolicNames.length, typeToTokenList.size()); i++) {
            if (typeToTokenList.get(i) == null || typeToTokenList.get(i).startsWith(AUTO_GENERATED_TOKEN_NAME_PREFIX)) {
                continue;
            }

            symbolicNames[i] = typeToTokenList.get(i);
        }

        return symbolicNames;
    }

    /**
     * Gets a {@link Vocabulary} instance describing the vocabulary used by the
     * grammar.
     */
    @NotNull
    public Vocabulary getVocabulary() {
        return new VocabularyImpl(getTokenLiteralNames(), getTokenSymbolicNames());
    }

    /** Given an arbitrarily complex SemanticContext, walk the "tree" and get display string.
     *  Pull predicates from grammar text.
     */

@@ -812,6 +908,12 @@ public class Grammar implements AttributeResolver {
        return maxTokenType;
    }

    /** Return a new unique integer in the channel value space. */
    public int getNewChannelNumber() {
        maxChannelType++;
        return maxChannelType;
    }

    public void importTokensFromTokensFile() {
        String vocab = getOptionString("tokenVocab");
        if ( vocab!=null ) {

@@ -832,6 +934,9 @@ public class Grammar implements AttributeResolver {
        for (String tokenName: importG.stringLiteralToTypeMap.keySet()) {
            defineStringLiteral(tokenName, importG.stringLiteralToTypeMap.get(tokenName));
        }
        for (Map.Entry<String, Integer> channel : importG.channelNameToValueMap.entrySet()) {
            defineChannelName(channel.getKey(), channel.getValue());
        }
        // this.tokenNameToTypeMap.putAll( importG.tokenNameToTypeMap );
        // this.stringLiteralToTypeMap.putAll( importG.stringLiteralToTypeMap );
        int max = Math.max(this.typeToTokenList.size(), importG.typeToTokenList.size());

@@ -840,6 +945,13 @@ public class Grammar implements AttributeResolver {
            maxTokenType = Math.max(maxTokenType, ttype);
            this.typeToTokenList.set(ttype, importG.typeToTokenList.get(ttype));
        }

        max = Math.max(this.channelValueToNameList.size(), importG.channelValueToNameList.size());
        Utils.setSize(channelValueToNameList, max);
        for (int channelValue = 0; channelValue < importG.channelValueToNameList.size(); channelValue++) {
            maxChannelType = Math.max(maxChannelType, channelValue);
            this.channelValueToNameList.set(channelValue, importG.channelValueToNameList.get(channelValue));
        }
    }

    public int defineTokenName(String name) {

@@ -903,6 +1015,68 @@ public class Grammar implements AttributeResolver {
        }
    }

    /**
     * Define a token channel with a specified name.
     *
     * <p>
     * If a channel with the specified name already exists, the previously
     * assigned channel value is returned.</p>
     *
     * @param name The channel name.
     * @return The constant channel value assigned to the channel.
     */
    public int defineChannelName(String name) {
        Integer prev = channelNameToValueMap.get(name);
        if (prev == null) {
            return defineChannelName(name, getNewChannelNumber());
        }

        return prev;
    }

    /**
     * Define a token channel with a specified name.
     *
     * <p>
     * If a channel with the specified name already exists, the previously
     * assigned channel value is not altered.</p>
     *
     * @param name The channel name.
     * @return The constant channel value assigned to the channel.
     */
    public int defineChannelName(String name, int value) {
        Integer prev = channelNameToValueMap.get(name);
        if (prev != null) {
            return prev;
        }

        channelNameToValueMap.put(name, value);
        setChannelNameForValue(value, name);
        maxChannelType = Math.max(maxChannelType, value);
        return value;
    }

    /**
     * Sets the channel name associated with a particular channel value.
     *
     * <p>
     * If a name has already been assigned to the channel with constant value
     * {@code channelValue}, this method does nothing.</p>
     *
     * @param channelValue The constant value for the channel.
     * @param name The channel name.
     */
    public void setChannelNameForValue(int channelValue, String name) {
        if (channelValue >= channelValueToNameList.size()) {
            Utils.setSize(channelValueToNameList, channelValue + 1);
        }

        String prevChannel = channelValueToNameList.get(channelValue);
        if (prevChannel == null) {
            channelValueToNameList.set(channelValue, name);
        }
    }

    // no isolated attr at grammar action level
    @Override
    public Attribute resolveToAttribute(String x, ActionAST node) {

@@ -1056,7 +1230,7 @@ public class Grammar implements AttributeResolver {
    }

    public Set<String> getStringLiterals() {
        final Set<String> strings = new HashSet<String>();
        final Set<String> strings = new LinkedHashSet<String>();
        GrammarTreeVisitor collector = new GrammarTreeVisitor() {
            @Override
            public void stringRef(TerminalAST ref) {

@@ -1127,7 +1301,7 @@ public class Grammar implements AttributeResolver {

        char[] serializedAtn = ATNSerializer.getSerializedAsChars(atn);
        ATN deserialized = new ATNDeserializer().deserialize(serializedAtn);
        return new LexerInterpreter(fileName, Arrays.asList(getTokenDisplayNames()), Arrays.asList(getRuleNames()), ((LexerGrammar)this).modes.keySet(), deserialized, input);
        return new LexerInterpreter(fileName, getVocabulary(), Arrays.asList(getRuleNames()), ((LexerGrammar)this).modes.keySet(), deserialized, input);
    }

    public ParserInterpreter createParserInterpreter(TokenStream tokenStream) {

@@ -1137,6 +1311,6 @@ public class Grammar implements AttributeResolver {

        char[] serializedAtn = ATNSerializer.getSerializedAsChars(atn);
        ATN deserialized = new ATNDeserializer().deserialize(serializedAtn);
        return new ParserInterpreter(fileName, Arrays.asList(getTokenDisplayNames()), Arrays.asList(getRuleNames()), deserialized, tokenStream);
        return new ParserInterpreter(fileName, getVocabulary(), Arrays.asList(getRuleNames()), deserialized, tokenStream);
    }
}

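With getVocabulary and the channel maps in place, the tool-side Grammar exposes the same Vocabulary abstraction the runtime interpreters now consume. A hedged usage sketch in the style of this repo's tests; the grammar text and channel name are invented, and it assumes (as the tests do) that the Grammar(String) constructor fully processes the grammar:

import org.antlr.v4.runtime.Vocabulary;
import org.antlr.v4.tool.Grammar;

public class VocabularyDemo {
    public static void main(String[] args) throws Exception {
        Grammar g = new Grammar(
            "lexer grammar V;\n" +
            "channels { COMMENTS_CH }\n" +
            "IF : 'if' ;\n" +
            "ID : [a-z]+ ;\n");
        Vocabulary vocab = g.getVocabulary();
        // Display name prefers the literal form ('if') over the symbolic one (IF).
        System.out.println(vocab.getDisplayName(g.getTokenType("IF")));
        // User-defined channels start at Token.MIN_USER_CHANNEL_VALUE (2);
        // unknown channel names return -1.
        System.out.println(g.getChannelValue("COMMENTS_CH"));
    }
}
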
@@ -289,8 +289,28 @@ public class GrammarTransformPipeline {

        GrammarAST optionsRoot = (GrammarAST)imp.ast.getFirstChildWithType(ANTLRParser.OPTIONS);
        if ( optionsRoot!=null ) {
            rootGrammar.tool.errMgr.grammarError(ErrorType.OPTIONS_IN_DELEGATE,
                                optionsRoot.g.fileName, optionsRoot.token, imp.name);
            // suppress the warning if the options match the options specified
            // in the root grammar
            // https://github.com/antlr/antlr4/issues/707

            boolean hasNewOption = false;
            for (Map.Entry<String, GrammarAST> option : imp.ast.getOptions().entrySet()) {
                String importOption = imp.ast.getOptionString(option.getKey());
                if (importOption == null) {
                    continue;
                }

                String rootOption = rootGrammar.ast.getOptionString(option.getKey());
                if (!importOption.equals(rootOption)) {
                    hasNewOption = true;
                    break;
                }
            }

            if (hasNewOption) {
                rootGrammar.tool.errMgr.grammarError(ErrorType.OPTIONS_IN_DELEGATE,
                                    optionsRoot.g.fileName, optionsRoot.token, imp.name);
            }
        }
        rootGrammar.tool.log("grammar", "Grammar: "+rootGrammar.ast.toStringTree());

@@ -398,6 +418,7 @@ public class GrammarTransformPipeline {
        // add strings from combined grammar (and imported grammars) into lexer
        // put them first as they are keywords; must resolve ambigs to these rules
        // tool.log("grammar", "strings from parser: "+stringLiterals);
        int insertIndex = 0;
        nextLit:
        for (String lit : stringLiterals) {
            // if lexer already has a rule for literal, continue

@@ -419,9 +440,12 @@ public class GrammarTransformPipeline {
            CommonToken idToken = new CommonToken(ANTLRParser.TOKEN_REF, rname);
            litRule.addChild(new TerminalAST(idToken));
            litRule.addChild(blk);
            lexerRulesRoot.insertChild(0, litRule); // add first
            lexerRulesRoot.insertChild(insertIndex, litRule);
            // lexerRulesRoot.getChildren().add(0, litRule);
            lexerRulesRoot.freshenParentAndChildIndexes(); // reset indexes and set litRule parent

            // next literal will be added after the one just added
            insertIndex++;
        }

        // TODO: take out after stable if slow

@@ -41,6 +41,7 @@ import org.stringtemplate.v4.misc.MultiMap;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;

@@ -212,7 +213,7 @@ public class Rule implements AttributeResolver {
     * this label. Unlabeled alternatives are not included in the result.
     */
    public Map<String, List<Pair<Integer, AltAST>>> getAltLabels() {
        Map<String, List<Pair<Integer, AltAST>>> labels = new HashMap<String, List<Pair<Integer, AltAST>>>();
        Map<String, List<Pair<Integer, AltAST>>> labels = new LinkedHashMap<String, List<Pair<Integer, AltAST>>>();
        for (int i=1; i<=numberOfAlts; i++) {
            GrammarAST altLabel = alt[i].ast.altLabel;
            if ( altLabel!=null ) {

@@ -121,8 +121,74 @@ public abstract class BaseTest {
    public static final String newline = System.getProperty("line.separator");
    public static final String pathSep = System.getProperty("path.separator");

    /**
     * When the {@code antlr.testinprocess} runtime property is set to
     * {@code true}, the test suite will attempt to load generated classes into
     * the test process for direct execution rather than invoking the JVM in a
     * new process for testing.
     *
     * <p>
     * In-process testing results in a substantial performance improvement, but
     * some test environments created by IDEs do not support the mechanisms
     * currently used by the tests to dynamically load compiled code. Therefore,
     * the default behavior (used in all other cases) favors reliable
     * cross-system test execution by executing generated test code in a
     * separate process.</p>
     */
    public static final boolean TEST_IN_SAME_PROCESS = Boolean.parseBoolean(System.getProperty("antlr.testinprocess"));

    /**
     * When the {@code antlr.preserve-test-dir} runtime property is set to
     * {@code true}, the temporary directories created by the test run will not
     * be removed at the end of the test run, even for tests that completed
     * successfully.
     *
     * <p>
     * The default behavior (used in all other cases) is removing the temporary
     * directories for all tests which completed successfully, and preserving
     * the directories for tests which failed.</p>
     */
    public static final boolean PRESERVE_TEST_DIR = Boolean.parseBoolean(System.getProperty("antlr.preserve-test-dir"));

    /**
     * The base test directory is the directory where generated files get placed
     * during unit test execution.
     *
     * <p>
     * The default value for this property is the {@code java.io.tmpdir} system
     * property, and can be overridden by setting the
     * {@code antlr.java-test-dir} property to a custom location. Note that the
     * {@code antlr.java-test-dir} property directly affects the
     * {@link #CREATE_PER_TEST_DIRECTORIES} value as well.</p>
     */
    public static final String BASE_TEST_DIR;

    /**
     * When {@code true}, a temporary directory will be created for each test
     * executed during the test run.
     *
     * <p>
     * This value is {@code true} when the {@code antlr.java-test-dir} system
     * property is set, and otherwise {@code false}.</p>
     */
    public static final boolean CREATE_PER_TEST_DIRECTORIES;

    static {
        String baseTestDir = System.getProperty("antlr.java-test-dir");
        boolean perTestDirectories = false;
        if (baseTestDir == null || baseTestDir.isEmpty()) {
            baseTestDir = System.getProperty("java.io.tmpdir");
            perTestDirectories = true;
        }

        if (!new File(baseTestDir).isDirectory()) {
            throw new UnsupportedOperationException("The specified base test directory does not exist: " + baseTestDir);
        }

        BASE_TEST_DIR = baseTestDir;
        CREATE_PER_TEST_DIRECTORIES = perTestDirectories;
    }

    /**
     * Build up the full classpath we need, including the surefire path (if present)
     */

@@ -141,17 +207,26 @@ public abstract class BaseTest {
        @Override
        protected void succeeded(Description description) {
            // remove tmpdir if no error.
            eraseTempDir();
            if (!PRESERVE_TEST_DIR) {
                eraseTempDir();
            }
        }

    };

    @Before
    public void setUp() throws Exception {
        // new output dir for each test
        tmpdir = new File(System.getProperty("java.io.tmpdir"),
                          getClass().getSimpleName()+"-"+System.currentTimeMillis()).getAbsolutePath();
        // tmpdir = "/tmp";
        if (CREATE_PER_TEST_DIRECTORIES) {
            // new output dir for each test
            String testDirectory = getClass().getSimpleName() + "-" + System.currentTimeMillis();
            tmpdir = new File(BASE_TEST_DIR, testDirectory).getAbsolutePath();
        }
        else {
            tmpdir = new File(BASE_TEST_DIR).getAbsolutePath();
            if (!PRESERVE_TEST_DIR && new File(tmpdir).exists()) {
                eraseFiles();
            }
        }
    }

    protected org.antlr.v4.Tool newTool(String[] args) {

@@ -871,14 +946,11 @@ public abstract class BaseTest {
    }

    void checkRuleATN(Grammar g, String ruleName, String expecting) {
        ParserATNFactory f = new ParserATNFactory(g);
        ATN atn = f.createATN();

        DOTGenerator dot = new DOTGenerator(g);
        System.out.println(dot.getDOT(atn.ruleToStartState[g.getRule(ruleName).index]));
        System.out.println(dot.getDOT(g.atn.ruleToStartState[g.getRule(ruleName).index]));

        Rule r = g.getRule(ruleName);
        ATNState startState = atn.ruleToStartState[r.index];
        ATNState startState = g.atn.ruleToStartState[r.index];
        ATNPrinter serializer = new ATNPrinter(g, startState);
        String result = serializer.asString();

@@ -1272,7 +1344,7 @@ public abstract class BaseTest {

        @Override
        public String getSourceName() {
            return null;
            return UNKNOWN_SOURCE_NAME;
        }

        @Override

@@ -73,6 +73,7 @@ public class ParserInterpreterForTesting {
    }

    @Override
    @Deprecated
    public String[] getTokenNames() {
        return g.getTokenNames();
    }

@@ -33,15 +33,23 @@ import org.antlr.v4.Tool;
import org.antlr.v4.automata.ATNPrinter;
import org.antlr.v4.automata.LexerATNFactory;
import org.antlr.v4.automata.ParserATNFactory;
import org.antlr.v4.parse.ANTLRParser;
import org.antlr.v4.runtime.atn.ATN;
import org.antlr.v4.runtime.atn.ATNState;
import org.antlr.v4.tool.ErrorType;
import org.antlr.v4.tool.Grammar;
import org.antlr.v4.tool.LexerGrammar;
import org.antlr.v4.tool.ast.GrammarAST;
import org.antlr.v4.tool.ast.GrammarRootAST;
import org.antlr.v4.tool.ast.RuleAST;
import org.junit.Test;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;

@@ -286,6 +294,33 @@ public class TestATNConstruction extends BaseTest {
            "RuleStop_a_1-EOF->s7\n";
        checkRuleATN(g, "a", expecting);
    }
    @Test public void testAplusSingleAltHasPlusASTPointingAtLoopBackState() throws Exception {
        Grammar g = new Grammar(
            "parser grammar P;\n"+
            "s : a B ;\n" + // (RULE a (BLOCK (ALT (+ (BLOCK (ALT A))))))
            "a : A+;");
        String expecting =
            "RuleStart_a_2->PlusBlockStart_8\n" +
            "PlusBlockStart_8->s7\n" +
            "s7-A->BlockEnd_9\n" +
            "BlockEnd_9->PlusLoopBack_10\n" +
            "PlusLoopBack_10->PlusBlockStart_8\n" +
            "PlusLoopBack_10->s11\n" +
            "s11->RuleStop_a_3\n" +
            "RuleStop_a_3->s5\n";
        checkRuleATN(g, "a", expecting);
        // Get all AST -> ATNState relationships. Make sure loopback is covered when no loop entry decision
        List<GrammarAST> ruleNodes = g.ast.getNodesWithType(ANTLRParser.RULE);
        RuleAST a = (RuleAST)ruleNodes.get(1);
        List<GrammarAST> nodesInRule = a.getNodesWithType(null);
        Map<GrammarAST, ATNState> covered = new LinkedHashMap<GrammarAST, ATNState>();
        for (GrammarAST node : nodesInRule) {
            if ( node.atnState != null ) {
                covered.put(node, node.atnState);
            }
        }
        assertEquals("{RULE=2, BLOCK=8, +=10, BLOCK=8, A=7}", covered.toString());
    }
    @Test public void testAorBplus() throws Exception {
        Grammar g = new Grammar(
            "parser grammar P;\n"+

@@ -525,7 +525,7 @@ public class TestATNParserPrediction extends BaseTest {
                nvae.printStackTrace(System.err);
            }
            DFA dfa = interp.parser.decisionToDFA[decision];
            assertEquals(dfaString[i], dfa.toString(g.getTokenDisplayNames()));
            assertEquals(dfaString[i], dfa.toString(g.getVocabulary()));
        }
    }
}

@@ -33,6 +33,7 @@ package org.antlr.v4.test;
import org.antlr.v4.runtime.CharStream;
import org.antlr.v4.runtime.CommonToken;
import org.antlr.v4.runtime.CommonTokenStream;
import org.antlr.v4.runtime.IntStream;
import org.antlr.v4.runtime.Lexer;
import org.antlr.v4.runtime.Token;
import org.antlr.v4.runtime.TokenFactory;

@@ -235,7 +236,7 @@ public class TestCommonTokenStream extends TestBufferedTokenStream {

            @Override
            public String getSourceName() {
                return null;
                return IntStream.UNKNOWN_SOURCE_NAME;
            }

            @Override

@@ -283,7 +284,7 @@ public class TestCommonTokenStream extends TestBufferedTokenStream {

            @Override
            public String getSourceName() {
                return null;
                return IntStream.UNKNOWN_SOURCE_NAME;
            }

            @Override

@@ -797,4 +797,24 @@ public class TestCompositeGrammars extends BaseTest {
        assertEquals("", found);
        assertNull(stderrDuringParse);
    }

    /**
     * This is a regression test for antlr/antlr4#670 "exception when importing
     * grammar".
     * https://github.com/antlr/antlr4/issues/670
     */
    @Test
    public void testImportLargeGrammar() throws Exception {
        String slave = load("Java.g4", "UTF-8");
        String master =
            "grammar NewJava;\n" +
            "import Java;\n";

        System.out.println("dir "+tmpdir);
        mkdir(tmpdir);
        writeFile(tmpdir, "Java.g4", slave);
        String found = execParser("NewJava.g4", master, "NewJavaParser", "NewJavaLexer", "compilationUnit", "package Foo;", debug);
        assertEquals("", found);
        assertNull(stderrDuringParse);
    }
}

@@ -196,8 +196,8 @@ public class TestFullContextParsing extends BaseTest {
                           input, true);
        expecting =
            "Decision 1:\n" +
            "s0-'else'->:s1^=>1\n" +
            "s0-'}'->:s2=>2\n";
            "s0-'}'->:s2=>2\n" +
            "s0-'else'->:s1^=>1\n";
        assertEquals(expecting, result);
        assertEquals("line 1:29 reportAttemptingFullContext d=1 (stat), input='else'\n" +
                     "line 1:38 reportAmbiguity d=1 (stat): ambigAlts={1, 2}, input='elsefoo}'\n",

@@ -228,8 +228,8 @@ public class TestFullContextParsing extends BaseTest {
                           input, true);
        expecting =
            "Decision 1:\n" +
            "s0-'else'->:s1^=>1\n" +
            "s0-'}'->:s2=>2\n";
            "s0-'}'->:s2=>2\n" +
            "s0-'else'->:s1^=>1\n";
        assertEquals(expecting, result);
        assertEquals("line 1:19 reportAttemptingFullContext d=1 (stat), input='else'\n" +
                     "line 1:19 reportContextSensitivity d=1 (stat), input='else'\n" +

@@ -244,8 +244,8 @@ public class TestFullContextParsing extends BaseTest {
                           input, true);
        expecting =
            "Decision 1:\n" +
            "s0-'else'->:s1^=>1\n" +
            "s0-'}'->:s2=>2\n";
            "s0-'}'->:s2=>2\n" +
            "s0-'else'->:s1^=>1\n";
        assertEquals(expecting, result);
        assertEquals("line 1:19 reportAttemptingFullContext d=1 (stat), input='else'\n" +
                     "line 1:19 reportContextSensitivity d=1 (stat), input='else'\n" +

@@ -35,6 +35,7 @@ import org.junit.Test;

 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;

 /** */
 public class TestLeftRecursion extends BaseTest {

@@ -341,6 +342,70 @@ public class TestLeftRecursion extends BaseTest {
 runTests(grammar, tests, "s");
 }

+/**
+ * This is a regression test for antlr/antlr4#677 "labels not working in
+ * grammar file".
+ * https://github.com/antlr/antlr4/issues/677
+ *
+ * <p>This test treats {@code ,} and {@code >>} as part of a single compound
+ * operator (similar to a ternary operator).</p>
+ */
+@Test public void testReturnValueAndActionsList1() throws Exception {
+String grammar =
+"grammar T;\n" +
+"s @after {System.out.println($ctx.toStringTree(this));} : expr EOF;\n" +
+"expr:\n" +
+" a=expr '*' a=expr #Factor\n" +
+" | b+=expr (',' b+=expr)* '>>' c=expr #Send\n" +
+" | ID #JustId //semantic check on modifiers\n" +
+";\n" +
+"\n" +
+"ID : ('a'..'z'|'A'..'Z'|'_')\n" +
+" ('a'..'z'|'A'..'Z'|'0'..'9'|'_')*\n" +
+";\n" +
+"\n" +
+"WS : [ \\t\\n]+ -> skip ;\n";
+String[] tests = {
+"a*b", "(s (expr (expr a) * (expr b)) <EOF>)",
+"a,c>>x", "(s (expr (expr a) , (expr c) >> (expr x)) <EOF>)",
+"x", "(s (expr x) <EOF>)",
+"a*b,c,x*y>>r", "(s (expr (expr (expr a) * (expr b)) , (expr c) , (expr (expr x) * (expr y)) >> (expr r)) <EOF>)",
+};
+runTests(grammar, tests, "s");
+}
+
+/**
+ * This is a regression test for antlr/antlr4#677 "labels not working in
+ * grammar file".
+ * https://github.com/antlr/antlr4/issues/677
+ *
+ * <p>This test treats the {@code ,} and {@code >>} operators separately.</p>
+ */
+@Test public void testReturnValueAndActionsList2() throws Exception {
+String grammar =
+"grammar T;\n" +
+"s @after {System.out.println($ctx.toStringTree(this));} : expr EOF;\n" +
+"expr:\n" +
+" a=expr '*' a=expr #Factor\n" +
+" | b+=expr ',' b+=expr #Comma\n" +
+" | b+=expr '>>' c=expr #Send\n" +
+" | ID #JustId //semantic check on modifiers\n" +
+";\n" +
+"\n" +
+"ID : ('a'..'z'|'A'..'Z'|'_')\n" +
+" ('a'..'z'|'A'..'Z'|'0'..'9'|'_')*\n" +
+";\n" +
+"\n" +
+"WS : [ \\t\\n]+ -> skip ;\n";
+String[] tests = {
+"a*b", "(s (expr (expr a) * (expr b)) <EOF>)",
+"a,c>>x", "(s (expr (expr (expr a) , (expr c)) >> (expr x)) <EOF>)",
+"x", "(s (expr x) <EOF>)",
+"a*b,c,x*y>>r", "(s (expr (expr (expr (expr (expr a) * (expr b)) , (expr c)) , (expr (expr x) * (expr y))) >> (expr r)) <EOF>)",
+};
+runTests(grammar, tests, "s");
+}
+
 @Test public void testLabelsOnOpSubrule() throws Exception {
 String grammar =
 "grammar T;\n" +

@@ -646,7 +711,8 @@ public class TestLeftRecursion extends BaseTest {
 }

 public void runTests(String grammar, String[] tests, String startRule) {
-rawGenerateAndBuildRecognizer("T.g4", grammar, "TParser", "TLexer");
+boolean success = rawGenerateAndBuildRecognizer("T.g4", grammar, "TParser", "TLexer");
+assertTrue(success);
 writeRecognizerAndCompile("TParser",
 "TLexer",
 startRule,

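For the #677 tests added above, a note on what the labels buy the caller: each `#Name` alternative gets its own context class, and a `b+=expr` list label becomes a `List<ExprContext>` field on it. A hedged listener sketch, assuming the names ANTLR would generate for grammar T:

    // SendPrinter consumes the labels from the #Send alternative; TBaseListener,
    // TParser.SendContext, and the b/c fields are assumed generated names.
    public class SendPrinter extends TBaseListener {
        @Override
        public void exitSend(TParser.SendContext ctx) {
            // b+=expr collected every left-hand operand into ctx.b; c=expr is the single ctx.c
            System.out.println(ctx.b.size() + " operand(s) sent to " + ctx.c.getText());
        }
    }
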
@@ -79,7 +79,7 @@ public class TestLexerActions extends BaseTest {
 "Hello: Steve\n" +
 "\n" +
 "[@0,0:11='hello Steve\\n',<1>,1:0]\n" +
-"[@1,12:11='<EOF>',<-1>,2:12]\n";
+"[@1,12:11='<EOF>',<-1>,2:0]\n";
 assertEquals(expecting, found);
 }

@@ -276,7 +276,7 @@ public class TestLexerActions extends BaseTest {
 "[@3,31:31='\\n',<4>,2:18]\n" +
 "[@4,32:45='Another line.\\n',<1>,3:0]\n" +
 "[@5,46:56='More line.\\n',<1>,4:0]\n" +
-"[@6,57:56='<EOF>',<-1>,5:11]\n";
+"[@6,57:56='<EOF>',<-1>,5:0]\n";
 assertEquals(expecting, found);
 }

@@ -201,7 +201,7 @@ public class TestLexerErrors extends BaseTest {
 String result = execLexer("T.g4", grammar, "TLexer", "x : x", false);
 String expecting =
 "[@0,0:0='x',<3>,1:0]\n" +
-"[@1,2:2=':',<2>,1:2]\n" +
+"[@1,2:2=':',<1>,1:2]\n" +
 "[@2,4:4='x',<3>,1:4]\n" +
 "[@3,5:4='<EOF>',<-1>,1:5]\n";
 assertEquals(expecting, result);

@@ -147,7 +147,7 @@ public class TestLexerExec extends BaseTest {
 String found = execLexer("L.g4", grammar, "L", "//blah\n//blah\n");
 assertEquals(
 "[@0,0:13='//blah\\n//blah\\n',<1>,1:0]\n" +
-"[@1,14:13='<EOF>',<-1>,3:14]\n", found);
+"[@1,14:13='<EOF>',<-1>,3:0]\n", found);
 assertNull(stderrDuringParse);
 }

@@ -162,7 +162,7 @@ public class TestLexerExec extends BaseTest {
 assertEquals(
 "[@0,0:6='//blah\\n',<1>,1:0]\n" +
 "[@1,7:13='//blah\\n',<1>,2:0]\n" +
-"[@2,14:13='<EOF>',<-1>,3:7]\n", found);
+"[@2,14:13='<EOF>',<-1>,3:0]\n", found);
 assertNull(stderrDuringParse);
 }

@@ -176,7 +176,7 @@ public class TestLexerExec extends BaseTest {
 String found = execLexer("L.g4", grammar, "L", "//blah\n//blah\n");
 assertEquals(
 "[@0,0:13='//blah\\n//blah\\n',<1>,1:0]\n" +
-"[@1,14:13='<EOF>',<-1>,3:14]\n", found);
+"[@1,14:13='<EOF>',<-1>,3:0]\n", found);
 assertNull(stderrDuringParse);
 }

@@ -191,7 +191,7 @@ public class TestLexerExec extends BaseTest {
 assertEquals(
 "[@0,0:6='//blah\\n',<1>,1:0]\n" +
 "[@1,7:13='//blah\\n',<1>,2:0]\n" +
-"[@2,14:13='<EOF>',<-1>,3:7]\n", found);
+"[@2,14:13='<EOF>',<-1>,3:0]\n", found);
 assertNull(stderrDuringParse);
 }

@@ -205,7 +205,7 @@ public class TestLexerExec extends BaseTest {
 String found = execLexer("L.g4", grammar, "L", "//blah\n//blah\n");
 assertEquals(
 "[@0,0:13='//blah\\n//blah\\n',<1>,1:0]\n" +
-"[@1,14:13='<EOF>',<-1>,3:14]\n", found);
+"[@1,14:13='<EOF>',<-1>,3:0]\n", found);
 assertNull(stderrDuringParse);
 }

@@ -220,7 +220,7 @@ public class TestLexerExec extends BaseTest {
 assertEquals(
 "[@0,0:6='//blah\\n',<1>,1:0]\n" +
 "[@1,7:13='//blah\\n',<1>,2:0]\n" +
-"[@2,14:13='<EOF>',<-1>,3:7]\n", found);
+"[@2,14:13='<EOF>',<-1>,3:0]\n", found);
 assertNull(stderrDuringParse);
 }

@@ -237,7 +237,7 @@ public class TestLexerExec extends BaseTest {
 "[@1,9:9='\\n',<2>,1:9]\n" +
 "[@2,10:34='/* /* */\\n/* /*nested*/ */',<1>,2:0]\n" +
 "[@3,35:35='\\n',<2>,3:16]\n" +
-"[@4,36:35='<EOF>',<-1>,4:17]\n";
+"[@4,36:35='<EOF>',<-1>,4:0]\n";

 // stuff on end of comment matches another rule
 String found = execLexer("L.g4", grammar, "L",

@@ -262,7 +262,7 @@ public class TestLexerExec extends BaseTest {
 "[@1,10:10='\\n',<2>,1:10]\n" +
 "[@2,11:36='/* /* */x\\n/* /*nested*/ */',<1>,2:0]\n" +
 "[@3,38:38='\\n',<2>,3:17]\n" +
-"[@4,39:38='<EOF>',<-1>,4:18]\n";
+"[@4,39:38='<EOF>',<-1>,4:0]\n";
 String found = execLexer("L.g4", grammar, "L",
 "/* ick */x\n" +
 "/* /* */x\n" +

@@ -286,7 +286,7 @@ public class TestLexerExec extends BaseTest {
 "[@1,9:9='\\n',<2>,1:9]\n" +
 "[@2,10:34='/* /* */\\n/* /*nested*/ */',<1>,2:0]\n" +
 "[@3,35:35='\\n',<2>,3:16]\n" +
-"[@4,36:35='<EOF>',<-1>,4:17]\n";
+"[@4,36:35='<EOF>',<-1>,4:0]\n";

 // stuff on end of comment matches another rule
 String found = execLexer("L.g4", grammar, "L",

@@ -311,7 +311,7 @@ public class TestLexerExec extends BaseTest {
 "[@1,10:10='\\n',<2>,1:10]\n" +
 "[@2,11:36='/* /* */x\\n/* /*nested*/ */',<1>,2:0]\n" +
 "[@3,38:38='\\n',<2>,3:17]\n" +
-"[@4,39:38='<EOF>',<-1>,4:18]\n";
+"[@4,39:38='<EOF>',<-1>,4:0]\n";
 String found = execLexer("L.g4", grammar, "L",
 "/* ick */x\n" +
 "/* /* */x\n" +

@@ -647,7 +647,7 @@ public class TestLexerExec extends BaseTest {
 grammar.append("lexer grammar L;\n");
 grammar.append("WS : [ \\t\\r\\n]+ -> skip;\n");
 for (int i = 0; i < 4000; i++) {
-grammar.append("KW").append(i).append(" : '").append("KW").append(i).append("';\n");
+grammar.append("KW").append(i).append(" : 'KW' '").append(i).append("';\n");
 }

 String input = "KW400";

@@ -657,4 +657,34 @@ public class TestLexerExec extends BaseTest {
 "[@1,5:4='<EOF>',<-1>,1:5]\n";
 assertEquals(expecting, found);
 }
+
+/**
+ * This is a regression test for antlr/antlr4#687 "Empty zero-length tokens
+ * cannot have lexer commands" and antlr/antlr4#688 "Lexer cannot match
+ * zero-length tokens"
+ * https://github.com/antlr/antlr4/issues/687
+ * https://github.com/antlr/antlr4/issues/688
+ */
+@Test public void testZeroLengthToken() throws Exception {
+String grammar =
+"lexer grammar L;\n"+
+"\n" +
+"BeginString\n" +
+" : '\\'' -> more, pushMode(StringMode)\n" +
+" ;\n" +
+"\n" +
+"mode StringMode;\n" +
+"\n" +
+" StringMode_X : 'x' -> more;\n" +
+" StringMode_Done : -> more, mode(EndStringMode);\n" +
+"\n" +
+"mode EndStringMode; \n" +
+"\n" +
+" EndString : '\\'' -> popMode;\n";
+String found = execLexer("L.g4", grammar, "L", "'xxx'");
+String expecting =
+"[@0,0:4=''xxx'',<1>,1:0]\n" +
+"[@1,5:4='<EOF>',<-1>,1:5]\n";
+assertEquals(expecting, found);
+}
 }

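All of the `[@index,start:stop='text',<type>,line:col]` strings asserted in this file are simply `Token.toString()` output, and the hunks above pin the EOF token's position to column 0 of the final line. A small driver sketch that prints the same shape, assuming a lexer generated from the test's grammar L:

    import org.antlr.v4.runtime.ANTLRInputStream;
    import org.antlr.v4.runtime.CommonTokenStream;
    import org.antlr.v4.runtime.Token;

    public class DumpTokens {
        public static void main(String[] args) {
            // L is the assumed name of the lexer generated from the test grammar
            L lexer = new L(new ANTLRInputStream("//blah\n//blah\n"));
            CommonTokenStream tokens = new CommonTokenStream(lexer);
            tokens.fill(); // buffer all tokens, including the trailing <EOF>
            for (Token t : tokens.getTokens()) {
                System.out.println(t); // e.g. [@0,0:13='//blah\n//blah\n',<1>,1:0]
            }
        }
    }
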
@@ -82,7 +82,7 @@ public class TestParseErrors extends BaseTest {
 "grammar T;\n" +
 "a : 'a' x='b' {System.out.println(\"conjured=\"+$x);} 'c' ;";
 String result = execParser("T.g4", grammar, "TParser", "TLexer", "a", "ac", false);
-String expecting = "conjured=[@-1,-1:-1='<missing 'b'>',<1>,1:1]\n";
+String expecting = "conjured=[@-1,-1:-1='<missing 'b'>',<2>,1:1]\n";
 assertEquals(expecting, result);
 }

@@ -101,7 +101,7 @@ public class TestParseErrors extends BaseTest {
 "grammar T;\n" +
 "a : 'a' x=('b'|'c') {System.out.println(\"conjured=\"+$x);} 'd' ;";
 String result = execParser("T.g4", grammar, "TParser", "TLexer", "a", "ad", false);
-String expecting = "conjured=[@-1,-1:-1='<missing 'b'>',<1>,1:1]\n";
+String expecting = "conjured=[@-1,-1:-1='<missing 'b'>',<2>,1:1]\n";
 assertEquals(expecting, result);
 }

@@ -304,8 +304,7 @@ public class TestParseErrors extends BaseTest {
 * This is a regression test for #26 "an exception upon simple rule with double recursion in an alternative".
 * https://github.com/antlr/antlr4/issues/26
 */
-@Test
-public void testDuplicatedLeftRecursiveCall() throws Exception {
+void testDuplicatedLeftRecursiveCall(String input) throws Exception {
 String grammar =
 "grammar T;\n" +
 "start : expr EOF;\n" +

@@ -313,24 +312,32 @@ public class TestParseErrors extends BaseTest {
 " | expr expr\n" +
 " ;\n" +
 "\n";

-String result = execParser("T.g4", grammar, "TParser", "TLexer", "start", "x", true);
-assertEquals("", result);
-assertNull(this.stderrDuringParse);
-
-result = execParser("T.g4", grammar, "TParser", "TLexer", "start", "xx", true);
-assertEquals("", result);
-assertNull(this.stderrDuringParse);
-
-result = execParser("T.g4", grammar, "TParser", "TLexer", "start", "xxx", true);
-assertEquals("", result);
-assertNull(this.stderrDuringParse);
-
-result = execParser("T.g4", grammar, "TParser", "TLexer", "start", "xxxx", true);
+String result = execParser("T.g4", grammar, "TParser", "TLexer", "start", input, true);
 assertEquals("", result);
 assertNull(this.stderrDuringParse);
 }

+@Test
+public void testDuplicatedLeftRecursiveCall1() throws Exception {
+testDuplicatedLeftRecursiveCall("x");
+}
+
+@Test
+public void testDuplicatedLeftRecursiveCall2() throws Exception {
+testDuplicatedLeftRecursiveCall("xx");
+}
+
+@Test
+public void testDuplicatedLeftRecursiveCall3() throws Exception {
+testDuplicatedLeftRecursiveCall("xxx");
+}
+
+@Test
+public void testDuplicatedLeftRecursiveCall4() throws Exception {
+testDuplicatedLeftRecursiveCall("xxxx");
+}
+
 /**
 * This is a regression test for #45 "NullPointerException in ATNConfig.hashCode".
 * https://github.com/antlr/antlr4/issues/45

@@ -567,4 +567,31 @@ public class TestParserExec extends BaseTest {
 assertEquals("", found);
 assertNull(stderrDuringParse);
 }
+
+/**
+ * This is a regression test for antlr/antlr4#672 "Initialization failed in
+ * locals".
+ * https://github.com/antlr/antlr4/issues/672
+ */
+@Test public void testAttributeValueInitialization() throws Exception {
+String grammar =
+"grammar Data; \n" +
+"\n" +
+"file : group+ EOF; \n" +
+"\n" +
+"group: INT sequence {System.out.println($sequence.values.size());} ; \n" +
+"\n" +
+"sequence returns [List<Integer> values = new ArrayList<Integer>()] \n" +
+" locals[List<Integer> localValues = new ArrayList<Integer>()]\n" +
+" : (INT {$localValues.add($INT.int);})* {$values.addAll($localValues);}\n" +
+"; \n" +
+"\n" +
+"INT : [0-9]+ ; // match integers \n" +
+"WS : [ \\t\\n\\r]+ -> skip ; // toss out all whitespace\n";
+
+String input = "2 9 10 3 1 2 3";
+String found = execParser("Data.g4", grammar, "DataParser", "DataLexer", "file", input, false);
+assertEquals("6\n", found);
+assertNull(stderrDuringParse);
+}
 }

@@ -72,8 +72,6 @@ import org.junit.Test;
 import java.io.File;
-import java.io.FilenameFilter;
-import java.io.IOException;
 import java.io.InputStream;
 import java.io.InputStreamReader;
 import java.lang.ref.Reference;
 import java.lang.ref.SoftReference;
 import java.lang.ref.WeakReference;

@@ -1237,7 +1235,7 @@ public class TestPerformance extends BaseTest {

 if (USE_PARSER_INTERPRETER) {
 Parser referenceParser = parserCtor.newInstance(tokens);
-parser = new ParserInterpreter(referenceParser.getGrammarFileName(), Arrays.asList(referenceParser.getTokenNames()), Arrays.asList(referenceParser.getRuleNames()), referenceParser.getATN(), tokens);
+parser = new ParserInterpreter(referenceParser.getGrammarFileName(), referenceParser.getVocabulary(), Arrays.asList(referenceParser.getRuleNames()), referenceParser.getATN(), tokens);
 }
 else {
 parser = parserCtor.newInstance(tokens);

@@ -1318,7 +1316,7 @@ public class TestPerformance extends BaseTest {

 if (USE_PARSER_INTERPRETER) {
 Parser referenceParser = parserCtor.newInstance(tokens);
-parser = new ParserInterpreter(referenceParser.getGrammarFileName(), Arrays.asList(referenceParser.getTokenNames()), Arrays.asList(referenceParser.getRuleNames()), referenceParser.getATN(), tokens);
+parser = new ParserInterpreter(referenceParser.getGrammarFileName(), referenceParser.getVocabulary(), Arrays.asList(referenceParser.getRuleNames()), referenceParser.getATN(), tokens);
 }
 else {
 parser = parserCtor.newInstance(tokens);

@@ -1998,4 +1996,36 @@ public class TestPerformance extends BaseTest {
 Assert.assertEquals("", found);
 Assert.assertEquals(null, stderrDuringParse);
 }
+
+@Test(timeout = 20000)
+public void testExponentialInclude() {
+String grammarFormat =
+"parser grammar Level_%d_%d;\n" +
+"\n" +
+"%s import Level_%d_1, Level_%d_2;\n" +
+"\n" +
+"rule_%d_%d : EOF;\n";
+
+System.out.println("dir "+tmpdir);
+mkdir(tmpdir);
+
+long startTime = System.nanoTime();
+
+int levels = 20;
+for (int level = 0; level < levels; level++) {
+String leafPrefix = level == levels - 1 ? "//" : "";
+String grammar1 = String.format(grammarFormat, level, 1, leafPrefix, level + 1, level + 1, level, 1);
+writeFile(tmpdir, "Level_" + level + "_1.g4", grammar1);
+if (level > 0) {
+String grammar2 = String.format(grammarFormat, level, 2, leafPrefix, level + 1, level + 1, level, 1);
+writeFile(tmpdir, "Level_" + level + "_2.g4", grammar2);
+}
+}
+
+ErrorQueue equeue = antlr("Level_0_1.g4", false);
+Assert.assertTrue(equeue.errors.isEmpty());
+
+long endTime = System.nanoTime();
+System.out.format("%s milliseconds.%n", (endTime - startTime) / 1000000.0);
+}
 }

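The two ParserInterpreter hunks above track a runtime API change: the overload taking `Arrays.asList(getTokenNames())` gives way to one taking a `Vocabulary`, since a flat name array cannot distinguish literal from symbolic token names. The new call shape, sketched as a helper around any generated parser:

    import java.util.Arrays;
    import org.antlr.v4.runtime.Parser;
    import org.antlr.v4.runtime.ParserInterpreter;
    import org.antlr.v4.runtime.TokenStream;

    public final class Interpreters {
        // Builds an interpreter that mirrors a generated parser's grammar.
        public static ParserInterpreter forParser(Parser referenceParser, TokenStream tokens) {
            return new ParserInterpreter(
                referenceParser.getGrammarFileName(),
                referenceParser.getVocabulary(),               // replaces the token-name list
                Arrays.asList(referenceParser.getRuleNames()),
                referenceParser.getATN(),
                tokens);
        }
    }
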
@@ -129,7 +129,7 @@ public class TestSemPredEvalLexer extends BaseTest {
 "[@3,6:8='def',<1>,2:2]\n" + // ID
 "[@4,9:10=' ',<4>,2:5]\n" + // WS
 "[@5,11:11='\\n',<3>,2:7]\n" +
-"[@6,12:11='<EOF>',<-1>,3:8]\n" +
+"[@6,12:11='<EOF>',<-1>,3:0]\n" +
 "s0-'\n" +
 "'->:s2=>3\n" +
 "s0-'a'->:s1=>1\n" +

@@ -614,7 +614,7 @@ public class TestSemPredEvalParser extends BaseTest {
 String found = execParser("T2.g4", grammar, "T2Parser", "T2Lexer", "file",
 input, true);
 assertEquals("(file (para (paraContent s) \\n \\n) (para (paraContent \\n x \\n)) <EOF>)\n", found);
-assertEquals(stderrDuringParse, "line 5:2 mismatched input '<EOF>' expecting '\n'\n");
+assertEquals(stderrDuringParse, "line 5:0 mismatched input '<EOF>' expecting '\n'\n");

 input = "s\n\n\nx\n\n";
 found = execParser("T2.g4", grammar, "T2Parser", "T2Lexer", "file",

@@ -61,19 +61,19 @@ public class TestTokenPositionOptions extends BaseTest {
 assertEquals(expectedTree, g.ast.toStringTree());

 String expectedElementTokens =
-"[@5,11:11='s',<56>,2:0]\n" +
-"[@9,15:15='e',<56>,2:4]\n" +
-"[@11,17:19='';'',<61>,2:6]\n" +
-"[@15,23:23='e',<56>,3:0]\n" +
-"[@43,64:66=''-'',<61>,6:4]\n" +
-"[@45,68:68='e',<56>,6:8]\n" +
-"[@49,74:75='ID',<65>,7:4]\n" +
-"[@21,29:31=''*'',<61>,3:6]\n" +
-"[@23,33:33='e',<56>,3:10]\n" +
-"[@29,41:43=''+'',<61>,4:6]\n" +
-"[@31,45:45='e',<56>,4:10]\n" +
-"[@37,53:55=''.'',<61>,5:6]\n" +
-"[@39,57:58='ID',<65>,5:10]";
+"[@5,11:11='s',<57>,2:0]\n" +
+"[@9,15:15='e',<57>,2:4]\n" +
+"[@11,17:19='';'',<62>,2:6]\n" +
+"[@15,23:23='e',<57>,3:0]\n" +
+"[@43,64:66=''-'',<62>,6:4]\n" +
+"[@45,68:68='e',<57>,6:8]\n" +
+"[@49,74:75='ID',<66>,7:4]\n" +
+"[@21,29:31=''*'',<62>,3:6]\n" +
+"[@23,33:33='e',<57>,3:10]\n" +
+"[@29,41:43=''+'',<62>,4:6]\n" +
+"[@31,45:45='e',<57>,4:10]\n" +
+"[@37,53:55=''.'',<62>,5:6]\n" +
+"[@39,57:58='ID',<66>,5:10]";

 IntervalSet types =
 new IntervalSet(ANTLRParser.TOKEN_REF,

@@ -105,19 +105,19 @@ public class TestTokenPositionOptions extends BaseTest {
 assertEquals(expectedTree, g.ast.toStringTree());

 String expectedElementTokens =
-"[@5,11:11='s',<56>,2:0]\n" +
-"[@9,15:15='e',<56>,2:4]\n" +
-"[@11,17:19='';'',<61>,2:6]\n" +
-"[@15,23:23='e',<56>,3:0]\n" +
-"[@47,68:70=''-'',<61>,6:4]\n" +
-"[@49,72:72='e',<56>,6:8]\n" +
-"[@53,78:79='ID',<65>,7:4]\n" +
-"[@21,29:31=''*'',<61>,3:6]\n" +
-"[@25,35:35='e',<56>,3:12]\n" +
-"[@31,43:45=''+'',<61>,4:6]\n" +
-"[@33,47:47='e',<56>,4:10]\n" +
-"[@39,55:57=''.'',<61>,5:6]\n" +
-"[@43,61:62='ID',<65>,5:12]";
+"[@5,11:11='s',<57>,2:0]\n" +
+"[@9,15:15='e',<57>,2:4]\n" +
+"[@11,17:19='';'',<62>,2:6]\n" +
+"[@15,23:23='e',<57>,3:0]\n" +
+"[@47,68:70=''-'',<62>,6:4]\n" +
+"[@49,72:72='e',<57>,6:8]\n" +
+"[@53,78:79='ID',<66>,7:4]\n" +
+"[@21,29:31=''*'',<62>,3:6]\n" +
+"[@25,35:35='e',<57>,3:12]\n" +
+"[@31,43:45=''+'',<62>,4:6]\n" +
+"[@33,47:47='e',<57>,4:10]\n" +
+"[@39,55:57=''.'',<62>,5:6]\n" +
+"[@43,61:62='ID',<66>,5:12]";

 IntervalSet types =
 new IntervalSet(ANTLRParser.TOKEN_REF,

@@ -149,20 +149,20 @@ public class TestTokenPositionOptions extends BaseTest {
 assertEquals(expectedTree, g.ast.toStringTree());

 String expectedElementTokens =
-"[@5,11:11='s',<56>,2:0]\n" +
-"[@9,15:15='e',<56>,2:4]\n" +
-"[@11,17:19='';'',<61>,2:6]\n" +
-"[@15,23:23='e',<56>,3:0]\n" +
-"[@49,73:75=''-'',<61>,6:4]\n" +
-"[@51,77:77='e',<56>,6:8]\n" +
-"[@55,83:84='ID',<65>,7:4]\n" +
-"[@24,33:35=''*'',<61>,3:10]\n" +
-"[@26,37:39=''/'',<61>,3:14]\n" +
-"[@29,42:42='e',<56>,3:19]\n" +
-"[@35,50:52=''+'',<61>,4:6]\n" +
-"[@37,54:54='e',<56>,4:10]\n" +
-"[@43,62:64=''.'',<61>,5:6]\n" +
-"[@45,66:67='ID',<65>,5:10]";
+"[@5,11:11='s',<57>,2:0]\n" +
+"[@9,15:15='e',<57>,2:4]\n" +
+"[@11,17:19='';'',<62>,2:6]\n" +
+"[@15,23:23='e',<57>,3:0]\n" +
+"[@49,73:75=''-'',<62>,6:4]\n" +
+"[@51,77:77='e',<57>,6:8]\n" +
+"[@55,83:84='ID',<66>,7:4]\n" +
+"[@24,33:35=''*'',<62>,3:10]\n" +
+"[@26,37:39=''/'',<62>,3:14]\n" +
+"[@29,42:42='e',<57>,3:19]\n" +
+"[@35,50:52=''+'',<62>,4:6]\n" +
+"[@37,54:54='e',<57>,4:10]\n" +
+"[@43,62:64=''.'',<62>,5:6]\n" +
+"[@45,66:67='ID',<66>,5:10]";

 IntervalSet types =
 new IntervalSet(ANTLRParser.TOKEN_REF,

@@ -567,4 +567,90 @@ public class TestToolSyntaxErrors extends BaseTest {

 super.testErrors(pair, true);
 }
+
+@Test public void testChannelDefinitionInLexer() throws Exception {
+String grammar =
+"lexer grammar T;\n" +
+"\n" +
+"channels {\n" +
+" WHITESPACE_CHANNEL,\n" +
+" COMMENT_CHANNEL\n" +
+"}\n" +
+"\n" +
+"COMMENT: '//' ~[\\n]+ -> channel(COMMENT_CHANNEL);\n" +
+"WHITESPACE: [ \\t]+ -> channel(WHITESPACE_CHANNEL);\n";
+
+String expected = "";
+
+String[] pair = { grammar, expected };
+super.testErrors(pair, true);
+}
+
+@Test public void testChannelDefinitionInParser() throws Exception {
+String grammar =
+"parser grammar T;\n" +
+"\n" +
+"channels {\n" +
+" WHITESPACE_CHANNEL,\n" +
+" COMMENT_CHANNEL\n" +
+"}\n" +
+"\n" +
+"start : EOF;\n";
+
+String expected =
+"error(" + ErrorType.CHANNELS_BLOCK_IN_PARSER_GRAMMAR.code + "): T.g4:3:0: custom channels are not supported in parser grammars\n";
+
+String[] pair = { grammar, expected };
+super.testErrors(pair, true);
+}
+
+@Test public void testChannelDefinitionInCombined() throws Exception {
+String grammar =
+"grammar T;\n" +
+"\n" +
+"channels {\n" +
+" WHITESPACE_CHANNEL,\n" +
+" COMMENT_CHANNEL\n" +
+"}\n" +
+"\n" +
+"start : EOF;\n" +
+"\n" +
+"COMMENT: '//' ~[\\n]+ -> channel(COMMENT_CHANNEL);\n" +
+"WHITESPACE: [ \\t]+ -> channel(WHITESPACE_CHANNEL);\n";
+
+String expected =
+"warning(" + ErrorType.UNKNOWN_LEXER_CONSTANT.code + "): T.g4:10:35: rule COMMENT contains a lexer command with an unrecognized constant value; lexer interpreters may produce incorrect output\n" +
+"warning(" + ErrorType.UNKNOWN_LEXER_CONSTANT.code + "): T.g4:11:35: rule WHITESPACE contains a lexer command with an unrecognized constant value; lexer interpreters may produce incorrect output\n" +
+"error(" + ErrorType.CHANNELS_BLOCK_IN_COMBINED_GRAMMAR.code + "): T.g4:3:0: custom channels are not supported in combined grammars\n";
+
+String[] pair = { grammar, expected };
+super.testErrors(pair, true);
+}
+
+/**
+ * This is a regression test for antlr/antlr4#497 now that antlr/antlr4#309
+ * is resolved.
+ * https://github.com/antlr/antlr4/issues/497
+ * https://github.com/antlr/antlr4/issues/309
+ */
+@Test public void testChannelDefinitions() throws Exception {
+String grammar =
+"lexer grammar T;\n" +
+"\n" +
+"channels {\n" +
+" WHITESPACE_CHANNEL,\n" +
+" COMMENT_CHANNEL\n" +
+"}\n" +
+"\n" +
+"COMMENT: '//' ~[\\n]+ -> channel(COMMENT_CHANNEL);\n" +
+"WHITESPACE: [ \\t]+ -> channel(WHITESPACE_CHANNEL);\n" +
+"NEWLINE: '\\r'? '\\n' -> channel(NEWLINE_CHANNEL);";
+
+// WHITESPACE_CHANNEL and COMMENT_CHANNEL are defined, but NEWLINE_CHANNEL is not
+String expected =
+"warning(" + ErrorType.UNKNOWN_LEXER_CONSTANT.code + "): T.g4:10:34: rule NEWLINE contains a lexer command with an unrecognized constant value; lexer interpreters may produce incorrect output\n";
+
+String[] pair = { grammar, expected };
+super.testErrors(pair, true);
+}
 }

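Context for the new `channels { ... }` block these tests cover: it is only legal in lexer grammars, and a lexer generated from one should expose an `int` constant per named channel so clients can filter the buffered stream. A hedged usage sketch (the generated constant name and the class name T are assumptions based on the test grammar):

    import org.antlr.v4.runtime.ANTLRInputStream;
    import org.antlr.v4.runtime.CommonTokenStream;
    import org.antlr.v4.runtime.Token;

    public class ChannelDemo {
        public static void main(String[] args) {
            // T is the lexer assumed generated from testChannelDefinitionInLexer's grammar
            T lexer = new T(new ANTLRInputStream("// a note"));
            CommonTokenStream tokens = new CommonTokenStream(lexer);
            tokens.fill();
            for (Token t : tokens.getTokens()) {
                if (t.getChannel() == T.COMMENT_CHANNEL) { // constant assumed generated per channel
                    System.out.println("comment: " + t.getText());
                }
            }
        }
    }
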
@@ -53,7 +53,7 @@ public class TestTopologicalSort extends BaseTest {
 g.addEdge("F", "H");
 g.addEdge("E", "F");

-String expecting = "[H, F, E, D, G, A, B, C]";
+String expecting = "[H, F, G, E, D, A, B, C]";
 List<String> nodes = g.sort();
 String result = nodes.toString();
 assertEquals(expecting, result);

@@ -95,7 +95,7 @@ public class TestTopologicalSort extends BaseTest {
 g.addEdge("Def.g4", "Java.tokens"); // walkers feed off generated tokens
 g.addEdge("Ref.g4", "Java.tokens");

-String expecting = "[MyJava.tokens, Java.g4, Java.tokens, Ref.g4, Def.g4]";
+String expecting = "[MyJava.tokens, Java.g4, Java.tokens, Def.g4, Ref.g4]";
 List<String> nodes = g.sort();
 String result = nodes.toString();
 assertEquals(expecting, result);

@@ -109,7 +109,7 @@ public class TestTopologicalSort extends BaseTest {
 g.addEdge("Def.g4", "JavaLexer.tokens");
 g.addEdge("Ref.g4", "JavaLexer.tokens");

-String expecting = "[JavaLexer.g4, JavaLexer.tokens, JavaParser.g4, Ref.g4, Def.g4]";
+String expecting = "[JavaLexer.g4, JavaLexer.tokens, JavaParser.g4, Def.g4, Ref.g4]";
 List<String> nodes = g.sort();
 String result = nodes.toString();
 assertEquals(expecting, result);

@@ -0,0 +1,79 @@
+/*
+ * [The "BSD license"]
+ * Copyright (c) 2014 Terence Parr
+ * Copyright (c) 2014 Sam Harwell
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package org.antlr.v4.test;
+
+import org.antlr.v4.runtime.Token;
+import org.antlr.v4.runtime.Vocabulary;
+import org.antlr.v4.runtime.VocabularyImpl;
+import org.junit.Assert;
+import org.junit.Test;
+
+/**
+ *
+ * @author Sam Harwell
+ */
+public class TestVocabulary extends BaseTest {
+
+@Test
+public void testEmptyVocabulary() {
+Assert.assertNotNull(VocabularyImpl.EMPTY_VOCABULARY);
+Assert.assertEquals("EOF", VocabularyImpl.EMPTY_VOCABULARY.getSymbolicName(Token.EOF));
+Assert.assertEquals("0", VocabularyImpl.EMPTY_VOCABULARY.getDisplayName(Token.INVALID_TYPE));
+}
+
+@Test
+public void testVocabularyFromTokenNames() {
+String[] tokenNames = {
+"<INVALID>",
+"TOKEN_REF", "RULE_REF", "'//'", "'/'", "'*'", "'!'", "ID", "STRING"
+};
+
+Vocabulary vocabulary = VocabularyImpl.fromTokenNames(tokenNames);
+Assert.assertNotNull(vocabulary);
+Assert.assertEquals("EOF", vocabulary.getSymbolicName(Token.EOF));
+for (int i = 0; i < tokenNames.length; i++) {
+Assert.assertEquals(tokenNames[i], vocabulary.getDisplayName(i));
+
+if (tokenNames[i].startsWith("'")) {
+Assert.assertEquals(tokenNames[i], vocabulary.getLiteralName(i));
+Assert.assertNull(vocabulary.getSymbolicName(i));
+}
+else if (Character.isUpperCase(tokenNames[i].charAt(0))) {
+Assert.assertNull(vocabulary.getLiteralName(i));
+Assert.assertEquals(tokenNames[i], vocabulary.getSymbolicName(i));
+}
+else {
+Assert.assertNull(vocabulary.getLiteralName(i));
+Assert.assertNull(vocabulary.getSymbolicName(i));
+}
+}
+}
+
+}

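The new TestVocabulary file above pins down the interface's fallback order: literal name, then symbolic name, then the bare type number. That ordering is what makes `getDisplayName` a safe default for diagnostics; a one-method illustration (the helper itself is not part of the patch):

    import org.antlr.v4.runtime.Token;
    import org.antlr.v4.runtime.Vocabulary;

    public final class TokenNames {
        private TokenNames() {}

        // "'//'" for literal tokens, "ID" for symbolic ones, "7" for unnamed types
        public static String describe(Vocabulary vocabulary, Token t) {
            return vocabulary.getDisplayName(t.getType());
        }
    }
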
@@ -42,6 +42,7 @@ public class TestXPath extends BaseTest {
 "DIV : '/' ;\n" +
 "ADD : '+' ;\n" +
 "SUB : '-' ;\n" +
+"RETURN : 'return' ;\n" +
 "ID : [a-zA-Z]+ ; // match identifiers\n" +
 "INT : [0-9]+ ; // match integers\n" +
 "NEWLINE:'\\r'? '\\n' -> skip; // return newlines to parser (is end-statement signal)\n" +

@@ -67,7 +68,8 @@ public class TestXPath extends BaseTest {
 "//ID", // any ID in tree
 "//expr/primary/ID",// any ID child of a primary under any expr
 "//body//ID", // any ID under a body
-"//'return'", // any 'return' literal in tree
+"//'return'", // any 'return' literal in tree, matched by literal name
+"//RETURN", // any 'return' literal in tree, matched by symbolic name
 "//primary/*", // all kids of any primary
 "//func/*/stat", // all stat nodes grandkids of any func node
 "/prog/func/'def'", // all def literal kids of func kid of prog

@@ -90,6 +92,7 @@ public class TestXPath extends BaseTest {
 "[y, x]",
 "[x, y, x]",
 "[return]",
+"[return]",
 "[3, 4, y, 1, 2, x]",
 "[stat, stat, stat, stat]",
 "[def, def]",

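The `//RETURN` pattern added above only works once `return` exists as a named token (the new RETURN rule in the first hunk); XPath can then match it by symbolic name as well as by the `'return'` literal. A short sketch of the query API, assuming a tree produced by the test's generated parser:

    import java.util.Collection;
    import org.antlr.v4.runtime.Parser;
    import org.antlr.v4.runtime.tree.ParseTree;
    import org.antlr.v4.runtime.tree.xpath.XPath;

    public final class XPathDemo {
        // tree/parser would come from the generated TParser in the test
        public static void printReturns(ParseTree tree, Parser parser) {
            Collection<ParseTree> matches = XPath.findAll(tree, "//RETURN", parser);
            for (ParseTree t : matches) {
                System.out.println(t.getText()); // "return", once per match
            }
        }
    }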