pull master into branch

This commit is contained in:
Terence Parr 2013-11-14 14:43:50 -08:00
commit 2f902da3d2
65 changed files with 2315 additions and 312 deletions

2
.gitignore vendored
View File

@ -27,4 +27,4 @@ nbactions*.xml
*.hprof
# Playground
/tool/playground/
#/tool/playground/

View File

@ -1,5 +1,45 @@
ANTLR v4 Honey Badger
November 13, 2013
* move getChildren() from Tree into Trees (to avoid breaking change)
* Notation:
/prog/func, -> all funcs under prog at root
/prog/*, -> all children of prog at root
/*/func, -> all func kids of any root node
prog, -> prog must be root node
/prog, -> prog must be root node
/*, -> any root
*, -> any root
//ID, -> any ID in tree
//expr/primary/ID, -> any ID child of a primary under any expr
//body//ID, -> any ID under a body
//'return', -> any 'return' literal in tree
//primary/*, -> all kids of any primary
//func/*/stat, -> all stat nodes grandkids of any func node
/prog/func/'def', -> all def literal kids of func kid of prog
//stat/';', -> all ';' under any stat node
//expr/primary/!ID, -> anything but ID under primary under any expr node
//expr/!primary, -> anything but primary under any expr node
//!*, -> nothing anywhere
/!*, -> nothing at root
September 16, 2013
* Updated build.xml to support v4 grammars in v4 itself; compiles XPathLexer.g4
* Add to XPath:
Collection<ParseTree> findAll(String xpath);
September 11, 2013
* Add ! operator to XPath
* Use ANTLR v4 XPathLexer.g4 not regex
* Copy lots of find node stuff from v3 GrammarAST to Trees class in runtime.
September 10, 2013
* Adding in XPath stuff.
August 31, 2013
* Lots of little fixes thanks to Coverity Scan

View File

@ -19,6 +19,18 @@
<property name="antlr3.touch" value="${build.dir}/antlr3-${antlr3.version}.touch"/>
</target>
<target name="antlr4-init" depends="basic-init">
<property name="antlr4.version" value="4.1"/>
<property name="antlr4.jar.name" value="antlr-${antlr4.version}-complete.jar"/>
<property name="antlr4.jar" value="${lib.dir}/${antlr4.jar.name}"/>
<mkdir dir="${lib.dir}"/>
<get src="http://antlr.org/download/${antlr4.jar.name}" dest="${antlr4.jar}" skipexisting="true"/>
<path id="cp.antlr4" path="${antlr4.jar}"/>
<property name="build.antlr4.dir" value="${build.dir}/generated-sources/antlr4" />
<property name="antlr4.touch" value="${build.dir}/antlr4-${antlr4.version}.touch"/>
</target>
<target name="build-init" depends="basic-init">
<property name="version" value="4.1.1-dev"/>
<property name="build.sysclasspath" value="ignore"/>
@ -45,7 +57,17 @@
</uptodate>
</target>
<target name="up-to-date" depends="antlr3-up-to-date,build-init">
<target name="antlr4-up-to-date" depends="basic-init,antlr4-init">
<uptodate targetfile="${antlr4.touch}" property="is.antlr4.uptodate">
<srcfiles dir="${basedir}/tool/src">
<include name="**/*.g4"/>
<include name="**/*.tokens"/>
</srcfiles>
<srcfiles file="${antlr4.jar}"/>
</uptodate>
</target>
<target name="up-to-date" depends="antlr3-up-to-date,antlr4-up-to-date,build-init">
<uptodate targetfile="${jar.file}" property="is.source.uptodate">
<srcfiles dir="${basedir}/tool/src">
<include name="**/*.java"/>
@ -102,6 +124,33 @@
</sequential>
</macrodef>
<macrodef name="antlr4">
<attribute name="srcpath"/>
<element name="args" optional="true"/>
<sequential>
<local name="path.antlr4.local"/>
<local name="sources.antlr4.local"/>
<path id="path.antlr4.local">
<fileset dir="${basedir}/runtime/Java/src/@{srcpath}" includes="*.g4"/>
</path>
<pathconvert pathsep=" " property="sources.antlr4.local" refid="path.antlr4.local">
<map from="${basedir}/runtime/Java/src/@{srcpath}/" to=""/>
</pathconvert>
<mkdir dir="${build.antlr4.dir}/@{srcpath}"/>
<java classname="org.antlr.v4.Tool" fork="true" failonerror="true" maxmemory="300m"
dir="${basedir}/runtime/Java/src/@{srcpath}">
<arg value="-o"/>
<arg value="${build.antlr4.dir}/@{srcpath}"/>
<args/>
<arg line="${sources.antlr4.local}"/>
<classpath>
<path refid="cp.antlr4"/>
<pathelement location="${java.class.path}"/>
</classpath>
</java>
</sequential>
</macrodef>
<target name="antlr3" depends="build-init,antlr3-init,antlr3-up-to-date" unless="is.antlr3.uptodate">
<mkdir dir="${build.antlr3.dir}" />
@ -125,7 +174,28 @@
<touch file="${antlr3.touch}" mkdirs="true"/>
</target>
<target name="compile" depends="build-init,antlr3,up-to-date" description="Compile for generic OS" unless="is.jar.uptodate">
<target name="antlr4" depends="build-init,antlr4-init,antlr4-up-to-date" unless="is.antlr4.uptodate">
<mkdir dir="${build.antlr4.dir}" />
<path id="sources.antlr4">
<fileset dir="${basedir}/runtime/Java/src" includes="**/*.g4"/>
</path>
<pathconvert pathsep="${line.separator} " property="echo.sources.antlr4" refid="sources.antlr4">
<map from="${basedir}/runtime/Java/src/" to=""/>
</pathconvert>
<echo message="Generating ANTLR 4 grammars:${line.separator} ${echo.sources.antlr4}"/>
<antlr4 srcpath="org/antlr/v4/runtime/tree/xpath">
<args>
<arg value="-package"/>
<arg value="org.antlr.v4.runtime.tree.xpath"/>
</args>
</antlr4>
<touch file="${antlr4.touch}" mkdirs="true"/>
</target>
<target name="compile" depends="build-init,antlr3,antlr4,up-to-date" description="Compile for generic OS" unless="is.jar.uptodate">
<mkdir dir="${build.dir}/classes"/>
<javac
destdir="${build.dir}/classes"
@ -139,7 +209,7 @@
<path refid="cp.antlr3"/>
<pathelement location="${basedir}/runtime/Java/lib/org.abego.treelayout.core.jar"/>
</classpath>
<src path="${basedir}/tool/src:${basedir}/runtime/Java/src:${build.antlr3.dir}"/>
<src path="${basedir}/tool/src:${basedir}/runtime/Java/src:${build.antlr3.dir}:${build.antlr4.dir}"/>
</javac>
</target>

View File

@ -55,6 +55,23 @@
<sourceDirectory>src</sourceDirectory>
<resources/>
<plugins>
<plugin>
<groupId>org.antlr</groupId>
<artifactId>antlr4-maven-plugin</artifactId>
<version>4.1</version>
<configuration>
<sourceDirectory>src</sourceDirectory>
</configuration>
<executions>
<execution>
<goals>
<goal>antlr4</goal>
</goals>
</execution>
</executions>
</plugin>
</plugins>
</build>
</project>

View File

@ -30,6 +30,7 @@
package org.antlr.v4.runtime;
import org.antlr.v4.runtime.atn.ATNState;
import org.antlr.v4.runtime.atn.AbstractPredicateTransition;
import org.antlr.v4.runtime.atn.PredicateTransition;
import org.antlr.v4.runtime.misc.NotNull;
import org.antlr.v4.runtime.misc.Nullable;
@ -60,9 +61,17 @@ public class FailedPredicateException extends RecognitionException {
{
super(formatMessage(predicate, message), recognizer, recognizer.getInputStream(), recognizer._ctx);
ATNState s = recognizer.getInterpreter().atn.states.get(recognizer.getState());
PredicateTransition trans = (PredicateTransition)s.transition(0);
this.ruleIndex = trans.ruleIndex;
this.predicateIndex = trans.predIndex;
AbstractPredicateTransition trans = (AbstractPredicateTransition)s.transition(0);
if (trans instanceof PredicateTransition) {
this.ruleIndex = ((PredicateTransition)trans).ruleIndex;
this.predicateIndex = ((PredicateTransition)trans).predIndex;
}
else {
this.ruleIndex = 0;
this.predicateIndex = 0;
}
this.predicate = predicate;
this.setOffendingToken(recognizer.getCurrentToken());
}

View File

@ -35,6 +35,7 @@ import org.antlr.v4.runtime.atn.ATNState;
import org.antlr.v4.runtime.atn.ParserATNSimulator;
import org.antlr.v4.runtime.atn.RuleTransition;
import org.antlr.v4.runtime.dfa.DFA;
import org.antlr.v4.runtime.misc.IntegerStack;
import org.antlr.v4.runtime.misc.IntervalSet;
import org.antlr.v4.runtime.misc.NotNull;
import org.antlr.v4.runtime.misc.Nullable;
@ -111,6 +112,12 @@ public abstract class Parser extends Recognizer<Token, ParserATNSimulator> {
*/
protected TokenStream _input;
protected final IntegerStack _precedenceStack;
{
_precedenceStack = new IntegerStack();
_precedenceStack.push(0);
}
/**
* The {@link ParserRuleContext} object for the currently executing rule.
* This is always non-null during the parsing process.
@ -161,6 +168,8 @@ public abstract class Parser extends Recognizer<Token, ParserATNSimulator> {
_ctx = null;
_syntaxErrors = 0;
setTrace(false);
_precedenceStack.clear();
_precedenceStack.push(0);
ATNSimulator interpreter = getInterpreter();
if (interpreter != null) {
interpreter.reset();
@ -562,7 +571,17 @@ public abstract class Parser extends Recognizer<Token, ParserATNSimulator> {
_ctx = localctx;
}
/**
 * @deprecated Use {@link #enterRecursionRule(ParserRuleContext, int, int)}
 * instead.
 */
@Deprecated
public void enterRecursionRule(ParserRuleContext localctx, int ruleIndex) {
	// Delegate with precedence 0, preserving the pre-precedence-tracking behavior.
	enterRecursionRule(localctx, ruleIndex, 0);
}
public void enterRecursionRule(ParserRuleContext localctx, int ruleIndex, int precedence) {
_precedenceStack.push(precedence);
_ctx = localctx;
_ctx.start = _input.LT(1);
if (_parseListeners != null) {
@ -591,6 +610,7 @@ public abstract class Parser extends Recognizer<Token, ParserATNSimulator> {
}
public void unrollRecursionContexts(ParserRuleContext _parentctx) {
_precedenceStack.pop();
_ctx.stop = _input.LT(-1);
ParserRuleContext retctx = _ctx; // save current ctx (return value)
@ -631,6 +651,11 @@ public abstract class Parser extends Recognizer<Token, ParserATNSimulator> {
_ctx = ctx;
}
/**
 * Evaluate a precedence predicate: succeeds when the requested
 * {@code precedence} is at least the precedence currently on top of
 * the parser's precedence stack.
 */
@Override
public boolean precpred(RuleContext localctx, int precedence) {
	final int currentPrecedence = _precedenceStack.peek();
	return precedence >= currentPrecedence;
}
public boolean inContext(String context) {
// TODO: useful in parser?
return false;

View File

@ -140,6 +140,10 @@ public abstract class Recognizer<Symbol, ATNInterpreter extends ATNSimulator> {
return true;
}
/**
 * Default implementation of a precedence predicate: always passes.
 * {@code Parser} overrides this to compare {@code precedence} against its
 * precedence stack.
 */
public boolean precpred(@Nullable RuleContext localctx, int precedence) {
	return true;
}
public void action(@Nullable RuleContext _localctx, int ruleIndex, int actionIndex) {
}

View File

@ -38,7 +38,7 @@ import org.antlr.v4.runtime.tree.Trees;
import org.antlr.v4.runtime.tree.gui.TreeViewer;
import javax.print.PrintException;
import javax.swing.JDialog;
import javax.swing.*;
import java.io.IOException;
import java.util.Arrays;
import java.util.List;
@ -59,7 +59,7 @@ import java.util.concurrent.Future;
* getting error information.
*
* These objects are used during parsing and prediction.
* For the special case of parsers and tree parsers, we use the subclass
* For the special case of parsers, we use the subclass
* ParserRuleContext.
*
* @see ParserRuleContext

View File

@ -52,12 +52,59 @@ public abstract class ATNSimulator {
SERIALIZED_VERSION = 3;
}
/**
* This is the earliest supported serialized UUID.
*/
private static final UUID BASE_SERIALIZED_UUID;
/**
* This UUID indicates an extension of {@link BASE_SERIALIZED_UUID} for the
* addition of precedence predicates.
*/
private static final UUID ADDED_PRECEDENCE_TRANSITIONS;
/**
* This list contains all of the currently supported UUIDs, ordered by when
* the feature first appeared in this branch.
*/
private static final List<UUID> SUPPORTED_UUIDS;
/**
* This is the current serialized UUID.
*/
public static final UUID SERIALIZED_UUID;
static {
/* WARNING: DO NOT MERGE THIS LINE. If UUIDs differ during a merge,
* resolve the conflict by generating a new ID!
*/
SERIALIZED_UUID = UUID.fromString("33761B2D-78BB-4A43-8B0B-4F5BEE8AACF3");
BASE_SERIALIZED_UUID = UUID.fromString("33761B2D-78BB-4A43-8B0B-4F5BEE8AACF3");
ADDED_PRECEDENCE_TRANSITIONS = UUID.fromString("1DA0C57D-6C06-438A-9B27-10BCB3CE0F61");
SUPPORTED_UUIDS = new ArrayList<UUID>();
SUPPORTED_UUIDS.add(BASE_SERIALIZED_UUID);
SUPPORTED_UUIDS.add(ADDED_PRECEDENCE_TRANSITIONS);
SERIALIZED_UUID = ADDED_PRECEDENCE_TRANSITIONS;
}
/**
 * Determines whether the serialized ATN identified by {@code actualUuid}
 * includes the feature introduced with {@code feature}. The position of each
 * UUID in {@link #SUPPORTED_UUIDS} provides the chronological ordering.
 *
 * @param feature The {@link UUID} marking the first serialized version in
 * which the feature appeared.
 * @param actualUuid The {@link UUID} of the serialized ATN being deserialized.
 * @return {@code true} if {@code actualUuid} is at or after {@code feature}
 * in the supported-UUID ordering; otherwise, {@code false}.
 */
private static boolean isFeatureSupported(UUID feature, UUID actualUuid) {
	final int introducedAt = SUPPORTED_UUIDS.indexOf(feature);
	if (introducedAt < 0) {
		// Unknown feature UUID: no serialized version can support it.
		return false;
	}

	// An unknown actualUuid yields -1, which is always < introducedAt.
	final int actualIndex = SUPPORTED_UUIDS.indexOf(actualUuid);
	return actualIndex >= introducedAt;
}
/** Must distinguish between missing edge and edge we know leads nowhere */
@ -134,11 +181,14 @@ public abstract class ATNSimulator {
UUID uuid = toUUID(data, p);
p += 8;
if (!uuid.equals(SERIALIZED_UUID)) {
String reason = String.format(Locale.getDefault(), "Could not deserialize ATN with UUID %s (expected %s).", uuid, SERIALIZED_UUID);
if (!uuid.equals(SERIALIZED_UUID)
&& !uuid.equals(BASE_SERIALIZED_UUID)) {
String reason = String.format(Locale.getDefault(), "Could not deserialize ATN with UUID %s (expected %s or a legacy UUID).", uuid, SERIALIZED_UUID);
throw new UnsupportedOperationException(new InvalidClassException(ATN.class.getName(), reason));
}
boolean supportsPrecedencePredicates = isFeatureSupported(ADDED_PRECEDENCE_TRANSITIONS, uuid);
ATNType grammarType = ATNType.values()[toInt(data[p++])];
int maxTokenType = toInt(data[p++]);
ATN atn = new ATN(grammarType, maxTokenType);
@ -189,6 +239,14 @@ public abstract class ATNSimulator {
((DecisionState)atn.states.get(stateNumber)).nonGreedy = true;
}
if (supportsPrecedencePredicates) {
int numPrecedenceStates = toInt(data[p++]);
for (int i = 0; i < numPrecedenceStates; i++) {
int stateNumber = toInt(data[p++]);
((RuleStartState)atn.states.get(stateNumber)).isPrecedenceRule = true;
}
}
//
// RULES
//
@ -452,11 +510,13 @@ public abstract class ATNSimulator {
return new RangeTransition(target, arg1, arg2);
}
case Transition.RULE :
RuleTransition rt = new RuleTransition((RuleStartState)atn.states.get(arg1), arg2, target);
RuleTransition rt = new RuleTransition((RuleStartState)atn.states.get(arg1), arg2, arg3, target);
return rt;
case Transition.PREDICATE :
PredicateTransition pt = new PredicateTransition(target, arg1, arg2, arg3 != 0);
return pt;
case Transition.PRECEDENCE:
return new PrecedencePredicateTransition(target, arg1);
case Transition.ATOM :
if (arg3 != 0) {
return new AtomTransition(target, Token.EOF);

View File

@ -0,0 +1,43 @@
/*
* [The "BSD license"]
* Copyright (c) 2012 Terence Parr
* Copyright (c) 2012 Sam Harwell
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package org.antlr.v4.runtime.atn;
/**
 * Base class for transitions guarded by a semantic predicate; lets callers
 * treat {@code PredicateTransition} and {@code PrecedencePredicateTransition}
 * uniformly (e.g. via a single {@code instanceof} check in lookahead analysis).
 *
 * @author Sam Harwell
 */
public abstract class AbstractPredicateTransition extends Transition {

	/** @param target the ATN state reached after this transition is taken */
	public AbstractPredicateTransition(ATNState target) {
		super(target);
	}

}

View File

@ -234,7 +234,7 @@ public class LL1Analyzer {
calledRuleStack.clear(((RuleTransition)t).target.ruleIndex);
}
}
else if ( t instanceof PredicateTransition ) {
else if ( t instanceof AbstractPredicateTransition ) {
if ( seeThruPreds ) {
_LOOK(t.target, stopState, ctx, look, lookBusy, calledRuleStack, seeThruPreds, addEOF);
}

View File

@ -482,6 +482,9 @@ public class LexerATNSimulator extends ATNSimulator {
c = new LexerATNConfig(config, t.target, newContext);
break;
case Transition.PRECEDENCE:
throw new UnsupportedOperationException("Precedence predicates are not supported in lexers.");
case Transition.PREDICATE:
/* Track traversing semantic predicates. If we traverse,
we cannot add a DFA state for this "reach" computation

View File

@ -1184,6 +1184,9 @@ public class ParserATNSimulator extends ATNSimulator {
case Transition.RULE:
return ruleTransition(config, (RuleTransition)t);
case Transition.PRECEDENCE:
return precedenceTransition(config, (PrecedencePredicateTransition)t, collectPredicates, inContext, fullCtx);
case Transition.PREDICATE:
return predTransition(config, (PredicateTransition)t,
collectPredicates,
@ -1207,6 +1210,52 @@ public class ParserATNSimulator extends ATNSimulator {
return new ATNConfig(config, t.target);
}
/**
 * Compute the configuration reached by taking precedence-predicate
 * transition {@code pt} from {@code config}, or {@code null} when the
 * predicate is evaluated eagerly (full-context mode) and fails.
 *
 * @param config the source ATN configuration
 * @param pt the precedence predicate transition being traversed
 * @param collectPredicates whether predicates should be captured into the
 *        resulting configuration's semantic context
 * @param inContext whether closure is operating inside the rule's context
 * @param fullCtx whether we are in full-context (LL) prediction, where
 *        predicates can be evaluated immediately
 */
@Nullable
public ATNConfig precedenceTransition(@NotNull ATNConfig config,
									@NotNull PrecedencePredicateTransition pt,
									boolean collectPredicates,
									boolean inContext,
									boolean fullCtx)
{
	if ( debug ) {
		System.out.println("PRED (collectPredicates="+collectPredicates+") "+
				pt.precedence+">=_p"+
				", ctx dependent=true");
		if ( parser != null ) {
			System.out.println("context surrounding pred is "+
							   parser.getRuleInvocationStack());
		}
	}

	ATNConfig c = null;
	if (collectPredicates && inContext) {
		if ( fullCtx ) {
			// In full context mode, we can evaluate predicates on-the-fly
			// during closure, which dramatically reduces the size of
			// the config sets. It also obviates the need to test predicates
			// later during conflict resolution.
			// Rewind the input to _startIndex before evaluating, then restore
			// it, so the predicate sees consistent lookahead.
			int currentPosition = _input.index();
			_input.seek(_startIndex);
			boolean predSucceeds = pt.getPredicate().eval(parser, _outerContext);
			_input.seek(currentPosition);
			if ( predSucceeds ) {
				c = new ATNConfig(config, pt.target); // no pred context
			}
		}
		else {
			// Defer evaluation: AND the predicate into the config's semantic context.
			SemanticContext newSemCtx =
					SemanticContext.and(config.semanticContext, pt.getPredicate());
			c = new ATNConfig(config, pt.target, newSemCtx);
		}
	}
	else {
		// Not collecting predicates here: traverse as a plain epsilon edge.
		c = new ATNConfig(config, pt.target);
	}

	if ( debug ) System.out.println("config from pred transition="+c);
	return c;
}
@Nullable
protected ATNConfig predTransition(@NotNull ATNConfig config,
@NotNull PredicateTransition pt,

View File

@ -0,0 +1,71 @@
/*
* [The "BSD license"]
* Copyright (c) 2012 Terence Parr
* Copyright (c) 2012 Sam Harwell
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package org.antlr.v4.runtime.atn;
import org.antlr.v4.runtime.misc.NotNull;
/**
 * A transition guarded by a precedence predicate; evaluation delegates to
 * {@code SemanticContext.PrecedencePredicate} via {@link #getPredicate()}.
 *
 * @author Sam Harwell
 */
public final class PrecedencePredicateTransition extends AbstractPredicateTransition {
	/** The precedence level this transition's predicate compares against. */
	public final int precedence;

	public PrecedencePredicateTransition(@NotNull ATNState target, int precedence) {
		super(target);
		this.precedence = precedence;
	}

	@Override
	public int getSerializationType() {
		return PRECEDENCE;
	}

	/** Precedence predicates are epsilon transitions: they consume no input. */
	@Override
	public boolean isEpsilon() {
		return true;
	}

	/** Never matches an input symbol; the predicate gates traversal, not input. */
	@Override
	public boolean matches(int symbol, int minVocabSymbol, int maxVocabSymbol) {
		return false;
	}

	/** Wrap this transition's precedence level in a semantic-context predicate. */
	public SemanticContext.PrecedencePredicate getPredicate() {
		return new SemanticContext.PrecedencePredicate(precedence);
	}

	@Override
	public String toString() {
		return precedence + " >= _p";
	}
}

View File

@ -38,7 +38,7 @@ import org.antlr.v4.runtime.misc.NotNull;
* may have to combine a bunch of them as it collects predicates from
* multiple ATN configurations into a single DFA state.
*/
public final class PredicateTransition extends Transition {
public final class PredicateTransition extends AbstractPredicateTransition {
public final int ruleIndex;
public final int predIndex;
public final boolean isCtxDependent; // e.g., $i ref in pred

View File

@ -32,6 +32,7 @@ package org.antlr.v4.runtime.atn;
public final class RuleStartState extends ATNState {
public RuleStopState stopState;
public boolean isPrecedenceRule;
@Override
public int getStateType() {

View File

@ -37,16 +37,32 @@ public final class RuleTransition extends Transition {
/** Ptr to the rule definition object for this rule ref */
public final int ruleIndex; // no Rule object at runtime
public final int precedence;
/** What node to begin computations following ref to rule */
@NotNull
public ATNState followState;
/**
* @deprecated Use
* {@link #RuleTransition(RuleStartState, int, int, ATNState)} instead.
*/
@Deprecated
public RuleTransition(@NotNull RuleStartState ruleStart,
int ruleIndex,
@NotNull ATNState followState)
{
this(ruleStart, ruleIndex, 0, followState);
}
public RuleTransition(@NotNull RuleStartState ruleStart,
int ruleIndex,
int precedence,
@NotNull ATNState followState)
{
super(ruleStart);
this.ruleIndex = ruleIndex;
this.precedence = precedence;
this.followState = followState;
}

View File

@ -36,8 +36,13 @@ import org.antlr.v4.runtime.misc.MurmurHash;
import org.antlr.v4.runtime.misc.NotNull;
import org.antlr.v4.runtime.misc.Utils;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Set;
/** A tree structure used to record the semantic context in which
@ -116,6 +121,54 @@ public abstract class SemanticContext {
}
}
/**
 * Semantic context representing a precedence predicate for left-recursive
 * rules. Evaluation delegates to {@link Recognizer#precpred} with this
 * predicate's precedence level.
 */
public static class PrecedencePredicate extends SemanticContext implements Comparable<PrecedencePredicate> {
	/** The precedence level tested against the parser's current precedence. */
	public final int precedence;

	protected PrecedencePredicate() {
		this.precedence = 0;
	}

	public PrecedencePredicate(int precedence) {
		this.precedence = precedence;
	}

	@Override
	public boolean eval(Recognizer<?, ?> parser, RuleContext outerContext) {
		return parser.precpred(outerContext, precedence);
	}

	@Override
	public int compareTo(PrecedencePredicate o) {
		// Do NOT use "precedence - o.precedence": subtraction overflows for
		// operands near Integer.MIN_VALUE/MAX_VALUE and can invert the order.
		if (precedence < o.precedence) {
			return -1;
		}

		return precedence == o.precedence ? 0 : 1;
	}

	@Override
	public int hashCode() {
		int hashCode = 1;
		hashCode = 31 * hashCode + precedence;
		return hashCode;
	}

	@Override
	public boolean equals(Object obj) {
		if (!(obj instanceof PrecedencePredicate)) {
			return false;
		}

		if (this == obj) {
			return true;
		}

		PrecedencePredicate other = (PrecedencePredicate)obj;
		return this.precedence == other.precedence;
	}

	@Override
	public String toString() {
		// NOTE(review): inherits the superclass rendering; consider including
		// the precedence value here for easier debugging.
		return super.toString();
	}
}
public static class AND extends SemanticContext {
@NotNull public final SemanticContext[] opnds;
@ -126,6 +179,13 @@ public abstract class SemanticContext {
if ( b instanceof AND ) operands.addAll(Arrays.asList(((AND)b).opnds));
else operands.add(b);
List<PrecedencePredicate> precedencePredicates = filterPrecedencePredicates(operands);
if (!precedencePredicates.isEmpty()) {
// interested in the transition with the lowest precedence
PrecedencePredicate reduced = Collections.min(precedencePredicates);
operands.add(reduced);
}
opnds = operands.toArray(new SemanticContext[operands.size()]);
}
@ -166,6 +226,13 @@ public abstract class SemanticContext {
if ( b instanceof OR ) operands.addAll(Arrays.asList(((OR)b).opnds));
else operands.add(b);
List<PrecedencePredicate> precedencePredicates = filterPrecedencePredicates(operands);
if (!precedencePredicates.isEmpty()) {
// interested in the transition with the highest precedence
PrecedencePredicate reduced = Collections.max(precedencePredicates);
operands.add(reduced);
}
this.opnds = operands.toArray(new SemanticContext[operands.size()]);
}
@ -222,4 +289,25 @@ public abstract class SemanticContext {
return result;
}
/**
 * Removes every {@link PrecedencePredicate} from {@code collection} and
 * returns the removed predicates. Returns an immutable empty list when the
 * collection contained none.
 */
private static List<PrecedencePredicate> filterPrecedencePredicates(Collection<? extends SemanticContext> collection) {
	List<PrecedencePredicate> removed = null;
	Iterator<? extends SemanticContext> it = collection.iterator();
	while (it.hasNext()) {
		SemanticContext ctx = it.next();
		if (!(ctx instanceof PrecedencePredicate)) {
			continue;
		}

		// Allocate lazily: most contexts contain no precedence predicates.
		if (removed == null) {
			removed = new ArrayList<PrecedencePredicate>();
		}

		removed.add((PrecedencePredicate)ctx);
		it.remove();
	}

	if (removed == null) {
		return Collections.<PrecedencePredicate>emptyList();
	}

	return removed;
}
}

View File

@ -63,6 +63,7 @@ public abstract class Transition {
public static final int SET = 7; // ~(A|B) or ~atom, wildcard, which convert to next 2
public static final int NOT_SET = 8;
public static final int WILDCARD = 9;
public static final int PRECEDENCE = 10;
public static final List<String> serializationNames =
@ -76,7 +77,8 @@ public abstract class Transition {
"ACTION",
"SET",
"NOT_SET",
"WILDCARD"
"WILDCARD",
"PRECEDENCE"
));
public static final Map<Class<? extends Transition>, Integer> serializationTypes =
@ -90,6 +92,7 @@ public abstract class Transition {
put(SetTransition.class, SET);
put(NotSetTransition.class, NOT_SET);
put(WildcardTransition.class, WILDCARD);
put(PrecedencePredicateTransition.class, PRECEDENCE);
}});
/** The target of this transition. */

View File

@ -38,7 +38,9 @@ import java.io.FileWriter;
import java.io.IOException;
import java.io.Writer;
import java.util.Collection;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;
public class Utils {
// Seriously: why isn't this built in to java? ugh!
@ -133,4 +135,15 @@ public class Utils {
t.join();
}
/** Convert an array of strings into a map from string to its array index.
 *  Useful for converting rule names to a name->ruleIndex map. Later
 *  duplicates overwrite earlier ones, as with repeated Map.put calls.
 */
public static Map<String, Integer> toMap(String[] keys) {
	Map<String, Integer> result = new HashMap<String, Integer>();
	int index = 0;
	for (String key : keys) {
		result.put(key, index++);
	}
	return result;
}
}

View File

@ -49,16 +49,16 @@ public interface ParseTree extends SyntaxTree {
ParseTree getChild(int i);
/** The {@link ParseTreeVisitor} needs a double dispatch method. */
public <T> T accept(ParseTreeVisitor<? extends T> visitor);
<T> T accept(ParseTreeVisitor<? extends T> visitor);
/** Return the combined text of all leaf nodes. Does not get any
* off-channel tokens (if any) so won't return whitespace and
* comments if they are sent to parser on hidden channel.
*/
public String getText();
String getText();
/** Specialize toStringTree so that it can print out more information
* based upon the parser.
*/
public String toStringTree(Parser parser);
String toStringTree(Parser parser);
}

View File

@ -34,6 +34,8 @@ import org.antlr.v4.runtime.Parser;
import org.antlr.v4.runtime.Token;
import org.antlr.v4.runtime.misc.Interval;
import java.util.List;
public class TerminalNodeImpl implements TerminalNode {
public Token symbol;
public ParseTree parent;

View File

@ -33,6 +33,9 @@ package org.antlr.v4.runtime.tree;
import org.antlr.v4.runtime.RuleContext;
import org.antlr.v4.runtime.Token;
import java.util.Collection;
import java.util.List;
/** The basic notion of a tree has a parent, a payload, and a list of children.
* It is the most abstract interface for all the trees used by ANTLR.
*/

View File

@ -31,6 +31,7 @@
package org.antlr.v4.runtime.tree;
import org.antlr.v4.runtime.Parser;
import org.antlr.v4.runtime.ParserRuleContext;
import org.antlr.v4.runtime.Token;
import org.antlr.v4.runtime.misc.NotNull;
import org.antlr.v4.runtime.misc.Nullable;
@ -42,6 +43,7 @@ import java.io.FileWriter;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
@ -152,6 +154,16 @@ public class Trees {
return t.getPayload().toString();
}
/** Return an ordered list of all children of this node (empty if none). */
public static List<Tree> getChildren(Tree t) {
	final int n = t.getChildCount();
	List<Tree> children = new ArrayList<Tree>(n);
	for (int i = 0; i < n; i++) {
		children.add(t.getChild(i));
	}
	return children;
}
/** Return a list of all ancestors of this node. The first node of
* list is the root and the last is the parent of this node.
*/
@ -167,6 +179,49 @@ public class Trees {
return ancestors;
}
/** Return all terminal nodes in {@code t} whose token type is {@code ttype}. */
public static Collection<ParseTree> findAllTokenNodes(ParseTree t, int ttype) {
	return findAllNodes(t, ttype, true);
}

/** Return all rule nodes in {@code t} whose rule index is {@code ruleIndex}. */
public static Collection<ParseTree> findAllRuleNodes(ParseTree t, int ruleIndex) {
	return findAllNodes(t, ruleIndex, false);
}

/** Find all nodes matching {@code index}: a token type when {@code findTokens}
 *  is true, otherwise a rule index. The root {@code t} itself is considered.
 */
public static List<ParseTree> findAllNodes(ParseTree t, int index, boolean findTokens) {
	List<ParseTree> nodes = new ArrayList<ParseTree>();
	_findAllNodes(t, index, findTokens, nodes);
	return nodes;
}
/** Recursive helper for {@code findAllNodes}: append every node in the
 *  subtree rooted at {@code t} that matches {@code index} (a token type when
 *  {@code findTokens} is true, otherwise a rule index) to {@code nodes}.
 */
public static void _findAllNodes(ParseTree t, int index, boolean findTokens,
								 List<? super ParseTree> nodes)
{
	// Test the current node before descending.
	if (findTokens) {
		if (t instanceof TerminalNode) {
			TerminalNode leaf = (TerminalNode)t;
			if (leaf.getSymbol().getType() == index) {
				nodes.add(t);
			}
		}
	}
	else if (t instanceof ParserRuleContext) {
		ParserRuleContext rule = (ParserRuleContext)t;
		if (rule.getRuleIndex() == index) {
			nodes.add(t);
		}
	}

	// Then recurse into each child.
	final int n = t.getChildCount();
	for (int i = 0; i < n; i++) {
		_findAllNodes(t.getChild(i), index, findTokens, nodes);
	}
}
/** Return a preorder list of every node in the tree rooted at {@code t},
 *  including {@code t} itself as the first element.
 */
public static List<ParseTree> descendants(ParseTree t) {
	List<ParseTree> result = new ArrayList<ParseTree>();
	result.add(t);
	for (int i = 0, n = t.getChildCount(); i < n; i++) {
		result.addAll(descendants(t.getChild(i)));
	}
	return result;
}
/** Utility class: static methods only, no instances. */
private Trees() {
}
}

View File

@ -0,0 +1,206 @@
package org.antlr.v4.runtime.tree.xpath;
import org.antlr.v4.runtime.ANTLRInputStream;
import org.antlr.v4.runtime.CommonTokenStream;
import org.antlr.v4.runtime.LexerNoViableAltException;
import org.antlr.v4.runtime.Parser;
import org.antlr.v4.runtime.ParserRuleContext;
import org.antlr.v4.runtime.Token;
import org.antlr.v4.runtime.misc.Utils;
import org.antlr.v4.runtime.tree.ParseTree;
import java.io.IOException;
import java.io.StringReader;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.Map;
/** Represent a subset of XPath XML path syntax for use in identifying nodes in
* parse trees.
*
* Split path into words and separators / and // via ANTLR itself then walk
* path elements from left to right. At each separator-word pair, find set
* of nodes. Next stage uses those as work list.
*
* The basic interface is ParseTree.findAll(parser, pathString). But that is
* just shorthand for:
*
* XPath p = new XPath(parser, xpath);
* return p.evaluate(this);
*
* See {@link org.antlr.v4.test.TestXPath} for descriptions. In short, this allows
* operators:
*
* / root
* // anywhere
* ! invert; this must appear directly after root or anywhere operator
*
* and path elements:
*
* ID token name
* 'string' any string literal token from the grammar
* expr rule name
* * wildcard matching any node
*
* Whitespace is not allowed.
*/
public class XPath {
	public static final String WILDCARD = "*"; // word not operator/separator
	public static final String NOT = "!"; // word for invert operator

	protected String path;       // the raw path string, kept for error messages
	protected XPathElement[] elements; // compiled path, one element per word
	protected Parser parser;     // supplies rule/token vocabularies for name lookup

	public XPath(Parser parser, String path) {
		this.parser = parser;
		this.path = path;
		elements = split(path);
//		System.out.println(Arrays.toString(elements));
	}

	// TODO: check for invalid token/rule names, bad syntax

	/** Tokenize {@code path} with XPathLexer, then walk the token sequence
	 *  left to right, pairing each separator (/ or //) and optional ! with
	 *  the word that follows it.
	 *
	 *  @return one XPathElement per path word, in path order
	 *  @throws IllegalArgumentException if the path cannot be read, cannot be
	 *          lexed, or contains an unexpected token
	 */
	public XPathElement[] split(String path) {
		ANTLRInputStream in;
		try {
			in = new ANTLRInputStream(new StringReader(path));
		}
		catch (IOException ioe) {
			throw new IllegalArgumentException("Could not read path: "+path, ioe);
		}
		// fail fast on lexical errors instead of recovering silently
		XPathLexer lexer = new XPathLexer(in) {
			public void recover(LexerNoViableAltException e) { throw e; }
		};
		lexer.removeErrorListeners();
		lexer.addErrorListener(new XPathLexerErrorListener());
		CommonTokenStream tokenStream = new CommonTokenStream(lexer);
		try {
			tokenStream.fill();
		}
		catch (LexerNoViableAltException e) {
			int pos = lexer.getCharPositionInLine();
			String msg = "Invalid tokens or characters at index "+pos+" in path '"+path+"'";
			throw new IllegalArgumentException(msg, e);
		}

		List<Token> tokens = tokenStream.getTokens();
//		System.out.println("path="+path+"=>"+tokens);
		List<XPathElement> elements = new ArrayList<XPathElement>();
		int n = tokens.size();
		int i=0;
loop:
		while ( i<n ) {
			Token el = tokens.get(i);
			Token next = null;
			switch ( el.getType() ) {
				case XPathLexer.ROOT :
				case XPathLexer.ANYWHERE :
					boolean anywhere = el.getType() == XPathLexer.ANYWHERE;
					i++;
					// a trailing separator leaves next==EOF; getXPathElement reports it
					next = tokens.get(i);
					boolean invert = next.getType()==XPathLexer.BANG;
					if ( invert ) {
						i++;
						next = tokens.get(i);
					}
					XPathElement pathElement = getXPathElement(next, anywhere);
					pathElement.invert = invert;
					elements.add(pathElement);
					i++;
					break;

				case XPathLexer.TOKEN_REF :
				case XPathLexer.RULE_REF :
				case XPathLexer.WILDCARD :
					// bare word at start of path: implicit child-of-root element
					elements.add( getXPathElement(el, false) );
					i++;
					break;

				case Token.EOF :
					break loop;

				default :
					throw new IllegalArgumentException("Unknown path element "+el);
			}
		}
		return elements.toArray(new XPathElement[0]);
	}

	/** Convert word like * or ID or expr to a path element. anywhere is true
	 *  if // precedes the word.
	 *
	 *  @throws IllegalArgumentException if the word is EOF (missing element)
	 *          or not a known token/rule name in the parser's vocabulary
	 */
	protected XPathElement getXPathElement(Token wordToken, boolean anywhere) {
		if ( wordToken.getType()==Token.EOF ) {
			throw new IllegalArgumentException("Missing path element at end of path");
		}
		String word = wordToken.getText();
		// resolve the word against the parser's rule and token vocabularies
		Map<String, Integer> ruleIndexes = Utils.toMap(parser.getRuleNames());
		Map<String, Integer> tokenTypes = Utils.toMap(parser.getTokenNames());
		Integer ttype = tokenTypes.get(word);
		Integer ruleIndex = ruleIndexes.get(word);
		switch ( wordToken.getType() ) {
			case XPathLexer.WILDCARD :
				return anywhere ?
					new XPathWildcardAnywhereElement() :
					new XPathWildcardElement();
			case XPathLexer.TOKEN_REF :
			case XPathLexer.STRING :
				if ( ttype==null ) {
					throw new IllegalArgumentException(word+
													   " at index "+
													   wordToken.getStartIndex()+
													   " isn't a valid token name");
				}
				return anywhere ?
					new XPathTokenAnywhereElement(word, ttype) :
					new XPathTokenElement(word, ttype);
			default :
				if ( ruleIndex==null ) {
					throw new IllegalArgumentException(word+
													   " at index "+
													   wordToken.getStartIndex()+
													   " isn't a valid rule name");
				}
				return anywhere ?
					new XPathRuleAnywhereElement(word, ruleIndex) :
					new XPathRuleElement(word, ruleIndex);
		}
	}

	/** Convenience: compile {@code xpath} and evaluate it against {@code tree}. */
	public static Collection<ParseTree> findAll(ParseTree tree, String xpath, Parser parser) {
		XPath p = new XPath(parser, xpath);
		return p.evaluate(tree);
	}

	/** Return a list of all nodes starting at t as root that satisfy the path.
	 *  The root / is relative to the node passed to evaluate().
	 */
	public Collection<ParseTree> evaluate(final ParseTree t) {
		// wrap t in a dummy root so "/x" means "x as a child of t"
		ParserRuleContext dummyRoot = new ParserRuleContext();
		dummyRoot.children = new ArrayList<ParseTree>() {{add(t);}}; // don't set t's parent.

		Collection<ParseTree> work = new ArrayList<ParseTree>();
		work.add(dummyRoot);

		// breadth-first: each path element maps the current work list to the
		// set of nodes it matches; the last element's matches are the answer
		int i = 0;
		while ( i < elements.length ) {
			Collection<ParseTree> next = new ArrayList<ParseTree>();
			for (ParseTree node : work) {
				if ( node.getChildCount()>0 ) {
					// only try to match next element if it has children
					// e.g., //func/*/stat might have a token node for which
					// we can't go looking for stat nodes.
					Collection<? extends ParseTree> matching = elements[i].evaluate(node);
					next.addAll(matching);
				}
			}
			i++;
			work = next;
		}

		return work;
	}
}

View File

@ -0,0 +1,26 @@
package org.antlr.v4.runtime.tree.xpath;
import org.antlr.v4.runtime.tree.ParseTree;
import java.util.Collection;
public abstract class XPathElement {
	protected String nodeName;   // token name, rule name, or "*"
	protected boolean invert;    // true when the path word was preceded by !

	/** Construct a path element matching nodes named {@code nodeName}, such
	 *  as /ID, ID, or the wildcard "*"; subclasses decide whether the name is
	 *  interpreted as a token, a rule, or a wildcard.
	 */
	public XPathElement(String nodeName) {
		this.nodeName = nodeName;
	}

	/** Given tree rooted at t return all nodes matched by this path element */
	public abstract Collection<ParseTree> evaluate(ParseTree t);

	@Override
	public String toString() {
		StringBuilder buf = new StringBuilder(getClass().getSimpleName());
		buf.append('[');
		if ( invert ) {
			buf.append('!');
		}
		buf.append(nodeName);
		buf.append(']');
		return buf.toString();
	}
}

View File

@ -0,0 +1,63 @@
lexer grammar XPathLexer;

// TOKEN_REF and RULE_REF have no rule of their own; they are assigned
// dynamically by the ID rule's action below based on first-character case.
tokens { TOKEN_REF, RULE_REF }

/*
path : separator? word (separator word)* EOF ;
separator
: '/' '!'
| '//' '!'
| '/'
| '//'
;
word: TOKEN_REF
| RULE_REF
| STRING
| '*'
;
*/

// Path operators/separators.
ANYWHERE : '//' ;
ROOT : '/' ;
WILDCARD : '*' ;
BANG : '!' ;

// Identifier; retyped to TOKEN_REF (initial uppercase) or RULE_REF
// (initial lowercase) per ANTLR grammar naming conventions.
ID : NameStartChar NameChar*
{
String text = getText();
if ( Character.isUpperCase(text.charAt(0)) ) setType(TOKEN_REF);
else setType(RULE_REF);
}
;

fragment
NameChar : NameStartChar
| '0'..'9'
| '_'
| '\u00B7'
| '\u0300'..'\u036F'
| '\u203F'..'\u2040'
;

fragment
NameStartChar
: 'A'..'Z' | 'a'..'z'
| '\u00C0'..'\u00D6'
| '\u00D8'..'\u00F6'
| '\u00F8'..'\u02FF'
| '\u0370'..'\u037D'
| '\u037F'..'\u1FFF'
| '\u200C'..'\u200D'
| '\u2070'..'\u218F'
| '\u2C00'..'\u2FEF'
| '\u3001'..'\uD7FF'
| '\uF900'..'\uFDCF'
| '\uFDF0'..'\uFFFD'
; // ignores | ['\u10000-'\uEFFFF] ;

// Single-quoted grammar literal, e.g. 'return' (non-greedy match).
STRING : '\'' .*? '\'' ;

//WS : [ \t\r\n]+ -> skip ;

View File

@ -0,0 +1,14 @@
package org.antlr.v4.runtime.tree.xpath;
import org.antlr.v4.runtime.BaseErrorListener;
import org.antlr.v4.runtime.RecognitionException;
import org.antlr.v4.runtime.Recognizer;
/** Error listener that deliberately suppresses XPath lexer syntax-error
 *  output. Lexical problems are surfaced instead by XPath.split(), which
 *  overrides Lexer.recover() to rethrow LexerNoViableAltException and
 *  converts it into an IllegalArgumentException.
 */
public class XPathLexerErrorListener extends BaseErrorListener {
	// Intentionally empty: do not print to the console for bad paths.
	@Override
	public void syntaxError(Recognizer<?, ?> recognizer, Object offendingSymbol,
							int line, int charPositionInLine, String msg,
							RecognitionException e)
	{
	}
}

View File

@ -0,0 +1,20 @@
package org.antlr.v4.runtime.tree.xpath;
import org.antlr.v4.runtime.tree.ParseTree;
import org.antlr.v4.runtime.tree.Trees;
import java.util.Collection;
/** Either ID at start of path or ...//ID in middle of path */
public class XPathRuleAnywhereElement extends XPathElement {
	protected int ruleIndex; // parser rule index this element matches

	public XPathRuleAnywhereElement(String ruleName, int ruleIndex) {
		super(ruleName);
		this.ruleIndex = ruleIndex;
	}

	/** Return all nodes anywhere in the tree rooted at t whose rule index
	 *  matches ruleIndex.
	 *  NOTE(review): the inherited invert flag is not consulted here, so
	 *  //!rule currently behaves like //rule — confirm intended behavior.
	 */
	@Override
	public Collection<ParseTree> evaluate(ParseTree t) {
		return Trees.findAllRuleNodes(t, ruleIndex);
	}
}

View File

@ -0,0 +1,35 @@
package org.antlr.v4.runtime.tree.xpath;
import org.antlr.v4.runtime.ParserRuleContext;
import org.antlr.v4.runtime.tree.ParseTree;
import org.antlr.v4.runtime.tree.Tree;
import org.antlr.v4.runtime.tree.Trees;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
public class XPathRuleElement extends XPathElement {
	protected int ruleIndex; // parser rule index this element matches

	public XPathRuleElement(String ruleName, int ruleIndex) {
		super(ruleName);
		this.ruleIndex = ruleIndex;
	}

	/** Collect the direct children of t that are rule nodes whose rule index
	 *  matches this element (or, when inverted, does not match).
	 */
	@Override
	public Collection<ParseTree> evaluate(ParseTree t) {
		List<ParseTree> matches = new ArrayList<ParseTree>();
		for (Tree child : Trees.getChildren(t)) {
			if ( !(child instanceof ParserRuleContext) ) continue; // tokens can't match a rule
			ParserRuleContext ctx = (ParserRuleContext)child;
			boolean sameRule = ctx.getRuleIndex() == ruleIndex;
			if ( sameRule != invert ) { // XOR: match normally, or non-match when inverted
				matches.add(ctx);
			}
		}
		return matches;
	}
}

View File

@ -0,0 +1,19 @@
package org.antlr.v4.runtime.tree.xpath;
import org.antlr.v4.runtime.tree.ParseTree;
import org.antlr.v4.runtime.tree.Trees;
import java.util.Collection;
public class XPathTokenAnywhereElement extends XPathElement {
	protected int tokenType; // token type this element matches

	public XPathTokenAnywhereElement(String tokenName, int tokenType) {
		super(tokenName);
		this.tokenType = tokenType;
	}

	/** Return all terminal nodes anywhere in the tree rooted at t whose token
	 *  type matches tokenType.
	 *  NOTE(review): the inherited invert flag is not consulted here, so
	 *  //!TOKEN currently behaves like //TOKEN — confirm intended behavior.
	 */
	@Override
	public Collection<ParseTree> evaluate(ParseTree t) {
		return Trees.findAllTokenNodes(t, tokenType);
	}
}

View File

@ -0,0 +1,35 @@
package org.antlr.v4.runtime.tree.xpath;
import org.antlr.v4.runtime.tree.ParseTree;
import org.antlr.v4.runtime.tree.TerminalNode;
import org.antlr.v4.runtime.tree.Tree;
import org.antlr.v4.runtime.tree.Trees;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
public class XPathTokenElement extends XPathElement {
	protected int tokenType; // token type this element matches

	public XPathTokenElement(String tokenName, int tokenType) {
		super(tokenName);
		this.tokenType = tokenType;
	}

	/** Collect the direct children of t that are terminal nodes whose token
	 *  type matches this element (or, when inverted, does not match).
	 */
	@Override
	public Collection<ParseTree> evaluate(ParseTree t) {
		List<ParseTree> matches = new ArrayList<ParseTree>();
		for (Tree child : Trees.getChildren(t)) {
			if ( !(child instanceof TerminalNode) ) continue; // rule nodes can't match a token
			TerminalNode tnode = (TerminalNode)child;
			boolean sameType = tnode.getSymbol().getType() == tokenType;
			if ( sameType != invert ) { // XOR: match normally, or non-match when inverted
				matches.add(tnode);
			}
		}
		return matches;
	}
}

View File

@ -0,0 +1,19 @@
package org.antlr.v4.runtime.tree.xpath;
import org.antlr.v4.runtime.tree.ParseTree;
import org.antlr.v4.runtime.tree.Trees;
import java.util.ArrayList;
import java.util.Collection;
public class XPathWildcardAnywhereElement extends XPathElement {
	public XPathWildcardAnywhereElement() {
		super(XPath.WILDCARD);
	}

	/** Return every node at or below t. An inverted wildcard (!*) matches
	 *  nothing by definition, so it yields an empty (mutable) list.
	 */
	@Override
	public Collection<ParseTree> evaluate(ParseTree t) {
		if ( !invert ) {
			return Trees.descendants(t);
		}
		return new ArrayList<ParseTree>(); // !* is weird but valid (empty)
	}
}

View File

@ -0,0 +1,26 @@
package org.antlr.v4.runtime.tree.xpath;
import org.antlr.v4.runtime.tree.ParseTree;
import org.antlr.v4.runtime.tree.Tree;
import org.antlr.v4.runtime.tree.Trees;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
public class XPathWildcardElement extends XPathElement {
	public XPathWildcardElement() {
		super(XPath.WILDCARD);
	}

	/** Return all direct children of t, in child order. An inverted wildcard
	 *  (!*) matches nothing, so it yields an empty list.
	 */
	@Override
	public Collection<ParseTree> evaluate(final ParseTree t) {
		List<ParseTree> kids = new ArrayList<ParseTree>();
		if ( invert ) return kids; // !* is weird but valid (empty)
		int n = t.getChildCount();
		for (int i = 0; i < n; i++) {
			kids.add(t.getChild(i));
		}
		return kids;
	}
}

View File

@ -29,16 +29,16 @@
*/
/** How to generate rules derived from left-recursive rules.
* These rely on recRuleDefArg(), recRuleAltPredicate(),
* These rely on recRuleAltPredicate(),
* recRuleArg(), recRuleSetResultAction(), recRuleSetReturnAction()
* templates in main language.stg
*/
group LeftRecursiveRules;
recRule(ruleName, precArgDef, argName, primaryAlts, opAlts, setResultAction,
recRule(ruleName, argName, primaryAlts, opAlts, setResultAction,
userRetvals, leftRecursiveRuleRefLabels) ::=
<<
<ruleName>[<precArgDef>]<if(userRetvals)> returns [<userRetvals>]<endif>
<ruleName><if(userRetvals)> returns [<userRetvals>]<endif>
: ( {} <primaryAlts:{alt | <alt.altText> }; separator="\n | ">
)
(
@ -47,6 +47,6 @@ recRule(ruleName, precArgDef, argName, primaryAlts, opAlts, setResultAction,
;
>>
recRuleAlt(alt, pred) ::= <<
{<pred>}? <alt.altText>
recRuleAlt(alt, precOption, opPrec, pred) ::= <<
{<pred>}?\<<precOption>=<opPrec>\> <alt.altText>
>>

View File

@ -359,13 +359,13 @@ LeftRecursiveRuleFunction(currentRule,code,locals,ruleCtx,altLabelCtxs,
<ruleCtx>
<altLabelCtxs:{l | <altLabelCtxs.(l)>}; separator="\n">
<if(currentRule.modifiers)><currentRule.modifiers:{f | <f> }><else>public final <endif><currentRule.ctxType> <currentRule.name>(<currentRule.args; separator=",">) throws RecognitionException {
<if(currentRule.modifiers)><currentRule.modifiers:{f | <f> }><else>public final <endif><currentRule.ctxType> <currentRule.name>(int _p<currentRule.args:{a | , <a>}>) throws RecognitionException {
ParserRuleContext _parentctx = _ctx;
int _parentState = getState();
<currentRule.ctxType> _localctx = new <currentRule.ctxType>(_ctx, _parentState<currentRule.args:{a | , <a.name>}>);
<currentRule.ctxType> _prevctx = _localctx;
int _startState = <currentRule.startState>;
enterRecursionRule(_localctx, RULE_<currentRule.name>);
enterRecursionRule(_localctx, RULE_<currentRule.name>, _p);
<namedActions.init>
<locals; separator="\n">
try {
@ -594,7 +594,7 @@ cases(ttypes) ::= <<
>>
InvokeRule(r, argExprsChunks) ::= <<
setState(<r.stateNumber>); <if(r.labels)><r.labels:{l | <labelref(l)> = }><endif><r.name>(<argExprsChunks>);
setState(<r.stateNumber>); <if(r.labels)><r.labels:{l | <labelref(l)> = }><endif><r.name>(<if(r.ast.options.p)><r.ast.options.p><if(argExprsChunks)>,<endif><endif><argExprsChunks>);
>>
MatchToken(m) ::= <<
@ -792,21 +792,18 @@ labelref(x) ::= "<if(!x.isLocal)>((<x.ctx.name>)_localctx).<endif><x.name>"
ctx(actionChunk) ::= "((<actionChunk.ctx.name>)_localctx)"
// used for left-recursive rules
recRuleDefArg() ::= "int _p"
recRuleArg() ::= "$_p"
recRuleAltPredicate(ruleName,opPrec) ::= "<opPrec> >= <recRuleArg()>"
recRuleSetResultAction() ::= "$tree=$<ruleName>.tree;"
recRuleAltPredicate(ruleName,opPrec) ::= "precpred(_ctx, <opPrec>)"
recRuleSetReturnAction(src,name) ::= "$<name>=$<src>.<name>;"
recRuleSetStopToken() ::= "_ctx.stop = _input.LT(-1);"
recRuleAltStartAction(ruleName, ctxName, label) ::= <<
_localctx = new <ctxName>Context(_parentctx, _parentState, _p);
_localctx = new <ctxName>Context(_parentctx, _parentState);
<if(label)>_localctx.<label> = _prevctx;<endif>
pushNewRecursionContext(_localctx, _startState, RULE_<ruleName>);
>>
recRuleLabeledAltStartAction(ruleName, currentAltLabel, label) ::= <<
_localctx = new <currentAltLabel; format="cap">Context(new <ruleName; format="cap">Context(_parentctx, _parentState, _p));
_localctx = new <currentAltLabel; format="cap">Context(new <ruleName; format="cap">Context(_parentctx, _parentState));
<if(label)>((<currentAltLabel; format="cap">Context)_localctx).<label> = _prevctx;<endif>
pushNewRecursionContext(_localctx, _startState, RULE_<ruleName>);
>>

View File

@ -36,6 +36,7 @@ import org.antlr.runtime.tree.CommonTreeNodeStream;
import org.antlr.runtime.tree.Tree;
import org.antlr.v4.Tool;
import org.antlr.v4.codegen.CodeGenerator;
import org.antlr.v4.parse.ANTLRParser;
import org.antlr.v4.parse.GrammarASTAdaptor;
import org.antlr.v4.parse.LeftRecursiveRuleWalker;
import org.antlr.v4.runtime.misc.IntervalSet;
@ -221,8 +222,6 @@ public class LeftRecursiveRuleAnalyzer extends LeftRecursiveRuleWalker {
public String getArtificialOpPrecRule() {
ST ruleST = recRuleTemplates.getInstanceOf("recRule");
ruleST.add("ruleName", ruleName);
ST argDefST = codegenTemplates.getInstanceOf("recRuleDefArg");
ruleST.add("precArgDef", argDefST);
ST ruleArgST = codegenTemplates.getInstanceOf("recRuleArg");
ruleST.add("argName", ruleArgST);
ST setResultST = codegenTemplates.getInstanceOf("recRuleSetResultAction");
@ -241,6 +240,8 @@ public class LeftRecursiveRuleAnalyzer extends LeftRecursiveRuleWalker {
predST.add("ruleName", ruleName);
altST.add("pred", predST);
altST.add("alt", altInfo);
altST.add("precOption", LeftRecursiveRuleTransformer.PRECEDENCE_OPTION_NAME);
altST.add("opPrec", precedence(alt));
ruleST.add("opAlts", altST);
}
@ -260,7 +261,7 @@ public class LeftRecursiveRuleAnalyzer extends LeftRecursiveRuleWalker {
boolean recursive = rref.getText().equals(ruleName);
boolean rightmost = rref == outerAltRuleRefs.get(outerAltRuleRefs.size()-1);
if ( recursive && rightmost ) {
rref.setText(ruleName+"["+prec+"]");
rref.setText(ruleName+"<"+LeftRecursiveRuleTransformer.PRECEDENCE_OPTION_NAME+"="+prec+">");
}
}
return t;

View File

@ -43,6 +43,8 @@ import org.antlr.v4.parse.GrammarASTAdaptor;
import org.antlr.v4.parse.ScopeParser;
import org.antlr.v4.parse.ToolANTLRParser;
import org.antlr.v4.runtime.misc.Pair;
import org.antlr.v4.semantics.BasicSemanticChecks;
import org.antlr.v4.semantics.RuleCollector;
import org.antlr.v4.tool.AttributeDict;
import org.antlr.v4.tool.ErrorType;
import org.antlr.v4.tool.Grammar;
@ -54,6 +56,7 @@ import org.antlr.v4.tool.ast.ActionAST;
import org.antlr.v4.tool.ast.AltAST;
import org.antlr.v4.tool.ast.BlockAST;
import org.antlr.v4.tool.ast.GrammarAST;
import org.antlr.v4.tool.ast.GrammarASTWithOptions;
import org.antlr.v4.tool.ast.GrammarRootAST;
import org.antlr.v4.tool.ast.RuleAST;
@ -67,6 +70,8 @@ import java.util.List;
* MODIFIES grammar AST in place.
*/
public class LeftRecursiveRuleTransformer {
public static final String PRECEDENCE_OPTION_NAME = "p";
public GrammarRootAST ast;
public Collection<Rule> rules;
public Grammar g;
@ -95,11 +100,10 @@ public class LeftRecursiveRuleTransformer {
// update all refs to recursive rules to have [0] argument
for (GrammarAST r : ast.getNodesWithType(ANTLRParser.RULE_REF)) {
if ( r.getParent().getType()==ANTLRParser.RULE ) continue; // must be rule def
if ( r.getChildCount()>0 ) continue; // already has arg; must be in rewritten rule
if ( ((GrammarASTWithOptions)r).getOptionString(PRECEDENCE_OPTION_NAME) != null ) continue; // already has arg; must be in rewritten rule
if ( leftRecursiveRuleNames.contains(r.getText()) ) {
// found ref to recursive rule not already rewritten with arg
ActionAST arg = new ActionAST(new CommonToken(ANTLRParser.ARG_ACTION, "0"));
r.addChild(arg);
((GrammarASTWithOptions)r).setOption(PRECEDENCE_OPTION_NAME, (GrammarAST)new GrammarASTAdaptor().create(ANTLRParser.INT, "0"));
}
}
}
@ -144,6 +148,12 @@ public class LeftRecursiveRuleTransformer {
transform.reduceBlocksToSets(r.ast);
transform.expandParameterizedLoops(r.ast);
// Rerun semantic checks on the new rule
RuleCollector ruleCollector = new RuleCollector(g);
ruleCollector.visit(t, "rule");
BasicSemanticChecks basics = new BasicSemanticChecks(g, ruleCollector);
basics.visit(t, "rule");
// track recursive alt info for codegen
r.recPrimaryAlts = new ArrayList<LeftRecursiveRuleAltInfo>();
r.recPrimaryAlts.addAll(leftRecursiveRuleWalker.prefixAlts);

View File

@ -42,8 +42,10 @@ import org.antlr.v4.runtime.atn.AtomTransition;
import org.antlr.v4.runtime.atn.BlockStartState;
import org.antlr.v4.runtime.atn.DecisionState;
import org.antlr.v4.runtime.atn.LoopEndState;
import org.antlr.v4.runtime.atn.PrecedencePredicateTransition;
import org.antlr.v4.runtime.atn.PredicateTransition;
import org.antlr.v4.runtime.atn.RangeTransition;
import org.antlr.v4.runtime.atn.RuleStartState;
import org.antlr.v4.runtime.atn.RuleTransition;
import org.antlr.v4.runtime.atn.SetTransition;
import org.antlr.v4.runtime.atn.Transition;
@ -119,6 +121,7 @@ public class ATNSerializer {
// dump states, count edges and collect sets while doing so
IntegerList nonGreedyStates = new IntegerList();
IntegerList precedenceStates = new IntegerList();
data.add(atn.states.size());
for (ATNState s : atn.states) {
if ( s==null ) { // might be optimized away
@ -131,6 +134,10 @@ public class ATNSerializer {
nonGreedyStates.add(s.stateNumber);
}
if (s instanceof RuleStartState && ((RuleStartState)s).isPrecedenceRule) {
precedenceStates.add(s.stateNumber);
}
data.add(stateType);
if (s.ruleIndex == -1) {
@ -171,6 +178,12 @@ public class ATNSerializer {
data.add(nonGreedyStates.get(i));
}
// precedence states
data.add(precedenceStates.size());
for (int i = 0; i < precedenceStates.size(); i++) {
data.add(precedenceStates.get(i));
}
int nrules = atn.ruleToStartState.length;
data.add(nrules);
for (int r=0; r<nrules; r++) {
@ -260,6 +273,11 @@ public class ATNSerializer {
trg = ((RuleTransition)t).followState.stateNumber;
arg1 = ((RuleTransition)t).target.stateNumber;
arg2 = ((RuleTransition)t).ruleIndex;
arg3 = ((RuleTransition)t).precedence;
break;
case Transition.PRECEDENCE:
PrecedencePredicateTransition ppt = (PrecedencePredicateTransition)t;
arg1 = ppt.precedence;
break;
case Transition.PREDICATE :
PredicateTransition pt = (PredicateTransition)t;
@ -382,6 +400,10 @@ public class ATNSerializer {
for (int i = 0; i < numNonGreedyStates; i++) {
int stateNumber = ATNSimulator.toInt(data[p++]);
}
int numPrecedenceStates = ATNSimulator.toInt(data[p++]);
for (int i = 0; i < numPrecedenceStates; i++) {
int stateNumber = ATNSimulator.toInt(data[p++]);
}
int nrules = ATNSimulator.toInt(data[p++]);
for (int i=0; i<nrules; i++) {
int s = ATNSimulator.toInt(data[p++]);

View File

@ -35,6 +35,7 @@ import org.antlr.runtime.RecognitionException;
import org.antlr.runtime.Token;
import org.antlr.runtime.tree.CommonTreeNodeStream;
import org.antlr.runtime.tree.Tree;
import org.antlr.v4.analysis.LeftRecursiveRuleTransformer;
import org.antlr.v4.misc.CharSupport;
import org.antlr.v4.parse.ANTLRParser;
import org.antlr.v4.parse.ATNBuilder;
@ -42,6 +43,7 @@ import org.antlr.v4.parse.GrammarASTAdaptor;
import org.antlr.v4.runtime.atn.ATN;
import org.antlr.v4.runtime.atn.ATNState;
import org.antlr.v4.runtime.atn.ATNType;
import org.antlr.v4.runtime.atn.AbstractPredicateTransition;
import org.antlr.v4.runtime.atn.ActionTransition;
import org.antlr.v4.runtime.atn.AtomTransition;
import org.antlr.v4.runtime.atn.BasicBlockStartState;
@ -54,6 +56,7 @@ import org.antlr.v4.runtime.atn.LoopEndState;
import org.antlr.v4.runtime.atn.NotSetTransition;
import org.antlr.v4.runtime.atn.PlusBlockStartState;
import org.antlr.v4.runtime.atn.PlusLoopbackState;
import org.antlr.v4.runtime.atn.PrecedencePredicateTransition;
import org.antlr.v4.runtime.atn.PredicateTransition;
import org.antlr.v4.runtime.atn.RuleStartState;
import org.antlr.v4.runtime.atn.RuleStopState;
@ -79,6 +82,7 @@ import org.antlr.v4.tool.ast.ActionAST;
import org.antlr.v4.tool.ast.AltAST;
import org.antlr.v4.tool.ast.BlockAST;
import org.antlr.v4.tool.ast.GrammarAST;
import org.antlr.v4.tool.ast.GrammarASTWithOptions;
import org.antlr.v4.tool.ast.PredAST;
import org.antlr.v4.tool.ast.QuantifierAST;
import org.antlr.v4.tool.ast.TerminalAST;
@ -297,7 +301,11 @@ public class ParserATNFactory implements ATNFactory {
RuleStartState start = atn.ruleToStartState[r.index];
ATNState left = newState(node);
ATNState right = newState(node);
RuleTransition call = new RuleTransition(start, r.index, right);
int precedence = 0;
if (((GrammarASTWithOptions)node).getOptionString(LeftRecursiveRuleTransformer.PRECEDENCE_OPTION_NAME) != null) {
precedence = Integer.parseInt(((GrammarASTWithOptions)node).getOptionString(LeftRecursiveRuleTransformer.PRECEDENCE_OPTION_NAME));
}
RuleTransition call = new RuleTransition(start, r.index, precedence, right);
left.addTransition(call);
node.atnState = left;
@ -330,8 +338,17 @@ public class ParserATNFactory implements ATNFactory {
//System.out.println("sempred: "+ pred);
ATNState left = newState(pred);
ATNState right = newState(pred);
boolean isCtxDependent = UseDefAnalyzer.actionIsContextDependent(pred);
PredicateTransition p = new PredicateTransition(right, currentRule.index, g.sempreds.get(pred), isCtxDependent);
AbstractPredicateTransition p;
if (pred.getOptionString(LeftRecursiveRuleTransformer.PRECEDENCE_OPTION_NAME) != null) {
int precedence = Integer.parseInt(pred.getOptionString(LeftRecursiveRuleTransformer.PRECEDENCE_OPTION_NAME));
p = new PrecedencePredicateTransition(right, precedence);
}
else {
boolean isCtxDependent = UseDefAnalyzer.actionIsContextDependent(pred);
p = new PredicateTransition(right, currentRule.index, g.sempreds.get(pred), isCtxDependent);
}
left.addTransition(p);
pred.atnState = left;
return new Handle(left, right);
@ -627,6 +644,7 @@ public class ParserATNFactory implements ATNFactory {
RuleStartState start = newState(RuleStartState.class, r.ast);
RuleStopState stop = newState(RuleStopState.class, r.ast);
start.stopState = stop;
start.isPrecedenceRule = r instanceof LeftRecursiveRule;
start.setRuleIndex(r.index);
stop.setRuleIndex(r.index);
atn.ruleToStartState[r.index] = start;

View File

@ -170,7 +170,7 @@ atom[GrammarAST label, boolean invert] returns [List<SrcOp> omos]
;
ruleref[GrammarAST label] returns [List<SrcOp> omos]
: ^(RULE_REF ARG_ACTION?) {$omos = controller.ruleRef($RULE_REF, $label, $ARG_ACTION);}
: ^(RULE_REF ARG_ACTION? elementOptions?) {$omos = controller.ruleRef($RULE_REF, $label, $ARG_ACTION);}
;
range[GrammarAST label] returns [List<SrcOp> omos]

View File

@ -83,8 +83,9 @@ public class InvokeRule extends RuleElement implements LabeledOp {
rf.addContextDecl(ast.getAltLabel(), d);
}
}
if ( ast.getChildCount()>0 ) {
ActionAST arg = (ActionAST)ast.getChild(0);
ActionAST arg = (ActionAST)ast.getFirstChildWithType(ANTLRParser.ARG_ACTION);
if ( arg != null ) {
argExprsChunks = ActionTranslator.translateAction(factory, rf, arg.token, arg);
}

View File

@ -852,7 +852,13 @@ if ( options!=null ) {
// directive to become the root node or ignore the tree produced
//
ruleref
: RULE_REF ARG_ACTION? -> ^(RULE_REF<RuleRefAST> ARG_ACTION<ActionAST>?)
@after {
GrammarAST options = (GrammarAST)$tree.getFirstChildWithType(ANTLRParser.ELEMENT_OPTIONS);
if ( options!=null ) {
Grammar.setNodeOptions($tree, options);
}
}
: RULE_REF ARG_ACTION? elementOptions? -> ^(RULE_REF<RuleRefAST> ARG_ACTION<ActionAST>? elementOptions?)
;
catch [RecognitionException re] { throw re; } // pass upwards to element

View File

@ -182,7 +182,9 @@ atom returns [ATNFactory.Handle p]
;
ruleref returns [ATNFactory.Handle p]
: ^(RULE_REF ARG_ACTION?) {$p = factory.ruleRef($RULE_REF);}
: ^(RULE_REF ARG_ACTION? ^(ELEMENT_OPTIONS .*)) {$p = factory.ruleRef($RULE_REF);}
| ^(RULE_REF ARG_ACTION?) {$p = factory.ruleRef($RULE_REF);}
| RULE_REF {$p = factory.ruleRef($RULE_REF);}
;
range returns [ATNFactory.Handle p]

View File

@ -939,7 +939,7 @@ ruleref
@after {
exitRuleref($start);
}
: ^(RULE_REF arg=ARG_ACTION?)
: ^(RULE_REF arg=ARG_ACTION? elementOptions?)
{
ruleRef($RULE_REF, (ActionAST)$ARG_ACTION);
if ( $arg!=null ) actionInAlt((ActionAST)$arg);

View File

@ -204,7 +204,7 @@ alternative
;
atom
: ^(RULE_REF ARG_ACTION?)
: ^(RULE_REF ARG_ACTION? elementOptions?)
| ^(STRING_LITERAL elementOptions)
| STRING_LITERAL
| ^(TOKEN_REF elementOptions)

View File

@ -501,6 +501,20 @@ public class BasicSemanticChecks extends GrammarTreeVisitor {
return false;
}
boolean checkRuleRefOptions(TerminalAST elem, GrammarAST ID, GrammarAST valueAST) {
Token optionID = ID.token;
String fileName = optionID.getInputStream().getSourceName();
// don't care about id<SimpleValue> options
if ( valueAST!=null && !Grammar.ruleRefOptions.contains(optionID.getText()) ) {
g.tool.errMgr.grammarError(ErrorType.ILLEGAL_OPTION,
fileName,
optionID,
optionID.getText());
return false;
}
// TODO: extra checks depending on rule kind?
return true;
}
boolean checkTokenOptions(TerminalAST elem, GrammarAST ID, GrammarAST valueAST) {
Token optionID = ID.token;

View File

@ -30,6 +30,7 @@
package org.antlr.v4.semantics;
import org.antlr.v4.parse.ANTLRParser;
import org.antlr.v4.runtime.misc.NotNull;
import org.antlr.v4.runtime.misc.Nullable;
import org.antlr.v4.tool.Alternative;
@ -263,7 +264,7 @@ public class SymbolChecks {
for (GrammarAST ref : rulerefs) {
String ruleName = ref.getText();
Rule r = g.getRule(ruleName);
GrammarAST arg = (GrammarAST)ref.getChild(0);
GrammarAST arg = (GrammarAST)ref.getFirstChildWithType(ANTLRParser.ARG_ACTION);
if ( arg!=null && (r==null || r.args==null) ) {
errMgr.grammarError(ErrorType.RULE_HAS_NO_ARGS,
g.fileName, ref.token, ruleName);

View File

@ -33,6 +33,7 @@ package org.antlr.v4.tool;
import org.antlr.v4.misc.Utils;
import org.antlr.v4.runtime.atn.ATNConfig;
import org.antlr.v4.runtime.atn.ATNState;
import org.antlr.v4.runtime.atn.AbstractPredicateTransition;
import org.antlr.v4.runtime.atn.ActionTransition;
import org.antlr.v4.runtime.atn.AtomTransition;
import org.antlr.v4.runtime.atn.BlockEndState;
@ -41,7 +42,6 @@ import org.antlr.v4.runtime.atn.DecisionState;
import org.antlr.v4.runtime.atn.NotSetTransition;
import org.antlr.v4.runtime.atn.PlusBlockStartState;
import org.antlr.v4.runtime.atn.PlusLoopbackState;
import org.antlr.v4.runtime.atn.PredicateTransition;
import org.antlr.v4.runtime.atn.RangeTransition;
import org.antlr.v4.runtime.atn.RuleStopState;
import org.antlr.v4.runtime.atn.RuleTransition;
@ -255,7 +255,7 @@ public class DOTGenerator {
edgeST = stlib.getInstanceOf("action-edge");
edgeST.add("label", getEdgeLabel(edge.toString()));
}
else if ( edge instanceof PredicateTransition ) {
else if ( edge instanceof AbstractPredicateTransition ) {
edgeST = stlib.getInstanceOf("edge");
edgeST.add("label", getEdgeLabel(edge.toString()));
}

View File

@ -30,14 +30,8 @@
package org.antlr.v4.tool;
import org.antlr.runtime.ANTLRStringStream;
import org.antlr.runtime.RecognitionException;
import org.antlr.runtime.TokenStream;
import org.antlr.runtime.tree.Tree;
import org.antlr.runtime.tree.TreeVisitor;
import org.antlr.runtime.tree.TreeVisitorAction;
import org.antlr.runtime.tree.TreeWizard;
import org.antlr.v4.Tool;
import org.antlr.v4.analysis.LeftRecursiveRuleTransformer;
import org.antlr.v4.misc.CharSupport;
import org.antlr.v4.misc.OrderedHashMap;
import org.antlr.v4.misc.Utils;
@ -45,8 +39,10 @@ import org.antlr.v4.parse.ANTLRParser;
import org.antlr.v4.parse.GrammarASTAdaptor;
import org.antlr.v4.parse.GrammarTreeVisitor;
import org.antlr.v4.parse.TokenVocabParser;
import org.antlr.v4.runtime.CharStream;
import org.antlr.v4.runtime.Lexer;
import org.antlr.v4.runtime.Token;
import org.antlr.v4.runtime.TokenStream;
import org.antlr.v4.runtime.atn.ATN;
import org.antlr.v4.runtime.dfa.DFA;
import org.antlr.v4.runtime.misc.IntSet;
@ -61,9 +57,12 @@ import org.antlr.v4.tool.ast.GrammarASTWithOptions;
import org.antlr.v4.tool.ast.GrammarRootAST;
import org.antlr.v4.tool.ast.PredAST;
import org.antlr.v4.tool.ast.TerminalAST;
import org.antlr.v4.tool.interp.LexerInterpreter;
import org.antlr.v4.tool.interp.ParserInterpreter;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.HashSet;
import java.util.LinkedHashMap;
@ -90,6 +89,12 @@ public class Grammar implements AttributeResolver {
public static final Set<String> LexerBlockOptions = new HashSet<String>();
/** Legal options for rule refs like id<key=value> */
public static final Set<String> ruleRefOptions = new HashSet<String>();
static {
ruleRefOptions.add(LeftRecursiveRuleTransformer.PRECEDENCE_OPTION_NAME);
}
/** Legal options for terminal refs like ID<assoc=right> */
public static final Set<String> tokenOptions = new HashSet<String>();
static {
@ -100,6 +105,7 @@ public class Grammar implements AttributeResolver {
public static final Set<String> semPredOptions = new HashSet<String>();
static {
semPredOptions.add(LeftRecursiveRuleTransformer.PRECEDENCE_OPTION_NAME);
semPredOptions.add("fail");
}
@ -123,7 +129,7 @@ public class Grammar implements AttributeResolver {
public GrammarRootAST ast;
/** Track stream used to create this grammar */
@NotNull
public final TokenStream tokenStream;
public final org.antlr.runtime.TokenStream tokenStream;
public String text; // testing only
public String fileName;
@ -225,33 +231,44 @@ public class Grammar implements AttributeResolver {
}
/** For testing */
public Grammar(String grammarText) throws RecognitionException {
public Grammar(String grammarText) throws org.antlr.runtime.RecognitionException {
this(GRAMMAR_FROM_STRING_NAME, grammarText, null);
}
public Grammar(String grammarText, LexerGrammar tokenVocabSource) throws org.antlr.runtime.RecognitionException {
this(GRAMMAR_FROM_STRING_NAME, grammarText, tokenVocabSource, null);
}
/** For testing */
public Grammar(String grammarText, ANTLRToolListener listener)
throws RecognitionException
throws org.antlr.runtime.RecognitionException
{
this(GRAMMAR_FROM_STRING_NAME, grammarText, listener);
}
/** For testing; builds trees, does sem anal */
public Grammar(String fileName, String grammarText)
throws RecognitionException
throws org.antlr.runtime.RecognitionException
{
this(fileName, grammarText, null);
}
/** For testing; builds trees, does sem anal */
public Grammar(String fileName, String grammarText, @Nullable ANTLRToolListener listener)
throws RecognitionException
throws org.antlr.runtime.RecognitionException
{
this(fileName, grammarText, null, listener);
}
/** For testing; builds trees, does sem anal */
public Grammar(String fileName, String grammarText, Grammar tokenVocabSource, @Nullable ANTLRToolListener listener)
throws org.antlr.runtime.RecognitionException
{
this.text = grammarText;
this.fileName = fileName;
this.tool = new Tool();
this.tool.addListener(listener);
ANTLRStringStream in = new ANTLRStringStream(grammarText);
org.antlr.runtime.ANTLRStringStream in = new org.antlr.runtime.ANTLRStringStream(grammarText);
in.name = fileName;
this.ast = tool.load(fileName, in);
@ -267,8 +284,8 @@ public class Grammar implements AttributeResolver {
// ensure each node has pointer to surrounding grammar
final Grammar thiz = this;
TreeVisitor v = new TreeVisitor(new GrammarASTAdaptor());
v.visit(ast, new TreeVisitorAction() {
org.antlr.runtime.tree.TreeVisitor v = new org.antlr.runtime.tree.TreeVisitor(new GrammarASTAdaptor());
v.visit(ast, new org.antlr.runtime.tree.TreeVisitorAction() {
@Override
public Object pre(Object t) { ((GrammarAST)t).g = thiz; return t; }
@Override
@ -276,6 +293,10 @@ public class Grammar implements AttributeResolver {
});
initTokenSymbolTables();
if (tokenVocabSource != null) {
importVocab(tokenVocabSource);
}
tool.process(this, false);
}
@ -513,6 +534,15 @@ public class Grammar implements AttributeResolver {
return tokenName;
}
/** Return the rule names of this grammar as an array indexed by rule index.
 *  Used to feed {@code createLexerInterpreter}/{@code createParserInterpreter}
 *  below, which pass the names to the interpreter constructors.
 *  Assumes every {@code Rule.index} is unique and lies in
 *  [0, rules.size()) — TODO confirm that invariant always holds here.
 */
public String[] getRuleNames() {
String[] result = new String[rules.size()];
for (Rule rule : rules.values()) {
// place each name at the slot matching its rule index, not map order
result[rule.index] = rule.name;
}
return result;
}
public List<String> getTokenDisplayNames(IntegerList types) {
List<String> names = new ArrayList<String>();
for (int t : types.toArray()) names.add(getTokenDisplayName(t));
@ -717,7 +747,7 @@ public class Grammar implements AttributeResolver {
return 0;
}
public TokenStream getTokenStream() {
public org.antlr.runtime.TokenStream getTokenStream() {
if ( ast!=null ) return ast.tokenStream;
return null;
}
@ -782,7 +812,7 @@ public class Grammar implements AttributeResolver {
// TODO: allow doc comment in there
};
GrammarASTAdaptor adaptor = new GrammarASTAdaptor(ast.token.getInputStream());
TreeWizard wiz = new TreeWizard(adaptor,ANTLRParser.tokenNames);
org.antlr.runtime.tree.TreeWizard wiz = new org.antlr.runtime.tree.TreeWizard(adaptor,ANTLRParser.tokenNames);
List<Pair<GrammarAST,GrammarAST>> lexerRuleToStringLiteral =
new ArrayList<Pair<GrammarAST,GrammarAST>>();
@ -792,7 +822,7 @@ public class Grammar implements AttributeResolver {
for (GrammarAST r : ruleNodes) {
//tool.log("grammar", r.toStringTree());
// System.out.println("chk: "+r.toStringTree());
Tree name = r.getChild(0);
org.antlr.runtime.tree.Tree name = r.getChild(0);
if ( name.getType()==ANTLRParser.TOKEN_REF ) {
// check rule against patterns
boolean isLitRule;
@ -808,7 +838,7 @@ public class Grammar implements AttributeResolver {
}
protected static boolean defAlias(GrammarAST r, String pattern,
TreeWizard wiz,
org.antlr.runtime.tree.TreeWizard wiz,
List<Pair<GrammarAST,GrammarAST>> lexerRuleToStringLiteral)
{
HashMap<String, Object> nodes = new HashMap<String, Object>();
@ -835,8 +865,27 @@ public class Grammar implements AttributeResolver {
return strings;
}
/** Record the DFA computed for decision number {@code decision} so it can
 *  be retrieved later. Overwrites any DFA previously stored for that decision.
 */
public void setLookaheadDFA(int decision, DFA lookaheadDFA) {
decisionDFAs.put(decision, lookaheadDFA);
}
/** Create a {@link LexerInterpreter} that tokenizes {@code input} using this
 *  grammar's ATN, token names, rule names, and modes.
 *  Only meaningful for lexer or combined grammars; for a combined grammar the
 *  request is delegated to the implicit lexer grammar, which holds the token
 *  rules. In the non-combined case {@code this} must be a LexerGrammar (the
 *  cast below relies on that).
 *  @throws IllegalStateException if this grammar is parser-only.
 */
public LexerInterpreter createLexerInterpreter(CharStream input) {
if (this.isParser()) {
throw new IllegalStateException("A lexer interpreter can only be created for a lexer or combined grammar.");
}
if (this.isCombined()) {
// combined grammar: lexer rules live in the implicit lexer grammar
return implicitLexer.createLexerInterpreter(input);
}
return new LexerInterpreter(fileName, Arrays.asList(getTokenNames()), Arrays.asList(getRuleNames()), ((LexerGrammar)this).modes.keySet(), atn, input);
}
/** Create a {@link ParserInterpreter} that parses {@code tokenStream} using
 *  this grammar's ATN, token names, and rule names.
 *  Only meaningful for parser or combined grammars.
 *  @throws IllegalStateException if this grammar is lexer-only.
 */
public ParserInterpreter createParserInterpreter(TokenStream tokenStream) {
if (this.isLexer()) {
throw new IllegalStateException("A parser interpreter can only be created for a parser or combined grammar.");
}
return new ParserInterpreter(fileName, Arrays.asList(getTokenNames()), Arrays.asList(getRuleNames()), atn, tokenStream);
}
}

View File

@ -0,0 +1,54 @@
/*
* [The "BSD license"]
* Copyright (c) 2013 Terence Parr
* Copyright (c) 2013 Sam Harwell
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package org.antlr.v4.tool.interp;
import org.antlr.v4.runtime.ParserRuleContext;
import org.antlr.v4.runtime.misc.Nullable;
/** A {@link ParserRuleContext} specialization used by the ParserInterpreter.
 *  A generated parser overrides {@code getRuleIndex()} in each generated
 *  context subclass; the interpreter has no such subclasses, so it carries
 *  the rule index in a field instead. That is all the interpreter needs to
 *  build proper parse trees.
 */
public class InterpreterRuleContext extends ParserRuleContext {
	/** Index of the grammar rule this context instance represents. */
	private final int contextRuleIndex;

	public InterpreterRuleContext(@Nullable ParserRuleContext parent,
								  int invokingStateNumber,
								  int ruleIndex)
	{
		super(parent, invokingStateNumber);
		this.contextRuleIndex = ruleIndex;
	}

	@Override
	public int getRuleIndex() {
		return contextRuleIndex;
	}
}

View File

@ -30,101 +30,70 @@
package org.antlr.v4.tool.interp;
import org.antlr.v4.Tool;
import org.antlr.v4.runtime.ANTLRInputStream;
import org.antlr.v4.runtime.CharStream;
import org.antlr.v4.runtime.CommonTokenFactory;
import org.antlr.v4.runtime.Lexer;
import org.antlr.v4.runtime.Token;
import org.antlr.v4.runtime.TokenFactory;
import org.antlr.v4.runtime.TokenSource;
import org.antlr.v4.runtime.atn.ATN;
import org.antlr.v4.runtime.atn.ATNType;
import org.antlr.v4.runtime.atn.LexerATNSimulator;
import org.antlr.v4.runtime.atn.PredictionContextCache;
import org.antlr.v4.runtime.dfa.DFA;
import org.antlr.v4.runtime.misc.Pair;
import org.antlr.v4.tool.LexerGrammar;
public class LexerInterpreter implements TokenSource {
protected LexerGrammar g;
protected LexerATNSimulator interp;
protected CharStream input;
protected Pair<TokenSource, CharStream> tokenFactorySourcePair;
import java.util.Collection;
/** How to create token objects */
protected TokenFactory<?> _factory = CommonTokenFactory.DEFAULT;
public class LexerInterpreter extends Lexer {
protected final String grammarFileName;
protected final ATN atn;
protected final String[] tokenNames;
protected final String[] ruleNames;
protected final String[] modeNames;
protected final DFA[] _decisionToDFA;
protected final PredictionContextCache _sharedContextCache =
new PredictionContextCache();
public LexerInterpreter(LexerGrammar g, String inputString) {
this(g);
setInput(inputString);
}
public LexerInterpreter(String grammarFileName, Collection<String> tokenNames, Collection<String> ruleNames, Collection<String> modeNames, ATN atn, CharStream input) {
super(input);
public LexerInterpreter(LexerGrammar g) {
Tool antlr = new Tool();
antlr.process(g,false);
_decisionToDFA = new DFA[g.atn.getNumberOfDecisions()];
if (atn.grammarType != ATNType.LEXER) {
throw new IllegalArgumentException("The ATN must be a lexer ATN.");
}
this.grammarFileName = grammarFileName;
this.atn = atn;
this.tokenNames = tokenNames.toArray(new String[tokenNames.size()]);
this.ruleNames = ruleNames.toArray(new String[ruleNames.size()]);
this.modeNames = modeNames.toArray(new String[modeNames.size()]);
this._decisionToDFA = new DFA[atn.getNumberOfDecisions()];
for (int i = 0; i < _decisionToDFA.length; i++) {
_decisionToDFA[i] = new DFA(g.atn.getDecisionState(i), i);
_decisionToDFA[i] = new DFA(atn.getDecisionState(i), i);
}
interp = new LexerATNSimulator(g.atn,_decisionToDFA,_sharedContextCache);
}
public void setInput(String inputString) {
setInput(new ANTLRInputStream(inputString));
}
public void setInput(CharStream input) {
this.input = input;
this.tokenFactorySourcePair = new Pair<TokenSource, CharStream>(this, input);
this._interp = new LexerATNSimulator(atn,_decisionToDFA,_sharedContextCache);
}
@Override
public String getSourceName() { return g.name; }
@Override
public void setTokenFactory(TokenFactory<?> factory) {
this._factory = factory;
public ATN getATN() {
return atn;
}
@Override
public TokenFactory<?> getTokenFactory() {
return _factory;
public String getGrammarFileName() {
return grammarFileName;
}
@Override
public int getCharPositionInLine() {
return 0;
public String[] getTokenNames() {
return tokenNames;
}
@Override
public int getLine() {
return 0;
public String[] getRuleNames() {
return ruleNames;
}
@Override
public CharStream getInputStream() {
return input;
}
@Override
public Token nextToken() {
// TODO: Deal with off channel tokens
int start = input.index();
int tokenStartCharPositionInLine = interp.getCharPositionInLine();
int tokenStartLine = interp.getLine();
int mark = input.mark(); // make sure unbuffered stream holds chars long enough to get text
try {
int ttype = interp.match(input, Lexer.DEFAULT_MODE);
int stop = input.index()-1;
return _factory.create(tokenFactorySourcePair, ttype, null, Token.DEFAULT_CHANNEL, start, stop,
tokenStartLine, tokenStartCharPositionInLine);
}
finally {
input.release(mark);
}
public String[] getModeNames() {
return modeNames;
}
}

View File

@ -1,131 +1,277 @@
/*
* [The "BSD license"]
* Copyright (c) 2012 Terence Parr
* Copyright (c) 2012 Sam Harwell
* All rights reserved.
* Copyright (c) 2013 Terence Parr
* Copyright (c) 2013 Sam Harwell
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package org.antlr.v4.tool.interp;
import org.antlr.v4.Tool;
import org.antlr.runtime.Token;
import org.antlr.v4.runtime.FailedPredicateException;
import org.antlr.v4.runtime.Parser;
import org.antlr.v4.runtime.ParserRuleContext;
import org.antlr.v4.runtime.TokenStream;
import org.antlr.v4.runtime.atn.ATN;
import org.antlr.v4.runtime.atn.ATNState;
import org.antlr.v4.runtime.atn.ActionTransition;
import org.antlr.v4.runtime.atn.AtomTransition;
import org.antlr.v4.runtime.atn.DecisionState;
import org.antlr.v4.runtime.atn.LoopEndState;
import org.antlr.v4.runtime.atn.ParserATNSimulator;
import org.antlr.v4.runtime.atn.PrecedencePredicateTransition;
import org.antlr.v4.runtime.atn.PredicateTransition;
import org.antlr.v4.runtime.atn.PredictionContextCache;
import org.antlr.v4.runtime.atn.RuleStartState;
import org.antlr.v4.runtime.atn.RuleStopState;
import org.antlr.v4.runtime.atn.RuleTransition;
import org.antlr.v4.runtime.atn.StarLoopEntryState;
import org.antlr.v4.runtime.atn.Transition;
import org.antlr.v4.runtime.dfa.DFA;
import org.antlr.v4.runtime.misc.NotNull;
import org.antlr.v4.runtime.misc.Nullable;
import org.antlr.v4.tool.Grammar;
import org.antlr.v4.runtime.misc.Pair;
public class ParserInterpreter {
public static class DummyParser extends Parser {
public final ATN atn;
public final DFA[] decisionToDFA; // not shared for interp
public final PredictionContextCache sharedContextCache =
new PredictionContextCache();
import java.util.ArrayDeque;
import java.util.BitSet;
import java.util.Collection;
import java.util.Deque;
public Grammar g;
public DummyParser(Grammar g, ATN atn, TokenStream input) {
super(input);
this.g = g;
this.atn = atn;
this.decisionToDFA = new DFA[atn.getNumberOfDecisions()];
for (int i = 0; i < decisionToDFA.length; i++) {
decisionToDFA[i] = new DFA(atn.getDecisionState(i), i);
/** A parser simulator that mimics what ANTLR's generated
* parser code does. A ParserATNSimulator is used to make
* predictions via adaptivePredict but this class moves a pointer through the
* ATN to simulate parsing. ParserATNSimulator just
* makes us efficient rather than having to backtrack, for example.
*
* This properly creates parse trees even for left recursive rules.
*
* We rely on the left recursive rule invocation and special predicate
* transitions to make left recursive rules work.
*
* See TestParserInterpreter for examples.
*/
public class ParserInterpreter extends Parser {
protected final String grammarFileName;
protected final ATN atn;
protected final BitSet pushRecursionContextStates;
protected final DFA[] decisionToDFA; // not shared like it is for generated parsers
protected final PredictionContextCache sharedContextCache =
new PredictionContextCache();
protected final String[] tokenNames;
protected final String[] ruleNames;
protected final Deque<Pair<ParserRuleContext, Integer>> _parentContextStack = new ArrayDeque<Pair<ParserRuleContext, Integer>>();
protected final Deque<InterpreterRuleContext> _contextStack = new ArrayDeque<InterpreterRuleContext>();
public ParserInterpreter(String grammarFileName, Collection<String> tokenNames, Collection<String> ruleNames, ATN atn, TokenStream input) {
super(input);
this.grammarFileName = grammarFileName;
this.atn = atn;
this.tokenNames = tokenNames.toArray(new String[tokenNames.size()]);
this.ruleNames = ruleNames.toArray(new String[ruleNames.size()]);
this.decisionToDFA = new DFA[atn.getNumberOfDecisions()];
for (int i = 0; i < decisionToDFA.length; i++) {
decisionToDFA[i] = new DFA(atn.getDecisionState(i), i);
}
// identify the ATN states where pushNewRecursionContext must be called
this.pushRecursionContextStates = new BitSet(atn.states.size());
for (ATNState state : atn.states) {
if (!(state instanceof StarLoopEntryState)) {
continue;
}
RuleStartState ruleStartState = atn.ruleToStartState[state.ruleIndex];
if (!ruleStartState.isPrecedenceRule) {
continue;
}
ATNState maybeLoopEndState = state.transition(state.getNumberOfTransitions() - 1).target;
if (!(maybeLoopEndState instanceof LoopEndState)) {
continue;
}
if (maybeLoopEndState.epsilonOnlyTransitions && maybeLoopEndState.transition(0).target instanceof RuleStopState) {
this.pushRecursionContextStates.set(state.stateNumber);
}
}
@Override
public String getGrammarFileName() {
throw new UnsupportedOperationException("not implemented");
}
@Override
public String[] getRuleNames() {
return g.rules.keySet().toArray(new String[g.rules.size()]);
}
@Override
public String[] getTokenNames() {
return g.getTokenNames();
}
@Override
public ATN getATN() {
return atn;
}
// get atn simulator that knows how to do predictions
setInterpreter(new ParserATNSimulator(this, atn,
decisionToDFA,
sharedContextCache));
}
protected Grammar g;
public DummyParser parser;
protected ParserATNSimulator atnSimulator;
protected TokenStream input;
public ParserInterpreter(@NotNull Grammar g) {
this.g = g;
@Override
public ATN getATN() {
return atn;
}
public ParserInterpreter(@NotNull Grammar g, @NotNull TokenStream input) {
Tool antlr = new Tool();
antlr.process(g,false);
parser = new DummyParser(g, g.atn, input);
atnSimulator =
new ParserATNSimulator(parser, g.atn, parser.decisionToDFA,
parser.sharedContextCache);
@Override
public String[] getTokenNames() {
return tokenNames;
}
public int adaptivePredict(@NotNull TokenStream input, int decision,
@Nullable ParserRuleContext outerContext)
{
return atnSimulator.adaptivePredict(input, decision, outerContext);
@Override
public String[] getRuleNames() {
return ruleNames;
}
public int matchATN(@NotNull TokenStream input,
@NotNull ATNState startState)
{
if (startState.getNumberOfTransitions() == 1) {
return 1;
}
else if (startState instanceof DecisionState) {
return atnSimulator.adaptivePredict(input, ((DecisionState)startState).decision, null);
}
else if (startState.getNumberOfTransitions() > 0) {
return 1;
@Override
public String getGrammarFileName() {
return grammarFileName;
}
/** Begin parsing at startRuleIndex */
public ParserRuleContext parse(int startRuleIndex) {
RuleStartState startRuleStartState = atn.ruleToStartState[startRuleIndex];
InterpreterRuleContext rootContext = new InterpreterRuleContext(null, ATNState.INVALID_STATE_NUMBER, startRuleIndex);
if (startRuleStartState.isPrecedenceRule) {
enterRecursionRule(rootContext, startRuleIndex, 0);
}
else {
return -1;
enterRule(rootContext, startRuleStartState.stateNumber, startRuleIndex);
}
while ( true ) {
ATNState p = getATNState();
switch ( p.getStateType() ) {
case ATNState.RULE_STOP :
// pop; return from rule
if ( _ctx.isEmpty() ) {
exitRule();
return rootContext;
}
visitRuleStopState(p);
break;
default :
visitState(p);
break;
}
}
}
public ParserATNSimulator getATNSimulator() {
return atnSimulator;
@Override
public void enterRecursionRule(ParserRuleContext localctx, int ruleIndex, int precedence) {
_parentContextStack.push(new Pair<ParserRuleContext, Integer>(_ctx, localctx.invokingState));
super.enterRecursionRule(localctx, ruleIndex, precedence);
}
protected ATNState getATNState() {
return atn.states.get(getState());
}
protected void visitState(ATNState p) {
int edge;
if (p.getNumberOfTransitions() > 1) {
edge = getInterpreter().adaptivePredict(_input, ((DecisionState)p).decision, _ctx);
}
else {
edge = 1;
}
Transition transition = p.transition(edge - 1);
switch (transition.getSerializationType()) {
case Transition.EPSILON:
if (pushRecursionContextStates.get(p.stateNumber) && !(transition.target instanceof LoopEndState)) {
InterpreterRuleContext ctx = new InterpreterRuleContext(_parentContextStack.peek().a, _parentContextStack.peek().b, _ctx.getRuleIndex());
pushNewRecursionContext(ctx, atn.ruleToStartState[p.ruleIndex].stateNumber, _ctx.getRuleIndex());
}
break;
case Transition.ATOM:
match(((AtomTransition)transition).label);
break;
case Transition.RANGE:
case Transition.SET:
case Transition.NOT_SET:
if (!transition.matches(_input.LA(1), Token.MIN_TOKEN_TYPE, 65535)) {
_errHandler.recoverInline(this);
}
matchWildcard();
break;
case Transition.WILDCARD:
matchWildcard();
break;
case Transition.RULE:
RuleStartState ruleStartState = (RuleStartState)transition.target;
int ruleIndex = ruleStartState.ruleIndex;
InterpreterRuleContext ctx = new InterpreterRuleContext(_ctx, p.stateNumber, ruleIndex);
if (ruleStartState.isPrecedenceRule) {
enterRecursionRule(ctx, ruleIndex, ((RuleTransition)transition).precedence);
}
else {
enterRule(ctx, transition.target.stateNumber, ruleIndex);
}
break;
case Transition.PREDICATE:
PredicateTransition predicateTransition = (PredicateTransition)transition;
if (!sempred(_ctx, predicateTransition.ruleIndex, predicateTransition.predIndex)) {
throw new FailedPredicateException(this);
}
break;
case Transition.ACTION:
ActionTransition actionTransition = (ActionTransition)transition;
action(_ctx, actionTransition.ruleIndex, actionTransition.actionIndex);
break;
case Transition.PRECEDENCE:
if (!precpred(_ctx, ((PrecedencePredicateTransition)transition).precedence)) {
throw new FailedPredicateException(this, String.format("precpred(_ctx, %d)", ((PrecedencePredicateTransition)transition).precedence));
}
break;
default:
throw new UnsupportedOperationException("Unrecognized ATN transition type.");
}
setState(transition.target.stateNumber);
}
protected void visitRuleStopState(ATNState p) {
RuleStartState ruleStartState = atn.ruleToStartState[p.ruleIndex];
if (ruleStartState.isPrecedenceRule) {
Pair<ParserRuleContext, Integer> parentContext = _parentContextStack.pop();
unrollRecursionContexts(parentContext.a);
setState(parentContext.b);
}
else {
exitRule();
}
RuleTransition ruleTransition = (RuleTransition)atn.states.get(getState()).transition(0);
setState(ruleTransition.followState.stateNumber);
}
}

View File

@ -58,6 +58,7 @@ import org.antlr.v4.runtime.misc.IntegerList;
import org.antlr.v4.runtime.misc.Interval;
import org.antlr.v4.runtime.misc.NotNull;
import org.antlr.v4.runtime.misc.Nullable;
import org.antlr.v4.runtime.misc.Pair;
import org.antlr.v4.runtime.tree.ParseTree;
import org.antlr.v4.semantics.SemanticPipeline;
import org.antlr.v4.tool.ANTLRMessage;
@ -486,6 +487,56 @@ public abstract class BaseTest {
return output;
}
/*
public ParseTree execParser(String startRuleName, String input,
String parserName, String lexerName)
throws Exception
{
Pair<Parser, Lexer> pl = getParserAndLexer(input, parserName, lexerName);
Parser parser = pl.a;
return execStartRule(startRuleName, parser);
}
*/
public ParseTree execStartRule(String startRuleName, Parser parser)
throws IllegalAccessException, InvocationTargetException,
NoSuchMethodException
{
Method startRule = null;
Object[] args = null;
try {
startRule = parser.getClass().getMethod(startRuleName);
}
catch (NoSuchMethodException nsme) {
// try with int _p arg for recursive func
startRule = parser.getClass().getMethod(startRuleName, int.class);
args = new Integer[] {0};
}
ParseTree result = (ParseTree)startRule.invoke(parser, args);
// System.out.println("parse tree = "+result.toStringTree(parser));
return result;
}
public Pair<Parser, Lexer> getParserAndLexer(String input,
String parserName, String lexerName)
throws Exception
{
final Class<? extends Lexer> lexerClass = loadLexerClassFromTempDir(lexerName);
final Class<? extends Parser> parserClass = loadParserClassFromTempDir(parserName);
ANTLRInputStream in = new ANTLRInputStream(new StringReader(input));
Class<? extends Lexer> c = lexerClass.asSubclass(Lexer.class);
Constructor<? extends Lexer> ctor = c.getConstructor(CharStream.class);
Lexer lexer = ctor.newInstance(in);
Class<? extends Parser> pc = parserClass.asSubclass(Parser.class);
Constructor<? extends Parser> pctor = pc.getConstructor(TokenStream.class);
CommonTokenStream tokens = new CommonTokenStream(lexer);
Parser parser = pctor.newInstance(tokens);
return new Pair<Parser, Lexer>(parser, lexer);
}
protected String execParser(String grammarFileName,
String grammarStr,
String parserName,

View File

@ -0,0 +1,131 @@
/*
* [The "BSD license"]
* Copyright (c) 2012 Terence Parr
* Copyright (c) 2012 Sam Harwell
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package org.antlr.v4.test;
import org.antlr.v4.Tool;
import org.antlr.v4.runtime.Parser;
import org.antlr.v4.runtime.ParserRuleContext;
import org.antlr.v4.runtime.TokenStream;
import org.antlr.v4.runtime.atn.ATN;
import org.antlr.v4.runtime.atn.ATNState;
import org.antlr.v4.runtime.atn.DecisionState;
import org.antlr.v4.runtime.atn.ParserATNSimulator;
import org.antlr.v4.runtime.atn.PredictionContextCache;
import org.antlr.v4.runtime.dfa.DFA;
import org.antlr.v4.runtime.misc.NotNull;
import org.antlr.v4.runtime.misc.Nullable;
import org.antlr.v4.tool.Grammar;
/** Test-only harness that wires a Grammar's ATN to a ParserATNSimulator so
 *  tests can drive adaptivePredict directly, without generating a parser.
 *  Extracted from the old tool ParserInterpreter (see the TestATNInterpreter
 *  and TestATNParserPrediction call sites elsewhere in this commit).
 */
public class ParserInterpreterForTesting {
/** Minimal concrete Parser; exists only so ParserATNSimulator has a
 *  recognizer to query for rule/token names and the ATN. */
public static class DummyParser extends Parser {
public final ATN atn;
public final DFA[] decisionToDFA; // not shared for interp
public final PredictionContextCache sharedContextCache =
new PredictionContextCache();
public Grammar g;
public DummyParser(Grammar g, ATN atn, TokenStream input) {
super(input);
this.g = g;
this.atn = atn;
// one fresh DFA per decision; deliberately not shared across parsers
this.decisionToDFA = new DFA[atn.getNumberOfDecisions()];
for (int i = 0; i < decisionToDFA.length; i++) {
decisionToDFA[i] = new DFA(atn.getDecisionState(i), i);
}
}
@Override
public String getGrammarFileName() {
// never needed by the simulator in these tests
throw new UnsupportedOperationException("not implemented");
}
@Override
public String[] getRuleNames() {
// NOTE(review): key-set order is assumed to match rule-index order here
// — TODO confirm; Grammar.getRuleNames() indexes by rule.index instead
return g.rules.keySet().toArray(new String[g.rules.size()]);
}
@Override
public String[] getTokenNames() {
return g.getTokenNames();
}
@Override
public ATN getATN() {
return atn;
}
}
protected Grammar g;
public DummyParser parser;
protected ParserATNSimulator atnSimulator;
protected TokenStream input;
/** Partially initialized form; no simulator is created. */
public ParserInterpreterForTesting(@NotNull Grammar g) {
this.g = g;
}
/** Fully process the grammar (builds its ATN) and create the simulator. */
public ParserInterpreterForTesting(@NotNull Grammar g, @NotNull TokenStream input) {
Tool antlr = new Tool();
antlr.process(g,false);
parser = new DummyParser(g, g.atn, input);
atnSimulator =
new ParserATNSimulator(parser, g.atn, parser.decisionToDFA,
parser.sharedContextCache);
}
/** Run prediction for the given decision; returns the predicted alt number. */
public int adaptivePredict(@NotNull TokenStream input, int decision,
@Nullable ParserRuleContext outerContext)
{
return atnSimulator.adaptivePredict(input, decision, outerContext);
}
/** Predict starting from an arbitrary ATN state: 1 if the state has a
 *  single (forced) transition, the adaptive prediction if it is a decision
 *  state, 1 if it has any transitions at all, else -1. Branch order matters:
 *  the single-transition check wins even for decision states. */
public int matchATN(@NotNull TokenStream input,
@NotNull ATNState startState)
{
if (startState.getNumberOfTransitions() == 1) {
return 1;
}
else if (startState instanceof DecisionState) {
return atnSimulator.adaptivePredict(input, ((DecisionState)startState).decision, null);
}
else if (startState.getNumberOfTransitions() > 0) {
return 1;
}
else {
return -1;
}
}
public ParserATNSimulator getATNSimulator() {
return atnSimulator;
}
}

View File

@ -43,10 +43,9 @@ import org.antlr.v4.tool.DOTGenerator;
import org.antlr.v4.tool.Grammar;
import org.antlr.v4.tool.LexerGrammar;
import org.antlr.v4.tool.Rule;
import org.antlr.v4.tool.interp.ParserInterpreter;
import org.junit.Test;
import static org.junit.Assert.*;
import static org.junit.Assert.assertEquals;
// NOTICE: TOKENS IN LEXER, PARSER MUST BE SAME OR TOKEN TYPE MISMATCH
// NOTICE: TOKENS IN LEXER, PARSER MUST BE SAME OR TOKEN TYPE MISMATCH
@ -393,7 +392,7 @@ public class TestATNInterpreter extends BaseTest {
IntTokenStream input = new IntTokenStream(types);
System.out.println("input="+input.types);
ParserInterpreter interp = new ParserInterpreter(g, input);
ParserInterpreterForTesting interp = new ParserInterpreterForTesting(g, input);
ATNState startState = atn.ruleToStartState[g.getRule("a").index];
if ( startState.transition(0).target instanceof BlockStartState ) {
startState = startState.transition(0).target;

View File

@ -37,9 +37,7 @@ import org.antlr.v4.runtime.NoViableAltException;
import org.antlr.v4.runtime.ParserRuleContext;
import org.antlr.v4.runtime.TokenStream;
import org.antlr.v4.runtime.atn.ATN;
import org.antlr.v4.runtime.atn.DecisionState;
import org.antlr.v4.runtime.atn.LexerATNSimulator;
import org.antlr.v4.runtime.atn.ParserATNSimulator;
import org.antlr.v4.runtime.atn.PredictionContextCache;
import org.antlr.v4.runtime.dfa.DFA;
import org.antlr.v4.runtime.misc.IntegerList;
@ -47,10 +45,9 @@ import org.antlr.v4.tool.DOTGenerator;
import org.antlr.v4.tool.Grammar;
import org.antlr.v4.tool.LexerGrammar;
import org.antlr.v4.tool.Rule;
import org.antlr.v4.tool.interp.ParserInterpreter;
import org.junit.Test;
import static org.junit.Assert.*;
import static org.junit.Assert.assertEquals;
// NOTICE: TOKENS IN LEXER, PARSER MUST BE SAME OR TOKEN TYPE MISMATCH
// NOTICE: TOKENS IN LEXER, PARSER MUST BE SAME OR TOKEN TYPE MISMATCH
@ -488,7 +485,7 @@ public class TestATNParserPrediction extends BaseTest {
// Check ATN prediction
// ParserATNSimulator interp = new ParserATNSimulator(atn);
TokenStream input = new IntTokenStream(types);
ParserInterpreter interp = new ParserInterpreter(g, input);
ParserInterpreterForTesting interp = new ParserInterpreterForTesting(g, input);
int alt = interp.adaptivePredict(input, decision, ParserRuleContext.EMPTY);
assertEquals(expectedAlt, alt);
@ -515,7 +512,7 @@ public class TestATNParserPrediction extends BaseTest {
g.importVocab(lg);
semanticProcess(g);
ParserInterpreter interp = new ParserInterpreter(g, null);
ParserInterpreterForTesting interp = new ParserInterpreterForTesting(g, null);
for (int i=0; i<inputString.length; i++) {
// Check DFA
IntegerList types = getTokenTypesViaATN(inputString[i], lexInterp);

View File

@ -61,8 +61,7 @@ public class TestBufferedTokenStream extends BaseTest {
// Tokens: 012345678901234567
// Input: x = 3 * 0 + 2 * 0;
CharStream input = new ANTLRInputStream("x = 3 * 0 + 2 * 0;");
LexerInterpreter lexEngine = new LexerInterpreter(g);
lexEngine.setInput(input);
LexerInterpreter lexEngine = g.createLexerInterpreter(input);
TokenStream tokens = createTokenStream(lexEngine);
String result = tokens.LT(1).getText();
@ -83,8 +82,7 @@ public class TestBufferedTokenStream extends BaseTest {
// Tokens: 012345678901234567
// Input: x = 3 * 0 + 2 * 0;
CharStream input = new ANTLRInputStream("x = 3 * 0 + 2 * 0;");
LexerInterpreter lexEngine = new LexerInterpreter(g);
lexEngine.setInput(input);
LexerInterpreter lexEngine = g.createLexerInterpreter(input);
TokenStream tokens = createTokenStream(lexEngine);
String result = tokens.LT(2).getText();
@ -105,8 +103,7 @@ public class TestBufferedTokenStream extends BaseTest {
// Tokens: 012345678901234567
// Input: x = 3 * 0 + 2 * 0;
CharStream input = new ANTLRInputStream("x = 3 * 0 + 2 * 0;");
LexerInterpreter lexEngine = new LexerInterpreter(g);
lexEngine.setInput(input);
LexerInterpreter lexEngine = g.createLexerInterpreter(input);
TokenStream tokens = createTokenStream(lexEngine);
int i = 1;
@ -136,8 +133,7 @@ public class TestBufferedTokenStream extends BaseTest {
// Tokens: 012345678901234567
// Input: x = 3 * 0 + 2 * 0;
CharStream input = new ANTLRInputStream("x = 3 * 0 + 2 * 0;");
LexerInterpreter lexEngine = new LexerInterpreter(g);
lexEngine.setInput(input);
LexerInterpreter lexEngine = g.createLexerInterpreter(input);
TokenStream tokens = createTokenStream(lexEngine);
Token t = tokens.LT(1);
@ -164,8 +160,7 @@ public class TestBufferedTokenStream extends BaseTest {
// Tokens: 012345678901234567
// Input: x = 3 * 0 + 2 * 0;
CharStream input = new ANTLRInputStream("x = 3 * 0 + 2 * 0;");
LexerInterpreter lexEngine = new LexerInterpreter(g);
lexEngine.setInput(input);
LexerInterpreter lexEngine = g.createLexerInterpreter(input);
TokenStream tokens = createTokenStream(lexEngine);
tokens.consume(); // get x into buffer

View File

@ -349,4 +349,32 @@ public class TestParserExec extends BaseTest {
assertEquals("x\ny\n", found);
}
/**
 * This is a regression test for antlr/antlr4#334 "BailErrorStrategy: bails
 * out on proper input".
 * https://github.com/antlr/antlr4/issues/334
 */
@Test public void testPredictionIssue334() {
// Grammar under test: the start rule installs BailErrorStrategy in its @init
// action and prints the finished parse tree from its @after action, so the
// expected output below is produced by the parse itself, not by the harness.
String grammar =
"grammar T;\n" +
"\n" +
"file @init{setErrorHandler(new BailErrorStrategy());} \n" +
"@after {System.out.println($ctx.toStringTree(this));}\n" +
" : item (SEMICOLON item)* SEMICOLON? EOF ;\n" +
"item : A B?;\n" +
"\n" +
"\n" +
"\n" +
"SEMICOLON: ';';\n" +
"\n" +
"A : 'a'|'A';\n" +
"B : 'b'|'B';\n" +
"\n" +
"WS : [ \\r\\t\\n]+ -> skip;\n";
// A single 'a' is valid input (one item, optional B and trailing SEMICOLON
// both absent); before the fix the bail strategy threw on it anyway.
String input = "a";
// BaseTest helper: generates TParser/TLexer from the grammar, runs rule
// "file" on the input, and returns captured stdout (the @after println).
String found = execParser("T.g4", grammar, "TParser", "TLexer", "file", input, false);
assertEquals("(file (item a) <EOF>)\n", found);
// stderrDuringParse is populated by the harness on any recognition error;
// null here proves the BailErrorStrategy did not bail on valid input.
assertNull(stderrDuringParse);
}
}

View File

@ -0,0 +1,211 @@
/*
* [The "BSD license"]
* Copyright (c) 2013 Terence Parr
* Copyright (c) 2013 Sam Harwell
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package org.antlr.v4.test;
import org.antlr.v4.runtime.ANTLRInputStream;
import org.antlr.v4.runtime.CommonTokenStream;
import org.antlr.v4.runtime.atn.ATN;
import org.antlr.v4.runtime.tree.ParseTree;
import org.antlr.v4.tool.Grammar;
import org.antlr.v4.tool.LexerGrammar;
import org.antlr.v4.tool.interp.LexerInterpreter;
import org.antlr.v4.tool.interp.ParserInterpreter;
import org.junit.Test;
import static org.junit.Assert.assertEquals;
import java.util.Arrays;
/**
 * Tests for the runtime {@code ParserInterpreter}: each case builds a small
 * lexer/parser grammar pair in memory, interprets the parser ATN directly
 * (no code generation), and compares the resulting parse tree against an
 * expected LISP-style tree string via {@link #testInterp}.
 */
public class TestParserInterpreter extends BaseTest {
// Single-token rule: the minimal interpreter round trip.
@Test public void testA() throws Exception {
LexerGrammar lg = new LexerGrammar(
"lexer grammar L;\n" +
"A : 'a' ;\n");
Grammar g = new Grammar(
"parser grammar T;\n" +
"s : A ;",
lg);
testInterp(lg, g, "s", "a", "(s a)");
}
// Two-alternative decision; the {;} action forces the alt to be non-trivial
// (presumably so it cannot be collapsed/optimized away — TODO confirm).
@Test public void testAorB() throws Exception {
LexerGrammar lg = new LexerGrammar(
"lexer grammar L;\n" +
"A : 'a' ;\n" +
"B : 'b' ;\n" +
"C : 'c' ;\n");
Grammar g = new Grammar(
"parser grammar T;\n"+
"s : A{;} | B ;",
lg);
testInterp(lg, g, "s", "a", "(s a)");
testInterp(lg, g, "s", "b", "(s b)");
}
// Rule invocation: s calls t, so the interpreter must push/pop rule contexts.
@Test public void testCall() throws Exception {
LexerGrammar lg = new LexerGrammar(
"lexer grammar L;\n" +
"A : 'a' ;\n" +
"B : 'b' ;\n" +
"C : 'c' ;\n");
Grammar g = new Grammar(
"parser grammar T;\n"+
"s : t C ;\n" +
"t : A{;} | B ;\n",
lg);
testInterp(lg, g, "s", "ac", "(s (t a) c)");
testInterp(lg, g, "s", "bc", "(s (t b) c)");
}
// Two levels of rule invocation (s -> t -> u).
@Test public void testCall2() throws Exception {
LexerGrammar lg = new LexerGrammar(
"lexer grammar L;\n" +
"A : 'a' ;\n" +
"B : 'b' ;\n" +
"C : 'c' ;\n");
Grammar g = new Grammar(
"parser grammar T;\n"+
"s : t C ;\n" +
"t : u ;\n" +
"u : A{;} | B ;\n",
lg);
testInterp(lg, g, "s", "ac", "(s (t (u a)) c)");
testInterp(lg, g, "s", "bc", "(s (t (u b)) c)");
}
// Optional subrule (A?): exercised both with and without the optional token.
@Test public void testOptionalA() throws Exception {
LexerGrammar lg = new LexerGrammar(
"lexer grammar L;\n" +
"A : 'a' ;\n" +
"B : 'b' ;\n" +
"C : 'c' ;\n");
Grammar g = new Grammar(
"parser grammar T;\n" +
"s : A? B ;\n",
lg);
testInterp(lg, g, "s", "b", "(s b)");
testInterp(lg, g, "s", "ab", "(s a b)");
}
// Optional block with an internal decision: (A|B)? C.
@Test public void testOptionalAorB() throws Exception {
LexerGrammar lg = new LexerGrammar(
"lexer grammar L;\n" +
"A : 'a' ;\n" +
"B : 'b' ;\n" +
"C : 'c' ;\n");
Grammar g = new Grammar(
"parser grammar T;\n" +
"s : (A{;}|B)? C ;\n",
lg);
testInterp(lg, g, "s", "c", "(s c)");
testInterp(lg, g, "s", "ac", "(s a c)");
testInterp(lg, g, "s", "bc", "(s b c)");
}
// Star loop (A*): zero, one, and many iterations.
@Test public void testStarA() throws Exception {
LexerGrammar lg = new LexerGrammar(
"lexer grammar L;\n" +
"A : 'a' ;\n" +
"B : 'b' ;\n" +
"C : 'c' ;\n");
Grammar g = new Grammar(
"parser grammar T;\n" +
"s : A* B ;\n",
lg);
testInterp(lg, g, "s", "b", "(s b)");
testInterp(lg, g, "s", "ab", "(s a b)");
testInterp(lg, g, "s", "aaaaaab", "(s a a a a a a b)");
}
// Star loop over a multi-alternative block: (A|B)* C, alternating alts.
@Test public void testStarAorB() throws Exception {
LexerGrammar lg = new LexerGrammar(
"lexer grammar L;\n" +
"A : 'a' ;\n" +
"B : 'b' ;\n" +
"C : 'c' ;\n");
Grammar g = new Grammar(
"parser grammar T;\n" +
"s : (A{;}|B)* C ;\n",
lg);
testInterp(lg, g, "s", "c", "(s c)");
testInterp(lg, g, "s", "ac", "(s a c)");
testInterp(lg, g, "s", "bc", "(s b c)");
testInterp(lg, g, "s", "abaaabc", "(s a b a a a b c)");
testInterp(lg, g, "s", "babac", "(s b a b a c)");
}
// Left-recursive expression rule: the expected trees encode the usual
// precedence (MULT binds tighter than PLUS) and left associativity.
@Test public void testLeftRecursion() throws Exception {
LexerGrammar lg = new LexerGrammar(
"lexer grammar L;\n" +
"A : 'a' ;\n" +
"B : 'b' ;\n" +
"C : 'c' ;\n" +
"PLUS : '+' ;\n" +
"MULT : '*' ;\n");
Grammar g = new Grammar(
"parser grammar T;\n" +
"s : e ;\n" +
"e : e MULT e\n" +
" | e PLUS e\n" +
" | A\n" +
" ;\n",
lg);
testInterp(lg, g, "s", "a", "(s (e a))");
testInterp(lg, g, "s", "a+a", "(s (e (e a) + (e a)))");
testInterp(lg, g, "s", "a*a", "(s (e (e a) * (e a)))");
testInterp(lg, g, "s", "a+a+a", "(s (e (e (e a) + (e a)) + (e a)))");
testInterp(lg, g, "s", "a*a+a", "(s (e (e (e a) * (e a)) + (e a)))");
testInterp(lg, g, "s", "a+a*a", "(s (e (e a) + (e (e a) * (e a))))");
}
/**
 * Shared harness: tokenizes {@code input} with a lexer interpreter built
 * from {@code lg}, parses from {@code startRule} with a
 * {@code ParserInterpreter} driven by {@code g}'s ATN, and asserts the
 * tree's string form equals {@code parseTree}.
 *
 * @param lg        lexer grammar supplying the token stream
 * @param g         parser grammar whose ATN is interpreted
 * @param startRule name of the rule to start parsing from
 * @param input     raw character input
 * @param parseTree expected {@code toStringTree} rendering of the result
 */
void testInterp(LexerGrammar lg, Grammar g,
String startRule, String input,
String parseTree)
{
LexerInterpreter lexEngine = lg.createLexerInterpreter(new ANTLRInputStream(input));
CommonTokenStream tokens = new CommonTokenStream(lexEngine);
// createATN (BaseTest helper) builds the ATN for g; second arg true —
// presumably "process the grammar fully first"; TODO confirm its meaning.
ATN atn = createATN(g, true);
ParserInterpreter parser = new ParserInterpreter(g.fileName, Arrays.asList(g.getTokenNames()), Arrays.asList(g.getRuleNames()), atn, tokens);
ParseTree t = parser.parse(g.rules.get(startRule).index);
// Debug aid: print the tree before asserting so failures show the actual tree.
System.out.println("parse tree: "+t.toStringTree(parser));
assertEquals(parseTree, t.toStringTree(parser));
}
}

View File

@ -64,6 +64,7 @@ import org.antlr.v4.runtime.tree.ParseTree;
import org.antlr.v4.runtime.tree.ParseTreeListener;
import org.antlr.v4.runtime.tree.ParseTreeWalker;
import org.antlr.v4.runtime.tree.TerminalNode;
import org.antlr.v4.tool.interp.ParserInterpreter;
import org.junit.Assert;
import org.junit.Test;
@ -184,6 +185,11 @@ public class TestPerformance extends BaseTest {
* test completes.
*/
private static final boolean DELETE_TEMP_FILES = true;
/**
* {@code true} to use a {@link ParserInterpreter} for parsing instead of
* generated parser.
*/
private static final boolean USE_PARSER_INTERPRETER = false;
/**
* {@code true} to call {@link System#gc} and then wait for 5 seconds at the
@ -1247,7 +1253,15 @@ public class TestPerformance extends BaseTest {
parser.setInputStream(tokens);
} else {
Parser previousParser = parser;
parser = parserCtor.newInstance(tokens);
if (USE_PARSER_INTERPRETER) {
Parser referenceParser = parserCtor.newInstance(tokens);
parser = new ParserInterpreter(referenceParser.getGrammarFileName(), Arrays.asList(referenceParser.getTokenNames()), Arrays.asList(referenceParser.getRuleNames()), referenceParser.getATN(), tokens);
}
else {
parser = parserCtor.newInstance(tokens);
}
DFA[] decisionToDFA = (FILE_GRANULARITY || previousParser == null ? parser : previousParser).getInterpreter().decisionToDFA;
if (!REUSE_PARSER_DFA || (!FILE_GRANULARITY && previousParser == null)) {
decisionToDFA = new DFA[decisionToDFA.length];
@ -1262,6 +1276,7 @@ public class TestPerformance extends BaseTest {
sharedParsers[thread] = parser;
}
parser.removeParseListeners();
parser.removeErrorListeners();
if (!TWO_STAGE_PARSING) {
parser.addErrorListener(DescriptiveErrorListener.INSTANCE);
@ -1287,14 +1302,18 @@ public class TestPerformance extends BaseTest {
Method parseMethod = parserClass.getMethod(entryPoint);
Object parseResult;
ParseTreeListener checksumParserListener = null;
try {
if (COMPUTE_CHECKSUM) {
checksumParserListener = new ChecksumParseTreeListener(checksum);
parser.addParseListener(checksumParserListener);
if (COMPUTE_CHECKSUM && !BUILD_PARSE_TREES) {
parser.addParseListener(new ChecksumParseTreeListener(checksum));
}
if (USE_PARSER_INTERPRETER) {
ParserInterpreter parserInterpreter = (ParserInterpreter)parser;
parseResult = parserInterpreter.parse(Collections.lastIndexOfSubList(Arrays.asList(parser.getRuleNames()), Collections.singletonList(entryPoint)));
}
else {
parseResult = parseMethod.invoke(parser);
}
parseResult = parseMethod.invoke(parser);
} catch (InvocationTargetException ex) {
if (!TWO_STAGE_PARSING) {
throw ex;
@ -1313,7 +1332,15 @@ public class TestPerformance extends BaseTest {
parser.setInputStream(tokens);
} else {
Parser previousParser = parser;
parser = parserCtor.newInstance(tokens);
if (USE_PARSER_INTERPRETER) {
Parser referenceParser = parserCtor.newInstance(tokens);
parser = new ParserInterpreter(referenceParser.getGrammarFileName(), Arrays.asList(referenceParser.getTokenNames()), Arrays.asList(referenceParser.getRuleNames()), referenceParser.getATN(), tokens);
}
else {
parser = parserCtor.newInstance(tokens);
}
DFA[] decisionToDFA = previousParser.getInterpreter().decisionToDFA;
if (COMPUTE_TRANSITION_STATS) {
parser.setInterpreter(new StatisticsParserATNSimulator(parser, parser.getATN(), decisionToDFA, parser.getInterpreter().getSharedContextCache()));
@ -1324,11 +1351,15 @@ public class TestPerformance extends BaseTest {
sharedParsers[thread] = parser;
}
parser.removeParseListeners();
parser.removeErrorListeners();
parser.addErrorListener(DescriptiveErrorListener.INSTANCE);
parser.addErrorListener(new SummarizingDiagnosticErrorListener());
parser.getInterpreter().setPredictionMode(PredictionMode.LL);
parser.setBuildParseTree(BUILD_PARSE_TREES);
if (COMPUTE_CHECKSUM && !BUILD_PARSE_TREES) {
parser.addParseListener(new ChecksumParseTreeListener(checksum));
}
if (!BUILD_PARSE_TREES && BLANK_LISTENER) {
parser.addParseListener(listener);
}
@ -1338,13 +1369,11 @@ public class TestPerformance extends BaseTest {
parseResult = parseMethod.invoke(parser);
}
finally {
if (checksumParserListener != null) {
parser.removeParseListener(checksumParserListener);
}
}
assertThat(parseResult, instanceOf(ParseTree.class));
if (COMPUTE_CHECKSUM && BUILD_PARSE_TREES) {
ParseTreeWalker.DEFAULT.walk(new ChecksumParseTreeListener(checksum), (ParseTree)parseResult);
}
if (BUILD_PARSE_TREES && BLANK_LISTENER) {
ParseTreeWalker.DEFAULT.walk(listener, (ParseTree)parseResult);
}

View File

@ -29,6 +29,7 @@
*/
package org.antlr.v4.test;
import org.antlr.v4.runtime.ANTLRInputStream;
import org.antlr.v4.runtime.CommonTokenStream;
import org.antlr.v4.runtime.TokenStreamRewriter;
import org.antlr.v4.runtime.misc.Interval;
@ -50,7 +51,7 @@ public class TestTokenStreamRewriter extends BaseTest {
"A : 'a';\n" +
"B : 'b';\n" +
"C : 'c';\n");
LexerInterpreter lexEngine = new LexerInterpreter(g, "abc");
LexerInterpreter lexEngine = g.createLexerInterpreter(new ANTLRInputStream("abc"));
CommonTokenStream stream = new CommonTokenStream(lexEngine);
stream.fill();
TokenStreamRewriter tokens = new TokenStreamRewriter(stream);
@ -67,7 +68,7 @@ public class TestTokenStreamRewriter extends BaseTest {
"B : 'b';\n" +
"C : 'c';\n");
String input = "abc";
LexerInterpreter lexEngine = new LexerInterpreter(g, input);
LexerInterpreter lexEngine = g.createLexerInterpreter(new ANTLRInputStream(input));
CommonTokenStream stream = new CommonTokenStream(lexEngine);
stream.fill();
TokenStreamRewriter tokens = new TokenStreamRewriter(stream);
@ -84,7 +85,7 @@ public class TestTokenStreamRewriter extends BaseTest {
"B : 'b';\n" +
"C : 'c';\n");
String input = "abc";
LexerInterpreter lexEngine = new LexerInterpreter(g, input);
LexerInterpreter lexEngine = g.createLexerInterpreter(new ANTLRInputStream(input));
CommonTokenStream stream = new CommonTokenStream(lexEngine);
stream.fill();
TokenStreamRewriter tokens = new TokenStreamRewriter(stream);
@ -102,7 +103,7 @@ public class TestTokenStreamRewriter extends BaseTest {
"B : 'b';\n" +
"C : 'c';\n");
String input = "abc";
LexerInterpreter lexEngine = new LexerInterpreter(g, input);
LexerInterpreter lexEngine = g.createLexerInterpreter(new ANTLRInputStream(input));
CommonTokenStream stream = new CommonTokenStream(lexEngine);
stream.fill();
TokenStreamRewriter tokens = new TokenStreamRewriter(stream);
@ -119,7 +120,7 @@ public class TestTokenStreamRewriter extends BaseTest {
"B : 'b';\n" +
"C : 'c';\n");
String input = "abc";
LexerInterpreter lexEngine = new LexerInterpreter(g, input);
LexerInterpreter lexEngine = g.createLexerInterpreter(new ANTLRInputStream(input));
CommonTokenStream stream = new CommonTokenStream(lexEngine);
stream.fill();
TokenStreamRewriter tokens = new TokenStreamRewriter(stream);
@ -136,7 +137,7 @@ public class TestTokenStreamRewriter extends BaseTest {
"B : 'b';\n" +
"C : 'c';\n");
String input = "abc";
LexerInterpreter lexEngine = new LexerInterpreter(g, input);
LexerInterpreter lexEngine = g.createLexerInterpreter(new ANTLRInputStream(input));
CommonTokenStream stream = new CommonTokenStream(lexEngine);
stream.fill();
TokenStreamRewriter tokens = new TokenStreamRewriter(stream);
@ -158,7 +159,7 @@ public class TestTokenStreamRewriter extends BaseTest {
// Tokens: 0123456789
// Input: x = 3 * 0;
String input = "x = 3 * 0;";
LexerInterpreter lexEngine = new LexerInterpreter(g, input);
LexerInterpreter lexEngine = g.createLexerInterpreter(new ANTLRInputStream(input));
CommonTokenStream stream = new CommonTokenStream(lexEngine);
stream.fill();
TokenStreamRewriter tokens = new TokenStreamRewriter(stream);
@ -196,7 +197,7 @@ public class TestTokenStreamRewriter extends BaseTest {
// Tokens: 012345678901234567
// Input: x = 3 * 0 + 2 * 0;
String input = "x = 3 * 0 + 2 * 0;";
LexerInterpreter lexEngine = new LexerInterpreter(g, input);
LexerInterpreter lexEngine = g.createLexerInterpreter(new ANTLRInputStream(input));
CommonTokenStream stream = new CommonTokenStream(lexEngine);
stream.fill();
TokenStreamRewriter tokens = new TokenStreamRewriter(stream);
@ -248,7 +249,7 @@ public class TestTokenStreamRewriter extends BaseTest {
"B : 'b';\n" +
"C : 'c';\n");
String input = "abc";
LexerInterpreter lexEngine = new LexerInterpreter(g, input);
LexerInterpreter lexEngine = g.createLexerInterpreter(new ANTLRInputStream(input));
CommonTokenStream stream = new CommonTokenStream(lexEngine);
stream.fill();
TokenStreamRewriter tokens = new TokenStreamRewriter(stream);
@ -266,7 +267,7 @@ public class TestTokenStreamRewriter extends BaseTest {
"B : 'b';\n" +
"C : 'c';\n");
String input = "abc";
LexerInterpreter lexEngine = new LexerInterpreter(g, input);
LexerInterpreter lexEngine = g.createLexerInterpreter(new ANTLRInputStream(input));
CommonTokenStream stream = new CommonTokenStream(lexEngine);
stream.fill();
TokenStreamRewriter tokens = new TokenStreamRewriter(stream);
@ -285,7 +286,7 @@ public class TestTokenStreamRewriter extends BaseTest {
"B : 'b';\n" +
"C : 'c';\n");
String input = "abc";
LexerInterpreter lexEngine = new LexerInterpreter(g, input);
LexerInterpreter lexEngine = g.createLexerInterpreter(new ANTLRInputStream(input));
CommonTokenStream stream = new CommonTokenStream(lexEngine);
stream.fill();
TokenStreamRewriter tokens = new TokenStreamRewriter(stream);
@ -303,7 +304,7 @@ public class TestTokenStreamRewriter extends BaseTest {
"B : 'b';\n" +
"C : 'c';\n");
String input = "abc";
LexerInterpreter lexEngine = new LexerInterpreter(g, input);
LexerInterpreter lexEngine = g.createLexerInterpreter(new ANTLRInputStream(input));
CommonTokenStream stream = new CommonTokenStream(lexEngine);
stream.fill();
TokenStreamRewriter tokens = new TokenStreamRewriter(stream);
@ -328,7 +329,7 @@ public class TestTokenStreamRewriter extends BaseTest {
"B : 'b';\n" +
"C : 'c';\n");
String input = "abc";
LexerInterpreter lexEngine = new LexerInterpreter(g, input);
LexerInterpreter lexEngine = g.createLexerInterpreter(new ANTLRInputStream(input));
CommonTokenStream stream = new CommonTokenStream(lexEngine);
stream.fill();
TokenStreamRewriter tokens = new TokenStreamRewriter(stream);
@ -348,7 +349,7 @@ public class TestTokenStreamRewriter extends BaseTest {
"B : 'b';\n" +
"C : 'c';\n");
String input = "abc";
LexerInterpreter lexEngine = new LexerInterpreter(g, input);
LexerInterpreter lexEngine = g.createLexerInterpreter(new ANTLRInputStream(input));
CommonTokenStream stream = new CommonTokenStream(lexEngine);
stream.fill();
TokenStreamRewriter tokens = new TokenStreamRewriter(stream);
@ -366,7 +367,7 @@ public class TestTokenStreamRewriter extends BaseTest {
"B : 'b';\n" +
"C : 'c';\n");
String input = "abc";
LexerInterpreter lexEngine = new LexerInterpreter(g, input);
LexerInterpreter lexEngine = g.createLexerInterpreter(new ANTLRInputStream(input));
CommonTokenStream stream = new CommonTokenStream(lexEngine);
stream.fill();
TokenStreamRewriter tokens = new TokenStreamRewriter(stream);
@ -385,7 +386,7 @@ public class TestTokenStreamRewriter extends BaseTest {
"B : 'b';\n" +
"C : 'c';\n");
String input = "abc";
LexerInterpreter lexEngine = new LexerInterpreter(g, input);
LexerInterpreter lexEngine = g.createLexerInterpreter(new ANTLRInputStream(input));
CommonTokenStream stream = new CommonTokenStream(lexEngine);
stream.fill();
TokenStreamRewriter tokens = new TokenStreamRewriter(stream);
@ -403,7 +404,7 @@ public class TestTokenStreamRewriter extends BaseTest {
"B : 'b';\n" +
"C : 'c';\n");
String input = "abc";
LexerInterpreter lexEngine = new LexerInterpreter(g, input);
LexerInterpreter lexEngine = g.createLexerInterpreter(new ANTLRInputStream(input));
CommonTokenStream stream = new CommonTokenStream(lexEngine);
stream.fill();
TokenStreamRewriter tokens = new TokenStreamRewriter(stream);
@ -421,7 +422,7 @@ public class TestTokenStreamRewriter extends BaseTest {
"B : 'b';\n" +
"C : 'c';\n");
String input = "abc";
LexerInterpreter lexEngine = new LexerInterpreter(g, input);
LexerInterpreter lexEngine = g.createLexerInterpreter(new ANTLRInputStream(input));
CommonTokenStream stream = new CommonTokenStream(lexEngine);
stream.fill();
TokenStreamRewriter tokens = new TokenStreamRewriter(stream);
@ -439,7 +440,7 @@ public class TestTokenStreamRewriter extends BaseTest {
"B : 'b';\n" +
"C : 'c';\n");
String input = "abcccba";
LexerInterpreter lexEngine = new LexerInterpreter(g, input);
LexerInterpreter lexEngine = g.createLexerInterpreter(new ANTLRInputStream(input));
CommonTokenStream stream = new CommonTokenStream(lexEngine);
stream.fill();
TokenStreamRewriter tokens = new TokenStreamRewriter(stream);
@ -457,7 +458,7 @@ public class TestTokenStreamRewriter extends BaseTest {
"B : 'b';\n" +
"C : 'c';\n");
String input = "abcccba";
LexerInterpreter lexEngine = new LexerInterpreter(g, input);
LexerInterpreter lexEngine = g.createLexerInterpreter(new ANTLRInputStream(input));
CommonTokenStream stream = new CommonTokenStream(lexEngine);
stream.fill();
TokenStreamRewriter tokens = new TokenStreamRewriter(stream);
@ -483,7 +484,7 @@ public class TestTokenStreamRewriter extends BaseTest {
"B : 'b';\n" +
"C : 'c';\n");
String input = "abcccba";
LexerInterpreter lexEngine = new LexerInterpreter(g, input);
LexerInterpreter lexEngine = g.createLexerInterpreter(new ANTLRInputStream(input));
CommonTokenStream stream = new CommonTokenStream(lexEngine);
stream.fill();
TokenStreamRewriter tokens = new TokenStreamRewriter(stream);
@ -501,7 +502,7 @@ public class TestTokenStreamRewriter extends BaseTest {
"B : 'b';\n" +
"C : 'c';\n");
String input = "abcccba";
LexerInterpreter lexEngine = new LexerInterpreter(g, input);
LexerInterpreter lexEngine = g.createLexerInterpreter(new ANTLRInputStream(input));
CommonTokenStream stream = new CommonTokenStream(lexEngine);
stream.fill();
TokenStreamRewriter tokens = new TokenStreamRewriter(stream);
@ -518,7 +519,7 @@ public class TestTokenStreamRewriter extends BaseTest {
"B : 'b';\n" +
"C : 'c';\n");
String input = "abcccba";
LexerInterpreter lexEngine = new LexerInterpreter(g, input);
LexerInterpreter lexEngine = g.createLexerInterpreter(new ANTLRInputStream(input));
CommonTokenStream stream = new CommonTokenStream(lexEngine);
stream.fill();
TokenStreamRewriter tokens = new TokenStreamRewriter(stream);
@ -535,7 +536,7 @@ public class TestTokenStreamRewriter extends BaseTest {
"B : 'b';\n" +
"C : 'c';\n");
String input = "abcccba";
LexerInterpreter lexEngine = new LexerInterpreter(g, input);
LexerInterpreter lexEngine = g.createLexerInterpreter(new ANTLRInputStream(input));
CommonTokenStream stream = new CommonTokenStream(lexEngine);
stream.fill();
TokenStreamRewriter tokens = new TokenStreamRewriter(stream);
@ -562,7 +563,7 @@ public class TestTokenStreamRewriter extends BaseTest {
"B : 'b';\n" +
"C : 'c';\n");
String input = "abcccba";
LexerInterpreter lexEngine = new LexerInterpreter(g, input);
LexerInterpreter lexEngine = g.createLexerInterpreter(new ANTLRInputStream(input));
CommonTokenStream stream = new CommonTokenStream(lexEngine);
stream.fill();
TokenStreamRewriter tokens = new TokenStreamRewriter(stream);
@ -589,7 +590,7 @@ public class TestTokenStreamRewriter extends BaseTest {
"B : 'b';\n" +
"C : 'c';\n");
String input = "abcba";
LexerInterpreter lexEngine = new LexerInterpreter(g, input);
LexerInterpreter lexEngine = g.createLexerInterpreter(new ANTLRInputStream(input));
CommonTokenStream stream = new CommonTokenStream(lexEngine);
stream.fill();
TokenStreamRewriter tokens = new TokenStreamRewriter(stream);
@ -607,7 +608,7 @@ public class TestTokenStreamRewriter extends BaseTest {
"B : 'b';\n" +
"C : 'c';\n");
String input = "abc";
LexerInterpreter lexEngine = new LexerInterpreter(g, input);
LexerInterpreter lexEngine = g.createLexerInterpreter(new ANTLRInputStream(input));
CommonTokenStream stream = new CommonTokenStream(lexEngine);
stream.fill();
TokenStreamRewriter tokens = new TokenStreamRewriter(stream);
@ -625,7 +626,7 @@ public class TestTokenStreamRewriter extends BaseTest {
"B : 'b';\n" +
"C : 'c';\n");
String input = "abc";
LexerInterpreter lexEngine = new LexerInterpreter(g, input);
LexerInterpreter lexEngine = g.createLexerInterpreter(new ANTLRInputStream(input));
CommonTokenStream stream = new CommonTokenStream(lexEngine);
stream.fill();
TokenStreamRewriter tokens = new TokenStreamRewriter(stream);
@ -644,7 +645,7 @@ public class TestTokenStreamRewriter extends BaseTest {
"B : 'b';\n" +
"C : 'c';\n");
String input = "abc";
LexerInterpreter lexEngine = new LexerInterpreter(g, input);
LexerInterpreter lexEngine = g.createLexerInterpreter(new ANTLRInputStream(input));
CommonTokenStream stream = new CommonTokenStream(lexEngine);
stream.fill();
TokenStreamRewriter tokens = new TokenStreamRewriter(stream);
@ -664,7 +665,7 @@ public class TestTokenStreamRewriter extends BaseTest {
"B : 'b';\n" +
"C : 'c';\n");
String input = "abc";
LexerInterpreter lexEngine = new LexerInterpreter(g, input);
LexerInterpreter lexEngine = g.createLexerInterpreter(new ANTLRInputStream(input));
CommonTokenStream stream = new CommonTokenStream(lexEngine);
stream.fill();
TokenStreamRewriter tokens = new TokenStreamRewriter(stream);
@ -686,7 +687,7 @@ public class TestTokenStreamRewriter extends BaseTest {
"B : 'b';\n" +
"C : 'c';\n");
String input = "abc";
LexerInterpreter lexEngine = new LexerInterpreter(g, input);
LexerInterpreter lexEngine = g.createLexerInterpreter(new ANTLRInputStream(input));
CommonTokenStream stream = new CommonTokenStream(lexEngine);
stream.fill();
TokenStreamRewriter tokens = new TokenStreamRewriter(stream);
@ -705,7 +706,7 @@ public class TestTokenStreamRewriter extends BaseTest {
"B : 'b';\n" +
"C : 'c';\n");
String input = "abcc";
LexerInterpreter lexEngine = new LexerInterpreter(g, input);
LexerInterpreter lexEngine = g.createLexerInterpreter(new ANTLRInputStream(input));
CommonTokenStream stream = new CommonTokenStream(lexEngine);
stream.fill();
TokenStreamRewriter tokens = new TokenStreamRewriter(stream);
@ -725,7 +726,7 @@ public class TestTokenStreamRewriter extends BaseTest {
"B : 'b';\n" +
"C : 'c';\n");
String input = "abcc";
LexerInterpreter lexEngine = new LexerInterpreter(g, input);
LexerInterpreter lexEngine = g.createLexerInterpreter(new ANTLRInputStream(input));
CommonTokenStream stream = new CommonTokenStream(lexEngine);
stream.fill();
TokenStreamRewriter tokens = new TokenStreamRewriter(stream);
@ -752,7 +753,7 @@ public class TestTokenStreamRewriter extends BaseTest {
"B : 'b';\n" +
"C : 'c';\n");
String input = "abcc";
LexerInterpreter lexEngine = new LexerInterpreter(g, input);
LexerInterpreter lexEngine = g.createLexerInterpreter(new ANTLRInputStream(input));
CommonTokenStream stream = new CommonTokenStream(lexEngine);
stream.fill();
TokenStreamRewriter tokens = new TokenStreamRewriter(stream);
@ -772,7 +773,7 @@ public class TestTokenStreamRewriter extends BaseTest {
"B : 'b';\n" +
"C : 'c';\n");
String input = "abcc";
LexerInterpreter lexEngine = new LexerInterpreter(g, input);
LexerInterpreter lexEngine = g.createLexerInterpreter(new ANTLRInputStream(input));
CommonTokenStream stream = new CommonTokenStream(lexEngine);
stream.fill();
TokenStreamRewriter tokens = new TokenStreamRewriter(stream);
@ -792,7 +793,7 @@ public class TestTokenStreamRewriter extends BaseTest {
"B : 'b';\n" +
"C : 'c';\n");
String input = "abcc";
LexerInterpreter lexEngine = new LexerInterpreter(g, input);
LexerInterpreter lexEngine = g.createLexerInterpreter(new ANTLRInputStream(input));
CommonTokenStream stream = new CommonTokenStream(lexEngine);
stream.fill();
TokenStreamRewriter tokens = new TokenStreamRewriter(stream);
@ -812,7 +813,7 @@ public class TestTokenStreamRewriter extends BaseTest {
"B : 'b';\n" +
"C : 'c';\n");
String input = "abc";
LexerInterpreter lexEngine = new LexerInterpreter(g, input);
LexerInterpreter lexEngine = g.createLexerInterpreter(new ANTLRInputStream(input));
CommonTokenStream stream = new CommonTokenStream(lexEngine);
stream.fill();
TokenStreamRewriter tokens = new TokenStreamRewriter(stream);
@ -832,7 +833,7 @@ public class TestTokenStreamRewriter extends BaseTest {
"B : 'b';\n" +
"C : 'c';\n");
String input = "abcc";
LexerInterpreter lexEngine = new LexerInterpreter(g, input);
LexerInterpreter lexEngine = g.createLexerInterpreter(new ANTLRInputStream(input));
CommonTokenStream stream = new CommonTokenStream(lexEngine);
stream.fill();
TokenStreamRewriter tokens = new TokenStreamRewriter(stream);
@ -850,7 +851,7 @@ public class TestTokenStreamRewriter extends BaseTest {
"B : 'b';\n" +
"C : 'c';\n");
String input = "abcc";
LexerInterpreter lexEngine = new LexerInterpreter(g, input);
LexerInterpreter lexEngine = g.createLexerInterpreter(new ANTLRInputStream(input));
CommonTokenStream stream = new CommonTokenStream(lexEngine);
stream.fill();
TokenStreamRewriter tokens = new TokenStreamRewriter(stream);
@ -868,7 +869,7 @@ public class TestTokenStreamRewriter extends BaseTest {
"B : 'b';\n" +
"C : 'c';\n");
String input = "abc";
LexerInterpreter lexEngine = new LexerInterpreter(g, input);
LexerInterpreter lexEngine = g.createLexerInterpreter(new ANTLRInputStream(input));
CommonTokenStream stream = new CommonTokenStream(lexEngine);
stream.fill();
TokenStreamRewriter tokens = new TokenStreamRewriter(stream);

View File

@ -310,10 +310,9 @@ public class TestUnbufferedCharStream extends BaseTest {
// Tokens: 012345678901234567
// Input: x = 3 * 0 + 2 * 0;
TestingUnbufferedCharStream input = createStream("x = 302 * 91 + 20234234 * 0;");
LexerInterpreter lexEngine = new LexerInterpreter(g);
LexerInterpreter lexEngine = g.createLexerInterpreter(input);
// copy text into tokens from char stream
lexEngine.setTokenFactory(new CommonTokenFactory(true));
lexEngine.setInput(input);
CommonTokenStream tokens = new CommonTokenStream(lexEngine);
String result = tokens.LT(1).getText();
String expecting = "x";

View File

@ -63,8 +63,7 @@ public class TestUnbufferedTokenStream extends BaseTest {
CharStream input = new ANTLRInputStream(
new StringReader("x = 302;")
);
LexerInterpreter lexEngine = new LexerInterpreter(g);
lexEngine.setInput(input);
LexerInterpreter lexEngine = g.createLexerInterpreter(input);
TokenStream tokens = new UnbufferedTokenStream<Token>(lexEngine);
assertEquals("x", tokens.LT(1).getText());
@ -90,8 +89,7 @@ public class TestUnbufferedTokenStream extends BaseTest {
CharStream input = new ANTLRInputStream(
new StringReader("x = 302;")
);
LexerInterpreter lexEngine = new LexerInterpreter(g);
lexEngine.setInput(input);
LexerInterpreter lexEngine = g.createLexerInterpreter(input);
TestingUnbufferedTokenStream<Token> tokens = new TestingUnbufferedTokenStream<Token>(lexEngine);
assertEquals("[[@0,0:0='x',<1>,1:0]]", tokens.getBuffer().toString());
@ -128,8 +126,7 @@ public class TestUnbufferedTokenStream extends BaseTest {
CharStream input = new ANTLRInputStream(
new StringReader("x = 302;")
);
LexerInterpreter lexEngine = new LexerInterpreter(g);
lexEngine.setInput(input);
LexerInterpreter lexEngine = g.createLexerInterpreter(input);
TestingUnbufferedTokenStream<Token> tokens = new TestingUnbufferedTokenStream<Token>(lexEngine);
int m = tokens.mark();
@ -164,8 +161,7 @@ public class TestUnbufferedTokenStream extends BaseTest {
CharStream input = new ANTLRInputStream(
new StringReader("x = 302 + 1;")
);
LexerInterpreter lexEngine = new LexerInterpreter(g);
lexEngine.setInput(input);
LexerInterpreter lexEngine = g.createLexerInterpreter(input);
TestingUnbufferedTokenStream<Token> tokens = new TestingUnbufferedTokenStream<Token>(lexEngine);
int m = tokens.mark();

View File

@ -0,0 +1,241 @@
package org.antlr.v4.test;
import org.antlr.v4.runtime.Lexer;
import org.antlr.v4.runtime.Parser;
import org.antlr.v4.runtime.RuleContext;
import org.antlr.v4.runtime.misc.Pair;
import org.antlr.v4.runtime.tree.ParseTree;
import org.antlr.v4.runtime.tree.TerminalNode;
import org.antlr.v4.runtime.tree.xpath.XPath;
import org.junit.Test;
import java.util.ArrayList;
import java.util.List;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
/**
 * Tests for the runtime XPath-like tree query facility ({@link XPath#findAll}).
 *
 * <p>Each test generates and compiles the same small expression grammar, parses a
 * fixed sample program, then either checks which nodes an XPath expression selects
 * ({@link #testValidPaths()}) or checks the error message produced for a malformed
 * path expression.</p>
 */
public class TestXPath extends BaseTest {
	/** Grammar shared by every test; compiled once per test via {@link #buildExprRecognizer()}. */
	public static final String grammar =
		"grammar Expr;\n" +
		"prog: func+ ;\n" +
		"func: 'def' ID '(' arg (',' arg)* ')' body ;\n" +
		"body: '{' stat+ '}' ;\n" +
		"arg : ID ;\n" +
		"stat: expr ';' # printExpr\n" +
		"    | ID '=' expr ';'  # assign\n" +
		"    | 'return' expr ';' # ret\n" +
		"    | ';' # blank\n" +
		"    ;\n" +
		"expr: expr ('*'|'/') expr # MulDiv\n" +
		"    | expr ('+'|'-') expr # AddSub\n" +
		"    | primary # prim\n" +
		"    ;\n" +
		"primary" +
		"    : INT # int\n" +
		"    | ID  # id\n" +
		"    | '(' expr ')' # parens\n" +
		"	 ;" +
		"\n" +
		"MUL :   '*' ; // assigns token name to '*' used above in grammar\n" +
		"DIV :   '/' ;\n" +
		"ADD :   '+' ;\n" +
		"SUB :   '-' ;\n" +
		"ID  :   [a-zA-Z]+ ;      // match identifiers\n" +
		"INT :   [0-9]+ ;         // match integers\n" +
		"NEWLINE:'\\r'? '\\n' -> skip;     // return newlines to parser (is end-statement signal)\n" +
		"WS  :   [ \\t]+ -> skip ; // toss out whitespace\n";

	/** Sample input parsed by every test: two function definitions. */
	private static final String SAMPLE_PROGRAM =
		"def f(x,y) { x = 3+4; y; ; }\n" +
		"def g(x)   { return 1+2*x; }\n";

	/**
	 * Generate and compile ExprParser/ExprLexer from {@link #grammar},
	 * failing the test if code generation or compilation fails.
	 */
	private void buildExprRecognizer() throws Exception {
		boolean ok =
			rawGenerateAndBuildRecognizer("Expr.g4", grammar, "ExprParser",
										  "ExprLexer", false);
		assertTrue(ok);
	}

	@Test public void testValidPaths() throws Exception {
		buildExprRecognizer();

		// Each path below is paired positionally with its expected node list.
		String xpath[] = {
			"/prog/func",		// all funcs under prog at root
			"/prog/*",			// all children of prog at root
			"/*/func",			// all func kids of any root node
			"prog",				// prog must be root node
			"/prog",			// prog must be root node
			"/*",				// any root
			"*",				// any root
			"//ID",				// any ID in tree
			"//expr/primary/ID",// any ID child of a primary under any expr
			"//body//ID",		// any ID under a body
			"//'return'",		// any 'return' literal in tree
			"//primary/*",		// all kids of any primary
			"//func/*/stat",	// all stat nodes grandkids of any func node
			"/prog/func/'def'",	// all def literal kids of func kid of prog
			"//stat/';'",		// all ';' under any stat node
			"//expr/primary/!ID",	// anything but ID under primary under any expr node
			"//expr/!primary",	// anything but primary under any expr node
			"//!*",				// nothing anywhere
			"/!*",				// nothing at root
		};
		String expected[] = {
			"[func, func]",
			"[func, func]",
			"[func, func]",
			"[prog]",
			"[prog]",
			"[prog]",
			"[prog]",
			"[f, x, y, x, y, g, x, x]",
			"[y, x]",
			"[x, y, x]",
			"[return]",
			"[3, 4, y, 1, 2, x]",
			"[stat, stat, stat, stat]",
			"[def, def]",
			"[;, ;, ;, ;]",
			"[3, 4, 1, 2]",
			"[expr, expr, expr, expr, expr, expr]",
			"[]",
			"[]",
		};

		for (int i=0; i<xpath.length; i++) {
			List<String> nodes = getNodeStrings(SAMPLE_PROGRAM, xpath[i], "prog", "ExprParser", "ExprLexer");
			String result = nodes.toString();
			assertEquals("path "+xpath[i]+" failed", expected[i], result);
		}
	}

	@Test public void testWeirdChar() throws Exception {
		buildExprRecognizer();

		String path = "&";
		String expected = "Invalid tokens or characters at index 0 in path '&'";

		testError(SAMPLE_PROGRAM, path, expected, "prog", "ExprParser", "ExprLexer");
	}

	@Test public void testWeirdChar2() throws Exception {
		buildExprRecognizer();

		String path = "//w&e/";
		String expected = "Invalid tokens or characters at index 3 in path '//w&e/'";

		testError(SAMPLE_PROGRAM, path, expected, "prog", "ExprParser", "ExprLexer");
	}

	@Test public void testBadSyntax() throws Exception {
		buildExprRecognizer();

		String path = "///";
		String expected = "/ at index 2 isn't a valid rule name";

		testError(SAMPLE_PROGRAM, path, expected, "prog", "ExprParser", "ExprLexer");
	}

	@Test public void testMissingWordAtEnd() throws Exception {
		buildExprRecognizer();

		String path = "//";
		String expected = "Missing path element at end of path";

		testError(SAMPLE_PROGRAM, path, expected, "prog", "ExprParser", "ExprLexer");
	}

	@Test public void testBadTokenName() throws Exception {
		buildExprRecognizer();

		String path = "//Ick";
		String expected = "Ick at index 2 isn't a valid token name";

		testError(SAMPLE_PROGRAM, path, expected, "prog", "ExprParser", "ExprLexer");
	}

	@Test public void testBadRuleName() throws Exception {
		buildExprRecognizer();

		String path = "/prog/ick";
		String expected = "ick at index 6 isn't a valid rule name";

		testError(SAMPLE_PROGRAM, path, expected, "prog", "ExprParser", "ExprLexer");
	}

	/**
	 * Parse {@code input} with the generated recognizer, evaluate {@code path} against
	 * the resulting parse tree, and assert that {@link XPath#findAll} throws an
	 * {@link IllegalArgumentException} whose message equals {@code expected}.
	 *
	 * @param input         source text to parse
	 * @param path          the (malformed) XPath expression under test
	 * @param expected      the exact exception message expected
	 * @param startRuleName parser rule to invoke as the start rule
	 * @param parserName    generated parser class name
	 * @param lexerName     generated lexer class name
	 */
	protected void testError(String input, String path, String expected,
							 String startRuleName,
							 String parserName, String lexerName)
		throws Exception
	{
		Pair<Parser, Lexer> pl = getParserAndLexer(input, parserName, lexerName);
		Parser parser = pl.a;
		ParseTree tree = execStartRule(startRuleName, parser);

		IllegalArgumentException e = null;
		try {
			XPath.findAll(tree, path, parser);
		}
		catch (IllegalArgumentException iae) {
			e = iae;
		}
		assertNotNull(e);
		assertEquals(expected, e.getMessage());
	}

	/**
	 * Parse {@code input}, evaluate {@code xpath} against the parse tree, and return
	 * a display string per matched node: the rule name for rule contexts, the token
	 * text for terminal nodes.
	 *
	 * @return node display strings in the order {@link XPath#findAll} yields them
	 */
	public List<String> getNodeStrings(String input, String xpath,
									   String startRuleName,
									   String parserName, String lexerName)
		throws Exception
	{
		Pair<Parser, Lexer> pl = getParserAndLexer(input, parserName, lexerName);
		Parser parser = pl.a;
		ParseTree tree = execStartRule(startRuleName, parser);

		List<String> nodes = new ArrayList<String>();
		for (ParseTree t : XPath.findAll(tree, xpath, parser) ) {
			if ( t instanceof RuleContext) {
				RuleContext r = (RuleContext)t;
				nodes.add(parser.getRuleNames()[r.getRuleIndex()]);
			}
			else {
				TerminalNode token = (TerminalNode)t;
				nodes.add(token.getText());
			}
		}
		return nodes;
	}
}