forked from jasder/antlr
v4: PredictionContext optimization
[git-p4: depot-paths = "//depot/code/antlr4/main/": change = 9507]
parent a8219d943e
commit 11b6fa13f9
@@ -301,7 +301,7 @@ public class DefaultErrorStrategy<Symbol> implements ANTLRErrorStrategy<Symbol>
 // is free to conjure up and insert the missing token
 ATNState currentState = recognizer.getInterpreter().atn.states.get(recognizer._ctx.s);
 ATNState next = currentState.transition(0).target;
-IntervalSet expectingAtLL2 = recognizer.getInterpreter().atn.nextTokens(next, recognizer._ctx);
+IntervalSet expectingAtLL2 = recognizer.getInterpreter().atn.nextTokens(next, PredictionContext.fromRuleContext(recognizer._ctx));
 // System.out.println("LT(2) set="+expectingAtLL2.toString(recognizer.getTokenNames()));
 if ( expectingAtLL2.contains(currentSymbolType) ) {
 reportMissingToken(recognizer);
@@ -28,6 +28,7 @@
 */
 package org.antlr.v4.runtime;
 
+import org.antlr.v4.runtime.atn.PredictionContext;
 import org.antlr.v4.runtime.misc.IntervalSet;
 
 /** The root of the ANTLR exception hierarchy. In general, ANTLR tracks just
@@ -84,7 +85,7 @@ public class RecognitionException extends RuntimeException {
 public IntervalSet getExpectedTokens() {
 // TODO: do we really need this type check?
 if ( recognizer!=null && recognizer instanceof BaseRecognizer<?> ) {
-return recognizer.getInterpreter().atn.nextTokens(ctx);
+return recognizer.getInterpreter().atn.nextTokens(PredictionContext.fromRuleContext(ctx), ctx.s);
 }
 return null;
 }
@@ -82,8 +82,8 @@ public class ATN {
 /** Compute the set of valid tokens reachable from the current
 * position in the parse.
 */
-public IntervalSet nextTokens(@NotNull RuleContext ctx) {
-ATNState s = states.get(ctx.s);
+public IntervalSet nextTokens(@NotNull PredictionContext ctx, int stateNumber) {
+ATNState s = states.get(stateNumber);
 if ( s == null ) return null;
 return nextTokens(s, ctx);
 }
@@ -93,7 +93,7 @@ public class ATN {
 * the rule surrounding s. In other words, the set will be
 * restricted to tokens reachable staying within s's rule.
 */
-public IntervalSet nextTokens(ATNState s, RuleContext ctx) {
+public IntervalSet nextTokens(ATNState s, PredictionContext ctx) {
 LL1Analyzer anal = new LL1Analyzer(this);
 IntervalSet next = anal.LOOK(s, ctx);
 return next;
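A PredictionContext no longer carries the current ATN state the way a RuleContext did, so the state number becomes an explicit argument and parse-time contexts must be converted first. A minimal sketch of the new call shape, assuming a parser named recognizer whose current rule context is ctx (the variable names are illustrative):

    ATN atn = recognizer.getInterpreter().atn;
    PredictionContext pctx = PredictionContext.fromRuleContext(ctx); // snapshot of the invocation stack
    IntervalSet follow = atn.nextTokens(pctx, ctx.s);                // state number is passed separately now

This is the same shape used by the DefaultErrorStrategy and RecognitionException changes above.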
@@ -53,7 +53,7 @@ public class ATNConfig {
 * execution of the ATN simulator.
 */
 @Nullable
-public final RuleContext context;
+public final PredictionContext context;
 
 /**
 * Indicates that we have reached this ATN configuration after
@@ -91,7 +91,7 @@ public class ATNConfig {
 
 public ATNConfig(@NotNull ATNState state,
 int alt,
-@Nullable RuleContext context)
+@Nullable PredictionContext context)
 {
 this.state = state;
 this.alt = alt;
@@ -110,11 +110,11 @@ public class ATNConfig {
 this(c, state, c.context, semanticContext);
 }
 
-public ATNConfig(@NotNull ATNConfig c, @NotNull ATNState state, @Nullable RuleContext context) {
+public ATNConfig(@NotNull ATNConfig c, @NotNull ATNState state, @Nullable PredictionContext context) {
 this(c, state, context, c.semanticContext);
 }
 
-public ATNConfig(@NotNull ATNConfig c, @NotNull ATNState state, @Nullable RuleContext context,
+public ATNConfig(@NotNull ATNConfig c, @NotNull ATNState state, @Nullable PredictionContext context,
 SemanticContext semanticContext)
 {
 this.state = state;
@@ -125,10 +125,18 @@ public class ATNConfig {
 this.semanticContext = semanticContext;
 }
 
-// public ATNConfig(@NotNull ATNConfig c, @Nullable RuleContext context) {
+// public ATNConfig(@NotNull ATNConfig c, @Nullable PredictionContext context) {
 // this(c, c.state, context);
 // }
 
+public ATNConfig getAsCached() {
+if (this.context == null || this.context.isCached()) {
+return this;
+}
+
+return new ATNConfig(this, this.state, this.context.getAsCached(), this.semanticContext);
+}
+
 /** An ATN configuration is equal to another if both have
 * the same state, they predict the same alternative, and
 * syntactic/semantic contexts are the same.
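The new ATNConfig.getAsCached() is where the optimization pays off: before a configuration is stored, its PredictionContext chain can be swapped for an interned copy so that identical invocation stacks share one object and one precomputed hash code. A hedged sketch of how the simulators below use it, where configs is the OrderedHashSet<ATNConfig> being built during closure:

    // instead of configs.add(config), intern the context chain first
    configs.add(config.getAsCached()); // equal (parent, invokingState) chains collapse to one cached node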
@@ -62,7 +62,8 @@ public class LL1Analyzer {
 Set<ATNConfig> lookBusy = new HashSet<ATNConfig>();
 boolean seeThruPreds = false; // fail to get lookahead upon pred
 _LOOK(s.transition(alt - 1).target,
-RuleContext.EMPTY,
+PredictionContext.EMPTY,
+false,
 look[alt], lookBusy, seeThruPreds);
 if ( look[alt].size()==0 ) look[alt] = null;
 }
@@ -73,10 +74,10 @@ public class LL1Analyzer {
 * If ctx is null, EPSILON is in set if we can reach end of rule.
 */
 @NotNull
-public IntervalSet LOOK(@NotNull ATNState s, @Nullable RuleContext ctx) {
+public IntervalSet LOOK(@NotNull ATNState s, @Nullable PredictionContext ctx) {
 IntervalSet r = new IntervalSet();
 boolean seeThruPreds = true; // ignore preds; get all lookahead
-_LOOK(s, ctx, r, new HashSet<ATNConfig>(), seeThruPreds);
+_LOOK(s, ctx != null ? ctx : PredictionContext.EMPTY, ctx == null, r, new HashSet<ATNConfig>(), seeThruPreds);
 return r;
 }
 
@@ -87,7 +88,8 @@ public class LL1Analyzer {
 * rule. Add EPSILON to the set indicating we reached the end of the ruled out having
 * to match a token.
 */
-protected void _LOOK(@NotNull ATNState s, @Nullable RuleContext ctx,
+protected void _LOOK(@NotNull ATNState s, @NotNull PredictionContext ctx,
+boolean epsilonStopState,
 @NotNull IntervalSet look,
 @NotNull Set<ATNConfig> lookBusy,
 boolean seeThruPreds)
@@ -97,7 +99,7 @@ public class LL1Analyzer {
 if ( !lookBusy.add(c) ) return;
 
 if ( s instanceof RuleStopState ) {
-if ( ctx==null ) {
+if ( ctx.isEmpty() && epsilonStopState ) {
 look.add(Token.EPSILON);
 return;
 }
@@ -106,7 +108,7 @@ public class LL1Analyzer {
 RuleTransition rt = (RuleTransition)invokingState.transition(0);
 ATNState retState = rt.followState;
 // System.out.println("popping back to "+retState);
-_LOOK(retState, ctx.parent, look, lookBusy, seeThruPreds);
+_LOOK(retState, ctx.parent, epsilonStopState, look, lookBusy, seeThruPreds);
 return;
 }
 }
@@ -115,12 +117,11 @@ public class LL1Analyzer {
 for (int i=0; i<n; i++) {
 Transition t = s.transition(i);
 if ( t.getClass() == RuleTransition.class ) {
-RuleContext newContext =
-new RuleContext(ctx, s.stateNumber, t.target.stateNumber);
-_LOOK(t.target, newContext, look, lookBusy, seeThruPreds);
+PredictionContext newContext = ctx.getChild(s.stateNumber);
+_LOOK(t.target, newContext, epsilonStopState, look, lookBusy, seeThruPreds);
 }
 else if ( t.isEpsilon() && seeThruPreds ) {
-_LOOK(t.target, ctx, look, lookBusy, seeThruPreds);
+_LOOK(t.target, ctx, epsilonStopState, look, lookBusy, seeThruPreds);
 }
 else if ( t.getClass() == WildcardTransition.class ) {
 look.addAll( IntervalSet.of(Token.MIN_USER_TOKEN_TYPE, atn.maxTokenType) );
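_LOOK can no longer use a null context to mean "add EPSILON when we fall off the end of the rule", because PredictionContext.EMPTY now stands in for the empty stack. The public LOOK wrapper encodes that old convention in the new epsilonStopState flag; a commented restatement of the call it makes:

    // in LOOK(): a null ctx from callers becomes the empty stack plus an explicit flag
    _LOOK(s, ctx != null ? ctx : PredictionContext.EMPTY,
          /* epsilonStopState = */ ctx == null,
          r, new HashSet<ATNConfig>(), seeThruPreds);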
@@ -456,7 +456,7 @@ public class LexerATNSimulator extends ATNSimulator {
 protected OrderedHashSet<ATNConfig> computeStartState(@NotNull IntStream input,
 @NotNull ATNState p)
 {
-RuleContext initialContext = RuleContext.EMPTY;
+PredictionContext initialContext = PredictionContext.EMPTY;
 OrderedHashSet<ATNConfig> configs = new OrderedHashSet<ATNConfig>();
 for (int i=0; i<p.getNumberOfTransitions(); i++) {
 ATNState target = p.transition(i).target;
@@ -487,7 +487,7 @@ public class LexerATNSimulator extends ATNSimulator {
 configs.add(config);
 return;
 }
-RuleContext newContext = config.context.parent; // "pop" invoking state
+PredictionContext newContext = config.context.parent; // "pop" invoking state
 ATNState invokingState = atn.states.get(config.context.invokingState);
 RuleTransition rt = (RuleTransition)invokingState.transition(0);
 ATNState retState = rt.followState;
@@ -498,7 +498,7 @@ public class LexerATNSimulator extends ATNSimulator {
 
 // optimization
 if ( !config.state.onlyHasEpsilonTransitions() ) {
-configs.add(config);
+configs.add(config.getAsCached());
 }
 
 ATNState p = config.state;
@@ -514,8 +514,7 @@ public class LexerATNSimulator extends ATNSimulator {
 ATNState p = config.state;
 ATNConfig c = null;
 if ( t.getClass() == RuleTransition.class ) {
-RuleContext newContext =
-new RuleContext(config.context, p.stateNumber, t.target.stateNumber);
+PredictionContext newContext = config.context.getChild(p.stateNumber);
 c = new ATNConfig(config, t.target, newContext);
 }
 else if ( t.getClass() == PredicateTransition.class ) {
@@ -134,8 +134,8 @@ public class ParserATNSimulator<Symbol> extends ATNSimulator {
 if ( debug ) System.out.println("ATN decision "+dfa.decision+
 " exec LA(1)=="+ getLookaheadName(input) +
 ", outerContext="+outerContext.toString(parser));
-RuleContext ctx = RuleContext.EMPTY;
-if ( useContext ) ctx = outerContext;
+PredictionContext ctx = PredictionContext.EMPTY;
+if ( useContext ) ctx = PredictionContext.fromRuleContext(outerContext);
 OrderedHashSet<ATNConfig> s0_closure =
 computeStartState(dfa.decision, dfa.atnStartState, ctx);
 dfa.s0 = addDFAState(dfa, s0_closure);
@@ -167,7 +167,7 @@ public class ParserATNSimulator<Symbol> extends ATNSimulator {
 @NotNull ATNState startState)
 {
 DFA dfa = new DFA(startState);
-RuleContext ctx = RuleContext.EMPTY;
+PredictionContext ctx = PredictionContext.EMPTY;
 OrderedHashSet<ATNConfig> s0_closure =
 computeStartState(dfa.decision, startState, ctx);
 return execATN(input, dfa, input.index(), s0_closure, false);
@@ -593,9 +593,9 @@ public class ParserATNSimulator<Symbol> extends ATNSimulator {
 
 @NotNull
 public OrderedHashSet<ATNConfig> computeStartState(int decision, @NotNull ATNState p,
-@Nullable RuleContext ctx)
+@NotNull PredictionContext ctx)
 {
-RuleContext initialContext = ctx; // always at least the implicit call to start rule
+PredictionContext initialContext = ctx; // always at least the implicit call to start rule
 OrderedHashSet<ATNConfig> configs = new OrderedHashSet<ATNConfig>();
 prevAccept = null; // might reach end rule; track
 prevAcceptIndex = -1;
@@ -659,7 +659,7 @@ public class ParserATNSimulator<Symbol> extends ATNSimulator {
 if ( config.state instanceof RuleStopState ) {
 // We hit rule end. If we have context info, use it
 if ( config.context!=null && !config.context.isEmpty() ) {
-RuleContext newContext = config.context.parent; // "pop" invoking state
+PredictionContext newContext = config.context.parent; // "pop" invoking state
 ATNState invokingState = atn.states.get(config.context.invokingState);
 RuleTransition rt = (RuleTransition)invokingState.transition(0);
 ATNState retState = rt.followState;
@@ -678,7 +678,7 @@ public class ParserATNSimulator<Symbol> extends ATNSimulator {
 if ( debug ) System.out.println("NONGREEDY at stop state of "+
 getRuleName(config.state.ruleIndex));
 // don't purse past end of a rule for any nongreedy decision
-configs.add(config);
+configs.add(config.getAsCached());
 return;
 }
 if ( debug ) System.out.println("FALLING off rule "+
@@ -688,7 +688,7 @@ public class ParserATNSimulator<Symbol> extends ATNSimulator {
 
 ATNState p = config.state;
 // optimization
-if ( !p.onlyHasEpsilonTransitions() ) configs.add(config);
+if ( !p.onlyHasEpsilonTransitions() ) configs.add(config.getAsCached());
 
 for (int i=0; i<p.getNumberOfTransitions(); i++) {
 Transition t = p.transition(i);
@@ -748,7 +748,7 @@ public class ParserATNSimulator<Symbol> extends ATNSimulator {
 if ( parser != null ) {
 System.out.println("context surrounding pred is "+
 parser.getRuleInvocationStack());
-System.out.println("config.context="+config.context.toString(parser));
+System.out.println("config.context="+config.context.toString(parser, config.state.stateNumber));
 }
 }
 // We know the correct context in exactly one spot: in the original
@@ -756,7 +756,7 @@ public class ParserATNSimulator<Symbol> extends ATNSimulator {
 // when the context stack is empty and we've not dipped into
 // the outer context.
 boolean inContext =
-config.context==RuleContext.EMPTY && config.reachesIntoOuterContext==0;
+config.context==PredictionContext.EMPTY && config.reachesIntoOuterContext==0;
 // RuleContext ctx = null;
 // if ( inContext ) ctx = outerContext;
 
@@ -803,8 +803,8 @@ public class ParserATNSimulator<Symbol> extends ATNSimulator {
 ", ctx="+config.context);
 }
 ATNState p = config.state;
-RuleContext newContext =
-new RuleContext(config.context, p.stateNumber, t.target.stateNumber);
+PredictionContext newContext =
+config.context.getChild(p.stateNumber);
 return new ATNConfig(config, t.target, newContext);
 }
 
@@ -902,8 +902,8 @@ public class ParserATNSimulator<Symbol> extends ATNSimulator {
 System.out.println("we reach state "+c.state.stateNumber+
 " in rule "+
 (parser !=null ? getRuleName(c.state.ruleIndex) :"n/a")+
-" alts "+goal.alt+","+c.alt+" from ctx "+goal.context.toString(parser)
-+" and "+ c.context.toString(parser));
+" alts "+goal.alt+","+c.alt+" from ctx "+goal.context.toString(parser, goal.state.stateNumber)
++" and "+ c.context.toString(parser, c.state.stateNumber));
 }
 if ( ambigAlts==null ) ambigAlts = new HashSet<Integer>();
 ambigAlts.add(goal.alt);
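Across both simulators the stack discipline keeps its shape; only the node type changes. Entering a rule pushes the invoking state with getChild, and reaching a rule stop state pops back to parent. A hedged summary using the local names already present in closure() above:

    // rule invocation: push the invoking ATN state onto the prediction stack
    PredictionContext pushed = config.context.getChild(p.stateNumber);
    // rule stop state: pop the invoking state back off
    PredictionContext popped = config.context.parent;

Unlike the old new RuleContext(...) push, getChild can hand back an already cached node, so equal stacks can collapse to a single shared chain.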
@@ -0,0 +1,240 @@
+package org.antlr.v4.runtime.atn;
+
+import org.antlr.v4.runtime.BaseRecognizer;
+import org.antlr.v4.runtime.RuleContext;
+import org.antlr.v4.runtime.misc.NotNull;
+import org.antlr.v4.runtime.misc.Nullable;
+
+import java.util.HashMap;
+import java.util.Map;
+import java.util.concurrent.atomic.AtomicInteger;
+
+public class PredictionContext {
+private static final AtomicInteger NEXT_ID = new AtomicInteger(1);
+
+public static final PredictionContext EMPTY = new PredictionContext();
+
+private static final boolean DISABLE_CACHE = false;
+public static final Map<Long, PredictionContext> contextCache = new HashMap<Long, PredictionContext>();
+
+@Nullable
+public final PredictionContext parent;
+
+public final int invokingState;
+
+private final int id;
+private final int cachedHashCode;
+
+private PredictionContext() {
+this(null, -1, true);
+}
+
+private PredictionContext(@Nullable PredictionContext parent, int invokingState, boolean cached) {
+this.parent = parent;
+this.invokingState = invokingState;
+this.id = cached ? NEXT_ID.getAndIncrement() : 0;
+
+int hashCode = 7;
+hashCode = 5 * hashCode + (parent != null ? parent.hashCode() : 0);
+hashCode = 5 * hashCode + invokingState;
+hashCode = Math.max(1, Math.abs(hashCode));
+this.cachedHashCode = cached ? -hashCode : hashCode;
+}
+
+public static PredictionContext fromRuleContext(@NotNull RuleContext outerContext) {
+return fromRuleContext(outerContext, false);
+}
+
+public static PredictionContext fromRuleContext(@NotNull RuleContext outerContext, boolean cached) {
+if (outerContext.isEmpty()) {
+return PredictionContext.EMPTY;
+}
+
+PredictionContext parent;
+if (outerContext.parent != null) {
+parent = PredictionContext.fromRuleContext(outerContext.parent, cached);
+} else {
+parent = PredictionContext.EMPTY;
+}
+
+return parent.getChild(outerContext.invokingState, cached);
+}
+
+public PredictionContext getAsCached() {
+if (DISABLE_CACHE || isCached()) {
+return this;
+}
+
+assert parent != null;
+return parent.getAsCached().getChild(invokingState, true);
+}
+
+public final PredictionContext getChild(int invokingState) {
+return getChild(invokingState, false);
+}
+
+public PredictionContext getChild(int invokingState, boolean cached) {
+if (DISABLE_CACHE || !isCached() || invokingState < 0) {
+return new PredictionContext(this, invokingState, false);
+}
+
+long parent = (long)(this.id) << 32;
+long key = parent + invokingState;
+
+synchronized (contextCache) {
+PredictionContext child = contextCache.get(key);
+
+if (child == null) {
+child = new PredictionContext(this, invokingState, cached);
+if (cached) {
+contextCache.put(key, child);
+}
+}
+
+return child;
+}
+}
+
+public boolean isCached() {
+return isEmpty() || cachedHashCode < 0;
+}
+
+public boolean isEmpty() {
+return parent == null;
+}
+
+/** Two contexts conflict() if they are equals() or one is a stack suffix
+* of the other. For example, contexts [21 12 $] and [21 9 $] do not
+* conflict, but [21 $] and [21 12 $] do conflict. Note that I should
+* probably not show the $ in this case. There is a dummy node for each
+* stack that just means empty; $ is a marker that's all.
+*
+* This is used in relation to checking conflicts associated with a
+* single NFA state's configurations within a single DFA state.
+* If there are configurations s and t within a DFA state such that
+* s.state=t.state && s.alt != t.alt && s.ctx conflicts t.ctx then
+* the DFA state predicts more than a single alt--it's nondeterministic.
+* Two contexts conflict if they are the same or if one is a suffix
+* of the other.
+*
+* When comparing contexts, if one context has a stack and the other
+* does not then they should be considered the same context. The only
+* way for an NFA state p to have an empty context and a nonempty context
+* is the case when closure falls off end of rule without a call stack
+* and re-enters the rule with a context. This resolves the issue I
+* discussed with Sriram Srinivasan Feb 28, 2005 about not terminating
+* fast enough upon nondeterminism.
+*/
+public boolean conflictsWith(PredictionContext other) {
+return this.suffix(other); // || this.equals(other);
+}
+
+/** [$] suffix any context
+* [21 $] suffix [21 12 $]
+* [21 12 $] suffix [21 $]
+* [21 18 $] suffix [21 18 12 9 $]
+* [21 18 12 9 $] suffix [21 18 $]
+* [21 12 $] not suffix [21 9 $]
+*
+* Example "[21 $] suffix [21 12 $]" means: rule r invoked current rule
+* from state 21. Rule s invoked rule r from state 12 which then invoked
+* current rule also via state 21. While the context prior to state 21
+* is different, the fact that both contexts emanate from state 21 implies
+* that they are now going to track perfectly together. Once they
+* converged on state 21, there is no way they can separate. In other
+* words, the prior stack state is not consulted when computing where to
+* go in the closure operation. ?$ and ??$ are considered the same stack.
+* If ? is popped off then $ and ?$ remain; they are now an empty and
+* nonempty context comparison. So, if one stack is a suffix of
+* another, then it will still degenerate to the simple empty stack
+* comparison case.
+*/
+protected boolean suffix(PredictionContext other) {
+PredictionContext sp = this;
+// if one of the contexts is empty, it never enters loop and returns true
+while ( sp.parent!=null && other.parent!=null ) {
+if ( sp.invokingState != other.invokingState ) {
+return false;
+}
+sp = sp.parent;
+other = other.parent;
+}
+//System.out.println("suffix");
+return true;
+}
+
+@Override
+public int hashCode() {
+return Math.abs(cachedHashCode);
+}
+
+@Override
+public boolean equals(Object o) {
+if (this == o) {
+return true;
+} else if (!(o instanceof PredictionContext)) {
+return false;
+}
+
+PredictionContext other = (PredictionContext)o;
+if ( this.hashCode() != other.hashCode() ) {
+return false; // can't be same if hash is different
+}
+
+PredictionContext sp = this;
+while ( sp!=null && other!=null ) {
+if (sp == other) {
+return true;
+}
+
+if (sp.isCached() && other.isCached()) {
+return false;
+}
+
+if ( sp.invokingState != other.invokingState) {
+return false;
+}
+
+sp = sp.parent;
+other = other.parent;
+}
+
+return sp == null && other == null;
+}
+
+@Override
+public String toString() {
+return toString(null, -1);
+}
+
+public String toString(BaseRecognizer<?> recognizer, int currentState) {
+return toString(recognizer, PredictionContext.EMPTY, currentState);
+}
+
+public String toString(BaseRecognizer<?> recognizer, PredictionContext stop, int currentState) {
+StringBuilder buf = new StringBuilder();
+PredictionContext p = this;
+int stateNumber = currentState;
+buf.append("[");
+while ( p != null && p != stop ) {
+if ( recognizer!=null ) {
+ATN atn = recognizer.getATN();
+ATNState s = atn.states.get(stateNumber);
+String ruleName = recognizer.getRuleNames()[s.ruleIndex];
+buf.append(ruleName);
+if ( p.parent != null ) buf.append(" ");
+// ATNState invoker = atn.states.get(ctx.invokingState);
+// RuleTransition rt = (RuleTransition)invoker.transition(0);
+// buf.append(recog.getRuleNames()[rt.target.ruleIndex]);
+}
+else {
+if ( !p.isEmpty() ) buf.append(p.invokingState);
+if ( p.parent != null && !p.parent.isEmpty() ) buf.append(" ");
+}
+stateNumber = p.invokingState;
+p = p.parent;
+}
+buf.append("]");
+return buf.toString();
+}
+}
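Taken together, the new class is an immutable, shareable stand-in for the parse-time RuleContext stack during prediction. A hedged usage sketch, with made-up state numbers written in the [.. $] notation of the Javadoc above:

    PredictionContext a = PredictionContext.EMPTY.getChild(21);              // stack [21 $]
    PredictionContext b = PredictionContext.EMPTY.getChild(12).getChild(21); // stack [21 12 $]
    PredictionContext c = PredictionContext.EMPTY.getChild(9).getChild(21);  // stack [21 9 $]

    a.conflictsWith(b);                         // true: [21 $] is a stack suffix of [21 12 $]
    b.conflictsWith(c);                         // false: the stacks diverge at 12 vs 9
    PredictionContext shared = b.getAsCached(); // interns each node in contextCache; equal stacks then share one chain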
@@ -58,7 +58,7 @@ public class TreeParserATNFactory extends ParserATNFactory {
 for (int i=0; i<firstChildStates.size(); i++) {
 ATNState firstChild = firstChildStates.get(i);
 LL1Analyzer analyzer = new LL1Analyzer(atn);
-IntervalSet look = analyzer.LOOK(firstChild, RuleContext.EMPTY);
+IntervalSet look = analyzer.LOOK(firstChild, PredictionContext.EMPTY);
 TreePatternAST root = treePatternRootNodes.get(i);
 
 if ( look.contains(Token.UP) ) {
@@ -35,6 +35,7 @@ import org.antlr.v4.runtime.RuleContext;
 import org.antlr.v4.runtime.Token;
 import org.antlr.v4.runtime.atn.ATNState;
 import org.antlr.v4.runtime.atn.LL1Analyzer;
+import org.antlr.v4.runtime.atn.PredictionContext;
 import org.antlr.v4.runtime.misc.IntervalSet;
 import org.antlr.v4.tool.ast.GrammarAST;
 import org.antlr.v4.tool.ast.TreePatternAST;
@@ -80,7 +81,7 @@ public class MatchTree extends RuleElement {
 boolean isNullable(TreePatternAST rootNode) {
 ATNState firstChildState = rootNode.downState.transition(0).target;
 LL1Analyzer analyzer = new LL1Analyzer(firstChildState.atn);
-IntervalSet look = analyzer.LOOK(firstChildState, RuleContext.EMPTY);
+IntervalSet look = analyzer.LOOK(firstChildState, PredictionContext.EMPTY);
 return look.contains(Token.UP);
 }
 