Merge pull request #235 from sharwell/prediction-fixes
Prediction fixes
commit c9ceb48a3c
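Reviewer note: the core of this PR is a refactoring repeated in both simulators below — the inline "find or build the next DFA state" logic is split into two overridable steps, getExistingTargetState() (pure cache lookup) and computeTargetState() (build the state and wire the edge, or record ERROR). The following is a minimal, self-contained sketch of that shape in plain Java; it is not ANTLR code, and every name in it (EdgeCacheSketch, expensiveSimulation, ...) is invented purely for illustration.

import java.util.HashMap;
import java.util.Map;

final class EdgeCacheSketch {
    static final Integer ERROR = -1;                   // stand-in for ATNSimulator.ERROR
    final Map<Long, Integer> edges = new HashMap<>();  // (state, symbol) -> cached target

    // Step 1: cheap lookup; null means "no edge cached yet".
    Integer getExistingTargetState(int state, int symbol) {
        return edges.get(key(state, symbol));
    }

    // Step 2: expensive computation; caches the result, including failures.
    Integer computeTargetState(int state, int symbol) {
        Integer target = (symbol >= 0) ? Integer.valueOf(state + 1) : null; // placeholder for the real reach/closure work
        if (target == null) {
            target = ERROR;                            // remember that this edge leads nowhere
        }
        edges.put(key(state, symbol), target);         // add the edge so the next visit is a lookup
        return target;
    }

    // The caller's loop: try the cache, fall back to computation, treat ERROR as "stop".
    Integer nextState(int state, int symbol) {
        Integer target = getExistingTargetState(state, symbol);
        if (target == null) {
            target = computeTargetState(state, symbol);
        }
        return ERROR.equals(target) ? null : target;
    }

    private static long key(int state, int symbol) {
        return ((long) state << 32) | (symbol & 0xFFFFFFFFL);
    }
}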
@@ -101,6 +101,10 @@ public abstract class ATNSimulator {

public abstract void reset();

public PredictionContextCache getSharedContextCache() {
return sharedContextCache;
}

public PredictionContext getCachedContext(PredictionContext context) {
if ( sharedContextCache==null ) return context;

@@ -41,7 +41,6 @@ import org.antlr.v4.runtime.misc.Interval;
import org.antlr.v4.runtime.misc.NotNull;
import org.antlr.v4.runtime.misc.Nullable;

import java.io.OutputStream;
import java.util.Locale;

/** "dup" of ParserInterpreter */

@@ -158,7 +157,6 @@ public class LexerATNSimulator extends ATNSimulator {
mode = Lexer.DEFAULT_MODE;
}

// only called from test code from outside
protected int matchATN(@NotNull CharStream input) {
ATNState startState = atn.modeToStartState.get(mode);

@@ -218,36 +216,13 @@ public class LexerATNSimulator extends ATNSimulator {
// This optimization makes a lot of sense for loops within DFA.
// A character will take us back to an existing DFA state
// that already has lots of edges out of it. e.g., .* in comments.
ATNConfigSet closure = s.configs;
DFAState target = null;
if ( s.edges != null && t >= MIN_DFA_EDGE && t <= MAX_DFA_EDGE ) {
target = s.edges[t - MIN_DFA_EDGE];
if (target == ERROR) {
break;
}

if (debug && target != null) {
System.out.println("reuse state "+s.stateNumber+
" edge to "+target.stateNumber);
}
DFAState target = getExistingTargetState(s, t);
if (target == null) {
target = computeTargetState(input, s, t);
}

if (target == null) {
ATNConfigSet reach = new OrderedATNConfigSet();

// if we don't find an existing DFA state
// Fill reach starting from closure, following t transitions
getReachableConfigSet(input, closure, reach, t);

if ( reach.isEmpty() ) { // we got nowhere on t from s
// we got nowhere on t, don't throw out this knowledge; it'd
// cause a failover from DFA later.
addDFAEdge(s, t, ERROR);
break; // stop when we can't match any more char
}

// Add an edge from s to target DFA found/created for reach
target = addDFAEdge(s, t, reach);
if (target == ERROR) {
break;
}

if (target.isAcceptState) {

@@ -268,6 +243,64 @@ public class LexerATNSimulator extends ATNSimulator {
return failOrAccept(prevAccept, input, s.configs, t);
}

/**
* Get an existing target state for an edge in the DFA. If the target state
* for the edge has not yet been computed or is otherwise not available,
* this method returns {@code null}.
*
* @param s The current DFA state
* @param t The next input symbol
* @return The existing target DFA state for the given input symbol
* {@code t}, or {@code null} if the target state for this edge is not
* already cached
*/
@Nullable
protected DFAState getExistingTargetState(@NotNull DFAState s, int t) {
if (s.edges == null || t < MIN_DFA_EDGE || t > MAX_DFA_EDGE) {
return null;
}

DFAState target = s.edges[t - MIN_DFA_EDGE];
if (debug && target != null) {
System.out.println("reuse state "+s.stateNumber+
" edge to "+target.stateNumber);
}

return target;
}

/**
* Compute a target state for an edge in the DFA, and attempt to add the
* computed state and corresponding edge to the DFA.
*
* @param input The input stream
* @param s The current DFA state
* @param t The next input symbol
*
* @return The computed target DFA state for the given input symbol
* {@code t}. If {@code t} does not lead to a valid DFA state, this method
* returns {@link #ERROR}.
*/
@NotNull
protected DFAState computeTargetState(@NotNull CharStream input, @NotNull DFAState s, int t) {
ATNConfigSet reach = new OrderedATNConfigSet();

// if we don't find an existing DFA state
// Fill reach starting from closure, following t transitions
getReachableConfigSet(input, s.configs, reach, t);

if ( reach.isEmpty() ) { // we got nowhere on t from s
// we got nowhere on t, don't throw out this knowledge; it'd
// cause a failover from DFA later.
addDFAEdge(s, t, ERROR);
// stop when we can't match any more char
return ERROR;
}

// Add an edge from s to target DFA found/created for reach
return addDFAEdge(s, t, reach);
}

protected int failOrAccept(SimState prevAccept, CharStream input,
ATNConfigSet reach, int t)
{

@@ -341,7 +374,7 @@ public class LexerATNSimulator extends ATNSimulator {
}

@Nullable
public ATNState getReachableTarget(Transition trans, int t) {
protected ATNState getReachableTarget(Transition trans, int t) {
if (trans.matches(t, Lexer.MIN_CHAR_VALUE, Lexer.MAX_CHAR_VALUE)) {
return trans.target;
}

@@ -444,7 +477,7 @@ public class LexerATNSimulator extends ATNSimulator {

// side-effect: can alter configs.hasSemanticContext
@Nullable
public LexerATNConfig getEpsilonTarget(@NotNull CharStream input,
protected LexerATNConfig getEpsilonTarget(@NotNull CharStream input,
@NotNull LexerATNConfig config,
@NotNull Transition t,
@NotNull ATNConfigSet configs,

@@ -650,8 +683,8 @@ public class LexerATNSimulator extends ATNSimulator {
}
}

@Nullable
public DFA getDFA(int mode) {
@NotNull
public final DFA getDFA(int mode) {
return decisionToDFA[mode];
}

@@ -1,175 +0,0 @@
/*
* [The "BSD license"]
* Copyright (c) 2012 Terence Parr
* Copyright (c) 2012 Sam Harwell
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/

package org.antlr.v4.runtime.atn;

public class ParserATNPathFinder /*extends ParserATNSimulator*/ {

// public ParserATNPathFinder(@Nullable Parser parser, @NotNull ATN atn, @NotNull DFA[] decisionToDFA) {
// super(parser, atn, decisionToDFA);
// }
//
// /** Given an input sequence, as a subset of the input stream, trace the path through the
// * ATN starting at s. The path returned includes s and the final target of the last input
// * symbol. If there are multiple paths through the ATN to the final state, it uses the first
// * method finds. This is used to figure out how input sequence is matched in more than one
// * way between the alternatives of a decision. It's only that decision we are concerned with
// * and so if there are ambiguous decisions further along, we will ignore them for the
// * purposes of computing the path to the final state. To figure out multiple paths for
// * decision, use this method on the left edge of the alternatives of the decision in question.
// *
// * TODO: I haven't figured out what to do with nongreedy decisions yet
// * TODO: preds. unless i create rule specific ctxs, i can't eval preds. also must eval args!
// */
// public TraceTree trace(@NotNull ATNState s, @Nullable RuleContext ctx,
// TokenStream input, int start, int stop)
// {
// System.out.println("REACHES "+s.stateNumber+" start state");
// List<TraceTree> leaves = new ArrayList<TraceTree>();
// HashSet<ATNState>[] busy = new HashSet[stop-start+1];
// for (int i = 0; i < busy.length; i++) {
// busy[i] = new HashSet<ATNState>();
// }
// TraceTree path = _trace(s, ctx, ctx, input, start, start, stop, leaves, busy);
// if ( path!=null ) path.leaves = leaves;
// return path;
// }
//
// /** Returns true if we found path */
// public TraceTree _trace(@NotNull ATNState s, RuleContext initialContext, RuleContext ctx,
// TokenStream input, int start, int i, int stop,
// List<TraceTree> leaves, @NotNull Set<ATNState>[] busy)
// {
// TraceTree root = new TraceTree(s);
// if ( i>stop ) {
// leaves.add(root); // track final states
// System.out.println("leaves=" + leaves);
// return root;
// }
//
// if ( !busy[i-start].add(s) ) {
// System.out.println("already visited "+s.stateNumber+" at input "+i+"="+input.get(i).getText());
// return null;
// }
// busy[i-start].add(s);
//
// System.out.println("TRACE "+s.stateNumber+" at input "+input.get(i).getText());
//
// if ( s instanceof RuleStopState) {
// // We hit rule end. If we have context info, use it
// if ( ctx!=null && !ctx.isEmpty() ) {
// System.out.println("stop state "+s.stateNumber+", ctx="+ctx);
// ATNState invokingState = atn.states.get(ctx.invokingState);
// RuleTransition rt = (RuleTransition)invokingState.transition(0);
// ATNState retState = rt.followState;
// return _trace(retState, initialContext, ctx.parent, input, start, i, stop, leaves, busy);
// }
// else {
// // else if we have no context info, just chase follow links (if greedy)
// System.out.println("FALLING off rule "+getRuleName(s.ruleIndex));
// }
// }
//
// int n = s.getNumberOfTransitions();
// boolean aGoodPath = false;
// TraceTree found;
// for (int j=0; j<n; j++) {
// Transition t = s.transition(j);
// if ( t.getClass() == RuleTransition.class ) {
// RuleContext newContext =
// new RuleContext(ctx, s.stateNumber);
// found = _trace(t.target, initialContext, newContext, input, start, i, stop, leaves, busy);
// if ( found!=null ) {aGoodPath=true; root.addChild(found);}
// continue;
// }
// if ( t instanceof PredicateTransition ) {
// found = predTransition(initialContext, ctx, input, start, i, stop, leaves, busy, root, t);
// if ( found!=null ) {aGoodPath=true; root.addChild(found);}
// continue;
// }
// if ( t.isEpsilon() ) {
// found = _trace(t.target, initialContext, ctx, input, start, i, stop, leaves, busy);
// if ( found!=null ) {aGoodPath=true; root.addChild(found);}
// continue;
// }
// if ( t.getClass() == WildcardTransition.class ) {
// System.out.println("REACHES " + t.target.stateNumber + " matching input " + input.get(i).getText());
// found = _trace(t.target, initialContext, ctx, input, start, i+1, stop, leaves, busy);
// if ( found!=null ) {aGoodPath=true; root.addChild(found);}
// continue;
// }
// IntervalSet set = t.label();
// if ( set!=null ) {
// if ( t instanceof NotSetTransition ) {
// if ( !set.contains(input.get(i).getType()) ) {
// System.out.println("REACHES " + t.target.stateNumber + " matching input " + input.get(i).getText());
// found = _trace(t.target, initialContext, ctx, input, start, i+1, stop, leaves, busy);
// if ( found!=null ) {aGoodPath=true; root.addChild(found);}
// }
// }
// else {
// if ( set.contains(input.get(i).getType()) ) {
// System.out.println("REACHES " + t.target.stateNumber + " matching input " + input.get(i).getText());
// found = _trace(t.target, initialContext, ctx, input, start, i+1, stop, leaves, busy);
// if ( found!=null ) {aGoodPath=true; root.addChild(found);}
// }
// }
// }
// }
// if ( aGoodPath ) return root; // found at least one transition leading to success
// return null;
// }
//
// public TraceTree predTransition(RuleContext initialContext, RuleContext ctx, TokenStream input, int start,
// int i, int stop, List<TraceTree> leaves, Set<ATNState>[] busy,
// TraceTree root, Transition t)
// {
// SemanticContext.Predicate pred = ((PredicateTransition) t).getPredicate();
// boolean pass;
// if ( pred.isCtxDependent ) {
// if ( ctx instanceof ParserRuleContext && ctx==initialContext ) {
// System.out.println("eval pred "+pred+"="+pred.eval(parser, ctx));
// pass = pred.eval(parser, ctx);
// }
// else {
// pass = true; // see thru ctx dependent when out of context
// }
// }
// else {
// System.out.println("eval pred "+pred+"="+pred.eval(parser, initialContext));
// pass = pred.eval(parser, ctx);
// }
// if ( pass ) {
// return _trace(t.target, initialContext, ctx, input, start, i, stop, leaves, busy);
// }
// return null;
// }

}

@@ -269,7 +269,8 @@ public class ParserATNSimulator extends ATNSimulator {
public final DFA[] decisionToDFA;

/** SLL, LL, or LL + exact ambig detection? */
protected PredictionMode mode = PredictionMode.LL;
@NotNull
private PredictionMode mode = PredictionMode.LL;

/** Each prediction operation uses a cache for merge of prediction contexts.
* Don't keep around as it wastes huge amounts of memory. DoubleKeyMap

@@ -348,7 +349,7 @@ public class ParserATNSimulator extends ATNSimulator {
}
}

public int predictATN(@NotNull DFA dfa, @NotNull TokenStream input,
protected int predictATN(@NotNull DFA dfa, @NotNull TokenStream input,
@Nullable ParserRuleContext outerContext)
{
if ( outerContext==null ) outerContext = ParserRuleContext.EMPTY;

@@ -412,7 +413,7 @@ public class ParserATNSimulator extends ATNSimulator {
conflict
conflict + preds
*/
public int execATN(@NotNull DFA dfa, @NotNull DFAState s0,
protected int execATN(@NotNull DFA dfa, @NotNull DFAState s0,
@NotNull TokenStream input, int startIndex,
ParserRuleContext outerContext)
{

@@ -429,131 +430,31 @@ public class ParserATNSimulator extends ATNSimulator {
int t = input.LA(1);

while (true) { // while more work
DFAState D = null;
if (previousD.edges != null && t + 1 >= 0 && t + 1 < previousD.edges.length) {
D = previousD.edges[t + 1];
if ( D == ERROR ) {
throw noViableAlt(input, outerContext, previousD.configs, startIndex);
}
}

DFAState D = getExistingTargetState(previousD, t);
if (D == null) {
// System.out.println("REACH "+getLookaheadName(input));
ATNConfigSet reach = computeReachSet(previousD.configs, t, false);
if ( reach==null ) {
// if any configs in previous dipped into outer context, that
// means that input up to t actually finished entry rule
// at least for SLL decision. Full LL doesn't dip into outer
// so don't need special case.
// We will get an error no matter what so delay until after
// decision; better error message. Also, no reachable target
// ATN states in SLL implies LL will also get nowhere.
// If conflict in states that dip out, choose min since we
// will get error no matter what.
int alt = getAltThatFinishedDecisionEntryRule(previousD.configs);
if ( alt!=ATN.INVALID_ALT_NUMBER ) {
// return w/o altering DFA
return alt;
}

addDFAEdge(dfa, previousD, t, ERROR);
throw noViableAlt(input, outerContext, previousD.configs, startIndex);
}

// create new target state; we'll add to DFA after it's complete
D = new DFAState(reach);

int predictedAlt = getUniqueAlt(reach);

if ( debug ) {
Collection<BitSet> altSubSets = PredictionMode.getConflictingAltSubsets(reach);
System.out.println("SLL altSubSets="+altSubSets+
", configs="+reach+
", predict="+predictedAlt+", allSubsetsConflict="+
PredictionMode.allSubsetsConflict(altSubSets)+", conflictingAlts="+
getConflictingAlts(reach));
}

if ( predictedAlt!=ATN.INVALID_ALT_NUMBER ) {
// NO CONFLICT, UNIQUELY PREDICTED ALT
D.isAcceptState = true;
D.configs.uniqueAlt = predictedAlt;
D.prediction = predictedAlt;
}
else if ( PredictionMode.hasSLLConflictTerminatingPrediction(mode, reach) ) {
// MORE THAN ONE VIABLE ALTERNATIVE
D.configs.conflictingAlts = getConflictingAlts(reach);
if ( mode == PredictionMode.SLL ) {
// stop w/o failover for sure
if ( outerContext == ParserRuleContext.EMPTY || // in grammar start rule
!D.configs.dipsIntoOuterContext ) // didn't fall out of rule
{
// SPECIAL CASE WHERE SLL KNOWS CONFLICT IS AMBIGUITY
// report even if preds
reportAmbiguity(dfa, D, startIndex, input.index(),
D.configs.conflictingAlts, D.configs);
}
// always stop at D
D.isAcceptState = true;
D.prediction = D.configs.conflictingAlts.nextSetBit(0);
if ( debug ) System.out.println("SLL RESOLVED TO "+D.prediction+" for "+D);
predictedAlt = D.prediction;
// Falls through to check predicates below
}
else {
// IF PREDS, MIGHT RESOLVE TO SINGLE ALT => SLL (or syntax error)
if ( D.configs.hasSemanticContext ) {
predicateDFAState(D, atn.getDecisionState(dfa.decision));
if (D.predicates != null) {
int conflictIndex = input.index();
if (conflictIndex != startIndex) {
input.seek(startIndex);
}

BitSet alts = evalSemanticContext(D.predicates, outerContext, true);
if ( alts.cardinality()==1 ) {
if ( debug ) System.out.println("Full LL avoided");
return alts.nextSetBit(0);
}

if (conflictIndex != startIndex) {
// restore the index so reporting the fallback to full
// context occurs with the index at the correct spot
input.seek(conflictIndex);
}
}
}

// RETRY WITH FULL LL CONTEXT
if ( debug ) System.out.println("RETRY with outerContext="+outerContext);
ATNConfigSet s0_closure =
computeStartState(dfa.atnStartState,
outerContext,
true);

// not accept state: isCtxSensitive
D.requiresFullContext = true; // always force DFA to ATN simulate
D.prediction = ATN.INVALID_ALT_NUMBER;
D = addDFAEdge(dfa, previousD, t, D);

return execATNWithFullContext(dfa, D, s0_closure,
input, startIndex,
outerContext,
D.configs.conflictingAlts.nextSetBit(0));
}
}

if ( D.isAcceptState && D.configs.hasSemanticContext ) {
predicateDFAState(D, atn.getDecisionState(dfa.decision));
if (D.predicates != null) {
D.prediction = ATN.INVALID_ALT_NUMBER;
}
}

// all adds to dfa are done after we've created full D state
D = addDFAEdge(dfa, previousD, t, D);
D = computeTargetState(dfa, previousD, t);
}
else if ( D.requiresFullContext && mode != PredictionMode.SLL ) {

if (D == ERROR) {
// if any configs in previous dipped into outer context, that
// means that input up to t actually finished entry rule
// at least for SLL decision. Full LL doesn't dip into outer
// so don't need special case.
// We will get an error no matter what so delay until after
// decision; better error message. Also, no reachable target
// ATN states in SLL implies LL will also get nowhere.
// If conflict in states that dip out, choose min since we
// will get error no matter what.
int alt = getAltThatFinishedDecisionEntryRule(previousD.configs);
if ( alt!=ATN.INVALID_ALT_NUMBER ) {
// return w/o altering DFA
return alt;
}

throw noViableAlt(input, outerContext, previousD.configs, startIndex);
}

if ( D.requiresFullContext && mode != PredictionMode.SLL ) {
// IF PREDS, MIGHT RESOLVE TO SINGLE ALT => SLL (or syntax error)
if ( D.predicates!=null ) {
if ( debug ) System.out.println("DFA state has preds in DFA sim LL failover");

@@ -619,6 +520,88 @@ public class ParserATNSimulator extends ATNSimulator {
}
}

/**
* Get an existing target state for an edge in the DFA. If the target state
* for the edge has not yet been computed or is otherwise not available,
* this method returns {@code null}.
*
* @param previousD The current DFA state
* @param t The next input symbol
* @return The existing target DFA state for the given input symbol
* {@code t}, or {@code null} if the target state for this edge is not
* already cached
*/
@Nullable
protected DFAState getExistingTargetState(@NotNull DFAState previousD, int t) {
DFAState[] edges = previousD.edges;
if (edges == null || t + 1 < 0 || t + 1 >= edges.length) {
return null;
}

return edges[t + 1];
}

/**
* Compute a target state for an edge in the DFA, and attempt to add the
* computed state and corresponding edge to the DFA.
*
* @param dfa The DFA
* @param previousD The current DFA state
* @param t The next input symbol
*
* @return The computed target DFA state for the given input symbol
* {@code t}. If {@code t} does not lead to a valid DFA state, this method
* returns {@link #ERROR}.
*/
@NotNull
protected DFAState computeTargetState(@NotNull DFA dfa, @NotNull DFAState previousD, int t) {
ATNConfigSet reach = computeReachSet(previousD.configs, t, false);
if ( reach==null ) {
addDFAEdge(dfa, previousD, t, ERROR);
return ERROR;
}

// create new target state; we'll add to DFA after it's complete
DFAState D = new DFAState(reach);

int predictedAlt = getUniqueAlt(reach);

if ( debug ) {
Collection<BitSet> altSubSets = PredictionMode.getConflictingAltSubsets(reach);
System.out.println("SLL altSubSets="+altSubSets+
", configs="+reach+
", predict="+predictedAlt+", allSubsetsConflict="+
PredictionMode.allSubsetsConflict(altSubSets)+", conflictingAlts="+
getConflictingAlts(reach));
}

if ( predictedAlt!=ATN.INVALID_ALT_NUMBER ) {
// NO CONFLICT, UNIQUELY PREDICTED ALT
D.isAcceptState = true;
D.configs.uniqueAlt = predictedAlt;
D.prediction = predictedAlt;
}
else if ( PredictionMode.hasSLLConflictTerminatingPrediction(mode, reach) ) {
// MORE THAN ONE VIABLE ALTERNATIVE
D.configs.conflictingAlts = getConflictingAlts(reach);
D.requiresFullContext = true;
// in SLL-only mode, we will stop at this state and return the minimum alt
D.isAcceptState = true;
D.prediction = D.configs.conflictingAlts.nextSetBit(0);
}

if ( D.isAcceptState && D.configs.hasSemanticContext ) {
predicateDFAState(D, atn.getDecisionState(dfa.decision));
if (D.predicates != null) {
D.prediction = ATN.INVALID_ALT_NUMBER;
}
}

// all adds to dfa are done after we've created full D state
D = addDFAEdge(dfa, previousD, t, D);
return D;
}

protected void predicateDFAState(DFAState dfaState, DecisionState decisionState) {
// We need to test all predicates, even in DFA states that
// uniquely predict alternative.

@@ -640,12 +623,12 @@ public class ParserATNSimulator extends ATNSimulator {
}

// comes back with reach.uniqueAlt set to a valid alt
public int execATNWithFullContext(DFA dfa,
DFAState D, // how far we got before failing over
@NotNull ATNConfigSet s0,
@NotNull TokenStream input, int startIndex,
ParserRuleContext outerContext,
int SLL_min_alt) // todo: is this in D as min ambig alts?
protected int execATNWithFullContext(DFA dfa,
DFAState D, // how far we got before failing over
@NotNull ATNConfigSet s0,
@NotNull TokenStream input, int startIndex,
ParserRuleContext outerContext,
int SLL_min_alt) // todo: is this in D as min ambig alts?
{
retry_with_context++;
reportAttemptingFullContext(dfa, s0, startIndex, input.index());

@@ -946,7 +929,7 @@ public class ParserATNSimulator extends ATNSimulator {
}

@NotNull
public ATNConfigSet computeStartState(@NotNull ATNState p,
protected ATNConfigSet computeStartState(@NotNull ATNState p,
@Nullable RuleContext ctx,
boolean fullCtx)
{

@@ -965,7 +948,7 @@ public class ParserATNSimulator extends ATNSimulator {
}

@Nullable
public ATNState getReachableTarget(@NotNull Transition trans, int ttype) {
protected ATNState getReachableTarget(@NotNull Transition trans, int ttype) {
if (trans.matches(ttype, 0, atn.maxTokenType)) {
return trans.target;
}

@@ -973,7 +956,7 @@ public class ParserATNSimulator extends ATNSimulator {
return null;
}

public SemanticContext[] getPredsForAmbigAlts(@NotNull BitSet ambigAlts,
protected SemanticContext[] getPredsForAmbigAlts(@NotNull BitSet ambigAlts,
@NotNull ATNConfigSet configs,
int nalts)
{

@@ -1017,7 +1000,7 @@ public class ParserATNSimulator extends ATNSimulator {
return altToPred;
}

public DFAState.PredPrediction[] getPredicatePredictions(BitSet ambigAlts,
protected DFAState.PredPrediction[] getPredicatePredictions(BitSet ambigAlts,
SemanticContext[] altToPred)
{
List<DFAState.PredPrediction> pairs = new ArrayList<DFAState.PredPrediction>();

@@ -1042,7 +1025,7 @@ public class ParserATNSimulator extends ATNSimulator {
return pairs.toArray(new DFAState.PredPrediction[pairs.size()]);
}

public int getAltThatFinishedDecisionEntryRule(ATNConfigSet configs) {
protected int getAltThatFinishedDecisionEntryRule(ATNConfigSet configs) {
IntervalSet alts = new IntervalSet();
for (ATNConfig c : configs) {
if ( c.reachesIntoOuterContext>0 || (c.state instanceof RuleStopState && c.context.hasEmptyPath()) ) {

@@ -1059,7 +1042,7 @@ public class ParserATNSimulator extends ATNSimulator {
* then we stop at the first predicate that evaluates to true. This
* includes pairs with null predicates.
*/
public BitSet evalSemanticContext(@NotNull DFAState.PredPrediction[] predPredictions,
protected BitSet evalSemanticContext(@NotNull DFAState.PredPrediction[] predPredictions,
ParserRuleContext outerContext,
boolean complete)
{

@@ -1227,7 +1210,7 @@ public class ParserATNSimulator extends ATNSimulator {
}

@Nullable
public ATNConfig getEpsilonTarget(@NotNull ATNConfig config,
protected ATNConfig getEpsilonTarget(@NotNull ATNConfig config,
@NotNull Transition t,
boolean collectPredicates,
boolean inContext,

@@ -1255,13 +1238,13 @@ public class ParserATNSimulator extends ATNSimulator {
}

@NotNull
public ATNConfig actionTransition(@NotNull ATNConfig config, @NotNull ActionTransition t) {
protected ATNConfig actionTransition(@NotNull ATNConfig config, @NotNull ActionTransition t) {
if ( debug ) System.out.println("ACTION edge "+t.ruleIndex+":"+t.actionIndex);
return new ATNConfig(config, t.target);
}

@Nullable
public ATNConfig predTransition(@NotNull ATNConfig config,
protected ATNConfig predTransition(@NotNull ATNConfig config,
@NotNull PredicateTransition pt,
boolean collectPredicates,
boolean inContext,

@@ -1309,7 +1292,7 @@ public class ParserATNSimulator extends ATNSimulator {
}

@NotNull
public ATNConfig ruleTransition(@NotNull ATNConfig config, @NotNull RuleTransition t) {
protected ATNConfig ruleTransition(@NotNull ATNConfig config, @NotNull RuleTransition t) {
if ( debug ) {
System.out.println("CALL rule "+getRuleName(t.target.ruleIndex)+
", ctx="+config.context);

@@ -1321,7 +1304,7 @@ public class ParserATNSimulator extends ATNSimulator {
return new ATNConfig(config, t.target, newContext);
}

public BitSet getConflictingAlts(ATNConfigSet configs) {
protected BitSet getConflictingAlts(ATNConfigSet configs) {
Collection<BitSet> altsets = PredictionMode.getConflictingAltSubsets(configs);
return PredictionMode.getAlts(altsets);
}

@@ -1416,7 +1399,7 @@ public class ParserATNSimulator extends ATNSimulator {
}

@NotNull
public NoViableAltException noViableAlt(@NotNull TokenStream input,
protected NoViableAltException noViableAlt(@NotNull TokenStream input,
@NotNull ParserRuleContext outerContext,
@NotNull ATNConfigSet configs,
int startIndex)

@@ -1427,7 +1410,7 @@ public class ParserATNSimulator extends ATNSimulator {
configs, outerContext);
}

public static int getUniqueAlt(@NotNull ATNConfigSet configs) {
protected static int getUniqueAlt(@NotNull ATNConfigSet configs) {
int alt = ATN.INVALID_ALT_NUMBER;
for (ATNConfig c : configs) {
if ( alt == ATN.INVALID_ALT_NUMBER ) {

@@ -1440,25 +1423,75 @@ public class ParserATNSimulator extends ATNSimulator {
return alt;
}

/**
* Add an edge to the DFA, if possible. This method calls
* {@link #addDFAState} to ensure the {@code to} state is present in the
* DFA. If {@code from} is {@code null}, or if {@code t} is outside the
* range of edges that can be represented in the DFA tables, this method
* returns without adding the edge to the DFA.
* <p/>
* If {@code to} is {@code null}, this method returns {@code null}.
* Otherwise, this method returns the {@link DFAState} returned by calling
* {@link #addDFAState} for the {@code to} state.
*
* @param dfa The DFA
* @param from The source state for the edge
* @param t The input symbol
* @param to The target state for the edge
*
* @return If {@code to} is {@code null}, this method returns {@code null};
* otherwise this method returns the result of calling {@link #addDFAState}
* on {@code to}
*/
protected DFAState addDFAEdge(@NotNull DFA dfa,
@Nullable DFAState from,
int t,
@Nullable DFAState to)
{
if ( debug ) System.out.println("EDGE "+from+" -> "+to+" upon "+getTokenName(t));
if ( from==null || t < -1 || to == null ) return to;
if ( debug ) {
System.out.println("EDGE "+from+" -> "+to+" upon "+getTokenName(t));
}

if (to == null) {
return null;
}

to = addDFAState(dfa, to); // used existing if possible not incoming
if (from == null || t < -1 || t > atn.maxTokenType) {
return to;
}

synchronized (from) {
if ( from.edges==null ) {
from.edges = new DFAState[atn.maxTokenType+1+1]; // TODO: make adaptive
from.edges = new DFAState[atn.maxTokenType+1+1];
}

from.edges[t+1] = to; // connect
}
if ( debug ) System.out.println("DFA=\n"+dfa.toString(parser!=null?parser.getTokenNames():null));

if ( debug ) {
System.out.println("DFA=\n"+dfa.toString(parser!=null?parser.getTokenNames():null));
}

return to;
}

/** Add D if not there and return D. Return previous if already present. */
/**
* Add state {@code D} to the DFA if it is not already present, and return
* the actual instance stored in the DFA. If a state equivalent to {@code D}
* is already in the DFA, the existing state is returned. Otherwise this
* method returns {@code D} after adding it to the DFA.
* <p/>
* If {@code D} is {@link #ERROR}, this method returns {@link #ERROR} and
* does not change the DFA.
*
* @param dfa The dfa
* @param D The DFA state to add
* @return The state stored in the DFA. This will be either the existing
* state if {@code D} is already in the DFA, or {@code D} itself if the
* state was not already present.
*/
@NotNull
protected DFAState addDFAState(@NotNull DFA dfa, @NotNull DFAState D) {
if (D == ERROR) {
return D;

@@ -1479,7 +1512,7 @@ public class ParserATNSimulator extends ATNSimulator {
}
}

public void reportAttemptingFullContext(DFA dfa, ATNConfigSet configs, int startIndex, int stopIndex) {
protected void reportAttemptingFullContext(DFA dfa, ATNConfigSet configs, int startIndex, int stopIndex) {
if ( debug || retry_debug ) {
Interval interval = Interval.of(startIndex, stopIndex);
System.out.println("reportAttemptingFullContext decision="+dfa.decision+":"+configs+

@@ -1488,7 +1521,7 @@ public class ParserATNSimulator extends ATNSimulator {
if ( parser!=null ) parser.getErrorListenerDispatch().reportAttemptingFullContext(parser, dfa, startIndex, stopIndex, configs);
}

public void reportContextSensitivity(DFA dfa, ATNConfigSet configs, int startIndex, int stopIndex) {
protected void reportContextSensitivity(DFA dfa, ATNConfigSet configs, int startIndex, int stopIndex) {
if ( debug || retry_debug ) {
Interval interval = Interval.of(startIndex, stopIndex);
System.out.println("reportContextSensitivity decision="+dfa.decision+":"+configs+

@@ -1498,7 +1531,7 @@ public class ParserATNSimulator extends ATNSimulator {
}

/** If context sensitive parsing, we know it's ambiguity not conflict */
public void reportAmbiguity(@NotNull DFA dfa, DFAState D, int startIndex, int stopIndex,
protected void reportAmbiguity(@NotNull DFA dfa, DFAState D, int startIndex, int stopIndex,
@NotNull BitSet ambigAlts,
@NotNull ATNConfigSet configs)
{

@@ -1528,9 +1561,12 @@ public class ParserATNSimulator extends ATNSimulator {
ambigAlts, configs);
}

public void setPredictionMode(PredictionMode mode) {
public final void setPredictionMode(@NotNull PredictionMode mode) {
this.mode = mode;
}

public PredictionMode getPredictionMode() { return mode; }
@NotNull
public final PredictionMode getPredictionMode() {
return mode;
}
}

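Reviewer note: with the mode field now private and the accessors made final, callers configure prediction only through getInterpreter(). A short usage sketch under standard assumptions (a generated parser and the ANTLR 4 runtime on the classpath); PredictionModeConfig is a hypothetical helper invented here, while setPredictionMode/getPredictionMode are the methods changed above.

import org.antlr.v4.runtime.Parser;
import org.antlr.v4.runtime.atn.PredictionMode;

final class PredictionModeConfig {
    // Switch a parser to SLL; unlike LL (the default), SLL stops at the SLL
    // answer instead of retrying conflicted decisions with full context.
    static void useFastSLL(Parser parser) {
        parser.getInterpreter().setPredictionMode(PredictionMode.SLL);
    }

    static PredictionMode current(Parser parser) {
        return parser.getInterpreter().getPredictionMode();
    }
}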
@@ -91,29 +91,6 @@ public class DFA {
return result;
}

public List<Set<ATNState>> getATNStatesAlongPath(ParserATNSimulator atn,
List<DFAState> dfaStates,
TokenStream input, int start, int stop)
{
List<Set<ATNState>> atnStates = new ArrayList<Set<ATNState>>();
int i = start;
for (DFAState D : dfaStates) {
Set<ATNState> fullSet = D.configs.getStates();
Set<ATNState> statesInvolved = new HashSet<ATNState>();
for (ATNState astate : fullSet) {
Transition t = astate.transition(0);
ATNState target = atn.getReachableTarget(t, input.get(i).getType());
if ( target!=null ) {
statesInvolved.add(astate);
}
}
System.out.println("statesInvolved upon "+input.get(i).getText()+"="+statesInvolved);
i++;
atnStates.add(statesInvolved);
}
return atnStates;
}

@Override
public String toString() { return toString(null); }

@@ -79,20 +79,20 @@ public class DFASerializer {
return label;
}

String getStateString(DFAState s) {
@NotNull
protected String getStateString(@NotNull DFAState s) {
int n = s.stateNumber;
String stateStr = "s"+n;
final String baseStateStr = (s.isAcceptState ? ":" : "") + "s" + n + (s.requiresFullContext ? "^" : "");
if ( s.isAcceptState ) {
if ( s.predicates!=null ) {
stateStr = ":s"+n+"=>"+Arrays.toString(s.predicates);
return baseStateStr + "=>" + Arrays.toString(s.predicates);
}
else {
stateStr = ":s"+n+"=>"+s.prediction;
return baseStateStr + "=>" + s.prediction;
}
}
else if ( s.requiresFullContext) {
stateStr = "s"+n+"^";
else {
return baseStateStr;
}
return stateStr;
}
}

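Reviewer note: this serializer change is what drives the expected-string updates in the tests further down — ':' marks an accept state, '^' marks a state that requires full context, and "=>n" shows the predicted alternative, so "s2^" becomes ":s2^=>1". A tiny stand-alone sketch of that formatting rule (it deliberately omits the predicate case, where the predicates array is printed instead of the alternative number):

final class StateStringSketch {
    static String format(int n, boolean accept, boolean fullCtx, Integer prediction) {
        String base = (accept ? ":" : "") + "s" + n + (fullCtx ? "^" : "");
        return accept ? base + "=>" + prediction : base;
    }

    public static void main(String[] args) {
        System.out.println(format(2, true, true, 1));      // ":s2^=>1"
        System.out.println(format(3, true, false, 3));     // ":s3=>3"
        System.out.println(format(1, false, false, null)); // "s1"
    }
}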
@@ -101,14 +101,6 @@ public class ParserInterpreter {
parser.sharedContextCache);
}

public synchronized int predictATN(@NotNull DFA dfa, @NotNull TokenStream input,
@Nullable ParserRuleContext outerContext,
boolean useContext)
{
// sync to ensure this entry doesn't race for dfa access
return atnSimulator.predictATN(dfa, input, outerContext);
}

public int adaptivePredict(@NotNull TokenStream input, int decision,
@Nullable ParserRuleContext outerContext)
{

@@ -243,14 +243,14 @@ public class TestATNParserPrediction extends BaseTest {
};
String[] dfa = {
"s0-'a'->s1\n" +
"s1-EOF->s2^\n",
"s1-EOF->:s2^=>1\n",

"s0-'a'->s1\n" +
"s1-EOF->s2^\n" +
"s1-EOF->:s2^=>1\n" +
"s1-'b'->:s3=>3\n",

"s0-'a'->s1\n" +
"s1-EOF->s2^\n" +
"s1-EOF->:s2^=>1\n" +
"s1-'b'->:s3=>3\n",
};
checkDFAConstruction(lg, g, decision, inputs, dfa);

@@ -278,16 +278,16 @@ public class TestATNParserPrediction extends BaseTest {
String[] dfa = {
"s0-'a'->s1\n" +
"s1-'b'->s2\n" +
"s2-EOF->s3^\n",
"s2-EOF->:s3^=>1\n",

"s0-'a'->s1\n" +
"s1-'b'->s2\n" +
"s2-EOF->s3^\n" +
"s2-EOF->:s3^=>1\n" +
"s2-'c'->:s4=>3\n",

"s0-'a'->s1\n" +
"s1-'b'->s2\n" +
"s2-EOF->s3^\n" +
"s2-EOF->:s3^=>1\n" +
"s2-'c'->:s4=>3\n",
};
checkDFAConstruction(lg, g, decision, inputs, dfa);

@@ -489,11 +489,7 @@ public class TestATNParserPrediction extends BaseTest {
// ParserATNSimulator interp = new ParserATNSimulator(atn);
TokenStream input = new IntTokenStream(types);
ParserInterpreter interp = new ParserInterpreter(g, input);
DecisionState startState = atn.decisionToState.get(decision);
DFA dfa = new DFA(startState, decision);
int alt = interp.predictATN(dfa, input, ParserRuleContext.EMPTY, false);

System.out.println(dot.getDOT(dfa, false));
int alt = interp.adaptivePredict(input, decision, ParserRuleContext.EMPTY);

assertEquals(expectedAlt, alt);

@@ -507,44 +503,6 @@ public class TestATNParserPrediction extends BaseTest {
assertEquals(expectedAlt, alt);
}

public synchronized DFA getDFA(LexerGrammar lg, Grammar g, String ruleName,
String inputString, ParserRuleContext ctx)
{
// sync to ensure multiple tests don't race on dfa access
Tool.internalOption_ShowATNConfigsInDFA = true;
ATN lexatn = createATN(lg, true);
LexerATNSimulator lexInterp = new LexerATNSimulator(lexatn,null,null);

semanticProcess(lg);
g.importVocab(lg);
semanticProcess(g);

ParserATNFactory f = new ParserATNFactory(g);
ATN atn = f.createATN();

// DOTGenerator dot = new DOTGenerator(g);
// System.out.println(dot.getDOT(atn.ruleToStartState.get(g.getRule("a"))));
// System.out.println(dot.getDOT(atn.ruleToStartState.get(g.getRule("b"))));
// System.out.println(dot.getDOT(atn.ruleToStartState.get(g.getRule("e"))));

ParserATNSimulator interp =
new ParserATNSimulator(atn, new DFA[atn.getNumberOfDecisions()],null);
IntegerList types = getTokenTypesViaATN(inputString, lexInterp);
System.out.println(types);
TokenStream input = new IntTokenStream(types);
try {
DecisionState startState = atn.decisionToState.get(0);
DFA dfa = new DFA(startState);
// Rule r = g.getRule(ruleName);
//ATNState startState = atn.ruleToStartState.get(r);
interp.predictATN(dfa, input, ctx);
}
catch (NoViableAltException nvae) {
nvae.printStackTrace(System.err);
}
return null;
}

public void checkDFAConstruction(LexerGrammar lg, Grammar g, int decision,
String[] inputString, String[] dfaString)
{

@@ -56,7 +56,7 @@ public class TestFullContextParsing extends BaseTest {
"abc", true);
String expecting =
"Decision 0:\n" +
"s0-ID->s1^\n"; // ctx sensitive
"s0-ID->:s1^=>1\n"; // ctx sensitive
assertEquals(expecting, result);
assertEquals("line 1:0 reportAttemptingFullContext d=0, input='abc'\n",
this.stderrDuringParse);

@@ -78,7 +78,7 @@ public class TestFullContextParsing extends BaseTest {
String expecting =
"Decision 1:\n" +
"s0-INT->s1\n" +
"s1-ID->s2^\n";
"s1-ID->:s2^=>1\n";
assertEquals(expecting, result);
assertEquals("line 1:5 reportAttemptingFullContext d=1, input='34abc'\n" +
"line 1:2 reportContextSensitivity d=1, input='34'\n",

@@ -89,7 +89,7 @@ public class TestFullContextParsing extends BaseTest {
expecting =
"Decision 1:\n" +
"s0-INT->s1\n" +
"s1-ID->s2^\n";
"s1-ID->:s2^=>1\n";
assertEquals(expecting, result);
assertEquals("line 1:5 reportAttemptingFullContext d=1, input='34abc'\n" +
"line 1:5 reportContextSensitivity d=1, input='34abc'\n",

@@ -112,7 +112,7 @@ public class TestFullContextParsing extends BaseTest {
String expecting =
"Decision 2:\n" +
"s0-INT->s1\n" +
"s1-ID->s2^\n";
"s1-ID->:s2^=>1\n";
assertEquals(expecting, result);
assertEquals("line 1:5 reportAttemptingFullContext d=2, input='34abc'\n" +
"line 1:2 reportContextSensitivity d=2, input='34'\n" +

@@ -138,7 +138,7 @@ public class TestFullContextParsing extends BaseTest {
String expecting =
"Decision 0:\n" +
"s0-INT->s1\n" +
"s1-ID->s2^\n"; // Must point at accept state
"s1-ID->:s2^=>1\n"; // Must point at accept state
assertEquals(expecting, result);
assertEquals("line 1:3 reportAttemptingFullContext d=0, input='34abc'\n" +
"line 1:0 reportContextSensitivity d=0, input='34'\n",

@@ -172,7 +172,7 @@ public class TestFullContextParsing extends BaseTest {
input, true);
expecting =
"Decision 1:\n" +
"s0-'else'->s1^\n";
"s0-'else'->:s1^=>1\n";
assertEquals(expecting, result);
// Technically, this input sequence is not ambiguous because else
// uniquely predicts going into the optional subrule. else cannot

@@ -190,7 +190,7 @@ public class TestFullContextParsing extends BaseTest {
input, true);
expecting =
"Decision 1:\n" +
"s0-'else'->s1^\n" +
"s0-'else'->:s1^=>1\n" +
"s0-'}'->:s2=>2\n";
assertEquals(expecting, result);
assertEquals("line 1:29 reportAttemptingFullContext d=1, input='else'\n" +

@@ -207,7 +207,7 @@ public class TestFullContextParsing extends BaseTest {
input, true);
expecting =
"Decision 1:\n" +
"s0-'else'->s1^\n";
"s0-'else'->:s1^=>1\n";
assertEquals(expecting, result);
assertEquals("line 1:29 reportAttemptingFullContext d=1, input='else'\n" +
"line 1:38 reportContextSensitivity d=1, input='elsefooelse'\n" +

@@ -222,7 +222,7 @@ public class TestFullContextParsing extends BaseTest {
input, true);
expecting =
"Decision 1:\n" +
"s0-'else'->s1^\n" +
"s0-'else'->:s1^=>1\n" +
"s0-'}'->:s2=>2\n";
assertEquals(expecting, result);
assertEquals("line 1:19 reportAttemptingFullContext d=1, input='else'\n" +

@@ -238,7 +238,7 @@ public class TestFullContextParsing extends BaseTest {
input, true);
expecting =
"Decision 1:\n" +
"s0-'else'->s1^\n" +
"s0-'else'->:s1^=>1\n" +
"s0-'}'->:s2=>2\n";
assertEquals(expecting, result);
assertEquals("line 1:19 reportAttemptingFullContext d=1, input='else'\n" +

@@ -51,6 +51,7 @@ import org.antlr.v4.runtime.atn.ATNConfig;
import org.antlr.v4.runtime.atn.ATNConfigSet;
import org.antlr.v4.runtime.atn.LexerATNSimulator;
import org.antlr.v4.runtime.atn.ParserATNSimulator;
import org.antlr.v4.runtime.atn.PredictionContextCache;
import org.antlr.v4.runtime.atn.PredictionMode;
import org.antlr.v4.runtime.dfa.DFA;
import org.antlr.v4.runtime.dfa.DFAState;

@@ -239,6 +240,13 @@ public class TestPerformance extends BaseTest {

private static final boolean SHOW_CONFIG_STATS = false;

/**
* If {@code true}, detailed statistics for the number of DFA edges were
* taken while parsing each file, as well as the number of DFA edges which
* required on-the-fly computation.
*/
private static final boolean COMPUTE_TRANSITION_STATS = false;

private static final boolean REPORT_SYNTAX_ERRORS = true;
private static final boolean REPORT_AMBIGUITIES = false;
private static final boolean REPORT_FULL_CONTEXT = false;

@@ -741,6 +749,9 @@ public class TestPerformance extends BaseTest {
lexer.setInputStream(input);
} else {
lexer = lexerCtor.newInstance(input);
if (COMPUTE_TRANSITION_STATS) {
lexer.setInterpreter(new StatisticsLexerATNSimulator(lexer, lexer.getATN(), lexer.getInterpreter().decisionToDFA, lexer.getInterpreter().getSharedContextCache()));
}
sharedLexers[thread] = lexer;
if (!REUSE_LEXER_DFA) {
Field decisionToDFAField = LexerATNSimulator.class.getDeclaredField("decisionToDFA");

@@ -775,6 +786,9 @@ public class TestPerformance extends BaseTest {
parser.setInputStream(tokens);
} else {
parser = parserCtor.newInstance(tokens);
if (COMPUTE_TRANSITION_STATS) {
parser.setInterpreter(new StatisticsParserATNSimulator(parser, parser.getATN(), parser.getInterpreter().decisionToDFA, parser.getInterpreter().getSharedContextCache()));
}
sharedParsers[thread] = parser;
}

@@ -835,6 +849,9 @@ public class TestPerformance extends BaseTest {
parser.setInputStream(tokens);
} else {
parser = parserCtor.newInstance(tokens);
if (COMPUTE_TRANSITION_STATS) {
parser.setInterpreter(new StatisticsParserATNSimulator(parser, parser.getATN(), parser.getInterpreter().decisionToDFA, parser.getInterpreter().getSharedContextCache()));
}
sharedParsers[thread] = parser;
}

@@ -894,7 +911,12 @@ public class TestPerformance extends BaseTest {
public final long endTime;

public final int lexerDFASize;
public final long lexerTotalTransitions;
public final long lexerComputedTransitions;

public final int parserDFASize;
public final long parserTotalTransitions;
public final long parserComputedTransitions;

public FileParseResult(String sourceName, int checksum, @Nullable ParseTree parseTree, int tokenCount, long startTime, Lexer lexer, Parser parser) {
this.sourceName = sourceName;

@@ -906,10 +928,17 @@ public class TestPerformance extends BaseTest {

if (lexer != null) {
LexerATNSimulator interpreter = lexer.getInterpreter();
if (interpreter instanceof StatisticsLexerATNSimulator) {
lexerTotalTransitions = ((StatisticsLexerATNSimulator)interpreter).totalTransitions;
lexerComputedTransitions = ((StatisticsLexerATNSimulator)interpreter).computedTransitions;
} else {
lexerTotalTransitions = 0;
lexerComputedTransitions = 0;
}

int dfaSize = 0;
for (DFA dfa : interpreter.decisionToDFA) {
if (dfa != null && dfa.states != null) {
if (dfa != null) {
dfaSize += dfa.states.size();
}
}

@@ -917,14 +946,23 @@ public class TestPerformance extends BaseTest {
lexerDFASize = dfaSize;
} else {
lexerDFASize = 0;
lexerTotalTransitions = 0;
lexerComputedTransitions = 0;
}

if (parser != null) {
ParserATNSimulator interpreter = parser.getInterpreter();
if (interpreter instanceof StatisticsParserATNSimulator) {
parserTotalTransitions = ((StatisticsParserATNSimulator)interpreter).totalTransitions;
parserComputedTransitions = ((StatisticsParserATNSimulator)interpreter).computedTransitions;
} else {
parserTotalTransitions = 0;
parserComputedTransitions = 0;
}

int dfaSize = 0;
for (DFA dfa : interpreter.decisionToDFA) {
if (dfa != null && dfa.states != null) {
if (dfa != null) {
dfaSize += dfa.states.size();
}
}

@@ -932,10 +970,64 @@ public class TestPerformance extends BaseTest {
parserDFASize = dfaSize;
} else {
parserDFASize = 0;
parserTotalTransitions = 0;
parserComputedTransitions = 0;
}
}
}

private static class StatisticsLexerATNSimulator extends LexerATNSimulator {

public long totalTransitions;
public long computedTransitions;

public StatisticsLexerATNSimulator(ATN atn, DFA[] decisionToDFA, PredictionContextCache sharedContextCache) {
super(atn, decisionToDFA, sharedContextCache);
}

public StatisticsLexerATNSimulator(Lexer recog, ATN atn, DFA[] decisionToDFA, PredictionContextCache sharedContextCache) {
super(recog, atn, decisionToDFA, sharedContextCache);
}

@Override
protected DFAState getExistingTargetState(DFAState s, int t) {
totalTransitions++;
return super.getExistingTargetState(s, t);
}

@Override
protected DFAState computeTargetState(CharStream input, DFAState s, int t) {
computedTransitions++;
return super.computeTargetState(input, s, t);
}
}

private static class StatisticsParserATNSimulator extends ParserATNSimulator {

public long totalTransitions;
public long computedTransitions;

public StatisticsParserATNSimulator(ATN atn, DFA[] decisionToDFA, PredictionContextCache sharedContextCache) {
super(atn, decisionToDFA, sharedContextCache);
}

public StatisticsParserATNSimulator(Parser parser, ATN atn, DFA[] decisionToDFA, PredictionContextCache sharedContextCache) {
super(parser, atn, decisionToDFA, sharedContextCache);
}

@Override
protected DFAState getExistingTargetState(DFAState previousD, int t) {
totalTransitions++;
return super.getExistingTargetState(previousD, t);
}

@Override
protected DFAState computeTargetState(DFA dfa, DFAState previousD, int t) {
computedTransitions++;
return super.computeTargetState(dfa, previousD, t);
}
}

private static class DescriptiveErrorListener extends BaseErrorListener {
public static DescriptiveErrorListener INSTANCE = new DescriptiveErrorListener();
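Reviewer note on the TestPerformance changes: the transition counters are only collected when COMPUTE_TRANSITION_STATS is set to true, which swaps in the StatisticsLexerATNSimulator/StatisticsParserATNSimulator subclasses shown above. Those subclasses simply increment a counter in the two new protected hooks (getExistingTargetState for every edge lookup, computeTargetState for the edges that had to be built on the fly) before delegating to super, and each FileParseResult then records total versus computed DFA transitions per file.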