diff --git a/runtime/Java/src/org/antlr/v4/runtime/atn/ParserInterpreter.java b/runtime/Java/src/org/antlr/v4/runtime/atn/ParserInterpreter.java
deleted file mode 100644
index 4b121f39f..000000000
--- a/runtime/Java/src/org/antlr/v4/runtime/atn/ParserInterpreter.java
+++ /dev/null
@@ -1,657 +0,0 @@
-/*
- [The "BSD license"]
- Copyright (c) 2011 Terence Parr
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
-
- 1. Redistributions of source code must retain the above copyright
-    notice, this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright
-    notice, this list of conditions and the following disclaimer in the
-    documentation and/or other materials provided with the distribution.
- 3. The name of the author may not be used to endorse or promote products
-    derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-package org.antlr.v4.runtime.atn;
-
-import org.antlr.v4.runtime.*;
-import org.antlr.v4.runtime.dfa.*;
-import org.antlr.v4.runtime.misc.OrderedHashSet;
-import org.stringtemplate.v4.misc.MultiMap;
-
-import java.util.*;
-
-public class ParserInterpreter extends ATNInterpreter {
-	public static boolean debug = false;
-	public static boolean dfa_debug = false;
-
-	public static int ATN_failover = 0;
-	public static int predict_calls = 0;
-
-	protected BaseRecognizer parser;
-
-	public Map ctxToDFAs;
-	public Map[] decisionToDFAPerCtx; // TODO: USE THIS ONE
-	public DFA[] decisionToDFA;
-	protected boolean userWantsCtxSensitive = false;
-
-	protected Set closureBusy = new HashSet();
-
-	public ParserInterpreter(ATN atn) {
-		super(atn);
-		ctxToDFAs = new HashMap();
-		decisionToDFA = new DFA[atn.getNumberOfDecisions()];
-	}
-
-	public ParserInterpreter(BaseRecognizer parser, ATN atn) {
-		super(atn);
-		this.parser = parser;
-		ctxToDFAs = new HashMap();
-		decisionToDFA = new DFA[atn.getNumberOfDecisions()+1];
-//		DOTGenerator dot = new DOTGenerator(null);
-//		System.out.println(dot.getDOT(atn.rules.get(0), parser.getRuleNames()));
-//		System.out.println(dot.getDOT(atn.rules.get(1), parser.getRuleNames()));
-	}
-
-	public int adaptivePredict(TokenStream input, int decision, RuleContext ctx) {
-		predict_calls++;
-		DFA dfa = decisionToDFA[decision];
-		if ( dfa==null || dfa.s0==null ) {
-			ATNState startState = atn.decisionToState.get(decision);
-			decisionToDFA[decision] = dfa = new DFA(startState);
-			dfa.decision = decision;
-			return predictATN(dfa, input, decision, ctx, false);
-		}
-		else {
-			//dump(dfa);
-			// start with the DFA
-			int m = input.mark();
-			int alt = execDFA(input, dfa, dfa.s0, ctx);
-			input.seek(m);
-			return alt;
-		}
-	}
-
-	public int predictATN(DFA dfa, TokenStream input,
-						  int decision,
-						  RuleContext originalContext,
-						  boolean useContext)
-	{
-		if ( originalContext==null ) originalContext = RuleContext.EMPTY;
-		RuleContext ctx = RuleContext.EMPTY;
-		if ( useContext ) ctx = originalContext;
-		OrderedHashSet s0_closure = computeStartState(dfa.atnStartState, ctx);
-		dfa.s0 = addDFAState(dfa, s0_closure);
-		if ( prevAccept!=null ) {
-			dfa.s0.isAcceptState = true;
-			dfa.s0.prediction = prevAccept.alt;
-		}
-
-		int alt = 0;
-		int m = input.mark();
-		try {
-			alt = execATN(input, dfa, m, s0_closure, originalContext, useContext);
-		}
-		catch (NoViableAltException nvae) { dumpDeadEndConfigs(nvae); throw nvae; }
-		finally {
-			input.seek(m);
-		}
-		if ( debug ) System.out.println("DFA after predictATN: "+dfa.toString());
-		return alt;
-	}
-
-	// doesn't create DFA when matching
-	public int matchATN(TokenStream input, ATNState startState) {
-		DFA dfa = new DFA(startState);
-		RuleContext ctx = new ParserRuleContext();
-		OrderedHashSet s0_closure = computeStartState(startState, ctx);
-		return execATN(input, dfa, input.index(), s0_closure, ctx, false);
-	}
-
-	public int execDFA(TokenStream input, DFA dfa, DFAState s0, RuleContext originalContext) {
-		if ( dfa_debug ) System.out.println("DFA decision "+dfa.decision+" exec LA(1)=="+input.LT(1));
-//		dump(dfa);
-		DFAState prevAcceptState = null;
-		DFAState s = s0;
-		int t = input.LA(1);
-		int start = input.index();
-		loop:
-		while ( true ) {
-			if ( dfa_debug ) System.out.println("DFA state "+s.stateNumber+" LA(1)=="+t);
-			// TODO: ctxSensitive
-			if ( s.isCtxSensitive ) {
-				Integer predI = s.ctxToPrediction.get(originalContext);
-				if ( dfa_debug ) System.out.println("ctx sensitive state "+originalContext+"->"+predI+
-													" in "+s);
-				if ( predI!=null ) return predI;
-//				System.out.println("start all over with ATN; can't use DFA");
-				// start all over with ATN; can't use DFA
-				input.seek(start);
-				DFA throwAwayDFA = new DFA(dfa.atnStartState);
-				int alt = execATN(input, throwAwayDFA, start, s0.configs, originalContext, false);
-				s.ctxToPrediction.put(originalContext, alt);
-				return alt;
-			}
-			if ( s.isAcceptState ) {
-				if ( dfa_debug ) System.out.println("accept; predict "+s.prediction +" in state "+s.stateNumber);
-				prevAcceptState = s;
-				// keep going unless we're at EOF or state only has one alt number
-				// mentioned in configs; check if something else could match
-				if ( s.complete || t==CharStream.EOF ) break;
-			}
-			// if no edge, pop over to ATN interpreter, update DFA and return
-			if ( s.edges == null || t >= s.edges.length || s.edges[t+1] == null ) {
-				if ( dfa_debug ) System.out.println("no edge for "+t);
-				int alt = -1;
-				if ( dfa_debug ) {
-					System.out.println("ATN exec upon "+
-									   input.toString(start,input.index())+
-									   " at DFA state "+s.stateNumber);
-				}
-				try {
-					alt = execATN(input, dfa, start, s.configs, originalContext, false);
-					// this adds edge even if next state is accept for
-					// same alt; e.g., s0-A->:s1=>2-B->:s2=>2
-					// TODO: This next stuff kills edge, but extra states remain. :(
-					if ( s.isAcceptState && alt!=-1 ) {
-						DFAState d = s.edges[input.LA(1)+1];
-						if ( d.isAcceptState && d.prediction==s.prediction ) {
-							// we can carve it out.
-							s.edges[input.LA(1)+1] = ERROR; // IGNORE really not error
-						}
-					}
-				}
-				catch (NoViableAltException nvae) {
-					alt = -1;
-				}
-				if ( dfa_debug ) {
-					System.out.println("back from DFA update, alt="+alt+", dfa=\n"+dfa);
-					//dump(dfa);
-				}
-				if ( alt==-1 ) {
-					addDFAEdge(s, t, ERROR);
-					break loop; // dead end; no where to go, fall back on prev if any
-				}
-				// action already executed
-				if ( dfa_debug ) System.out.println("DFA decision "+dfa.decision+
-													" predicts "+alt);
-				return alt; // we've updated DFA, exec'd action, and have our deepest answer
-			}
-			DFAState target = s.edges[t+1];
-			if ( target == ERROR ) break;
-			s = target;
-			input.consume();
-			t = input.LA(1);
-		}
-		if ( prevAcceptState==null ) {
-			System.out.println("!!! no viable alt in dfa");
-			return -1;
-		}
-		if ( dfa_debug ) System.out.println("DFA decision "+dfa.decision+
-											" predicts "+prevAcceptState.prediction);
-		return prevAcceptState.prediction;
-	}
-
-	public int execATN(TokenStream input,
-					   DFA dfa,
-					   int startIndex,
-					   OrderedHashSet s0,
-					   RuleContext originalContext,
-					   boolean useContext)
-	{
-		if ( debug ) System.out.println("ATN decision "+dfa.decision+" exec LA(1)=="+input.LT(1));
-		ATN_failover++;
-		OrderedHashSet closure = new OrderedHashSet();
-
-		closure.addAll(s0);
-
-		if ( debug ) System.out.println("start state closure="+closure);
-
-		int t = input.LA(1);
-		if ( t==Token.EOF && prevAccept!=null ) {
-			// computeStartState must have reached end of rule
-			return prevAccept.alt;
-		}
-
-		prevAccept = null;
-		prevAcceptIndex = -1;
-		OrderedHashSet reach = new OrderedHashSet();
-
-		do { // while more work
-			if ( debug ) System.out.println("in reach starting closure: " + closure);
-			int ncl = closure.size();
-			for (int ci=0; ci ambigAlts = getAmbiguousAlts(reach);
-			if ( ambigAlts!=null ) {
-				if ( debug ) {
-					ATNState loc = atn.states.get(originalContext.s);
-					String rname = "n/a";
-					if ( parser !=null ) rname = parser.getRuleNames()[loc.ruleIndex];
-					System.out.println("AMBIG in "+rname+" for alt "+ambigAlts+" upon "+
-									   input.toString(startIndex, input.index()));
-				}
-				dfa.conflict = true; // at least one DFA state is ambiguous
-				if ( !userWantsCtxSensitive ) reportConflict(startIndex, input.index(), ambigAlts, reach);
-
-//				ATNState loc = atn.states.get(originalContext.s);
-//				String rname = recog.getRuleNames()[loc.ruleIndex];
-//				System.out.println("AMBIG orig="+originalContext.toString((BaseRecognizer)recog)+" for alt "+ambigAlts+" upon "+
-//								   input.toString(startIndex, input.index()));
-				if ( !userWantsCtxSensitive || useContext ) {
-					resolveToMinAlt(reach, ambigAlts);
-				}
-				else {
-					return retryWithContext(input, dfa, startIndex, originalContext,
-											closure, t, reach, ambigAlts);
-				}
-			}
-
-			// if reach predicts single alt, can stop
-
-			int uniqueAlt = getUniqueAlt(reach);
-			if ( uniqueAlt!=ATN.INVALID_ALT_NUMBER ) {
-				if ( debug ) System.out.println("PREDICT alt "+uniqueAlt+
-												" decision "+dfa.decision+
-												" at index "+input.index());
-				addDFAEdge(dfa, closure, t, reach);
-				makeAcceptState(dfa, reach, uniqueAlt);
-				return uniqueAlt;
-			}
-
-			if ( reach.size()==0 ) {
-				break;
-			}
-
-			// If we matched t anywhere, need to consume and add closer-t->reach DFA edge
-			// else error if no previous accept
-			input.consume();
-			addDFAEdge(dfa, closure, t, reach);
-			t = input.LA(1);
-
-			// swap to avoid reallocating space
-			OrderedHashSet tmp = reach;
-			reach = closure;
-			closure = tmp;
-			reach.clear();
-		} while ( true );
-
-		if ( prevAccept==null ) {
-			System.out.println("no viable token at input "+input.LT(1)+", index "+input.index());
-			NoViableAltException nvae = new NoViableAltException(parser, input, closure, originalContext);
-			nvae.startIndex = startIndex;
-			throw nvae;
-		}
-
-		if ( debug ) System.out.println("PREDICT " + prevAccept + " index " + prevAccept.alt);
-		return prevAccept.alt;
-	}
-
-	protected int resolveToMinAlt(OrderedHashSet reach, Set ambigAlts) {
-		int min = getMinAlt(ambigAlts);
-		// if predicting, create DFA accept state for resolved alt
-		ambigAlts.remove(min);
-		// kill dead alts so we don't chase them ever
-		killAlts(ambigAlts, reach);
-		if ( debug ) System.out.println("RESOLVED TO "+reach);
-		return min;
-	}
-
-	public int retryWithContext(TokenStream input,
-								DFA dfa,
-								int startIndex,
-								RuleContext originalContext,
-								OrderedHashSet closure,
-								int t,
-								OrderedHashSet reach,
-								Set ambigAlts)
-	{
-		// ASSUMES PREDICT ONLY
-		// retry using context, if any; if none, kill all but min as before
-		if ( debug ) System.out.println("RETRY with ctx="+ originalContext);
-		int min = getMinAlt(ambigAlts);
-		if ( originalContext==RuleContext.EMPTY ) {
-			// no point in retrying with ctx since it's same.
-			// this implies that we have a true ambiguity
-			reportAmbiguity(startIndex, input.index(), ambigAlts, reach);
-			return min;
-		}
-		// otherwise we have to retry with context, filling in tmp DFA.
-		// if it comes back with conflict, we have a true ambiguity
-		input.seek(startIndex); // rewind
-		DFA ctx_dfa = new DFA(dfa.atnStartState);
-		int ctx_alt = predictATN(ctx_dfa, input, dfa.decision, originalContext, true);
-		if ( debug ) System.out.println("retry predicts "+ctx_alt+" vs "+getMinAlt(ambigAlts)+
-										" with conflict="+ctx_dfa.conflict+
-										" dfa="+ctx_dfa);
-		if ( ctx_dfa.conflict ) reportAmbiguity(startIndex, input.index(), ambigAlts, reach);
-		else reportContextSensitivity(startIndex, input.index(), ambigAlts, reach);
-		// it's not context-sensitive; true ambig. fall thru to strip dead alts
-
-		int predictedAlt = ctx_alt;
-		DFAState reachTarget = addDFAEdge(dfa, closure, t, reach);
-		reachTarget.isCtxSensitive = true;
-		if ( reachTarget.ctxToPrediction==null ) {
-			reachTarget.ctxToPrediction = new LinkedHashMap();
-		}
-		reachTarget.ctxToPrediction.put(originalContext, predictedAlt);
-//		System.out.println("RESOLVE to "+predictedAlt);
-		//System.out.println(reachTarget.ctxToPrediction.size()+" size of ctx map");
-		return predictedAlt;
-	}
-
-	public OrderedHashSet computeStartState(ATNState p, RuleContext ctx) {
-		RuleContext initialContext = null;
-		initialContext = ctx; // always at least the implicit call to start rule
-		OrderedHashSet configs = new OrderedHashSet();
-		prevAccept = null; // might reach end rule; track
-		prevAcceptIndex = -1;
-
-		for (int i=0; i configs) {
-		closureBusy.clear();
-		closure(config, configs, closureBusy);
-	}
-
-	protected void closure(ATNConfig config,
-						   OrderedHashSet configs,
-						   Set closureBusy)
-	{
-		if ( debug ) System.out.println("closure("+config+")");
-
-		if ( closureBusy.contains(config) ) return; // avoid infinite recursion
-		closureBusy.add(config);
-
-		if ( config.state instanceof RuleStopState ) {
-			// We hit rule end. If we have context info, use it
-			if ( config.context!=null && !config.context.isEmpty() ) {
-				RuleContext newContext = config.context.parent; // "pop" invoking state
-				ATNState invokingState = atn.states.get(config.context.invokingState);
-				RuleTransition rt = (RuleTransition)invokingState.transition(0);
-				ATNState retState = rt.followState;
-				ATNConfig c = new ATNConfig(retState, config.alt, newContext);
-				closure(c, configs, closureBusy);
-				return;
-			}
-			// else if we have no context info, just chase follow links
-		}
-
-		ATNState p = config.state;
-		// optimization
-		if ( !p.onlyHasEpsilonTransitions() ) configs.add(config);
-
-		for (int i=0; i=0 ) {
-				if ( debug ) System.out.println("DO ACTION "+at.ruleIndex+":"+at.actionIndex);
-				parser.action(at.ruleIndex, at.actionIndex);
-			}
-			else {
-				// non-forced action traversed to get to t.target
-				if ( debug && !config.traversedAction ) {
-					System.out.println("NONFORCED; pruning future pred eval derived from s"+
-									   config.state.stateNumber);
-				}
-				c.traversedAction = true;
-			}
-		}
-		else if ( t.isEpsilon() ) {
-			c = new ATNConfig(config, t.target);
-		}
-		return c;
-	}
-
-	public void reportConflict(int startIndex, int stopIndex, Set alts, OrderedHashSet configs) {
-		if ( parser!=null ) parser.reportConflict(startIndex, stopIndex, alts, configs);
-	}
-
-	public void reportContextSensitivity(int startIndex, int stopIndex, Set alts, OrderedHashSet configs) {
-		if ( parser!=null ) parser.reportContextSensitivity(startIndex, stopIndex, alts, configs);
-	}
-
-	/** If context sensitive parsing, we know it's ambiguity not conflict */
-	public void reportAmbiguity(int startIndex, int stopIndex, Set alts, OrderedHashSet configs) {
-		if ( parser!=null ) parser.reportAmbiguity(startIndex, stopIndex, alts, configs);
-	}
-
-	public static int getUniqueAlt(Collection configs) {
-		int alt = ATN.INVALID_ALT_NUMBER;
-		for (ATNConfig c : configs) {
-			if ( alt == ATN.INVALID_ALT_NUMBER ) {
-				alt = c.alt; // found first alt
-			}
-			else if ( c.alt!=alt ) {
-				return ATN.INVALID_ALT_NUMBER;
-			}
-		}
-		return alt;
-	}
-
-	public Set getAmbiguousAlts(OrderedHashSet configs) {
-//		System.err.println("check ambiguous "+configs);
-		Set ambigAlts = null;
-		int numConfigs = configs.size();
-		// First get a list of configurations for each state.
-		// Most of the time, each state will have one associated configuration.
-		MultiMap stateToConfigListMap =
-			new MultiMap();
-		for (ATNConfig c : configs) {
-			stateToConfigListMap.map(c.state.stateNumber, c);
-		}
-		// potential conflicts are states with > 1 configuration and diff alts
-		for (List configsPerAlt : stateToConfigListMap.values()) {
-			ATNConfig goal = configsPerAlt.get(0);
-			int size = configsPerAlt.size();
-			for (int i=1; i< size; i++) {
-				ATNConfig c = configsPerAlt.get(i);
-				if ( c.alt!=goal.alt ) {
-					//System.out.println("chk stack "+goal+", "+c);
-					boolean sameCtx =
-						(goal.context==null&&c.context==null) ||
-						goal.context.equals(c.context) ||
-						c.context.conflictsWith(goal.context);
-					if ( sameCtx ) {
-						if ( debug ) {
-							System.out.println("we reach state "+c.state.stateNumber+
-											   " in rule "+
-											   (parser !=null ? parser.getRuleNames()[c.state.ruleIndex]:"n/a")+
-											   " alts "+goal.alt+","+c.alt+" from ctx "+goal.context.toString((BaseRecognizer) parser)
-											   +" and "+
-											   c.context.toString((BaseRecognizer) parser));
-						}
-						if ( ambigAlts==null ) ambigAlts = new HashSet();
-						ambigAlts.add(goal.alt);
-						ambigAlts.add(c.alt);
-					}
-				}
-			}
-		}
-		if ( ambigAlts!=null ) {
-			//System.err.println("ambig upon "+input.toString(startIndex, input.index()));
-		}
-		return ambigAlts;
-	}
-
-	public static int getMinAlt(Set ambigAlts) {
-		int min = Integer.MAX_VALUE;
-		for (int alt : ambigAlts) {
-			if ( alt < min ) min = alt;
-		}
-		return min;
-	}
-
-	public static void killAlts(Set alts, OrderedHashSet configs) {
-		int i = 0;
-		while ( i p,
-						   int t,
-						   OrderedHashSet q)
-	{
-//		System.out.println("MOVE "+p+" -> "+q+" upon "+getTokenName(t));
-		DFAState from = addDFAState(dfa, p);
-		DFAState to = addDFAState(dfa, q);
-		addDFAEdge(from, t, to);
-		return to;
-	}
-
-	protected void addDFAEdge(DFAState p, int t, DFAState q) {
-		if ( p==null ) return;
-		if ( p.edges==null ) {
-			p.edges = new DFAState[atn.maxTokenType+1+1]; // TODO: make adaptive
-		}
-		p.edges[t+1] = q; // connect
-	}
-
-	/** See comment on LexerInterpreter.addDFAState. */
-	protected DFAState addDFAState(DFA dfa, OrderedHashSet configs) {
-		DFAState proposed = new DFAState(configs);
-		DFAState existing = dfa.states.get(proposed);
-		if ( existing!=null ) return existing;
-
-		DFAState newState = proposed;
-
-		boolean traversedPredicate = false;
-		for (ATNConfig c : configs) {
-			if ( c.traversedPredicate ) {traversedPredicate = true; break;}
-		}
-
-		if ( traversedPredicate ) return null; // cannot cache
-
-		newState.stateNumber = dfa.states.size();
-		newState.configs = new OrderedHashSet();
-		newState.configs.addAll(configs);
-		dfa.states.put(newState, newState);
-		return newState;
-	}
-
-	public void makeAcceptState(DFA dfa, OrderedHashSet reach, int uniqueAlt) {
-		DFAState accept = dfa.states.get(new DFAState(reach));
-		if ( accept==null ) return;
-		accept.isAcceptState = true;
-		accept.prediction = uniqueAlt;
-		accept.complete = true;
-	}
-
-	public String getTokenName(int t) {
-		if ( t==-1 ) return "EOF";
-		if ( parser!=null && parser.getTokenNames()!=null ) return parser.getTokenNames()[t]+"<"+t+">";
-		return String.valueOf(t);
-	}
-
-	public void setContextSensitive(boolean ctxSensitive) {
-		this.userWantsCtxSensitive = ctxSensitive;
-	}
-
-	public void dumpDeadEndConfigs(NoViableAltException nvae) {
-		System.err.println("dead end configs: ");
-		for (ATNConfig c : nvae.deadEndConfigs) {
-			Transition t = c.state.transition(0);
-			String trans = "";
-			if ( t instanceof AtomTransition) {
-				AtomTransition at = (AtomTransition)t;
-				trans = "Atom "+getTokenName(at.label);
-			}
-			else if ( t instanceof SetTransition ) {
-				SetTransition st = (SetTransition)t;
-				boolean not = st instanceof NotSetTransition;
-				trans = (not?"~":"")+"Set "+st.set.toString();
-			}
-			System.err.println(c.toString(parser, true)+":"+trans);
-		}
-	}
-}