Merge pull request #1218 from ericvergnaud/python-performance-issue
Python performance issue
commit 47e268dfea
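The Python-runtime changes below replace the two-key merge-cache protocol (get(a,b) / put(a,b,v)) with a plain dict keyed by (a, b) tuples, add cheaper hash/equality paths for config-set deduplication, and read the debug flags from the simulator classes. A minimal sketch of the tuple-keyed cache pattern the diff adopts (standalone example; compute is a stand-in for the real merge work, not a runtime function):

# Sketch of the mergeCache convention after this change: an ordinary dict
# keyed by an (a, b) tuple, probed in both orders before computing.
def cached_merge(a, b, mergeCache, compute):
    if mergeCache is not None:
        previous = mergeCache.get((a, b), None)
        if previous is not None:
            return previous
        previous = mergeCache.get((b, a), None)
        if previous is not None:
            return previous
    merged = compute(a, b)           # stand-in for the actual merge logic
    if mergeCache is not None:
        mergeCache[(a, b)] = merged
    return merged

cache = {}
print(cached_merge(1, 2, cache, lambda x, y: (x, y)))   # computed
print(cached_merge(2, 1, cache, lambda x, y: (x, y)))   # served from the cache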
@@ -788,7 +788,8 @@ public class ParserATNSimulator extends ATNSimulator {
 	protected ATNConfigSet computeReachSet(ATNConfigSet closure, int t,
 	                                       boolean fullCtx)
 	{
-		if ( debug ) System.out.println("in computeReachSet, starting closure: " + closure);
+		if ( debug )
+			System.out.println("in computeReachSet, starting closure: " + closure);

 		if (mergeCache == null) {
 			mergeCache = new DoubleKeyMap<PredictionContext, PredictionContext, PredictionContext>();
@@ -8,6 +8,6 @@ This runtime has been tested in Node.js, Safari, Firefox, Chrome and IE.

 See www.antlr.org for more information on ANTLR

-See https://raw.githubusercontent.com/antlr/antlr4/master/doc/javascript-target.md for more information on using ANTLR in JavaScript
+See https://github.com/antlr/antlr4/blob/master/doc/javascript-target.md for more information on using ANTLR in JavaScript
@@ -8,6 +8,6 @@ This runtime has been tested in Node.js, Safari, Firefox, Chrome and IE.

 See www.antlr.org for more information on ANTLR

-See https://theantlrguy.atlassian.net/wiki/display/ANTLR4/JavaScript+Target for more information on using ANTLR in JavaScript
+See https://github.com/antlr/antlr4/blob/master/doc/javascript-target.md for more information on using ANTLR in JavaScript
@@ -30,6 +30,7 @@
 #/
 from io import StringIO
 from antlr4.RuleContext import RuleContext
+from antlr4.atn.ATN import ATN
 from antlr4.atn.ATNState import ATNState

@@ -98,7 +99,7 @@ def calculateHashCode(parent, returnState):

 def calculateListsHashCode(parents, returnStates ):
     h = 0
-    for parent, returnState in parents, returnStates:
+    for parent, returnState in zip(parents, returnStates):
         h = hash((h, calculateHashCode(parent, returnState)))
     return h

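The calculateListsHashCode fix above matters because "for x, y in parents, returnStates:" does not pair the two lists; it iterates over the two-element tuple (parents, returnStates) and unpacks each list. A standalone illustration of the difference (not part of the runtime):

parents = ["p0", "p1"]
returnStates = [7, 9]

# Buggy form: iterates over the tuple (parents, returnStates), so each
# "pair" is one whole list unpacked into two names (and it raises a
# ValueError as soon as a list does not have exactly two elements).
for parent, returnState in (parents, returnStates):
    print(parent, returnState)      # prints: p0 p1, then: 7 9

# Fixed form: zip pairs element i of one list with element i of the other.
for parent, returnState in zip(parents, returnStates):
    print(parent, returnState)      # prints: p0 7, then: p1 9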
@@ -254,6 +255,10 @@ class ArrayPredictionContext(PredictionContext):
             buf.write(u"]")
             return buf.getvalue()

+    def __hash__(self):
+        return self.cachedHashCode
+

 # Convert a {@link RuleContext} tree to a {@link PredictionContext} graph.
 # Return {@link #EMPTY} if {@code outerContext} is empty or null.
@@ -328,18 +333,18 @@ def merge(a, b, rootIsWildcard, mergeCache):
 #/
 def mergeSingletons(a, b, rootIsWildcard, mergeCache):
     if mergeCache is not None:
-        previous = mergeCache.get(a,b)
+        previous = mergeCache.get((a,b), None)
         if previous is not None:
             return previous
-        previous = mergeCache.get(b,a)
+        previous = mergeCache.get((b,a), None)
         if previous is not None:
             return previous

-    rootMerge = mergeRoot(a, b, rootIsWildcard)
-    if rootMerge is not None:
+    merged = mergeRoot(a, b, rootIsWildcard)
+    if merged is not None:
         if mergeCache is not None:
-            mergeCache.put(a, b, rootMerge)
-        return rootMerge
+            mergeCache[(a, b)] = merged
+        return merged

     if a.returnState==b.returnState:
         parent = merge(a.parentCtx, b.parentCtx, rootIsWildcard, mergeCache)
@@ -352,10 +357,10 @@ def mergeSingletons(a, b, rootIsWildcard, mergeCache):
         # merge parents x and y, giving array node with x,y then remainders
         # of those graphs.  dup a, a' points at merged array
         # new joined parent so create new singleton pointing to it, a'
-        a_ = SingletonPredictionContext.create(parent, a.returnState)
+        merged = SingletonPredictionContext.create(parent, a.returnState)
         if mergeCache is not None:
-            mergeCache.put(a, b, a_)
-        return a_
+            mergeCache[(a, b)] = merged
+        return merged
     else: # a != b payloads differ
         # see if we can collapse parents due to $+x parents if local ctx
         singleParent = None
@@ -365,26 +370,24 @@ def mergeSingletons(a, b, rootIsWildcard, mergeCache):
             # sort payloads and use same parent
             payloads = [ a.returnState, b.returnState ]
             if a.returnState > b.returnState:
-                payloads[0] = b.returnState
-                payloads[1] = a.returnState
+                payloads = [ b.returnState, a.returnState ]
             parents = [singleParent, singleParent]
-            a_ = ArrayPredictionContext(parents, payloads)
+            merged = ArrayPredictionContext(parents, payloads)
             if mergeCache is not None:
-                mergeCache.put(a, b, a_)
-            return a_
+                mergeCache[(a, b)] = merged
+            return merged
         # parents differ and can't merge them. Just pack together
         # into array; can't merge.
         # ax + by = [ax,by]
         payloads = [ a.returnState, b.returnState ]
         parents = [ a.parentCtx, b.parentCtx ]
         if a.returnState > b.returnState: # sort by payload
-            payloads[0] = b.returnState
-            payloads[1] = a.returnState
+            payloads = [ b.returnState, a.returnState ]
             parents = [ b.parentCtx, a.parentCtx ]
-        a_ = ArrayPredictionContext(parents, payloads)
+        merged = ArrayPredictionContext(parents, payloads)
         if mergeCache is not None:
-            mergeCache.put(a, b, a_)
-        return a_
+            mergeCache[(a, b)] = merged
+        return merged


 #
@@ -466,10 +469,10 @@ def mergeRoot(a, b, rootIsWildcard):
 #/
 def mergeArrays(a, b, rootIsWildcard, mergeCache):
     if mergeCache is not None:
-        previous = mergeCache.get(a,b)
+        previous = mergeCache.get((a,b), None)
         if previous is not None:
             return previous
-        previous = mergeCache.get(b,a)
+        previous = mergeCache.get((b,a), None)
         if previous is not None:
             return previous

@@ -478,8 +481,8 @@ def mergeArrays(a, b, rootIsWildcard, mergeCache):
     j = 0 # walks b
     k = 0 # walks target M array

-    mergedReturnStates = [] * (len(a.returnState) + len( b.returnStates))
-    mergedParents = [] * len(mergedReturnStates)
+    mergedReturnStates = [None] * (len(a.returnStates) + len( b.returnStates))
+    mergedParents = [None] * len(mergedReturnStates)
     # walk and merge to yield mergedParents, mergedReturnStates
     while i<len(a.returnStates) and j<len(b.returnStates):
         a_parent = a.parents[i]
@@ -525,30 +528,30 @@ def mergeArrays(a, b, rootIsWildcard, mergeCache):
     # trim merged if we combined a few that had same stack tops
     if k < len(mergedParents): # write index < last position; trim
         if k == 1: # for just one merged element, return singleton top
-            a_ = SingletonPredictionContext.create(mergedParents[0], mergedReturnStates[0])
+            merged = SingletonPredictionContext.create(mergedParents[0], mergedReturnStates[0])
             if mergeCache is not None:
-                mergeCache.put(a,b,a_)
-            return a_
+                mergeCache[(a,b)] = merged
+            return merged
         mergedParents = mergedParents[0:k]
         mergedReturnStates = mergedReturnStates[0:k]

-    M = ArrayPredictionContext(mergedParents, mergedReturnStates)
+    merged = ArrayPredictionContext(mergedParents, mergedReturnStates)

     # if we created same array as a or b, return that instead
     # TODO: track whether this is possible above during merge sort for speed
-    if M==a:
+    if merged==a:
         if mergeCache is not None:
-            mergeCache.put(a,b,a)
+            mergeCache[(a,b)] = a
         return a
-    if M==b:
+    if merged==b:
         if mergeCache is not None:
-            mergeCache.put(a,b,b)
+            mergeCache[(a,b)] = b
         return b
     combineCommonParents(mergedParents)

     if mergeCache is not None:
-        mergeCache.put(a,b,M)
-    return M
+        mergeCache[(a,b)] = merged
+    return merged


 #
@@ -642,6 +645,6 @@ def getAllContextNodes(context, nodes=None, visited=None):
         visited.put(context, context)
         nodes.add(context)
         for i in range(0, len(context)):
-            getAllContextNodes(context.getParent(i), nodes, visited);
+            getAllContextNodes(context.getParent(i), nodes, visited)
         return nodes

@@ -95,6 +95,19 @@ class ATNConfig(object):
     def __hash__(self):
         return hash((self.state.stateNumber, self.alt, self.context, self.semanticContext))

+    def hashCodeForConfigSet(self):
+        return hash((self.state.stateNumber, self.alt, hash(self.semanticContext)))
+
+    def equalsForConfigSet(self, other):
+        if self is other:
+            return True
+        elif not isinstance(other, ATNConfig):
+            return False
+        else:
+            return self.state.stateNumber==other.state.stateNumber \
+                and self.alt==other.alt \
+                and self.semanticContext==other.semanticContext
+
     def __str__(self):
         return unicode(self)

@@ -144,6 +157,18 @@ class LexerATNConfig(ATNConfig):
             return False
         return super(LexerATNConfig, self).__eq__(other)

+
+
+    def hashCodeForConfigSet(self):
+        return hash(self)
+
+
+
+    def equalsForConfigSet(self, other):
+        return self==other
+
+
+
     def checkNonGreedyDecision(self, source, target):
         return source.passedThroughNonGreedyDecision \
             or isinstance(target, DecisionState) and target.nonGreedy
@@ -105,8 +105,8 @@ class ATNConfigSet(object):
             rootIsWildcard = not self.fullCtx
             merged = merge(existing.context, config.context, rootIsWildcard, mergeCache)
             # no need to check for existing.context, config.context in cache
-            # since only way to create new graphs is "call rule" and here. We
-            # cache at both places.
+            # since only way to create new graphs is "call rule" and here.
+            # We cache at both places.
             existing.reachesIntoOuterContext = max(existing.reachesIntoOuterContext, config.reachesIntoOuterContext)
             # make sure to preserve the precedence filter suppression during the merge
             if config.precedenceFilterSuppressed:
@@ -115,11 +115,11 @@ class ATNConfigSet(object):
         return True

     def getOrAdd(self, config):
-        h = hash(config)
+        h = config.hashCodeForConfigSet()
         l = self.configLookup.get(h, None)
         if l is not None:
             for c in l:
-                if c==config:
+                if config.equalsForConfigSet(c):
                     return c
         if l is None:
             l = [config]
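The ATNConfig and ATNConfigSet hunks above work together: hashCodeForConfigSet and equalsForConfigSet deliberately leave the prediction context out, so two configurations that differ only in context land in the same bucket and their contexts can be merged instead of being stored twice. A minimal, self-contained sketch of that lookup scheme (class and attribute names here are illustrative, not the runtime's):

# Bucket by a hash that ignores the context, compare with an equality that
# also ignores it; the caller merges the contexts of the matching entry.
class Config(object):
    def __init__(self, state, alt, context):
        self.state = state
        self.alt = alt
        self.context = context

    def hashCodeForConfigSet(self):
        return hash((self.state, self.alt))          # context deliberately excluded

    def equalsForConfigSet(self, other):
        return self.state == other.state and self.alt == other.alt


class ConfigSet(object):
    def __init__(self):
        self.configLookup = {}                        # hash value -> list of configs

    def getOrAdd(self, config):
        h = config.hashCodeForConfigSet()
        l = self.configLookup.get(h, None)
        if l is not None:
            for c in l:
                if config.equalsForConfigSet(c):
                    return c                          # existing entry: merge contexts outside
            l.append(config)
        else:
            self.configLookup[h] = [config]
        return config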
@@ -130,7 +130,7 @@ class LexerATNSimulator(ATNSimulator):
     def matchATN(self, input):
         startState = self.atn.modeToStartState[self.mode]

-        if self.debug:
+        if LexerATNSimulator.debug:
             print("matchATN mode " + str(self.mode) + " start: " + str(startState))

         old_mode = self.mode
@@ -144,13 +144,13 @@ class LexerATNSimulator(ATNSimulator):

         predict = self.execATN(input, next)

-        if self.debug:
+        if LexerATNSimulator.debug:
             print("DFA after matchATN: " + str(self.decisionToDFA[old_mode].toLexerString()))

         return predict

     def execATN(self, input, ds0):
-        if self.debug:
+        if LexerATNSimulator.debug:
             print("start state closure=" + str(ds0.configs))

         if ds0.isAcceptState:
@@ -161,8 +161,8 @@ class LexerATNSimulator(ATNSimulator):
         s = ds0 # s is current/from DFA state

         while True: # while more work
-            if self.debug:
-                print("execATN loop starting closure: %s\n", s.configs)
+            if LexerATNSimulator.debug:
+                print("execATN loop starting closure:", str(s.configs))

             # As we move src->trg, src->trg, we keep track of the previous trg to
             # avoid looking up the DFA state again, which is expensive.
@@ -223,8 +223,8 @@ class LexerATNSimulator(ATNSimulator):
             return None

         target = s.edges[t - self.MIN_DFA_EDGE]
-        if self.debug and target is not None:
-            print("reuse state "+s.stateNumber+ " edge to "+target.stateNumber)
+        if LexerATNSimulator.debug and target is not None:
+            print("reuse state", str(s.stateNumber), "edge to", str(target.stateNumber))

         return target

@@ -280,8 +280,8 @@ class LexerATNSimulator(ATNSimulator):
             if currentAltReachedAcceptState and cfg.passedThroughNonGreedyDecision:
                 continue

-            if self.debug:
-                print("testing %s at %s\n", self.getTokenName(t), cfg.toString(self.recog, True))
+            if LexerATNSimulator.debug:
+                print("testing", self.getTokenName(t), "at", str(cfg))

             for trans in cfg.state.transitions: # for each transition
                 target = self.getReachableTarget(trans, t)
@@ -298,8 +298,8 @@ class LexerATNSimulator(ATNSimulator):
                 skipAlt = cfg.alt

     def accept(self, input, lexerActionExecutor, startIndex, index, line, charPos):
-        if self.debug:
-            print("ACTION %s\n", lexerActionExecutor)
+        if LexerATNSimulator.debug:
+            print("ACTION", lexerActionExecutor)

         # seek to after last char in token
         input.seek(index)
@@ -334,15 +334,15 @@ class LexerATNSimulator(ATNSimulator):
     # {@code false}.
     def closure(self, input, config, configs, currentAltReachedAcceptState,
                 speculative, treatEofAsEpsilon):
-        if self.debug:
-            print("closure("+config.toString(self.recog, True)+")")
+        if LexerATNSimulator.debug:
+            print("closure(" + str(config) + ")")

         if isinstance( config.state, RuleStopState ):
-            if self.debug:
+            if LexerATNSimulator.debug:
                 if self.recog is not None:
-                    print("closure at %s rule stop %s\n", self.recog.getRuleNames()[config.state.ruleIndex], config)
+                    print("closure at", self.recog.symbolicNames[config.state.ruleIndex], "rule stop", str(config))
                 else:
-                    print("closure at rule stop %s\n", config)
+                    print("closure at rule stop", str(config))

-            if config.context is None or config.context.hasEmptyPath():
+            if config.context is None or config.context.isEmpty():
@@ -404,7 +404,7 @@ class LexerATNSimulator(ATNSimulator):
             # states reached by traversing predicates. Since this is when we
             # test them, we cannot cash the DFA state target of ID.

-            if self.debug:
+            if LexerATNSimulator.debug:
                 print("EVAL rule "+ str(t.ruleIndex) + ":" + str(t.predIndex))
             configs.hasSemanticContext = True
             if self.evaluatePredicate(input, t.ruleIndex, t.predIndex, speculative):
@@ -516,7 +516,7 @@ class LexerATNSimulator(ATNSimulator):
             # Only track edges within the DFA bounds
             return to

-        if self.debug:
+        if LexerATNSimulator.debug:
             print("EDGE " + str(from_) + " -> " + str(to) + " upon "+ chr(tk))

         if from_.edges is None:
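Besides switching to the class-level debug flag, the lexer-simulator hunks above fix several printf-style print calls: print() does not interpolate a "%s" format string, it just prints the string and the extra argument separately. A standalone illustration (Python 3 syntax, or Python 2 with print_function imported):

configs = ["cfg1", "cfg2"]

# Old style: the format string is printed verbatim, then the list follows.
print("execATN loop starting closure: %s\n", configs)

# Fixed style: pass the pieces as separate print arguments, or pre-format
# the message with str()/concatenation as the other hunks do.
print("execATN loop starting closure:", str(configs))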
@@ -308,7 +308,7 @@ class ParserATNSimulator(ATNSimulator):
         pass

     def adaptivePredict(self, input, decision, outerContext):
-        if self.debug or self.debug_list_atn_decisions:
+        if ParserATNSimulator.debug or ParserATNSimulator.debug_list_atn_decisions:
             print("adaptivePredict decision " + str(decision) +
                   " exec LA(1)==" + self.getLookaheadName(input) +
                   " line " + str(input.LT(1).line) + ":" +
@@ -336,10 +336,10 @@ class ParserATNSimulator(ATNSimulator):
             if s0 is None:
                 if outerContext is None:
                     outerContext = ParserRuleContext.EMPTY
-                if self.debug or self.debug_list_atn_decisions:
+                if ParserATNSimulator.debug or ParserATNSimulator.debug_list_atn_decisions:
                     print("predictATN decision " + str(dfa.decision) +
                           " exec LA(1)==" + self.getLookaheadName(input) +
-                          ", outerContext=" + outerContext.toString(self.parser))
+                          ", outerContext=" + outerContext.toString(self.parser.literalNames, None))

             # If this is not a precedence DFA, we check the ATN start state
             # to determine if this ATN start state is the decision for the
@@ -368,8 +368,8 @@ class ParserATNSimulator(ATNSimulator):
                 dfa.s0 = s0

             alt = self.execATN(dfa, s0, input, index, outerContext)
-            if self.debug:
-                print("DFA after predictATN: " + dfa.toString(self.parser.tokenNames))
+            if ParserATNSimulator.debug:
+                print("DFA after predictATN: " + dfa.toString(self.parser.literalNames))
             return alt
         finally:
             self._dfa = None
@@ -408,14 +408,14 @@ class ParserATNSimulator(ATNSimulator):
     # conflict + preds
     #
     def execATN(self, dfa, s0, input, startIndex, outerContext ):
-        if self.debug or self.debug_list_atn_decisions:
+        if ParserATNSimulator.debug or ParserATNSimulator.debug_list_atn_decisions:
            print("execATN decision " + str(dfa.decision) +
                  " exec LA(1)==" + self.getLookaheadName(input) +
                  " line " + str(input.LT(1).line) + ":" + str(input.LT(1).column))

        previousD = s0

-        if self.debug:
+        if ParserATNSimulator.debug:
            print("s0 = " + str(s0))

        t = input.LA(1)
@@ -445,7 +445,7 @@ class ParserATNSimulator(ATNSimulator):
                 # IF PREDS, MIGHT RESOLVE TO SINGLE ALT => SLL (or syntax error)
                 conflictingAlts = None
                 if D.predicates is not None:
-                    if self.debug:
+                    if ParserATNSimulator.debug:
                         print("DFA state has preds in DFA sim LL failover")
                     conflictIndex = input.index
                     if conflictIndex != startIndex:
@@ -453,7 +453,7 @@ class ParserATNSimulator(ATNSimulator):

                     conflictingAlts = self.evalSemanticContext(D.predicates, outerContext, True)
                     if len(conflictingAlts)==1:
-                        if self.debug:
+                        if ParserATNSimulator.debug:
                             print("Full LL avoided")
                         return min(conflictingAlts)

@@ -462,7 +462,7 @@ class ParserATNSimulator(ATNSimulator):
                         # context occurs with the index at the correct spot
                         input.seek(conflictIndex)

-                if self.dfa_debug:
+                if ParserATNSimulator.dfa_debug:
                     print("ctx sensitive state " + str(outerContext) +" in " + str(D))
                 fullCtx = True
                 s0_closure = self.computeStartState(dfa.atnStartState, outerContext, fullCtx)
@@ -534,7 +534,7 @@ class ParserATNSimulator(ATNSimulator):

         predictedAlt = self.getUniqueAlt(reach)

-        if self.debug:
+        if ParserATNSimulator.debug:
             altSubSets = PredictionMode.getConflictingAltSubsets(reach)
             print("SLL altSubSets=" + str(altSubSets) + ", configs=" + str(reach) +
                   ", predict=" + str(predictedAlt) + ", allSubsetsConflict=" +
@@ -586,8 +586,8 @@ class ParserATNSimulator(ATNSimulator):
                                  input,
                                  startIndex,
                                  outerContext):
-        if self.debug or self.debug_list_atn_decisions:
-            print("execATNWithFullContext "+s0)
+        if ParserATNSimulator.debug or ParserATNSimulator.debug_list_atn_decisions:
+            print("execATNWithFullContext", str(s0))
         fullCtx = True
         foundExactAmbig = False
         reach = None
@@ -616,7 +616,7 @@ class ParserATNSimulator(ATNSimulator):
                 raise e

             altSubSets = PredictionMode.getConflictingAltSubsets(reach)
-            if self.debug:
+            if ParserATNSimulator.debug:
                 print("LL altSubSets=" + str(altSubSets) + ", predict=" +
                       str(PredictionMode.getUniqueAlt(altSubSets)) + ", resolvesToJustOneViableAlt=" +
                       str(PredictionMode.resolvesToJustOneViableAlt(altSubSets)))
@@ -685,7 +685,7 @@ class ParserATNSimulator(ATNSimulator):
         return predictedAlt

     def computeReachSet(self, closure, t, fullCtx):
-        if self.debug:
+        if ParserATNSimulator.debug:
             print("in computeReachSet, starting closure: " + str(closure))

         if self.mergeCache is None:
@@ -707,7 +707,7 @@ class ParserATNSimulator(ATNSimulator):

         # First figure out where we can reach on input t
         for c in closure:
-            if self.debug:
+            if ParserATNSimulator.debug:
                 print("testing " + self.getTokenName(t) + " at " + str(c))

             if isinstance(c.state, RuleStopState):
@@ -967,7 +967,7 @@ class ParserATNSimulator(ATNSimulator):
         # nonambig alts are null in altToPred
         if nPredAlts==0:
             altToPred = None
-        if self.debug:
+        if ParserATNSimulator.debug:
             print("getPredsForAmbigAlts result " + str_list(altToPred))
         return altToPred

@@ -1093,11 +1093,11 @@ class ParserATNSimulator(ATNSimulator):
                     break
                 continue
             predicateEvaluationResult = pair.pred.eval(self.parser, outerContext)
-            if self.debug or self.dfa_debug:
+            if ParserATNSimulator.debug or ParserATNSimulator.dfa_debug:
                 print("eval pred " + str(pair) + "=" + str(predicateEvaluationResult))

             if predicateEvaluationResult:
-                if self.debug or self.dfa_debug:
+                if ParserATNSimulator.debug or ParserATNSimulator.dfa_debug:
                     print("PREDICT " + str(pair.alt))
                 predictions.add(pair.alt)
                 if not complete:
@@ -1119,8 +1119,8 @@ class ParserATNSimulator(ATNSimulator):


     def closureCheckingStopState(self, config, configs, closureBusy, collectPredicates, fullCtx, depth, treatEofAsEpsilon):
-        if self.debug:
-            print("closure(" + config.toString(self.parser,True) + ")")
+        if ParserATNSimulator.debug:
+            print("closure(" + str(config) + ")")

         if isinstance(config.state, RuleStopState):
             # We hit rule end. If we have context info, use it
@@ -1134,7 +1134,7 @@ class ParserATNSimulator(ATNSimulator):
                         continue
                     else:
                         # we have no context info, just chase follow links (if greedy)
-                        if self.debug:
+                        if ParserATNSimulator.debug:
                             print("FALLING off rule " + self.getRuleName(config.state.ruleIndex))
                         self.closure_(config, configs, closureBusy, collectPredicates,
                                       fullCtx, depth, treatEofAsEpsilon)
@@ -1154,7 +1154,7 @@ class ParserATNSimulator(ATNSimulator):
                 return
             else:
                 # else if we have no context info, just chase follow links (if greedy)
-                if self.debug:
+                if ParserATNSimulator.debug:
                     print("FALLING off rule " + self.getRuleName(config.state.ruleIndex))

         self.closure_(config, configs, closureBusy, collectPredicates, fullCtx, depth, treatEofAsEpsilon)
@@ -1196,7 +1196,7 @@ class ParserATNSimulator(ATNSimulator):
                     c.reachesIntoOuterContext += 1
                     configs.dipsIntoOuterContext = True # TODO: can remove? only care when we add to set per middle of this method
                     newDepth -= 1
-                    if self.debug:
+                    if ParserATNSimulator.debug:
                         print("dips into outer ctx: " + str(c))
                 elif isinstance(t, RuleTransition):
                     # latch when newDepth goes negative - once we step out of the entry context we can't return
@@ -1237,12 +1237,12 @@ class ParserATNSimulator(ATNSimulator):
         return m(self, config, t, collectPredicates, inContext, fullCtx, treatEofAsEpsilon)

     def actionTransition(self, config, t):
-        if self.debug:
+        if ParserATNSimulator.debug:
             print("ACTION edge " + str(t.ruleIndex) + ":" + str(t.actionIndex))
         return ATNConfig(state=t.target, config=config)

     def precedenceTransition(self, config, pt, collectPredicates, inContext, fullCtx):
-        if self.debug:
+        if ParserATNSimulator.debug:
             print("PRED (collectPredicates=" + str(collectPredicates) + ") " +
                   str(pt.precedence) + ">=_p, ctx dependent=true")
             if self.parser is not None:
@@ -1267,12 +1267,12 @@ class ParserATNSimulator(ATNSimulator):
         else:
             c = ATNConfig(state=pt.target, config=config)

-        if self.debug:
+        if ParserATNSimulator.debug:
             print("config from pred transition=" + str(c))
         return c

     def predTransition(self, config, pt, collectPredicates, inContext, fullCtx):
-        if self.debug:
+        if ParserATNSimulator.debug:
             print("PRED (collectPredicates=" + str(collectPredicates) + ") " + str(pt.ruleIndex) +
                   ":" + str(pt.predIndex) + ", ctx dependent=" + str(pt.isCtxDependent))
             if self.parser is not None:
@@ -1297,12 +1297,12 @@ class ParserATNSimulator(ATNSimulator):
         else:
             c = ATNConfig(state=pt.target, config=config)

-        if self.debug:
+        if ParserATNSimulator.debug:
             print("config from pred transition=" + str(c))
         return c

     def ruleTransition(self, config, t):
-        if self.debug:
+        if ParserATNSimulator.debug:
             print("CALL rule " + self.getRuleName(t.target.ruleIndex) + ", ctx=" + str(config.context))
         returnState = t.followState
         newContext = SingletonPredictionContext.create(config.context, returnState.stateNumber)
@@ -1360,12 +1360,11 @@ class ParserATNSimulator(ATNSimulator):
     def getTokenName(self, t):
         if t==Token.EOF:
             return u"EOF"
-        if self.parser is not None and self.parser.tokenNames is not None:
-            if t >= len(self.parser.tokenNames):
-                print(str(t) + " ttype out of range: " + str_list(self.parser.tokenNames))
-                print(str_list(self.parser.getInputStream().getTokens()))
-            else:
-                return self.parser.tokensNames[t] + u"<" + unicode(t) + ">"
-        return unicode(t)
+        if self.parser is not None and \
+            self.parser.literalNames is not None and \
+            t < len(self.parser.literalNames):
+                return self.parser.literalNames[t] + u"<" + unicode(t) + ">"
+        else:
+            return unicode(t)

     def getLookaheadName(self, input):
@@ -1421,7 +1420,7 @@ class ParserATNSimulator(ATNSimulator):
     # on {@code to}
     #
     def addDFAEdge(self, dfa, from_, t, to):
-        if self.debug:
+        if ParserATNSimulator.debug:
             print("EDGE " + str(from_) + " -> " + str(to) + " upon " + self.getTokenName(t))

         if to is None:
@@ -1435,8 +1434,8 @@ class ParserATNSimulator(ATNSimulator):
             from_.edges = [None] * (self.atn.maxTokenType + 2)
         from_.edges[t+1] = to # connect

-        if self.debug:
-            names = None if self.parser is None else self.parser.tokenNames
+        if ParserATNSimulator.debug:
+            names = None if self.parser is None else self.parser.literalNames
             print("DFA=\n" + dfa.toString(names))

         return to
@@ -1470,12 +1469,12 @@ class ParserATNSimulator(ATNSimulator):
         D.configs.optimizeConfigs(self)
         D.configs.setReadonly(True)
         dfa.states[D] = D
-        if self.debug:
+        if ParserATNSimulator.debug:
             print("adding new DFA state: " + str(D))
         return D

     def reportAttemptingFullContext(self, dfa, conflictingAlts, configs, startIndex, stopIndex):
-        if self.debug or self.retry_debug:
+        if ParserATNSimulator.debug or ParserATNSimulator.retry_debug:
             interval = range(startIndex, stopIndex + 1)
             print("reportAttemptingFullContext decision=" + str(dfa.decision) + ":" + str(configs) +
                   ", input=" + self.parser.getTokenStream().getText(interval))
@@ -1483,7 +1482,7 @@ class ParserATNSimulator(ATNSimulator):
             self.parser.getErrorListenerDispatch().reportAttemptingFullContext(self.parser, dfa, startIndex, stopIndex, conflictingAlts, configs)

     def reportContextSensitivity(self, dfa, prediction, configs, startIndex, stopIndex):
-        if self.debug or self.retry_debug:
+        if ParserATNSimulator.debug or ParserATNSimulator.retry_debug:
             interval = range(startIndex, stopIndex + 1)
             print("reportContextSensitivity decision=" + str(dfa.decision) + ":" + str(configs) +
                   ", input=" + self.parser.getTokenStream().getText(interval))
@@ -1493,7 +1492,7 @@ class ParserATNSimulator(ATNSimulator):
     # If context sensitive parsing, we know it's ambiguity not conflict#
     def reportAmbiguity(self, dfa, D, startIndex, stopIndex,
                         exact, ambigAlts, configs ):
-        if self.debug or self.retry_debug:
+        if ParserATNSimulator.debug or ParserATNSimulator.retry_debug:
             # ParserATNPathFinder finder = new ParserATNPathFinder(parser, atn);
             # int i = 1;
             # for (Transition t : dfa.atnStartState.transitions) {
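A recurring change in the parser-simulator hunks above is reading debug, dfa_debug and retry_debug through the class name instead of self. These flags are class attributes on the simulator, so the sketch below (a standalone stand-in class, not the runtime's) shows why one class-level assignment is enough to switch tracing for every instance:

class Simulator(object):
    debug = False            # class-level switch, as on the ATN simulators

    def step(self, msg):
        if Simulator.debug:  # read via the class, not the instance
            print("trace:", msg)

s1, s2 = Simulator(), Simulator()
Simulator.debug = True       # affects both s1 and s2
s1.step("first")
s2.step("second")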
@@ -467,7 +467,8 @@ class PredictionMode(object):
     def getUniqueAlt(cls, altsets):
         all = cls.getAlts(altsets)
         if len(all)==1:
-            return all[0]
+            for one in all:
+                return one
         else:
             return ATN.INVALID_ALT_NUMBER

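The getUniqueAlt change above replaces indexing with iteration; the loop form works for any single-element collection, including a set, which cannot be indexed. A standalone illustration (assuming, as the change suggests, that getAlts yields a set-like collection):

alts = {3}                 # e.g. a collected set of alternative numbers
# alts[0]                  # TypeError: 'set' object is not subscriptable
only = next(iter(alts))    # works for sets, frozensets, lists, ...
assert only == 3

# The diff's loop form is equivalent: return the first iterated element.
for one in alts:
    first = one
    break
assert first == 3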
@@ -95,7 +95,7 @@ def calculateHashCode(parent:PredictionContext, returnState:int):

 def calculateListsHashCode(parents:[], returnStates:[] ):
     h = 0
-    for parent, returnState in parents, returnStates:
+    for parent, returnState in zip(parents, returnStates):
         h = hash((h, calculateHashCode(parent, returnState)))
     return h

@@ -242,7 +242,7 @@ class ArrayPredictionContext(PredictionContext):
                 if self.returnStates[i]==PredictionContext.EMPTY_RETURN_STATE:
                     buf.write("$")
                     continue
-                buf.write(self.returnStates[i])
+                buf.write(str(self.returnStates[i]))
                 if self.parents[i] is not None:
                     buf.write(' ')
                     buf.write(str(self.parents[i]))
@@ -251,6 +251,10 @@ class ArrayPredictionContext(PredictionContext):
             buf.write("]")
             return buf.getvalue()

+    def __hash__(self):
+        return self.cachedHashCode
+

 # Convert a {@link RuleContext} tree to a {@link PredictionContext} graph.
 # Return {@link #EMPTY} if {@code outerContext} is empty or null.
@@ -325,18 +329,18 @@ def merge(a:PredictionContext, b:PredictionContext, rootIsWildcard:bool, mergeCache:dict):
 #/
 def mergeSingletons(a:SingletonPredictionContext, b:SingletonPredictionContext, rootIsWildcard:bool, mergeCache:dict):
     if mergeCache is not None:
-        previous = mergeCache.get(a,b)
+        previous = mergeCache.get((a,b), None)
         if previous is not None:
             return previous
-        previous = mergeCache.get(b,a)
+        previous = mergeCache.get((b,a), None)
         if previous is not None:
             return previous

-    rootMerge = mergeRoot(a, b, rootIsWildcard)
-    if rootMerge is not None:
+    merged = mergeRoot(a, b, rootIsWildcard)
+    if merged is not None:
         if mergeCache is not None:
-            mergeCache.put(a, b, rootMerge)
-        return rootMerge
+            mergeCache[(a, b)] = merged
+        return merged

     if a.returnState==b.returnState:
         parent = merge(a.parentCtx, b.parentCtx, rootIsWildcard, mergeCache)
@@ -349,10 +353,10 @@ def mergeSingletons(a:SingletonPredictionContext, b:SingletonPredictionContext, rootIsWildcard:bool, mergeCache:dict):
         # merge parents x and y, giving array node with x,y then remainders
         # of those graphs.  dup a, a' points at merged array
         # new joined parent so create new singleton pointing to it, a'
-        a_ = SingletonPredictionContext.create(parent, a.returnState)
+        merged = SingletonPredictionContext.create(parent, a.returnState)
         if mergeCache is not None:
-            mergeCache.put(a, b, a_)
-        return a_
+            mergeCache[(a, b)] = merged
+        return merged
     else: # a != b payloads differ
         # see if we can collapse parents due to $+x parents if local ctx
         singleParent = None
@@ -362,26 +366,24 @@ def mergeSingletons(a:SingletonPredictionContext, b:SingletonPredictionContext, rootIsWildcard:bool, mergeCache:dict):
             # sort payloads and use same parent
             payloads = [ a.returnState, b.returnState ]
             if a.returnState > b.returnState:
-                payloads[0] = b.returnState
-                payloads[1] = a.returnState
+                payloads = [ b.returnState, a.returnState ]
             parents = [singleParent, singleParent]
-            a_ = ArrayPredictionContext(parents, payloads)
+            merged = ArrayPredictionContext(parents, payloads)
             if mergeCache is not None:
-                mergeCache.put(a, b, a_)
-            return a_
+                mergeCache[(a, b)] = merged
+            return merged
         # parents differ and can't merge them. Just pack together
         # into array; can't merge.
         # ax + by = [ax,by]
         payloads = [ a.returnState, b.returnState ]
         parents = [ a.parentCtx, b.parentCtx ]
         if a.returnState > b.returnState: # sort by payload
-            payloads[0] = b.returnState
-            payloads[1] = a.returnState
+            payloads = [ b.returnState, a.returnState ]
             parents = [ b.parentCtx, a.parentCtx ]
-        a_ = ArrayPredictionContext(parents, payloads)
+        merged = ArrayPredictionContext(parents, payloads)
         if mergeCache is not None:
-            mergeCache.put(a, b, a_)
-        return a_
+            mergeCache[(a, b)] = merged
+        return merged


 #
@@ -463,10 +465,10 @@ def mergeRoot(a:SingletonPredictionContext, b:SingletonPredictionContext, rootIsWildcard:bool):
 #/
 def mergeArrays(a:ArrayPredictionContext, b:ArrayPredictionContext, rootIsWildcard:bool, mergeCache:dict):
     if mergeCache is not None:
-        previous = mergeCache.get(a,b)
+        previous = mergeCache.get((a,b), None)
         if previous is not None:
             return previous
-        previous = mergeCache.get(b,a)
+        previous = mergeCache.get((b,a), None)
         if previous is not None:
             return previous

@@ -475,8 +477,8 @@ def mergeArrays(a:ArrayPredictionContext, b:ArrayPredictionContext, rootIsWildcard:bool, mergeCache:dict):
     j = 0 # walks b
     k = 0 # walks target M array

-    mergedReturnStates = [] * (len(a.returnState) + len( b.returnStates))
-    mergedParents = [] * len(mergedReturnStates)
+    mergedReturnStates = [None] * (len(a.returnStates) + len( b.returnStates))
+    mergedParents = [None] * len(mergedReturnStates)
     # walk and merge to yield mergedParents, mergedReturnStates
     while i<len(a.returnStates) and j<len(b.returnStates):
         a_parent = a.parents[i]
@@ -522,30 +524,30 @@ def mergeArrays(a:ArrayPredictionContext, b:ArrayPredictionContext, rootIsWildcard:bool, mergeCache:dict):
     # trim merged if we combined a few that had same stack tops
     if k < len(mergedParents): # write index < last position; trim
         if k == 1: # for just one merged element, return singleton top
-            a_ = SingletonPredictionContext.create(mergedParents[0], mergedReturnStates[0])
+            merged = SingletonPredictionContext.create(mergedParents[0], mergedReturnStates[0])
             if mergeCache is not None:
-                mergeCache.put(a,b,a_)
-            return a_
+                mergeCache[(a,b)] = merged
+            return merged
         mergedParents = mergedParents[0:k]
         mergedReturnStates = mergedReturnStates[0:k]

-    M = ArrayPredictionContext(mergedParents, mergedReturnStates)
+    merged = ArrayPredictionContext(mergedParents, mergedReturnStates)

     # if we created same array as a or b, return that instead
     # TODO: track whether this is possible above during merge sort for speed
-    if M==a:
+    if merged==a:
         if mergeCache is not None:
-            mergeCache.put(a,b,a)
+            mergeCache[(a,b)] = a
         return a
-    if M==b:
+    if merged==b:
         if mergeCache is not None:
-            mergeCache.put(a,b,b)
+            mergeCache[(a,b)] = b
         return b
     combineCommonParents(mergedParents)

     if mergeCache is not None:
-        mergeCache.put(a,b,M)
-    return M
+        mergeCache[(a,b)] = merged
+    return merged


 #
@@ -59,7 +59,6 @@ class ATNConfig(object):
                 semantic = config.semanticContext
         if semantic is None:
             semantic = SemanticContext.NONE
-
         # The ATN state associated with this configuration#/
         self.state = state
         # What alt (or lexer rule) is predicted by this configuration#/
@@ -100,6 +99,19 @@ class ATNConfig(object):
     def __hash__(self):
         return hash((self.state.stateNumber, self.alt, self.context, self.semanticContext))

+    def hashCodeForConfigSet(self):
+        return hash((self.state.stateNumber, self.alt, hash(self.semanticContext)))
+
+    def equalsForConfigSet(self, other):
+        if self is other:
+            return True
+        elif not isinstance(other, ATNConfig):
+            return False
+        else:
+            return self.state.stateNumber==other.state.stateNumber \
+                and self.alt==other.alt \
+                and self.semanticContext==other.semanticContext
+
     def __str__(self):
         with StringIO() as buf:
             buf.write('(')
@@ -150,6 +162,18 @@ class LexerATNConfig(ATNConfig):
             return False
         return super().__eq__(other)

+
+
+    def hashCodeForConfigSet(self):
+        return hash(self)
+
+
+
+    def equalsForConfigSet(self, other):
+        return self==other
+
+
+
     def checkNonGreedyDecision(self, source:LexerATNConfig, target:ATNState):
         return source.passedThroughNonGreedyDecision \
             or isinstance(target, DecisionState) and target.nonGreedy
@@ -108,8 +108,8 @@ class ATNConfigSet(object):
             rootIsWildcard = not self.fullCtx
             merged = merge(existing.context, config.context, rootIsWildcard, mergeCache)
             # no need to check for existing.context, config.context in cache
-            # since only way to create new graphs is "call rule" and here. We
-            # cache at both places.
+            # since only way to create new graphs is "call rule" and here.
+            # We cache at both places.
             existing.reachesIntoOuterContext = max(existing.reachesIntoOuterContext, config.reachesIntoOuterContext)
             # make sure to preserve the precedence filter suppression during the merge
             if config.precedenceFilterSuppressed:
@@ -118,11 +118,11 @@ class ATNConfigSet(object):
         return True

     def getOrAdd(self, config:ATNConfig):
-        h = hash(config)
+        h = config.hashCodeForConfigSet()
         l = self.configLookup.get(h, None)
         if l is not None:
             for c in l:
-                if c==config:
+                if config.equalsForConfigSet(c):
                     return c
         if l is None:
             l = [config]
@@ -136,7 +136,7 @@ class LexerATNSimulator(ATNSimulator):
     def matchATN(self, input:InputStream):
         startState = self.atn.modeToStartState[self.mode]

-        if self.debug:
+        if LexerATNSimulator.debug:
             print("matchATN mode " + str(self.mode) + " start: " + str(startState))

         old_mode = self.mode
@@ -150,13 +150,13 @@ class LexerATNSimulator(ATNSimulator):

         predict = self.execATN(input, next)

-        if self.debug:
+        if LexerATNSimulator.debug:
             print("DFA after matchATN: " + str(self.decisionToDFA[old_mode].toLexerString()))

         return predict

     def execATN(self, input:InputStream, ds0:DFAState):
-        if self.debug:
+        if LexerATNSimulator.debug:
             print("start state closure=" + str(ds0.configs))

         if ds0.isAcceptState:
@@ -167,8 +167,8 @@ class LexerATNSimulator(ATNSimulator):
         s = ds0 # s is current/from DFA state

         while True: # while more work
-            if self.debug:
-                print("execATN loop starting closure: %s\n", s.configs)
+            if LexerATNSimulator.debug:
+                print("execATN loop starting closure:", str(s.configs))

             # As we move src->trg, src->trg, we keep track of the previous trg to
             # avoid looking up the DFA state again, which is expensive.
@@ -229,8 +229,8 @@ class LexerATNSimulator(ATNSimulator):
             return None

         target = s.edges[t - self.MIN_DFA_EDGE]
-        if self.debug and target is not None:
-            print("reuse state "+s.stateNumber+ " edge to "+target.stateNumber)
+        if LexerATNSimulator.debug and target is not None:
+            print("reuse state", str(s.stateNumber), "edge to", str(target.stateNumber))

         return target

@@ -286,8 +286,8 @@ class LexerATNSimulator(ATNSimulator):
             if currentAltReachedAcceptState and cfg.passedThroughNonGreedyDecision:
                 continue

-            if self.debug:
-                print("testing %s at %s\n", self.getTokenName(t), cfg.toString(self.recog, True))
+            if LexerATNSimulator.debug:
+                print("testing", self.getTokenName(t), "at", str(cfg))

             for trans in cfg.state.transitions: # for each transition
                 target = self.getReachableTarget(trans, t)
@@ -304,8 +304,8 @@ class LexerATNSimulator(ATNSimulator):
                 skipAlt = cfg.alt

     def accept(self, input:InputStream, lexerActionExecutor:LexerActionExecutor, startIndex:int, index:int, line:int, charPos:int):
-        if self.debug:
-            print("ACTION %s\n", lexerActionExecutor)
+        if LexerATNSimulator.debug:
+            print("ACTION", lexerActionExecutor)

         # seek to after last char in token
         input.seek(index)
@@ -340,15 +340,15 @@ class LexerATNSimulator(ATNSimulator):
     # {@code false}.
     def closure(self, input:InputStream, config:LexerATNConfig, configs:ATNConfigSet, currentAltReachedAcceptState:bool,
                 speculative:bool, treatEofAsEpsilon:bool):
-        if self.debug:
-            print("closure("+config.toString(self.recog, True)+")")
+        if LexerATNSimulator.debug:
+            print("closure(" + str(config) + ")")

         if isinstance( config.state, RuleStopState ):
-            if self.debug:
+            if LexerATNSimulator.debug:
                 if self.recog is not None:
-                    print("closure at %s rule stop %s\n", self.recog.getRuleNames()[config.state.ruleIndex], config)
+                    print("closure at", self.recog.symbolicNames[config.state.ruleIndex], "rule stop", str(config))
                 else:
-                    print("closure at rule stop %s\n", config)
+                    print("closure at rule stop", str(config))

-            if config.context is None or config.context.hasEmptyPath():
+            if config.context is None or config.context.isEmpty():
@@ -411,7 +411,7 @@ class LexerATNSimulator(ATNSimulator):
             # states reached by traversing predicates. Since this is when we
             # test them, we cannot cash the DFA state target of ID.

-            if self.debug:
+            if LexerATNSimulator.debug:
                 print("EVAL rule "+ str(t.ruleIndex) + ":" + str(t.predIndex))
             configs.hasSemanticContext = True
             if self.evaluatePredicate(input, t.ruleIndex, t.predIndex, speculative):
@@ -523,7 +523,7 @@ class LexerATNSimulator(ATNSimulator):
             # Only track edges within the DFA bounds
             return to

-        if self.debug:
+        if LexerATNSimulator.debug:
             print("EDGE " + str(from_) + " -> " + str(to) + " upon "+ chr(tk))

         if from_.edges is None:
@@ -313,7 +313,7 @@ class ParserATNSimulator(ATNSimulator):
         pass

     def adaptivePredict(self, input:TokenStream, decision:int, outerContext:ParserRuleContext):
-        if self.debug or self.debug_list_atn_decisions:
+        if ParserATNSimulator.debug or ParserATNSimulator.debug_list_atn_decisions:
             print("adaptivePredict decision " + str(decision) +
                   " exec LA(1)==" + self.getLookaheadName(input) +
                   " line " + str(input.LT(1).line) + ":" +
@@ -341,10 +341,10 @@ class ParserATNSimulator(ATNSimulator):
             if s0 is None:
                 if outerContext is None:
                     outerContext = ParserRuleContext.EMPTY
-                if self.debug or self.debug_list_atn_decisions:
+                if ParserATNSimulator.debug or ParserATNSimulator.debug_list_atn_decisions:
                     print("predictATN decision " + str(dfa.decision) +
                           " exec LA(1)==" + self.getLookaheadName(input) +
-                          ", outerContext=" + outerContext.toString(self.parser))
+                          ", outerContext=" + outerContext.toString(self.parser.literalNames, None))

             # If this is not a precedence DFA, we check the ATN start state
             # to determine if this ATN start state is the decision for the
@@ -373,8 +373,8 @@ class ParserATNSimulator(ATNSimulator):
                 dfa.s0 = s0

             alt = self.execATN(dfa, s0, input, index, outerContext)
-            if self.debug:
-                print("DFA after predictATN: " + dfa.toString(self.parser.tokenNames))
+            if ParserATNSimulator.debug:
+                print("DFA after predictATN: " + dfa.toString(self.parser.literalNames))
             return alt
         finally:
             self._dfa = None
@@ -413,14 +413,14 @@ class ParserATNSimulator(ATNSimulator):
     # conflict + preds
     #
     def execATN(self, dfa:DFA, s0:DFAState, input:TokenStream, startIndex:int, outerContext:ParserRuleContext ):
-        if self.debug or self.debug_list_atn_decisions:
+        if ParserATNSimulator.debug or ParserATNSimulator.debug_list_atn_decisions:
            print("execATN decision " + str(dfa.decision) +
                  " exec LA(1)==" + self.getLookaheadName(input) +
                  " line " + str(input.LT(1).line) + ":" + str(input.LT(1).column))

        previousD = s0

-        if self.debug:
+        if ParserATNSimulator.debug:
            print("s0 = " + str(s0))

        t = input.LA(1)
@@ -450,7 +450,7 @@ class ParserATNSimulator(ATNSimulator):
                 # IF PREDS, MIGHT RESOLVE TO SINGLE ALT => SLL (or syntax error)
                 conflictingAlts = None
                 if D.predicates is not None:
-                    if self.debug:
+                    if ParserATNSimulator.debug:
                         print("DFA state has preds in DFA sim LL failover")
                     conflictIndex = input.index
                     if conflictIndex != startIndex:
@@ -458,7 +458,7 @@ class ParserATNSimulator(ATNSimulator):

                     conflictingAlts = self.evalSemanticContext(D.predicates, outerContext, True)
                     if len(conflictingAlts)==1:
-                        if self.debug:
+                        if ParserATNSimulator.debug:
                             print("Full LL avoided")
                         return min(conflictingAlts)

@@ -467,7 +467,7 @@ class ParserATNSimulator(ATNSimulator):
                         # context occurs with the index at the correct spot
                         input.seek(conflictIndex)

-                if self.dfa_debug:
+                if ParserATNSimulator.dfa_debug:
                     print("ctx sensitive state " + str(outerContext) +" in " + str(D))
                 fullCtx = True
                 s0_closure = self.computeStartState(dfa.atnStartState, outerContext, fullCtx)
@@ -539,7 +539,7 @@ class ParserATNSimulator(ATNSimulator):

         predictedAlt = self.getUniqueAlt(reach)

-        if self.debug:
+        if ParserATNSimulator.debug:
             altSubSets = PredictionMode.getConflictingAltSubsets(reach)
             print("SLL altSubSets=" + str(altSubSets) + ", configs=" + str(reach) +
                   ", predict=" + str(predictedAlt) + ", allSubsetsConflict=" +
@@ -591,8 +591,8 @@ class ParserATNSimulator(ATNSimulator):
                                  input:TokenStream,
                                  startIndex:int,
                                  outerContext:ParserRuleContext):
-        if self.debug or self.debug_list_atn_decisions:
-            print("execATNWithFullContext "+s0)
+        if ParserATNSimulator.debug or ParserATNSimulator.debug_list_atn_decisions:
+            print("execATNWithFullContext", str(s0))
         fullCtx = True
         foundExactAmbig = False
         reach = None
@@ -621,7 +621,7 @@ class ParserATNSimulator(ATNSimulator):
                 raise e

             altSubSets = PredictionMode.getConflictingAltSubsets(reach)
-            if self.debug:
+            if ParserATNSimulator.debug:
                 print("LL altSubSets=" + str(altSubSets) + ", predict=" +
                       str(PredictionMode.getUniqueAlt(altSubSets)) + ", resolvesToJustOneViableAlt=" +
                       str(PredictionMode.resolvesToJustOneViableAlt(altSubSets)))
@@ -690,7 +690,7 @@ class ParserATNSimulator(ATNSimulator):
         return predictedAlt

     def computeReachSet(self, closure:ATNConfigSet, t:int, fullCtx:bool):
-        if self.debug:
+        if ParserATNSimulator.debug:
             print("in computeReachSet, starting closure: " + str(closure))

         if self.mergeCache is None:
@@ -712,7 +712,7 @@ class ParserATNSimulator(ATNSimulator):

         # First figure out where we can reach on input t
         for c in closure:
-            if self.debug:
+            if ParserATNSimulator.debug:
                 print("testing " + self.getTokenName(t) + " at " + str(c))

             if isinstance(c.state, RuleStopState):
@@ -972,7 +972,7 @@ class ParserATNSimulator(ATNSimulator):
         # nonambig alts are null in altToPred
         if nPredAlts==0:
             altToPred = None
-        if self.debug:
+        if ParserATNSimulator.debug:
             print("getPredsForAmbigAlts result " + str_list(altToPred))
         return altToPred

@@ -1098,11 +1098,11 @@ class ParserATNSimulator(ATNSimulator):
                     break
                 continue
             predicateEvaluationResult = pair.pred.eval(self.parser, outerContext)
-            if self.debug or self.dfa_debug:
+            if ParserATNSimulator.debug or ParserATNSimulator.dfa_debug:
                 print("eval pred " + str(pair) + "=" + str(predicateEvaluationResult))

             if predicateEvaluationResult:
-                if self.debug or self.dfa_debug:
+                if ParserATNSimulator.debug or ParserATNSimulator.dfa_debug:
                     print("PREDICT " + str(pair.alt))
                 predictions.add(pair.alt)
                 if not complete:
@@ -1124,8 +1124,8 @@ class ParserATNSimulator(ATNSimulator):


     def closureCheckingStopState(self, config:ATNConfig, configs:ATNConfigSet, closureBusy:set, collectPredicates:bool, fullCtx:bool, depth:int, treatEofAsEpsilon:bool):
-        if self.debug:
-            print("closure(" + config.toString(self.parser,True) + ")")
+        if ParserATNSimulator.debug:
+            print("closure(" + str(config) + ")")

         if isinstance(config.state, RuleStopState):
             # We hit rule end. If we have context info, use it
@@ -1139,7 +1139,7 @@ class ParserATNSimulator(ATNSimulator):
                         continue
                     else:
                         # we have no context info, just chase follow links (if greedy)
-                        if self.debug:
+                        if ParserATNSimulator.debug:
                             print("FALLING off rule " + self.getRuleName(config.state.ruleIndex))
                         self.closure_(config, configs, closureBusy, collectPredicates,
                                       fullCtx, depth, treatEofAsEpsilon)
@@ -1159,7 +1159,7 @@ class ParserATNSimulator(ATNSimulator):
                 return
             else:
                 # else if we have no context info, just chase follow links (if greedy)
-                if self.debug:
+                if ParserATNSimulator.debug:
                     print("FALLING off rule " + self.getRuleName(config.state.ruleIndex))

         self.closure_(config, configs, closureBusy, collectPredicates, fullCtx, depth, treatEofAsEpsilon)
@@ -1201,7 +1201,7 @@ class ParserATNSimulator(ATNSimulator):
                     c.reachesIntoOuterContext += 1
                     configs.dipsIntoOuterContext = True # TODO: can remove? only care when we add to set per middle of this method
                     newDepth -= 1
-                    if self.debug:
+                    if ParserATNSimulator.debug:
                         print("dips into outer ctx: " + str(c))
                 elif isinstance(t, RuleTransition):
                     # latch when newDepth goes negative - once we step out of the entry context we can't return
@@ -1242,12 +1242,12 @@ class ParserATNSimulator(ATNSimulator):
         return m(self, config, t, collectPredicates, inContext, fullCtx, treatEofAsEpsilon)

     def actionTransition(self, config:ATNConfig, t:ActionTransition):
-        if self.debug:
+        if ParserATNSimulator.debug:
             print("ACTION edge " + str(t.ruleIndex) + ":" + str(t.actionIndex))
         return ATNConfig(state=t.target, config=config)

     def precedenceTransition(self, config:ATNConfig, pt:PrecedencePredicateTransition, collectPredicates:bool, inContext:bool, fullCtx:bool):
-        if self.debug:
+        if ParserATNSimulator.debug:
             print("PRED (collectPredicates=" + str(collectPredicates) + ") " +
                   str(pt.precedence) + ">=_p, ctx dependent=true")
             if self.parser is not None:
@@ -1272,12 +1272,12 @@ class ParserATNSimulator(ATNSimulator):
         else:
             c = ATNConfig(state=pt.target, config=config)

-        if self.debug:
+        if ParserATNSimulator.debug:
             print("config from pred transition=" + str(c))
         return c

     def predTransition(self, config:ATNConfig, pt:PredicateTransition, collectPredicates:bool, inContext:bool, fullCtx:bool):
-        if self.debug:
+        if ParserATNSimulator.debug:
             print("PRED (collectPredicates=" + str(collectPredicates) + ") " + str(pt.ruleIndex) +
                   ":" + str(pt.predIndex) + ", ctx dependent=" + str(pt.isCtxDependent))
             if self.parser is not None:
@@ -1302,12 +1302,12 @@ class ParserATNSimulator(ATNSimulator):
         else:
             c = ATNConfig(state=pt.target, config=config)

-        if self.debug:
+        if ParserATNSimulator.debug:
             print("config from pred transition=" + str(c))
         return c

     def ruleTransition(self, config:ATNConfig, t:RuleTransition):
-        if self.debug:
+        if ParserATNSimulator.debug:
             print("CALL rule " + self.getRuleName(t.target.ruleIndex) + ", ctx=" + str(config.context))
         returnState = t.followState
         newContext = SingletonPredictionContext.create(config.context, returnState.stateNumber)
@@ -1365,12 +1365,11 @@ class ParserATNSimulator(ATNSimulator):
     def getTokenName(self, t:int):
         if t==Token.EOF:
             return "EOF"
-        if self.parser is not None and self.parser.tokenNames is not None:
-            if t >= len(self.parser.tokenNames):
-                print(str(t) + " ttype out of range: " + str_list(self.parser.tokenNames))
-                print(str_list(self.parser.getInputStream().getTokens()))
-            else:
-                return self.parser.tokensNames[t] + "<" + str(t) + ">"
-        return str(t)
+        if self.parser is not None and \
+            self.parser.literalNames is not None and \
+            t < len(self.parser.literalNames):
+                return self.parser.literalNames[t] + "<" + str(t) + ">"
+        else:
+            return str(t)

     def getLookaheadName(self, input:TokenStream):
@@ -1426,7 +1425,7 @@ class ParserATNSimulator(ATNSimulator):
     # on {@code to}
     #
     def addDFAEdge(self, dfa:DFA, from_:DFAState, t:int, to:DFAState):
-        if self.debug:
+        if ParserATNSimulator.debug:
             print("EDGE " + str(from_) + " -> " + str(to) + " upon " + self.getTokenName(t))

         if to is None:
@@ -1440,8 +1439,8 @@ class ParserATNSimulator(ATNSimulator):
             from_.edges = [None] * (self.atn.maxTokenType + 2)
         from_.edges[t+1] = to # connect

-        if self.debug:
-            names = None if self.parser is None else self.parser.tokenNames
+        if ParserATNSimulator.debug:
+            names = None if self.parser is None else self.parser.literalNames
             print("DFA=\n" + dfa.toString(names))

         return to
@@ -1475,12 +1474,12 @@ class ParserATNSimulator(ATNSimulator):
         D.configs.optimizeConfigs(self)
         D.configs.setReadonly(True)
         dfa.states[D] = D
-        if self.debug:
+        if ParserATNSimulator.debug:
             print("adding new DFA state: " + str(D))
         return D

     def reportAttemptingFullContext(self, dfa:DFA, conflictingAlts:set, configs:ATNConfigSet, startIndex:int, stopIndex:int):
-        if self.debug or self.retry_debug:
+        if ParserATNSimulator.debug or ParserATNSimulator.retry_debug:
             interval = range(startIndex, stopIndex + 1)
             print("reportAttemptingFullContext decision=" + str(dfa.decision) + ":" + str(configs) +
                   ", input=" + self.parser.getTokenStream().getText(interval))
@@ -1488,7 +1487,7 @@ class ParserATNSimulator(ATNSimulator):
             self.parser.getErrorListenerDispatch().reportAttemptingFullContext(self.parser, dfa, startIndex, stopIndex, conflictingAlts, configs)

     def reportContextSensitivity(self, dfa:DFA, prediction:int, configs:ATNConfigSet, startIndex:int, stopIndex:int):
-        if self.debug or self.retry_debug:
+        if ParserATNSimulator.debug or ParserATNSimulator.retry_debug:
             interval = range(startIndex, stopIndex + 1)
             print("reportContextSensitivity decision=" + str(dfa.decision) + ":" + str(configs) +
                   ", input=" + self.parser.getTokenStream().getText(interval))
@@ -1498,7 +1497,7 @@ class ParserATNSimulator(ATNSimulator):
     # If context sensitive parsing, we know it's ambiguity not conflict#
     def reportAmbiguity(self, dfa:DFA, D:DFAState, startIndex:int, stopIndex:int,
                         exact:bool, ambigAlts:set, configs:ATNConfigSet ):
-        if self.debug or self.retry_debug:
+        if ParserATNSimulator.debug or ParserATNSimulator.retry_debug:
             # ParserATNPathFinder finder = new ParserATNPathFinder(parser, atn);
             # int i = 1;
             # for (Transition t : dfa.atnStartState.transitions) {
@@ -470,7 +470,8 @@ class PredictionMode(Enum):
     def getUniqueAlt(cls, altsets:list):
         all = cls.getAlts(altsets)
         if len(all)==1:
-            return all[0]
+            for one in all:
+                return one
         else:
             return ATN.INVALID_ALT_NUMBER

@@ -375,7 +375,7 @@ while True:

 AltBlock(choice, preamble, alts, error) ::= <<
 self.state = <choice.stateNumber>
-self._errHandler.sync(self);
+self._errHandler.sync(self)
 <if(choice.label)><labelref(choice.label)> = _input.LT(1)<endif>
 <preamble; separator="\n">
 la_ = self._interp.adaptivePredict(self._input,<choice.decision>,self._ctx)
@@ -389,7 +389,7 @@ if la_ == <i>:

 OptionalBlock(choice, alts, error) ::= <<
 self.state = <choice.stateNumber>
-self._errHandler.sync(self);
+self._errHandler.sync(self)
 la_ = self._interp.adaptivePredict(self._input,<choice.decision>,self._ctx)
 <alts:{alt |
 if la_ == <i><if(!choice.ast.greedy)>+1<endif>:
|
@ -383,7 +383,7 @@ while True:
|
|||
|
||||
AltBlock(choice, preamble, alts, error) ::= <<
|
||||
self.state = <choice.stateNumber>
|
||||
self._errHandler.sync(self);
|
||||
self._errHandler.sync(self)
|
||||
<if(choice.label)><labelref(choice.label)> = _input.LT(1)<endif>
|
||||
<preamble; separator="\n">
|
||||
la_ = self._interp.adaptivePredict(self._input,<choice.decision>,self._ctx)
|
||||
|
@ -397,7 +397,7 @@ if la_ == <i>:
|
|||
|
||||
OptionalBlock(choice, alts, error) ::= <<
|
||||
self.state = <choice.stateNumber>
|
||||
self._errHandler.sync(self);
|
||||
self._errHandler.sync(self)
|
||||
la_ = self._interp.adaptivePredict(self._input,<choice.decision>,self._ctx)
|
||||
<alts:{alt |
|
||||
if la_ == <i><if(!choice.ast.greedy)>+1<endif>:
|
||||
|
|