Merge pull request #1218 from ericvergnaud/python-performance-issue
Python performance issue
commit 47e268dfea
@@ -788,7 +788,8 @@ public class ParserATNSimulator extends ATNSimulator {
     protected ATNConfigSet computeReachSet(ATNConfigSet closure, int t,
                                            boolean fullCtx)
     {
-        if ( debug ) System.out.println("in computeReachSet, starting closure: " + closure);
+        if ( debug )
+            System.out.println("in computeReachSet, starting closure: " + closure);
         if (mergeCache == null) {
             mergeCache = new DoubleKeyMap<PredictionContext, PredictionContext, PredictionContext>();

@@ -8,6 +8,6 @@ This runtime has been tested in Node.js, Safari, Firefox, Chrome and IE.

 See www.antlr.org for more information on ANTLR

-See https://raw.githubusercontent.com/antlr/antlr4/master/doc/javascript-target.md for more information on using ANTLR in JavaScript
+See https://github.com/antlr/antlr4/blob/master/doc/javascript-target.md for more information on using ANTLR in JavaScript

@@ -8,6 +8,6 @@ This runtime has been tested in Node.js, Safari, Firefox, Chrome and IE.

 See www.antlr.org for more information on ANTLR

-See https://theantlrguy.atlassian.net/wiki/display/ANTLR4/JavaScript+Target for more information on using ANTLR in JavaScript
+See https://github.com/antlr/antlr4/blob/master/doc/javascript-target.md for more information on using ANTLR in JavaScript

@@ -30,6 +30,7 @@
 #/
 from io import StringIO
 from antlr4.RuleContext import RuleContext
+from antlr4.atn.ATN import ATN
 from antlr4.atn.ATNState import ATNState


@@ -98,7 +99,7 @@ def calculateHashCode(parent, returnState):

 def calculateListsHashCode(parents, returnStates ):
     h = 0
-    for parent, returnState in parents, returnStates:
+    for parent, returnState in zip(parents, returnStates):
         h = hash((h, calculateHashCode(parent, returnState)))
     return h

@@ -254,6 +255,10 @@ class ArrayPredictionContext(PredictionContext):
             buf.write(u"]")
             return buf.getvalue()

+    def __hash__(self):
+        return self.cachedHashCode
+
+

 # Convert a {@link RuleContext} tree to a {@link PredictionContext} graph.
 # Return {@link #EMPTY} if {@code outerContext} is empty or null.
@@ -328,18 +333,18 @@ def merge(a, b, rootIsWildcard, mergeCache):
 #/
 def mergeSingletons(a, b, rootIsWildcard, mergeCache):
     if mergeCache is not None:
-        previous = mergeCache.get(a,b)
+        previous = mergeCache.get((a,b), None)
         if previous is not None:
             return previous
-        previous = mergeCache.get(b,a)
+        previous = mergeCache.get((b,a), None)
         if previous is not None:
             return previous

-    rootMerge = mergeRoot(a, b, rootIsWildcard)
-    if rootMerge is not None:
+    merged = mergeRoot(a, b, rootIsWildcard)
+    if merged is not None:
         if mergeCache is not None:
-            mergeCache.put(a, b, rootMerge)
-        return rootMerge
+            mergeCache[(a, b)] = merged
+        return merged

     if a.returnState==b.returnState:
         parent = merge(a.parentCtx, b.parentCtx, rootIsWildcard, mergeCache)
@@ -352,10 +357,10 @@ def mergeSingletons(a, b, rootIsWildcard, mergeCache):
         # merge parents x and y, giving array node with x,y then remainders
         # of those graphs.  dup a, a' points at merged array
         # new joined parent so create new singleton pointing to it, a'
-        a_ = SingletonPredictionContext.create(parent, a.returnState)
+        merged = SingletonPredictionContext.create(parent, a.returnState)
         if mergeCache is not None:
-            mergeCache.put(a, b, a_)
-        return a_
+            mergeCache[(a, b)] = merged
+        return merged
     else: # a != b payloads differ
         # see if we can collapse parents due to $+x parents if local ctx
         singleParent = None
@@ -365,26 +370,24 @@ def mergeSingletons(a, b, rootIsWildcard, mergeCache):
             # sort payloads and use same parent
             payloads = [ a.returnState, b.returnState ]
             if a.returnState > b.returnState:
-                payloads[0] = b.returnState
-                payloads[1] = a.returnState
+                payloads = [ b.returnState, a.returnState ]
             parents = [singleParent, singleParent]
-            a_ = ArrayPredictionContext(parents, payloads)
+            merged = ArrayPredictionContext(parents, payloads)
             if mergeCache is not None:
-                mergeCache.put(a, b, a_)
-            return a_
+                mergeCache[(a, b)] = merged
+            return merged
         # parents differ and can't merge them. Just pack together
         # into array; can't merge.
         # ax + by = [ax,by]
         payloads = [ a.returnState, b.returnState ]
         parents = [ a.parentCtx, b.parentCtx ]
         if a.returnState > b.returnState: # sort by payload
-            payloads[0] = b.returnState
-            payloads[1] = a.returnState
+            payloads = [ b.returnState, a.returnState ]
             parents = [ b.parentCtx, a.parentCtx ]
-        a_ = ArrayPredictionContext(parents, payloads)
+        merged = ArrayPredictionContext(parents, payloads)
         if mergeCache is not None:
-            mergeCache.put(a, b, a_)
-        return a_
+            mergeCache[(a, b)] = merged
+        return merged


 #
@@ -466,10 +469,10 @@ def mergeRoot(a, b, rootIsWildcard):
 #/
 def mergeArrays(a, b, rootIsWildcard, mergeCache):
     if mergeCache is not None:
-        previous = mergeCache.get(a,b)
+        previous = mergeCache.get((a,b), None)
         if previous is not None:
             return previous
-        previous = mergeCache.get(b,a)
+        previous = mergeCache.get((b,a), None)
         if previous is not None:
             return previous

@@ -478,8 +481,8 @@ def mergeArrays(a, b, rootIsWildcard, mergeCache):
     j = 0 # walks b
     k = 0 # walks target M array

-    mergedReturnStates = [] * (len(a.returnState) + len( b.returnStates))
-    mergedParents = [] * len(mergedReturnStates)
+    mergedReturnStates = [None] * (len(a.returnStates) + len( b.returnStates))
+    mergedParents = [None] * len(mergedReturnStates)
     # walk and merge to yield mergedParents, mergedReturnStates
     while i<len(a.returnStates) and j<len(b.returnStates):
         a_parent = a.parents[i]
@@ -525,30 +528,30 @@ def mergeArrays(a, b, rootIsWildcard, mergeCache):
     # trim merged if we combined a few that had same stack tops
     if k < len(mergedParents): # write index < last position; trim
         if k == 1: # for just one merged element, return singleton top
-            a_ = SingletonPredictionContext.create(mergedParents[0], mergedReturnStates[0])
+            merged = SingletonPredictionContext.create(mergedParents[0], mergedReturnStates[0])
             if mergeCache is not None:
-                mergeCache.put(a,b,a_)
-            return a_
+                mergeCache[(a,b)] = merged
+            return merged
         mergedParents = mergedParents[0:k]
         mergedReturnStates = mergedReturnStates[0:k]

-    M = ArrayPredictionContext(mergedParents, mergedReturnStates)
+    merged = ArrayPredictionContext(mergedParents, mergedReturnStates)

     # if we created same array as a or b, return that instead
     # TODO: track whether this is possible above during merge sort for speed
-    if M==a:
+    if merged==a:
         if mergeCache is not None:
-            mergeCache.put(a,b,a)
+            mergeCache[(a,b)] = a
         return a
-    if M==b:
+    if merged==b:
         if mergeCache is not None:
-            mergeCache.put(a,b,b)
+            mergeCache[(a,b)] = b
         return b
     combineCommonParents(mergedParents)

     if mergeCache is not None:
-        mergeCache.put(a,b,M)
-    return M
+        mergeCache[(a,b)] = merged
+    return merged


 #
@@ -642,6 +645,6 @@ def getAllContextNodes(context, nodes=None, visited=None):
         visited.put(context, context)
         nodes.add(context)
         for i in range(0, len(context)):
-            getAllContextNodes(context.getParent(i), nodes, visited);
+            getAllContextNodes(context.getParent(i), nodes, visited)
         return nodes

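
The hunks above make two low-level fixes in the prediction-context code: the merge cache becomes an ordinary dict keyed by an (a, b) tuple, replacing the old two-argument .get(a,b)/.put(a,b,x) calls, and the hash over parallel lists now pairs elements with zip(). A minimal, runnable sketch of both patterns outside the runtime (the combine callable is illustrative, not ANTLR API):

# Tuple-keyed cache, as in mergeSingletons/mergeArrays above.
merge_cache = {}

def cached_merge(a, b, combine):
    previous = merge_cache.get((a, b), None)
    if previous is None:
        previous = merge_cache.get((b, a), None)   # merging is symmetric
    if previous is not None:
        return previous
    merged = combine(a, b)
    merge_cache[(a, b)] = merged                   # dict assignment replaces .put()
    return merged

print(cached_merge(3, 4, lambda x, y: x + y))      # computes and caches 7
print(cached_merge(4, 3, lambda x, y: x + y))      # served from the (3, 4) entry

# zip() pairs the two lists element-wise; the old "in parents, returnStates"
# form iterated over a 2-tuple of whole lists instead.
parents = ["p0", "p1"]
return_states = [10, 20]
print(list(zip(parents, return_states)))           # [('p0', 10), ('p1', 20)]
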
@@ -95,6 +95,19 @@ class ATNConfig(object):
     def __hash__(self):
         return hash((self.state.stateNumber, self.alt, self.context, self.semanticContext))

+    def hashCodeForConfigSet(self):
+        return hash((self.state.stateNumber, self.alt, hash(self.semanticContext)))
+
+    def equalsForConfigSet(self, other):
+        if self is other:
+            return True
+        elif not isinstance(other, ATNConfig):
+            return False
+        else:
+            return self.state.stateNumber==other.state.stateNumber \
+                and self.alt==other.alt \
+                and self.semanticContext==other.semanticContext
+
     def __str__(self):
         return unicode(self)

@@ -144,6 +157,18 @@ class LexerATNConfig(ATNConfig):
             return False
         return super(LexerATNConfig, self).__eq__(other)

+
+
+    def hashCodeForConfigSet(self):
+        return hash(self)
+
+
+    def equalsForConfigSet(self, other):
+        return self==other
+
+
+
     def checkNonGreedyDecision(self, source, target):
         return source.passedThroughNonGreedyDecision \
             or isinstance(target, DecisionState) and target.nonGreedy
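
The new hashCodeForConfigSet/equalsForConfigSet pair added above deliberately leaves the prediction context out of both the hash and the comparison (keeping state, alternative and semantic context), so two configurations that differ only in context land in the same config-set bucket and can have their contexts merged instead of being stored twice. A simplified, runnable sketch of that idea (toy class, not the runtime's ATNConfig):

class Cfg(object):
    # toy stand-in: state/alt/semantic mirror the fields the real methods hash
    def __init__(self, state, alt, semantic, context):
        self.state, self.alt, self.semantic, self.context = state, alt, semantic, context

    def hash_for_config_set(self):
        return hash((self.state, self.alt, self.semantic))    # context ignored

    def equals_for_config_set(self, other):
        return (self.state == other.state
                and self.alt == other.alt
                and self.semantic == other.semantic)

c1 = Cfg(5, 1, None, "ctx-a")
c2 = Cfg(5, 1, None, "ctx-b")
print(c1.hash_for_config_set() == c2.hash_for_config_set())   # True: same bucket
print(c1.equals_for_config_set(c2))                           # True: contexts can merge
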
@@ -105,8 +105,8 @@ class ATNConfigSet(object):
         rootIsWildcard = not self.fullCtx
         merged = merge(existing.context, config.context, rootIsWildcard, mergeCache)
         # no need to check for existing.context, config.context in cache
-        # since only way to create new graphs is "call rule" and here. We
-        # cache at both places.
+        # since only way to create new graphs is "call rule" and here.
+        # We cache at both places.
         existing.reachesIntoOuterContext = max(existing.reachesIntoOuterContext, config.reachesIntoOuterContext)
         # make sure to preserve the precedence filter suppression during the merge
         if config.precedenceFilterSuppressed:
@@ -115,11 +115,11 @@ class ATNConfigSet(object):
         return True

     def getOrAdd(self, config):
-        h = hash(config)
+        h = config.hashCodeForConfigSet()
         l = self.configLookup.get(h, None)
         if l is not None:
             for c in l:
-                if c==config:
+                if config.equalsForConfigSet(c):
                     return c
         if l is None:
             l = [config]
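
getOrAdd above now buckets configurations by hashCodeForConfigSet() and compares them with equalsForConfigSet(), so a configuration whose context was merged elsewhere is found and reused instead of appended again. A sketch of that dict-of-lists lookup, reusing the toy Cfg class from the previous sketch (not the real ATNConfigSet):

def get_or_add(config_lookup, config):
    h = config.hash_for_config_set()           # bucket key ignores context
    bucket = config_lookup.get(h, None)
    if bucket is not None:
        for c in bucket:
            if config.equals_for_config_set(c):
                return c                       # reuse the existing config
        bucket.append(config)
    else:
        config_lookup[h] = [config]
    return config

lookup = {}
first = get_or_add(lookup, Cfg(5, 1, None, "ctx-a"))
second = get_or_add(lookup, Cfg(5, 1, None, "ctx-b"))
print(first is second)                         # True: the second call returns the first config
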
@@ -130,7 +130,7 @@ class LexerATNSimulator(ATNSimulator):
     def matchATN(self, input):
         startState = self.atn.modeToStartState[self.mode]

-        if self.debug:
+        if LexerATNSimulator.debug:
             print("matchATN mode " + str(self.mode) + " start: " + str(startState))

         old_mode = self.mode
@@ -144,13 +144,13 @@ class LexerATNSimulator(ATNSimulator):

         predict = self.execATN(input, next)

-        if self.debug:
+        if LexerATNSimulator.debug:
             print("DFA after matchATN: " + str(self.decisionToDFA[old_mode].toLexerString()))

         return predict

     def execATN(self, input, ds0):
-        if self.debug:
+        if LexerATNSimulator.debug:
             print("start state closure=" + str(ds0.configs))

         if ds0.isAcceptState:
@@ -161,8 +161,8 @@ class LexerATNSimulator(ATNSimulator):
         s = ds0 # s is current/from DFA state

         while True: # while more work
-            if self.debug:
-                print("execATN loop starting closure: %s\n", s.configs)
+            if LexerATNSimulator.debug:
+                print("execATN loop starting closure:", str(s.configs))

             # As we move src->trg, src->trg, we keep track of the previous trg to
             # avoid looking up the DFA state again, which is expensive.
@@ -223,8 +223,8 @@ class LexerATNSimulator(ATNSimulator):
             return None

         target = s.edges[t - self.MIN_DFA_EDGE]
-        if self.debug and target is not None:
-            print("reuse state "+s.stateNumber+ " edge to "+target.stateNumber)
+        if LexerATNSimulator.debug and target is not None:
+            print("reuse state", str(s.stateNumber), "edge to", str(target.stateNumber))

         return target

@@ -280,8 +280,8 @@ class LexerATNSimulator(ATNSimulator):
             if currentAltReachedAcceptState and cfg.passedThroughNonGreedyDecision:
                 continue

-            if self.debug:
-                print("testing %s at %s\n", self.getTokenName(t), cfg.toString(self.recog, True))
+            if LexerATNSimulator.debug:
+                print("testing", self.getTokenName(t), "at", str(cfg))

             for trans in cfg.state.transitions: # for each transition
                 target = self.getReachableTarget(trans, t)
@@ -298,8 +298,8 @@ class LexerATNSimulator(ATNSimulator):
                         skipAlt = cfg.alt

     def accept(self, input, lexerActionExecutor, startIndex, index, line, charPos):
-        if self.debug:
-            print("ACTION %s\n", lexerActionExecutor)
+        if LexerATNSimulator.debug:
+            print("ACTION", lexerActionExecutor)

         # seek to after last char in token
         input.seek(index)
@@ -334,15 +334,15 @@ class LexerATNSimulator(ATNSimulator):
     # {@code false}.
     def closure(self, input, config, configs, currentAltReachedAcceptState,
                 speculative, treatEofAsEpsilon):
-        if self.debug:
-            print("closure("+config.toString(self.recog, True)+")")
+        if LexerATNSimulator.debug:
+            print("closure(" + str(config) + ")")

         if isinstance( config.state, RuleStopState ):
-            if self.debug:
+            if LexerATNSimulator.debug:
                 if self.recog is not None:
-                    print("closure at %s rule stop %s\n", self.recog.getRuleNames()[config.state.ruleIndex], config)
+                    print("closure at", self.recog.symbolicNames[config.state.ruleIndex], "rule stop", str(config))
                 else:
-                    print("closure at rule stop %s\n", config)
+                    print("closure at rule stop", str(config))

             if config.context is None or config.context.hasEmptyPath():
                 if config.context is None or config.context.isEmpty():
@@ -404,7 +404,7 @@ class LexerATNSimulator(ATNSimulator):
             # states reached by traversing predicates. Since this is when we
             # test them, we cannot cash the DFA state target of ID.

-            if self.debug:
+            if LexerATNSimulator.debug:
                 print("EVAL rule "+ str(t.ruleIndex) + ":" + str(t.predIndex))
             configs.hasSemanticContext = True
             if self.evaluatePredicate(input, t.ruleIndex, t.predIndex, speculative):
@@ -516,7 +516,7 @@ class LexerATNSimulator(ATNSimulator):
             # Only track edges within the DFA bounds
             return to

-        if self.debug:
+        if LexerATNSimulator.debug:
             print("EDGE " + str(from_) + " -> " + str(to) + " upon "+ chr(tk))

         if from_.edges is None:
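
In the lexer-simulator hunks the checks switch from self.debug to LexerATNSimulator.debug, reading the flag explicitly as a class attribute (one switch shared by every simulator instance), and the print calls pass already-formatted pieces instead of a printf-style format string whose placeholders were never substituted. A tiny illustration of the class-level flag pattern (toy class, not runtime code):

class Sim(object):
    debug = False               # class-level switch shared by every instance

first = Sim()
second = Sim()
Sim.debug = True                # flip it once...
print(first.debug, second.debug, Sim.debug)   # ...and every check sees True

first.debug = False             # an instance attribute would shadow self.debug,
print(Sim.debug)                # but the class-qualified check still reads True
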
@@ -308,7 +308,7 @@ class ParserATNSimulator(ATNSimulator):
         pass

     def adaptivePredict(self, input, decision, outerContext):
-        if self.debug or self.debug_list_atn_decisions:
+        if ParserATNSimulator.debug or ParserATNSimulator.debug_list_atn_decisions:
             print("adaptivePredict decision " + str(decision) +
                   " exec LA(1)==" + self.getLookaheadName(input) +
                   " line " + str(input.LT(1).line) + ":" +
@@ -336,10 +336,10 @@ class ParserATNSimulator(ATNSimulator):
             if s0 is None:
                 if outerContext is None:
                     outerContext = ParserRuleContext.EMPTY
-                if self.debug or self.debug_list_atn_decisions:
+                if ParserATNSimulator.debug or ParserATNSimulator.debug_list_atn_decisions:
                     print("predictATN decision " + str(dfa.decision) +
                           " exec LA(1)==" + self.getLookaheadName(input) +
-                          ", outerContext=" + outerContext.toString(self.parser))
+                          ", outerContext=" + outerContext.toString(self.parser.literalNames, None))

                 # If this is not a precedence DFA, we check the ATN start state
                 # to determine if this ATN start state is the decision for the
@@ -368,8 +368,8 @@ class ParserATNSimulator(ATNSimulator):
                 dfa.s0 = s0

             alt = self.execATN(dfa, s0, input, index, outerContext)
-            if self.debug:
-                print("DFA after predictATN: " + dfa.toString(self.parser.tokenNames))
+            if ParserATNSimulator.debug:
+                print("DFA after predictATN: " + dfa.toString(self.parser.literalNames))
             return alt
         finally:
             self._dfa = None
@@ -408,14 +408,14 @@ class ParserATNSimulator(ATNSimulator):
     # conflict + preds
     #
     def execATN(self, dfa, s0, input, startIndex, outerContext ):
-        if self.debug or self.debug_list_atn_decisions:
+        if ParserATNSimulator.debug or ParserATNSimulator.debug_list_atn_decisions:
             print("execATN decision " + str(dfa.decision) +
                   " exec LA(1)==" + self.getLookaheadName(input) +
                   " line " + str(input.LT(1).line) + ":" + str(input.LT(1).column))

         previousD = s0

-        if self.debug:
+        if ParserATNSimulator.debug:
             print("s0 = " + str(s0))

         t = input.LA(1)
@@ -445,7 +445,7 @@ class ParserATNSimulator(ATNSimulator):
                 # IF PREDS, MIGHT RESOLVE TO SINGLE ALT => SLL (or syntax error)
                 conflictingAlts = None
                 if D.predicates is not None:
-                    if self.debug:
+                    if ParserATNSimulator.debug:
                         print("DFA state has preds in DFA sim LL failover")
                     conflictIndex = input.index
                     if conflictIndex != startIndex:
@@ -453,7 +453,7 @@ class ParserATNSimulator(ATNSimulator):

                     conflictingAlts = self.evalSemanticContext(D.predicates, outerContext, True)
                     if len(conflictingAlts)==1:
-                        if self.debug:
+                        if ParserATNSimulator.debug:
                             print("Full LL avoided")
                         return min(conflictingAlts)

@@ -462,7 +462,7 @@ class ParserATNSimulator(ATNSimulator):
                         # context occurs with the index at the correct spot
                         input.seek(conflictIndex)

-                if self.dfa_debug:
+                if ParserATNSimulator.dfa_debug:
                     print("ctx sensitive state " + str(outerContext) +" in " + str(D))
                 fullCtx = True
                 s0_closure = self.computeStartState(dfa.atnStartState, outerContext, fullCtx)
@@ -534,7 +534,7 @@ class ParserATNSimulator(ATNSimulator):

             predictedAlt = self.getUniqueAlt(reach)

-            if self.debug:
+            if ParserATNSimulator.debug:
                 altSubSets = PredictionMode.getConflictingAltSubsets(reach)
                 print("SLL altSubSets=" + str(altSubSets) + ", configs=" + str(reach) +
                       ", predict=" + str(predictedAlt) + ", allSubsetsConflict=" +
@@ -586,8 +586,8 @@ class ParserATNSimulator(ATNSimulator):
                                input,
                                startIndex,
                                outerContext):
-        if self.debug or self.debug_list_atn_decisions:
-            print("execATNWithFullContext "+s0)
+        if ParserATNSimulator.debug or ParserATNSimulator.debug_list_atn_decisions:
+            print("execATNWithFullContext", str(s0))
         fullCtx = True
         foundExactAmbig = False
         reach = None
@@ -616,7 +616,7 @@ class ParserATNSimulator(ATNSimulator):
                 raise e

             altSubSets = PredictionMode.getConflictingAltSubsets(reach)
-            if self.debug:
+            if ParserATNSimulator.debug:
                 print("LL altSubSets=" + str(altSubSets) + ", predict=" +
                       str(PredictionMode.getUniqueAlt(altSubSets)) + ", resolvesToJustOneViableAlt=" +
                       str(PredictionMode.resolvesToJustOneViableAlt(altSubSets)))
@@ -685,7 +685,7 @@ class ParserATNSimulator(ATNSimulator):
         return predictedAlt

     def computeReachSet(self, closure, t, fullCtx):
-        if self.debug:
+        if ParserATNSimulator.debug:
             print("in computeReachSet, starting closure: " + str(closure))

         if self.mergeCache is None:
@@ -707,7 +707,7 @@ class ParserATNSimulator(ATNSimulator):

         # First figure out where we can reach on input t
         for c in closure:
-            if self.debug:
+            if ParserATNSimulator.debug:
                 print("testing " + self.getTokenName(t) + " at " + str(c))

             if isinstance(c.state, RuleStopState):
@@ -967,7 +967,7 @@ class ParserATNSimulator(ATNSimulator):
         # nonambig alts are null in altToPred
         if nPredAlts==0:
             altToPred = None
-        if self.debug:
+        if ParserATNSimulator.debug:
             print("getPredsForAmbigAlts result " + str_list(altToPred))
         return altToPred

@@ -1093,11 +1093,11 @@ class ParserATNSimulator(ATNSimulator):
                     break
                 continue
             predicateEvaluationResult = pair.pred.eval(self.parser, outerContext)
-            if self.debug or self.dfa_debug:
+            if ParserATNSimulator.debug or ParserATNSimulator.dfa_debug:
                 print("eval pred " + str(pair) + "=" + str(predicateEvaluationResult))

             if predicateEvaluationResult:
-                if self.debug or self.dfa_debug:
+                if ParserATNSimulator.debug or ParserATNSimulator.dfa_debug:
                     print("PREDICT " + str(pair.alt))
                 predictions.add(pair.alt)
                 if not complete:
@@ -1119,8 +1119,8 @@ class ParserATNSimulator(ATNSimulator):


     def closureCheckingStopState(self, config, configs, closureBusy, collectPredicates, fullCtx, depth, treatEofAsEpsilon):
-        if self.debug:
-            print("closure(" + config.toString(self.parser,True) + ")")
+        if ParserATNSimulator.debug:
+            print("closure(" + str(config) + ")")

         if isinstance(config.state, RuleStopState):
             # We hit rule end. If we have context info, use it
@@ -1134,7 +1134,7 @@ class ParserATNSimulator(ATNSimulator):
                             continue
                         else:
                             # we have no context info, just chase follow links (if greedy)
-                            if self.debug:
+                            if ParserATNSimulator.debug:
                                 print("FALLING off rule " + self.getRuleName(config.state.ruleIndex))
                             self.closure_(config, configs, closureBusy, collectPredicates,
                                           fullCtx, depth, treatEofAsEpsilon)
@@ -1154,7 +1154,7 @@ class ParserATNSimulator(ATNSimulator):
                 return
             else:
                 # else if we have no context info, just chase follow links (if greedy)
-                if self.debug:
+                if ParserATNSimulator.debug:
                     print("FALLING off rule " + self.getRuleName(config.state.ruleIndex))

         self.closure_(config, configs, closureBusy, collectPredicates, fullCtx, depth, treatEofAsEpsilon)
@@ -1196,7 +1196,7 @@ class ParserATNSimulator(ATNSimulator):
                     c.reachesIntoOuterContext += 1
                     configs.dipsIntoOuterContext = True # TODO: can remove? only care when we add to set per middle of this method
                     newDepth -= 1
-                    if self.debug:
+                    if ParserATNSimulator.debug:
                         print("dips into outer ctx: " + str(c))
                 elif isinstance(t, RuleTransition):
                     # latch when newDepth goes negative - once we step out of the entry context we can't return
@@ -1237,12 +1237,12 @@ class ParserATNSimulator(ATNSimulator):
         return m(self, config, t, collectPredicates, inContext, fullCtx, treatEofAsEpsilon)

     def actionTransition(self, config, t):
-        if self.debug:
+        if ParserATNSimulator.debug:
             print("ACTION edge " + str(t.ruleIndex) + ":" + str(t.actionIndex))
         return ATNConfig(state=t.target, config=config)

     def precedenceTransition(self, config, pt, collectPredicates, inContext, fullCtx):
-        if self.debug:
+        if ParserATNSimulator.debug:
             print("PRED (collectPredicates=" + str(collectPredicates) + ") " +
                   str(pt.precedence) + ">=_p, ctx dependent=true")
             if self.parser is not None:
@@ -1267,12 +1267,12 @@ class ParserATNSimulator(ATNSimulator):
         else:
             c = ATNConfig(state=pt.target, config=config)

-        if self.debug:
+        if ParserATNSimulator.debug:
             print("config from pred transition=" + str(c))
         return c

     def predTransition(self, config, pt, collectPredicates, inContext, fullCtx):
-        if self.debug:
+        if ParserATNSimulator.debug:
             print("PRED (collectPredicates=" + str(collectPredicates) + ") " + str(pt.ruleIndex) +
                   ":" + str(pt.predIndex) + ", ctx dependent=" + str(pt.isCtxDependent))
             if self.parser is not None:
@@ -1297,12 +1297,12 @@ class ParserATNSimulator(ATNSimulator):
         else:
             c = ATNConfig(state=pt.target, config=config)

-        if self.debug:
+        if ParserATNSimulator.debug:
             print("config from pred transition=" + str(c))
         return c

     def ruleTransition(self, config, t):
-        if self.debug:
+        if ParserATNSimulator.debug:
             print("CALL rule " + self.getRuleName(t.target.ruleIndex) + ", ctx=" + str(config.context))
         returnState = t.followState
         newContext = SingletonPredictionContext.create(config.context, returnState.stateNumber)
@@ -1360,13 +1360,12 @@ class ParserATNSimulator(ATNSimulator):
     def getTokenName(self, t):
         if t==Token.EOF:
             return u"EOF"
-        if self.parser is not None and self.parser.tokenNames is not None:
-            if t >= len(self.parser.tokenNames):
-                print(str(t) + " ttype out of range: " + str_list(self.parser.tokenNames))
-                print(str_list(self.parser.getInputStream().getTokens()))
-            else:
-                return self.parser.tokensNames[t] + u"<" + unicode(t) + ">"
-        return unicode(t)
+        if self.parser is not None and \
+                self.parser.literalNames is not None and \
+                t < len(self.parser.literalNames):
+            return self.parser.literalNames[t] + u"<" + unicode(t) + ">"
+        else:
+            return unicode(t)

     def getLookaheadName(self, input):
         return self.getTokenName(input.LA(1))
@@ -1421,7 +1420,7 @@ class ParserATNSimulator(ATNSimulator):
     # on {@code to}
     #
     def addDFAEdge(self, dfa, from_, t, to):
-        if self.debug:
+        if ParserATNSimulator.debug:
             print("EDGE " + str(from_) + " -> " + str(to) + " upon " + self.getTokenName(t))

         if to is None:
@@ -1435,8 +1434,8 @@ class ParserATNSimulator(ATNSimulator):
             from_.edges = [None] * (self.atn.maxTokenType + 2)
         from_.edges[t+1] = to # connect

-        if self.debug:
-            names = None if self.parser is None else self.parser.tokenNames
+        if ParserATNSimulator.debug:
+            names = None if self.parser is None else self.parser.literalNames
             print("DFA=\n" + dfa.toString(names))

         return to
@@ -1470,12 +1469,12 @@ class ParserATNSimulator(ATNSimulator):
         D.configs.optimizeConfigs(self)
         D.configs.setReadonly(True)
         dfa.states[D] = D
-        if self.debug:
+        if ParserATNSimulator.debug:
             print("adding new DFA state: " + str(D))
         return D

     def reportAttemptingFullContext(self, dfa, conflictingAlts, configs, startIndex, stopIndex):
-        if self.debug or self.retry_debug:
+        if ParserATNSimulator.debug or ParserATNSimulator.retry_debug:
             interval = range(startIndex, stopIndex + 1)
             print("reportAttemptingFullContext decision=" + str(dfa.decision) + ":" + str(configs) +
                   ", input=" + self.parser.getTokenStream().getText(interval))
@@ -1483,7 +1482,7 @@ class ParserATNSimulator(ATNSimulator):
             self.parser.getErrorListenerDispatch().reportAttemptingFullContext(self.parser, dfa, startIndex, stopIndex, conflictingAlts, configs)

     def reportContextSensitivity(self, dfa, prediction, configs, startIndex, stopIndex):
-        if self.debug or self.retry_debug:
+        if ParserATNSimulator.debug or ParserATNSimulator.retry_debug:
             interval = range(startIndex, stopIndex + 1)
             print("reportContextSensitivity decision=" + str(dfa.decision) + ":" + str(configs) +
                   ", input=" + self.parser.getTokenStream().getText(interval))
@@ -1493,7 +1492,7 @@ class ParserATNSimulator(ATNSimulator):
     # If context sensitive parsing, we know it's ambiguity not conflict#
     def reportAmbiguity(self, dfa, D, startIndex, stopIndex,
                         exact, ambigAlts, configs ):
-        if self.debug or self.retry_debug:
+        if ParserATNSimulator.debug or ParserATNSimulator.retry_debug:
             # ParserATNPathFinder finder = new ParserATNPathFinder(parser, atn);
             # int i = 1;
             # for (Transition t : dfa.atnStartState.transitions) {
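
The parser-simulator hunks apply the same class-qualified debug flags and move the display-name lookups from parser.tokenNames to parser.literalNames; getTokenName in particular gains a bounds check and loses both the out-of-range diagnostics and the old tokensNames typo. A hedged, Python 2-style sketch of the new shape of that helper (standalone names; in the real code the EOF constant and the name list come from Token and the parser):

EOF = -1   # stand-in for Token.EOF

def get_token_name(token_type, literal_names):
    # mirrors the patched getTokenName: guard both the list and the index,
    # fall back to the bare token type when no display name is available
    if token_type == EOF:
        return u"EOF"
    if literal_names is not None and token_type < len(literal_names):
        return literal_names[token_type] + u"<" + unicode(token_type) + ">"
    else:
        return unicode(token_type)

print(get_token_name(1, [u"<INVALID>", u"'+'"]))   # '+'<1>
print(get_token_name(7, [u"<INVALID>", u"'+'"]))   # 7
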
@@ -467,7 +467,8 @@ class PredictionMode(object):
     def getUniqueAlt(cls, altsets):
         all = cls.getAlts(altsets)
         if len(all)==1:
-            return all[0]
+            for one in all:
+                return one
         else:
             return ATN.INVALID_ALT_NUMBER

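
getAlts evidently hands back a set-like collection here, which cannot be indexed, so the old return all[0] would raise; the patched code iterates once to pull out the single member. Equivalent ways of taking the only element of a set:

alts = {3}                      # a one-element set, as in the len(all)==1 branch
for one in alts:                # the patched loop form
    unique = one
    break
print(unique)                   # 3
print(next(iter(alts)))         # same element without the loop

The remaining hunks repeat the same changes for the runtime that uses type annotations and bare super(), presumably the Python 3 target.
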
@@ -95,7 +95,7 @@ def calculateHashCode(parent:PredictionContext, returnState:int):

 def calculateListsHashCode(parents:[], returnStates:[] ):
     h = 0
-    for parent, returnState in parents, returnStates:
+    for parent, returnState in zip(parents, returnStates):
         h = hash((h, calculateHashCode(parent, returnState)))
     return h

@@ -242,7 +242,7 @@ class ArrayPredictionContext(PredictionContext):
                 if self.returnStates[i]==PredictionContext.EMPTY_RETURN_STATE:
                     buf.write("$")
                     continue
-                buf.write(self.returnStates[i])
+                buf.write(str(self.returnStates[i]))
                 if self.parents[i] is not None:
                     buf.write(' ')
                     buf.write(str(self.parents[i]))
@@ -251,6 +251,10 @@ class ArrayPredictionContext(PredictionContext):
             buf.write("]")
             return buf.getvalue()

+    def __hash__(self):
+        return self.cachedHashCode
+
+

 # Convert a {@link RuleContext} tree to a {@link PredictionContext} graph.
 # Return {@link #EMPTY} if {@code outerContext} is empty or null.
@@ -325,18 +329,18 @@ def merge(a:PredictionContext, b:PredictionContext, rootIsWildcard:bool, mergeCa
 #/
 def mergeSingletons(a:SingletonPredictionContext, b:SingletonPredictionContext, rootIsWildcard:bool, mergeCache:dict):
     if mergeCache is not None:
-        previous = mergeCache.get(a,b)
+        previous = mergeCache.get((a,b), None)
         if previous is not None:
             return previous
-        previous = mergeCache.get(b,a)
+        previous = mergeCache.get((b,a), None)
         if previous is not None:
             return previous

-    rootMerge = mergeRoot(a, b, rootIsWildcard)
-    if rootMerge is not None:
+    merged = mergeRoot(a, b, rootIsWildcard)
+    if merged is not None:
         if mergeCache is not None:
-            mergeCache.put(a, b, rootMerge)
-        return rootMerge
+            mergeCache[(a, b)] = merged
+        return merged

     if a.returnState==b.returnState:
         parent = merge(a.parentCtx, b.parentCtx, rootIsWildcard, mergeCache)
@@ -349,10 +353,10 @@ def mergeSingletons(a:SingletonPredictionContext, b:SingletonPredictionContext,
         # merge parents x and y, giving array node with x,y then remainders
         # of those graphs.  dup a, a' points at merged array
         # new joined parent so create new singleton pointing to it, a'
-        a_ = SingletonPredictionContext.create(parent, a.returnState)
+        merged = SingletonPredictionContext.create(parent, a.returnState)
         if mergeCache is not None:
-            mergeCache.put(a, b, a_)
-        return a_
+            mergeCache[(a, b)] = merged
+        return merged
     else: # a != b payloads differ
         # see if we can collapse parents due to $+x parents if local ctx
         singleParent = None
@@ -362,26 +366,24 @@ def mergeSingletons(a:SingletonPredictionContext, b:SingletonPredictionContext,
             # sort payloads and use same parent
             payloads = [ a.returnState, b.returnState ]
             if a.returnState > b.returnState:
-                payloads[0] = b.returnState
-                payloads[1] = a.returnState
+                payloads = [ b.returnState, a.returnState ]
             parents = [singleParent, singleParent]
-            a_ = ArrayPredictionContext(parents, payloads)
+            merged = ArrayPredictionContext(parents, payloads)
             if mergeCache is not None:
-                mergeCache.put(a, b, a_)
-            return a_
+                mergeCache[(a, b)] = merged
+            return merged
         # parents differ and can't merge them. Just pack together
         # into array; can't merge.
         # ax + by = [ax,by]
         payloads = [ a.returnState, b.returnState ]
         parents = [ a.parentCtx, b.parentCtx ]
         if a.returnState > b.returnState: # sort by payload
-            payloads[0] = b.returnState
-            payloads[1] = a.returnState
+            payloads = [ b.returnState, a.returnState ]
             parents = [ b.parentCtx, a.parentCtx ]
-        a_ = ArrayPredictionContext(parents, payloads)
+        merged = ArrayPredictionContext(parents, payloads)
         if mergeCache is not None:
-            mergeCache.put(a, b, a_)
-        return a_
+            mergeCache[(a, b)] = merged
+        return merged


 #
@@ -463,10 +465,10 @@ def mergeRoot(a:SingletonPredictionContext, b:SingletonPredictionContext, rootIs
 #/
 def mergeArrays(a:ArrayPredictionContext, b:ArrayPredictionContext, rootIsWildcard:bool, mergeCache:dict):
     if mergeCache is not None:
-        previous = mergeCache.get(a,b)
+        previous = mergeCache.get((a,b), None)
         if previous is not None:
             return previous
-        previous = mergeCache.get(b,a)
+        previous = mergeCache.get((b,a), None)
         if previous is not None:
             return previous

@@ -475,8 +477,8 @@ def mergeArrays(a:ArrayPredictionContext, b:ArrayPredictionContext, rootIsWildca
     j = 0 # walks b
     k = 0 # walks target M array

-    mergedReturnStates = [] * (len(a.returnState) + len( b.returnStates))
-    mergedParents = [] * len(mergedReturnStates)
+    mergedReturnStates = [None] * (len(a.returnStates) + len( b.returnStates))
+    mergedParents = [None] * len(mergedReturnStates)
     # walk and merge to yield mergedParents, mergedReturnStates
     while i<len(a.returnStates) and j<len(b.returnStates):
         a_parent = a.parents[i]
@@ -522,30 +524,30 @@ def mergeArrays(a:ArrayPredictionContext, b:ArrayPredictionContext, rootIsWildca
     # trim merged if we combined a few that had same stack tops
     if k < len(mergedParents): # write index < last position; trim
         if k == 1: # for just one merged element, return singleton top
-            a_ = SingletonPredictionContext.create(mergedParents[0], mergedReturnStates[0])
+            merged = SingletonPredictionContext.create(mergedParents[0], mergedReturnStates[0])
             if mergeCache is not None:
-                mergeCache.put(a,b,a_)
-            return a_
+                mergeCache[(a,b)] = merged
+            return merged
         mergedParents = mergedParents[0:k]
         mergedReturnStates = mergedReturnStates[0:k]

-    M = ArrayPredictionContext(mergedParents, mergedReturnStates)
+    merged = ArrayPredictionContext(mergedParents, mergedReturnStates)

     # if we created same array as a or b, return that instead
     # TODO: track whether this is possible above during merge sort for speed
-    if M==a:
+    if merged==a:
         if mergeCache is not None:
-            mergeCache.put(a,b,a)
+            mergeCache[(a,b)] = a
         return a
-    if M==b:
+    if merged==b:
         if mergeCache is not None:
-            mergeCache.put(a,b,b)
+            mergeCache[(a,b)] = b
         return b
     combineCommonParents(mergedParents)

     if mergeCache is not None:
-        mergeCache.put(a,b,M)
-    return M
+        mergeCache[(a,b)] = merged
+    return merged


 #
@@ -59,7 +59,6 @@ class ATNConfig(object):
                 semantic = config.semanticContext
         if semantic is None:
             semantic = SemanticContext.NONE
-
         # The ATN state associated with this configuration#/
         self.state = state
         # What alt (or lexer rule) is predicted by this configuration#/
@@ -100,6 +99,19 @@ class ATNConfig(object):
     def __hash__(self):
         return hash((self.state.stateNumber, self.alt, self.context, self.semanticContext))

+    def hashCodeForConfigSet(self):
+        return hash((self.state.stateNumber, self.alt, hash(self.semanticContext)))
+
+    def equalsForConfigSet(self, other):
+        if self is other:
+            return True
+        elif not isinstance(other, ATNConfig):
+            return False
+        else:
+            return self.state.stateNumber==other.state.stateNumber \
+                and self.alt==other.alt \
+                and self.semanticContext==other.semanticContext
+
     def __str__(self):
         with StringIO() as buf:
             buf.write('(')
@@ -150,6 +162,18 @@ class LexerATNConfig(ATNConfig):
             return False
         return super().__eq__(other)

+
+
+    def hashCodeForConfigSet(self):
+        return hash(self)
+
+
+    def equalsForConfigSet(self, other):
+        return self==other
+
+
+
     def checkNonGreedyDecision(self, source:LexerATNConfig, target:ATNState):
         return source.passedThroughNonGreedyDecision \
             or isinstance(target, DecisionState) and target.nonGreedy
@@ -108,8 +108,8 @@ class ATNConfigSet(object):
         rootIsWildcard = not self.fullCtx
         merged = merge(existing.context, config.context, rootIsWildcard, mergeCache)
         # no need to check for existing.context, config.context in cache
-        # since only way to create new graphs is "call rule" and here. We
-        # cache at both places.
+        # since only way to create new graphs is "call rule" and here.
+        # We cache at both places.
         existing.reachesIntoOuterContext = max(existing.reachesIntoOuterContext, config.reachesIntoOuterContext)
         # make sure to preserve the precedence filter suppression during the merge
         if config.precedenceFilterSuppressed:
@@ -118,11 +118,11 @@ class ATNConfigSet(object):
         return True

     def getOrAdd(self, config:ATNConfig):
-        h = hash(config)
+        h = config.hashCodeForConfigSet()
         l = self.configLookup.get(h, None)
         if l is not None:
             for c in l:
-                if c==config:
+                if config.equalsForConfigSet(c):
                     return c
         if l is None:
             l = [config]
@@ -136,7 +136,7 @@ class LexerATNSimulator(ATNSimulator):
     def matchATN(self, input:InputStream):
         startState = self.atn.modeToStartState[self.mode]

-        if self.debug:
+        if LexerATNSimulator.debug:
             print("matchATN mode " + str(self.mode) + " start: " + str(startState))

         old_mode = self.mode
@@ -150,13 +150,13 @@ class LexerATNSimulator(ATNSimulator):

         predict = self.execATN(input, next)

-        if self.debug:
+        if LexerATNSimulator.debug:
             print("DFA after matchATN: " + str(self.decisionToDFA[old_mode].toLexerString()))

         return predict

     def execATN(self, input:InputStream, ds0:DFAState):
-        if self.debug:
+        if LexerATNSimulator.debug:
             print("start state closure=" + str(ds0.configs))

         if ds0.isAcceptState:
@@ -167,8 +167,8 @@ class LexerATNSimulator(ATNSimulator):
         s = ds0 # s is current/from DFA state

         while True: # while more work
-            if self.debug:
-                print("execATN loop starting closure: %s\n", s.configs)
+            if LexerATNSimulator.debug:
+                print("execATN loop starting closure:", str(s.configs))

             # As we move src->trg, src->trg, we keep track of the previous trg to
             # avoid looking up the DFA state again, which is expensive.
@@ -229,8 +229,8 @@ class LexerATNSimulator(ATNSimulator):
             return None

         target = s.edges[t - self.MIN_DFA_EDGE]
-        if self.debug and target is not None:
-            print("reuse state "+s.stateNumber+ " edge to "+target.stateNumber)
+        if LexerATNSimulator.debug and target is not None:
+            print("reuse state", str(s.stateNumber), "edge to", str(target.stateNumber))

         return target

@@ -286,8 +286,8 @@ class LexerATNSimulator(ATNSimulator):
             if currentAltReachedAcceptState and cfg.passedThroughNonGreedyDecision:
                 continue

-            if self.debug:
-                print("testing %s at %s\n", self.getTokenName(t), cfg.toString(self.recog, True))
+            if LexerATNSimulator.debug:
+                print("testing", self.getTokenName(t), "at", str(cfg))

             for trans in cfg.state.transitions: # for each transition
                 target = self.getReachableTarget(trans, t)
@@ -304,8 +304,8 @@ class LexerATNSimulator(ATNSimulator):
                         skipAlt = cfg.alt

     def accept(self, input:InputStream, lexerActionExecutor:LexerActionExecutor, startIndex:int, index:int, line:int, charPos:int):
-        if self.debug:
-            print("ACTION %s\n", lexerActionExecutor)
+        if LexerATNSimulator.debug:
+            print("ACTION", lexerActionExecutor)

         # seek to after last char in token
         input.seek(index)
@@ -340,15 +340,15 @@ class LexerATNSimulator(ATNSimulator):
@ -340,15 +340,15 @@ class LexerATNSimulator(ATNSimulator):
|
||||||
# {@code false}.
|
# {@code false}.
|
||||||
def closure(self, input:InputStream, config:LexerATNConfig, configs:ATNConfigSet, currentAltReachedAcceptState:bool,
|
def closure(self, input:InputStream, config:LexerATNConfig, configs:ATNConfigSet, currentAltReachedAcceptState:bool,
|
||||||
speculative:bool, treatEofAsEpsilon:bool):
|
speculative:bool, treatEofAsEpsilon:bool):
|
||||||
if self.debug:
|
if LexerATNSimulator.debug:
|
||||||
print("closure("+config.toString(self.recog, True)+")")
|
print("closure(" + str(config) + ")")
|
||||||
|
|
||||||
if isinstance( config.state, RuleStopState ):
|
if isinstance( config.state, RuleStopState ):
|
||||||
if self.debug:
|
if LexerATNSimulator.debug:
|
||||||
if self.recog is not None:
|
if self.recog is not None:
|
||||||
print("closure at %s rule stop %s\n", self.recog.getRuleNames()[config.state.ruleIndex], config)
|
print("closure at", self.recog.symbolicNames[config.state.ruleIndex], "rule stop", str(config))
|
||||||
else:
|
else:
|
||||||
print("closure at rule stop %s\n", config)
|
print("closure at rule stop", str(config))
|
||||||
|
|
||||||
if config.context is None or config.context.hasEmptyPath():
|
if config.context is None or config.context.hasEmptyPath():
|
||||||
if config.context is None or config.context.isEmpty():
|
if config.context is None or config.context.isEmpty():
|
||||||
|
@ -411,7 +411,7 @@ class LexerATNSimulator(ATNSimulator):
|
||||||
# states reached by traversing predicates. Since this is when we
|
# states reached by traversing predicates. Since this is when we
|
||||||
# test them, we cannot cash the DFA state target of ID.
|
# test them, we cannot cash the DFA state target of ID.
|
||||||
|
|
||||||
if self.debug:
|
if LexerATNSimulator.debug:
|
||||||
print("EVAL rule "+ str(t.ruleIndex) + ":" + str(t.predIndex))
|
print("EVAL rule "+ str(t.ruleIndex) + ":" + str(t.predIndex))
|
||||||
configs.hasSemanticContext = True
|
configs.hasSemanticContext = True
|
||||||
if self.evaluatePredicate(input, t.ruleIndex, t.predIndex, speculative):
|
if self.evaluatePredicate(input, t.ruleIndex, t.predIndex, speculative):
|
||||||
|
@ -523,7 +523,7 @@ class LexerATNSimulator(ATNSimulator):
|
||||||
# Only track edges within the DFA bounds
|
# Only track edges within the DFA bounds
|
||||||
return to
|
return to
|
||||||
|
|
||||||
if self.debug:
|
if LexerATNSimulator.debug:
|
||||||
print("EDGE " + str(from_) + " -> " + str(to) + " upon "+ chr(tk))
|
print("EDGE " + str(from_) + " -> " + str(to) + " upon "+ chr(tk))
|
||||||
|
|
||||||
if from_.edges is None:
|
if from_.edges is None:
|
||||||
|
|
|
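An aside on the self.debug to LexerATNSimulator.debug change above (the ParserATNSimulator hunks below apply the same pattern): these flags are class attributes, so both spellings normally read the same value; reading them through the class name treats the flag unambiguously as a shared, class-wide toggle that cannot be shadowed by an attribute set on one instance. A small illustration of that Python attribute-lookup behaviour follows; Simulator is a stand-in class, not the runtime's.

    # Illustration only: class-level flag vs. instance attribute shadowing.
    class Simulator:
        debug = False          # shared, class-level toggle

        def step(self):
            # reads the class attribute, ignoring any instance-level shadow
            if Simulator.debug:
                print("tracing", id(self))

    a, b = Simulator(), Simulator()
    a.debug = True             # creates an instance attribute on a only
    Simulator.debug = True     # flips the shared flag; both a.step() and b.step() now trace
    a.step()
    b.step()
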
@@ -313,7 +313,7 @@ class ParserATNSimulator(ATNSimulator):
         pass

     def adaptivePredict(self, input:TokenStream, decision:int, outerContext:ParserRuleContext):
-        if self.debug or self.debug_list_atn_decisions:
+        if ParserATNSimulator.debug or ParserATNSimulator.debug_list_atn_decisions:
             print("adaptivePredict decision " + str(decision) +
                    " exec LA(1)==" + self.getLookaheadName(input) +
                    " line " + str(input.LT(1).line) + ":" +

@@ -341,10 +341,10 @@ class ParserATNSimulator(ATNSimulator):
             if s0 is None:
                 if outerContext is None:
                     outerContext = ParserRuleContext.EMPTY
-                if self.debug or self.debug_list_atn_decisions:
+                if ParserATNSimulator.debug or ParserATNSimulator.debug_list_atn_decisions:
                     print("predictATN decision " + str(dfa.decision) +
                            " exec LA(1)==" + self.getLookaheadName(input) +
-                           ", outerContext=" + outerContext.toString(self.parser))
+                           ", outerContext=" + outerContext.toString(self.parser.literalNames, None))

                 # If this is not a precedence DFA, we check the ATN start state
                 # to determine if this ATN start state is the decision for the

@@ -373,8 +373,8 @@ class ParserATNSimulator(ATNSimulator):
                 dfa.s0 = s0

             alt = self.execATN(dfa, s0, input, index, outerContext)
-            if self.debug:
-                print("DFA after predictATN: " + dfa.toString(self.parser.tokenNames))
+            if ParserATNSimulator.debug:
+                print("DFA after predictATN: " + dfa.toString(self.parser.literalNames))
             return alt
         finally:
             self._dfa = None

@@ -413,14 +413,14 @@ class ParserATNSimulator(ATNSimulator):
     # conflict + preds
     #
     def execATN(self, dfa:DFA, s0:DFAState, input:TokenStream, startIndex:int, outerContext:ParserRuleContext ):
-        if self.debug or self.debug_list_atn_decisions:
+        if ParserATNSimulator.debug or ParserATNSimulator.debug_list_atn_decisions:
             print("execATN decision " + str(dfa.decision) +
                    " exec LA(1)==" + self.getLookaheadName(input) +
                    " line " + str(input.LT(1).line) + ":" + str(input.LT(1).column))

         previousD = s0

-        if self.debug:
+        if ParserATNSimulator.debug:
             print("s0 = " + str(s0))

         t = input.LA(1)

@@ -450,7 +450,7 @@ class ParserATNSimulator(ATNSimulator):
                 # IF PREDS, MIGHT RESOLVE TO SINGLE ALT => SLL (or syntax error)
                 conflictingAlts = None
                 if D.predicates is not None:
-                    if self.debug:
+                    if ParserATNSimulator.debug:
                         print("DFA state has preds in DFA sim LL failover")
                     conflictIndex = input.index
                     if conflictIndex != startIndex:

@@ -458,7 +458,7 @@ class ParserATNSimulator(ATNSimulator):

                     conflictingAlts = self.evalSemanticContext(D.predicates, outerContext, True)
                     if len(conflictingAlts)==1:
-                        if self.debug:
+                        if ParserATNSimulator.debug:
                             print("Full LL avoided")
                         return min(conflictingAlts)

@@ -467,7 +467,7 @@ class ParserATNSimulator(ATNSimulator):
                         # context occurs with the index at the correct spot
                         input.seek(conflictIndex)

-                if self.dfa_debug:
+                if ParserATNSimulator.dfa_debug:
                     print("ctx sensitive state " + str(outerContext) +" in " + str(D))
                 fullCtx = True
                 s0_closure = self.computeStartState(dfa.atnStartState, outerContext, fullCtx)

@@ -539,7 +539,7 @@ class ParserATNSimulator(ATNSimulator):

             predictedAlt = self.getUniqueAlt(reach)

-            if self.debug:
+            if ParserATNSimulator.debug:
                 altSubSets = PredictionMode.getConflictingAltSubsets(reach)
                 print("SLL altSubSets=" + str(altSubSets) + ", configs=" + str(reach) +
                        ", predict=" + str(predictedAlt) + ", allSubsetsConflict=" +

@@ -591,8 +591,8 @@ class ParserATNSimulator(ATNSimulator):
                                input:TokenStream,
                                startIndex:int,
                                outerContext:ParserRuleContext):
-        if self.debug or self.debug_list_atn_decisions:
-            print("execATNWithFullContext "+s0)
+        if ParserATNSimulator.debug or ParserATNSimulator.debug_list_atn_decisions:
+            print("execATNWithFullContext", str(s0))
         fullCtx = True
         foundExactAmbig = False
         reach = None

@@ -621,7 +621,7 @@ class ParserATNSimulator(ATNSimulator):
                 raise e

             altSubSets = PredictionMode.getConflictingAltSubsets(reach)
-            if self.debug:
+            if ParserATNSimulator.debug:
                 print("LL altSubSets=" + str(altSubSets) + ", predict=" +
                        str(PredictionMode.getUniqueAlt(altSubSets)) + ", resolvesToJustOneViableAlt=" +
                        str(PredictionMode.resolvesToJustOneViableAlt(altSubSets)))

@@ -690,7 +690,7 @@ class ParserATNSimulator(ATNSimulator):
         return predictedAlt

     def computeReachSet(self, closure:ATNConfigSet, t:int, fullCtx:bool):
-        if self.debug:
+        if ParserATNSimulator.debug:
             print("in computeReachSet, starting closure: " + str(closure))

         if self.mergeCache is None:

@@ -712,7 +712,7 @@ class ParserATNSimulator(ATNSimulator):

         # First figure out where we can reach on input t
         for c in closure:
-            if self.debug:
+            if ParserATNSimulator.debug:
                 print("testing " + self.getTokenName(t) + " at " + str(c))

             if isinstance(c.state, RuleStopState):

@@ -972,7 +972,7 @@ class ParserATNSimulator(ATNSimulator):
         # nonambig alts are null in altToPred
         if nPredAlts==0:
             altToPred = None
-        if self.debug:
+        if ParserATNSimulator.debug:
             print("getPredsForAmbigAlts result " + str_list(altToPred))
         return altToPred

@@ -1098,11 +1098,11 @@ class ParserATNSimulator(ATNSimulator):
                     break
                 continue
             predicateEvaluationResult = pair.pred.eval(self.parser, outerContext)
-            if self.debug or self.dfa_debug:
+            if ParserATNSimulator.debug or ParserATNSimulator.dfa_debug:
                 print("eval pred " + str(pair) + "=" + str(predicateEvaluationResult))

             if predicateEvaluationResult:
-                if self.debug or self.dfa_debug:
+                if ParserATNSimulator.debug or ParserATNSimulator.dfa_debug:
                     print("PREDICT " + str(pair.alt))
                 predictions.add(pair.alt)
                 if not complete:

@@ -1124,8 +1124,8 @@ class ParserATNSimulator(ATNSimulator):


     def closureCheckingStopState(self, config:ATNConfig, configs:ATNConfigSet, closureBusy:set, collectPredicates:bool, fullCtx:bool, depth:int, treatEofAsEpsilon:bool):
-        if self.debug:
-            print("closure(" + config.toString(self.parser,True) + ")")
+        if ParserATNSimulator.debug:
+            print("closure(" + str(config) + ")")

         if isinstance(config.state, RuleStopState):
             # We hit rule end. If we have context info, use it

@@ -1139,7 +1139,7 @@ class ParserATNSimulator(ATNSimulator):
                         continue
                     else:
                         # we have no context info, just chase follow links (if greedy)
-                        if self.debug:
+                        if ParserATNSimulator.debug:
                             print("FALLING off rule " + self.getRuleName(config.state.ruleIndex))
                         self.closure_(config, configs, closureBusy, collectPredicates,
                                          fullCtx, depth, treatEofAsEpsilon)

@@ -1159,7 +1159,7 @@ class ParserATNSimulator(ATNSimulator):
                 return
             else:
                 # else if we have no context info, just chase follow links (if greedy)
-                if self.debug:
+                if ParserATNSimulator.debug:
                     print("FALLING off rule " + self.getRuleName(config.state.ruleIndex))

         self.closure_(config, configs, closureBusy, collectPredicates, fullCtx, depth, treatEofAsEpsilon)

@@ -1201,7 +1201,7 @@ class ParserATNSimulator(ATNSimulator):
                     c.reachesIntoOuterContext += 1
                     configs.dipsIntoOuterContext = True # TODO: can remove? only care when we add to set per middle of this method
                     newDepth -= 1
-                    if self.debug:
+                    if ParserATNSimulator.debug:
                         print("dips into outer ctx: " + str(c))
                 elif isinstance(t, RuleTransition):
                     # latch when newDepth goes negative - once we step out of the entry context we can't return

@@ -1242,12 +1242,12 @@ class ParserATNSimulator(ATNSimulator):
             return m(self, config, t, collectPredicates, inContext, fullCtx, treatEofAsEpsilon)

     def actionTransition(self, config:ATNConfig, t:ActionTransition):
-        if self.debug:
+        if ParserATNSimulator.debug:
             print("ACTION edge " + str(t.ruleIndex) + ":" + str(t.actionIndex))
         return ATNConfig(state=t.target, config=config)

     def precedenceTransition(self, config:ATNConfig, pt:PrecedencePredicateTransition, collectPredicates:bool, inContext:bool, fullCtx:bool):
-        if self.debug:
+        if ParserATNSimulator.debug:
             print("PRED (collectPredicates=" + str(collectPredicates) + ") " +
                    str(pt.precedence) + ">=_p, ctx dependent=true")
         if self.parser is not None:

@@ -1272,12 +1272,12 @@ class ParserATNSimulator(ATNSimulator):
         else:
             c = ATNConfig(state=pt.target, config=config)

-        if self.debug:
+        if ParserATNSimulator.debug:
             print("config from pred transition=" + str(c))
         return c

     def predTransition(self, config:ATNConfig, pt:PredicateTransition, collectPredicates:bool, inContext:bool, fullCtx:bool):
-        if self.debug:
+        if ParserATNSimulator.debug:
             print("PRED (collectPredicates=" + str(collectPredicates) + ") " + str(pt.ruleIndex) +
                    ":" + str(pt.predIndex) + ", ctx dependent=" + str(pt.isCtxDependent))
         if self.parser is not None:

@@ -1302,12 +1302,12 @@ class ParserATNSimulator(ATNSimulator):
         else:
             c = ATNConfig(state=pt.target, config=config)

-        if self.debug:
+        if ParserATNSimulator.debug:
             print("config from pred transition=" + str(c))
         return c

     def ruleTransition(self, config:ATNConfig, t:RuleTransition):
-        if self.debug:
+        if ParserATNSimulator.debug:
             print("CALL rule " + self.getRuleName(t.target.ruleIndex) + ", ctx=" + str(config.context))
         returnState = t.followState
         newContext = SingletonPredictionContext.create(config.context, returnState.stateNumber)

@@ -1365,13 +1365,12 @@ class ParserATNSimulator(ATNSimulator):
     def getTokenName(self, t:int):
         if t==Token.EOF:
             return "EOF"
-        if self.parser is not None and self.parser.tokenNames is not None:
-            if t >= len(self.parser.tokenNames):
-                print(str(t) + " ttype out of range: " + str_list(self.parser.tokenNames))
-                print(str_list(self.parser.getInputStream().getTokens()))
-            else:
-                return self.parser.tokensNames[t] + "<" + str(t) + ">"
-        return str(t)
+        if self.parser is not None and \
+                self.parser.literalNames is not None and \
+                t < len(self.parser.literalNames):
+            return self.parser.literalNames[t] + "<" + str(t) + ">"
+        else:
+            return str(t)

     def getLookaheadName(self, input:TokenStream):
         return self.getTokenName(input.LA(1))

@@ -1426,7 +1425,7 @@ class ParserATNSimulator(ATNSimulator):
     # on {@code to}
     #
     def addDFAEdge(self, dfa:DFA, from_:DFAState, t:int, to:DFAState):
-        if self.debug:
+        if ParserATNSimulator.debug:
             print("EDGE " + str(from_) + " -> " + str(to) + " upon " + self.getTokenName(t))

         if to is None:

@@ -1440,8 +1439,8 @@ class ParserATNSimulator(ATNSimulator):
             from_.edges = [None] * (self.atn.maxTokenType + 2)
         from_.edges[t+1] = to # connect

-        if self.debug:
-            names = None if self.parser is None else self.parser.tokenNames
+        if ParserATNSimulator.debug:
+            names = None if self.parser is None else self.parser.literalNames
             print("DFA=\n" + dfa.toString(names))

         return to

@@ -1475,12 +1474,12 @@ class ParserATNSimulator(ATNSimulator):
         D.configs.optimizeConfigs(self)
         D.configs.setReadonly(True)
         dfa.states[D] = D
-        if self.debug:
+        if ParserATNSimulator.debug:
             print("adding new DFA state: " + str(D))
         return D

     def reportAttemptingFullContext(self, dfa:DFA, conflictingAlts:set, configs:ATNConfigSet, startIndex:int, stopIndex:int):
-        if self.debug or self.retry_debug:
+        if ParserATNSimulator.debug or ParserATNSimulator.retry_debug:
             interval = range(startIndex, stopIndex + 1)
             print("reportAttemptingFullContext decision=" + str(dfa.decision) + ":" + str(configs) +
                    ", input=" + self.parser.getTokenStream().getText(interval))

@@ -1488,7 +1487,7 @@ class ParserATNSimulator(ATNSimulator):
         self.parser.getErrorListenerDispatch().reportAttemptingFullContext(self.parser, dfa, startIndex, stopIndex, conflictingAlts, configs)

     def reportContextSensitivity(self, dfa:DFA, prediction:int, configs:ATNConfigSet, startIndex:int, stopIndex:int):
-        if self.debug or self.retry_debug:
+        if ParserATNSimulator.debug or ParserATNSimulator.retry_debug:
             interval = range(startIndex, stopIndex + 1)
             print("reportContextSensitivity decision=" + str(dfa.decision) + ":" + str(configs) +
                    ", input=" + self.parser.getTokenStream().getText(interval))

@@ -1498,7 +1497,7 @@ class ParserATNSimulator(ATNSimulator):
     # If context sensitive parsing, we know it's ambiguity not conflict#
     def reportAmbiguity(self, dfa:DFA, D:DFAState, startIndex:int, stopIndex:int,
                                exact:bool, ambigAlts:set, configs:ATNConfigSet ):
-        if self.debug or self.retry_debug:
+        if ParserATNSimulator.debug or ParserATNSimulator.retry_debug:
             # ParserATNPathFinder finder = new ParserATNPathFinder(parser, atn);
             # int i = 1;
             # for (Transition t : dfa.atnStartState.transitions) {

@@ -470,7 +470,8 @@ class PredictionMode(Enum):
     def getUniqueAlt(cls, altsets:list):
         all = cls.getAlts(altsets)
         if len(all)==1:
-            return all[0]
+            for one in all:
+                return one
         else:
             return ATN.INVALID_ALT_NUMBER

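The getUniqueAlt fix above matters when cls.getAlts returns a set of alternatives: Python sets don't support indexing, so all[0] raises a TypeError, whereas iterating and returning the first element yields the single member without building a list. A tiny illustration of the idiom; the names here are hypothetical, not the runtime's.

    alts = {3}                  # a one-element set, the unique-alternative case
    # alts[0]                   # would raise TypeError: 'set' object is not subscriptable
    unique = next(iter(alts))   # same effect as the loop-and-return in the hunk above
    assert unique == 3
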
@@ -375,7 +375,7 @@ while True:

AltBlock(choice, preamble, alts, error) ::= <<
self.state = <choice.stateNumber>
-self._errHandler.sync(self);
+self._errHandler.sync(self)
<if(choice.label)><labelref(choice.label)> = _input.LT(1)<endif>
<preamble; separator="\n">
la_ = self._interp.adaptivePredict(self._input,<choice.decision>,self._ctx)

@@ -389,7 +389,7 @@ if la_ == <i>:

OptionalBlock(choice, alts, error) ::= <<
self.state = <choice.stateNumber>
-self._errHandler.sync(self);
+self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,<choice.decision>,self._ctx)
<alts:{alt |
if la_ == <i><if(!choice.ast.greedy)>+1<endif>:

@@ -383,7 +383,7 @@ while True:

AltBlock(choice, preamble, alts, error) ::= <<
self.state = <choice.stateNumber>
-self._errHandler.sync(self);
+self._errHandler.sync(self)
<if(choice.label)><labelref(choice.label)> = _input.LT(1)<endif>
<preamble; separator="\n">
la_ = self._interp.adaptivePredict(self._input,<choice.decision>,self._ctx)

@@ -397,7 +397,7 @@ if la_ == <i>:

OptionalBlock(choice, alts, error) ::= <<
self.state = <choice.stateNumber>
-self._errHandler.sync(self);
+self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,<choice.decision>,self._ctx)
<alts:{alt |
if la_ == <i><if(!choice.ast.greedy)>+1<endif>: