forked from jasder/antlr
use native tuple hash instead of str hash
This commit is contained in:
parent
164a34ba6e
commit
e6d27c7a7c
|
@ -73,6 +73,9 @@ class PredictionContext(object):
|
|||
def __init__(self, cachedHashCode):
    """Store the precomputed hash so ``__hash__`` is an O(1) lookup."""
    self.cachedHashCode = cachedHashCode
|
||||
def __len__(self):
    """The base context exposes no return states; subclasses override."""
    return 0
|
||||
# This means only the {@link #EMPTY} context is in set.
def isEmpty(self):
    """Return True only for the shared EMPTY sentinel context."""
    return self is self.EMPTY
|
@ -80,6 +83,9 @@ class PredictionContext(object):
|
|||
def hasEmptyPath(self):
    """True when the last return state is the EMPTY_RETURN_STATE marker."""
    lastIndex = len(self) - 1
    return self.getReturnState(lastIndex) == self.EMPTY_RETURN_STATE
|
||||
def getReturnState(self, index):
    """Abstract accessor; concrete subclasses must override.

    Raises:
        Exception: always.  The original ``raise "illegal!"`` raised a
        bare string, which is a TypeError in Python 2.6+ — raise a real
        exception instance instead.
    """
    raise Exception("illegal!")
|
||||
def __hash__(self):
    """Return the hash that was computed once at construction time."""
    return self.cachedHashCode
|
||||
|
@ -88,11 +94,13 @@ class PredictionContext(object):
|
|||
|
||||
|
||||
def calculateHashCode(parent, returnState):
    """Combine a parent context and a return state into one hash value.

    Uses a native tuple hash instead of hashing concatenated ``str()``
    renderings: no intermediate string is built and collision behavior
    is far better.  A ``None`` parent hashes like the empty context so
    the result agrees with ``calculateEmptyHashCode()``.
    """
    return hash("") if parent is None else hash((hash(parent), returnState))
||||
def calculateEmptyHashCode():
    """Hash of the empty prediction context (no parent, no return state)."""
    return hash("")
||||
def calculateListsHashCode(parents, returnStates):
    """Fold per-pair hashes of (parent, returnState) into a single value.

    Bug fix: the original iterated ``for parent, returnState in parents,
    returnStates:`` — that loops over the two-element tuple ``(parents,
    returnStates)`` itself and only "works" when each list happens to
    have length 2.  ``zip`` pairs the lists element-wise as intended.
    """
    h = 0
    for parent, returnState in zip(parents, returnStates):
        h = hash((h, calculateHashCode(parent, returnState)))
    return h
||||
# Used to cache {@link PredictionContext} objects. Its used for the shared
|
||||
# context cache associated with contexts in DFA states. This cache
|
||||
|
@ -135,7 +143,7 @@ class SingletonPredictionContext(PredictionContext):
|
|||
|
||||
def __init__(self, parent, returnState):
    """Create a context with exactly one parent / return-state pair.

    The diff residue carried two conflicting ``hashCode`` assignments;
    this keeps the tuple-hash form, which already handles a ``None``
    parent, so no separate empty-hash branch is needed.  The hash is
    computed once here and cached by the superclass.
    """
    assert returnState != ATNState.INVALID_STATE_NUMBER
    hashCode = calculateHashCode(parent, returnState)
    super(SingletonPredictionContext, self).__init__(hashCode)
    self.parentCtx = parent
    self.returnState = returnState
|
@ -185,15 +193,12 @@ class EmptyPredictionContext(SingletonPredictionContext):
|
|||
def isEmpty(self):
    """The EMPTY singleton is, by definition, empty."""
    return True
|
||||
def getParent(self, index):
    """The empty context has no parent at any index."""
    return None
|
||||
def getReturnState(self, index):
    """Every index maps to the single stored return state."""
    return self.returnState
|
||||
def __eq__(self, other):
    """Identity comparison: there is only one EMPTY instance."""
    return self is other
|
||||
def __hash__(self):
    """Return the cached hash computed at construction time."""
    return self.cachedHashCode
|
||||
def __unicode__(self):
    """Render the empty context as the end-of-rule marker '$'."""
    return "$"
|
||||
|
@ -206,7 +211,7 @@ class ArrayPredictionContext(PredictionContext):
|
|||
# returnState == {@link #EMPTY_RETURN_STATE}.
|
||||
|
||||
def __init__(self, parents, returnStates):
|
||||
super(ArrayPredictionContext, self).__init__(calculateHashCode(parents, returnStates))
|
||||
super(ArrayPredictionContext, self).__init__(calculateListsHashCode(parents, returnStates))
|
||||
assert parents is not None and len(parents)>0
|
||||
assert returnStates is not None and len(returnStates)>0
|
||||
self.parents = parents
|
||||
|
@ -305,9 +310,9 @@ def merge(a, b, rootIsWildcard, mergeCache):
|
|||
|
||||
# convert singleton so both are arrays to normalize
|
||||
if isinstance( a, SingletonPredictionContext ):
|
||||
a = ArrayPredictionContext(a)
|
||||
a = ArrayPredictionContext([a.parentCtx], [a.returnState])
|
||||
if isinstance( b, SingletonPredictionContext):
|
||||
b = ArrayPredictionContext(b)
|
||||
b = ArrayPredictionContext([b.parentCtx], [b.returnState])
|
||||
return mergeArrays(a, b, rootIsWildcard, mergeCache)
|
||||
|
||||
|
||||
|
@ -380,7 +385,7 @@ def mergeSingletons(a, b, rootIsWildcard, mergeCache):
|
|||
payloads[0] = b.returnState
|
||||
payloads[1] = a.returnState
|
||||
parents = [singleParent, singleParent]
|
||||
a_ = ArrayPredictionContext(parents, payloads);
|
||||
a_ = ArrayPredictionContext(parents, payloads)
|
||||
if mergeCache is not None:
|
||||
mergeCache.put(a, b, a_)
|
||||
return a_
|
||||
|
|
|
@ -93,10 +93,7 @@ class ATNConfig(object):
|
|||
and self.precedenceFilterSuppressed==other.precedenceFilterSuppressed
|
||||
|
||||
def __hash__(self):
    """Hash over the four identifying fields, consistent with __eq__.

    Uses a native tuple hash instead of hashing a '/'-joined string:
    no intermediate string allocation and much better collision
    behavior.
    """
    return hash((self.state.stateNumber, self.alt, self.context, self.semanticContext))
|
||||
def __str__(self):
|
||||
return unicode(self)
|
||||
|
@ -132,9 +129,9 @@ class LexerATNConfig(ATNConfig):
|
|||
self.passedThroughNonGreedyDecision = False if config is None else self.checkNonGreedyDecision(config, state)
|
||||
|
||||
def __hash__(self):
    """Hash the lexer-config identity fields as one native tuple.

    Replaces the concatenated-``str`` hash; ``passedThroughNonGreedyDecision``
    is hashed as the bool itself rather than a "0"/"1" string — equal
    configs still hash equal.
    """
    return hash((self.state.stateNumber, self.alt, self.context,
                 self.semanticContext, self.passedThroughNonGreedyDecision,
                 self.lexerActionExecutor))
|
||||
def __eq__(self, other):
|
||||
if self is other:
|
||||
|
|
|
@ -34,7 +34,7 @@
|
|||
# graph-structured stack.
|
||||
#/
|
||||
from io import StringIO
|
||||
from antlr4.PredictionContext import PredictionContext, merge
|
||||
from antlr4.PredictionContext import merge
|
||||
from antlr4.Utils import str_list
|
||||
from antlr4.atn.ATN import ATN
|
||||
from antlr4.atn.SemanticContext import SemanticContext
|
||||
|
@ -174,10 +174,10 @@ class ATNConfigSet(object):
|
|||
return self.hashConfigs()
|
||||
|
||||
def hashConfigs(self):
    """Fold the configs into a single order-sensitive hash value.

    Replaces the old approach of writing each config's unicode form
    into a StringIO and hashing the text: chaining ``hash((h, cfg))``
    uses each config's own ``__hash__`` directly and builds no string.
    Returns 0 for an empty config list.
    """
    h = 0
    for cfg in self.configs:
        h = hash((h, cfg))
    return h
|
||||
def __len__(self):
    """Number of configurations currently held in the set."""
    return len(self.configs)
|
|
|
@ -47,7 +47,7 @@ class LexerAction(object):
|
|||
self.isPositionDependent = False
|
||||
|
||||
def __hash__(self):
    """Hash the action type directly — no ``str()`` round-trip needed."""
    return hash(self.actionType)
|
||||
def __eq__(self, other):
    """Base lexer actions compare by identity; subclasses refine this."""
    return self is other
|
@ -92,7 +92,7 @@ class LexerTypeAction(LexerAction):
|
|||
lexer.type = self.type
|
||||
|
||||
def __hash__(self):
    """Native tuple hash over (actionType, type) — replaces str-concat hash."""
    return hash((self.actionType, self.type))
|
||||
def __eq__(self, other):
|
||||
if self is other:
|
||||
|
@ -120,7 +120,7 @@ class LexerPushModeAction(LexerAction):
|
|||
lexer.pushMode(self.mode)
|
||||
|
||||
def __hash__(self):
    """Native tuple hash over (actionType, mode) — replaces str-concat hash."""
    return hash((self.actionType, self.mode))
|
||||
def __eq__(self, other):
|
||||
if self is other:
|
||||
|
@ -188,7 +188,7 @@ class LexerModeAction(LexerAction):
|
|||
lexer.mode(self.mode)
|
||||
|
||||
def __hash__(self):
    """Native tuple hash over (actionType, mode) — replaces str-concat hash."""
    return hash((self.actionType, self.mode))
|
||||
def __eq__(self, other):
|
||||
if self is other:
|
||||
|
@ -232,7 +232,7 @@ class LexerCustomAction(LexerAction):
|
|||
lexer.action(None, self.ruleIndex, self.actionIndex)
|
||||
|
||||
def __hash__(self):
    """Native tuple hash over the three identity fields — no str building."""
    return hash((self.actionType, self.ruleIndex, self.actionIndex))
|
||||
def __eq__(self, other):
|
||||
if self is other:
|
||||
|
@ -258,7 +258,7 @@ class LexerChannelAction(LexerAction):
|
|||
lexer._channel = self.channel
|
||||
|
||||
def __hash__(self):
    """Native tuple hash over (actionType, channel) — replaces str-concat hash."""
    return hash((self.actionType, self.channel))
|
||||
def __eq__(self, other):
|
||||
if self is other:
|
||||
|
@ -305,7 +305,7 @@ class LexerIndexedCustomAction(LexerAction):
|
|||
self.action.execute(lexer)
|
||||
|
||||
def __hash__(self):
    """Native tuple hash over (actionType, offset, action).

    ``action`` contributes via its own ``__hash__`` instead of its
    ``str()`` form, which is both faster and collision-safer.
    """
    return hash((self.actionType, self.offset, self.action))
|
||||
def __eq__(self, other):
|
||||
if self is other:
|
||||
|
|
|
@ -136,13 +136,7 @@ class Predicate(SemanticContext):
|
|||
return parser.sempred(localctx, self.ruleIndex, self.predIndex)
|
||||
|
||||
def __hash__(self):
    """Tuple hash over the three fields identifying a predicate.

    Replaces the StringIO-built '/'-joined text hash: no intermediate
    string, fewer collisions, same identity semantics as ``__eq__``.
    """
    return hash((self.ruleIndex, self.predIndex, self.isCtxDependent))
|
||||
def __eq__(self, other):
|
||||
if self is other:
|
||||
|
@ -220,7 +214,10 @@ class AND(SemanticContext):
|
|||
return self.opnds == other.opnds
|
||||
|
||||
def __hash__(self):
    """Order-sensitive fold of operand hashes, tagged with "AND".

    The tag keeps AND(x, y) from colliding with OR(x, y) over the same
    operands; folding uses each operand's own ``__hash__`` instead of
    hashing ``str(self.opnds)``.
    """
    h = 0
    for opnd in self.opnds:
        h = hash((h, opnd))
    return hash((h, "AND"))
|
||||
#
|
||||
# {@inheritDoc}
|
||||
|
@ -308,7 +305,10 @@ class OR (SemanticContext):
|
|||
return self.opnds == other.opnds
|
||||
|
||||
def __hash__(self):
    """Order-sensitive fold of operand hashes, tagged with "OR".

    Mirrors AND.__hash__; the tag distinguishes OR(x, y) from
    AND(x, y) over identical operands.
    """
    h = 0
    for opnd in self.opnds:
        h = hash((h, opnd))
    return hash((h, "OR"))
|
||||
# <p>
|
||||
# The evaluation of predicates by this context is short-circuiting, but
|
||||
|
|
Loading…
Reference in New Issue