Improve python3 performance via addition of __slots__
parent c79b0fd80c
commit 6990edd4d4
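The pattern is the same in every hunk below: each runtime class declares `__slots__` naming its instance attributes, so CPython stores them in fixed slots instead of allocating a per-instance `__dict__`, which shrinks objects and speeds up attribute access. A minimal sketch of the effect (illustrative only, not part of the commit; `PlainToken` and `SlottedToken` are made-up names):

import sys
import timeit

class PlainToken(object):
    # Attributes live in a per-instance __dict__.
    def __init__(self, type, channel):
        self.type = type
        self.channel = channel

class SlottedToken(object):
    # Attributes live in fixed slots; no __dict__ is allocated.
    __slots__ = ('type', 'channel')

    def __init__(self, type, channel):
        self.type = type
        self.channel = channel

plain, slotted = PlainToken(1, 0), SlottedToken(1, 0)
print(hasattr(plain, '__dict__'), hasattr(slotted, '__dict__'))  # True False
# Per-instance footprint: the plain object plus its dict vs. the slotted object.
print(sys.getsizeof(plain) + sys.getsizeof(plain.__dict__), sys.getsizeof(slotted))
# Attribute reads skip the dict lookup on the slotted instance.
print(timeit.timeit(lambda: plain.type), timeit.timeit(lambda: slotted.type))

Exact sizes and timings vary by CPython version, but for the hot token, ATN, and DFA objects below, which a parse creates by the millions, the savings compound.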
@@ -27,6 +27,7 @@ class TokenStream(object):


 class BufferedTokenStream(TokenStream):
+    __slots__ = ('tokenSource', 'tokens', 'index', 'fetchedEOF')

     def __init__(self, tokenSource:Lexer):
         # The {@link TokenSource} from which tokens for this stream are fetched.
@@ -15,6 +15,8 @@ class TokenFactory(object):
     pass

 class CommonTokenFactory(TokenFactory):
+    __slots__ = 'copyText'
+
     #
     # The default {@link CommonTokenFactory} instance.
     #
@@ -35,6 +35,7 @@ from antlr4.Token import Token


 class CommonTokenStream(BufferedTokenStream):
+    __slots__ = 'channel'

     def __init__(self, lexer:Lexer, channel:int=Token.DEFAULT_CHANNEL):
         super().__init__(lexer)
@@ -14,6 +14,7 @@ from antlr4.InputStream import InputStream


 class FileStream(InputStream):
+    __slots__ = 'fileName'

     def __init__(self, fileName:str, encoding:str='ascii', errors:str='strict'):
         super().__init__(self.readDataFrom(fileName, encoding, errors))
@@ -12,6 +12,7 @@ from antlr4.Token import Token


 class InputStream (object):
+    __slots__ = ('name', 'strdata', '_index', 'data', '_size')

     def __init__(self, data: str):
         self.name = "<empty>"
@@ -11,6 +11,7 @@ from antlr4.Token import Token
 IntervalSet = None

 class IntervalSet(object):
+    __slots__ = ('intervals', 'readOnly')

     def __init__(self):
         self.intervals = None
@@ -14,6 +14,7 @@ from antlr4.atn.Transition import WildcardTransition, NotSetTransition, Abstract


 class LL1Analyzer (object):
+    __slots__ = 'atn'

     #* Special value added to the lookahead sets to indicate that we hit
     # a predicate during analysis if {@code seeThruPreds==false}.
@@ -28,6 +28,11 @@ class TokenSource(object):


 class Lexer(Recognizer, TokenSource):
+    __slots__ = (
+        '_input', '_output', '_factory', '_tokenFactorySourcePair', '_token',
+        '_tokenStartCharIndex', '_tokenStartLine', '_tokenStartColumn',
+        '_hitEOF', '_channel', '_type', '_modeStack', '_mode', '_text'
+    )

     DEFAULT_MODE = 0
     MORE = -2
@@ -322,4 +327,3 @@ class Lexer(Recognizer, TokenSource):
         else:
             # TODO: Do we lose character or line position information?
             self._input.consume()
-
@@ -18,6 +18,7 @@ from antlr4.Token import Token


 class ListTokenSource(TokenSource):
+    __slots__ = ('tokens', 'sourceName', 'pos', 'eofToken', '_factory')

     # Constructs a new {@link ListTokenSource} instance from the specified
     # collection of {@link Token} objects and source name.
@@ -23,6 +23,7 @@ from antlr4.tree.ParseTreePatternMatcher import ParseTreePatternMatcher
 from antlr4.tree.Tree import ParseTreeListener, TerminalNode, ErrorNode

 class TraceListener(ParseTreeListener):
+    __slots__ = '_parser'

     def __init__(self, parser):
         self._parser = parser
@@ -44,7 +45,11 @@ class TraceListener(ParseTreeListener):

 # self is all the parsing support code essentially; most of it is error recovery stuff.#
 class Parser (Recognizer):
+    __slots__ = (
+        '_input', '_output', '_errHandler', '_precedenceStack', '_ctx',
+        'buildParseTrees', '_tracer', '_parseListeners', '_syntaxErrors'
+    )

     # self field maps from the serialized ATN string to the deserialized {@link ATN} with
     # bypass alternatives.
     #
@@ -32,6 +32,11 @@ from antlr4.error.Errors import RecognitionException, UnsupportedOperationExcept


 class ParserInterpreter(Parser):
+    __slots__ = (
+        'grammarFileName', 'atn', 'tokenNames', 'ruleNames', 'decisionToDFA',
+        'sharedContextCache', '_parentContextStack',
+        'pushRecursionContextStates'
+    )

     def __init__(self, grammarFileName:str, tokenNames:list, ruleNames:list, atn:ATN, input:TokenStream):
         super().__init__(input)
@@ -34,7 +34,7 @@ from antlr4.tree.Tree import ParseTreeListener, ParseTree, TerminalNodeImpl, Err
 ParserRuleContext = None

 class ParserRuleContext(RuleContext):
-
+    __slots__ = ('children', 'start', 'stop', 'exception')
     def __init__(self, parent:ParserRuleContext = None, invokingStateNumber:int = None ):
         super().__init__(parent, invokingStateNumber)
         #* If we are debugging or building a parse tree for a visitor,
@@ -11,6 +11,7 @@ from antlr4.error.ErrorListener import ProxyErrorListener, ConsoleErrorListener
 RecognitionException = None

 class Recognizer(object):
+    __slots__ = ('_listeners', '_interp', '_stateNumber')

     tokenTypeMapCache = dict()
     ruleIndexMapCache = dict()
@@ -33,7 +33,7 @@ RuleContext = None
 Parser = None

 class RuleContext(RuleNode):
-
+    __slots__ = ('parentCtx', 'invokingState')
     EMPTY = None

     def __init__(self, parent:RuleContext=None, invokingState:int=-1):
@@ -225,4 +225,3 @@ class RuleContext(RuleNode):
         buf.write("]")
         return buf.getvalue()

-
@@ -10,6 +10,7 @@ from io import StringIO


 class Token (object):
+    __slots__ = ('source', 'type', 'channel', 'start', 'stop', 'tokenIndex', 'line', 'column', '_text')

     INVALID_TYPE = 0

@@ -68,7 +69,6 @@ class Token (object):

 class CommonToken(Token):
-

     # An empty {@link Pair} which is used as the default value of
     # {@link #source} for tokens that do not have a source.
     EMPTY_SOURCE = (None, None)
@@ -11,6 +11,8 @@ from antlr4.CommonTokenStream import CommonTokenStream


 class TokenStreamRewriter(object):
+    __slots__ = ('tokens', 'programs', 'lastRewriteTokenIndexes')
+
     DEFAULT_PROGRAM_NAME = "default"
     PROGRAM_INIT_SIZE = 100
     MIN_TOKEN_INDEX = 0
@@ -195,6 +197,7 @@ class TokenStreamRewriter(object):
        return reduced

    class RewriteOperation(object):
+       __slots__ = ('tokens', 'index', 'text', 'instructionIndex')

        def __init__(self, tokens, index, text=""):
            """
@@ -235,6 +238,7 @@ class TokenStreamRewriter(object):
            pass

    class ReplaceOp(RewriteOperation):
+       __slots__ = 'last_index'

        def __init__(self, from_idx, to_idx, tokens, text):
            super(TokenStreamRewriter.ReplaceOp, self).__init__(tokens, from_idx, text)
@@ -12,6 +12,11 @@ from antlr4.atn.ATNState import ATNState, DecisionState


 class ATN(object):
+    __slots__ = (
+        'grammarType', 'maxTokenType', 'states', 'decisionToState',
+        'ruleToStartState', 'ruleToStopState', 'modeNameToStartState',
+        'ruleToTokenType', 'lexerActions', 'modeToStartState'
+    )

     INVALID_ALT_NUMBER = 0

@@ -58,7 +63,7 @@ class ATN(object):
         if s.nextTokenWithinRule is not None:
             return s.nextTokenWithinRule
         s.nextTokenWithinRule = self.nextTokensInContext(s, None)
-        s.nextTokenWithinRule.readonly = True
+        s.nextTokenWithinRule.readOnly = True
         return s.nextTokenWithinRule

     def nextTokens(self, s:ATNState, ctx:RuleContext = None):
@@ -21,6 +21,10 @@ from antlr4.atn.SemanticContext import SemanticContext
 ATNConfig = None

 class ATNConfig(object):
+    __slots__ = (
+        'state', 'alt', 'context', 'semanticContext', 'reachesIntoOuterContext',
+        'precedenceFilterSuppressed'
+    )

     def __init__(self, state:ATNState=None, alt:int=None, context:PredictionContext=None, semantic:SemanticContext=None, config:ATNConfig=None):
         if config is not None:
@@ -110,6 +114,7 @@ class ATNConfig(object):
 LexerATNConfig = None

 class LexerATNConfig(ATNConfig):
+    __slots__ = ('lexerActionExecutor', 'passedThroughNonGreedyDecision')

     def __init__(self, state:ATNState, alt:int=None, context:PredictionContext=None, semantic:SemanticContext=SemanticContext.NONE,
                  lexerActionExecutor:LexerActionExecutor=None, config:LexerATNConfig=None):
@@ -20,6 +20,12 @@ from antlr4.error.Errors import UnsupportedOperationException, IllegalStateExcep
 ATNSimulator = None

 class ATNConfigSet(object):
+    __slots__ = (
+        'configLookup', 'fullCtx', 'readonly', 'configs', 'uniqueAlt',
+        'conflictingAlts', 'hasSemanticContext', 'dipsIntoOuterContext',
+        'cachedHashCode'
+    )
+
     #
     # The reason that we need this is because we don't want the hash map to use
     # the standard hash code and equals. We need all configurations with the same
@@ -204,6 +210,3 @@ class OrderedATNConfigSet(ATNConfigSet):

     def __init__(self):
         super().__init__()
-
-
-
@@ -6,6 +6,7 @@
 ATNDeserializationOptions = None

 class ATNDeserializationOptions(object):
+    __slots__ = ('readOnly', 'verifyATN', 'generateRuleBypassTransitions')

     defaultOptions = None

@@ -21,4 +22,3 @@ class ATNDeserializationOptions(object):

 ATNDeserializationOptions.defaultOptions = ATNDeserializationOptions()
 ATNDeserializationOptions.defaultOptions.readOnly = True
-
@@ -31,6 +31,7 @@ SERIALIZED_VERSION = 3
 SERIALIZED_UUID = ADDED_UNICODE_SMP

 class ATNDeserializer (object):
+    __slots__ = ('deserializationOptions', 'data', 'pos', 'uuid')

     def __init__(self, options : ATNDeserializationOptions = None):
         if options is None:
@@ -10,6 +10,7 @@ from antlr4.dfa.DFAState import DFAState


 class ATNSimulator(object):
+    __slots__ = ('atn', 'sharedContextCache', '__dict__')

     # Must distinguish between missing edge and edge we know leads nowhere#/
     ERROR = DFAState(configs=ATNConfigSet())
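The ATNSimulator hunk above is the one place where the slot list includes '__dict__'. A short sketch of why (illustrative, not from the commit; class names are made up): a slotted class normally rejects attributes it did not declare, but listing '__dict__' restores dynamic assignment for the class and its subclasses.

class Strict(object):
    __slots__ = ('atn',)

class Flexible(object):
    # Including '__dict__' restores dynamic attributes alongside the slots.
    __slots__ = ('atn', '__dict__')

s = Strict()
s.atn = None
try:
    s.extra = 1            # no slot named 'extra' and no __dict__
except AttributeError as e:
    print('Strict:', e)

f = Flexible()
f.atn = None
f.extra = 1                # lands in f.__dict__, still allowed
print('Flexible:', f.extra)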
@@ -44,4 +45,3 @@ class ATNSimulator(object):
             return context
         visited = dict()
         return getCachedPredictionContext(context, self.sharedContextCache, visited)
-
@@ -69,6 +69,10 @@ from antlr4.atn.Transition import Transition
 INITIAL_NUM_TRANSITIONS = 4

 class ATNState(object):
+    __slots__ = (
+        'atn', 'stateNumber', 'stateType', 'ruleIndex', 'epsilonOnlyTransitions',
+        'transitions', 'nextTokenWithinRule',
+    )

     # constants for serialization
     INVALID_TYPE = 0
@@ -148,7 +152,7 @@ class BasicState(ATNState):


 class DecisionState(ATNState):
-
+    __slots__ = ('decision', 'nonGreedy')
     def __init__(self):
         super().__init__()
         self.decision = -1
@@ -156,6 +160,7 @@ class DecisionState(ATNState):

 # The start of a regular {@code (...)} block.
 class BlockStartState(DecisionState):
+    __slots__ = 'endState'

     def __init__(self):
         super().__init__()
@@ -169,6 +174,7 @@ class BasicBlockStartState(BlockStartState):

 # Terminal node of a simple {@code (a|b|c)} block.
 class BlockEndState(ATNState):
+    __slots__ = 'startState'

     def __init__(self):
         super().__init__()
@@ -187,6 +193,7 @@ class RuleStopState(ATNState):
         self.stateType = self.RULE_STOP

 class RuleStartState(ATNState):
+    __slots__ = ('stopState', 'isPrecedenceRule')

     def __init__(self):
         super().__init__()
@@ -209,6 +216,7 @@ class PlusLoopbackState(DecisionState):
 # real decision-making note for {@code A+}.
 #
 class PlusBlockStartState(BlockStartState):
+    __slots__ = 'loopBackState'

     def __init__(self):
         super().__init__()
@@ -230,6 +238,7 @@ class StarLoopbackState(ATNState):


 class StarLoopEntryState(DecisionState):
+    __slots__ = ('loopBackState', 'isPrecedenceDecision')

     def __init__(self):
         super().__init__()
@@ -240,6 +249,7 @@ class StarLoopEntryState(DecisionState):

 # Mark the end of a * or + loop.
 class LoopEndState(ATNState):
+    __slots__ = 'loopBackState'

     def __init__(self):
         super().__init__()
@@ -34,6 +34,7 @@ from antlr4.dfa.DFAState import DFAState
 from antlr4.error.Errors import LexerNoViableAltException, UnsupportedOperationException

 class SimState(object):
+    __slots__ = ('index', 'line', 'column', 'dfaState')

     def __init__(self):
         self.reset()
@@ -49,6 +50,10 @@ Lexer = None
 LexerATNSimulator = None

 class LexerATNSimulator(ATNSimulator):
+    __slots__ = (
+        'decisionToDFA', 'recog', 'startIndex', 'line', 'column', 'mode',
+        'DEFAULT_MODE', 'MAX_CHAR_VALUE', 'prevAccept'
+    )

     debug = False
     dfa_debug = False
@@ -58,8 +63,6 @@ class LexerATNSimulator(ATNSimulator):

     ERROR = None

-    match_calls = 0
-
     def __init__(self, recog:Lexer, atn:ATN, decisionToDFA:list, sharedContextCache:PredictionContextCache):
         super().__init__(atn, sharedContextCache)
         self.decisionToDFA = decisionToDFA
@@ -89,7 +92,6 @@ class LexerATNSimulator(ATNSimulator):
         self.startIndex = simulator.startIndex

     def match(self, input:InputStream , mode:int):
-        self.match_calls += 1
         self.mode = mode
         mark = input.mark()
         try:
@@ -22,6 +22,7 @@ class LexerActionType(IntEnum):
     TYPE = 7 #The type of a {@link LexerTypeAction} action.

 class LexerAction(object):
+    __slots__ = ('actionType', 'isPositionDependent')

     def __init__(self, action:LexerActionType):
         self.actionType = action
@@ -58,6 +59,7 @@ LexerSkipAction.INSTANCE = LexerSkipAction()
 # Implements the {@code type} lexer action by calling {@link Lexer#setType}
 # with the assigned type.
 class LexerTypeAction(LexerAction):
+    __slots__ = 'type'

     def __init__(self, type:int):
         super().__init__(LexerActionType.TYPE)
@@ -84,6 +86,7 @@ class LexerTypeAction(LexerAction):
 # Implements the {@code pushMode} lexer action by calling
 # {@link Lexer#pushMode} with the assigned mode.
 class LexerPushModeAction(LexerAction):
+    __slots__ = 'mode'

     def __init__(self, mode:int):
         super().__init__(LexerActionType.PUSH_MODE)
@@ -152,6 +155,7 @@ LexerMoreAction.INSTANCE = LexerMoreAction()
 # Implements the {@code mode} lexer action by calling {@link Lexer#mode} with
 # the assigned mode.
 class LexerModeAction(LexerAction):
+    __slots__ = 'mode'

     def __init__(self, mode:int):
         super().__init__(LexerActionType.MODE)
@@ -186,6 +190,7 @@ class LexerModeAction(LexerAction):
 # command argument could not be evaluated when the grammar was compiled.</p>

 class LexerCustomAction(LexerAction):
+    __slots__ = ('ruleIndex', 'actionIndex')

     # Constructs a custom lexer action with the specified rule and action
     # indexes.
@@ -220,6 +225,7 @@ class LexerCustomAction(LexerAction):
 # Implements the {@code channel} lexer action by calling
 # {@link Lexer#setChannel} with the assigned channel.
 class LexerChannelAction(LexerAction):
+    __slots__ = 'channel'

     # Constructs a new {@code channel} action with the specified channel value.
     # @param channel The channel value to pass to {@link Lexer#setChannel}.
@@ -255,6 +261,7 @@ class LexerChannelAction(LexerAction):
 # lexer actions, see {@link LexerActionExecutor#append} and
 # {@link LexerActionExecutor#fixOffsetBeforeMatch}.</p>
 class LexerIndexedCustomAction(LexerAction):
+    __slots__ = ('offset', 'action')

     # Constructs a new indexed custom action by associating a character offset
     # with a {@link LexerAction}.
@@ -20,6 +20,7 @@ Lexer = None
 LexerActionExecutor = None

 class LexerActionExecutor(object):
+    __slots__ = ('lexerActions', 'hashCode')

     def __init__(self, lexerActions:list=list()):
         self.lexerActions = lexerActions
@@ -255,6 +255,10 @@ from antlr4.error.Errors import NoViableAltException


 class ParserATNSimulator(ATNSimulator):
+    __slots__ = (
+        'parser', 'decisionToDFA', 'predictionMode', '_input', '_startIndex',
+        '_outerContext', '_dfa', 'mergeCache'
+    )

     debug = False
     debug_list_atn_decisions = False
@@ -1643,4 +1647,3 @@ class ParserATNSimulator(ATNSimulator):
                         ", input=" + self.parser.getTokenStream().getText(startIndex, stopIndex))
         if self.parser is not None:
             self.parser.getErrorListenerDispatch().reportAmbiguity(self.parser, dfa, startIndex, stopIndex, exact, ambigAlts, configs)
-
@@ -95,6 +95,7 @@ def filterPrecedencePredicates(collection:set):


 class Predicate(SemanticContext):
+    __slots__ = ('ruleIndex', 'predIndex', 'isCtxDependent')

     def __init__(self, ruleIndex:int=-1, predIndex:int=-1, isCtxDependent:bool=False):
         self.ruleIndex = ruleIndex
@@ -153,6 +154,7 @@ class PrecedencePredicate(SemanticContext):
 # is false.
 del AND
 class AND(SemanticContext):
+    __slots__ = 'opnds'

     def __init__(self, a:SemanticContext, b:SemanticContext):
         operands = set()
@@ -238,6 +240,7 @@ class AND(SemanticContext):
 # contexts is true.
 del OR
 class OR (SemanticContext):
+    __slots__ = 'opnds'

     def __init__(self, a:SemanticContext, b:SemanticContext):
         operands = set()
@@ -26,6 +26,8 @@ ATNState = None
 RuleStartState = None

 class Transition (object):
+    __slots__ = ('target','isEpsilon','label')
+
     # constants for serialization
     EPSILON = 1
     RANGE = 2
@@ -66,6 +68,7 @@ class Transition (object):

 # TODO: make all transitions sets? no, should remove set edges
 class AtomTransition(Transition):
+    __slots__ = ('label_', 'serializationType')

     def __init__(self, target:ATNState, label:int):
         super().__init__(target)
@@ -85,6 +88,7 @@ class AtomTransition(Transition):
         return str(self.label_)

 class RuleTransition(Transition):
+    __slots__ = ('ruleIndex', 'precedence', 'followState', 'serializationType')

     def __init__(self, ruleStart:RuleStartState, ruleIndex:int, precedence:int, followState:ATNState):
         super().__init__(ruleStart)
@@ -99,6 +103,7 @@ class RuleTransition(Transition):


 class EpsilonTransition(Transition):
+    __slots__ = ('serializationType', 'outermostPrecedenceReturn')

     def __init__(self, target, outermostPrecedenceReturn=-1):
         super(EpsilonTransition, self).__init__(target)
@@ -113,6 +118,7 @@ class EpsilonTransition(Transition):
         return "epsilon"

 class RangeTransition(Transition):
+    __slots__ = ('serializationType', 'start', 'stop')

     def __init__(self, target:ATNState, start:int, stop:int):
         super().__init__(target)
@@ -139,6 +145,7 @@ class AbstractPredicateTransition(Transition):


 class PredicateTransition(AbstractPredicateTransition):
+    __slots__ = ('serializationType', 'ruleIndex', 'predIndex', 'isCtxDependent')

     def __init__(self, target:ATNState, ruleIndex:int, predIndex:int, isCtxDependent:bool):
         super().__init__(target)
@@ -158,6 +165,7 @@ class PredicateTransition(AbstractPredicateTransition):
         return "pred_" + str(self.ruleIndex) + ":" + str(self.predIndex)

 class ActionTransition(Transition):
+    __slots__ = ('serializationType', 'ruleIndex', 'actionIndex', 'isCtxDependent')

     def __init__(self, target:ATNState, ruleIndex:int, actionIndex:int=-1, isCtxDependent:bool=False):
         super().__init__(target)
@@ -175,6 +183,7 @@ class ActionTransition(Transition):

 # A transition containing a set of values.
 class SetTransition(Transition):
+    __slots__ = 'serializationType'

     def __init__(self, target:ATNState, set:IntervalSet):
         super().__init__(target)
@@ -207,6 +216,7 @@ class NotSetTransition(SetTransition):


 class WildcardTransition(Transition):
+    __slots__ = 'serializationType'

     def __init__(self, target:ATNState):
         super().__init__(target)
@@ -220,6 +230,7 @@ class WildcardTransition(Transition):


 class PrecedencePredicateTransition(AbstractPredicateTransition):
+    __slots__ = ('serializationType', 'precedence')

     def __init__(self, target:ATNState, precedence:int):
         super().__init__(target)
@@ -11,6 +11,7 @@ from antlr4.error.Errors import IllegalStateException


 class DFA(object):
+    __slots__ = ('atnStartState', 'decision', '_states', 's0', 'precedenceDfa')

     def __init__(self, atnStartState:DecisionState, decision:int=0):
         # From which ATN state did we create this DFA?
@@ -130,4 +131,3 @@ class DFA(object):
         from antlr4.dfa.DFASerializer import LexerDFASerializer
         serializer = LexerDFASerializer(self)
         return str(serializer)
-
@@ -12,6 +12,7 @@ from antlr4.dfa.DFAState import DFAState


 class DFASerializer(object):
+    __slots__ = ('dfa', 'literalNames', 'symbolicNames')

     def __init__(self, dfa:DFA, literalNames:list=None, symbolicNames:list=None):
         self.dfa = dfa
@@ -11,6 +11,8 @@ from antlr4.atn.SemanticContext import SemanticContext


 class PredPrediction(object):
+    __slots__ = ('alt', 'pred')
+
     def __init__(self, pred:SemanticContext, alt:int):
         self.alt = alt
         self.pred = pred
@@ -43,6 +45,10 @@ class PredPrediction(object):
 # meaning that state was reached via a different set of rule invocations.</p>
 #/
 class DFAState(object):
+    __slots__ = (
+        'stateNumber', 'configs', 'edges', 'isAcceptState', 'prediction',
+        'lexerActionExecutor', 'requiresFullContext', 'predicates'
+    )

     def __init__(self, stateNumber:int=-1, configs:ATNConfigSet=ATNConfigSet()):
         self.stateNumber = stateNumber
@@ -8,6 +8,7 @@ class Chunk(object):
     pass

 class TagChunk(Chunk):
+    __slots__ = ('tag', 'label')

     def __init__(self, tag:str, label:str=None):
         self.tag = tag
@@ -20,10 +21,10 @@ class TagChunk(Chunk):
            return self.label + ":" + self.tag

 class TextChunk(Chunk):
-
+    __slots__ = 'text'
     def __init__(self, text:str):
         self.text = text

     def __str__(self):
         return "'" + self.text + "'"

@@ -14,7 +14,7 @@ from antlr4.tree.Tree import ParseTree


 class ParseTreeMatch(object):
-
+    __slots__ = ('tree', 'pattern', 'labels', 'mismatchedNode')
     #
     # Constructs a new instance of {@link ParseTreeMatch} from the specified
     # parse tree and pattern.
@@ -14,6 +14,7 @@ from antlr4.xpath.XPath import XPath


 class ParseTreePattern(object):
+    __slots__ = ('matcher', 'patternRuleIndex', 'pattern', 'patternTree')

     # Construct a new instance of the {@link ParseTreePattern} class.
     #
@@ -89,6 +89,7 @@ class StartRuleDoesNotConsumeFullPattern(Exception):


 class ParseTreePatternMatcher(object):
+    __slots__ = ('lexer', 'parser', 'start', 'stop', 'escape')

     # Constructs a {@link ParseTreePatternMatcher} or from a {@link Lexer} and
     # {@link Parser} object. The lexer input stream is altered for tokenizing
@@ -13,6 +13,7 @@ from antlr4.Token import Token


 class RuleTagToken(Token):
+    __slots__ = ('label', 'ruleName')
     #
     # Constructs a new instance of {@link RuleTagToken} with the specified rule
     # name, bypass token type, and label.
@@ -13,7 +13,7 @@ from antlr4.Token import CommonToken


 class TokenTagToken(CommonToken):
-
+    __slots__ = ('tokenName', 'label')
     # Constructs a new instance of {@link TokenTagToken} with the specified
     # token name, type, and label.
     #
@@ -80,6 +80,7 @@ class ParseTreeListener(object):
 del ParserRuleContext

 class TerminalNodeImpl(TerminalNode):
+    __slots__ = ('parentCtx', 'symbol')

     def __init__(self, symbol:Token):
         self.parentCtx = None
@@ -652,6 +652,7 @@ CaptureNextTokenType(d) ::= "<d.varName> = self._input.LA(1)"

 StructDecl(struct,ctorAttrs,attrs,getters,dispatchMethods,interfaces,extensionMembers) ::= <<
 class <struct.name>(<if(contextSuperClass)><contextSuperClass><else>ParserRuleContext<endif>):
+    __slots__ = 'parser'

     def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1<struct.ctorAttrs:{a | , <a.name><if(a.type)>:<a.type><endif>=None}>):
         super().__init__(parent, invokingState)