forked from jasder/antlr
Mass refactoring of const variables
This commit is contained in:
parent fc75fbfab0
commit 729f30768a
@@ -104,7 +104,7 @@ func (bt *BufferedTokenStream) consume() {
  // not yet initialized
  skipEofCheck = false
  }
- if (!skipEofCheck && bt.LA(1) == Token.EOF) {
+ if (!skipEofCheck && bt.LA(1) == TokenEOF) {
  panic( "cannot consume EOF" )
  }
  if (bt.sync(bt.index + 1)) {

@@ -140,7 +140,7 @@ func (bt *BufferedTokenStream) fetch(n int) int {
  var t = bt.tokenSource.nextToken()
  t.tokenIndex = len(bt.tokens)
  bt.tokens.push(t)
- if (t.type == Token.EOF) {
+ if (t.type == TokenEOF) {
  bt.fetchedEOF = true
  return i + 1
  }

@@ -163,7 +163,7 @@ func (bt *BufferedTokenStream) getTokens(start, stop, types) {
  }
  for i := start; i < stop; i++ {
  var t = bt.tokens[i]
- if (t.type == Token.EOF) {
+ if (t.type == TokenEOF) {
  break
  }
  if (types == nil || types.contains(t.type)) {

@@ -248,7 +248,7 @@ func (bt *BufferedTokenStream) nextTokenOnChannel(i, channel) {
  }
  var token = bt.tokens[i]
  while (token.channel != bt.channel) {
- if (token.type == Token.EOF) {
+ if (token.type == TokenEOF) {
  return -1
  }
  i += 1

@@ -281,7 +281,7 @@ func (bt *BufferedTokenStream) getHiddenTokensToRight(tokenIndex,
  panic( "" + tokenIndex + " not in 0.." + len(bt.tokens) - 1
  }
  var nextOnChannel = bt.nextTokenOnChannel(tokenIndex + 1,
- Lexer.DEFAULT_TOKEN_CHANNEL)
+ LexerDefaultTokenChannel)
  var from_ = tokenIndex + 1
  // if none onchannel to right, nextOnChannel=-1 so set to = last token
  var to = nextOnChannel == -1 ? len(bt.tokens) - 1 : nextOnChannel

@@ -301,7 +301,7 @@ func (bt *BufferedTokenStream) getHiddenTokensToLeft(tokenIndex,
  panic( "" + tokenIndex + " not in 0.." + len(bt.tokens) - 1
  }
  var prevOnChannel = bt.previousTokenOnChannel(tokenIndex - 1,
- Lexer.DEFAULT_TOKEN_CHANNEL)
+ LexerDefaultTokenChannel)
  if (prevOnChannel == tokenIndex - 1) {
  return nil
  }

@@ -316,7 +316,7 @@ func (bt *BufferedTokenStream) filterForChannel(left, right, channel) {
  for var i = left; i < right + 1; i++ {
  var t = bt.tokens[i]
  if (channel == -1) {
- if (t.channel != Lexer.DEFAULT_TOKEN_CHANNEL) {
+ if (t.channel != LexerDefaultTokenChannel) {
  hidden.push(t)
  }
  } else if (t.channel == channel) {

@@ -357,7 +357,7 @@ func (bt *BufferedTokenStream) getText(interval) string {
  var s = ""
  for i := start; i < stop + 1; i++ {
  var t = bt.tokens[i]
- if (t.type == Token.EOF) {
+ if (t.type == TokenEOF) {
  break
  }
  s = s + t.text
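
Note on the BufferedTokenStream hunks above: this pass only renames Token.EOF to a package-level TokenEOF; the surrounding lines still carry JavaScript idioms (tokens.push, while, the ?: operator) that Go does not have. A minimal sketch of how the EOF guard and the ternary might read once fully ported, assuming a tokens slice on the stream (names here are illustrative, not the committed API):

    func (bt *BufferedTokenStream) consume() {
        if bt.LA(1) == TokenEOF {
            panic("cannot consume EOF")
        }
        bt.index++ // the real method also lazily fetches via sync()
    }

    // Go has no ?:, so "to = nextOnChannel == -1 ? len-1 : nextOnChannel" becomes:
    to := nextOnChannel
    if nextOnChannel == -1 {
        to = len(bt.tokens) - 1
    }
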
@@ -31,11 +31,13 @@ type CommonTokenStream struct {

  func NewCommonTokenStream(lexer Lexer, channel) {

- ts := new(BufferedTokenStream)
+ ts := new(CommonTokenStream)

  BufferedTokenStream.call(ts, lexer)
  ts.channel = channel
  return ts
+
+
  }

  func (ts *CommonTokenStream) adjustSeekIndex(i int) {

@@ -90,7 +92,7 @@ func (ts *CommonTokenStream) getNumberOfOnChannelTokens() {
  if t.channel==ts.channel {
  n += 1
  }
- if t.type==Token.EOF {
+ if t.type==TokenEOF {
  break
  }
  }
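
Note: BufferedTokenStream.call(ts, lexer) is a leftover JavaScript super-constructor call. The usual Go replacement is struct embedding plus an explicit initializer; a hedged sketch (InitBufferedTokenStream is a hypothetical helper, not part of this commit):

    type CommonTokenStream struct {
        BufferedTokenStream // embedded "base" stream
        channel             int
    }

    func NewCommonTokenStream(lexer Lexer, channel int) *CommonTokenStream {
        ts := new(CommonTokenStream)
        InitBufferedTokenStream(&ts.BufferedTokenStream, lexer) // hypothetical init helper
        ts.channel = channel
        return ts
    }
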
@@ -42,7 +42,7 @@ func (is *InputStream) reset() {

  func (is *InputStream) consume() {
  if (is.index >= is.size) {
- // assert is.LA(1) == Token.EOF
+ // assert is.LA(1) == TokenEOF
  panic ("cannot consume EOF")
  }
  is.index += 1

@@ -57,7 +57,7 @@ func (is *InputStream) LA(offset int) {
  }
  var pos = is.index + offset - 1
  if (pos < 0 || pos >= is.size) { // invalid
- return Token.EOF
+ return TokenEOF
  }
  return is.data[pos]
  }
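
Note: with the rename done, LA still needs Go types to compile. A sketch under the assumption that data is a []rune and size an int (the offset == 0 early return mirrors what the JavaScript runtime does; everything typed here is an assumption):

    func (is *InputStream) LA(offset int) int {
        if offset == 0 {
            return 0 // undefined lookahead, as in the JS runtime
        }
        pos := is.index + offset - 1
        if pos < 0 || pos >= is.size { // invalid position
            return TokenEOF
        }
        return int(is.data[pos])
    }
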
@@ -52,7 +52,7 @@ func NewIntervalSet() *IntervalSet {

  func (i *IntervalSet) first(v int) int {
  if (i.intervals == nil || len(i.intervals)==0) {
- return Token.INVALID_TYPE
+ return TokenInvalidType
  } else {
  return i.intervals[0].start
  }

@@ -247,7 +247,7 @@ func (is *IntervalSet) toCharString() {
  for i := 0; i < len( is.intervals ); i++ {
  var v = is.intervals[i]
  if(v.stop==v.start+1) {
- if ( v.start==Token.EOF ) {
+ if ( v.start== TokenEOF ) {
  append(names, "<EOF>")
  } else {
  append(names, ("'" + String.fromCharCode(v.start) + "'"))

@@ -269,7 +269,7 @@ func (i *IntervalSet) toIndexString() {
  for (var i = 0 i < len( i.intervals ) i++) {
  var v = i.intervals[i]
  if(v.stop==v.start+1) {
- if ( v.start==Token.EOF ) {
+ if ( v.start==TokenEOF ) {
  names.push("<EOF>")
  } else {
  names.push(v.start.toString())

@@ -302,9 +302,9 @@ func (i *IntervalSet) toTokenString(literalNames []string, symbolicNames []strin
  }

  func (i *IntervalSet) elementName(literalNames []string, symbolicNames []string, a int) string {
- if (a == Token.EOF) {
+ if (a == TokenEOF) {
  return "<EOF>"
- } else if (a == Token.EPSILON) {
+ } else if (a == TokenEpsilon) {
  return "<EPSILON>"
  } else {
  return literalNames[a] || symbolicNames[a]
@@ -34,7 +34,7 @@ func NewLL1Analyzer (atn) *LL1Analyzer {
  //* Special value added to the lookahead sets to indicate that we hit
  // a predicate during analysis if {@code seeThruPreds==false}.
  ///
- LL1Analyzer.HIT_PRED = Token.INVALID_TYPE
+ LL1Analyzer.HIT_PRED = TokenInvalidType

  //*
  // Calculates the SLL(1) expected lookahead set for each outgoing transition

@@ -132,19 +132,19 @@ func (la *LL1Analyzer) _LOOK(s, stopState , ctx, look, lookBusy, calledRuleStack
  lookBusy.add(c)
  if (s == stopState) {
  if (ctx ==nil) {
- look.addOne(Token.EPSILON)
+ look.addOne(TokenEpsilon)
  return
  } else if (ctx.isEmpty() && addEOF) {
- look.addOne(Token.EOF)
+ look.addOne(TokenEOF)
  return
  }
  }
  if (s instanceof RuleStopState ) {
  if (ctx ==nil) {
- look.addOne(Token.EPSILON)
+ look.addOne(TokenEpsilon)
  return
  } else if (ctx.isEmpty() && addEOF) {
- look.addOne(Token.EOF)
+ look.addOne(TokenEOF)
  return
  }
  if (ctx != PredictionContext.EMPTY) {

@@ -186,12 +186,12 @@ func (la *LL1Analyzer) _LOOK(s, stopState , ctx, look, lookBusy, calledRuleStack
  } else if( t.isEpsilon) {
  la._LOOK(t.target, stopState, ctx, look, lookBusy, calledRuleStack, seeThruPreds, addEOF)
  } else if (t.constructor == WildcardTransition) {
- look.addRange( Token.MIN_USER_TOKEN_TYPE, la.atn.maxTokenType )
+ look.addRange( TokenMinUserTokenType, la.atn.maxTokenType )
  } else {
  var set = t.label
  if (set != nil) {
  if (t instanceof NotSetTransition) {
- set = set.complement(Token.MIN_USER_TOKEN_TYPE, la.atn.maxTokenType)
+ set = set.complement(TokenMinUserTokenType, la.atn.maxTokenType)
  }
  look.addSet(set)
  }
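
Note: LL1Analyzer.HIT_PRED = TokenInvalidType is still a JavaScript property assignment. Under the naming scheme this commit establishes, the Go form would presumably be a package-level constant, along the lines of (name assumed, not in the diff):

    // LL1AnalyzerHitPred is a special value added to lookahead sets to
    // indicate a predicate was hit during analysis when seeThruPreds == false.
    const LL1AnalyzerHitPred = TokenInvalidType
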
@@ -74,20 +74,20 @@ func NewLexer(input *InputStream) *Lexer {
  lexer._hitEOF = false

  // The channel number for the current token///
- lexer._channel = Token.DEFAULT_CHANNEL
+ lexer._channel = TokenDefaultChannel

  // The token type for the current token///
- lexer._type = Token.INVALID_TYPE
+ lexer._type = TokenInvalidType

  lexer._modeStack = []
- lexer._mode = LexerDEFAULT_MODE
+ lexer._mode = LexerDefaultMode

  // You can set the text for the current token to override what is in
  // the input char buffer. Use setText() or can set l instance var.
  // /
  lexer._text = nil

- return l
+ return lexer
  }

  func InitLexer(lexer Lexer){
@@ -97,16 +97,16 @@ func InitLexer(lexer Lexer){
  }

  const (
- LexerDEFAULT_MODE = 0
- LexerMORE = -2
- LexerSKIP = -3
+ LexerDefaultMode = 0
+ LexerMore = -2
+ LexerSkip = -3
  )

  const (
- LexerDEFAULT_TOKEN_CHANNEL = Token.DEFAULT_CHANNEL
- LexerHIDDEN = Token.HIDDEN_CHANNEL
- LexerMIN_CHAR_VALUE = '\u0000'
- LexerMAX_CHAR_VALUE = '\uFFFE'
+ LexerDefaultTokenChannel = TokenDefaultChannel
+ LexerHidden = TokenHiddenChannel
+ LexerMinCharValue = '\u0000'
+ LexerMaxCharValue = '\uFFFE'
  )

  func (l *Lexer) reset() {
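
Note: these two const blocks are the heart of the refactor. SCREAMING_SNAKE names like LexerDEFAULT_MODE read as foreign in Go, where multiword identifiers use MixedCaps, so each constant is renamed in place. Spelled out with the types the values imply (a sketch; the commit itself leaves them untyped):

    const (
        LexerDefaultTokenChannel = TokenDefaultChannel // int channel number
        LexerHidden              = TokenHiddenChannel  // int channel number
        LexerMinCharValue        = '\u0000'            // rune
        LexerMaxCharValue        = '\uFFFE'            // rune
    )
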
@@ -115,15 +115,15 @@ func (l *Lexer) reset() {
  l._input.seek(0) // rewind the input
  }
  l._token = nil
- l._type = Token.INVALID_TYPE
- l._channel = Token.DEFAULT_CHANNEL
+ l._type = TokenInvalidType
+ l._channel = TokenDefaultChannel
  l._tokenStartCharIndex = -1
  l._tokenStartColumn = -1
  l._tokenStartLine = -1
  l._text = nil

  l._hitEOF = false
- l._mode = LexerDEFAULT_MODE
+ l._mode = LexerDefaultMode
  l._modeStack = []

  l._interp.reset()
@@ -145,32 +145,32 @@ func (l *Lexer) nextToken() {
  return l._token
  }
  l._token = nil
- l._channel = Token.DEFAULT_CHANNEL
+ l._channel = TokenDefaultChannel
  l._tokenStartCharIndex = l._input.index
  l._tokenStartColumn = l._interp.column
  l._tokenStartLine = l._interp.line
  l._text = nil
  var continueOuter = false
  for (true) {
- l._type = Token.INVALID_TYPE
- var ttype = LexerSKIP
+ l._type = TokenInvalidType
+ var ttype = LexerSkip
  try {
  ttype = l._interp.match(l._input, l._mode)
  } catch (e) {
  l.notifyListeners(e) // report error
  l.recover(e)
  }
- if (l._input.LA(1) == Token.EOF) {
+ if (l._input.LA(1) == TokenEOF) {
  l._hitEOF = true
  }
- if (l._type == Token.INVALID_TYPE) {
+ if (l._type == TokenInvalidType) {
  l._type = ttype
  }
- if (l._type == LexerSKIP) {
+ if (l._type == LexerSkip) {
  continueOuter = true
  break
  }
- if (l._type != LexerMORE) {
+ if (l._type != LexerMore) {
  break
  }
  }
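
Note: the try/catch inside nextToken has no Go equivalent. Since this runtime signals errors by panicking (see "cannot consume EOF" above), the match step would need a deferred recover, roughly as in this sketch (matchOnce is a hypothetical helper; ttype staying LexerSkip on error matches the JS control flow):

    func (l *Lexer) matchOnce() (ttype int) {
        ttype = LexerSkip
        defer func() {
            if e := recover(); e != nil {
                l.notifyListeners(e) // report error
                l.recover(e)
            }
        }()
        return l._interp.match(l._input, l._mode)
    }
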
@@ -196,11 +196,11 @@ func (l *Lexer) nextToken() {
  // and emits it.
  // /
  func (l *Lexer) skip() {
- l._type = LexerSKIP
+ l._type = LexerSkip
  }

  func (l *Lexer) more() {
- l._type = LexerMORE
+ l._type = LexerMore
  }

  func (l *Lexer) mode(m) {

@@ -270,8 +270,8 @@ func (l *Lexer) emit() {
  func (l *Lexer) emitEOF() {
  var cpos = l.column
  var lpos = l.line
- var eof = l._factory.create(l._tokenFactorySourcePair, Token.EOF,
- nil, Token.DEFAULT_CHANNEL, l._input.index,
+ var eof = l._factory.create(l._tokenFactorySourcePair, TokenEOF,
+ nil, TokenDefaultChannel, l._input.index,
  l._input.index - 1, lpos, cpos)
  l.emitToken(eof)
  return eof

@@ -330,7 +330,7 @@ Object.defineProperty(Lexer.prototype, "text", {
  func (l *Lexer) getAllTokens() {
  var tokens = []
  var t = l.nextToken()
- while (t.type != Token.EOF) {
+ for (t.type != TokenEOF) {
  tokens.push(t)
  t = l.nextToken()
  }

@@ -356,7 +356,7 @@ func (l *Lexer) getErrorDisplay(s) {
  }

  func (l *Lexer) getErrorDisplayForChar(c rune) string {
- if (c.charCodeAt(0) == Token.EOF) {
+ if (c.charCodeAt(0) == TokenEOF) {
  return "<EOF>"
  } else if (c == '\n') {
  return "\\n"

@@ -379,7 +379,7 @@ func (l *Lexer) getCharErrorDisplay(c) string {
  // to do sophisticated error recovery if you are in a fragment rule.
  // /
  func (l *Lexer) recover(re) {
- if (l._input.LA(1) != Token.EOF) {
+ if (l._input.LA(1) != TokenEOF) {
  if (ok, re := re.(LexerNoViableAltException)) {
  // skip a char and try again
  l._interp.consume(l._input)
@@ -363,7 +363,7 @@ func (p.*Parser) notifyErrorListeners(msg, offendingToken, err) {
  //
  func (p.*Parser) consume() {
  var o = p.getCurrentToken()
- if (o.type != Token.EOF) {
+ if (o.type != TokenEOF) {
  p.getInputStream().consume()
  }
  var hasListener = p._parseListeners != nil && p._parseListeners.length > 0

@@ -536,10 +536,10 @@ func (p.*Parser) isExpectedToken(symbol) {
  if (following.contains(symbol)) {
  return true
  }
- if (!following.contains(Token.EPSILON)) {
+ if (!following.contains(TokenEpsilon)) {
  return false
  }
- while (ctx != nil && ctx.invokingState >= 0 && following.contains(Token.EPSILON)) {
+ while (ctx != nil && ctx.invokingState >= 0 && following.contains(TokenEpsilon)) {
  var invokingState = atn.states[ctx.invokingState]
  var rt = invokingState.transitions[0]
  following = atn.nextTokens(rt.followState)

@@ -548,7 +548,7 @@ func (p.*Parser) isExpectedToken(symbol) {
  }
  ctx = ctx.parentCtx
  }
- if (following.contains(Token.EPSILON) && symbol == Token.EOF) {
+ if (following.contains(TokenEpsilon) && symbol == TokenEOF) {
  return true
  } else {
  return false
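
Note: TokenEpsilon in a follow set means "this rule may complete here", which is why isExpectedToken keeps climbing invoking contexts while epsilon is present. As idiomatic Go the while becomes a for loop, roughly (a sketch; the type assertion on the rule transition is assumed, not from the diff):

    for ctx != nil && ctx.invokingState >= 0 && following.contains(TokenEpsilon) {
        invokingState := atn.states[ctx.invokingState]
        rt := invokingState.transitions[0].(*RuleTransition) // assumed assertion
        following = atn.nextTokens(rt.followState)
        if following.contains(symbol) {
            return true
        }
        ctx = ctx.parentCtx
    }
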
@@ -14,68 +14,76 @@ package antlr4
  //
  // Note text is not an actual field of a rule return value it is computed
  // from start and stop using the input stream's toString() method. I
- // could add a ctor to this so that we can pass in and store the input
+ // could add a ctor to prc so that we can pass in and store the input
  // stream, but I'm not sure we want to do that. It would seem to be undefined
  // to get the .text property anyway if the rule matches tokens from multiple
  // input streams.
  //
  // I do not use getters for fields of objects that are used simply to
- // group values such as this aggregate. The getters/setters are there to
+ // group values such as prc aggregate. The getters/setters are there to
  // satisfy the superclass interface.

- var RuleContext = require('./RuleContext').RuleContext
- var Tree = require('./tree/Tree')
- var INVALID_INTERVAL = Tree.INVALID_INTERVAL
- var TerminalNode = Tree.TerminalNode
- var TerminalNodeImpl = Tree.TerminalNodeImpl
- var ErrorNodeImpl = Tree.ErrorNodeImpl
- var Interval = require("./IntervalSet").Interval
+ //var RuleContext = require('./RuleContext').RuleContext
+ //var Tree = require('./tree/Tree')
+ //var INVALID_INTERVAL = Tree.INVALID_INTERVAL
+ //var TerminalNode = Tree.TerminalNode
+ //var TerminalNodeImpl = Tree.TerminalNodeImpl
+ //var ErrorNodeImpl = Tree.ErrorNodeImpl
+ //var Interval = require("./IntervalSet").Interval

- func ParserRuleContext(parent, invokingStateNumber) {
- parent = parent || nil
- invokingStateNumber = invokingStateNumber || nil
- RuleContext.call(this, parent, invokingStateNumber)
- this.ruleIndex = -1
- // * If we are debugging or building a parse tree for a visitor,
- // we need to track all of the tokens and rule invocations associated
- // with this rule's context. This is empty for parsing w/o tree constr.
- // operation because we don't the need to track the details about
- // how we parse this rule.
- // /
- this.children = nil
- this.start = nil
- this.stop = nil
- // The exception that forced this rule to return. If the rule successfully
- // completed, this is {@code nil}.
- this.exception = nil
+ type ParserRuleContext struct {
+ RuleContext
+ ruleIndex int
+ children []RuleContext
+ start
+ stop
+ exception
  }

- ParserRuleContext.prototype = Object.create(RuleContext.prototype)
- ParserRuleContext.prototype.constructor = ParserRuleContext
+ func NewParserRuleContext(parent, invokingStateNumber) *ParserRuleContext {

- // * COPY a ctx (I'm deliberately not using copy constructor)///
- func (this *ParserRuleContext) copyFrom(ctx) {
+ RuleContext.call(prc, parent, invokingStateNumber)
+
+ prc.ruleIndex = -1
+ // * If we are debugging or building a parse tree for a visitor,
+ // we need to track all of the tokens and rule invocations associated
+ // with prc rule's context. This is empty for parsing w/o tree constr.
+ // operation because we don't the need to track the details about
+ // how we parse prc rule.
+ // /
+ prc.children = nil
+ prc.start = nil
+ prc.stop = nil
+ // The exception that forced prc rule to return. If the rule successfully
+ // completed, prc is {@code nil}.
+ prc.exception = nil
+ return prc
+ }
+
+ func (prc *ParserRuleContext) copyFrom(ctx *RuleContext) {
  // from RuleContext
- this.parentCtx = ctx.parentCtx
- this.invokingState = ctx.invokingState
- this.children = nil
- this.start = ctx.start
- this.stop = ctx.stop
+ prc.parentCtx = ctx.parentCtx
+ prc.invokingState = ctx.invokingState
+ prc.children = nil
+ prc.start = ctx.start
+ prc.stop = ctx.stop
  }

  // Double dispatch methods for listeners
- func (this *ParserRuleContext) enterRule(listener) {
+ func (prc *ParserRuleContext) enterRule(listener *ParseTreeListener) {
  }

- func (this *ParserRuleContext) exitRule(listener) {
+ func (prc *ParserRuleContext) exitRule(listener *ParseTreeListener) {
  }

  // * Does not set parent link other add methods do that///
- func (this *ParserRuleContext) addChild(child) {
- if (this.children == nil) {
- this.children = []
+ func (prc *ParserRuleContext) addChild(child) {
+ if (prc.children == nil) {
+ prc.children = []
  }
- this.children.push(child)
+ prc.children.push(child)
  return child
  }
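
Note: the new ParserRuleContext struct still has untyped start/stop/exception fields, and the bodies keep JavaScript constructs (||, ?:, instanceof, .length). Since Go has no ternary, the getChild index lookup becomes an if/else; the len(prc.children) >= i check above also looks like an off-by-one carried over from the JS, since it would index past the slice when i == len. A sketch of the untyped lookup case:

    func (prc *ParserRuleContext) getChild(i int) RuleContext {
        if prc.children == nil || i < 0 || i >= len(prc.children) {
            return nil // out of range, no child to return
        }
        return prc.children[i]
    }
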
@@ -83,33 +91,33 @@ func (this *ParserRuleContext) addChild(child) {
  // we entered a rule. If we have // label, we will need to remove
  // generic ruleContext object.
  // /
- func (this *ParserRuleContext) removeLastChild() {
- if (this.children != nil) {
- this.children.pop()
+ func (prc *ParserRuleContext) removeLastChild() {
+ if (prc.children != nil) {
+ prc.children.pop()
  }
  }

- func (this *ParserRuleContext) addTokenNode(token) {
+ func (prc *ParserRuleContext) addTokenNode(token) {
  var node = NewTerminalNodeImpl(token)
- this.addChild(node)
- node.parentCtx = this
+ prc.addChild(node)
+ node.parentCtx = prc
  return node
  }

- func (this *ParserRuleContext) addErrorNode(badToken) {
+ func (prc *ParserRuleContext) addErrorNode(badToken) {
  var node = NewErrorNodeImpl(badToken)
- this.addChild(node)
- node.parentCtx = this
+ prc.addChild(node)
+ node.parentCtx = prc
  return node
  }

- func (this *ParserRuleContext) getChild(i, type) {
+ func (prc *ParserRuleContext) getChild(i, type) {
  type = type || nil
  if (type == nil) {
- return this.children.length>=i ? this.children[i] : nil
+ return len(prc.children) >= i ? prc.children[i] : nil
  } else {
- for(var j=0 j<this.children.length j++) {
- var child = this.children[j]
+ for(var j=0 j<len(prc.children) j++) {
+ var child = prc.children[j]
  if(child instanceof type) {
  if(i==0) {
  return child

@@ -123,9 +131,9 @@ func (this *ParserRuleContext) getChild(i, type) {
  }


- func (this *ParserRuleContext) getToken(ttype, i) {
- for(var j=0 j<this.children.length j++) {
- var child = this.children[j]
+ func (prc *ParserRuleContext) getToken(ttype, i) {
+ for(var j=0 j<len(prc.children) j++) {
+ var child = prc.children[j]
  if (child instanceof TerminalNode) {
  if (child.symbol.type == ttype) {
  if(i==0) {

@@ -139,13 +147,13 @@ func (this *ParserRuleContext) getToken(ttype, i) {
  return nil
  }

- func (this *ParserRuleContext) getTokens(ttype ) {
- if (this.children== nil) {
+ func (prc *ParserRuleContext) getTokens(ttype ) {
+ if (prc.children== nil) {
  return []
  } else {
  var tokens = []
- for(var j=0 j<this.children.length j++) {
- var child = this.children[j]
+ for(var j=0 j<len(prc.children) j++) {
+ var child = prc.children[j]
  if (child instanceof TerminalNode) {
  if (child.symbol.type == ttype) {
  tokens.push(child)

@@ -156,17 +164,17 @@ func (this *ParserRuleContext) getTokens(ttype ) {
  }
  }

- func (this *ParserRuleContext) getTypedRuleContext(ctxType, i) {
- return this.getChild(i, ctxType)
+ func (prc *ParserRuleContext) getTypedRuleContext(ctxType, i) {
+ return prc.getChild(i, ctxType)
  }

- func (this *ParserRuleContext) getTypedRuleContexts(ctxType) {
- if (this.children== nil) {
+ func (prc *ParserRuleContext) getTypedRuleContexts(ctxType) {
+ if (prc.children== nil) {
  return []
  } else {
  var contexts = []
- for(var j=0 j<this.children.length j++) {
- var child = this.children[j]
+ for(var j=0 j<len(prc.children) j++) {
+ var child = prc.children[j]
  if (child instanceof ctxType) {
  contexts.push(child)
  }

@@ -175,30 +183,35 @@ func (this *ParserRuleContext) getTypedRuleContexts(ctxType) {
  }
  }

- func (this *ParserRuleContext) getChildCount() {
- if (this.children== nil) {
+ func (prc *ParserRuleContext) getChildCount() {
+ if (prc.children== nil) {
  return 0
  } else {
- return this.children.length
+ return len(prc.children)
  }
  }

- func (this *ParserRuleContext) getSourceInterval() {
- if( this.start == nil || this.stop == nil) {
+ func (prc *ParserRuleContext) getSourceInterval() {
+ if( prc.start == nil || prc.stop == nil) {
  return INVALID_INTERVAL
  } else {
- return NewInterval(this.start.tokenIndex, this.stop.tokenIndex)
+ return NewInterval(prc.start.tokenIndex, prc.stop.tokenIndex)
  }
  }

- RuleContext.EMPTY = NewParserRuleContext()
+ var RuleContextEMPTY = NewParserRuleContext(nil, nil)

- func InterpreterRuleContext(parent, invokingStateNumber, ruleIndex) {
- ParserRuleContext.call(parent, invokingStateNumber)
- this.ruleIndex = ruleIndex
- return this
+ type InterpreterRuleContext struct {
+ ruleIndex int
  }

- InterpreterRuleContext.prototype = Object.create(ParserRuleContext.prototype)
- InterpreterRuleContext.prototype.constructor = InterpreterRuleContext
+ func InterpreterRuleContext(parent, invokingStateNumber, ruleIndex int) {
+
+ prc := new(InterpreterRuleContext)
+
+ prc.init(parent, invokingStateNumber)
+
+ prc.ruleIndex = ruleIndex
+
+ return prc
  }
@@ -37,7 +37,7 @@ func (this *Recognizer) getTokenTypeMap() {
  var result = this.tokenTypeMapCache[tokenNames]
  if(result==undefined) {
  result = tokenNames.reduce(function(o, k, i) { o[k] = i })
- result.EOF = Token.EOF
+ result.EOF = TokenEOF
  this.tokenTypeMapCache[tokenNames] = result
  }
  return result

@@ -65,7 +65,7 @@ func (this *Recognizer) getTokenType(tokenName) {
  if (ttype !=undefined) {
  return ttype
  } else {
- return Token.INVALID_TYPE
+ return TokenInvalidType
  }
  }

@@ -97,7 +97,7 @@ func (this *Recognizer) getTokenErrorDisplay(t) {
  }
  var s = t.text
  if (s==nil) {
- if (t.type==Token.EOF) {
+ if (t.type==TokenEOF) {
  s = "<EOF>"
  } else {
  s = "<" + t.type + ">"
@@ -5,38 +5,43 @@ package antlr4
  // we obtained this token.

  type Token struct {
- this.source = nil
- this.type = nil // token type of the token
- this.channel = nil // The parser ignores everything not on DEFAULT_CHANNEL
- this.start = nil // optional return -1 if not implemented.
- this.stop = nil // optional return -1 if not implemented.
- this.tokenIndex = nil // from 0..n-1 of the token object in the input stream
- this.line = nil // line=1..n of the 1st character
- this.column = nil // beginning of the line at which it occurs, 0..n-1
- this._text = nil // text of the token.
- return this
+ source *TokenSource
+ tokenType int // token type of the token
+ channel int // The parser ignores everything not on DEFAULT_CHANNEL
+ start int // optional return -1 if not implemented.
+ stop int // optional return -1 if not implemented.
+ tokenIndex int // from 0..n-1 of the token object in the input stream
+ line int // line=1..n of the 1st character
+ column int // beginning of the line at which it occurs, 0..n-1
+ text string // text of the token.
  }

- Token.INVALID_TYPE = 0
+ func NewToken() *Token {
+ return new(Token)
+ }

- // During lookahead operations, this "token" signifies we hit rule end ATN state
- // and did not follow it despite needing to.
- Token.EPSILON = -2
+ const (
+ TokenInvalidType = 0

- Token.MIN_USER_TOKEN_TYPE = 1
+ // During lookahead operations, this "token" signifies we hit rule end ATN state
+ // and did not follow it despite needing to.
+ TokenEpsilon = -2

- Token.EOF = -1
+ TokenMinUserTokenType = 1

- // All tokens go to the parser (unless skip() is called in that rule)
- // on a particular "channel". The parser tunes to a particular channel
- // so that whitespace etc... can go to the parser on a "hidden" channel.
+ TokenEOF = -1

- Token.DEFAULT_CHANNEL = 0
+ // All tokens go to the parser (unless skip() is called in that rule)
+ // on a particular "channel". The parser tunes to a particular channel
+ // so that whitespace etc... can go to the parser on a "hidden" channel.

- // Anything on different channel than DEFAULT_CHANNEL is not parsed
- // by parser.
+ TokenDefaultChannel = 0

- Token.HIDDEN_CHANNEL = 1
+ // Anything on different channel than DEFAULT_CHANNEL is not parsed
+ // by parser.
+
+ TokenHiddenChannel = 1
+ )

  // Explicitly set the text for this token. If {code text} is not
  // {@code nil}, then {@link //getText} will return this value rather than

@@ -46,14 +51,16 @@ Token.HIDDEN_CHANNEL = 1
  // should be obtained from the input along with the start and stop indexes
  // of the token.

- Object.defineProperty(Token.prototype, "text", {
- get : function() {
- return this._text
- },
- set : function(text) {
- this._text = text
- }
- })
+ //
+ //
+ //Object.defineProperty(Token.prototype, "text", {
+ // get : function() {
+ // return this._text
+ // },
+ // set : function(text) {
+ // this._text = text
+ // }
+ //})

  func (this *Token) getTokenSource() {
  return this.source[0]
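
Note: the scattered Token.X = n assignments collapse into a single grouped const block, the idiomatic Go home for related constants. Consolidated, the renamed values from this hunk are:

    const (
        TokenInvalidType      = 0
        TokenEpsilon          = -2 // lookahead hit a rule end and did not follow it
        TokenMinUserTokenType = 1
        TokenEOF              = -1
        TokenDefaultChannel   = 0 // the channel the parser tunes to
        TokenHiddenChannel    = 1 // e.g. whitespace and comments
    )
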
@@ -63,29 +70,33 @@ func (this *Token) getInputStream() {
  return this.source[1]
  }

- func CommonToken(source, type, channel, start, stop) {
- Token.call(this)
- this.source = source != undefined ? source : CommonToken.EMPTY_SOURCE
- this.type = type != undefined ? type : nil
- this.channel = channel != undefined ? channel : Token.DEFAULT_CHANNEL
- this.start = start != undefined ? start : -1
- this.stop = stop != undefined ? stop : -1
- this.tokenIndex = -1
- if (this.source[0] != nil) {
- this.line = source[0].line
- this.column = source[0].column
- } else {
- this.column = -1
- }
- return this
+ type CommonToken struct {
+ Token
  }

- CommonToken.prototype = Object.create(Token.prototype)
- CommonToken.prototype.constructor = CommonToken
+ func NewCommonToken(source *InputStream, tokenType int, channel, start int, stop int) *CommonToken {
+
+ t := NewToken()
+
+ t.source = source
+ t.tokenType = -1
+ t.channel = channel
+ t.start = start
+ t.stop = stop
+ t.tokenIndex = -1
+ if (t.source[0] != nil) {
+ t.line = source[0].line
+ t.column = source[0].column
+ } else {
+ t.column = -1
+ }
+ return t
+ }

  // An empty {@link Pair} which is used as the default value of
  // {@link //source} for tokens that do not have a source.
- CommonToken.EMPTY_SOURCE = [ nil, nil ]
+ //CommonToken.EMPTY_SOURCE = [ nil, nil ]

  // Constructs a New{@link CommonToken} as a copy of another {@link Token}.
  //

@@ -99,13 +110,13 @@ CommonToken.EMPTY_SOURCE = [ nil, nil ]
  //
  // @param oldToken The token to copy.
  //
- func (this *CommonToken) clone() {
- var t = NewCommonToken(this.source, this.type, this.channel, this.start,
- this.stop)
- t.tokenIndex = this.tokenIndex
- t.line = this.line
- t.column = this.column
- t.text = this.text
+ func (ct *CommonToken) clone() {
+ var t = NewCommonToken(ct.source, ct.tokenType, ct.channel, ct.start,
+ ct.stop)
+ t.tokenIndex = ct.tokenIndex
+ t.line = ct.line
+ t.column = ct.column
+ t.text = ct.text
  return t
  }

@@ -138,7 +149,7 @@ func (this *CommonToken) toString() {
  txt = "<no text>"
  }
  return "[@" + this.tokenIndex + "," + this.start + ":" + this.stop + "='" +
- txt + "',<" + this.type + ">" +
+ txt + "',<" + this.tokenType + ">" +
  (this.channel > 0 ? ",channel=" + this.channel : "") + "," +
  this.line + ":" + this.column + "]"
  }
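
Note: NewCommonToken accepts a tokenType parameter but then hard-codes t.tokenType = -1, which looks like a slip in this pass; presumably the intended body uses the parameter, as in this sketch:

    t := NewToken()
    t.source = source
    t.tokenType = tokenType // use the parameter rather than hard-coding -1
    t.channel = channel
    t.start = start
    t.stop = stop
    t.tokenIndex = -1
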
@@ -131,22 +131,22 @@ func (this *ATN) getExpectedTokens( stateNumber, ctx ) {
  }
  var s = this.states[stateNumber]
  var following = this.nextTokens(s)
- if (!following.contains(Token.EPSILON)) {
+ if (!following.contains(TokenEpsilon)) {
  return following
  }
  var expected = NewIntervalSet()
  expected.addSet(following)
- expected.removeOne(Token.EPSILON)
- while (ctx != nil && ctx.invokingState >= 0 && following.contains(Token.EPSILON)) {
+ expected.removeOne(TokenEpsilon)
+ while (ctx != nil && ctx.invokingState >= 0 && following.contains(TokenEpsilon)) {
  var invokingState = this.states[ctx.invokingState]
  var rt = invokingState.transitions[0]
  following = this.nextTokens(rt.followState)
  expected.addSet(following)
- expected.removeOne(Token.EPSILON)
+ expected.removeOne(TokenEpsilon)
  ctx = ctx.parentCtx
  }
- if (following.contains(Token.EPSILON)) {
- expected.addOne(Token.EOF)
+ if (following.contains(TokenEpsilon)) {
+ expected.addOne(TokenEOF)
  }
  return expected
  }
@@ -217,7 +217,7 @@ func (this *ATNDeserializer) readRules(atn) {
  if ( atn.grammarType == ATNType.LEXER ) {
  var tokenType = this.readInt()
  if (tokenType == 0xFFFF) {
- tokenType = Token.EOF
+ tokenType = TokenEOF
  }
  atn.ruleToTokenType[i] = tokenType
  }

@@ -588,7 +588,7 @@ ATNDeserializer.prototype.edgeFactory = function(atn, type, src, trg, arg1, arg2
  case Transition.EPSILON:
  return NewEpsilonTransition(target)
  case Transition.RANGE:
- return arg3 != 0 ? NewRangeTransition(target, Token.EOF, arg2) : NewRangeTransition(target, arg1, arg2)
+ return arg3 != 0 ? NewRangeTransition(target, TokenEOF, arg2) : NewRangeTransition(target, arg1, arg2)
  case Transition.RULE:
  return NewRuleTransition(atn.states[arg1], arg2, arg3, target)
  case Transition.PREDICATE:

@@ -596,7 +596,7 @@ ATNDeserializer.prototype.edgeFactory = function(atn, type, src, trg, arg1, arg2
  case Transition.PRECEDENCE:
  return NewPrecedencePredicateTransition(target, arg1)
  case Transition.ATOM:
- return arg3 != 0 ? NewAtomTransition(target, Token.EOF) : NewAtomTransition(target, arg1)
+ return arg3 != 0 ? NewAtomTransition(target, TokenEOF) : NewAtomTransition(target, arg1)
  case Transition.ACTION:
  return NewActionTransition(target, arg1, arg2, arg3 != 0)
  case Transition.SET:
@@ -61,7 +61,7 @@ func LexerATNSimulator(recog, atn, decisionToDFA, sharedContextCache) {
  // The index of the character relative to the beginning of the line
  // 0..n-1///
  this.column = 0
- this.mode = Lexer.DEFAULT_MODE
+ this.mode = LexerDefaultMode
  // Used during DFA/ATN exec to record the most recent accept configuration
  // info
  this.prevAccept = NewSimState()

@@ -110,7 +110,7 @@ func (this *LexerATNSimulator) reset() {
  this.startIndex = -1
  this.line = 1
  this.column = 0
- this.mode = Lexer.DEFAULT_MODE
+ this.mode = LexerDefaultMode
  }

  func (this *LexerATNSimulator) matchATN(input) {

@@ -184,12 +184,12 @@ LexerATNSimulator.prototype.execATN = function(input, ds0) {
  // capturing the accept state so the input index, line, and char
  // position accurately reflect the state of the interpreter at the
  // end of the token.
- if (t != Token.EOF) {
+ if (t != TokenEOF) {
  this.consume(input)
  }
  if (target.isAcceptState) {
  this.captureSimState(this.prevAccept, input, target)
- if (t == Token.EOF) {
+ if (t == TokenEOF) {
  break
  }
  }

@@ -260,8 +260,8 @@ func (this *LexerATNSimulator) failOrAccept(prevAccept, input, reach, t) {
  return prevAccept.dfaState.prediction
  } else {
  // if no accept and EOF is first char, return EOF
- if (t == Token.EOF && input.index == this.startIndex) {
- return Token.EOF
+ if (t == TokenEOF && input.index == this.startIndex) {
+ return TokenEOF
  }
  throw NewLexerNoViableAltException(this.recog, input, this.startIndex, reach)
  }

@@ -293,7 +293,7 @@ func (this *LexerATNSimulator) getReachableConfigSet(input, closure,
  if (lexerActionExecutor != nil) {
  lexerActionExecutor = lexerActionExecutor.fixOffsetBeforeMatch(input.index - this.startIndex)
  }
- var treatEofAsEpsilon = (t == Token.EOF)
+ var treatEofAsEpsilon = (t == TokenEOF)
  var config = NewLexerATNConfig({state:target, lexerActionExecutor:lexerActionExecutor}, cfg)
  if (this.closure(input, config, reach,
  currentAltReachedAcceptState, true, treatEofAsEpsilon)) {

@@ -463,7 +463,7 @@ func (this *LexerATNSimulator) getEpsilonTarget(input, config, trans,
  trans.serializationType == Transition.RANGE ||
  trans.serializationType == Transition.SET) {
  if (treatEofAsEpsilon) {
- if (trans.matches(Token.EOF, 0, 0xFFFF)) {
+ if (trans.matches(TokenEOF, 0, 0xFFFF)) {
  cfg = NewLexerATNConfig( { state:trans.target }, config)
  }
  }
@@ -489,7 +489,7 @@ ParserATNSimulator.prototype.execATN = function(dfa, s0, input, startIndex, oute
  }
  previousD = D

- if (t != Token.EOF) {
+ if (t != TokenEOF) {
  input.consume()
  t = input.LA(1)
  }

@@ -656,7 +656,7 @@ ParserATNSimulator.prototype.execATNWithFullContext = function(dfa, D, // how fa
  // So, keep going.
  }
  previous = reach
- if( t != Token.EOF) {
+ if( t != TokenEOF) {
  input.consume()
  t = input.LA(1)
  }

@@ -728,7 +728,7 @@ func (this *ParserATNSimulator) computeReachSet(closure, t, fullCtx) {
  console.log("testing " + this.getTokenName(t) + " at " + c)
  }
  if (c.state instanceof RuleStopState) {
- if (fullCtx || t == Token.EOF) {
+ if (fullCtx || t == TokenEOF) {
  if (skippedStopStates==nil) {
  skippedStopStates = []
  }

@@ -763,7 +763,7 @@ func (this *ParserATNSimulator) computeReachSet(closure, t, fullCtx) {
  // condition is not true when one or more configurations have been
  // withheld in skippedStopStates, or when the current symbol is EOF.
  //
- if (skippedStopStates==nil && t!=Token.EOF) {
+ if (skippedStopStates==nil && t!=TokenEOF) {
  if (intermediate.items.length==1) {
  // Don't pursue the closure if there is just one state.
  // It can only have one alternative just add to result

@@ -782,12 +782,12 @@ func (this *ParserATNSimulator) computeReachSet(closure, t, fullCtx) {
  if (reach==nil) {
  reach = NewATNConfigSet(fullCtx)
  var closureBusy = NewSet()
- var treatEofAsEpsilon = t == Token.EOF
+ var treatEofAsEpsilon = t == TokenEOF
  for (var k=0 k<intermediate.items.lengthk++) {
  this.closure(intermediate.items[k], reach, closureBusy, false, fullCtx, treatEofAsEpsilon)
  }
  }
- if (t == Token.EOF) {
+ if (t == TokenEOF) {
  // After consuming EOF no additional input is possible, so we are
  // only interested in configurations which reached the end of the
  // decision rule (local context) or end of the start rule (full

@@ -859,7 +859,7 @@ func (this *ParserATNSimulator) removeAllConfigsNotInRuleStopState(configs, look
  }
  if (lookToEndOfRule && config.state.epsilonOnlyTransitions) {
  var nextTokens = this.atn.nextTokens(config.state)
- if (nextTokens.contains(Token.EPSILON)) {
+ if (nextTokens.contains(TokenEpsilon)) {
  var endOfRuleState = this.atn.ruleToStopState[config.state.ruleIndex]
  result.add(NewATNConfig({state:endOfRuleState}, config), this.mergeCache)
  }

@@ -1334,7 +1334,7 @@ func (this *ParserATNSimulator) getEpsilonTarget(config, t, collectPredicates, i
  // EOF transitions act like epsilon transitions after the first EOF
  // transition is traversed
  if (treatEofAsEpsilon) {
- if (t.matches(Token.EOF, 0, 1)) {
+ if (t.matches(TokenEOF, 0, 1)) {
  return NewATNConfig({state: t.target}, config)
  }
  }

@@ -1483,7 +1483,7 @@ func (this *ParserATNSimulator) getConflictingAltsOrUniqueAlt(configs) {
  }

  func (this *ParserATNSimulator) getTokenName( t) {
- if (t==Token.EOF) {
+ if (t==TokenEOF) {
  return "EOF"
  }
  if( this.parser!=nil && this.parser.literalNames!=nil) {
@@ -222,7 +222,7 @@ func SetTransition(target, set) {
  this.label = set
  } else {
  this.label = NewIntervalSet()
- this.label.addOne(Token.INVALID_TYPE)
+ this.label.addOne(TokenInvalidType)
  }
  return this
  }
@@ -1,14 +1,14 @@
  package error

- var Token = require('./../Token').Token
- var Errors = require('./Errors')
- var NoViableAltException = Errors.NoViableAltException
- var InputMismatchException = Errors.InputMismatchException
- var FailedPredicateException = Errors.FailedPredicateException
- var ParseCancellationException = Errors.ParseCancellationException
- var ATNState = require('./../atn/ATNState').ATNState
- var Interval = require('./../IntervalSet').Interval
- var IntervalSet = require('./../IntervalSet').IntervalSet
+ //var Token = require('./../Token').Token
+ //var Errors = require('./Errors')
+ //var NoViableAltException = Errors.NoViableAltException
+ //var InputMismatchException = Errors.InputMismatchException
+ //var FailedPredicateException = Errors.FailedPredicateException
+ //var ParseCancellationException = Errors.ParseCancellationException
+ //var ATNState = require('./../atn/ATNState').ATNState
+ //var Interval = require('./../IntervalSet').Interval
+ //var IntervalSet = require('./../IntervalSet').IntervalSet

  type ErrorStrategy struct {
@@ -218,7 +218,7 @@ func (this *DefaultErrorStrategy) sync(recognizer) {
  var s = recognizer._interp.atn.states[recognizer.state]
  var la = recognizer.getTokenStream().LA(1)
  // try cheaper subset first might get lucky. seems to shave a wee bit off
- if (la==Token.EOF || recognizer.atn.nextTokens(s).contains(la)) {
+ if (la==TokenEOF || recognizer.atn.nextTokens(s).contains(la)) {
  return
  }
  // Return but don't end recovery. only do that upon valid token match

@@ -262,7 +262,7 @@ func (this *DefaultErrorStrategy) reportNoViableAlternative(recognizer, e) {
  var tokens = recognizer.getTokenStream()
  var input
  if(tokens != nil) {
- if (e.startToken.type==Token.EOF) {
+ if (e.startToken.type==TokenEOF) {
  input = "<EOF>"
  } else {
  input = tokens.getText(NewInterval(e.startToken, e.offendingToken))

@@ -522,18 +522,18 @@ func (this *DefaultErrorStrategy) getMissingSymbol(recognizer) {
  var expecting = this.getExpectedTokens(recognizer)
  var expectedTokenType = expecting.first() // get any element
  var tokenText
- if (expectedTokenType==Token.EOF) {
+ if (expectedTokenType==TokenEOF) {
  tokenText = "<missing EOF>"
  } else {
  tokenText = "<missing " + recognizer.literalNames[expectedTokenType] + ">"
  }
  var current = currentSymbol
  var lookback = recognizer.getTokenStream().LT(-1)
- if (current.type==Token.EOF && lookback != nil) {
+ if (current.type==TokenEOF && lookback != nil) {
  current = lookback
  }
  return recognizer.getTokenFactory().create(current.source,
- expectedTokenType, tokenText, Token.DEFAULT_CHANNEL,
+ expectedTokenType, tokenText, TokenDefaultChannel,
  -1, -1, current.line, current.column)
  }

@@ -555,7 +555,7 @@ func (this *DefaultErrorStrategy) getTokenErrorDisplay(t) {
  }
  var s = t.text
  if (s == nil) {
- if (t.type==Token.EOF) {
+ if (t.type==TokenEOF) {
  s = "<EOF>"
  } else {
  s = "<" + t.type + ">"

@@ -675,14 +675,14 @@ func (this *DefaultErrorStrategy) getErrorRecoverySet(recognizer) {
  recoverSet.addSet(follow)
  ctx = ctx.parentCtx
  }
- recoverSet.removeOne(Token.EPSILON)
+ recoverSet.removeOne(TokenEpsilon)
  return recoverSet
  }

  // Consume tokens until one matches the given token set.//
  func (this *DefaultErrorStrategy) consumeUntil(recognizer, set) {
  var ttype = recognizer.getTokenStream().LA(1)
- while( ttype != Token.EOF && !set.contains(ttype)) {
+ while( ttype != TokenEOF && !set.contains(ttype)) {
  recognizer.consume()
  ttype = recognizer.getTokenStream().LA(1)
  }
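
Note: consumeUntil still uses while, which Go lacks; the direct translation is a for loop. A sketch, with illustrative parameter types (the receiver and signature here are assumptions, not the committed API):

    func (d *DefaultErrorStrategy) consumeUntil(recognizer Parser, set *IntervalSet) {
        ttype := recognizer.getTokenStream().LA(1)
        for ttype != TokenEOF && !set.contains(ttype) {
            recognizer.consume() // skip the offending token
            ttype = recognizer.getTokenStream().LA(1)
        }
    }
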
@@ -144,7 +144,7 @@ func (this *TerminalNodeImpl) getText() {
  }

  func (this *TerminalNodeImpl) toString() {
- if (this.symbol.type == Token.EOF) {
+ if (this.symbol.type == TokenEOF) {
  return "<EOF>"
  } else {
  return this.symbol.text
@@ -139,7 +139,7 @@ func New<parser.name>(input) <parser.name> {
  }

  const(
- <parser.name>EOF = antlr4.Token.EOF
+ <parser.name>EOF = antlr4.TokenEOF
  <if(parser.tokens)>
  <parser.tokens:{k | <parser.name><k> = <parser.tokens.(k)>}; separator="\n", wrap, anchor>
  <endif>

@@ -845,7 +845,7 @@ func New<lexer.name>(input *antlr4.TokenStream) <lexer.name> {
  lex.symbolicNames = [...]string{ <lexer.symbolicNames:{t | <t>}; null="nil", separator=", ", wrap, anchor> }
  lex.ruleNames = [...]string{ <lexer.ruleNames:{r | "<r>"}; separator=", ", wrap, anchor> }
  lex.grammarFileName = "<lexer.grammarFileName>"
- lex.EOF = antlr4.Token.EOF
+ lex.EOF = antlr4.TokenEOF

  return lex
  }