Various type fixes

Peter Boyer 2015-12-16 13:41:53 -05:00
parent ca20ec3cae
commit 350a39cef3
6 changed files with 135 additions and 259 deletions

View File

@@ -1,101 +0,0 @@
-grammar JSArithmetic;
-options { language = JavaScript; }
-equation
-    : expression relop expression
-    ;
-expression
-    : multiplyingExpression ((PLUS|MINUS) multiplyingExpression)*
-    ;
-multiplyingExpression
-    : powExpression ((TIMES|DIV) powExpression)*
-    ;
-powExpression
-    : atom (POW expression)?
-    ;
-atom
-    : scientific
-    | variable
-    | LPAREN expression RPAREN
-    ;
-scientific
-    : number (E number)?
-    ;
-relop
-    : EQ | GT | LT
-    ;
-number
-    : MINUS? DIGIT+ (POINT DIGIT+)?
-    ;
-variable
-    : MINUS? LETTER (LETTER | DIGIT)*;
-LPAREN
-    : '('
-    ;
-RPAREN
-    : ')'
-    ;
-PLUS
-    : '+'
-    ;
-MINUS
-    : '-'
-    ;
-TIMES
-    : '*'
-    ;
-DIV
-    : '/'
-    ;
-GT
-    : '>'
-    ;
-LT
-    : '<'
-    ;
-EQ
-    : '='
-    ;
-POINT
-    : '.'
-    ;
-E
-    : 'e'
-    | 'E'
-    ;
-POW
-    : '^'
-    ;
-LETTER
-    : ('a'..'z') | ('A'..'Z')
-    ;
-DIGIT
-    : ('0'..'9')
-    ;
-WS
-    : [ \r\n\t]+ -> channel(HIDDEN)
-    ;

View File

@@ -69,7 +69,7 @@ func (is *InputStream) LT(offset int) {
 }
 // mark/release do nothing we have entire buffer
-func (is *InputStream) mark() {
+func (is *InputStream) mark() int {
 	return -1
 }
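Porting note: unlike JavaScript, Go will not compile `return -1` until the `int` return type is declared, which is all this hunk adds. A standalone sketch of the buffered-stream contract the comment describes (type shape assumed, not part of this commit):

package main

import "fmt"

// InputStream is an assumed stand-in for the runtime's type, which
// keeps the whole input in memory.
type InputStream struct {
	index int
	data  []rune
}

// mark returns a dummy handle: with the entire buffer resident there
// is nothing to pin, so -1 is always a safe marker to hand back.
func (is *InputStream) mark() int {
	return -1
}

// release is the matching no-op; it ignores the handle.
func (is *InputStream) release(marker int) {}

func main() {
	is := &InputStream{data: []rune("1+2")}
	m := is.mark() // no bookkeeping happens
	is.release(m)
	fmt.Println(m) // -1
}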

View File

@@ -4,29 +4,12 @@ import (
 	"antlr4/atn"
 )
-//var Set = require('./Utils').Set
-//var BitSet = require('./Utils').BitSet
-//var Token = require('./Token').Token
-//var ATNConfig = require('./atn/ATNConfig').ATNConfig
-//var Interval = require('./IntervalSet').Interval
-//var IntervalSet = require('./IntervalSet').IntervalSet
-//var RuleStopState = require('./atn/ATNState').RuleStopState
-//var RuleTransition = require('./atn/Transition').RuleTransition
-//var NotSetTransition = require('./atn/Transition').NotSetTransition
-//var WildcardTransition = require('./atn/Transition').WildcardTransition
-//var AbstractPredicateTransition = require('./atn/Transition').AbstractPredicateTransition
-//
-//var pc = require('./PredictionContext')
-//var predictionContextFromRuleContext = pc.predictionContextFromRuleContext
-//var PredictionContext = pc.PredictionContext
-//var SingletonPredictionContext = pc.SingletonPredictionContext
 type LL1Analyzer struct {
-	atn atn.ATN
+	atn *atn.ATN
 }
-func NewLL1Analyzer (atn) *LL1Analyzer {
-	la = new(LL1Analyzer)
+func NewLL1Analyzer (atn *atn.ATN) *LL1Analyzer {
+	la := new(LL1Analyzer)
 	la.atn = atn
 	return la
 }
@@ -34,7 +17,9 @@ func NewLL1Analyzer (atn) *LL1Analyzer {
 //* Special value added to the lookahead sets to indicate that we hit
 // a predicate during analysis if {@code seeThruPreds==false}.
 ///
-LL1Analyzer.HIT_PRED = TokenInvalidType
+const (
+	LL1AnalyzerHIT_PRED = TokenInvalidType
+)
 //*
 // Calculates the SLL(1) expected lookahead set for each outgoing transition
@@ -50,14 +35,13 @@ func (la *LL1Analyzer) getDecisionLookahead(s) {
 	if (s == nil) {
 		return nil
 	}
-	var count = s.transitions.length
+	var count = len(s.transitions)
 	var look = []
-	for(var alt=0 alt< count alt++) {
+	for alt := 0; alt < count; alt++ {
 		look[alt] = NewIntervalSet()
 		var lookBusy = NewSet()
 		var seeThruPreds = false // fail to get lookahead upon pred
-		la._LOOK(s.transition(alt).target, nil, PredictionContext.EMPTY,
-			look[alt], lookBusy, NewBitSet(), seeThruPreds, false)
+		la._LOOK(s.transition(alt).target, nil, PredictionContext.EMPTY, look[alt], lookBusy, NewBitSet(), seeThruPreds, false)
 		// Wipe out lookahead for la alternative if we found nothing
 		// or we had a predicate when we !seeThruPreds
 		if (look[alt].length==0 || look[alt].contains(LL1Analyzer.HIT_PRED)) {
@@ -85,11 +69,13 @@ func (la *LL1Analyzer) getDecisionLookahead(s) {
 // @return The set of tokens that can follow {@code s} in the ATN in the
 // specified {@code ctx}.
 ///
-func (la *LL1Analyzer) LOOK(s, stopState, ctx) {
+func (la *LL1Analyzer) LOOK(s, stopState int, ctx *RuleContext) *IntervalSet {
 	var r = NewIntervalSet()
 	var seeThruPreds = true // ignore preds get all lookahead
-	ctx = ctx || nil
-	var lookContext = ctx!=nil ? predictionContextFromRuleContext(s.atn, ctx) : nil
+	var lookContext *RuleContext
+	if (ctx != nil){
+		predictionContextFromRuleContext(s.atn, ctx)
+	}
 	la._LOOK(s, stopState, lookContext, r, NewSet(), NewBitSet(), seeThruPreds, true)
 	return r
 }
@@ -124,8 +110,8 @@ func (la *LL1Analyzer) LOOK(s, stopState, ctx) {
 // outermost context is reached. This parameter has no effect if {@code ctx}
 // is {@code nil}.
 ///
-func (la *LL1Analyzer) _LOOK(s, stopState , ctx, look, lookBusy, calledRuleStack, seeThruPreds, addEOF) {
-	var c = NewATNConfig({state:s, alt:0}, ctx)
+func (la *LL1Analyzer) _LOOK(s, stopState int, ctx *RuleContext, look *Set, lookBusy, calledRuleStack, seeThruPreds, addEOF) {
+	var c = atn.NewATNConfig({state:s, alt:0}, ctx)
 	if (lookBusy.contains(c)) {
 		return
 	}
@@ -164,7 +150,7 @@ func (la *LL1Analyzer) _LOOK(s, stopState , ctx, look, lookBusy, calledRuleStack
 			return
 		}
 	}
-	for j :=0; j<s.transitions.length; j++ {
+	for j := 0; j < len(s.transitions); j++ {
 		var t = s.transitions[j]
 		if (t.constructor == RuleTransition) {
 			if (calledRuleStack.contains(t.target.ruleIndex)) {
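Porting note: the getDecisionLookahead hunk still leaves the JavaScript `var look = []` on the Go side. A sketch of what that allocation could become once fully ported, with a stub IntervalSet standing in for the runtime type:

package main

import "fmt"

// IntervalSet is a stub standing in for the runtime's type.
type IntervalSet struct {
	intervals [][2]int
}

func NewIntervalSet() *IntervalSet { return &IntervalSet{} }

func main() {
	count := 3 // e.g. len(s.transitions) in getDecisionLookahead
	// The JS `var look = []` becomes an explicitly typed, pre-sized slice.
	look := make([]*IntervalSet, count)
	for alt := 0; alt < count; alt++ {
		look[alt] = NewIntervalSet()
	}
	fmt.Println(len(look)) // 3
}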

View File

@@ -3,6 +3,7 @@ package antlr4
 import (
 	"strings"
 	"fmt"
+	"antlr4/atn"
 )
 // A lexer is recognizer that draws input symbols from a character stream.
@@ -22,13 +23,14 @@ type TokenFactorySourcePair struct {
 	inputStream InputStream
 }
 type Lexer struct {
 	Recognizer
 	_input *InputStream
 	_factory *TokenFactory
 	_tokenFactorySourcePair TokenFactorySourcePair
-	_interp *Parser
+	_interp *atn.LexerATNSimulator
 	_token int
 	_tokenStartCharIndex int
 	_tokenStartLine int
@@ -36,7 +38,7 @@ type Lexer struct {
 	_hitEOF int
 	_channel int
 	_type int
-	_modeStack []
+	_modeStack IntStack
 	_mode int
 	_text string
 }
@@ -81,7 +83,7 @@ func NewLexer(input *InputStream) *Lexer {
 	// The token type for the current token///
 	lexer._type = TokenInvalidType
-	lexer._modeStack = []
+	lexer._modeStack = make([]int,0)
 	lexer._mode = LexerDefaultMode
 	// You can set the text for the current token to override what is in
@@ -126,20 +128,20 @@ func (l *Lexer) reset() {
 	l._hitEOF = false
 	l._mode = LexerDefaultMode
-	l._modeStack = []
+	l._modeStack = make([]int, 0)
 	l._interp.reset()
 }
 // Return a token from l source i.e., match a token on the char stream.
-func (l *Lexer) nextToken() {
+func (l *Lexer) nextToken() Token {
 	if (l._input == nil) {
 		panic("nextToken requires a non-nil input stream.")
 	}
 	// Mark start location in char stream so unbuffered streams are
 	// guaranteed at least have text of current token
-	var tokenStartMarker = l._input.mark()
+	// var tokenStartMarker = l._input.mark()
 	try {
 		for (true) {
 			if (l._hitEOF) {
@@ -205,40 +207,41 @@ func (l *Lexer) more() {
 	l._type = LexerMore
 }
-func (l *Lexer) mode(m) {
+func (l *Lexer) mode(m int) {
 	l._mode = m
 }
-func (l *Lexer) pushMode(m) {
+func (l *Lexer) pushMode(m int) {
 	if (l._interp.debug) {
 		fmt.Println("pushMode " + m)
 	}
-	l._modeStack.push(l._mode)
+	l._modeStack.Push(l._mode)
 	l.mode(m)
 }
 func (l *Lexer) popMode() {
-	if (l._modeStack.length == 0) {
+	if ( len(l._modeStack) == 0) {
 		panic("Empty Stack")
 	}
 	if (l._interp.debug) {
 		fmt.Println("popMode back to " + l._modeStack.slice(0, -1))
 	}
-	l.mode(l._modeStack.pop())
+	i, _ := l._modeStack.Pop()
+	l.mode(i)
 	return l._mode
 }
 func (l *Lexer) inputStream() *InputStream {
-	return _l.input
+	return l._input
 }
-func (l *Lexer) setInputStream() {
+func (l *Lexer) setInputStream(input *InputStream) {
 	l._input = nil
-	l._tokenFactorySourcePair = [ l, l._input ]
+	l._tokenFactorySourcePair = TokenFactorySourcePair{l, l._input}
 	l.reset()
 	l._input = input
-	l._tokenFactorySourcePair = [ l, l._input ]
+	l._tokenFactorySourcePair = TokenFactorySourcePair{l, l._input}
 }
@@ -251,7 +254,7 @@ func (l *Lexer) sourceName() string {
 // and getToken (to push tokens into a list and pull from that list
 // rather than a single variable as l implementation does).
 // /
-func (l *Lexer) emitToken(token) {
+func (l *Lexer) emitToken(token int) {
 	l._token = token
 }
@@ -262,19 +265,15 @@ func (l *Lexer) emitToken(token) {
 // custom Token objects or provide a Newfactory.
 // /
 func (l *Lexer) emit() {
-	var t = l._factory.create(l._tokenFactorySourcePair, l._type,
-		l._text, l._channel, l._tokenStartCharIndex, l.getCharIndex() - 1, l._tokenStartLine,
-		l._tokenStartColumn)
+	var t = l._factory.create(l._tokenFactorySourcePair, l._type, l._text, l._channel, l._tokenStartCharIndex, l.getCharIndex() - 1, l._tokenStartLine, l._tokenStartColumn)
 	l.emitToken(t)
 	return t
 }
-func (l *Lexer) emitEOF() {
-	var cpos = l.column
-	var lpos = l.line
-	var eof = l._factory.create(l._tokenFactorySourcePair, TokenEOF,
-		nil, TokenDefaultChannel, l._input.index,
-		l._input.index - 1, lpos, cpos)
+func (l *Lexer) emitEOF() int {
+	var cpos = l.column()
+	var lpos = l.line()
+	var eof = l._factory.create(l._tokenFactorySourcePair, TokenEOF, nil, TokenDefaultChannel, l._input.index, l._input.index - 1, lpos, cpos)
 	l.emitToken(eof)
 	return eof
 }
@@ -288,25 +287,6 @@ Object.defineProperty(Lexer.prototype, "type", {
 	}
 })
-Object.defineProperty(Lexer.prototype, "line", {
-	get : function() {
-		return l._interp.line
-	},
-	set : function(line) {
-		l._interp.line = line
-	}
-})
-Object.defineProperty(Lexer.prototype, "column", {
-	get : function() {
-		return l._interp.column
-	},
-	set : function(column) {
-		l._interp.column = column
-	}
-})
 // What is the index of the current character of lookahead?///
 func (l *Lexer) getCharIndex() {
 	return l._input.index
@@ -314,44 +294,44 @@ func (l *Lexer) getCharIndex() {
 // Return the text matched so far for the current token or any text override.
 //Set the complete text of l token it wipes any previous changes to the text.
-Object.defineProperty(Lexer.prototype, "text", {
-	get : function() {
-		if (l._text != nil) {
-			return l._text
-		} else {
-			return l._interp.getText(l._input)
-		}
-	},
-	set : function(text) {
-		l._text = text
-	}
-})
+//Object.defineProperty(Lexer.prototype, "text", {
+//	get : function() {
+//		if (l._text != nil) {
+//			return l._text
+//		} else {
+//			return l._interp.getText(l._input)
+//		}
+//	},
+//	set : function(text) {
+//		l._text = text
+//	}
+//})
 // Return a list of all Token objects in input char stream.
 // Forces load of all tokens. Does not include EOF token.
 // /
-func (l *Lexer) getAllTokens() {
-	var tokens = []
+func (l *Lexer) getAllTokens() []Token {
+	var tokens = make([]Token, 0)
 	var t = l.nextToken()
-	for (t.type != TokenEOF) {
-		tokens.push(t)
+	for (t.tokenType != TokenEOF) {
+		tokens = append(tokens, t)
 		t = l.nextToken()
 	}
 	return tokens
 }
-func (l *Lexer) notifyListeners(e) {
+func (l *Lexer) notifyListeners(e error) {
 	var start = l._tokenStartCharIndex
 	var stop = l._input.index
 	var text = l._input.getText(start, stop)
 	var msg = "token recognition error at: '" + l.getErrorDisplay(text) + "'"
 	var listener = l.getErrorListenerDispatch()
-	listener.syntaxError(l, nil, l._tokenStartLine,
-		l._tokenStartColumn, msg, e)
+	listener.syntaxError(l, nil, l._tokenStartLine, l._tokenStartColumn, msg, e)
 }
-func (l *Lexer) getErrorDisplay(s) {
-	var d = make([]string,s.length)
-	for i := 0; i < s.length; i++ {
+func (l *Lexer) getErrorDisplay(s []string) string {
+	var d = make([]string,len(s))
+	for i := 0; i < len(s); i++ {
 		d[i] = s[i]
 	}
 	return strings.Join(d, "")
@@ -380,9 +360,9 @@ func (l *Lexer) getCharErrorDisplay(c) string {
 // it all works out. You can instead use the rule invocation stack
 // to do sophisticated error recovery if you are in a fragment rule.
 // /
-func (l *Lexer) recover(re) {
+func (l *Lexer) recover(re error) {
 	if (l._input.LA(1) != TokenEOF) {
-		if re, ok := re.(LexerNoViableAltException); ok {
+		if _, ok := re.(error.LexerNoViableAltException); ok {
 			// skip a char and try again
 			l._interp.consume(l._input)
 		} else {
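Porting note: these hunks call into an IntStack helper (`_modeStack IntStack`, `l._modeStack.Push(...)`, `i, _ := l._modeStack.Pop()`) whose definition is not part of this commit. A minimal definition consistent with those call sites, assumptions included:

package main

import (
	"errors"
	"fmt"
)

// IntStack is a guess at the helper these hunks assume: Push appends,
// and Pop fails on an empty stack, matching the two-value
// `i, _ := l._modeStack.Pop()` call site in popMode.
type IntStack []int

var errEmptyStack = errors.New("stack is empty")

func (s *IntStack) Push(e int) {
	*s = append(*s, e)
}

func (s *IntStack) Pop() (int, error) {
	if len(*s) == 0 {
		return 0, errEmptyStack
	}
	v := (*s)[len(*s)-1]
	*s = (*s)[:len(*s)-1]
	return v, nil
}

func main() {
	var modeStack IntStack = make([]int, 0) // same form the hunk assigns
	modeStack.Push(1)
	i, err := modeStack.Pop()
	fmt.Println(i, err) // 1 <nil>
}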

View File

@@ -2,6 +2,7 @@ package antlr4
 import (
 	"fmt"
+	"antlr4/atn"
 	"antlr4/tree"
 	"antlr4/error"
 )
@@ -34,8 +35,15 @@ func (this *TraceListener) exitEveryRule(ctx) {
 type Parser struct {
 	Recognizer
-	_input *TokenStream
+	_input *TokenSource
 	_errHandler *error.ErrorStrategy
+	_precedenceStack IntStack
+	_ctx RuleContext
+	buildParseTrees bool
+	_tracer bool
+	_parseListeners []tree.ParseTreeListener
+	_syntaxErrors int
 }
 // p.is all the parsing support code essentially most of it is error
@@ -49,8 +57,8 @@ func Parser(input *TokenStream) *Parser {
 	// The error handling strategy for the parser. The default value is a new
 	// instance of {@link DefaultErrorStrategy}.
 	p._errHandler = error.NewDefaultErrorStrategy()
-	p._precedenceStack = []
-	p._precedenceStack.push(0)
+	p._precedenceStack = make([]int, 0)
+	p._precedenceStack.Push(0)
 	// The {@link ParserRuleContext} object for the currently executing rule.
 	// p.is always non-nil during the parsing process.
 	p._ctx = nil
@@ -70,7 +78,7 @@ func Parser(input *TokenStream) *Parser {
 	// incremented each time {@link //notifyErrorListeners} is called.
 	p._syntaxErrors = 0
 	p.setInputStream(input)
-	return this
+	return p
 }
 //Parser.prototype = Object.create(Recognizer.prototype)
@@ -89,12 +97,12 @@ func (p *Parser) reset() {
 	if (p._input != nil) {
 		p._input.seek(0)
 	}
-	p._errHandler.reset(p.
+	p._errHandler.reset()
 	p._ctx = nil
 	p._syntaxErrors = 0
 	p.setTrace(false)
-	p._precedenceStack = []
-	p._precedenceStack.push(0)
+	p._precedenceStack = make([]int, 0)
+	p._precedenceStack.Push(0)
 	if (p._interp != nil) {
 		p._interp.reset()
 	}
@@ -117,9 +125,9 @@ func (p *Parser) reset() {
 // {@code ttype} and the error strategy could not recover from the
 // mismatched symbol
-func (p.*Parser) match(ttype) {
+func (p *Parser) match(ttype) {
 	var t = p.getCurrentToken()
-	if (t.type == ttype) {
+	if (t.tokenType == ttype) {
 		p._errHandler.reportMatch(p.
 		p.consume()
 	} else {
@@ -149,9 +157,9 @@ func (p.*Parser) match(ttype) {
 // a wildcard and the error strategy could not recover from the mismatched
 // symbol
-func (p.*Parser) matchWildcard() {
+func (p *Parser) matchWildcard() {
 	var t = p.getCurrentToken()
-	if (t.type > 0) {
+	if (t.tokenType > 0) {
 		p._errHandler.reportMatch(p.
 		p.consume()
 	} else {
@@ -166,7 +174,7 @@ func (p.*Parser) matchWildcard() {
 	return t
 }
-func (p.*Parser) getParseListeners() {
+func (p *Parser) getParseListeners() {
 	return p._parseListeners || []
 }
@@ -198,9 +206,9 @@ func (p.*Parser) getParseListeners() {
 //
 // @panics nilPointerException if {@code} listener is {@code nil}
 //
-func (p.*Parser) addParseListener(listener) {
+func (p *Parser) addParseListener(listener *tree.ParseTreeListener) {
 	if (listener == nil) {
-		panic "listener"
+		panic("listener")
 	}
 	if (p._parseListeners == nil) {
 		p._parseListeners = []
@@ -215,31 +223,31 @@ func (p.*Parser) addParseListener(listener) {
 // listener, p.method does nothing.</p>
 // @param listener the listener to remove
 //
-func (p.*Parser) removeParseListener(listener) {
+func (p *Parser) removeParseListener(listener *tree.ParseTreeListener) {
 	if (p._parseListeners != nil) {
 		var idx = p._parseListeners.indexOf(listener)
 		if (idx >= 0) {
 			p._parseListeners.splice(idx, 1)
 		}
-		if (p._parseListeners.length == 0) {
+		if (len(p._parseListeners) == 0) {
 			p._parseListeners = nil
 		}
 	}
 }
 // Remove all parse listeners.
-func (p.*Parser) removeParseListeners() {
+func (p *Parser) removeParseListeners() {
 	p._parseListeners = nil
 }
 // Notify any parse listeners of an enter rule event.
-func (p.*Parser) triggerEnterRuleEvent() {
+func (p *Parser) triggerEnterRuleEvent() {
 	if (p._parseListeners != nil) {
 		var ctx = p._ctx
-		p._parseListeners.map(function(listener) {
+		for _,listener := range p._parseListeners {
 			listener.enterEveryRule(ctx)
 			ctx.enterRule(listener)
-		})
+		}
 	}
 }
@@ -248,23 +256,26 @@ func (p.*Parser) triggerEnterRuleEvent() {
 //
 // @see //addParseListener
 //
-func (p.*Parser) triggerExitRuleEvent() {
+func (p *Parser) triggerExitRuleEvent() {
 	if (p._parseListeners != nil) {
 		// reverse order walk of listeners
-		var ctx = p._ctx
-		p._parseListeners.slice(0).reverse().map(function(listener) {
+		ctx := p._ctx
+		l := len(p._parseListeners) - 1
+		for i := range p._parseListeners {
+			listener := p._parseListeners[l-i]
 			ctx.exitRule(listener)
 			listener.exitEveryRule(ctx)
-		})
+		}
 	}
 }
-func (p.*Parser) getTokenFactory() {
+func (p *Parser) getTokenFactory() {
 	return p._input.tokenSource._factory
 }
 // Tell our token source and error strategy about a Newway to create tokens.//
-func (p.*Parser) setTokenFactory(factory) {
+func (p *Parser) setTokenFactory(factory) {
 	p._input.tokenSource._factory = factory
 }
@@ -274,7 +285,7 @@ func (p.*Parser) setTokenFactory(factory) {
 // @panics UnsupportedOperationException if the current parser does not
 // implement the {@link //getSerializedATN()} method.
 //
-func (p.*Parser) getATNWithBypassAlts() {
+func (p *Parser) getATNWithBypassAlts() {
 	var serializedAtn = p.getSerializedATN()
 	if (serializedAtn == nil) {
 		panic "The current parser does not support an ATN with bypass alternatives."
@@ -303,7 +314,7 @@ func (p.*Parser) getATNWithBypassAlts() {
 //var Lexer = require('./Lexer').Lexer
-func (p.*Parser) compileParseTreePattern(pattern, patternRuleIndex, lexer) {
+func (p *Parser) compileParseTreePattern(pattern, patternRuleIndex, lexer) {
 	lexer = lexer || nil
 	if (lexer == nil) {
 		if (p.getTokenStream() != nil) {
@@ -320,20 +331,20 @@ func (p.*Parser) compileParseTreePattern(pattern, patternRuleIndex, lexer) {
 	return m.compile(pattern, patternRuleIndex)
 }
-func (p.*Parser) getInputStream() {
+func (p *Parser) getInputStream() {
 	return p.getTokenStream()
 }
-func (p.*Parser) setInputStream(input) {
+func (p *Parser) setInputStream(input) {
 	p.setTokenStream(input)
 }
-func (p.*Parser) getTokenStream() {
+func (p *Parser) getTokenStream() {
 	return p._input
 }
 // Set the token stream and reset the parser.//
-func (p.*Parser) setTokenStream(input) {
+func (p *Parser) setTokenStream(input) {
 	p._input = nil
 	p.reset()
 	p._input = input
@@ -342,11 +353,11 @@ func (p.*Parser) setTokenStream(input) {
 // Match needs to return the current input symbol, which gets put
 // into the label for the associated token ref e.g., x=ID.
 //
-func (p.*Parser) getCurrentToken() {
+func (p *Parser) getCurrentToken() int {
 	return p._input.LT(1)
 }
-func (p.*Parser) notifyErrorListeners(msg, offendingToken, err) {
+func (p *Parser) notifyErrorListeners(msg, offendingToken, err) {
 	offendingToken = offendingToken || nil
 	err = err || nil
 	if (offendingToken == nil) {
@@ -380,7 +391,7 @@ func (p.*Parser) notifyErrorListeners(msg, offendingToken, err) {
 // {@link ParseTreeListener//visitErrorNode} is called on any parse
 // listeners.
 //
-func (p.*Parser) consume() {
+func (p *Parser) consume() {
 	var o = p.getCurrentToken()
 	if (o.type != TokenEOF) {
 		p.getInputStream().consume()
@@ -403,7 +414,7 @@ func (p.*Parser) consume() {
 	return o
 }
-func (p.*Parser) addContextToParseTree() {
+func (p *Parser) addContextToParseTree() {
 	// add current context to parent if we have a parent
 	if (p._ctx.parentCtx != nil) {
 		p._ctx.parentCtx.addChild(p._ctx)
@@ -413,7 +424,7 @@ func (p.*Parser) addContextToParseTree() {
 // Always called by generated parsers upon entry to a rule. Access field
 // {@link //_ctx} get the current context.
-func (p.*Parser) enterRule(localctx, state, ruleIndex) {
+func (p *Parser) enterRule(localctx, state, ruleIndex) {
 	p.state = state
 	p._ctx = localctx
 	p._ctx.start = p._input.LT(1)
@@ -425,7 +436,7 @@ func (p.*Parser) enterRule(localctx, state, ruleIndex) {
 	}
 }
-func (p.*Parser) exitRule() {
+func (p *Parser) exitRule() {
 	p._ctx.stop = p._input.LT(-1)
 	// trigger event on _ctx, before it reverts to parent
 	if (p._parseListeners != nil) {
@@ -435,7 +446,7 @@ func (p.*Parser) exitRule() {
 	p._ctx = p._ctx.parentCtx
 }
-func (p.*Parser) enterOuterAlt(localctx, altNum) {
+func (p *Parser) enterOuterAlt(localctx, altNum) {
 	// if we have Newlocalctx, make sure we replace existing ctx
 	// that is previous child of parse tree
 	if (p.buildParseTrees && p._ctx != localctx) {
@@ -452,7 +463,7 @@ func (p.*Parser) enterOuterAlt(localctx, altNum) {
 // @return The precedence level for the top-most precedence rule, or -1 if
 // the parser context is not nested within a precedence rule.
-func (p.*Parser) getPrecedence() {
+func (p *Parser) getPrecedence() {
 	if (p._precedenceStack.length == 0) {
 		return -1
 	} else {
@@ -460,7 +471,7 @@ func (p.*Parser) getPrecedence() {
 	}
 }
-func (p.*Parser) enterRecursionRule(localctx, state, ruleIndex,
+func (p *Parser) enterRecursionRule(localctx, state, ruleIndex,
 		precedence) {
 	p.state = state
 	p._precedenceStack.push(precedence)
@@ -475,7 +486,7 @@ func (p.*Parser) enterRecursionRule(localctx, state, ruleIndex,
 //
 // Like {@link //enterRule} but for recursive rules.
-func (p.*Parser) pushNewRecursionContext(localctx, state, ruleIndex) {
+func (p *Parser) pushNewRecursionContext(localctx, state, ruleIndex) {
 	var previous = p._ctx
 	previous.parentCtx = localctx
 	previous.invokingState = state
@@ -492,7 +503,7 @@ func (p.*Parser) pushNewRecursionContext(localctx, state, ruleIndex) {
 	}
 }
-func (p.*Parser) unrollRecursionContexts(parentCtx) {
+func (p *Parser) unrollRecursionContexts(parentCtx) {
 	p._precedenceStack.pop()
 	p._ctx.stop = p._input.LT(-1)
 	var retCtx = p._ctx // save current ctx (return value)
@@ -513,7 +524,7 @@ func (p.*Parser) unrollRecursionContexts(parentCtx) {
 	}
 }
-func (p.*Parser) getInvokingContext(ruleIndex) {
+func (p *Parser) getInvokingContext(ruleIndex) {
 	var ctx = p._ctx
 	for (ctx != nil) {
 		if (ctx.ruleIndex == ruleIndex) {
@@ -524,11 +535,11 @@ func (p.*Parser) getInvokingContext(ruleIndex) {
 	return nil
 }
-func (p.*Parser) precpred(localctx, precedence) {
+func (p *Parser) precpred(localctx, precedence) {
 	return precedence >= p._precedenceStack[p._precedenceStack.length-1]
 }
-func (p.*Parser) inContext(context) {
+func (p *Parser) inContext(context) {
 	// TODO: useful in parser?
 	return false
 }
@@ -547,7 +558,7 @@ func (p.*Parser) inContext(context) {
 // @return {@code true} if {@code symbol} can follow the current state in
 // the ATN, otherwise {@code false}.
-func (p.*Parser) isExpectedToken(symbol) {
+func (p *Parser) isExpectedToken(symbol) {
 	var atn = p._interp.atn
 	var ctx = p._ctx
 	var s = atn.states[p.state]
@@ -580,18 +591,18 @@ func (p.*Parser) isExpectedToken(symbol) {
 //
 // @see ATN//getExpectedTokens(int, RuleContext)
 //
-func (p.*Parser) getExpectedTokens() {
+func (p *Parser) getExpectedTokens() {
 	return p._interp.atn.getExpectedTokens(p.state, p._ctx)
 }
-func (p.*Parser) getExpectedTokensWithinCurrentRule() {
+func (p *Parser) getExpectedTokensWithinCurrentRule() {
 	var atn = p._interp.atn
 	var s = atn.states[p.state]
 	return atn.nextTokens(s)
 }
 // Get a rule's index (i.e., {@code RULE_ruleName} field) or -1 if not found.//
-func (p.*Parser) getRuleIndex(ruleName) {
+func (p *Parser) getRuleIndex(ruleName) {
 	var ruleIndex = p.getRuleIndexMap()[ruleName]
 	if (ruleIndex != nil) {
 		return ruleIndex
@@ -607,7 +618,7 @@ func (p.*Parser) getRuleIndex(ruleName) {
 //
 // p.is very useful for error messages.
 //
-func (p.*Parser) getRuleInvocationStack(p) {
+func (p *Parser) getRuleInvocationStack(p) {
 	p = p || nil
 	if (p == nil) {
 		p = p._ctx
@@ -627,11 +638,11 @@ func (p.*Parser) getRuleInvocationStack(p) {
 }
 // For debugging and other purposes.//
-func (p.*Parser) getDFAStrings() {
+func (p *Parser) getDFAStrings() {
 	return p._interp.decisionToDFA.toString()
 }
 // For debugging and other purposes.//
-func (p.*Parser) dumpDFA() {
+func (p *Parser) dumpDFA() {
 	var seenOne = false
 	for i := 0 i < p._interp.decisionToDFA.length i++) {
 		var dfa = p._interp.decisionToDFA[i]
@@ -653,14 +664,14 @@ func (p.*Parser) dumpDFA() {
 	" }\r\n" +
 */
-func (p.*Parser) getSourceName() {
+func (p *Parser) getSourceName() {
 	return p._input.sourceName
 }
 // During a parse is sometimes useful to listen in on the rule entry and exit
 // events as well as token matches. p.is for quick and dirty debugging.
 //
-func (p.*Parser) setTrace(trace) {
+func (p *Parser) setTrace(trace) {
 	if (!trace) {
 		p.removeParseListener(p._tracer)
 		p._tracer = nil
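Porting note: several Parser hunks still index the precedence stack JavaScript-style (`p._precedenceStack.length-1`, `.pop()`). A standalone sketch of the Go form of getPrecedence, reusing the IntStack shape assumed in the Lexer note above:

package main

import "fmt"

// IntStack repeats the shape assumed earlier.
type IntStack []int

// Parser is trimmed to the one field this sketch needs.
type Parser struct {
	_precedenceStack IntStack
}

// getPrecedence is the Go form of the JS `length-1` access: top of
// the stack, or -1 when not inside a precedence rule.
func (p *Parser) getPrecedence() int {
	if len(p._precedenceStack) == 0 {
		return -1
	}
	return p._precedenceStack[len(p._precedenceStack)-1]
}

func main() {
	p := &Parser{_precedenceStack: IntStack{0, 4}}
	fmt.Println(p.getPrecedence()) // 4
}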

View File

@@ -111,7 +111,7 @@ func New<parser.name>(input) <parser.name> {
 	// TODO could be package level variable
-	var deserializer = atn.ATNDeserializer()
+	var deserializer = atn.NewATNDeserializer()
 	var deserializedAtn = deserializer.deserialize(serializedATN)
 	var decisionToDFA = make([]dfa.DFA,len(deserializedAtn.decisionToState))
@@ -826,7 +826,7 @@ func New<lexer.name>(input *antlr4.TokenStream) <lexer.name> {
 	// TODO could be package level variables
-	var deserializer = atn.ATNDeserializer()
+	var deserializer = atn.NewATNDeserializer()
 	var deserializedAtn = deserializer.deserialize(serializedATN)
 	var decisionToDFA = make([]dfa.DFA,len(deserializedAtn.decisionToState))
@@ -839,7 +839,7 @@ func New<lexer.name>(input *antlr4.TokenStream) <lexer.name> {
 	antlr4.InitLexer(lex, input);
-	lex._interp = atn.NewLexerATNSimulator(lex, atn, decisionsToDFA, antlr4.NewPredictionContextCache())
+	lex._interp = atn.NewLexerATNSimulator(lex, atn, decisionToDFA, antlr4.NewPredictionContextCache())
 	lex.modeNames = [...]string{ <lexer.modes:{m| "<m>"}; separator=", ", wrap, anchor> }
 	lex.literalNames = [...]string{ <lexer.literalNames:{t | <t>}; null="nil", separator=", ", wrap, anchor> }
 	lex.symbolicNames = [...]string{ <lexer.symbolicNames:{t | <t>}; null="nil", separator=", ", wrap, anchor> }
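Porting note: the template fixes here are the rename to atn.NewATNDeserializer and the decisionsToDFA/decisionToDFA typo. A stubbed, self-contained sketch of the initialization the template expands to (every type below is a fake stand-in for the runtime's atn/dfa packages):

package main

import "fmt"

// ATN fakes the deserialized automaton: one entry per decision state.
type ATN struct {
	decisionToState []int
}

type ATNDeserializer struct{}

// NewATNDeserializer mirrors the constructor name the template now uses.
func NewATNDeserializer() *ATNDeserializer { return &ATNDeserializer{} }

func (d *ATNDeserializer) deserialize(data string) *ATN {
	return &ATN{decisionToState: make([]int, 4)} // placeholder result
}

type DFA struct{}

func main() {
	serializedATN := "..." // elided, as in the real template
	deserializer := NewATNDeserializer()
	deserializedAtn := deserializer.deserialize(serializedATN)
	// One DFA per decision state; the corrected spelling is decisionToDFA.
	decisionToDFA := make([]DFA, len(deserializedAtn.decisionToState))
	fmt.Println(len(decisionToDFA)) // 4
}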