More work up to LL1Analyzer

Peter Boyer 2015-12-15 15:50:44 -05:00
parent 7d07d7514e
commit d3d82b920a
14 changed files with 223 additions and 193 deletions

View File

@ -10,7 +10,7 @@
// {@link Token//HIDDEN_CHANNEL}, use a filtering token stream such a
// {@link CommonTokenStream}.</p>
package antlr
package antlr4
type TokenStream interface {
@ -98,7 +98,7 @@ func (bt *BufferedTokenStream) consume() {
skipEofCheck = bt.index < len(bt.tokens) - 1
} else {
// no EOF token in tokens. skip check if p indexes a fetched token.
skipEofCheck = bt.index < bt.tokens.length
skipEofCheck = bt.index < len(bt.tokens)
}
} else {
// not yet initialized
@ -119,7 +119,7 @@ func (bt *BufferedTokenStream) consume() {
// @see //get(int i)
// /
func (bt *BufferedTokenStream) sync(i) {
var n = i - bt.tokens.length + 1 // how many more elements we need?
var n = i - len(bt.tokens) + 1 // how many more elements we need?
if (n > 0) {
var fetched = bt.fetch(n)
return fetched >= n
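// A minimal sketch of sync once this file is fully ported, assuming
// bt.tokens is a []Token slice; the typed signature and bool return are
// assumptions, mirroring the JavaScript original.
func (bt *BufferedTokenStream) sync(i int) bool {
	n := i - len(bt.tokens) + 1 // how many more elements do we need?
	if n > 0 {
		return bt.fetch(n) >= n
	}
	return true
}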
@ -131,13 +131,14 @@ func (bt *BufferedTokenStream) sync(i) {
//
// @return The actual number of elements added to the buffer.
// /
func (bt *BufferedTokenStream) fetch(n) {
func (bt *BufferedTokenStream) fetch(n int) int {
if (bt.fetchedEOF) {
return 0
}
for i := 0; i < n; i++ {
var t = bt.tokenSource.nextToken()
t.tokenIndex = bt.tokens.length
t.tokenIndex = len(bt.tokens)
bt.tokens.push(t)
if (t.type == Token.EOF) {
bt.fetchedEOF = true
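// A sketch of fetch completing the loop above: push becomes append, and
// the .type field needs a new name because "type" is a Go keyword; both
// tokenType and the TokenEOF constant are assumptions here.
func (bt *BufferedTokenStream) fetch(n int) int {
	if bt.fetchedEOF {
		return 0
	}
	for i := 0; i < n; i++ {
		t := bt.tokenSource.nextToken()
		t.tokenIndex = len(bt.tokens)
		bt.tokens = append(bt.tokens, t)
		if t.tokenType == TokenEOF {
			bt.fetchedEOF = true
			return i + 1
		}
	}
	return n
}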
@ -157,8 +158,8 @@ func (bt *BufferedTokenStream) getTokens(start, stop, types) {
}
bt.lazyInit()
var subset = []
if (stop >= bt.tokens.length) {
stop = bt.tokens.length - 1
if (stop >= len(bt.tokens)) {
stop = len(bt.tokens) - 1
}
for i := start; i < stop; i++ {
var t = bt.tokens[i]
@ -193,9 +194,9 @@ func (bt *BufferedTokenStream) LT(k) {
}
var i = bt.index + k - 1
bt.sync(i)
if (i >= bt.tokens.length) { // return EOF token
if (i >= len(bt.tokens)) { // return EOF token
// EOF must be last token
return bt.tokens[bt.tokens.length - 1]
return bt.tokens[len(bt.tokens) - 1]
}
return bt.tokens[i]
}
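// The recurring translations in this port, for reference; each left-hand
// form still appears on untouched lines in this file:
//   x.length         -> len(x)
//   x.push(v)        -> x = append(x, v)
//   null / undefined -> nil
//   c ? a : b        -> if c { ... } else { ... } (Go has no ternary)
// e.g. bt.tokens.push(t) becomes bt.tokens = append(bt.tokens, t).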
@ -242,7 +243,7 @@ func (bt *BufferedTokenStream) setTokenSource(tokenSource) {
// /
func (bt *BufferedTokenStream) nextTokenOnChannel(i, channel) {
bt.sync(i)
if (i >= bt.tokens.length) {
if (i >= len(bt.tokens)) {
return -1
}
var token = bt.tokens[i]
@ -276,14 +277,14 @@ func (bt *BufferedTokenStream) getHiddenTokensToRight(tokenIndex,
channel = -1
}
bt.lazyInit()
if (bt.tokenIndex < 0 || tokenIndex >= bt.tokens.length) {
panic( "" + tokenIndex + " not in 0.." + bt.tokens.length - 1
if (tokenIndex < 0 || tokenIndex >= len(bt.tokens)) {
panic( strconv.Itoa(tokenIndex) + " not in 0.." + strconv.Itoa(len(bt.tokens) - 1) )
}
var nextOnChannel = bt.nextTokenOnChannel(tokenIndex + 1,
Lexer.DEFAULT_TOKEN_CHANNEL)
var from_ = tokenIndex + 1
// if none onchannel to right, nextOnChannel=-1 so set to = last token
var to = nextOnChannel == -1 ? bt.tokens.length - 1 : nextOnChannel
var to int
if nextOnChannel == -1 {
to = len(bt.tokens) - 1
} else {
to = nextOnChannel
}
return bt.filterForChannel(from_, to, channel)
}
@ -296,8 +297,8 @@ func (bt *BufferedTokenStream) getHiddenTokensToLeft(tokenIndex,
channel = -1
}
bt.lazyInit()
if (tokenIndex < 0 || tokenIndex >= bt.tokens.length) {
panic( "" + tokenIndex + " not in 0.." + bt.tokens.length - 1
if (tokenIndex < 0 || tokenIndex >= len(bt.tokens)) {
panic( strconv.Itoa(tokenIndex) + " not in 0.." + strconv.Itoa(len(bt.tokens) - 1) )
}
var prevOnChannel = bt.previousTokenOnChannel(tokenIndex - 1,
Lexer.DEFAULT_TOKEN_CHANNEL)
@ -337,7 +338,7 @@ func (bt *BufferedTokenStream) getText(interval) string {
bt.lazyInit()
bt.fill()
if (interval == nil) {
interval = new Interval(0, bt.tokens.length - 1)
interval = new Interval(0, len(bt.tokens) - 1)
}
var start = interval.start
if (start instanceof Token) {
@ -350,8 +351,8 @@ func (bt *BufferedTokenStream) getText(interval) string {
if (start == nil || stop == nil || start < 0 || stop < 0) {
return ""
}
if (stop >= bt.tokens.length) {
stop = bt.tokens.length - 1
if (stop >= len(bt.tokens)) {
stop = len(bt.tokens) - 1
}
var s = ""
for i := start; i < stop + 1; i++ {

View File

@ -3,7 +3,7 @@
// {@link CommonToken} objects.
//
package antlr
package antlr4
type TokenFactory interface {
}

View File

@ -23,7 +23,7 @@
// channel.</p>
///
package antlr
package antlr4
type CommonTokenStream struct {
BufferedTokenStream

View File

@ -1,4 +1,4 @@
package antlr
package antlr4
//
// This is an InputStream that is loaded from a file all at once

View File

@ -1,4 +1,4 @@
package antlr
package antlr4
// Vacuum all input from a string and then treat it like a buffer.

View File

@ -1,4 +1,9 @@
package antlr
package antlr4
import (
"strings"
"strconv"
)
type Interval struct {
start int
@ -20,46 +25,54 @@ func (i *Interval) contains(item int) {
func (i *Interval) toString() {
if(i.start==i.stop-1) {
return i.start.toString()
return strconv.Itoa(i.start)
} else {
return i.start.toString() + ".." + (i.stop-1).toString()
return strconv.Itoa(i.start) + ".." + strconv.Itoa(i.stop-1)
}
}
Object.defineProperty(Interval.prototype, "length", {
get : function() {
return i.stop - i.start
}
})
func (i *Interval) length() int {
return i.stop - i.start
}
type IntervalSet struct {
i.intervals = null
i.readOnly = false
intervals []Interval
readOnly bool
}
func (i *IntervalSet) first(v) {
if (i.intervals == null || i.intervals.length==0) {
func NewIntervalSet() *IntervalSet {
i := new(IntervalSet)
i.intervals = nil
i.readOnly = false
return i
}
func (i *IntervalSet) first(v int) int {
if (i.intervals == nil || len(i.intervals)==0) {
return Token.INVALID_TYPE
} else {
return i.intervals[0].start
}
}
func (i *IntervalSet) addOne(v) {
i.addInterval(new Interval(v, v + 1))
func (i *IntervalSet) addOne(v int) {
i.addInterval(NewInterval(v, v + 1))
}
func (i *IntervalSet) addRange(l, h) {
i.addInterval(new Interval(l, h + 1))
func (i *IntervalSet) addRange(l int, h int) {
i.addInterval(NewInterval(l, h + 1))
}
func (i *IntervalSet) addInterval(v) {
if (i.intervals == null) {
i.intervals = []
i.intervals.push(v)
func (i *IntervalSet) addInterval(v Interval) {
if (i.intervals == nil) {
i.intervals = make([]Interval, 0)
i.intervals = append(i.intervals, v)
} else {
// find insert pos
for (var k = 0 k < i.intervals.length k++) {
for k := 0; k < len(i.intervals); k++ {
var i = i.intervals[k]
// distinct range -> insert
if (v.stop < i.start) {
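// JavaScript splices the new interval in at this position; Go has no
// splice, so a sketch of sorted insertion with append/copy (the helper
// name is illustrative, not part of the commit):
func insertIntervalAt(intervals []Interval, k int, v Interval) []Interval {
	intervals = append(intervals, Interval{}) // grow the slice by one
	copy(intervals[k+1:], intervals[k:])      // shift the tail right
	intervals[k] = v
	return intervals
}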
@ -83,19 +96,19 @@ func (i *IntervalSet) addInterval(v) {
}
}
func (i *IntervalSet) addSet(other) {
if (other.intervals != null) {
for (var k = 0 k < other.intervals.length k++) {
func (i *IntervalSet) addSet(other IntervalSet) *IntervalSet {
if (other.intervals != nil) {
for k := 0; k < len(other.intervals); k++ {
var v = other.intervals[k]
i.addInterval(new Interval(i.start, i.stop))
i.addInterval(NewInterval(v.start, v.stop))
}
}
return i
}
func (i *IntervalSet) reduce(k) {
func (i *IntervalSet) reduce(k int) {
// only need to reduce if k is not the last
if (k < i.intervals.length - 1) {
if (k < len(i.intervals) - 1) {
var l = i.intervals[k]
var r = i.intervals[k + 1]
// if r contained in l
@ -103,26 +116,26 @@ func (i *IntervalSet) reduce(k) {
i.intervals.pop(k + 1)
i.reduce(k)
} else if (l.stop >= r.start) {
i.intervals[k] = new Interval(l.start, r.stop)
i.intervals[k] = NewInterval(l.start, r.stop)
i.intervals.pop(k + 1)
}
}
}
func (i *IntervalSet) complement(start, stop) {
var result = new IntervalSet()
result.addInterval(new Interval(start,stop+1))
for(var i=0 i<i.intervals.length i++) {
result.removeRange(i.intervals[i])
func (is *IntervalSet) complement(start int, stop int) *IntervalSet {
var result = NewIntervalSet()
result.addInterval(NewInterval(start,stop+1))
for i := 0; i< len(is.intervals); i++ {
result.removeRange(is.intervals[i])
}
return result
}
func (i *IntervalSet) contains(item) {
if (i.intervals == null) {
func (i *IntervalSet) contains(item int) bool {
if (i.intervals == nil) {
return false
} else {
for (var k = 0 k < i.intervals.length k++) {
for k := 0; k < len(i.intervals); k++ {
if(i.intervals[k].contains(item)) {
return true
}
@ -131,20 +144,22 @@ func (i *IntervalSet) contains(item) {
}
}
Object.defineProperty(IntervalSet.prototype, "length", {
get : function() {
var len = 0
i.intervals.map(function(i) {len += i.length})
return len
}
})
func (is *IntervalSet) length() int {
n := 0
for _, v := range is.intervals {
n += v.length()
}
return n
}
func (i *IntervalSet) removeRange(v) {
if(v.start==v.stop-1) {
func (i *IntervalSet) removeRange(v Interval) {
if v.start==v.stop-1 {
i.removeOne(v.start)
} else if (i.intervals!=nil) {
var k = 0
for n :=0 n<i.intervals.length n++) {
k:= 0
for n :=0; n<len( i.intervals ); n++ {
var i = i.intervals[k]
// intervals are ordered
if (v.stop<=i.start) {
@ -175,47 +190,49 @@ func (i *IntervalSet) removeRange(v) {
}
}
func (i *IntervalSet) removeOne(v) {
if (i.intervals != null) {
for (var k = 0 k < i.intervals.length k++) {
var i = i.intervals[k]
// intervals is ordered
if (v < i.start) {
func (is *IntervalSet) removeOne(v *Interval) {
if(v.start==v.stop-1) {
is.removeOne(v.start)
} else if (is.intervals!=nil) {
var k = 0
for n := 0; n < len(is.intervals); n++ {
i := is.intervals[k]
// intervals are ordered
if v.stop<=i.start {
return
}
// check for single value range
else if (v == i.start && v == i.stop - 1) {
i.intervals.splice(k, 1)
// check for including range, split it
else if(v.start>i.start && v.stop<i.stop) {
is.intervals[k] = NewInterval(i.start, v.start)
var x = NewInterval(v.stop, i.stop)
is.intervals = append(is.intervals[:k], append([]Interval{x}, is.intervals[k:]...)...)
return
}
// check for included range, remove it
else if(v.start<=i.start && v.stop>=i.stop) {
is.intervals = append(is.intervals[:k], is.intervals[k+1:]...)
k = k - 1; // need another pass
}
// check for lower boundary
else if (v == i.start) {
i.intervals[k] = new Interval(i.start + 1, i.stop)
return
else if(v.start<i.stop) {
is.intervals[k] = NewInterval(i.start, v.start)
}
// check for upper boundary
else if (v == i.stop - 1) {
i.intervals[k] = new Interval(i.start, i.stop - 1)
return
}
// split existing range
else if (v < i.stop - 1) {
var x = new Interval(i.start, v)
i.start = v + 1
i.intervals.splice(k, 0, x)
return
else if(v.stop<i.stop) {
is.intervals[k] = NewInterval(v.stop, i.stop)
}
k += 1
}
}
}
func (i *IntervalSet) toString(literalNames, symbolicNames, elemsAreChar) {
literalNames = literalNames || null
symbolicNames = symbolicNames || null
func (i *IntervalSet) toString(literalNames []string, symbolicNames []string, elemsAreChar bool) string {
// Go has no default parameters; nil slices and false are the zero-value
// defaults, so the JavaScript fallbacks above are dropped
if (i.intervals == null) {
if (i.intervals == nil) {
return "{}"
} else if(literalNames!=null || symbolicNames!=null) {
} else if(literalNames!=nil || symbolicNames!=nil) {
return i.toTokenString(literalNames, symbolicNames)
} else if(elemsAreChar) {
return i.toCharString()
@ -224,22 +241,23 @@ func (i *IntervalSet) toString(literalNames, symbolicNames, elemsAreChar) {
}
}
func (i *IntervalSet) toCharString() {
var names = []
for (var i = 0 i < i.intervals.length i++) {
var v = i.intervals[i]
func (is *IntervalSet) toCharString() {
var names = make([]string, 0, len(is.intervals))
for i := 0; i < len( is.intervals ); i++ {
var v = is.intervals[i]
if(v.stop==v.start+1) {
if ( v.start==Token.EOF ) {
names.push("<EOF>")
append(names, "<EOF>")
} else {
names.push("'" + String.fromCharCode(v.start) + "'")
append(names, ("'" + String.fromCharCode(v.start) + "'"))
}
} else {
names.push("'" + String.fromCharCode(v.start) + "'..'" + String.fromCharCode(v.stop-1) + "'")
append(names, "'" + String.fromCharCode(v.start) + "'..'" + String.fromCharCode(v.stop-1) + "'")
}
}
if (names.length > 1) {
return "{" + names.join(", ") + "}"
if (len(names) > 1) {
return "{" + strings.Join(names, ", ") + "}"
} else {
return names[0]
}
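// A usage sketch for the pieces ported above, assuming same-package
// access to the unexported constructors and an fmt import:
func exampleIntervalSet() {
	s := NewIntervalSet()
	s.addOne(5)        // stored as the half-open interval [5, 6)
	s.addRange(10, 12) // inclusive bounds, stored as [10, 13)
	fmt.Println(s.length()) // prints 4: one element plus the three in 10..12
}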
@ -248,7 +266,7 @@ func (i *IntervalSet) toCharString() {
func (i *IntervalSet) toIndexString() {
var names = []
for (var i = 0 i < i.intervals.length i++) {
for k := 0; k < len( i.intervals ); k++ {
var v = i.intervals[k]
if(v.stop==v.start+1) {
if ( v.start==Token.EOF ) {
@ -261,18 +279,18 @@ func (i *IntervalSet) toIndexString() {
}
}
if (names.length > 1) {
return "{" + names.join(", ") + "}"
return "{" + string.Join(names, ", ") + "}"
} else {
return names[0]
}
}
func (i *IntervalSet) toTokenString(literalNames, symbolicNames) {
func (i *IntervalSet) toTokenString(literalNames []string, symbolicNames []string) string {
var names = []
for (var i = 0 i < i.intervals.length i++) {
for k := 0; k < len( i.intervals ); k++ {
var v = i.intervals[k]
for (var j = v.start j < v.stop j++) {
for j := v.start; j < v.stop; j++ {
names.push(i.elementName(literalNames, symbolicNames, j))
}
}
@ -283,7 +301,7 @@ func (i *IntervalSet) toTokenString(literalNames, symbolicNames) {
}
}
func (i *IntervalSet) elementName(literalNames, symbolicNames, a) {
func (i *IntervalSet) elementName(literalNames []string, symbolicNames []string, a int) string {
if (a == Token.EOF) {
return "<EOF>"
} else if (a == Token.EPSILON) {

View File

@ -1,23 +1,31 @@
package antlr
package antlr4
var Set = require('./Utils').Set
var BitSet = require('./Utils').BitSet
var Token = require('./Token').Token
var ATNConfig = require('./atn/ATNConfig').ATNConfig
var Interval = require('./IntervalSet').Interval
var IntervalSet = require('./IntervalSet').IntervalSet
var RuleStopState = require('./atn/ATNState').RuleStopState
var RuleTransition = require('./atn/Transition').RuleTransition
var NotSetTransition = require('./atn/Transition').NotSetTransition
var WildcardTransition = require('./atn/Transition').WildcardTransition
var AbstractPredicateTransition = require('./atn/Transition').AbstractPredicateTransition
import (
"antlr4/atn"
)
var pc = require('./PredictionContext')
var predictionContextFromRuleContext = pc.predictionContextFromRuleContext
var PredictionContext = pc.PredictionContext
var SingletonPredictionContext = pc.SingletonPredictionContext
//var Set = require('./Utils').Set
//var BitSet = require('./Utils').BitSet
//var Token = require('./Token').Token
//var ATNConfig = require('./atn/ATNConfig').ATNConfig
//var Interval = require('./IntervalSet').Interval
//var IntervalSet = require('./IntervalSet').IntervalSet
//var RuleStopState = require('./atn/ATNState').RuleStopState
//var RuleTransition = require('./atn/Transition').RuleTransition
//var NotSetTransition = require('./atn/Transition').NotSetTransition
//var WildcardTransition = require('./atn/Transition').WildcardTransition
//var AbstractPredicateTransition = require('./atn/Transition').AbstractPredicateTransition
//
//var pc = require('./PredictionContext')
//var predictionContextFromRuleContext = pc.predictionContextFromRuleContext
//var PredictionContext = pc.PredictionContext
//var SingletonPredictionContext = pc.SingletonPredictionContext
func LL1Analyzer (atn) {
type LL1Analyzer struct {
atn atn.ATN
}
func NewLL1Analyzer (atn atn.ATN) *LL1Analyzer {
la := new(LL1Analyzer)
la.atn = atn
return la
}
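// Usage sketch for the constructor above; how a concrete atn.ATN value is
// obtained is outside this commit, so theATN is hypothetical:
//   analyzer := NewLL1Analyzer(theATN)
// A New* constructor replacing a JavaScript constructor function is the
// same pattern as NewIntervalSet and NewInterval elsewhere in this commit.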

View File

@ -1,7 +1,8 @@
package antlr
package antlr4
import (
"strings"
"fmt"
)
// A lexer is recognizer that draws input symbols from a character stream.
@ -14,12 +15,17 @@ type TokenSource interface {
}
type TokenFactorySourcePair struct {
factory TokenFactory
inputStream InputStream
}
type Lexer struct {
Recognizer
_input
_input *InputStream
_factory
_tokenFactorySourcePair
_tokenFactorySourcePair TokenFactorySourcePair
_interp
_token int
_tokenStartCharIndex int
@ -28,29 +34,29 @@ type Lexer struct {
_hitEOF int
_channel int
_type int
lexer._modeStack
lexer._mode int
lexer._text string
_modeStack
_mode int
_text string
}
func NewLexer(input InputStream) {
func NewLexer(input *InputStream) *Lexer {
lexer := new(Lexer)
lexer._input = input
lexer._factory = CommonTokenFactory.DEFAULT
lexer._tokenFactorySourcePair = [ l, input ]
lexer._tokenFactorySourcePair = TokenFactorySourcePair{lexer, input}
lexer._interp = null // child classes must populate l
lexer._interp = nil // child classes must populate l
// The goal of all lexer rules/methods is to create a token object.
// l is an instance variable as multiple rules may collaborate to
// create a single token. nextToken will return l object after
// matching lexer rule(s). If you subclass to allow multiple token
// emissions, then set l to the last token to be matched or
// something nonnull so that the auto token emit mechanism will not
// something nonnil so that the auto token emit mechanism will not
// emit another token.
lexer._token = null
lexer._token = nil
// What character index in the stream did the current token start at?
// Needed, for example, to get the text for current token. Set at
@ -74,12 +80,12 @@ func NewLexer(input InputStream) {
lexer._type = Token.INVALID_TYPE
lexer._modeStack = []
lexer._mode = Lexer.DEFAULT_MODE
lexer._mode = LexerDEFAULT_MODE
// You can set the text for the current token to override what is in
// the input char buffer. Use setText() or can set l instance var.
// /
lexer._text = null
lexer._text = nil
return lexer
}
@ -105,19 +111,19 @@ const (
func (l *Lexer) reset() {
// wack Lexer state variables
if (l._input !== null) {
if (l._input != nil) {
l._input.seek(0) // rewind the input
}
l._token = null
l._token = nil
l._type = Token.INVALID_TYPE
l._channel = Token.DEFAULT_CHANNEL
l._tokenStartCharIndex = -1
l._tokenStartColumn = -1
l._tokenStartLine = -1
l._text = null
l._text = nil
l._hitEOF = false
l._mode = Lexer.DEFAULT_MODE
l._mode = LexerDEFAULT_MODE
l._modeStack = []
l._interp.reset()
@ -125,8 +131,8 @@ func (l *Lexer) reset() {
// Return a token from l source i.e., match a token on the char stream.
func (l *Lexer) nextToken() {
if (l._input == null) {
panic("nextToken requires a non-null input stream.")
if (l._input == nil) {
panic("nextToken requires a non-nil input stream.")
}
// Mark start location in char stream so unbuffered streams are
@ -138,16 +144,16 @@ func (l *Lexer) nextToken() {
l.emitEOF()
return l._token
}
l._token = null
l._token = nil
l._channel = Token.DEFAULT_CHANNEL
l._tokenStartCharIndex = l._input.index
l._tokenStartColumn = l._interp.column
l._tokenStartLine = l._interp.line
l._text = null
l._text = nil
var continueOuter = false
for (true) {
l._type = Token.INVALID_TYPE
var ttype = Lexer.SKIP
var ttype = LexerSKIP
try {
ttype = l._interp.match(l._input, l._mode)
} catch (e) {
@ -160,18 +166,18 @@ func (l *Lexer) nextToken() {
if (l._type == Token.INVALID_TYPE) {
l._type = ttype
}
if (l._type == Lexer.SKIP) {
if (l._type == LexerSKIP) {
continueOuter = true
break
}
if (l._type !== Lexer.MORE) {
if (l._type != LexerMORE) {
break
}
}
if (continueOuter) {
continue
}
if (l._token == null) {
if (l._token == nil) {
l.emit()
}
return l._token
@ -186,15 +192,15 @@ func (l *Lexer) nextToken() {
// Instruct the lexer to skip creating a token for current lexer rule
// and look for another token. nextToken() knows to keep looking when
// a lexer rule finishes with token set to SKIP_TOKEN. Recall that
// if token==null at end of any token rule, it creates one for you
// if token==nil at end of any token rule, it creates one for you
// and emits it.
// /
func (l *Lexer) skip() {
l._type = Lexer.SKIP
l._type = LexerSKIP
}
func (l *Lexer) more() {
l._type = Lexer.MORE
l._type = LexerMORE
}
func (l *Lexer) mode(m) {
@ -203,7 +209,7 @@ func (l *Lexer) mode(m) {
func (l *Lexer) pushMode(m) {
if (l._interp.debug) {
console.log("pushMode " + m)
fmt.Println("pushMode " + m)
}
l._modeStack.push(l._mode)
l.mode(m)
@ -211,34 +217,32 @@ func (l *Lexer) pushMode(m) {
func (l *Lexer) popMode() {
if (l._modeStack.length == 0) {
throw "Empty Stack"
panic("Empty Stack")
}
if (l._interp.debug) {
console.log("popMode back to " + l._modeStack.slice(0, -1))
fmt.Println("popMode back to " + l._modeStack.slice(0, -1))
}
l.mode(l._modeStack.pop())
return l._mode
}
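// The push/pop calls above still use JavaScript array methods; a sketch of
// slice-backed equivalents, assuming _modeStack becomes []int (the method
// names here are illustrative):
func (l *Lexer) pushModeStack(m int) {
	l._modeStack = append(l._modeStack, m)
}
func (l *Lexer) popModeStack() int {
	m := l._modeStack[len(l._modeStack)-1]
	l._modeStack = l._modeStack[:len(l._modeStack)-1]
	return m
}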
// Set the char stream and reset the lexer
Object.defineProperty(Lexer.prototype, "inputStream", {
get : function() {
return l._input
},
set : function(input) {
l._input = null
l._tokenFactorySourcePair = [ l, l._input ]
l.reset()
l._input = input
l._tokenFactorySourcePair = [ l, l._input ]
}
})
Object.defineProperty(Lexer.prototype, "sourceName", {
get : function() {
return l._input.sourceName
}
})
func (l *Lexer) inputStream() *InputStream {
return l._input
}
func (l *Lexer) setInputStream(input *InputStream) {
l._input = nil
l._tokenFactorySourcePair = TokenFactorySourcePair{l, l._input}
l.reset()
l._input = input
l._tokenFactorySourcePair = TokenFactorySourcePair{l, l._input}
}
func (l *Lexer) sourceName() string {
return l._input.sourceName
}
// By default does not support multiple emits per nextToken invocation
// for efficiency reasons. Subclass and override l method, nextToken,
@ -257,8 +261,7 @@ func (l *Lexer) emitToken(token) {
// /
func (l *Lexer) emit() {
var t = l._factory.create(l._tokenFactorySourcePair, l._type,
l._text, l._channel, l._tokenStartCharIndex, l
.getCharIndex() - 1, l._tokenStartLine,
l._text, l._channel, l._tokenStartCharIndex, l.getCharIndex() - 1, l._tokenStartLine,
l._tokenStartColumn)
l.emitToken(t)
return t
@ -268,7 +271,7 @@ func (l *Lexer) emitEOF() {
var cpos = l.column
var lpos = l.line
var eof = l._factory.create(l._tokenFactorySourcePair, Token.EOF,
null, Token.DEFAULT_CHANNEL, l._input.index,
nil, Token.DEFAULT_CHANNEL, l._input.index,
l._input.index - 1, lpos, cpos)
l.emitToken(eof)
return eof
@ -311,7 +314,7 @@ func (l *Lexer) getCharIndex() {
//Set the complete text of l token it wipes any previous changes to the text.
Object.defineProperty(Lexer.prototype, "text", {
get : function() {
if (l._text !== null) {
if (l._text != nil) {
return l._text
} else {
return l._interp.getText(l._input)
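// This property block has no Go replacement in this commit yet; a sketch
// following the inputStream()/sourceName() accessor pattern above, with
// the empty string standing in for the JavaScript null check:
func (l *Lexer) text() string {
	if l._text != "" {
		return l._text
	}
	return l._interp.getText(l._input)
}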
@ -327,7 +330,7 @@ Object.defineProperty(Lexer.prototype, "text", {
func (l *Lexer) getAllTokens() {
var tokens = []
var t = l.nextToken()
while (t.type !== Token.EOF) {
while (t.type != Token.EOF) {
tokens.push(t)
t = l.nextToken()
}
@ -340,7 +343,7 @@ func (l *Lexer) notifyListeners(e) {
var text = l._input.getText(start, stop)
var msg = "token recognition error at: '" + l.getErrorDisplay(text) + "'"
var listener = l.getErrorListenerDispatch()
listener.syntaxError(l, null, l._tokenStartLine,
listener.syntaxError(l, nil, l._tokenStartLine,
l._tokenStartColumn, msg, e)
}
@ -352,7 +355,7 @@ func (l *Lexer) getErrorDisplay(s) {
return strings.Join(d, "")
}
func (l *Lexer) getErrorDisplayForChar(c) {
func (l *Lexer) getErrorDisplayForChar(c rune) string {
if (c.charCodeAt(0) == Token.EOF) {
return "<EOF>"
} else if (c == '\n') {
@ -366,7 +369,7 @@ func (l *Lexer) getErrorDisplayForChar(c) {
}
}
func (l *Lexer) getCharErrorDisplay(c) {
func (l *Lexer) getCharErrorDisplay(c rune) string {
return "'" + l.getErrorDisplayForChar(c) + "'"
}
@ -376,8 +379,8 @@ func (l *Lexer) getCharErrorDisplay(c) {
// to do sophisticated error recovery if you are in a fragment rule.
// /
func (l *Lexer) recover(re) {
if (l._input.LA(1) !== Token.EOF) {
if (re instanceof LexerNoViableAltException) {
if (l._input.LA(1) != Token.EOF) {
if _, ok := re.(LexerNoViableAltException); ok {
// skip a char and try again
l._interp.consume(l._input)
} else {

View File

@ -1,4 +1,4 @@
package antlr
package antlr4
var Token = require('./Token').Token
var ParseTreeListener = require('./tree/Tree').ParseTreeListener

View File

@ -1,4 +1,4 @@
package antlr
package antlr4
//* A rule invocation record for parsing.
//

View File

@ -1,4 +1,4 @@
package antlr
package antlr4
var RuleContext = require('./RuleContext').RuleContext

View File

@ -1,4 +1,4 @@
package antlr
package antlr4
var Token = require('./Token').Token
var ConsoleErrorListener = require('./error/ErrorListener').ConsoleErrorListener

View File

@ -1,4 +1,4 @@
package antlr
package antlr4
// A rule context is a record of a single rule invocation. It knows
// which context invoked it, if any. If there is no parent context, then

View File

@ -1,4 +1,4 @@
package antlr
package antlr4
// A token has properties: text, type, line, character position in the line
// (so we can ignore tabs), token channel, index, and source from which