gofmt all files

This commit is contained in:
Peter Boyer 2015-12-22 18:53:17 -06:00
parent 859df5231c
commit bd35305bc8
44 changed files with 3839 additions and 4043 deletions

View File

@ -1,105 +1,106 @@
package antlr4
type ATN struct {
grammarType int
maxTokenType int
states []IATNState
decisionToState []*DecisionState
ruleToStartState []*RuleStartState
ruleToStopState []*RuleStopState
modeNameToStartState map[string]*TokensStartState
modeToStartState []*TokensStartState
ruleToTokenType []int
lexerActions []ILexerAction
DecisionToState []*DecisionState
grammarType int
maxTokenType int
states []IATNState
ruleToStartState []*RuleStartState
ruleToStopState []*RuleStopState
modeNameToStartState map[string]*TokensStartState
modeToStartState []*TokensStartState
ruleToTokenType []int
lexerActions []ILexerAction
}
func NewATN(grammarType int, maxTokenType int) *ATN {
atn := new(ATN)
atn := new(ATN)
// Used for runtime deserialization of ATNs from strings///
// The type of the ATN.
atn.grammarType = grammarType
// The maximum value for any symbol recognized by a transition in the ATN.
atn.maxTokenType = maxTokenType
atn.states = make([]IATNState,0)
// Each subrule/rule is a decision point and we must track them so we
// can go back later and build DFA predictors for them. This includes
// all the rules, subrules, optional blocks, ()+, ()* etc...
atn.decisionToState = make([]*DecisionState, 0)
// Maps from rule index to starting state number.
atn.ruleToStartState = make([]*RuleStartState, 0)
// Maps from rule index to stop state number.
atn.ruleToStopState = nil
atn.modeNameToStartState = make( map[string]*TokensStartState )
// For lexer ATNs, atn.maps the rule index to the resulting token type.
// For parser ATNs, atn.maps the rule index to the generated bypass token
// type if the
// {@link ATNDeserializationOptions//isGenerateRuleBypassTransitions}
// deserialization option was specified otherwise, atn.is {@code nil}.
atn.ruleToTokenType = nil
// For lexer ATNs, atn.is an array of {@link LexerAction} objects which may
// be referenced by action transitions in the ATN.
atn.lexerActions = nil
atn.modeToStartState = make([]*TokensStartState, 0)
// Used for runtime deserialization of ATNs from strings///
// The type of the ATN.
atn.grammarType = grammarType
// The maximum value for any symbol recognized by a transition in the ATN.
atn.maxTokenType = maxTokenType
atn.states = make([]IATNState, 0)
// Each subrule/rule is a decision point and we must track them so we
// can go back later and build DFA predictors for them. This includes
// all the rules, subrules, optional blocks, ()+, ()* etc...
atn.DecisionToState = make([]*DecisionState, 0)
// Maps from rule index to starting state number.
atn.ruleToStartState = make([]*RuleStartState, 0)
// Maps from rule index to stop state number.
atn.ruleToStopState = nil
atn.modeNameToStartState = make(map[string]*TokensStartState)
// For lexer ATNs, atn.maps the rule index to the resulting token type.
// For parser ATNs, atn.maps the rule index to the generated bypass token
// type if the
// {@link ATNDeserializationOptions//isGenerateRuleBypassTransitions}
// deserialization option was specified otherwise, atn.is {@code nil}.
atn.ruleToTokenType = nil
// For lexer ATNs, atn.is an array of {@link LexerAction} objects which may
// be referenced by action transitions in the ATN.
atn.lexerActions = nil
atn.modeToStartState = make([]*TokensStartState, 0)
return atn
return atn
}
// Compute the set of valid tokens that can occur starting in state {@code s}.
// If {@code ctx} is nil, the set of tokens will not include what can follow
// the rule surrounding {@code s}. In other words, the set will be
// restricted to tokens reachable staying within {@code s}'s rule.
func (this *ATN) nextTokensInContext(s IATNState, ctx IRuleContext) *IntervalSet {
var anal = NewLL1Analyzer(this)
return anal.LOOK(s, nil, ctx)
var anal = NewLL1Analyzer(this)
return anal.LOOK(s, nil, ctx)
}
// Compute the set of valid tokens that can occur starting in {@code s} and
// staying in same rule. {@link Token//EPSILON} is in set if we reach end of
// rule.
func (this *ATN) nextTokensNoContext(s IATNState) *IntervalSet {
if (s.getNextTokenWithinRule() != nil ) {
return s.getNextTokenWithinRule()
}
s.setNextTokenWithinRule( this.nextTokensInContext(s, nil) )
s.getNextTokenWithinRule().readOnly = true
return s.getNextTokenWithinRule()
if s.getNextTokenWithinRule() != nil {
return s.getNextTokenWithinRule()
}
s.setNextTokenWithinRule(this.nextTokensInContext(s, nil))
s.getNextTokenWithinRule().readOnly = true
return s.getNextTokenWithinRule()
}
func (this *ATN) nextTokens(s IATNState, ctx IRuleContext) *IntervalSet {
if ( ctx==nil ) {
return this.nextTokensNoContext(s)
} else {
return this.nextTokensInContext(s, ctx)
}
if ctx == nil {
return this.nextTokensNoContext(s)
} else {
return this.nextTokensInContext(s, ctx)
}
}
func (this *ATN) addState( state IATNState ) {
if ( state != nil ) {
state.setATN(this)
state.setStateNumber(len(this.states))
}
this.states = append(this.states, state)
func (this *ATN) addState(state IATNState) {
if state != nil {
state.setATN(this)
state.setStateNumber(len(this.states))
}
this.states = append(this.states, state)
}
func (this *ATN) removeState( state IATNState ) {
this.states[state.getStateNumber()] = nil // just free mem, don't shift states in list
func (this *ATN) removeState(state IATNState) {
this.states[state.getStateNumber()] = nil // just free mem, don't shift states in list
}
func (this *ATN) defineDecisionState( s *DecisionState ) int {
this.decisionToState = append( this.decisionToState, s)
s.decision = len(this.decisionToState)-1
return s.decision
func (this *ATN) defineDecisionState(s *DecisionState) int {
this.DecisionToState = append(this.DecisionToState, s)
s.decision = len(this.DecisionToState) - 1
return s.decision
}
func (this *ATN) getDecisionState( decision int) *DecisionState {
if (len(this.decisionToState)==0) {
return nil
} else {
return this.decisionToState[decision]
}
func (this *ATN) getDecisionState(decision int) *DecisionState {
if len(this.DecisionToState) == 0 {
return nil
} else {
return this.DecisionToState[decision]
}
}
// Computes the set of input symbols which could follow ATN state number
@ -122,31 +123,30 @@ func (this *ATN) getDecisionState( decision int) *DecisionState {
//var Token = require('./../Token').Token
func (this *ATN) getExpectedTokens( stateNumber int, ctx IRuleContext ) *IntervalSet {
if ( stateNumber < 0 || stateNumber >= len(this.states) ) {
panic("Invalid state number.")
}
var s = this.states[stateNumber]
var following = this.nextTokens(s, nil)
if (!following.contains(TokenEpsilon)) {
return following
}
var expected = NewIntervalSet()
expected.addSet(following)
expected.removeOne(TokenEpsilon)
for (ctx != nil && ctx.getInvokingState() >= 0 && following.contains(TokenEpsilon)) {
var invokingState = this.states[ctx.getInvokingState()]
var rt = invokingState.getTransitions()[0]
following = this.nextTokens(rt.(*RuleTransition).followState, nil)
expected.addSet(following)
expected.removeOne(TokenEpsilon)
ctx = ctx.getParent().(IRuleContext)
}
if (following.contains(TokenEpsilon)) {
expected.addOne(TokenEOF)
}
return expected
func (this *ATN) getExpectedTokens(stateNumber int, ctx IRuleContext) *IntervalSet {
if stateNumber < 0 || stateNumber >= len(this.states) {
panic("Invalid state number.")
}
var s = this.states[stateNumber]
var following = this.nextTokens(s, nil)
if !following.contains(TokenEpsilon) {
return following
}
var expected = NewIntervalSet()
expected.addSet(following)
expected.removeOne(TokenEpsilon)
for ctx != nil && ctx.getInvokingState() >= 0 && following.contains(TokenEpsilon) {
var invokingState = this.states[ctx.getInvokingState()]
var rt = invokingState.getTransitions()[0]
following = this.nextTokens(rt.(*RuleTransition).followState, nil)
expected.addSet(following)
expected.removeOne(TokenEpsilon)
ctx = ctx.getParent().(IRuleContext)
}
if following.contains(TokenEpsilon) {
expected.addOne(TokenEOF)
}
return expected
}
var ATNINVALID_ALT_NUMBER = 0

View File

@ -1,8 +1,8 @@
package antlr4
import (
"reflect"
"fmt"
"reflect"
"strconv"
)
@ -33,25 +33,25 @@ type IATNConfig interface {
type ATNConfig struct {
precedenceFilterSuppressed bool
state IATNState
alt int
context IPredictionContext
semanticContext SemanticContext
reachesIntoOuterContext int
state IATNState
alt int
context IPredictionContext
semanticContext SemanticContext
reachesIntoOuterContext int
}
func NewATNConfig7(old *ATNConfig) *ATNConfig { // dup
a := new(ATNConfig)
a.state = old.state;
a.alt = old.alt;
a.context = old.context;
a.semanticContext = old.semanticContext;
a.reachesIntoOuterContext = old.reachesIntoOuterContext;
a.state = old.state
a.alt = old.alt
a.context = old.context
a.semanticContext = old.semanticContext
a.reachesIntoOuterContext = old.reachesIntoOuterContext
return a
}
func NewATNConfig6(state IATNState, alt int, context IPredictionContext) *ATNConfig {
return NewATNConfig5(state, alt, context, SemanticContextNONE);
return NewATNConfig5(state, alt, context, SemanticContextNONE)
}
func NewATNConfig5(state IATNState, alt int, context IPredictionContext, semanticContext SemanticContext) *ATNConfig {
@ -61,23 +61,23 @@ func NewATNConfig5(state IATNState, alt int, context IPredictionContext, semanti
return a
}
func NewATNConfig4(c IATNConfig , state IATNState) *ATNConfig {
return NewATNConfig(c, state, c.getContext(), c.getSemanticContext());
func NewATNConfig4(c IATNConfig, state IATNState) *ATNConfig {
return NewATNConfig(c, state, c.getContext(), c.getSemanticContext())
}
func NewATNConfig3(c IATNConfig , state IATNState, semanticContext SemanticContext) *ATNConfig {
return NewATNConfig(c, state, c.getContext(), semanticContext);
func NewATNConfig3(c IATNConfig, state IATNState, semanticContext SemanticContext) *ATNConfig {
return NewATNConfig(c, state, c.getContext(), semanticContext)
}
func NewATNConfig2(c IATNConfig , semanticContext SemanticContext) *ATNConfig {
return NewATNConfig(c, c.getState(), c.getContext(), semanticContext);
func NewATNConfig2(c IATNConfig, semanticContext SemanticContext) *ATNConfig {
return NewATNConfig(c, c.getState(), c.getContext(), semanticContext)
}
func NewATNConfig1(c IATNConfig , state IATNState, context IPredictionContext) *ATNConfig {
return NewATNConfig(c, state, context, c.getSemanticContext());
func NewATNConfig1(c IATNConfig, state IATNState, context IPredictionContext) *ATNConfig {
return NewATNConfig(c, state, context, c.getSemanticContext())
}
func NewATNConfig(c IATNConfig , state IATNState, context IPredictionContext, semanticContext SemanticContext) *ATNConfig {
func NewATNConfig(c IATNConfig, state IATNState, context IPredictionContext, semanticContext SemanticContext) *ATNConfig {
a := new(ATNConfig)
a.InitATNConfig(c, state, context, semanticContext)
@ -119,23 +119,22 @@ func (this *ATNConfig) setReachesIntoOuterContext(v int) {
this.reachesIntoOuterContext = v
}
func (a *ATNConfig) InitATNConfig(c IATNConfig, state IATNState, context IPredictionContext, semanticContext SemanticContext) {
func (a *ATNConfig) InitATNConfig(c IATNConfig, state IATNState, context IPredictionContext, semanticContext SemanticContext) {
a.state = state;
a.alt = c.getAlt();
a.context = context;
a.semanticContext = semanticContext;
a.reachesIntoOuterContext = c.getReachesIntoOuterContext();
a.state = state
a.alt = c.getAlt()
a.context = context
a.semanticContext = semanticContext
a.reachesIntoOuterContext = c.getReachesIntoOuterContext()
}
func (a *ATNConfig) InitATNConfig2(state IATNState, alt int, context IPredictionContext, semanticContext SemanticContext) {
a.state = state;
a.alt = alt;
a.context = context;
a.semanticContext = semanticContext;
a.state = state
a.alt = alt
a.context = context
a.semanticContext = semanticContext
}
@ -144,57 +143,55 @@ func (a *ATNConfig) InitATNConfig2(state IATNState, alt int, context IPrediction
// syntactic/semantic contexts are the same.
///
func (this *ATNConfig) equals(other interface{}) bool {
if (this == other) {
return true
} else if _, ok := other.(*ATNConfig); !ok {
return false
} else {
return reflect.DeepEqual(this, other)
}
if this == other {
return true
} else if _, ok := other.(*ATNConfig); !ok {
return false
} else {
return reflect.DeepEqual(this, other)
}
}
func (this *ATNConfig) shortHashString() string {
return "" + strconv.Itoa(this.state.getStateNumber()) + "/" + strconv.Itoa(this.alt) + "/" + this.semanticContext.toString()
return "" + strconv.Itoa(this.state.getStateNumber()) + "/" + strconv.Itoa(this.alt) + "/" + this.semanticContext.toString()
}
func (this *ATNConfig) hashString() string {
var c string
if (this.context == nil){
if this.context == nil {
c = ""
} else {
c = this.context.hashString()
}
return "" + strconv.Itoa(this.state.getStateNumber()) + "/" + strconv.Itoa(this.alt) + "/" + c + "/" + this.semanticContext.toString()
return "" + strconv.Itoa(this.state.getStateNumber()) + "/" + strconv.Itoa(this.alt) + "/" + c + "/" + this.semanticContext.toString()
}
func (this *ATNConfig) toString() string {
var a string
if (this.context != nil){
if this.context != nil {
a = ",[" + fmt.Sprint(this.context) + "]"
}
var b string
if (this.semanticContext != SemanticContextNONE){
if this.semanticContext != SemanticContextNONE {
b = ("," + fmt.Sprint(this.semanticContext))
}
var c string
if (this.reachesIntoOuterContext > 0){
if this.reachesIntoOuterContext > 0 {
c = ",up=" + fmt.Sprint(this.reachesIntoOuterContext)
}
return "(" + fmt.Sprint(this.state) + "," + strconv.Itoa(this.alt) + a + b + c + ")"
return "(" + fmt.Sprint(this.state) + "," + strconv.Itoa(this.alt) + a + b + c + ")"
}
type LexerATNConfig struct {
ATNConfig
lexerActionExecutor *LexerActionExecutor
lexerActionExecutor *LexerActionExecutor
passedThroughNonGreedyDecision bool
}
@ -219,7 +216,7 @@ func NewLexerATNConfig5(state IATNState, alt int, context IPredictionContext, le
return this
}
func NewLexerATNConfig4(c *LexerATNConfig, state IATNState) *LexerATNConfig {
func NewLexerATNConfig4(c *LexerATNConfig, state IATNState) *LexerATNConfig {
this := new(LexerATNConfig)
@ -239,7 +236,7 @@ func NewLexerATNConfig3(c *LexerATNConfig, state IATNState, lexerActionExecutor
return this
}
func NewLexerATNConfig2(c *LexerATNConfig, state IATNState, context IPredictionContext) *LexerATNConfig {
func NewLexerATNConfig2(c *LexerATNConfig, state IATNState, context IPredictionContext) *LexerATNConfig {
this := new(LexerATNConfig)
@ -249,21 +246,19 @@ func NewLexerATNConfig2(c *LexerATNConfig, state IATNState, context IPrediction
return this
}
func NewLexerATNConfig1( state IATNState, alt int, context IPredictionContext) *LexerATNConfig {
func NewLexerATNConfig1(state IATNState, alt int, context IPredictionContext) *LexerATNConfig {
this := new(LexerATNConfig)
// c IATNConfig , state IATNState, context IPredictionContext, semanticContext SemanticContext
this.InitATNConfig2(state, alt, context, SemanticContextNONE)
this.lexerActionExecutor = nil
this.passedThroughNonGreedyDecision = false
this.lexerActionExecutor = nil
this.passedThroughNonGreedyDecision = false
return this
return this
}
func (this *LexerATNConfig) hashString() string {
var f string
@ -273,7 +268,7 @@ func (this *LexerATNConfig) hashString() string {
f = "0"
}
return "" + strconv.Itoa(this.state.getStateNumber()) + strconv.Itoa(this.alt) + fmt.Sprint(this.context) +
return "" + strconv.Itoa(this.state.getStateNumber()) + strconv.Itoa(this.alt) + fmt.Sprint(this.context) +
fmt.Sprint(this.semanticContext) + f + fmt.Sprint(this.lexerActionExecutor)
}
@ -281,27 +276,27 @@ func (this *LexerATNConfig) equals(other interface{}) bool {
othert, ok := other.(*LexerATNConfig)
if (this == other) {
return true
} else if !ok {
return false
} else if (this.passedThroughNonGreedyDecision != othert.passedThroughNonGreedyDecision) {
return false
}
if this == other {
return true
} else if !ok {
return false
} else if this.passedThroughNonGreedyDecision != othert.passedThroughNonGreedyDecision {
return false
}
var b bool
if (this.lexerActionExecutor != nil){
b = !this.lexerActionExecutor.equals(othert.lexerActionExecutor)
if this.lexerActionExecutor != nil {
b = !this.lexerActionExecutor.equals(othert.lexerActionExecutor)
} else {
b = othert.lexerActionExecutor != nil
}
if (b) {
return false
} else {
if b {
return false
} else {
panic("Not implemented")
// return ATNConfig.prototype.equals.call(this, other)
}
// return ATNConfig.prototype.equals.call(this, other)
}
}
func checkNonGreedyDecision(source *LexerATNConfig, target IATNState) bool {

View File

@ -1,4 +1,5 @@
package antlr4
import (
"fmt"
)
@ -14,27 +15,27 @@ func hashATNConfig(c interface{}) string {
}
func equalATNConfigs(a, b interface{}) bool {
if ( a==b ) {
if a == b {
return true
}
if ( a==nil || b==nil ) {
if a == nil || b == nil {
return false
}
return a.(*ATNConfig).state.getStateNumber()==b.(*ATNConfig).state.getStateNumber() &&
a.(*ATNConfig).alt==b.(*ATNConfig).alt &&
return a.(*ATNConfig).state.getStateNumber() == b.(*ATNConfig).state.getStateNumber() &&
a.(*ATNConfig).alt == b.(*ATNConfig).alt &&
a.(*ATNConfig).semanticContext.equals(b.(*ATNConfig).semanticContext)
}
type ATNConfigSet struct {
readOnly bool
fullCtx bool
configLookup *Set
conflictingAlts *BitSet
cachedHashString string
hasSemanticContext bool
readOnly bool
fullCtx bool
configLookup *Set
conflictingAlts *BitSet
cachedHashString string
hasSemanticContext bool
dipsIntoOuterContext bool
configs []IATNConfig
uniqueAlt int
configs []IATNConfig
uniqueAlt int
}
func NewATNConfigSet(fullCtx bool) *ATNConfigSet {
@ -98,19 +99,19 @@ func (a *ATNConfigSet) InitATNConfigSet(fullCtx bool) {
// /
func (this *ATNConfigSet) add(config IATNConfig, mergeCache *DoubleDict) bool {
if (this.readOnly) {
if this.readOnly {
panic("This set is readonly")
}
if (config.getSemanticContext() != SemanticContextNONE) {
if config.getSemanticContext() != SemanticContextNONE {
this.hasSemanticContext = true
}
if (config.getReachesIntoOuterContext() > 0) {
if config.getReachesIntoOuterContext() > 0 {
this.dipsIntoOuterContext = true
}
var existing = this.configLookup.add(config).(IATNConfig)
if (existing == config) {
if existing == config {
this.cachedHashString = "-1"
this.configs = append( this.configs, config )// track order here
this.configs = append(this.configs, config) // track order here
return true
}
// a previous (s,i,pi,_), merge with it and save result
@ -119,17 +120,17 @@ func (this *ATNConfigSet) add(config IATNConfig, mergeCache *DoubleDict) bool {
// no need to check for existing.context, config.context in cache
// since only way to create Newgraphs is "call rule" and here. We
// cache at both places.
existing.setReachesIntoOuterContext( intMax( existing.getReachesIntoOuterContext(), config.getReachesIntoOuterContext()) )
existing.setReachesIntoOuterContext(intMax(existing.getReachesIntoOuterContext(), config.getReachesIntoOuterContext()))
// make sure to preserve the precedence filter suppression during the merge
if (config.getPrecedenceFilterSuppressed()) {
existing.setPrecedenceFilterSuppressed( true )
if config.getPrecedenceFilterSuppressed() {
existing.setPrecedenceFilterSuppressed(true)
}
existing.setContext( merged )// replace context no need to alt mapping
existing.setContext(merged) // replace context no need to alt mapping
return true
}
func (this *ATNConfigSet) getStates() *Set {
var states = NewSet(nil,nil)
var states = NewSet(nil, nil)
for i := 0; i < len(this.configs); i++ {
states.add(this.configs[i].getState())
}
@ -137,10 +138,10 @@ func (this *ATNConfigSet) getStates() *Set {
}
func (this *ATNConfigSet) getPredicates() []SemanticContext {
var preds = make([]SemanticContext,0)
var preds = make([]SemanticContext, 0)
for i := 0; i < len(this.configs); i++ {
c := this.configs[i].getSemanticContext()
if (c != SemanticContextNONE) {
if c != SemanticContextNONE {
preds = append(preds, c)
}
}
@ -152,10 +153,10 @@ func (this *ATNConfigSet) getItems() []IATNConfig {
}
func (this *ATNConfigSet) optimizeConfigs(interpreter *ATNSimulator) {
if (this.readOnly) {
if this.readOnly {
panic("This set is readonly")
}
if (this.configLookup.length() == 0) {
if this.configLookup.length() == 0 {
return
}
for i := 0; i < len(this.configs); i++ {
@ -164,15 +165,15 @@ func (this *ATNConfigSet) optimizeConfigs(interpreter *ATNSimulator) {
}
}
func (this *ATNConfigSet) addAll(coll []*ATNConfig) bool{
func (this *ATNConfigSet) addAll(coll []*ATNConfig) bool {
for i := 0; i < len(coll); i++ {
this.add(coll[i],nil)
this.add(coll[i], nil)
}
return false
}
func (this *ATNConfigSet) equals(other interface{}) bool {
if (this == other) {
if this == other {
return true
} else if _, ok := other.(*ATNConfigSet); !ok {
return false
@ -181,17 +182,17 @@ func (this *ATNConfigSet) equals(other interface{}) bool {
other2 := other.(*ATNConfigSet)
return this.configs != nil &&
// this.configs.equals(other2.configs) && // TODO is this necessary?
this.fullCtx == other2.fullCtx &&
this.uniqueAlt == other2.uniqueAlt &&
this.conflictingAlts == other2.conflictingAlts &&
this.hasSemanticContext == other2.hasSemanticContext &&
this.dipsIntoOuterContext == other2.dipsIntoOuterContext
// this.configs.equals(other2.configs) && // TODO is this necessary?
this.fullCtx == other2.fullCtx &&
this.uniqueAlt == other2.uniqueAlt &&
this.conflictingAlts == other2.conflictingAlts &&
this.hasSemanticContext == other2.hasSemanticContext &&
this.dipsIntoOuterContext == other2.dipsIntoOuterContext
}
func (this *ATNConfigSet) hashString() string {
if (this.readOnly) {
if (this.cachedHashString == "-1") {
if this.readOnly {
if this.cachedHashString == "-1" {
this.cachedHashString = this.hashConfigs()
}
return this.cachedHashString
@ -216,22 +217,22 @@ func (this *ATNConfigSet) isEmpty() bool {
return len(this.configs) == 0
}
func (this *ATNConfigSet) contains(item *ATNConfig ) bool {
if (this.configLookup == nil) {
func (this *ATNConfigSet) contains(item *ATNConfig) bool {
if this.configLookup == nil {
panic("This method is not implemented for readonly sets.")
}
return this.configLookup.contains(item)
}
func (this *ATNConfigSet) containsFast(item *ATNConfig ) bool {
if (this.configLookup == nil) {
func (this *ATNConfigSet) containsFast(item *ATNConfig) bool {
if this.configLookup == nil {
panic("This method is not implemented for readonly sets.")
}
return this.configLookup.contains(item) // TODO containsFast is not implemented for Set
}
func (this *ATNConfigSet) clear() {
if (this.readOnly) {
if this.readOnly {
panic("This set is readonly")
}
this.configs = make([]IATNConfig, 0)
@ -241,7 +242,7 @@ func (this *ATNConfigSet) clear() {
func (this *ATNConfigSet) setReadonly(readOnly bool) {
this.readOnly = readOnly
if (readOnly) {
if readOnly {
this.configLookup = nil // can't mod, no need for lookup cache
}
}
@ -249,18 +250,17 @@ func (this *ATNConfigSet) setReadonly(readOnly bool) {
func (this *ATNConfigSet) toString() string {
panic("not implemented")
return ""
// return Utils.arrayToString(this.configs) +
// (this.hasSemanticContext ? ",hasSemanticContext=" + this.hasSemanticContext : "") +
// (this.uniqueAlt != ATN.INVALID_ALT_NUMBER ? ",uniqueAlt=" + this.uniqueAlt : "") +
// (this.conflictingAlts != nil ? ",conflictingAlts=" + this.conflictingAlts : "") +
// (this.dipsIntoOuterContext ? ",dipsIntoOuterContext" : "")
// return Utils.arrayToString(this.configs) +
// (this.hasSemanticContext ? ",hasSemanticContext=" + this.hasSemanticContext : "") +
// (this.uniqueAlt != ATN.INVALID_ALT_NUMBER ? ",uniqueAlt=" + this.uniqueAlt : "") +
// (this.conflictingAlts != nil ? ",conflictingAlts=" + this.conflictingAlts : "") +
// (this.dipsIntoOuterContext ? ",dipsIntoOuterContext" : "")
}
type OrderedATNConfigSet struct {
*ATNConfigSet
}
func NewOrderedATNConfigSet() *OrderedATNConfigSet {
this := new(OrderedATNConfigSet)
@ -270,6 +270,3 @@ func NewOrderedATNConfigSet() *OrderedATNConfigSet {
return this
}

View File

@ -1,23 +1,21 @@
package antlr4
type ATNDeserializationOptions struct {
readOnly bool
verifyATN bool
readOnly bool
verifyATN bool
generateRuleBypassTransitions bool
}
func NewATNDeserializationOptions(copyFrom *ATNDeserializationOptions) *ATNDeserializationOptions {
o := new(ATNDeserializationOptions)
if (copyFrom != nil){
if copyFrom != nil {
o.readOnly = copyFrom.readOnly
o.verifyATN = copyFrom.verifyATN
o.generateRuleBypassTransitions = copyFrom.generateRuleBypassTransitions
}
return o
return o
}
var ATNDeserializationOptionsdefaultOptions = &ATNDeserializationOptions{true,false,false}
var ATNDeserializationOptionsdefaultOptions = &ATNDeserializationOptions{true, false, false}

File diff suppressed because it is too large Load Diff

View File

@ -1,53 +1,51 @@
package antlr4
type ATNSimulator struct {
atn *ATN
sharedContextCache *PredictionContextCache
atn *ATN
sharedContextCache *PredictionContextCache
}
func NewATNSimulator(atn *ATN, sharedContextCache *PredictionContextCache) *ATNSimulator {
// The context cache maps all PredictionContext objects that are ==
// to a single cached copy. This cache is shared across all contexts
// in all ATNConfigs in all DFA states. We rebuild each ATNConfigSet
// to use only cached nodes/graphs in addDFAState(). We don't want to
// fill this during closure() since there are lots of contexts that
// pop up but are not used ever again. It also greatly slows down closure().
//
// <p>This cache makes a huge difference in memory and a little bit in speed.
// For the Java grammar on java.*, it dropped the memory requirements
// at the end from 25M to 16M. We don't store any of the full context
// graphs in the DFA because they are limited to local context only,
// but apparently there's a lot of repetition there as well. We optimize
// the config contexts before storing the config set in the DFA states
// by literally rebuilding them with cached subgraphs only.</p>
//
// <p>I tried a cache for use during closure operations, that was
// whacked after each adaptivePredict(). It cost a little bit
// more time I think and doesn't save on the overall footprint
// so it's not worth the complexity.</p>
this := new(ATNSimulator)
// The context cache maps all PredictionContext objects that are ==
// to a single cached copy. This cache is shared across all contexts
// in all ATNConfigs in all DFA states. We rebuild each ATNConfigSet
// to use only cached nodes/graphs in addDFAState(). We don't want to
// fill this during closure() since there are lots of contexts that
// pop up but are not used ever again. It also greatly slows down closure().
//
// <p>This cache makes a huge difference in memory and a little bit in speed.
// For the Java grammar on java.*, it dropped the memory requirements
// at the end from 25M to 16M. We don't store any of the full context
// graphs in the DFA because they are limited to local context only,
// but apparently there's a lot of repetition there as well. We optimize
// the config contexts before storing the config set in the DFA states
// by literally rebuilding them with cached subgraphs only.</p>
//
// <p>I tried a cache for use during closure operations, that was
// whacked after each adaptivePredict(). It cost a little bit
// more time I think and doesn't save on the overall footprint
// so it's not worth the complexity.</p>
this.InitATNSimulator(atn, sharedContextCache)
this := new(ATNSimulator)
return this
this.InitATNSimulator(atn, sharedContextCache)
return this
}
func (this *ATNSimulator) InitATNSimulator(atn *ATN, sharedContextCache *PredictionContextCache) {
this.atn = atn
this.sharedContextCache = sharedContextCache
this.atn = atn
this.sharedContextCache = sharedContextCache
}
// Must distinguish between missing edge and edge we know leads nowhere///
var ATNSimulatorERROR = NewDFAState(0x7FFFFFFF, NewATNConfigSet(false))
func (this *ATNSimulator) getCachedContext(context IPredictionContext) IPredictionContext {
if (this.sharedContextCache == nil) {
return context
}
var visited = make(map[IPredictionContext]IPredictionContext)
return getCachedPredictionContext(context, this.sharedContextCache, visited)
if this.sharedContextCache == nil {
return context
}
var visited = make(map[IPredictionContext]IPredictionContext)
return getCachedPredictionContext(context, this.sharedContextCache, visited)
}

View File

@ -1,22 +1,22 @@
package antlr4
import "strconv"
const (
// constants for serialization
ATNStateInvalidType = 0
ATNStateBASIC = 1
ATNStateRULE_START = 2
ATNStateBLOCK_START = 3
// constants for serialization
ATNStateInvalidType = 0
ATNStateBASIC = 1
ATNStateRULE_START = 2
ATNStateBLOCK_START = 3
ATNStatePLUS_BLOCK_START = 4
ATNStateSTAR_BLOCK_START = 5
ATNStateTOKEN_START = 6
ATNStateRULE_STOP = 7
ATNStateBLOCK_END = 8
ATNStateSTAR_LOOP_BACK = 9
ATNStateSTAR_LOOP_ENTRY = 10
ATNStatePLUS_LOOP_BACK = 11
ATNStateLOOP_END = 12
ATNStateTOKEN_START = 6
ATNStateRULE_STOP = 7
ATNStateBLOCK_END = 8
ATNStateSTAR_LOOP_BACK = 9
ATNStateSTAR_LOOP_ENTRY = 10
ATNStatePLUS_LOOP_BACK = 11
ATNStateLOOP_END = 12
ATNStateINVALID_STATE_NUMBER = -1
)
@ -36,11 +36,9 @@ const (
// "PLUS_LOOP_BACK",
// "LOOP_END" ]
var INITIAL_NUM_TRANSITIONS = 4
type IATNState interface {
getEpsilonOnlyTransitions() bool
getRuleIndex() int
@ -58,7 +56,7 @@ type IATNState interface {
setStateNumber(int)
getTransitions() []ITransition
setTransitions( []ITransition )
setTransitions([]ITransition)
addTransition(ITransition, int)
toString() string
@ -66,10 +64,10 @@ type IATNState interface {
type ATNState struct {
// Which ATN are we in?
atn *ATN
stateNumber int
stateType int
ruleIndex int
atn *ATN
stateNumber int
stateType int
ruleIndex int
epsilonOnlyTransitions bool
// Track the transitions emanating from this ATN state.
transitions []ITransition
@ -85,7 +83,7 @@ func NewATNState() *ATNState {
return as
}
func (as *ATNState) InitATNState(){
func (as *ATNState) InitATNState() {
// Which ATN are we in?
as.atn = nil
@ -143,7 +141,7 @@ func (as *ATNState) getNextTokenWithinRule() *IntervalSet {
return as.nextTokenWithinRule
}
func (as *ATNState) setNextTokenWithinRule(v *IntervalSet) {
func (as *ATNState) setNextTokenWithinRule(v *IntervalSet) {
as.nextTokenWithinRule = v
}
@ -164,15 +162,15 @@ func (this *ATNState) isNonGreedyExitState() bool {
}
func (this *ATNState) addTransition(trans ITransition, index int) {
if ( len(this.transitions) == 0 ) {
if len(this.transitions) == 0 {
this.epsilonOnlyTransitions = trans.getIsEpsilon()
} else if(this.epsilonOnlyTransitions != trans.getIsEpsilon()) {
} else if this.epsilonOnlyTransitions != trans.getIsEpsilon() {
this.epsilonOnlyTransitions = false
}
if (index==-1) {
if index == -1 {
this.transitions = append(this.transitions, trans)
} else {
this.transitions = append(this.transitions[:index], append([]ITransition{ trans }, this.transitions[index:]...)...)
this.transitions = append(this.transitions[:index], append([]ITransition{trans}, this.transitions[index:]...)...)
// this.transitions.splice(index, 1, trans)
}
}
@ -192,7 +190,7 @@ func NewBasicState() *BasicState {
type DecisionState struct {
*ATNState
decision int
decision int
nonGreedy bool
}
@ -252,7 +250,6 @@ func NewBasicBlockStartState() *BasicBlockStartState {
return this
}
// Terminal node of a simple {@code (a|b|c)} block.
type BlockEndState struct {
ATNState
@ -291,7 +288,7 @@ func NewRuleStopState() *RuleStopState {
type RuleStartState struct {
ATNState
stopState IATNState
stopState IATNState
isPrecedenceRule bool
}
@ -369,7 +366,6 @@ func NewStarBlockStartState() *StarBlockStartState {
return this
}
type StarLoopbackState struct {
*ATNState
}
@ -384,11 +380,10 @@ func NewStarLoopbackState() *StarLoopbackState {
return this
}
type StarLoopEntryState struct {
*DecisionState
loopBackState IATNState
loopBackState IATNState
precedenceRuleDecision bool
}
@ -408,7 +403,6 @@ func NewStarLoopEntryState() *StarLoopEntryState {
return this
}
// Mark the end of a * or + loop.
type LoopEndState struct {
*ATNState
@ -442,18 +436,3 @@ func NewTokensStartState() *TokensStartState {
this.stateType = ATNStateTOKEN_START
return this
}

View File

@ -3,8 +3,6 @@ package antlr4
// Represents the type of recognizer an ATN applies to.
const (
ATNTypeLexer = 0
ATNTypeLexer = 0
ATNTypeParser = 1
)

View File

@ -1,4 +1,3 @@
// This implementation of {@link TokenStream} loads tokens from a
// {@link TokenSource} on-demand, and places the tokens in a buffer to provide
// access to any previous token by index.
@ -11,16 +10,17 @@
// {@link CommonTokenStream}.</p>
package antlr4
import "strconv"
// bt is just to keep meaningful parameter types to Parser
type BufferedTokenStream struct {
tokenSource TokenSource
tokens []*Token
index int
tokens []*Token
index int
fetchedEOF bool
channel int
channel int
}
func NewBufferedTokenStream(tokenSource TokenSource) *BufferedTokenStream {
@ -30,7 +30,7 @@ func NewBufferedTokenStream(tokenSource TokenSource) *BufferedTokenStream {
return ts
}
func (ts *BufferedTokenStream) InitBufferedTokenStream(tokenSource TokenSource){
func (ts *BufferedTokenStream) InitBufferedTokenStream(tokenSource TokenSource) {
// The {@link TokenSource} from which tokens for bt stream are fetched.
ts.tokenSource = tokenSource
@ -94,11 +94,11 @@ func (bt *BufferedTokenStream) get(index int) *Token {
func (bt *BufferedTokenStream) consume() {
var skipEofCheck = false
if (bt.index >= 0) {
if (bt.fetchedEOF) {
if bt.index >= 0 {
if bt.fetchedEOF {
// the last token in tokens is EOF. skip check if p indexes any
// fetched token except the last.
skipEofCheck = bt.index < len(bt.tokens) - 1
skipEofCheck = bt.index < len(bt.tokens)-1
} else {
// no EOF token in tokens. skip check if p indexes a fetched token.
skipEofCheck = bt.index < len(bt.tokens)
@ -107,10 +107,10 @@ func (bt *BufferedTokenStream) consume() {
// not yet initialized
skipEofCheck = false
}
if (!skipEofCheck && bt.LA(1) == TokenEOF) {
panic( "cannot consume EOF" )
if !skipEofCheck && bt.LA(1) == TokenEOF {
panic("cannot consume EOF")
}
if (bt.sync(bt.index + 1)) {
if bt.sync(bt.index + 1) {
bt.index = bt.adjustSeekIndex(bt.index + 1)
}
}
@ -123,7 +123,7 @@ func (bt *BufferedTokenStream) consume() {
// /
func (bt *BufferedTokenStream) sync(i int) bool {
var n = i - len(bt.tokens) + 1 // how many more elements we need?
if (n > 0) {
if n > 0 {
var fetched = bt.fetch(n)
return fetched >= n
}
@ -143,7 +143,7 @@ func (bt *BufferedTokenStream) fetch(n int) int {
var t *Token = bt.tokenSource.nextToken()
t.tokenIndex = len(bt.tokens)
bt.tokens = append(bt.tokens, t)
if (t.tokenType == TokenEOF) {
if t.tokenType == TokenEOF {
bt.fetchedEOF = true
return i + 1
}
@ -154,20 +154,20 @@ func (bt *BufferedTokenStream) fetch(n int) int {
// Get all tokens from start..stop inclusively///
func (bt *BufferedTokenStream) getTokens(start int, stop int, types *IntervalSet) []*Token {
if (start < 0 || stop < 0) {
if start < 0 || stop < 0 {
return nil
}
bt.lazyInit()
var subset = make([]*Token, 0)
if (stop >= len(bt.tokens)) {
if stop >= len(bt.tokens) {
stop = len(bt.tokens) - 1
}
for i := start; i < stop; i++ {
var t = bt.tokens[i]
if (t.tokenType == TokenEOF) {
if t.tokenType == TokenEOF {
break
}
if (types == nil || types.contains(t.tokenType)) {
if types == nil || types.contains(t.tokenType) {
subset = append(subset, t)
}
}
@ -179,25 +179,25 @@ func (bt *BufferedTokenStream) LA(i int) int {
}
func (bt *BufferedTokenStream) LB(k int) *Token {
if (bt.index - k < 0) {
if bt.index-k < 0 {
return nil
}
return bt.tokens[bt.index - k]
return bt.tokens[bt.index-k]
}
func (bt *BufferedTokenStream) LT(k int) *Token {
bt.lazyInit()
if (k == 0) {
if k == 0 {
return nil
}
if (k < 0) {
if k < 0 {
return bt.LB(-k)
}
var i = bt.index + k - 1
bt.sync(i)
if (i >= len(bt.tokens)) { // return EOF token
if i >= len(bt.tokens) { // return EOF token
// EOF must be last token
return bt.tokens[len(bt.tokens) - 1]
return bt.tokens[len(bt.tokens)-1]
}
return bt.tokens[i]
}
@ -220,7 +220,7 @@ func (bt *BufferedTokenStream) adjustSeekIndex(i int) int {
}
func (bt *BufferedTokenStream) lazyInit() {
if (bt.index == -1) {
if bt.index == -1 {
bt.setup()
}
}
@ -247,12 +247,12 @@ func (bt *BufferedTokenStream) setTokenSource(tokenSource TokenSource) {
// /
func (bt *BufferedTokenStream) nextTokenOnChannel(i, channel int) int {
bt.sync(i)
if (i >= len(bt.tokens)) {
if i >= len(bt.tokens) {
return -1
}
var token = bt.tokens[i]
for (token.channel != bt.channel) {
if (token.tokenType == TokenEOF) {
for token.channel != bt.channel {
if token.tokenType == TokenEOF {
return -1
}
i += 1
@ -266,7 +266,7 @@ func (bt *BufferedTokenStream) nextTokenOnChannel(i, channel int) int {
// Return i if tokens[i] is on channel. Return -1 if there are no tokens
// on channel between i and 0.
func (bt *BufferedTokenStream) previousTokenOnChannel(i, channel int) int {
for (i >= 0 && bt.tokens[i].channel != channel) {
for i >= 0 && bt.tokens[i].channel != channel {
i -= 1
}
return i
@ -277,14 +277,14 @@ func (bt *BufferedTokenStream) previousTokenOnChannel(i, channel int) int {
// EOF. If channel is -1, find any non default channel token.
func (bt *BufferedTokenStream) getHiddenTokensToRight(tokenIndex, channel int) []*Token {
bt.lazyInit()
if (tokenIndex < 0 || tokenIndex >= len(bt.tokens)) {
panic( strconv.Itoa(tokenIndex) + " not in 0.." + strconv.Itoa(len(bt.tokens) - 1) )
if tokenIndex < 0 || tokenIndex >= len(bt.tokens) {
panic(strconv.Itoa(tokenIndex) + " not in 0.." + strconv.Itoa(len(bt.tokens)-1))
}
var nextOnChannel = bt.nextTokenOnChannel(tokenIndex + 1, LexerDefaultTokenChannel)
var nextOnChannel = bt.nextTokenOnChannel(tokenIndex+1, LexerDefaultTokenChannel)
var from_ = tokenIndex + 1
// if none onchannel to right, nextOnChannel=-1 so set to = last token
var to int
if (nextOnChannel == -1){
if nextOnChannel == -1 {
to = len(bt.tokens) - 1
} else {
to = nextOnChannel
@ -297,11 +297,11 @@ func (bt *BufferedTokenStream) getHiddenTokensToRight(tokenIndex, channel int) [
// If channel is -1, find any non default channel token.
func (bt *BufferedTokenStream) getHiddenTokensToLeft(tokenIndex, channel int) []*Token {
bt.lazyInit()
if (tokenIndex < 0 || tokenIndex >= len(bt.tokens)) {
panic( strconv.Itoa(tokenIndex) + " not in 0.." + strconv.Itoa(len(bt.tokens) - 1) )
if tokenIndex < 0 || tokenIndex >= len(bt.tokens) {
panic(strconv.Itoa(tokenIndex) + " not in 0.." + strconv.Itoa(len(bt.tokens)-1))
}
var prevOnChannel = bt.previousTokenOnChannel(tokenIndex - 1, LexerDefaultTokenChannel)
if (prevOnChannel == tokenIndex - 1) {
var prevOnChannel = bt.previousTokenOnChannel(tokenIndex-1, LexerDefaultTokenChannel)
if prevOnChannel == tokenIndex-1 {
return nil
}
// if none on channel to left, prevOnChannel=-1 then from=0
@ -311,18 +311,18 @@ func (bt *BufferedTokenStream) getHiddenTokensToLeft(tokenIndex, channel int) []
}
func (bt *BufferedTokenStream) filterForChannel(left, right, channel int) []*Token {
var hidden = make([]*Token,0)
for i := left; i < right + 1; i++ {
var hidden = make([]*Token, 0)
for i := left; i < right+1; i++ {
var t = bt.tokens[i]
if (channel == -1) {
if (t.channel != LexerDefaultTokenChannel) {
if channel == -1 {
if t.channel != LexerDefaultTokenChannel {
hidden = append(hidden, t)
}
} else if (t.channel == channel) {
} else if t.channel == channel {
hidden = append(hidden, t)
}
}
if (len(hidden) == 0) {
if len(hidden) == 0 {
return nil
}
return hidden
@ -336,27 +336,27 @@ func (bt *BufferedTokenStream) getSourceName() string {
func (bt *BufferedTokenStream) getText(interval *Interval) string {
bt.lazyInit()
bt.fill()
if (interval == nil) {
interval = NewInterval(0, len(bt.tokens) - 1)
if interval == nil {
interval = NewInterval(0, len(bt.tokens)-1)
}
var start = interval.start
// if s2, ok := start.(*Token); ok {
// start = s2.tokenIndex
// }
// if s2, ok := start.(*Token); ok {
// start = s2.tokenIndex
// }
var stop = interval.stop
// if s2, ok := stop.(*Token); ok {
// stop = s2.tokenIndex
// }
if (start < 0 || stop < 0) {
// if s2, ok := stop.(*Token); ok {
// stop = s2.tokenIndex
// }
if start < 0 || stop < 0 {
return ""
}
if (stop >= len(bt.tokens)) {
if stop >= len(bt.tokens) {
stop = len(bt.tokens) - 1
}
var s = ""
for i := start; i < stop + 1; i++ {
for i := start; i < stop+1; i++ {
var t = bt.tokens[i]
if (t.tokenType == TokenEOF) {
if t.tokenType == TokenEOF {
break
}
s += t.text()
@ -367,9 +367,7 @@ func (bt *BufferedTokenStream) getText(interval *Interval) string {
// Get all tokens from lexer until EOF///
func (bt *BufferedTokenStream) fill() {
bt.lazyInit()
for (bt.fetch(1000) == 1000) {
for bt.fetch(1000) == 1000 {
continue
}
}

View File

@ -4,4 +4,4 @@ type CharStream interface {
IntStream
getTextFromInterval(*Interval) string
}
}

View File

@ -6,32 +6,32 @@
package antlr4
type TokenFactory interface {
create(source *TokenSourceCharStreamPair, ttype int, text string, channel, start, stop, line, column int) *Token
create(source *TokenSourceCharStreamPair, ttype int, text string, channel, start, stop, line, column int) *Token
}
type CommonTokenFactory struct {
copyText bool
copyText bool
}
func NewCommonTokenFactory(copyText bool) *CommonTokenFactory {
tf := new(CommonTokenFactory)
tf := new(CommonTokenFactory)
// Indicates whether {@link CommonToken//setText} should be called after
// constructing tokens to explicitly set the text. This is useful for cases
// where the input stream might not be able to provide arbitrary substrings
// of text from the input after the lexer creates a token (e.g. the
// implementation of {@link CharStream//getText} in
// {@link UnbufferedCharStream} panics an
// {@link UnsupportedOperationException}). Explicitly setting the token text
// allows {@link Token//getText} to be called at any time regardless of the
// input stream implementation.
//
// <p>
// The default value is {@code false} to avoid the performance and memory
// overhead of copying text for every token unless explicitly requested.</p>
//
tf.copyText = copyText
// Indicates whether {@link CommonToken//setText} should be called after
// constructing tokens to explicitly set the text. This is useful for cases
// where the input stream might not be able to provide arbitrary substrings
// of text from the input after the lexer creates a token (e.g. the
// implementation of {@link CharStream//getText} in
// {@link UnbufferedCharStream} panics an
// {@link UnsupportedOperationException}). Explicitly setting the token text
// allows {@link Token//getText} to be called at any time regardless of the
// input stream implementation.
//
// <p>
// The default value is {@code false} to avoid the performance and memory
// overhead of copying text for every token unless explicitly requested.</p>
//
tf.copyText = copyText
return tf
}
@ -46,21 +46,19 @@ func NewCommonTokenFactory(copyText bool) *CommonTokenFactory {
var CommonTokenFactoryDEFAULT = NewCommonTokenFactory(false)
func (this *CommonTokenFactory) create(source *TokenSourceCharStreamPair, ttype int, text string, channel, start, stop, line, column int) *Token {
var t = NewCommonToken(source, ttype, channel, start, stop)
t.line = line
t.column = column
if (text != "") {
t.setText( text )
} else if (this.copyText && source.charStream != nil) {
t.setText( source.charStream.getTextFromInterval(NewInterval(start,stop)))
}
return t.Token
var t = NewCommonToken(source, ttype, channel, start, stop)
t.line = line
t.column = column
if text != "" {
t.setText(text)
} else if this.copyText && source.charStream != nil {
t.setText(source.charStream.getTextFromInterval(NewInterval(start, stop)))
}
return t.Token
}
func (this *CommonTokenFactory) createThin(ttype int, text string) *Token {
var t = NewCommonToken(nil, ttype, TokenDefaultChannel, -1, -1)
t.setText( text )
return t.Token
var t = NewCommonToken(nil, ttype, TokenDefaultChannel, -1, -1)
t.setText(text)
return t.Token
}

View File

@ -26,75 +26,74 @@
package antlr4
type CommonTokenStream struct {
*BufferedTokenStream
*BufferedTokenStream
}
func NewCommonTokenStream(lexer ILexer, channel int) *CommonTokenStream {
ts := new(CommonTokenStream)
ts.InitBufferedTokenStream(lexer)
ts := new(CommonTokenStream)
ts.InitBufferedTokenStream(lexer)
ts.channel = channel
ts.channel = channel
return ts
return ts
}
func (ts *CommonTokenStream) adjustSeekIndex(i int) int {
return ts.nextTokenOnChannel(i, ts.channel)
return ts.nextTokenOnChannel(i, ts.channel)
}
func (ts *CommonTokenStream) LB(k int) *Token {
if (k==0 || ts.index-k<0) {
return nil
}
var i = ts.index
var n = 1
// find k good tokens looking backwards
for (n <= k) {
// skip off-channel tokens
i = ts.previousTokenOnChannel(i - 1, ts.channel)
n += 1
}
if (i < 0) {
return nil
}
return ts.tokens[i]
if k == 0 || ts.index-k < 0 {
return nil
}
var i = ts.index
var n = 1
// find k good tokens looking backwards
for n <= k {
// skip off-channel tokens
i = ts.previousTokenOnChannel(i-1, ts.channel)
n += 1
}
if i < 0 {
return nil
}
return ts.tokens[i]
}
func (ts *CommonTokenStream) LT(k int) *Token {
ts.lazyInit()
if (k == 0) {
return nil
}
if (k < 0) {
return ts.LB(-k)
}
var i = ts.index
var n = 1 // we know tokens[pos] is a good one
// find k good tokens
for n < k {
// skip off-channel tokens, but make sure to not look past EOF
if (ts.sync(i + 1)) {
i = ts.nextTokenOnChannel(i + 1, ts.channel)
}
n += 1
}
return ts.tokens[i]
ts.lazyInit()
if k == 0 {
return nil
}
if k < 0 {
return ts.LB(-k)
}
var i = ts.index
var n = 1 // we know tokens[pos] is a good one
// find k good tokens
for n < k {
// skip off-channel tokens, but make sure to not look past EOF
if ts.sync(i + 1) {
i = ts.nextTokenOnChannel(i+1, ts.channel)
}
n += 1
}
return ts.tokens[i]
}
// Count EOF just once.///
func (ts *CommonTokenStream) getNumberOfOnChannelTokens() int {
var n = 0
ts.fill()
for i := 0; i < len(ts.tokens); i++ {
var t = ts.tokens[i]
if t.channel==ts.channel {
n += 1
}
if t.tokenType==TokenEOF {
break
}
}
return n
var n = 0
ts.fill()
for i := 0; i < len(ts.tokens); i++ {
var t = ts.tokens[i]
if t.channel == ts.channel {
n += 1
}
if t.tokenType == TokenEOF {
break
}
}
return n
}

View File

@ -2,9 +2,9 @@ package antlr4
type DFA struct {
atnStartState *DecisionState
decision int
_states map[string]*DFAState
s0 *DFAState
decision int
_states map[string]*DFAState
s0 *DFAState
precedenceDfa bool
}
@ -37,11 +37,11 @@ func NewDFA(atnStartState *DecisionState, decision int) *DFA {
// @see //isPrecedenceDfa()
func (this *DFA) getPrecedenceStartState(precedence int) *DFAState {
if (!(this.precedenceDfa)) {
if !(this.precedenceDfa) {
panic("Only precedence DFAs may contain a precedence start state.")
}
// s0.edges is never nil for a precedence DFA
if (precedence < 0 || precedence >= len(this.s0.edges)) {
if precedence < 0 || precedence >= len(this.s0.edges) {
return nil
}
return this.s0.edges[precedence]
@ -56,11 +56,11 @@ func (this *DFA) getPrecedenceStartState(precedence int) *DFAState {
// @panics IllegalStateException if this is not a precedence DFA.
// @see //isPrecedenceDfa()
//
func (this *DFA) setPrecedenceStartState(precedence int, startState *DFAState) {
if (!(this.precedenceDfa)) {
panic ("Only precedence DFAs may contain a precedence start state.")
func (this *DFA) setPrecedenceStartState(precedence int, startState *DFAState) {
if !(this.precedenceDfa) {
panic("Only precedence DFAs may contain a precedence start state.")
}
if (precedence < 0) {
if precedence < 0 {
return
}
@ -88,11 +88,11 @@ func (this *DFA) setPrecedenceStartState(precedence int, startState *DFAState)
// {@code false}
func (this *DFA) setPrecedenceDfa(precedenceDfa bool) {
if (this.precedenceDfa!=precedenceDfa) {
if this.precedenceDfa != precedenceDfa {
this._states = make(map[string]*DFAState)
if (precedenceDfa) {
if precedenceDfa {
var precedenceState = NewDFAState(-1, NewATNConfigSet(false))
precedenceState.edges = make([]*DFAState,0)
precedenceState.edges = make([]*DFAState, 0)
precedenceState.isAcceptState = false
precedenceState.requiresFullContext = false
this.s0 = precedenceState
@ -114,18 +114,18 @@ func (this *DFA) sortedStates() []*DFAState {
return nil
// states_ is a map of state/state, where key=value
// var keys = Object.keys(this._states)
// var list = []
// for i:=0; i<keys.length; i++ {
// list.push(this._states[keys[i]])
// }
// return list.sort(function(a, b) {
// return a.stateNumber - b.stateNumber
// })
// var keys = Object.keys(this._states)
// var list = []
// for i:=0; i<keys.length; i++ {
// list.push(this._states[keys[i]])
// }
// return list.sort(function(a, b) {
// return a.stateNumber - b.stateNumber
// })
}
func (this *DFA) toString(literalNames []string, symbolicNames []string) string {
if (this.s0 == nil) {
if this.s0 == nil {
return ""
}
var serializer = NewDFASerializer(this, literalNames, symbolicNames)
@ -133,11 +133,9 @@ func (this *DFA) toString(literalNames []string, symbolicNames []string) string
}
func (this *DFA) toLexerString() string {
if (this.s0 == nil) {
if this.s0 == nil {
return ""
}
var serializer = NewLexerDFASerializer(this)
return serializer.toString()
}

View File

@ -1,119 +1,119 @@
package antlr4
import (
"fmt"
"strconv"
"fmt"
"strconv"
)
// A DFA walker that knows how to dump them to serialized strings.#/
type DFASerializer struct {
dfa *DFA
literalNames, symbolicNames []string
dfa *DFA
literalNames, symbolicNames []string
}
func NewDFASerializer(dfa *DFA, literalNames, symbolicNames []string) *DFASerializer {
if (literalNames == nil){
literalNames = make([]string, 0)
}
if literalNames == nil {
literalNames = make([]string, 0)
}
if (symbolicNames == nil){
symbolicNames = make([]string, 0)
}
if symbolicNames == nil {
symbolicNames = make([]string, 0)
}
this := new(DFASerializer)
this := new(DFASerializer)
this.InitDFASerializer(dfa, literalNames, symbolicNames)
this.InitDFASerializer(dfa, literalNames, symbolicNames)
return this
}
func (this *DFASerializer) InitDFASerializer(dfa *DFA, literalNames, symbolicNames []string) {
this.dfa = dfa
this.literalNames = literalNames
this.symbolicNames = symbolicNames
this.dfa = dfa
this.literalNames = literalNames
this.symbolicNames = symbolicNames
}
func (this *DFASerializer) toString() string {
if(this.dfa.s0 == nil) {
return ""
}
if this.dfa.s0 == nil {
return ""
}
var buf = ""
var states = this.dfa.sortedStates()
for i := 0; i<len(states); i++ {
var s = states[i]
if(s.edges!=nil) {
var n = len(s.edges)
for j :=0; j<n; j++ {
var t = s.edges[j]
if(t!=nil && t.stateNumber != 0x7FFFFFFF) {
buf += this.getStateString(s)
buf += "-"
buf += this.getEdgeLabel(j)
buf += "->"
buf += this.getStateString(t)
buf += "\n"
}
}
}
}
if len(buf) == 0 {
return ""
}
var buf = ""
var states = this.dfa.sortedStates()
for i := 0; i < len(states); i++ {
var s = states[i]
if s.edges != nil {
var n = len(s.edges)
for j := 0; j < n; j++ {
var t = s.edges[j]
if t != nil && t.stateNumber != 0x7FFFFFFF {
buf += this.getStateString(s)
buf += "-"
buf += this.getEdgeLabel(j)
buf += "->"
buf += this.getStateString(t)
buf += "\n"
}
}
}
}
if len(buf) == 0 {
return ""
}
return buf
return buf
}
func (this *DFASerializer) getEdgeLabel(i int) string {
if (i==0) {
return "EOF"
} else if(this.literalNames !=nil || this.symbolicNames!=nil) {
if (this.literalNames[i-1] == ""){
return this.literalNames[i-1]
} else {
return this.symbolicNames[i-1]
}
} else {
return string(i-1)
}
if i == 0 {
return "EOF"
} else if this.literalNames != nil || this.symbolicNames != nil {
if this.literalNames[i-1] == "" {
return this.literalNames[i-1]
} else {
return this.symbolicNames[i-1]
}
} else {
return string(i - 1)
}
}
func (this *DFASerializer) getStateString(s *DFAState) string {
var a,b string
var a, b string
if (s.isAcceptState){
a = ":"
}
if s.isAcceptState {
a = ":"
}
if (s.requiresFullContext){
b = "^"
}
if s.requiresFullContext {
b = "^"
}
var baseStateStr = a + "s" + strconv.Itoa(s.stateNumber) + b
if(s.isAcceptState) {
if (s.predicates != nil) {
return baseStateStr + "=>" + fmt.Sprint(s.predicates)
} else {
return baseStateStr + "=>" + fmt.Sprint(s.prediction)
}
} else {
return baseStateStr
}
var baseStateStr = a + "s" + strconv.Itoa(s.stateNumber) + b
if s.isAcceptState {
if s.predicates != nil {
return baseStateStr + "=>" + fmt.Sprint(s.predicates)
} else {
return baseStateStr + "=>" + fmt.Sprint(s.prediction)
}
} else {
return baseStateStr
}
}
type LexerDFASerializer struct {
DFASerializer
DFASerializer
}
func NewLexerDFASerializer(dfa *DFA) *LexerDFASerializer {
this := new(LexerDFASerializer)
this := new(LexerDFASerializer)
this.InitDFASerializer(dfa, nil, nil)
this.InitDFASerializer(dfa, nil, nil)
return this
}
@ -121,7 +121,3 @@ func NewLexerDFASerializer(dfa *DFA) *LexerDFASerializer {
func (this *LexerDFASerializer) getEdgeLabel(i int) string {
return "'" + string(i) + "'"
}

View File

@ -8,7 +8,7 @@ import (
// Map a predicate to a predicted alternative.///
type PredPrediction struct {
alt int
alt int
pred SemanticContext
}
@ -51,19 +51,19 @@ func (this *PredPrediction) toString() string {
// /
type DFAState struct {
stateNumber int
configs *ATNConfigSet
edges []*DFAState
isAcceptState bool
prediction int
stateNumber int
configs *ATNConfigSet
edges []*DFAState
isAcceptState bool
prediction int
lexerActionExecutor *LexerActionExecutor
requiresFullContext bool
predicates []*PredPrediction
predicates []*PredPrediction
}
func NewDFAState(stateNumber int, configs *ATNConfigSet) *DFAState {
if (configs == nil) {
if configs == nil {
configs = NewATNConfigSet(false)
}
@ -107,14 +107,14 @@ func NewDFAState(stateNumber int, configs *ATNConfigSet) *DFAState {
// Get the set of all alts mentioned by all ATN configurations in this
// DFA state.
func (this *DFAState) getAltSet() *Set {
var alts = NewSet(nil,nil)
if (this.configs != nil) {
var alts = NewSet(nil, nil)
if this.configs != nil {
for i := 0; i < len(this.configs.configs); i++ {
var c = this.configs.configs[i]
alts.add(c.getAlt())
}
}
if (alts.length() == 0) {
if alts.length() == 0 {
return nil
} else {
return alts
@ -134,7 +134,7 @@ func (this *DFAState) getAltSet() *Set {
// {@link //stateNumber} is irrelevant.</p>
func (this *DFAState) equals(other interface{}) bool {
if (this == other) {
if this == other {
return true
} else if _, ok := other.(*DFAState); !ok {
return false
@ -150,17 +150,15 @@ func (this *DFAState) toString() string {
func (this *DFAState) hashString() string {
panic("Not implementd")
// var s string
// if (this.acceptState){
//
// }
//
// return "" + this.configs +
// (this.isAcceptState ?
// "=>" + (this.predicates != nil ?
// this.predicates :
// this.prediction) :
// "")
// var s string
// if (this.acceptState){
//
// }
//
// return "" + this.configs +
// (this.isAcceptState ?
// "=>" + (this.predicates != nil ?
// this.predicates :
// this.prediction) :
// "")
}

View File

@ -1,4 +1,5 @@
package antlr4
import (
"strconv"
)
@ -38,32 +39,32 @@ func NewDiagnosticErrorListener(exactOnly bool) *DiagnosticErrorListener {
}
func (this *DiagnosticErrorListener) reportAmbiguity(recognizer *Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs *ATNConfigSet) {
if (this.exactOnly && !exact) {
if this.exactOnly && !exact {
return
}
var msg = "reportAmbiguity d=" +
this.getDecisionDescription(recognizer, dfa) +
": ambigAlts=" +
this.getConflictingAlts(ambigAlts, configs).toString() +
", input='" +
recognizer.getTokenStream().getTextFromInterval(NewInterval(startIndex, stopIndex)) + "'"
this.getDecisionDescription(recognizer, dfa) +
": ambigAlts=" +
this.getConflictingAlts(ambigAlts, configs).toString() +
", input='" +
recognizer.getTokenStream().getTextFromInterval(NewInterval(startIndex, stopIndex)) + "'"
recognizer.notifyErrorListeners(msg, nil, nil)
}
func (this *DiagnosticErrorListener) reportAttemptingFullContext(recognizer *Parser, dfa *DFA, startIndex, stopIndex int, conflictingAlts *BitSet, configs *ATNConfigSet) {
var msg = "reportAttemptingFullContext d=" +
this.getDecisionDescription(recognizer, dfa) +
", input='" +
recognizer.getTokenStream().getTextFromInterval(NewInterval(startIndex, stopIndex)) + "'"
this.getDecisionDescription(recognizer, dfa) +
", input='" +
recognizer.getTokenStream().getTextFromInterval(NewInterval(startIndex, stopIndex)) + "'"
recognizer.notifyErrorListeners(msg, nil, nil)
}
func (this *DiagnosticErrorListener) reportContextSensitivity(recognizer *Parser, dfa *DFA, startIndex, stopIndex, prediction int, configs *ATNConfigSet) {
var msg = "reportContextSensitivity d=" +
this.getDecisionDescription(recognizer, dfa) +
", input='" +
recognizer.getTokenStream().getTextFromInterval(NewInterval(startIndex, stopIndex)) + "'"
this.getDecisionDescription(recognizer, dfa) +
", input='" +
recognizer.getTokenStream().getTextFromInterval(NewInterval(startIndex, stopIndex)) + "'"
recognizer.notifyErrorListeners(msg, nil, nil)
}
@ -72,11 +73,11 @@ func (this *DiagnosticErrorListener) getDecisionDescription(recognizer *Parser,
var ruleIndex = dfa.atnStartState.ruleIndex
var ruleNames = recognizer.getRuleNames()
if (ruleIndex < 0 || ruleIndex >= len(ruleNames)) {
if ruleIndex < 0 || ruleIndex >= len(ruleNames) {
return strconv.Itoa(decision)
}
var ruleName = ruleNames[ruleIndex]
if (ruleName == "") {
if ruleName == "" {
return strconv.Itoa(decision)
}
return strconv.Itoa(decision) + " (" + ruleName + ")"
@ -94,7 +95,7 @@ func (this *DiagnosticErrorListener) getDecisionDescription(recognizer *Parser,
// returns the set of alternatives represented in {@code configs}.
//
func (this *DiagnosticErrorListener) getConflictingAlts(reportedAlts *BitSet, set *ATNConfigSet) *BitSet {
if (reportedAlts != nil) {
if reportedAlts != nil {
return reportedAlts
}
var result = NewBitSet()
@ -104,10 +105,10 @@ func (this *DiagnosticErrorListener) getConflictingAlts(reportedAlts *BitSet, se
return result
// valuestrings := make([]string, len(result.values()))
// for i,v := range result.values() {
// valuestrings[i] = strconv.Itoa(v)
// }
//
// return "{" + strings.Join(valuestrings, ", ") + "}"
}
// valuestrings := make([]string, len(result.values()))
// for i,v := range result.values() {
// valuestrings[i] = strconv.Itoa(v)
// }
//
// return "{" + strings.Join(valuestrings, ", ") + "}"
}

View File

@ -1,7 +1,7 @@
package antlr4
import (
"fmt"
"fmt"
"strconv"
)
@ -9,7 +9,6 @@ import (
// default implementation of each method does nothing, but can be overridden as
// necessary.
type IErrorListener interface {
syntaxError(recognizer IRecognizer, offendingSymbol interface{}, line, column int, msg string, e IRecognitionException)
reportAmbiguity(recognizer IParser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs *ATNConfigSet)
@ -18,7 +17,6 @@ type IErrorListener interface {
}
type DefaultErrorListener struct {
}
func NewErrorListener() *DefaultErrorListener {
@ -63,7 +61,7 @@ var ConsoleErrorListenerINSTANCE = NewConsoleErrorListener()
// </pre>
//
func (this *ConsoleErrorListener) syntaxError(recognizer IRecognizer, offendingSymbol interface{}, line, column int, msg string, e IRecognitionException) {
fmt.Errorf("line " + strconv.Itoa(line) + ":" + strconv.Itoa(column) + " " + msg)
fmt.Errorf("line " + strconv.Itoa(line) + ":" + strconv.Itoa(column) + " " + msg)
}
type ProxyErrorListener struct {
@ -72,41 +70,34 @@ type ProxyErrorListener struct {
}
func NewProxyErrorListener(delegates []IErrorListener) *ProxyErrorListener {
if (delegates==nil) {
panic("delegates is not provided")
}
if delegates == nil {
panic("delegates is not provided")
}
l := new(ProxyErrorListener)
l.delegates = delegates
l.delegates = delegates
return l
}
func (this *ProxyErrorListener) syntaxError(recognizer IRecognizer, offendingSymbol interface{}, line, column int, msg string, e IRecognitionException) {
for _,d := range this.delegates {
for _, d := range this.delegates {
d.syntaxError(recognizer, offendingSymbol, line, column, msg, e)
}
}
func (this *ProxyErrorListener) reportAmbiguity(recognizer IParser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs *ATNConfigSet) {
for _,d := range this.delegates {
for _, d := range this.delegates {
d.reportAmbiguity(recognizer, dfa, startIndex, stopIndex, exact, ambigAlts, configs)
}
}
func (this *ProxyErrorListener) reportAttemptingFullContext(recognizer IParser, dfa *DFA, startIndex, stopIndex int, conflictingAlts *BitSet, configs *ATNConfigSet) {
for _,d := range this.delegates {
for _, d := range this.delegates {
d.reportAttemptingFullContext(recognizer, dfa, startIndex, stopIndex, conflictingAlts, configs)
}
}
func (this *ProxyErrorListener) reportContextSensitivity(recognizer IParser, dfa *DFA, startIndex, stopIndex, prediction int, configs *ATNConfigSet) {
for _,d := range this.delegates {
for _, d := range this.delegates {
d.reportContextSensitivity(recognizer, dfa, startIndex, stopIndex, prediction, configs)
}
}

View File

@ -1,41 +1,41 @@
package antlr4
import (
"fmt"
"strings"
"reflect"
"strconv"
"fmt"
"reflect"
"strconv"
"strings"
)
type IErrorStrategy interface {
reset(IParser)
recoverInline(IParser) *Token
recover(IParser, IRecognitionException)
sync(IParser)
inErrorRecoveryMode(IParser) bool
reportError(IParser, IRecognitionException)
reportMatch(IParser)
reset(IParser)
recoverInline(IParser) *Token
recover(IParser, IRecognitionException)
sync(IParser)
inErrorRecoveryMode(IParser) bool
reportError(IParser, IRecognitionException)
reportMatch(IParser)
}
type ErrorStrategy struct {
}
func (this *ErrorStrategy) reset(recognizer IParser){
func (this *ErrorStrategy) reset(recognizer IParser) {
}
func (this *ErrorStrategy) recoverInline(recognizer IParser){
func (this *ErrorStrategy) recoverInline(recognizer IParser) {
}
func (this *ErrorStrategy) recover(recognizer IParser, e IRecognitionException){
func (this *ErrorStrategy) recover(recognizer IParser, e IRecognitionException) {
}
func (this *ErrorStrategy) sync(recognizer IParser){
func (this *ErrorStrategy) sync(recognizer IParser) {
}
func (this *ErrorStrategy) inErrorRecoveryMode(recognizer IParser){
func (this *ErrorStrategy) inErrorRecoveryMode(recognizer IParser) {
}
func (this *ErrorStrategy) reportError(recognizer IParser, e IRecognitionException){
func (this *ErrorStrategy) reportError(recognizer IParser, e IRecognitionException) {
}
func (this *ErrorStrategy) reportMatch(recognizer IParser) {
@ -46,45 +46,45 @@ func (this *ErrorStrategy) reportMatch(recognizer IParser) {
// error reporting and recovery in ANTLR parsers.
//
type DefaultErrorStrategy struct {
*ErrorStrategy
*ErrorStrategy
errorRecoveryMode bool
lastErrorIndex int
lastErrorStates *IntervalSet
errorRecoveryMode bool
lastErrorIndex int
lastErrorStates *IntervalSet
}
func NewDefaultErrorStrategy() *DefaultErrorStrategy {
d := new(DefaultErrorStrategy)
d.InitDefaultErrorStrategy()
return d
d.InitDefaultErrorStrategy()
return d
}
func (d *DefaultErrorStrategy) InitDefaultErrorStrategy() {
// Indicates whether the error strategy is currently "recovering from an
// error". This is used to suppress reporting multiple error messages while
// attempting to recover from a detected syntax error.
//
// @see //inErrorRecoveryMode
//
d.errorRecoveryMode = false
// Indicates whether the error strategy is currently "recovering from an
// error". This is used to suppress reporting multiple error messages while
// attempting to recover from a detected syntax error.
//
// @see //inErrorRecoveryMode
//
d.errorRecoveryMode = false
// The index into the input stream where the last error occurred.
// This is used to prevent infinite loops where an error is found
// but no token is consumed during recovery...another error is found,
// ad nauseum. This is a failsafe mechanism to guarantee that at least
// one token/tree node is consumed for two errors.
//
d.lastErrorIndex = -1
d.lastErrorStates = nil
// The index into the input stream where the last error occurred.
// This is used to prevent infinite loops where an error is found
// but no token is consumed during recovery...another error is found,
// ad nauseum. This is a failsafe mechanism to guarantee that at least
// one token/tree node is consumed for two errors.
//
d.lastErrorIndex = -1
d.lastErrorStates = nil
}
// <p>The default implementation simply calls {@link //endErrorCondition} to
// ensure that the handler is not in error recovery mode.</p>
func (this *DefaultErrorStrategy) reset(recognizer IParser) {
this.endErrorCondition(recognizer)
this.endErrorCondition(recognizer)
}
//
@ -94,11 +94,11 @@ func (this *DefaultErrorStrategy) reset(recognizer IParser) {
// @param recognizer the parser instance
//
func (this *DefaultErrorStrategy) beginErrorCondition(recognizer IParser) {
this.errorRecoveryMode = true
this.errorRecoveryMode = true
}
func (this *DefaultErrorStrategy) inErrorRecoveryMode(recognizer IParser) bool {
return this.errorRecoveryMode
return this.errorRecoveryMode
}
//
@ -108,9 +108,9 @@ func (this *DefaultErrorStrategy) inErrorRecoveryMode(recognizer IParser) bool {
// @param recognizer
//
func (this *DefaultErrorStrategy) endErrorCondition(recognizer IParser) {
this.errorRecoveryMode = false
this.lastErrorStates = nil
this.lastErrorIndex = -1
this.errorRecoveryMode = false
this.lastErrorStates = nil
this.lastErrorIndex = -1
}
//
@ -119,7 +119,7 @@ func (this *DefaultErrorStrategy) endErrorCondition(recognizer IParser) {
// <p>The default implementation simply calls {@link //endErrorCondition}.</p>
//
func (this *DefaultErrorStrategy) reportMatch(recognizer IParser) {
this.endErrorCondition(recognizer)
this.endErrorCondition(recognizer)
}
//
@ -142,25 +142,25 @@ func (this *DefaultErrorStrategy) reportMatch(recognizer IParser) {
// </ul>
//
func (this *DefaultErrorStrategy) reportError(recognizer IParser, e IRecognitionException) {
// if we've already reported an error and have not matched a token
// yet successfully, don't report any errors.
if(this.inErrorRecoveryMode(recognizer)) {
return // don't report spurious errors
}
this.beginErrorCondition(recognizer)
// if we've already reported an error and have not matched a token
// yet successfully, don't report any errors.
if this.inErrorRecoveryMode(recognizer) {
return // don't report spurious errors
}
this.beginErrorCondition(recognizer)
switch t := e.(type) {
default:
fmt.Println("unknown recognition error type: " + reflect.TypeOf(e).Name())
// fmt.Println(e.stack)
recognizer.notifyErrorListeners(e.getMessage(), e.getOffendingToken(), e)
case *NoViableAltException:
this.reportNoViableAlternative(recognizer, t)
case *InputMismatchException:
this.reportInputMismatch(recognizer, t)
case *FailedPredicateException:
this.reportFailedPredicate(recognizer, t)
}
switch t := e.(type) {
default:
fmt.Println("unknown recognition error type: " + reflect.TypeOf(e).Name())
// fmt.Println(e.stack)
recognizer.notifyErrorListeners(e.getMessage(), e.getOffendingToken(), e)
case *NoViableAltException:
this.reportNoViableAlternative(recognizer, t)
case *InputMismatchException:
this.reportInputMismatch(recognizer, t)
case *FailedPredicateException:
this.reportFailedPredicate(recognizer, t)
}
}
//
@ -172,21 +172,21 @@ func (this *DefaultErrorStrategy) reportError(recognizer IParser, e IRecognition
//
func (this *DefaultErrorStrategy) recover(recognizer IParser, e IRecognitionException) {
if (this.lastErrorIndex==recognizer.getInputStream().index() &&
this.lastErrorStates != nil && this.lastErrorStates.contains(recognizer.getState())) {
if this.lastErrorIndex == recognizer.getInputStream().index() &&
this.lastErrorStates != nil && this.lastErrorStates.contains(recognizer.getState()) {
// uh oh, another error at same token index and previously-visited
// state in ATN must be a case where LT(1) is in the recovery
// token set so nothing got consumed. Consume a single token
// at least to prevent an infinite loop this is a failsafe.
recognizer.consume()
}
this.lastErrorIndex = recognizer.getInputStream().index()
if (this.lastErrorStates == nil) {
this.lastErrorStates = NewIntervalSet()
}
this.lastErrorStates.addOne(recognizer.getState())
var followSet = this.getErrorRecoverySet(recognizer)
this.consumeUntil(recognizer, followSet)
}
this.lastErrorIndex = recognizer.getInputStream().index()
if this.lastErrorStates == nil {
this.lastErrorStates = NewIntervalSet()
}
this.lastErrorStates.addOne(recognizer.getState())
var followSet = this.getErrorRecoverySet(recognizer)
this.consumeUntil(recognizer, followSet)
}
// The default implementation of {@link ANTLRErrorStrategy//sync} makes sure
@ -235,43 +235,43 @@ func (this *DefaultErrorStrategy) recover(recognizer IParser, e IRecognitionExce
// functionality by simply overriding this method as a blank { }.</p>
//
func (this *DefaultErrorStrategy) sync(recognizer IParser) {
// If already recovering, don't try to sync
if (this.inErrorRecoveryMode(recognizer)) {
return
}
var s = recognizer.getInterpreter().atn.states[recognizer.getState()]
var la = recognizer.getTokenStream().LA(1)
// try cheaper subset first might get lucky. seems to shave a wee bit off
if (la==TokenEOF || recognizer.getATN().nextTokens(s,nil).contains(la)) {
return
}
// Return but don't end recovery. only do that upon valid token match
if(recognizer.isExpectedToken(la)) {
return
}
switch (s.getStateType()) {
case ATNStateBLOCK_START:
case ATNStateSTAR_BLOCK_START:
case ATNStatePLUS_BLOCK_START:
case ATNStateSTAR_LOOP_ENTRY:
// report error and recover if possible
if( this.singleTokenDeletion(recognizer) != nil) {
return
} else {
panic(NewInputMismatchException(recognizer))
}
break
case ATNStatePLUS_LOOP_BACK:
case ATNStateSTAR_LOOP_BACK:
this.reportUnwantedToken(recognizer)
var expecting = NewIntervalSet()
expecting.addSet(recognizer.getExpectedTokens())
var whatFollowsLoopIterationOrRule = expecting.addSet(this.getErrorRecoverySet(recognizer))
this.consumeUntil(recognizer, whatFollowsLoopIterationOrRule)
break
default:
// do nothing if we can't identify the exact kind of ATN state
}
// If already recovering, don't try to sync
if this.inErrorRecoveryMode(recognizer) {
return
}
var s = recognizer.getInterpreter().atn.states[recognizer.getState()]
var la = recognizer.getTokenStream().LA(1)
// try cheaper subset first might get lucky. seems to shave a wee bit off
if la == TokenEOF || recognizer.getATN().nextTokens(s, nil).contains(la) {
return
}
// Return but don't end recovery. only do that upon valid token match
if recognizer.isExpectedToken(la) {
return
}
switch s.getStateType() {
case ATNStateBLOCK_START:
case ATNStateSTAR_BLOCK_START:
case ATNStatePLUS_BLOCK_START:
case ATNStateSTAR_LOOP_ENTRY:
// report error and recover if possible
if this.singleTokenDeletion(recognizer) != nil {
return
} else {
panic(NewInputMismatchException(recognizer))
}
break
case ATNStatePLUS_LOOP_BACK:
case ATNStateSTAR_LOOP_BACK:
this.reportUnwantedToken(recognizer)
var expecting = NewIntervalSet()
expecting.addSet(recognizer.getExpectedTokens())
var whatFollowsLoopIterationOrRule = expecting.addSet(this.getErrorRecoverySet(recognizer))
this.consumeUntil(recognizer, whatFollowsLoopIterationOrRule)
break
default:
// do nothing if we can't identify the exact kind of ATN state
}
}
// This is called by {@link //reportError} when the exception is a
@ -283,19 +283,19 @@ func (this *DefaultErrorStrategy) sync(recognizer IParser) {
// @param e the recognition exception
//
func (this *DefaultErrorStrategy) reportNoViableAlternative(recognizer IParser, e *NoViableAltException) {
var tokens = recognizer.getTokenStream()
var input string
if(tokens != nil) {
if (e.startToken.tokenType==TokenEOF) {
input = "<EOF>"
} else {
input = tokens.getTextFromTokens(e.startToken, e.offendingToken)
}
} else {
input = "<unknown input>"
}
var msg = "no viable alternative at input " + this.escapeWSAndQuote(input)
recognizer.notifyErrorListeners(msg, e.offendingToken, e)
var tokens = recognizer.getTokenStream()
var input string
if tokens != nil {
if e.startToken.tokenType == TokenEOF {
input = "<EOF>"
} else {
input = tokens.getTextFromTokens(e.startToken, e.offendingToken)
}
} else {
input = "<unknown input>"
}
var msg = "no viable alternative at input " + this.escapeWSAndQuote(input)
recognizer.notifyErrorListeners(msg, e.offendingToken, e)
}
//
@ -308,9 +308,9 @@ func (this *DefaultErrorStrategy) reportNoViableAlternative(recognizer IParser,
// @param e the recognition exception
//
func (this *DefaultErrorStrategy) reportInputMismatch(recognizer IParser, e *InputMismatchException) {
var msg = "mismatched input " + this.getTokenErrorDisplay(e.offendingToken) +
" expecting " + e.getExpectedTokens().toStringVerbose(recognizer.getLiteralNames(), recognizer.getSymbolicNames(), false)
recognizer.notifyErrorListeners(msg, e.offendingToken, e)
var msg = "mismatched input " + this.getTokenErrorDisplay(e.offendingToken) +
" expecting " + e.getExpectedTokens().toStringVerbose(recognizer.getLiteralNames(), recognizer.getSymbolicNames(), false)
recognizer.notifyErrorListeners(msg, e.offendingToken, e)
}
//
@ -323,9 +323,9 @@ func (this *DefaultErrorStrategy) reportInputMismatch(recognizer IParser, e *Inp
// @param e the recognition exception
//
func (this *DefaultErrorStrategy) reportFailedPredicate(recognizer IParser, e *FailedPredicateException) {
var ruleName = recognizer.getRuleNames()[recognizer.getParserRuleContext().getRuleIndex()]
var msg = "rule " + ruleName + " " + e.message
recognizer.notifyErrorListeners(msg, e.offendingToken, e)
var ruleName = recognizer.getRuleNames()[recognizer.getParserRuleContext().getRuleIndex()]
var msg = "rule " + ruleName + " " + e.message
recognizer.notifyErrorListeners(msg, e.offendingToken, e)
}
// This method is called to report a syntax error which requires the removal
@ -346,17 +346,18 @@ func (this *DefaultErrorStrategy) reportFailedPredicate(recognizer IParser, e *F
// @param recognizer the parser instance
//
func (this *DefaultErrorStrategy) reportUnwantedToken(recognizer IParser) {
if (this.inErrorRecoveryMode(recognizer)) {
return
}
this.beginErrorCondition(recognizer)
var t = recognizer.getCurrentToken()
var tokenName = this.getTokenErrorDisplay(t)
var expecting = this.getExpectedTokens(recognizer)
var msg = "extraneous input " + tokenName + " expecting " +
expecting.toStringVerbose(recognizer.getLiteralNames(), recognizer.getSymbolicNames(), false)
recognizer.notifyErrorListeners(msg, t, nil)
if this.inErrorRecoveryMode(recognizer) {
return
}
this.beginErrorCondition(recognizer)
var t = recognizer.getCurrentToken()
var tokenName = this.getTokenErrorDisplay(t)
var expecting = this.getExpectedTokens(recognizer)
var msg = "extraneous input " + tokenName + " expecting " +
expecting.toStringVerbose(recognizer.getLiteralNames(), recognizer.getSymbolicNames(), false)
recognizer.notifyErrorListeners(msg, t, nil)
}
// This method is called to report a syntax error which requires the
// insertion of a missing token into the input stream. At the time this
// method is called, the missing token has not yet been inserted. When this
@ -374,15 +375,15 @@ func (this *DefaultErrorStrategy) reportUnwantedToken(recognizer IParser) {
// @param recognizer the parser instance
//
func (this *DefaultErrorStrategy) reportMissingToken(recognizer IParser) {
if ( this.inErrorRecoveryMode(recognizer)) {
return
}
this.beginErrorCondition(recognizer)
var t = recognizer.getCurrentToken()
var expecting = this.getExpectedTokens(recognizer)
var msg = "missing " + expecting.toStringVerbose(recognizer.getLiteralNames(), recognizer.getSymbolicNames(), false) +
" at " + this.getTokenErrorDisplay(t)
recognizer.notifyErrorListeners(msg, t, nil)
if this.inErrorRecoveryMode(recognizer) {
return
}
this.beginErrorCondition(recognizer)
var t = recognizer.getCurrentToken()
var expecting = this.getExpectedTokens(recognizer)
var msg = "missing " + expecting.toStringVerbose(recognizer.getLiteralNames(), recognizer.getSymbolicNames(), false) +
" at " + this.getTokenErrorDisplay(t)
recognizer.notifyErrorListeners(msg, t, nil)
}
// <p>The default implementation attempts to recover from the mismatched input
@ -435,20 +436,20 @@ func (this *DefaultErrorStrategy) reportMissingToken(recognizer IParser) {
// in rule {@code atom}. It can assume that you forgot the {@code ')'}.
//
func (this *DefaultErrorStrategy) recoverInline(recognizer IParser) *Token {
// SINGLE TOKEN DELETION
var matchedSymbol = this.singleTokenDeletion(recognizer)
if (matchedSymbol != nil) {
// we have deleted the extra token.
// now, move past ttype token as if all were ok
recognizer.consume()
return matchedSymbol
}
// SINGLE TOKEN INSERTION
if (this.singleTokenInsertion(recognizer)) {
return this.getMissingSymbol(recognizer)
}
// even that didn't work must panic the exception
panic(NewInputMismatchException(recognizer))
// SINGLE TOKEN DELETION
var matchedSymbol = this.singleTokenDeletion(recognizer)
if matchedSymbol != nil {
// we have deleted the extra token.
// now, move past ttype token as if all were ok
recognizer.consume()
return matchedSymbol
}
// SINGLE TOKEN INSERTION
if this.singleTokenInsertion(recognizer) {
return this.getMissingSymbol(recognizer)
}
// even that didn't work must panic the exception
panic(NewInputMismatchException(recognizer))
}
//
@ -469,20 +470,20 @@ func (this *DefaultErrorStrategy) recoverInline(recognizer IParser) *Token {
// strategy for the current mismatched input, otherwise {@code false}
//
func (this *DefaultErrorStrategy) singleTokenInsertion(recognizer IParser) bool {
var currentSymbolType = recognizer.getTokenStream().LA(1)
// if current token is consistent with what could come after current
// ATN state, then we know we're missing a token error recovery
// is free to conjure up and insert the missing token
var atn = recognizer.getInterpreter().atn
var currentState = atn.states[recognizer.getState()]
var next = currentState.getTransitions()[0].getTarget()
var expectingAtLL2 = atn.nextTokens(next, recognizer.getParserRuleContext())
if (expectingAtLL2.contains(currentSymbolType) ){
this.reportMissingToken(recognizer)
return true
} else {
return false
}
var currentSymbolType = recognizer.getTokenStream().LA(1)
// if current token is consistent with what could come after current
// ATN state, then we know we're missing a token error recovery
// is free to conjure up and insert the missing token
var atn = recognizer.getInterpreter().atn
var currentState = atn.states[recognizer.getState()]
var next = currentState.getTransitions()[0].getTarget()
var expectingAtLL2 = atn.nextTokens(next, recognizer.getParserRuleContext())
if expectingAtLL2.contains(currentSymbolType) {
this.reportMissingToken(recognizer)
return true
} else {
return false
}
}
// This method implements the single-token deletion inline error recovery
@ -504,22 +505,22 @@ func (this *DefaultErrorStrategy) singleTokenInsertion(recognizer IParser) bool
// {@code nil}
//
func (this *DefaultErrorStrategy) singleTokenDeletion(recognizer IParser) *Token {
var nextTokenType = recognizer.getTokenStream().LA(2)
var expecting = this.getExpectedTokens(recognizer)
if (expecting.contains(nextTokenType)) {
this.reportUnwantedToken(recognizer)
// print("recoverFromMismatchedToken deleting " \
// + str(recognizer.getTokenStream().LT(1)) \
// + " since " + str(recognizer.getTokenStream().LT(2)) \
// + " is what we want", file=sys.stderr)
recognizer.consume() // simply delete extra token
// we want to return the token we're actually matching
var matchedSymbol = recognizer.getCurrentToken()
this.reportMatch(recognizer) // we know current token is correct
return matchedSymbol
} else {
return nil
}
var nextTokenType = recognizer.getTokenStream().LA(2)
var expecting = this.getExpectedTokens(recognizer)
if expecting.contains(nextTokenType) {
this.reportUnwantedToken(recognizer)
// print("recoverFromMismatchedToken deleting " \
// + str(recognizer.getTokenStream().LT(1)) \
// + " since " + str(recognizer.getTokenStream().LT(2)) \
// + " is what we want", file=sys.stderr)
recognizer.consume() // simply delete extra token
// we want to return the token we're actually matching
var matchedSymbol = recognizer.getCurrentToken()
this.reportMatch(recognizer) // we know current token is correct
return matchedSymbol
} else {
return nil
}
}
// Conjure up a missing token during error recovery.
@ -542,27 +543,27 @@ func (this *DefaultErrorStrategy) singleTokenDeletion(recognizer IParser) *Token
// override this method to create the appropriate tokens.
//
func (this *DefaultErrorStrategy) getMissingSymbol(recognizer IParser) *Token {
var currentSymbol = recognizer.getCurrentToken()
var expecting = this.getExpectedTokens(recognizer)
var expectedTokenType = expecting.first()
var tokenText string
if (expectedTokenType==TokenEOF) {
tokenText = "<missing EOF>"
} else {
tokenText = "<missing " + recognizer.getLiteralNames()[expectedTokenType] + ">"
}
var current = currentSymbol
var lookback = recognizer.getTokenStream().LT(-1)
if (current.tokenType==TokenEOF && lookback != nil) {
current = lookback
}
var currentSymbol = recognizer.getCurrentToken()
var expecting = this.getExpectedTokens(recognizer)
var expectedTokenType = expecting.first()
var tokenText string
if expectedTokenType == TokenEOF {
tokenText = "<missing EOF>"
} else {
tokenText = "<missing " + recognizer.getLiteralNames()[expectedTokenType] + ">"
}
var current = currentSymbol
var lookback = recognizer.getTokenStream().LT(-1)
if current.tokenType == TokenEOF && lookback != nil {
current = lookback
}
tf := recognizer.getTokenFactory()
return tf.create(current.source, expectedTokenType, tokenText, TokenDefaultChannel, -1, -1, current.line, current.column)
tf := recognizer.getTokenFactory()
return tf.create(current.source, expectedTokenType, tokenText, TokenDefaultChannel, -1, -1, current.line, current.column)
}
func (this *DefaultErrorStrategy) getExpectedTokens(recognizer IParser) *IntervalSet {
return recognizer.getExpectedTokens()
return recognizer.getExpectedTokens()
}
// How should a token be displayed in an error message? The default
@ -574,25 +575,25 @@ func (this *DefaultErrorStrategy) getExpectedTokens(recognizer IParser) *Interva
// so that it creates a NewJava type.
//
func (this *DefaultErrorStrategy) getTokenErrorDisplay(t *Token) string {
if (t == nil) {
return "<no token>"
}
var s = t.text()
if (s == "") {
if (t.tokenType==TokenEOF) {
s = "<EOF>"
} else {
s = "<" + strconv.Itoa(t.tokenType) + ">"
}
}
return this.escapeWSAndQuote(s)
if t == nil {
return "<no token>"
}
var s = t.text()
if s == "" {
if t.tokenType == TokenEOF {
s = "<EOF>"
} else {
s = "<" + strconv.Itoa(t.tokenType) + ">"
}
}
return this.escapeWSAndQuote(s)
}
func (this *DefaultErrorStrategy) escapeWSAndQuote(s string) string {
s = strings.Replace(s,"\t","\\t", -1)
s = strings.Replace(s,"\n","\\n", -1)
s = strings.Replace(s,"\r","\\r", -1)
return "'" + s + "'"
s = strings.Replace(s, "\t", "\\t", -1)
s = strings.Replace(s, "\n", "\\n", -1)
s = strings.Replace(s, "\r", "\\r", -1)
return "'" + s + "'"
}
// Compute the error recovery set for the current rule. During
@ -688,28 +689,28 @@ func (this *DefaultErrorStrategy) escapeWSAndQuote(s string) string {
// at run-time upon error to avoid overhead during parsing.
//
func (this *DefaultErrorStrategy) getErrorRecoverySet(recognizer IParser) *IntervalSet {
var atn = recognizer.getInterpreter().atn
var ctx = recognizer.getParserRuleContext()
var recoverSet = NewIntervalSet()
for (ctx != nil && ctx.getInvokingState()>=0) {
// compute what follows who invoked us
var invokingState = atn.states[ctx.getInvokingState()]
var rt = invokingState.getTransitions()[0]
var follow = atn.nextTokens(rt.(*RuleTransition).followState, nil)
recoverSet.addSet(follow)
ctx = ctx.getParent().(IParserRuleContext)
}
recoverSet.removeOne(TokenEpsilon)
return recoverSet
var atn = recognizer.getInterpreter().atn
var ctx = recognizer.getParserRuleContext()
var recoverSet = NewIntervalSet()
for ctx != nil && ctx.getInvokingState() >= 0 {
// compute what follows who invoked us
var invokingState = atn.states[ctx.getInvokingState()]
var rt = invokingState.getTransitions()[0]
var follow = atn.nextTokens(rt.(*RuleTransition).followState, nil)
recoverSet.addSet(follow)
ctx = ctx.getParent().(IParserRuleContext)
}
recoverSet.removeOne(TokenEpsilon)
return recoverSet
}
// Consume tokens until one matches the given token set.//
func (this *DefaultErrorStrategy) consumeUntil(recognizer IParser, set *IntervalSet) {
var ttype = recognizer.getTokenStream().LA(1)
for( ttype != TokenEOF && !set.contains(ttype)) {
recognizer.consume()
ttype = recognizer.getTokenStream().LA(1)
}
var ttype = recognizer.getTokenStream().LA(1)
for ttype != TokenEOF && !set.contains(ttype) {
recognizer.consume()
ttype = recognizer.getTokenStream().LA(1)
}
}
//
@ -741,13 +742,13 @@ func (this *DefaultErrorStrategy) consumeUntil(recognizer IParser, set *Interval
// @see Parser//setErrorHandler(ANTLRErrorStrategy)
type BailErrorStrategy struct {
DefaultErrorStrategy
DefaultErrorStrategy
}
func NewBailErrorStrategy() *BailErrorStrategy {
this := new(BailErrorStrategy)
this.InitDefaultErrorStrategy()
this := new(BailErrorStrategy)
this.InitDefaultErrorStrategy()
return this
}
@ -758,24 +759,22 @@ func NewBailErrorStrategy() *BailErrorStrategy {
// original {@link RecognitionException}.
//
func (this *BailErrorStrategy) recover(recognizer IParser, e IRecognitionException) {
var context = recognizer.getParserRuleContext()
for (context != nil) {
context.setException(e)
context = context.getParent().(IParserRuleContext)
}
panic(NewParseCancellationException()) // TODO we don't emit e properly
var context = recognizer.getParserRuleContext()
for context != nil {
context.setException(e)
context = context.getParent().(IParserRuleContext)
}
panic(NewParseCancellationException()) // TODO we don't emit e properly
}
// Make sure we don't attempt to recover inline if the parser
// successfully recovers, it won't panic an exception.
//
func (this *BailErrorStrategy) recoverInline(recognizer IParser) {
this.recover(recognizer, NewInputMismatchException(recognizer))
this.recover(recognizer, NewInputMismatchException(recognizer))
}
// Make sure we don't attempt to recover from problems in subrules.//
func (this *BailErrorStrategy) sync(recognizer IParser) {
// pass
// pass
}

View File

@ -1,7 +1,6 @@
package antlr4
import (
)
import ()
// The root of the ANTLR exception hierarchy. In general, ANTLR tracks just
// 3 kinds of errors: prediction errors, failed predicate errors, and
@ -9,42 +8,39 @@ import (
// in the input, where it is in the ATN, the rule invocation stack,
// and what kind of problem occurred.
type IRecognitionException interface {
getOffendingToken() *Token
getMessage() string
}
type RecognitionException struct {
message string
recognizer IRecognizer
message string
recognizer IRecognizer
offendingToken *Token
offendingState int
ctx IRuleContext
input CharStream
ctx IRuleContext
input CharStream
}
func NewRecognitionException(message string, recognizer IRecognizer, input CharStream, ctx IRuleContext) *RecognitionException {
// todo
// Error.call(this)
//
// if (!!Error.captureStackTrace) {
// Error.captureStackTrace(this, RecognitionException)
// } else {
// var stack = NewError().stack
// }
// todo
// Error.call(this)
//
// if (!!Error.captureStackTrace) {
// Error.captureStackTrace(this, RecognitionException)
// } else {
// var stack = NewError().stack
// }
// TODO may be able to use - "runtime" func Stack(buf []byte, all bool) int
t := new(RecognitionException)
t.InitRecognitionException(message, recognizer, input, ctx)
return t
return t
}
func (t *RecognitionException) InitRecognitionException(message string, recognizer IRecognizer, input CharStream, ctx IRuleContext){
func (t *RecognitionException) InitRecognitionException(message string, recognizer IRecognizer, input CharStream, ctx IRuleContext) {
t.message = message
t.recognizer = recognizer
@ -60,7 +56,7 @@ func (t *RecognitionException) InitRecognitionException(message string, recogniz
// {@link DecisionState} number. For others, it is the state whose outgoing
// edge we couldn't match.
t.offendingState = -1
if (t.recognizer!=nil) {
if t.recognizer != nil {
t.offendingState = t.recognizer.getState()
}
}
@ -86,58 +82,52 @@ func (this *RecognitionException) getOffendingToken() *Token {
// state in the ATN, or {@code nil} if the information is not available.
// /
func (this *RecognitionException) getExpectedTokens() *IntervalSet {
if (this.recognizer!=nil) {
return this.recognizer.getATN().getExpectedTokens(this.offendingState, this.ctx)
} else {
return nil
}
if this.recognizer != nil {
return this.recognizer.getATN().getExpectedTokens(this.offendingState, this.ctx)
} else {
return nil
}
}
func (this *RecognitionException) toString() string {
return this.message
return this.message
}
type LexerNoViableAltException struct {
RecognitionException
startIndex int
startIndex int
deadEndConfigs *ATNConfigSet
}
func NewLexerNoViableAltException(lexer *Lexer, input CharStream, startIndex int,
deadEndConfigs *ATNConfigSet) *LexerNoViableAltException {
this := new (LexerNoViableAltException)
this := new(LexerNoViableAltException)
this.InitRecognitionException("", lexer, input, nil)
this.startIndex = startIndex
this.deadEndConfigs = deadEndConfigs
this.deadEndConfigs = deadEndConfigs
return this
return this
}
func (this *LexerNoViableAltException) toString() string {
var symbol = ""
if (this.startIndex >= 0 && this.startIndex < this.input.size()) {
symbol = this.input.getTextFromInterval(NewInterval(this.startIndex,this.startIndex))
}
return "LexerNoViableAltException" + symbol
var symbol = ""
if this.startIndex >= 0 && this.startIndex < this.input.size() {
symbol = this.input.getTextFromInterval(NewInterval(this.startIndex, this.startIndex))
}
return "LexerNoViableAltException" + symbol
}
type NoViableAltException struct {
RecognitionException
startToken *Token
startToken *Token
offendingToken *Token
ctx IParserRuleContext
ctx IParserRuleContext
deadEndConfigs *ATNConfigSet
}
// Indicates that the parser could not decide which of two or more paths
@ -147,34 +137,34 @@ type NoViableAltException struct {
//
func NewNoViableAltException(recognizer IParser, input CharStream, startToken *Token, offendingToken *Token, deadEndConfigs *ATNConfigSet, ctx IParserRuleContext) *NoViableAltException {
if (ctx == nil){
if ctx == nil {
ctx = recognizer.getParserRuleContext()
}
if (offendingToken == nil){
if offendingToken == nil {
offendingToken = recognizer.getCurrentToken()
}
if (startToken == nil){
if startToken == nil {
startToken = recognizer.getCurrentToken()
}
if (input == nil){
if input == nil {
input = recognizer.getInputStream()
}
this := new(NoViableAltException)
this.InitRecognitionException("", recognizer, input, ctx)
// Which configurations did we try at input.index() that couldn't match
// Which configurations did we try at input.index() that couldn't match
// input.LT(1)?//
this.deadEndConfigs = deadEndConfigs
// The token object at the start index the input stream might
// not be buffering tokens so get a reference to it. (At the
// time the error occurred, of course the stream needs to keep a
// buffer all of the tokens but later we might not have access to those.)
this.startToken = startToken
this.offendingToken = offendingToken
this.deadEndConfigs = deadEndConfigs
// The token object at the start index the input stream might
// not be buffering tokens so get a reference to it. (At the
// time the error occurred, of course the stream needs to keep a
// buffer all of the tokens but later we might not have access to those.)
this.startToken = startToken
this.offendingToken = offendingToken
return this
}
@ -203,13 +193,11 @@ func NewInputMismatchException(recognizer IParser) *InputMismatchException {
// prediction.
type FailedPredicateException struct {
RecognitionException
ruleIndex int
ruleIndex int
predicateIndex int
predicate string
predicate string
}
func NewFailedPredicateException(recognizer *Parser, predicate string, message string) *FailedPredicateException {
@ -218,38 +206,34 @@ func NewFailedPredicateException(recognizer *Parser, predicate string, message s
this.InitRecognitionException(this.formatMessage(predicate, message), recognizer, recognizer.getInputStream(), recognizer._ctx)
var s = recognizer._interp.atn.states[recognizer.state]
var trans = s.getTransitions()[0]
if trans2, ok := trans.(*PredicateTransition); ok {
this.ruleIndex = trans2.ruleIndex
this.predicateIndex = trans2.predIndex
} else {
this.ruleIndex = 0
this.predicateIndex = 0
}
this.predicate = predicate
this.offendingToken = recognizer.getCurrentToken()
var s = recognizer.Interpreter.atn.states[recognizer.state]
var trans = s.getTransitions()[0]
if trans2, ok := trans.(*PredicateTransition); ok {
this.ruleIndex = trans2.ruleIndex
this.predicateIndex = trans2.predIndex
} else {
this.ruleIndex = 0
this.predicateIndex = 0
}
this.predicate = predicate
this.offendingToken = recognizer.getCurrentToken()
return this
return this
}
func (this *FailedPredicateException) formatMessage(predicate, message string) string {
if (message != "") {
return message
} else {
return "failed predicate: {" + predicate + "}?"
}
if message != "" {
return message
} else {
return "failed predicate: {" + predicate + "}?"
}
}
type ParseCancellationException struct {
}
func NewParseCancellationException() *ParseCancellationException {
// Error.call(this)
// Error.captureStackTrace(this, ParseCancellationException)
// Error.call(this)
// Error.captureStackTrace(this, ParseCancellationException)
return new(ParseCancellationException)
}

View File

@ -2,8 +2,8 @@ package antlr4
import (
"bytes"
"os"
"io"
"os"
)
//
@ -25,7 +25,4 @@ func NewFileStream(fileName string) {
io.Copy(buf, f) // Error handling elided for brevity.
f.Close()
}

View File

@ -1,10 +1,10 @@
package antlr4
type InputStream struct {
name string
index int
data []rune
size int
name string
index int
data []rune
size int
}
func NewInputStream(data string) *InputStream {
@ -73,7 +73,7 @@ func (is *InputStream) getText(start int, stop int) string {
if start >= is.size {
return ""
} else {
return string(is.data[start:stop+1])
return string(is.data[start : stop+1])
}
}

View File

@ -1,7 +1,6 @@
package antlr4
type IntStream interface {
consume()
LA(int) int
mark() int
@ -10,5 +9,4 @@ type IntStream interface {
seek(index int)
size() int
getSourceName() string
}

View File

@ -1,17 +1,17 @@
package antlr4
import (
"strings"
"strconv"
"strings"
)
type Interval struct {
start int
stop int
stop int
}
/* stop is not included! */
func NewInterval(start, stop int) *Interval{
func NewInterval(start, stop int) *Interval {
i := new(Interval)
i.start = start
@ -24,7 +24,7 @@ func (i *Interval) contains(item int) bool {
}
func (i *Interval) toString() string {
if(i.start==i.stop-1) {
if i.start == i.stop-1 {
return strconv.Itoa(i.start)
} else {
return strconv.Itoa(i.start) + ".." + strconv.Itoa(i.stop-1)
@ -37,7 +37,7 @@ func (i *Interval) length() int {
type IntervalSet struct {
intervals []*Interval
readOnly bool
readOnly bool
}
func NewIntervalSet() *IntervalSet {
@ -51,7 +51,7 @@ func NewIntervalSet() *IntervalSet {
}
func (i *IntervalSet) first() int {
if (len(i.intervals)==0) {
if len(i.intervals) == 0 {
return TokenInvalidType
} else {
return i.intervals[0].start
@ -59,30 +59,30 @@ func (i *IntervalSet) first() int {
}
func (i *IntervalSet) addOne(v int) {
i.addInterval(NewInterval(v, v + 1))
i.addInterval(NewInterval(v, v+1))
}
func (i *IntervalSet) addRange(l, h int) {
i.addInterval(NewInterval(l, h + 1))
i.addInterval(NewInterval(l, h+1))
}
func (is *IntervalSet) addInterval(v *Interval) {
if (is.intervals == nil) {
if is.intervals == nil {
is.intervals = make([]*Interval, 0)
is.intervals = append( is.intervals, v )
is.intervals = append(is.intervals, v)
} else {
// find insert pos
for k := 0; k < len(is.intervals); k++ {
var i = is.intervals[k]
// distinct range -> insert
if (v.stop < i.start) {
if v.stop < i.start {
// is.intervals = splice(k, 0, v)
is.intervals = append(is.intervals[0:k], append([]*Interval{v}, is.intervals[k:]...)...)
return
} else if (v.stop == i.start) {
} else if v.stop == i.start {
is.intervals[k].start = v.start
return
} else if (v.start <= i.stop) {
} else if v.start <= i.stop {
is.intervals[k] = NewInterval(intMin(i.start, v.start), intMax(i.stop, v.stop))
is.reduce(k)
return
@ -94,7 +94,7 @@ func (is *IntervalSet) addInterval(v *Interval) {
}
func (i *IntervalSet) addSet(other *IntervalSet) *IntervalSet {
if (other.intervals != nil) {
if other.intervals != nil {
for k := 0; k < len(other.intervals); k++ {
var i2 = other.intervals[k]
i.addInterval(NewInterval(i2.start, i2.stop))
@ -105,35 +105,35 @@ func (i *IntervalSet) addSet(other *IntervalSet) *IntervalSet {
func (i *IntervalSet) reduce(k int) {
// only need to reduce if k is not the last
if (k < len(i.intervals) - 1) {
if k < len(i.intervals)-1 {
var l = i.intervals[k]
var r = i.intervals[k + 1]
var r = i.intervals[k+1]
// if r contained in l
if (l.stop >= r.stop) {
i.intervals = i.intervals[0:len(i.intervals)-1] // pop(k + 1)
if l.stop >= r.stop {
i.intervals = i.intervals[0 : len(i.intervals)-1] // pop(k + 1)
i.reduce(k)
} else if (l.stop >= r.start) {
} else if l.stop >= r.start {
i.intervals[k] = NewInterval(l.start, r.stop)
i.intervals = i.intervals[0:len(i.intervals)-1] // i.intervals.pop(k + 1)
i.intervals = i.intervals[0 : len(i.intervals)-1] // i.intervals.pop(k + 1)
}
}
}
func (is *IntervalSet) complement(start int, stop int) *IntervalSet {
var result = NewIntervalSet()
result.addInterval(NewInterval(start,stop+1))
for i := 0; i< len(is.intervals); i++ {
result.removeRange(is.intervals[i])
}
return result
var result = NewIntervalSet()
result.addInterval(NewInterval(start, stop+1))
for i := 0; i < len(is.intervals); i++ {
result.removeRange(is.intervals[i])
}
return result
}
func (i *IntervalSet) contains(item int) bool {
if (i.intervals == nil) {
if i.intervals == nil {
return false
} else {
for k := 0; k < len(i.intervals); k++ {
if(i.intervals[k].contains(item)) {
if i.intervals[k].contains(item) {
return true
}
}
@ -144,7 +144,7 @@ func (i *IntervalSet) contains(item int) bool {
func (is *IntervalSet) length() int {
len := 0
for _,v := range is.intervals {
for _, v := range is.intervals {
len += v.length()
}
@ -152,74 +152,74 @@ func (is *IntervalSet) length() int {
}
func (is *IntervalSet) removeRange(v *Interval) {
if v.start==v.stop-1 {
is.removeOne(v.start)
} else if (is.intervals!=nil) {
k:= 0
for n :=0; n<len( is.intervals ); n++ {
var i = is.intervals[k]
// intervals are ordered
if (v.stop<=i.start) {
return
} else if(v.start>i.start && v.stop<i.stop) {
is.intervals[k] = NewInterval(i.start, v.start)
var x = NewInterval(v.stop, i.stop)
if v.start == v.stop-1 {
is.removeOne(v.start)
} else if is.intervals != nil {
k := 0
for n := 0; n < len(is.intervals); n++ {
var i = is.intervals[k]
// intervals are ordered
if v.stop <= i.start {
return
} else if v.start > i.start && v.stop < i.stop {
is.intervals[k] = NewInterval(i.start, v.start)
var x = NewInterval(v.stop, i.stop)
// is.intervals.splice(k, 0, x)
is.intervals = append(is.intervals[0:k], append([]*Interval{x}, is.intervals[k:]...)...)
return
} else if(v.start<=i.start && v.stop>=i.stop) {
// is.intervals.splice(k, 1)
return
} else if v.start <= i.start && v.stop >= i.stop {
// is.intervals.splice(k, 1)
is.intervals = append(is.intervals[0:k], is.intervals[k+1:]...)
k = k - 1 // need another pass
} else if(v.start<i.stop) {
is.intervals[k] = NewInterval(i.start, v.start)
} else if(v.stop<i.stop) {
is.intervals[k] = NewInterval(v.stop, i.stop)
}
k += 1
}
}
k = k - 1 // need another pass
} else if v.start < i.stop {
is.intervals[k] = NewInterval(i.start, v.start)
} else if v.stop < i.stop {
is.intervals[k] = NewInterval(v.stop, i.stop)
}
k += 1
}
}
}
func (is *IntervalSet) removeOne(v int) {
if (is.intervals != nil) {
if is.intervals != nil {
for k := 0; k < len(is.intervals); k++ {
var i = is.intervals[k];
var i = is.intervals[k]
// intervals is ordered
if (v < i.start) {
return;
} else if (v == i.start && v == i.stop - 1) {
// is.intervals.splice(k, 1);
if v < i.start {
return
} else if v == i.start && v == i.stop-1 {
// is.intervals.splice(k, 1);
is.intervals = append(is.intervals[0:k], is.intervals[k+1:]...)
return;
} else if (v == i.start) {
is.intervals[k] = NewInterval(i.start + 1, i.stop);
return;
} else if (v == i.stop - 1) {
is.intervals[k] = NewInterval(i.start, i.stop - 1);
return;
} else if (v < i.stop - 1) {
var x = NewInterval(i.start, v);
i.start = v + 1;
// is.intervals.splice(k, 0, x);
return
} else if v == i.start {
is.intervals[k] = NewInterval(i.start+1, i.stop)
return
} else if v == i.stop-1 {
is.intervals[k] = NewInterval(i.start, i.stop-1)
return
} else if v < i.stop-1 {
var x = NewInterval(i.start, v)
i.start = v + 1
// is.intervals.splice(k, 0, x);
is.intervals = append(is.intervals[0:k], append([]*Interval{x}, is.intervals[k:]...)...)
return;
return
}
}
}
}
func (i *IntervalSet) toString() string {
return i.toStringVerbose(nil,nil,false)
return i.toStringVerbose(nil, nil, false)
}
func (i *IntervalSet) toStringVerbose(literalNames []string, symbolicNames []string, elemsAreChar bool) string {
if (i.intervals == nil) {
if i.intervals == nil {
return "{}"
} else if(literalNames!=nil || symbolicNames!=nil) {
} else if literalNames != nil || symbolicNames != nil {
return i.toTokenString(literalNames, symbolicNames)
} else if(elemsAreChar) {
} else if elemsAreChar {
return i.toCharString()
} else {
return i.toIndexString()
@ -229,76 +229,71 @@ func (i *IntervalSet) toStringVerbose(literalNames []string, symbolicNames []str
func (is *IntervalSet) toCharString() string {
var names = make([]string, len(is.intervals))
for i := 0; i < len( is.intervals ); i++ {
for i := 0; i < len(is.intervals); i++ {
var v = is.intervals[i]
if(v.stop==v.start+1) {
if ( v.start== TokenEOF ) {
if v.stop == v.start+1 {
if v.start == TokenEOF {
names = append(names, "<EOF>")
} else {
names = append(names, ("'" + string(v.start) + "'"))
}
} else {
names = append(names, "'" + string(v.start) + "'..'" + string(v.stop-1) + "'")
names = append(names, "'"+string(v.start)+"'..'"+string(v.stop-1)+"'")
}
}
if (len(names) > 1) {
if len(names) > 1 {
return "{" + strings.Join(names, ", ") + "}"
} else {
return names[0]
}
}
func (is *IntervalSet) toIndexString() string {
var names = make([]string, 0)
for i := 0; i < len( is.intervals ); i++ {
for i := 0; i < len(is.intervals); i++ {
var v = is.intervals[i]
if(v.stop==v.start+1) {
if ( v.start==TokenEOF ) {
names = append( names, "<EOF>")
if v.stop == v.start+1 {
if v.start == TokenEOF {
names = append(names, "<EOF>")
} else {
names = append( names, string(v.start))
names = append(names, string(v.start))
}
} else {
names = append( names, string(v.start) + ".." + string(v.stop-1))
names = append(names, string(v.start)+".."+string(v.stop-1))
}
}
if (len(names) > 1) {
if len(names) > 1 {
return "{" + strings.Join(names, ", ") + "}"
} else {
return names[0]
}
}
func (is *IntervalSet) toTokenString(literalNames []string, symbolicNames []string) string {
var names = make([]string, 0)
for i := 0; i < len( is.intervals ); i++ {
for i := 0; i < len(is.intervals); i++ {
var v = is.intervals[i]
for j := v.start; j < v.stop; j++ {
names = append(names, is.elementName(literalNames, symbolicNames, j))
}
}
if (len(names) > 1) {
return "{" + strings.Join(names,", ") + "}"
if len(names) > 1 {
return "{" + strings.Join(names, ", ") + "}"
} else {
return names[0]
}
}
func (i *IntervalSet) elementName(literalNames []string, symbolicNames []string, a int) string {
if (a == TokenEOF) {
if a == TokenEOF {
return "<EOF>"
} else if (a == TokenEpsilon) {
} else if a == TokenEpsilon {
return "<EPSILON>"
} else {
if (literalNames[a] != ""){
if literalNames[a] != "" {
return literalNames[a]
} else {
return symbolicNames[a]
}
}
}

View File

@ -1,23 +1,22 @@
package antlr4
import (
)
import ()
type LL1Analyzer struct {
atn *ATN
atn *ATN
}
func NewLL1Analyzer (atn *ATN) *LL1Analyzer {
la := new(LL1Analyzer)
la.atn = atn
return la
func NewLL1Analyzer(atn *ATN) *LL1Analyzer {
la := new(LL1Analyzer)
la.atn = atn
return la
}
//* Special value added to the lookahead sets to indicate that we hit
// a predicate during analysis if {@code seeThruPreds==false}.
///
const (
LL1AnalyzerHIT_PRED = TokenInvalidType
LL1AnalyzerHIT_PRED = TokenInvalidType
)
//*
@ -30,23 +29,23 @@ const (
// @param s the ATN state
// @return the expected symbols for each outgoing transition of {@code s}.
func (la *LL1Analyzer) getDecisionLookahead(s IATNState) []*IntervalSet {
if (s == nil) {
return nil
}
var count = len(s.getTransitions())
var look = make([]*IntervalSet, count)
for alt := 0; alt < count; alt++ {
look[alt] = NewIntervalSet()
var lookBusy = NewSet(nil,nil)
var seeThruPreds = false // fail to get lookahead upon pred
la._LOOK(s.getTransitions()[alt].getTarget(), nil, PredictionContextEMPTY, look[alt], lookBusy, NewBitSet(), seeThruPreds, false)
// Wipe out lookahead for la alternative if we found nothing
// or we had a predicate when we !seeThruPreds
if (look[alt].length()==0 || look[alt].contains(LL1AnalyzerHIT_PRED)) {
look[alt] = nil
}
}
return look
if s == nil {
return nil
}
var count = len(s.getTransitions())
var look = make([]*IntervalSet, count)
for alt := 0; alt < count; alt++ {
look[alt] = NewIntervalSet()
var lookBusy = NewSet(nil, nil)
var seeThruPreds = false // fail to get lookahead upon pred
la._LOOK(s.getTransitions()[alt].getTarget(), nil, PredictionContextEMPTY, look[alt], lookBusy, NewBitSet(), seeThruPreds, false)
// Wipe out lookahead for la alternative if we found nothing
// or we had a predicate when we !seeThruPreds
if look[alt].length() == 0 || look[alt].contains(LL1AnalyzerHIT_PRED) {
look[alt] = nil
}
}
return look
}
//*
@ -68,16 +67,16 @@ func (la *LL1Analyzer) getDecisionLookahead(s IATNState) []*IntervalSet {
// specified {@code ctx}.
///
func (la *LL1Analyzer) LOOK(s, stopState IATNState, ctx IRuleContext) *IntervalSet {
var r = NewIntervalSet()
var seeThruPreds = true // ignore preds get all lookahead
var lookContext IPredictionContext
if (ctx != nil){
predictionContextFromRuleContext(s.getATN(), ctx)
}
la._LOOK(s, stopState, lookContext, r, NewSet(nil, nil), NewBitSet(), seeThruPreds, true)
return r
var r = NewIntervalSet()
var seeThruPreds = true // ignore preds get all lookahead
var lookContext IPredictionContext
if ctx != nil {
predictionContextFromRuleContext(s.getATN(), ctx)
}
la._LOOK(s, stopState, lookContext, r, NewSet(nil, nil), NewBitSet(), seeThruPreds, true)
return r
}
//*
// Compute set of tokens that can follow {@code s} in the ATN in the
// specified {@code ctx}.
@ -108,98 +107,97 @@ func (la *LL1Analyzer) LOOK(s, stopState IATNState, ctx IRuleContext) *IntervalS
// outermost context is reached. This parameter has no effect if {@code ctx}
// is {@code nil}.
func (la *LL1Analyzer) _LOOK(s, stopState IATNState, ctx IPredictionContext, look *IntervalSet, lookBusy *Set, calledRuleStack *BitSet, seeThruPreds, addEOF bool) {
c := NewATNConfig6(s, 0, ctx)
c := NewATNConfig6(s, 0, ctx)
if lookBusy.add(c) == nil {
return
}
if lookBusy.add(c) == nil {
return
}
if (s == stopState) {
if (ctx == nil) {
look.addOne(TokenEpsilon)
return
} else if (ctx.isEmpty() && addEOF) {
look.addOne(TokenEOF)
return
}
}
if s == stopState {
if ctx == nil {
look.addOne(TokenEpsilon)
return
} else if ctx.isEmpty() && addEOF {
look.addOne(TokenEOF)
return
}
}
_,ok := s.(*RuleStopState)
_, ok := s.(*RuleStopState)
if ok {
if ( ctx==nil ) {
look.addOne(TokenEpsilon)
return
} else if (ctx.isEmpty() && addEOF) {
look.addOne(TokenEOF)
return
}
if ok {
if ctx == nil {
look.addOne(TokenEpsilon)
return
} else if ctx.isEmpty() && addEOF {
look.addOne(TokenEOF)
return
}
if ( ctx != PredictionContextEMPTY ) {
if ctx != PredictionContextEMPTY {
// run thru all possible stack tops in ctx
for i := 0; i < ctx.length(); i++ {
// run thru all possible stack tops in ctx
for i := 0; i < ctx.length(); i++ {
returnState := la.atn.states[ctx.getReturnState(i)]
// System.out.println("popping back to "+retState)
returnState := la.atn.states[ctx.getReturnState(i)]
// System.out.println("popping back to "+retState)
removed := calledRuleStack.contains(returnState.getRuleIndex())
removed := calledRuleStack.contains(returnState.getRuleIndex())
// TODO this is incorrect
defer func(){
if (removed) {
calledRuleStack.add(returnState.getRuleIndex())
}
}()
// TODO this is incorrect
defer func() {
if removed {
calledRuleStack.add(returnState.getRuleIndex())
}
}()
calledRuleStack.clear(returnState.getRuleIndex())
la._LOOK(returnState, stopState, ctx.getParent(i), look, lookBusy, calledRuleStack, seeThruPreds, addEOF)
calledRuleStack.clear(returnState.getRuleIndex())
la._LOOK(returnState, stopState, ctx.getParent(i), look, lookBusy, calledRuleStack, seeThruPreds, addEOF)
}
return
}
}
}
return
}
}
n := len(s.getTransitions())
n := len(s.getTransitions())
for i:=0; i<n; i++ {
t := s.getTransitions()[i]
for i := 0; i < n; i++ {
t := s.getTransitions()[i]
if t1, ok := t.(*RuleTransition); ok {
if t1, ok := t.(*RuleTransition); ok {
if (calledRuleStack.contains(t1.getTarget().getRuleIndex())) {
continue
}
if calledRuleStack.contains(t1.getTarget().getRuleIndex()) {
continue
}
newContext := SingletonPredictionContextcreate(ctx, t1.followState.getStateNumber())
newContext := SingletonPredictionContextcreate(ctx, t1.followState.getStateNumber())
defer func(){
calledRuleStack.remove(t1.getTarget().getRuleIndex());
}()
defer func() {
calledRuleStack.remove(t1.getTarget().getRuleIndex())
}()
calledRuleStack.add(t1.getTarget().getRuleIndex())
la._LOOK(t.getTarget(), stopState, newContext, look, lookBusy, calledRuleStack, seeThruPreds, addEOF)
} else if t2, ok := t.(*AbstractPredicateTransition); ok {
if ( seeThruPreds ) {
la._LOOK(t2.getTarget(), stopState, ctx, look, lookBusy, calledRuleStack, seeThruPreds, addEOF)
} else {
look.addOne(LL1AnalyzerHIT_PRED)
}
} else if ( t.getIsEpsilon() ) {
la._LOOK(t.getTarget(), stopState, ctx, look, lookBusy, calledRuleStack, seeThruPreds, addEOF)
} else if _, ok := t.(*WildcardTransition); ok {
look.addRange( TokenMinUserTokenType, la.atn.maxTokenType );
} else {
set := t.getLabel()
if (set != nil) {
if _, ok := t.(*NotSetTransition); ok {
set = set.complement(TokenMinUserTokenType, la.atn.maxTokenType);
}
look.addSet(set)
}
}
}
}
calledRuleStack.add(t1.getTarget().getRuleIndex())
la._LOOK(t.getTarget(), stopState, newContext, look, lookBusy, calledRuleStack, seeThruPreds, addEOF)
} else if t2, ok := t.(*AbstractPredicateTransition); ok {
if seeThruPreds {
la._LOOK(t2.getTarget(), stopState, ctx, look, lookBusy, calledRuleStack, seeThruPreds, addEOF)
} else {
look.addOne(LL1AnalyzerHIT_PRED)
}
} else if t.getIsEpsilon() {
la._LOOK(t.getTarget(), stopState, ctx, look, lookBusy, calledRuleStack, seeThruPreds, addEOF)
} else if _, ok := t.(*WildcardTransition); ok {
look.addRange(TokenMinUserTokenType, la.atn.maxTokenType)
} else {
set := t.getLabel()
if set != nil {
if _, ok := t.(*NotSetTransition); ok {
set = set.complement(TokenMinUserTokenType, la.atn.maxTokenType)
}
look.addSet(set)
}
}
}
}

View File

@ -20,27 +20,27 @@ type ILexer interface {
popMode() int
setType(int)
mode(int)
}
type Lexer struct {
Recognizer
_input CharStream
_factory TokenFactory
Interpreter *LexerATNSimulator
_input CharStream
_factory TokenFactory
_tokenFactorySourcePair *TokenSourceCharStreamPair
_interp *LexerATNSimulator
_token *Token
_tokenStartCharIndex int
_tokenStartLine int
_tokenStartColumn int
_hitEOF bool
_channel int
_type int
_modeStack IntStack
_mode int
_text *string
actionType int
_token *Token
_tokenStartCharIndex int
_tokenStartLine int
_tokenStartColumn int
_hitEOF bool
_channel int
_type int
_modeStack IntStack
_mode int
_text *string
actionType int
}
func NewLexer(input CharStream) *Lexer {
@ -53,13 +53,13 @@ func NewLexer(input CharStream) *Lexer {
return lexer
}
func (l *Lexer) InitLexer(input CharStream){
func (l *Lexer) InitLexer(input CharStream) {
l._input = input
l._factory = CommonTokenFactoryDEFAULT
l._tokenFactorySourcePair = &TokenSourceCharStreamPair{l, input}
l._interp = nil // child classes must populate l
l.Interpreter = nil // child classes must populate it
// The goal of all lexer rules/methods is to create a token object.
// l is an instance variable as multiple rules may collaborate to
@ -91,7 +91,7 @@ func (l *Lexer) InitLexer(input CharStream){
// The token type for the current token///
l._type = TokenInvalidType
l._modeStack = make([]int,0)
l._modeStack = make([]int, 0)
l._mode = LexerDefaultMode
// You can set the text for the current token to override what is in
@ -103,20 +103,20 @@ func (l *Lexer) InitLexer(input CharStream){
const (
LexerDefaultMode = 0
LexerMore = -2
LexerSkip = -3
LexerMore = -2
LexerSkip = -3
)
const (
LexerDefaultTokenChannel = TokenDefaultChannel
LexerHidden = TokenHiddenChannel
LexerMinCharValue = '\u0000'
LexerMaxCharValue = '\uFFFE'
LexerHidden = TokenHiddenChannel
LexerMinCharValue = '\u0000'
LexerMaxCharValue = '\uFFFE'
)
func (l *Lexer) reset() {
// wack Lexer state variables
if (l._input != nil) {
if l._input != nil {
l._input.seek(0) // rewind the input
}
l._token = nil
@ -131,7 +131,7 @@ func (l *Lexer) reset() {
l._mode = LexerDefaultMode
l._modeStack = make([]int, 0)
l._interp.reset()
l.Interpreter.reset()
}
func (l *Lexer) getInputStream() CharStream {
@ -142,7 +142,7 @@ func (l *Lexer) getSourceName() string {
return l._input.getSourceName()
}
func (l *Lexer) setChannel(v int){
func (l *Lexer) setChannel(v int) {
l._channel = v
}
@ -157,9 +157,9 @@ func (l *Lexer) setTokenFactory(f TokenFactory) {
func (l *Lexer) safeMatch() (ret int) {
// previously in catch block
defer func(){
defer func() {
if e := recover(); e != nil {
if re,ok := e.(IRecognitionException); ok {
if re, ok := e.(IRecognitionException); ok {
l.notifyListeners(re) // report error
l.recover(re)
ret = LexerSkip // default
@ -167,12 +167,12 @@ func (l *Lexer) safeMatch() (ret int) {
}
}()
return l._interp.match(l._input, l._mode)
return l.Interpreter.match(l._input, l._mode)
}
// Return a token from l source i.e., match a token on the char stream.
func (l *Lexer) nextToken() *Token {
if (l._input == nil) {
if l._input == nil {
panic("nextToken requires a non-nil input stream.")
}
@ -180,48 +180,48 @@ func (l *Lexer) nextToken() *Token {
var tokenStartMarker = l._input.mark()
// previously in finally block
defer func(){
defer func() {
// make sure we release marker after match or
// unbuffered char stream will keep buffering
l._input.release(tokenStartMarker)
}()
for (true) {
if (l._hitEOF) {
for true {
if l._hitEOF {
l.emitEOF()
return l._token
}
l._token = nil
l._channel = TokenDefaultChannel
l._tokenStartCharIndex = l._input.index()
l._tokenStartColumn = l._interp.column
l._tokenStartLine = l._interp.line
l._tokenStartColumn = l.Interpreter.column
l._tokenStartLine = l.Interpreter.line
l._text = nil
var continueOuter = false
for (true) {
for true {
l._type = TokenInvalidType
var ttype = LexerSkip
ttype = l.safeMatch()
if (l._input.LA(1) == TokenEOF) {
if l._input.LA(1) == TokenEOF {
l._hitEOF = true
}
if (l._type == TokenInvalidType) {
if l._type == TokenInvalidType {
l._type = ttype
}
if (l._type == LexerSkip) {
if l._type == LexerSkip {
continueOuter = true
break
}
if (l._type != LexerMore) {
if l._type != LexerMore {
break
}
}
if (continueOuter) {
if continueOuter {
continue
}
if (l._token == nil) {
if l._token == nil {
l.emit()
}
return l._token
@ -249,7 +249,7 @@ func (l *Lexer) mode(m int) {
}
func (l *Lexer) pushMode(m int) {
if (LexerATNSimulatordebug) {
if LexerATNSimulatordebug {
fmt.Println("pushMode " + strconv.Itoa(m))
}
l._modeStack.Push(l._mode)
@ -257,10 +257,10 @@ func (l *Lexer) pushMode(m int) {
}
func (l *Lexer) popMode() int {
if ( len(l._modeStack) == 0) {
if len(l._modeStack) == 0 {
panic("Empty Stack")
}
if (LexerATNSimulatordebug) {
if LexerATNSimulatordebug {
fmt.Println("popMode back to " + fmt.Sprint(l._modeStack[0:len(l._modeStack)-1]))
}
i, _ := l._modeStack.Pop()
@ -268,7 +268,6 @@ func (l *Lexer) popMode() int {
return l._mode
}
func (l *Lexer) inputStream() CharStream {
return l._input
}
@ -297,25 +296,25 @@ func (l *Lexer) emitToken(token *Token) {
// custom Token objects or provide a Newfactory.
// /
func (l *Lexer) emit() *Token {
var t = l._factory.create(l._tokenFactorySourcePair, l._type, *l._text, l._channel, l._tokenStartCharIndex, l.getCharIndex() - 1, l._tokenStartLine, l._tokenStartColumn)
var t = l._factory.create(l._tokenFactorySourcePair, l._type, *l._text, l._channel, l._tokenStartCharIndex, l.getCharIndex()-1, l._tokenStartLine, l._tokenStartColumn)
l.emitToken(t)
return t
}
func (l *Lexer) emitEOF() *Token {
cpos := l.getCharPositionInLine();
lpos := l.getLine();
var eof = l._factory.create(l._tokenFactorySourcePair, TokenEOF, "", TokenDefaultChannel, l._input.index(), l._input.index() - 1, lpos, cpos)
cpos := l.getCharPositionInLine()
lpos := l.getLine()
var eof = l._factory.create(l._tokenFactorySourcePair, TokenEOF, "", TokenDefaultChannel, l._input.index(), l._input.index()-1, lpos, cpos)
l.emitToken(eof)
return eof
}
func (l *Lexer) getCharPositionInLine() int {
return l._interp.column
return l.Interpreter.column
}
func (l *Lexer) getLine() int {
return l._interp.line
return l.Interpreter.line
}
func (l *Lexer) getType() int {
@ -334,10 +333,10 @@ func (l *Lexer) getCharIndex() int {
// Return the text matched so far for the current token or any text override.
//Set the complete text of l token it wipes any previous changes to the text.
func (l *Lexer) text() string {
if (l._text != nil) {
if l._text != nil {
return *l._text
} else {
return l._interp.getText(l._input)
return l.Interpreter.getText(l._input)
}
}
@ -346,7 +345,7 @@ func (l *Lexer) setText(text string) {
}
func (this *Lexer) getATN() *ATN {
return this._interp.atn
return this.Interpreter.atn
}
// Return a list of all Token objects in input char stream.
@ -355,7 +354,7 @@ func (this *Lexer) getATN() *ATN {
func (l *Lexer) getAllTokens() []*Token {
var tokens = make([]*Token, 0)
var t = l.nextToken()
for (t.tokenType != TokenEOF) {
for t.tokenType != TokenEOF {
tokens = append(tokens, t)
t = l.nextToken()
}
@ -372,13 +371,13 @@ func (l *Lexer) notifyListeners(e IRecognitionException) {
}
func (l *Lexer) getErrorDisplayForChar(c rune) string {
if (c == TokenEOF) {
if c == TokenEOF {
return "<EOF>"
} else if (c == '\n') {
} else if c == '\n' {
return "\\n"
} else if (c == '\t') {
} else if c == '\t' {
return "\\t"
} else if (c == '\r') {
} else if c == '\r' {
return "\\r"
} else {
return string(c)
@ -395,15 +394,13 @@ func (l *Lexer) getCharErrorDisplay(c rune) string {
// to do sophisticated error recovery if you are in a fragment rule.
// /
func (l *Lexer) recover(re IRecognitionException) {
if (l._input.LA(1) != TokenEOF) {
if l._input.LA(1) != TokenEOF {
if _, ok := re.(*LexerNoViableAltException); ok {
// skip a char and try again
l._interp.consume(l._input)
l.Interpreter.consume(l._input)
} else {
// TODO: Do we lose character or line position information?
l._input.consume()
}
}
}

View File

@ -1,6 +1,7 @@
package antlr4
import (
"fmt"
"fmt"
"strconv"
)
@ -28,9 +29,9 @@ func resetSimState(sim *SimState) {
}
type SimState struct {
index int
line int
column int
index int
line int
column int
dfaState *DFAState
}
@ -49,17 +50,16 @@ func (this *SimState) reset() {
type LexerATNSimulator struct {
ATNSimulator
recog *Lexer
recog *Lexer
predictionMode int
decisionToDFA []*DFA
mergeCache DoubleDict
startIndex int
line int
column int
mode int
prevAccept *SimState
match_calls int
decisionToDFA []*DFA
mergeCache DoubleDict
startIndex int
line int
column int
mode int
prevAccept *SimState
match_calls int
}
func NewLexerATNSimulator(recog *Lexer, atn *ATN, decisionToDFA []*DFA, sharedContextCache *PredictionContextCache) *LexerATNSimulator {
@ -109,14 +109,14 @@ func (this *LexerATNSimulator) match(input CharStream, mode int) int {
this.mode = mode
var mark = input.mark()
defer func(){
defer func() {
input.release(mark)
}()
this.startIndex = input.index()
this.prevAccept.reset()
var dfa = this.decisionToDFA[mode]
if (dfa.s0 == nil) {
if dfa.s0 == nil {
return this.matchATN(input)
} else {
return this.execATN(input, dfa.s0)
@ -134,7 +134,7 @@ func (this *LexerATNSimulator) reset() {
func (this *LexerATNSimulator) matchATN(input CharStream) int {
var startState = this.atn.modeToStartState[this.mode]
if (LexerATNSimulatordebug) {
if LexerATNSimulatordebug {
fmt.Println("matchATN mode " + strconv.Itoa(this.mode) + " start: " + startState.toString())
}
var old_mode = this.mode
@ -144,31 +144,31 @@ func (this *LexerATNSimulator) matchATN(input CharStream) int {
var next = this.addDFAState(s0_closure.ATNConfigSet)
if (!suppressEdge) {
if !suppressEdge {
this.decisionToDFA[this.mode].s0 = next
}
var predict = this.execATN(input, next)
if (LexerATNSimulatordebug) {
if LexerATNSimulatordebug {
fmt.Println("DFA after matchATN: " + this.decisionToDFA[old_mode].toLexerString())
}
return predict
}
func (this *LexerATNSimulator) execATN(input CharStream, ds0 *DFAState) int {
if (LexerATNSimulatordebug) {
if LexerATNSimulatordebug {
fmt.Println("start state closure=" + ds0.configs.toString())
}
if (ds0.isAcceptState) {
if ds0.isAcceptState {
// allow zero-length tokens
this.captureSimState(this.prevAccept, input, ds0)
}
var t = input.LA(1)
var s = ds0 // s is current/from DFA state
for (true) { // while more work
if (LexerATNSimulatordebug) {
for true { // while more work
if LexerATNSimulatordebug {
fmt.Println("execATN loop starting closure: " + s.configs.toString())
}
@ -192,23 +192,23 @@ func (this *LexerATNSimulator) execATN(input CharStream, ds0 *DFAState) int {
// print("Target for:" + str(s) + " and:" + str(t))
var target = this.getExistingTargetState(s, t)
// print("Existing:" + str(target))
if (target == nil) {
if target == nil {
target = this.computeTargetState(input, s, t)
// print("Computed:" + str(target))
}
if (target == ATNSimulatorERROR) {
if target == ATNSimulatorERROR {
break
}
// If this is a consumable input element, make sure to consume before
// capturing the accept state so the input index, line, and char
// position accurately reflect the state of the interpreter at the
// end of the token.
if (t != TokenEOF) {
if t != TokenEOF {
this.consume(input)
}
if (target.isAcceptState) {
if target.isAcceptState {
this.captureSimState(this.prevAccept, input, target)
if (t == TokenEOF) {
if t == TokenEOF {
break
}
}
@ -228,15 +228,15 @@ func (this *LexerATNSimulator) execATN(input CharStream, ds0 *DFAState) int {
// {@code t}, or {@code nil} if the target state for this edge is not
// already cached
func (this *LexerATNSimulator) getExistingTargetState(s *DFAState, t int) *DFAState {
if (s.edges == nil || t < LexerATNSimulatorMIN_DFA_EDGE || t > LexerATNSimulatorMAX_DFA_EDGE) {
if s.edges == nil || t < LexerATNSimulatorMIN_DFA_EDGE || t > LexerATNSimulatorMAX_DFA_EDGE {
return nil
}
var target = s.edges[t - LexerATNSimulatorMIN_DFA_EDGE]
if(target==nil) {
var target = s.edges[t-LexerATNSimulatorMIN_DFA_EDGE]
if target == nil {
target = nil
}
if (LexerATNSimulatordebug && target != nil) {
if LexerATNSimulatordebug && target != nil {
fmt.Println("reuse state " + strconv.Itoa(s.stateNumber) + " edge to " + strconv.Itoa(target.stateNumber))
}
return target
@ -258,8 +258,8 @@ func (this *LexerATNSimulator) computeTargetState(input CharStream, s *DFAState,
// Fill reach starting from closure, following t transitions
this.getReachableConfigSet(input, s.configs, reach.ATNConfigSet, t)
if (len( reach.configs) == 0) { // we got nowhere on t from s
if (!reach.hasSemanticContext) {
if len(reach.configs) == 0 { // we got nowhere on t from s
if !reach.hasSemanticContext {
// we got nowhere on t, don't panic out this knowledge it'd
// cause a failover from DFA later.
this.addDFAEdge(s, t, ATNSimulatorERROR, nil)
@ -272,14 +272,14 @@ func (this *LexerATNSimulator) computeTargetState(input CharStream, s *DFAState,
}
func (this *LexerATNSimulator) failOrAccept(prevAccept *SimState, input CharStream, reach *ATNConfigSet, t int) int {
if (this.prevAccept.dfaState != nil) {
if this.prevAccept.dfaState != nil {
var lexerActionExecutor = prevAccept.dfaState.lexerActionExecutor
this.accept(input, lexerActionExecutor, this.startIndex,
prevAccept.index, prevAccept.line, prevAccept.column)
prevAccept.index, prevAccept.line, prevAccept.column)
return prevAccept.dfaState.prediction
} else {
// if no accept and EOF is first char, return EOF
if (t == TokenEOF && input.index() == this.startIndex) {
if t == TokenEOF && input.index() == this.startIndex {
return TokenEOF
}
panic(NewLexerNoViableAltException(this.recog, input, this.startIndex, reach))
@ -296,24 +296,24 @@ func (this *LexerATNSimulator) getReachableConfigSet(input CharStream, closure *
for i := 0; i < len(closure.configs); i++ {
var cfg = closure.configs[i]
var currentAltReachedAcceptState = (cfg.getAlt() == skipAlt)
if (currentAltReachedAcceptState && cfg.(*LexerATNConfig).passedThroughNonGreedyDecision) {
if currentAltReachedAcceptState && cfg.(*LexerATNConfig).passedThroughNonGreedyDecision {
continue
}
if (LexerATNSimulatordebug) {
if LexerATNSimulatordebug {
fmt.Printf("testing %s at %s\n", this.getTokenName(t), cfg.toString()) // this.recog, true))
}
for j := 0; j < len(cfg.getState().getTransitions()); j++ {
var trans = cfg.getState().getTransitions()[j] // for each transition
var target = this.getReachableTarget(trans, t)
if (target != nil) {
if target != nil {
var lexerActionExecutor = cfg.(*LexerATNConfig).lexerActionExecutor
if (lexerActionExecutor != nil) {
if lexerActionExecutor != nil {
lexerActionExecutor = lexerActionExecutor.fixOffsetBeforeMatch(input.index() - this.startIndex)
}
var treatEofAsEpsilon = (t == TokenEOF)
var config = NewLexerATNConfig3(cfg.(*LexerATNConfig), target, lexerActionExecutor)
if (this.closure(input, config, reach,
currentAltReachedAcceptState, true, treatEofAsEpsilon)) {
if this.closure(input, config, reach,
currentAltReachedAcceptState, true, treatEofAsEpsilon) {
// any remaining configs for this alt have a lower priority
// than the one that just reached an accept state.
skipAlt = cfg.getAlt()
@ -324,32 +324,32 @@ func (this *LexerATNSimulator) getReachableConfigSet(input CharStream, closure *
}
func (this *LexerATNSimulator) accept(input CharStream, lexerActionExecutor *LexerActionExecutor, startIndex, index, line, charPos int) {
if (LexerATNSimulatordebug) {
if LexerATNSimulatordebug {
fmt.Println("ACTION %s\n", lexerActionExecutor)
}
// seek to after last char in token
input.seek(index)
this.line = line
this.column = charPos
if (lexerActionExecutor != nil && this.recog != nil) {
if lexerActionExecutor != nil && this.recog != nil {
lexerActionExecutor.execute(this.recog, input, startIndex)
}
}
func (this *LexerATNSimulator) getReachableTarget(trans ITransition, t int) IATNState {
if (trans.matches(t, 0, 0xFFFE)) {
if trans.matches(t, 0, 0xFFFE) {
return trans.getTarget()
} else {
return nil
}
}
func (this *LexerATNSimulator) computeStartState(input CharStream, p IATNState ) *OrderedATNConfigSet {
func (this *LexerATNSimulator) computeStartState(input CharStream, p IATNState) *OrderedATNConfigSet {
var configs = NewOrderedATNConfigSet()
for i := 0; i < len(p.getTransitions()); i++ {
var target = p.getTransitions()[i].getTarget()
var cfg = NewLexerATNConfig6(target, i+1, PredictionContextEMPTY)
var cfg = NewLexerATNConfig6(target, i+1, PredictionContextEMPTY)
this.closure(input, cfg, configs.ATNConfigSet, false, false, false)
}
return configs
@ -366,21 +366,21 @@ func (this *LexerATNSimulator) computeStartState(input CharStream, p IATNState )
func (this *LexerATNSimulator) closure(input CharStream, config *LexerATNConfig, configs *ATNConfigSet,
currentAltReachedAcceptState, speculative, treatEofAsEpsilon bool) bool {
if (LexerATNSimulatordebug) {
fmt.Println("closure(" + config.toString() + ")") // config.toString(this.recog, true) + ")")
if LexerATNSimulatordebug {
fmt.Println("closure(" + config.toString() + ")") // config.toString(this.recog, true) + ")")
}
_, ok :=config.state.(*RuleStopState)
if (ok) {
if (LexerATNSimulatordebug) {
if (this.recog != nil) {
_, ok := config.state.(*RuleStopState)
if ok {
if LexerATNSimulatordebug {
if this.recog != nil {
fmt.Println("closure at %s rule stop %s\n", this.recog.getRuleNames()[config.state.getRuleIndex()], config)
} else {
fmt.Println("closure at rule stop %s\n", config)
}
}
if (config.context == nil || config.context.hasEmptyPath()) {
if (config.context == nil || config.context.isEmpty()) {
if config.context == nil || config.context.hasEmptyPath() {
if config.context == nil || config.context.isEmpty() {
configs.add(config, nil)
return true
} else {
@ -388,9 +388,9 @@ func (this *LexerATNSimulator) closure(input CharStream, config *LexerATNConfig,
currentAltReachedAcceptState = true
}
}
if (config.context != nil && !config.context.isEmpty()) {
if config.context != nil && !config.context.isEmpty() {
for i := 0; i < config.context.length(); i++ {
if (config.context.getReturnState(i) != PredictionContextEMPTY_RETURN_STATE) {
if config.context.getReturnState(i) != PredictionContextEMPTY_RETURN_STATE {
var newContext = config.context.getParent(i) // "pop" return state
var returnState = this.atn.states[config.context.getReturnState(i)]
cfg := NewLexerATNConfig2(config, returnState, newContext)
@ -401,17 +401,17 @@ func (this *LexerATNSimulator) closure(input CharStream, config *LexerATNConfig,
return currentAltReachedAcceptState
}
// optimization
if (!config.state.getEpsilonOnlyTransitions()) {
if (!currentAltReachedAcceptState || !config.passedThroughNonGreedyDecision) {
if !config.state.getEpsilonOnlyTransitions() {
if !currentAltReachedAcceptState || !config.passedThroughNonGreedyDecision {
configs.add(config, nil)
}
}
for j := 0; j < len(config.state.getTransitions()); j++ {
var trans = config.state.getTransitions()[j]
cfg := this.getEpsilonTarget(input, config, trans, configs, speculative, treatEofAsEpsilon)
if (cfg != nil) {
if cfg != nil {
currentAltReachedAcceptState = this.closure(input, cfg, configs,
currentAltReachedAcceptState, speculative, treatEofAsEpsilon)
currentAltReachedAcceptState, speculative, treatEofAsEpsilon)
}
}
return currentAltReachedAcceptState
@ -419,19 +419,19 @@ func (this *LexerATNSimulator) closure(input CharStream, config *LexerATNConfig,
// side-effect: can alter configs.hasSemanticContext
func (this *LexerATNSimulator) getEpsilonTarget(input CharStream, config *LexerATNConfig, trans ITransition,
configs *ATNConfigSet, speculative, treatEofAsEpsilon bool) *LexerATNConfig {
configs *ATNConfigSet, speculative, treatEofAsEpsilon bool) *LexerATNConfig {
var cfg *LexerATNConfig
if (trans.getSerializationType() == TransitionRULE) {
if trans.getSerializationType() == TransitionRULE {
rt := trans.(*RuleTransition)
var newContext = SingletonPredictionContextcreate(config.context, rt.followState.getStateNumber())
cfg = NewLexerATNConfig2(config, trans.getTarget(), newContext )
cfg = NewLexerATNConfig2(config, trans.getTarget(), newContext)
} else if (trans.getSerializationType() == TransitionPRECEDENCE) {
} else if trans.getSerializationType() == TransitionPRECEDENCE {
panic("Precedence predicates are not supported in lexers.")
} else if (trans.getSerializationType() == TransitionPREDICATE) {
} else if trans.getSerializationType() == TransitionPREDICATE {
// Track traversing semantic predicates. If we traverse,
// we cannot add a DFA state for this "reach" computation
// because the DFA would not test the predicate again in the
@ -452,15 +452,15 @@ func (this *LexerATNSimulator) getEpsilonTarget(input CharStream, config *LexerA
pt := trans.(*PredicateTransition)
if (LexerATNSimulatordebug) {
if LexerATNSimulatordebug {
fmt.Println("EVAL rule " + strconv.Itoa(trans.(*PredicateTransition).ruleIndex) + ":" + strconv.Itoa(pt.predIndex))
}
configs.hasSemanticContext = true
if (this.evaluatePredicate(input, pt.ruleIndex, pt.predIndex, speculative)) {
if this.evaluatePredicate(input, pt.ruleIndex, pt.predIndex, speculative) {
cfg = NewLexerATNConfig4(config, trans.getTarget())
}
} else if (trans.getSerializationType() == TransitionACTION) {
if (config.context == nil || config.context.hasEmptyPath()) {
} else if trans.getSerializationType() == TransitionACTION {
if config.context == nil || config.context.hasEmptyPath() {
// execute actions anywhere in the start rule for a token.
//
// TODO: if the entry rule is invoked recursively, some
@ -479,13 +479,13 @@ func (this *LexerATNSimulator) getEpsilonTarget(input CharStream, config *LexerA
// ignore actions in referenced rules
cfg = NewLexerATNConfig4(config, trans.getTarget())
}
} else if (trans.getSerializationType() == TransitionEPSILON) {
} else if trans.getSerializationType() == TransitionEPSILON {
cfg = NewLexerATNConfig4(config, trans.getTarget())
} else if (trans.getSerializationType() == TransitionATOM ||
trans.getSerializationType() == TransitionRANGE ||
trans.getSerializationType() == TransitionSET) {
if (treatEofAsEpsilon) {
if (trans.matches(TokenEOF, 0, 0xFFFF)) {
} else if trans.getSerializationType() == TransitionATOM ||
trans.getSerializationType() == TransitionRANGE ||
trans.getSerializationType() == TransitionSET {
if treatEofAsEpsilon {
if trans.matches(TokenEOF, 0, 0xFFFF) {
cfg = NewLexerATNConfig4(config, trans.getTarget())
}
}
@ -513,12 +513,12 @@ func (this *LexerATNSimulator) getEpsilonTarget(input CharStream, config *LexerA
// @return {@code true} if the specified predicate evaluates to
// {@code true}.
// /
func (this *LexerATNSimulator) evaluatePredicate(input CharStream, ruleIndex, predIndex int, speculative bool) bool {
func (this *LexerATNSimulator) evaluatePredicate(input CharStream, ruleIndex, predIndex int, speculative bool) bool {
// assume true if no recognizer was provided
if (this.recog == nil) {
if this.recog == nil {
return true
}
if (!speculative) {
if !speculative {
return this.recog.sempred(nil, ruleIndex, predIndex)
}
var savedcolumn = this.column
@ -526,7 +526,7 @@ func (this *LexerATNSimulator) evaluatePredicate(input CharStream, ruleIndex, p
var index = input.index()
var marker = input.mark()
defer func(){
defer func() {
this.column = savedcolumn
this.line = savedLine
input.seek(index)
@ -544,8 +544,8 @@ func (this *LexerATNSimulator) captureSimState(settings *SimState, input CharStr
settings.dfaState = dfaState
}
func (this *LexerATNSimulator) addDFAEdge(from_ *DFAState, tk int, to *DFAState , cfgs *ATNConfigSet) *DFAState {
if (to == nil && cfgs != nil) {
func (this *LexerATNSimulator) addDFAEdge(from_ *DFAState, tk int, to *DFAState, cfgs *ATNConfigSet) *DFAState {
if to == nil && cfgs != nil {
// leading to this call, ATNConfigSet.hasSemanticContext is used as a
// marker indicating dynamic predicate evaluation makes this edge
// dependent on the specific input sequence, so the static edge in the
@ -562,23 +562,23 @@ func (this *LexerATNSimulator) addDFAEdge(from_ *DFAState, tk int, to *DFAState
to = this.addDFAState(cfgs)
if (suppressEdge) {
if suppressEdge {
return to
}
}
// add the edge
if (tk < LexerATNSimulatorMIN_DFA_EDGE || tk > LexerATNSimulatorMAX_DFA_EDGE) {
if tk < LexerATNSimulatorMIN_DFA_EDGE || tk > LexerATNSimulatorMAX_DFA_EDGE {
// Only track edges within the DFA bounds
return to
}
if (LexerATNSimulatordebug) {
if LexerATNSimulatordebug {
fmt.Println("EDGE " + from_.toString() + " -> " + to.toString() + " upon " + strconv.Itoa(tk))
}
if (from_.edges == nil) {
if from_.edges == nil {
// make room for tokens 1..n and -1 masquerading as index 0
from_.edges = make([]*DFAState, LexerATNSimulatorMAX_DFA_EDGE-LexerATNSimulatorMIN_DFA_EDGE+1)
}
from_.edges[tk - LexerATNSimulatorMIN_DFA_EDGE] = to // connect
from_.edges[tk-LexerATNSimulatorMIN_DFA_EDGE] = to // connect
return to
}
@ -597,12 +597,12 @@ func (this *LexerATNSimulator) addDFAState(configs *ATNConfigSet) *DFAState {
_, ok := cfg.getState().(*RuleStopState)
if (ok) {
if ok {
firstConfigWithRuleStopState = cfg
break
}
}
if (firstConfigWithRuleStopState != nil) {
if firstConfigWithRuleStopState != nil {
proposed.isAcceptState = true
proposed.lexerActionExecutor = firstConfigWithRuleStopState.(*LexerATNConfig).lexerActionExecutor
proposed.prediction = this.atn.ruleToTokenType[firstConfigWithRuleStopState.getState().getRuleIndex()]
@ -610,7 +610,7 @@ func (this *LexerATNSimulator) addDFAState(configs *ATNConfigSet) *DFAState {
var hash = proposed.hashString()
var dfa = this.decisionToDFA[this.mode]
var existing = dfa.getStates()[hash]
if (existing != nil) {
if existing != nil {
return existing
}
var newState = proposed
@ -628,12 +628,12 @@ func (this *LexerATNSimulator) getDFA(mode int) *DFA {
// Get the text matched so far for the current token.
func (this *LexerATNSimulator) getText(input CharStream) string {
// index is first lookahead char, don't include.
return input.getTextFromInterval(NewInterval(this.startIndex, input.index() - 1))
return input.getTextFromInterval(NewInterval(this.startIndex, input.index()-1))
}
func (this *LexerATNSimulator) consume(input CharStream) {
var curChar = input.LA(1)
if (curChar == int('\n')) {
if curChar == int('\n') {
this.line += 1
this.column = 0
} else {
@ -643,11 +643,9 @@ func (this *LexerATNSimulator) consume(input CharStream) {
}
func (this *LexerATNSimulator) getTokenName(tt int) string {
if (tt == -1) {
if tt == -1 {
return "EOF"
} else {
return "'" + string(tt) + "'"
}
}

View File

@ -1,59 +1,60 @@
package antlr4
import "strconv"
const (
LexerActionTypeCHANNEL = 0 //The type of a {@link LexerChannelAction} action.
LexerActionTypeCUSTOM = 1 //The type of a {@link LexerCustomAction} action.
LexerActionTypeMODE = 2 //The type of a {@link LexerModeAction} action.
LexerActionTypeMORE = 3 //The type of a {@link LexerMoreAction} action.
LexerActionTypePOP_MODE = 4 //The type of a {@link LexerPopModeAction} action.
LexerActionTypePUSH_MODE = 5 //The type of a {@link LexerPushModeAction} action.
LexerActionTypeSKIP = 6 //The type of a {@link LexerSkipAction} action.
LexerActionTypeTYPE = 7 //The type of a {@link LexerTypeAction} action.
LexerActionTypeCHANNEL = 0 //The type of a {@link LexerChannelAction} action.
LexerActionTypeCUSTOM = 1 //The type of a {@link LexerCustomAction} action.
LexerActionTypeMODE = 2 //The type of a {@link LexerModeAction} action.
LexerActionTypeMORE = 3 //The type of a {@link LexerMoreAction} action.
LexerActionTypePOP_MODE = 4 //The type of a {@link LexerPopModeAction} action.
LexerActionTypePUSH_MODE = 5 //The type of a {@link LexerPushModeAction} action.
LexerActionTypeSKIP = 6 //The type of a {@link LexerSkipAction} action.
LexerActionTypeTYPE = 7 //The type of a {@link LexerTypeAction} action.
)
type ILexerAction interface {
getActionType() int
getIsPositionDependent() bool
execute(lexer ILexer)
hashString() string
equals(other ILexerAction) bool
getActionType() int
getIsPositionDependent() bool
execute(lexer ILexer)
hashString() string
equals(other ILexerAction) bool
}
type LexerAction struct {
actionType int
isPositionDependent bool
type LexerAction struct {
actionType int
isPositionDependent bool
}
func NewLexerAction(action int) *LexerAction {
la := new(LexerAction)
la.InitLexerAction(action)
return la
la := new(LexerAction)
la.InitLexerAction(action)
return la
}
func (la *LexerAction) InitLexerAction(action int){
la.actionType = action
la.isPositionDependent = false
func (la *LexerAction) InitLexerAction(action int) {
la.actionType = action
la.isPositionDependent = false
}
func (this *LexerAction) execute(lexer ILexer) {
panic("Not implemented")
panic("Not implemented")
}
func (this *LexerAction) getActionType() int {
return this.actionType
return this.actionType
}
func (this *LexerAction) getIsPositionDependent() bool {
return this.isPositionDependent
return this.isPositionDependent
}
func (this *LexerAction) hashString() string {
return strconv.Itoa(this.actionType)
return strconv.Itoa(this.actionType)
}
func (this *LexerAction) equals(other ILexerAction) bool {
return this == other
return this == other
}
//
@ -62,12 +63,12 @@ func (this *LexerAction) equals(other ILexerAction) bool {
// <p>The {@code skip} command does not have any parameters, so this action is
// implemented as a singleton instance exposed by {@link //INSTANCE}.</p>
type LexerSkipAction struct {
*LexerAction
*LexerAction
}
func NewLexerSkipAction() *LexerSkipAction {
la := new(LexerSkipAction)
la.InitLexerAction(LexerActionTypeSKIP)
la := new(LexerSkipAction)
la.InitLexerAction(LexerActionTypeSKIP)
return la
}
@ -75,7 +76,7 @@ func NewLexerSkipAction() *LexerSkipAction {
var LexerSkipActionINSTANCE = NewLexerSkipAction()
func (this *LexerSkipAction) execute(lexer ILexer) {
lexer.skip()
lexer.skip()
}
func (this *LexerSkipAction) toString() string {
@ -87,18 +88,18 @@ func (this *LexerSkipAction) toString() string {
type LexerTypeAction struct {
*LexerAction
_type int
_type int
}
func NewLexerTypeAction(_type int) *LexerTypeAction {
this := new(LexerTypeAction)
this.InitLexerAction( LexerActionTypeTYPE )
this.InitLexerAction(LexerActionTypeTYPE)
this._type = _type
return this
}
func (this *LexerTypeAction) execute(lexer ILexer) {
lexer.setType( this._type )
lexer.setType(this._type)
}
func (this *LexerTypeAction) hashString() string {
@ -106,17 +107,17 @@ func (this *LexerTypeAction) hashString() string {
}
func (this *LexerTypeAction) equals(other ILexerAction) bool {
if(this == other) {
return true
} else if _, ok := other.(*LexerTypeAction); !ok {
return false
} else {
return this._type == other.(*LexerTypeAction)._type
}
if this == other {
return true
} else if _, ok := other.(*LexerTypeAction); !ok {
return false
} else {
return this._type == other.(*LexerTypeAction)._type
}
}
func (this *LexerTypeAction) toString() string {
return "actionType(" + strconv.Itoa(this._type) + ")"
return "actionType(" + strconv.Itoa(this._type) + ")"
}
// Implements the {@code pushMode} lexer action by calling
@ -124,36 +125,36 @@ func (this *LexerTypeAction) toString() string {
type LexerPushModeAction struct {
*LexerAction
mode int
mode int
}
func NewLexerPushModeAction(mode int) *LexerPushModeAction {
this := new(LexerPushModeAction)
this.InitLexerAction( LexerActionTypePUSH_MODE )
this := new(LexerPushModeAction)
this.InitLexerAction(LexerActionTypePUSH_MODE)
this.mode = mode
return this
this.mode = mode
return this
}
// <p>This action is implemented by calling {@link Lexer//pushMode} with the
// value provided by {@link //getMode}.</p>
func (this *LexerPushModeAction) execute(lexer ILexer) {
lexer.pushMode(this.mode)
lexer.pushMode(this.mode)
}
func (this *LexerPushModeAction) hashString() string {
return strconv.Itoa(this.actionType) + strconv.Itoa(this.mode)
return strconv.Itoa(this.actionType) + strconv.Itoa(this.mode)
}
func (this *LexerPushModeAction) equals(other ILexerAction) bool {
if (this == other) {
return true
} else if _, ok := other.(*LexerPushModeAction); !ok {
return false
} else {
return this.mode == other.(*LexerPushModeAction).mode
}
if this == other {
return true
} else if _, ok := other.(*LexerPushModeAction); !ok {
return false
} else {
return this.mode == other.(*LexerPushModeAction).mode
}
}
func (this *LexerPushModeAction) toString() string {
@ -165,14 +166,14 @@ func (this *LexerPushModeAction) toString() string {
// <p>The {@code popMode} command does not have any parameters, so this action is
// implemented as a singleton instance exposed by {@link //INSTANCE}.</p>
type LexerPopModeAction struct {
*LexerAction
*LexerAction
}
func NewLexerPopModeAction() *LexerPopModeAction {
this := new(LexerPopModeAction)
this := new(LexerPopModeAction)
this.InitLexerAction( LexerActionTypePOP_MODE )
this.InitLexerAction(LexerActionTypePOP_MODE)
return this
}
@ -181,7 +182,7 @@ var LexerPopModeActionINSTANCE = NewLexerPopModeAction()
// <p>This action is implemented by calling {@link Lexer//popMode}.</p>
func (this *LexerPopModeAction) execute(lexer ILexer) {
lexer.popMode()
lexer.popMode()
}
func (this *LexerPopModeAction) toString() string {
@ -194,12 +195,12 @@ func (this *LexerPopModeAction) toString() string {
// implemented as a singleton instance exposed by {@link //INSTANCE}.</p>
type LexerMoreAction struct {
*LexerAction
*LexerAction
}
func NewLexerMoreAction() *LexerModeAction {
this := new(LexerModeAction)
this.InitLexerAction( LexerActionTypeMORE )
this := new(LexerModeAction)
this.InitLexerAction(LexerActionTypeMORE)
return this
}
@ -208,33 +209,32 @@ var LexerMoreActionINSTANCE = NewLexerMoreAction()
// <p>This action is implemented by calling {@link Lexer//popMode}.</p>
func (this *LexerMoreAction) execute(lexer ILexer) {
lexer.more()
lexer.more()
}
func (this *LexerMoreAction) toString() string {
return "more"
return "more"
}
// Implements the {@code mode} lexer action by calling {@link Lexer//mode} with
// the assigned mode.
type LexerModeAction struct {
*LexerAction
mode int
mode int
}
func NewLexerModeAction(mode int) *LexerModeAction {
this := new(LexerModeAction)
this.InitLexerAction( LexerActionTypeMODE )
this.mode = mode
return this
this.InitLexerAction(LexerActionTypeMODE)
this.mode = mode
return this
}
// <p>This action is implemented by calling {@link Lexer//mode} with the
// value provided by {@link //getMode}.</p>
func (this *LexerModeAction) execute(lexer ILexer) {
lexer.mode(this.mode)
lexer.mode(this.mode)
}
func (this *LexerModeAction) hashString() string {
@ -242,17 +242,17 @@ func (this *LexerModeAction) hashString() string {
}
func (this *LexerModeAction) equals(other ILexerAction) bool {
if (this == other) {
return true
} else if _, ok := other.(*LexerModeAction); !ok {
return false
} else {
return this.mode == other.(*LexerModeAction).mode
}
if this == other {
return true
} else if _, ok := other.(*LexerModeAction); !ok {
return false
} else {
return this.mode == other.(*LexerModeAction).mode
}
}
func (this *LexerModeAction) toString() string {
return "mode(" + strconv.Itoa(this.mode) + ")"
return "mode(" + strconv.Itoa(this.mode) + ")"
}
// Executes a custom lexer action by calling {@link Recognizer//action} with the
@ -274,36 +274,36 @@ func (this *LexerModeAction) toString() string {
type LexerCustomAction struct {
*LexerAction
ruleIndex, actionIndex int
ruleIndex, actionIndex int
}
func NewLexerCustomAction(ruleIndex, actionIndex int) *LexerCustomAction {
this := new(LexerCustomAction)
this.InitLexerAction( LexerActionTypeCUSTOM )
this.ruleIndex = ruleIndex
this.actionIndex = actionIndex
this.isPositionDependent = true
return this
this.InitLexerAction(LexerActionTypeCUSTOM)
this.ruleIndex = ruleIndex
this.actionIndex = actionIndex
this.isPositionDependent = true
return this
}
// <p>Custom actions are implemented by calling {@link Lexer//action} with the
// appropriate rule and action indexes.</p>
func (this *LexerCustomAction) execute(lexer ILexer) {
lexer.action(nil, this.ruleIndex, this.actionIndex)
lexer.action(nil, this.ruleIndex, this.actionIndex)
}
func (this *LexerCustomAction) hashString() string {
return strconv.Itoa(this.actionType) + strconv.Itoa(this.ruleIndex) + strconv.Itoa(this.actionIndex)
return strconv.Itoa(this.actionType) + strconv.Itoa(this.ruleIndex) + strconv.Itoa(this.actionIndex)
}
func (this *LexerCustomAction) equals(other ILexerAction) bool {
if (this == other) {
return true
} else if _, ok := other.(*LexerCustomAction); !ok {
return false
} else {
return this.ruleIndex == other.(*LexerCustomAction).ruleIndex && this.actionIndex == other.(*LexerCustomAction).actionIndex
}
if this == other {
return true
} else if _, ok := other.(*LexerCustomAction); !ok {
return false
} else {
return this.ruleIndex == other.(*LexerCustomAction).ruleIndex && this.actionIndex == other.(*LexerCustomAction).actionIndex
}
}
// Implements the {@code channel} lexer action by calling
@ -313,38 +313,38 @@ func (this *LexerCustomAction) equals(other ILexerAction) bool {
type LexerChannelAction struct {
*LexerAction
channel int
channel int
}
func NewLexerChannelAction(channel int) *LexerChannelAction {
this := new(LexerChannelAction)
this.InitLexerAction( LexerActionTypeCHANNEL )
this.channel = channel
return this
this.InitLexerAction(LexerActionTypeCHANNEL)
this.channel = channel
return this
}
// <p>This action is implemented by calling {@link Lexer//setChannel} with the
// value provided by {@link //getChannel}.</p>
func (this *LexerChannelAction) execute(lexer ILexer) {
lexer.setChannel(this.channel)
lexer.setChannel(this.channel)
}
func (this *LexerChannelAction) hashString() string {
return strconv.Itoa(this.actionType) + strconv.Itoa(this.channel)
return strconv.Itoa(this.actionType) + strconv.Itoa(this.channel)
}
func (this *LexerChannelAction) equals(other ILexerAction) bool {
if (this == other) {
return true
} else if _, ok := other.(*LexerChannelAction); !ok {
return false
} else {
return this.channel == other.(*LexerChannelAction).channel
}
if this == other {
return true
} else if _, ok := other.(*LexerChannelAction); !ok {
return false
} else {
return this.channel == other.(*LexerChannelAction).channel
}
}
func (this *LexerChannelAction) toString() string {
return "channel(" + strconv.Itoa(this.channel) + ")"
return "channel(" + strconv.Itoa(this.channel) + ")"
}
// This implementation of {@link LexerAction} is used for tracking input offsets
@ -370,51 +370,40 @@ func (this *LexerChannelAction) toString() string {
type LexerIndexedCustomAction struct {
*LexerAction
offset int
lexerAction ILexerAction
isPositionDependent bool
offset int
lexerAction ILexerAction
isPositionDependent bool
}
func NewLexerIndexedCustomAction(offset int, lexerAction ILexerAction) *LexerIndexedCustomAction {
this := new(LexerIndexedCustomAction)
this.InitLexerAction( lexerAction.getActionType() )
this := new(LexerIndexedCustomAction)
this.InitLexerAction(lexerAction.getActionType())
this.offset = offset
this.lexerAction = lexerAction
this.isPositionDependent = true
this.offset = offset
this.lexerAction = lexerAction
this.isPositionDependent = true
return this
return this
}
// <p>This method calls {@link //execute} on the result of {@link //getAction}
// using the provided {@code lexer}.</p>
func (this *LexerIndexedCustomAction) execute(lexer ILexer) {
// assume the input stream position was properly set by the calling code
this.lexerAction.execute(lexer)
// assume the input stream position was properly set by the calling code
this.lexerAction.execute(lexer)
}
func (this *LexerIndexedCustomAction) hashString() string {
return strconv.Itoa(this.actionType) + strconv.Itoa(this.offset) + this.lexerAction.hashString()
return strconv.Itoa(this.actionType) + strconv.Itoa(this.offset) + this.lexerAction.hashString()
}
func (this *LexerIndexedCustomAction) equals(other ILexerAction) bool {
if (this == other) {
return true
} else if _, ok := other.(*LexerIndexedCustomAction); !ok {
return false
} else {
return this.offset == other.(*LexerIndexedCustomAction).offset && this.lexerAction == other.(*LexerIndexedCustomAction).lexerAction
}
if this == other {
return true
} else if _, ok := other.(*LexerIndexedCustomAction); !ok {
return false
} else {
return this.offset == other.(*LexerIndexedCustomAction).offset && this.lexerAction == other.(*LexerIndexedCustomAction).lexerAction
}
}

View File

@ -8,13 +8,13 @@ package antlr4
// not cause bloating of the {@link DFA} created for the lexer.</p>
type LexerActionExecutor struct {
lexerActions []ILexerAction
lexerActions []ILexerAction
cachedHashString string
}
func NewLexerActionExecutor(lexerActions []ILexerAction) *LexerActionExecutor {
if (lexerActions == nil){
if lexerActions == nil {
lexerActions = make([]ILexerAction, 0)
}
@ -49,13 +49,13 @@ func NewLexerActionExecutor(lexerActions []ILexerAction) *LexerActionExecutor {
// @return A {@link LexerActionExecutor} for executing the combine actions
// of {@code lexerActionExecutor} and {@code lexerAction}.
func LexerActionExecutorappend(lexerActionExecutor *LexerActionExecutor, lexerAction ILexerAction) *LexerActionExecutor {
if (lexerActionExecutor == nil) {
if lexerActionExecutor == nil {
return NewLexerActionExecutor([]ILexerAction{lexerAction})
}
var lexerActions = append(lexerActionExecutor.lexerActions, lexerAction )
var lexerActions = append(lexerActionExecutor.lexerActions, lexerAction)
// var lexerActions = lexerActionExecutor.lexerActions.concat([ lexerAction ])
// var lexerActions = lexerActionExecutor.lexerActions.concat([ lexerAction ])
return NewLexerActionExecutor(lexerActions)
}
@ -91,11 +91,11 @@ func (this *LexerActionExecutor) fixOffsetBeforeMatch(offset int) *LexerActionEx
var updatedLexerActions []ILexerAction = nil
for i := 0; i < len(this.lexerActions); i++ {
_, ok := this.lexerActions[i].(*LexerIndexedCustomAction)
if (this.lexerActions[i].getIsPositionDependent() && !ok){
if (updatedLexerActions == nil) {
updatedLexerActions = make([]ILexerAction,0)
if this.lexerActions[i].getIsPositionDependent() && !ok {
if updatedLexerActions == nil {
updatedLexerActions = make([]ILexerAction, 0)
for _,a:= range this.lexerActions {
for _, a := range this.lexerActions {
updatedLexerActions = append(updatedLexerActions, a)
}
}
@ -103,7 +103,7 @@ func (this *LexerActionExecutor) fixOffsetBeforeMatch(offset int) *LexerActionEx
updatedLexerActions[i] = NewLexerIndexedCustomAction(offset, this.lexerActions[i])
}
}
if (updatedLexerActions == nil) {
if updatedLexerActions == nil {
return this
} else {
return NewLexerActionExecutor(updatedLexerActions)
@ -132,8 +132,8 @@ func (this *LexerActionExecutor) execute(lexer *Lexer, input CharStream, startIn
var requiresSeek = false
var stopIndex = input.index()
defer func(){
if (requiresSeek) {
defer func() {
if requiresSeek {
input.seek(stopIndex)
}
}()
@ -145,7 +145,7 @@ func (this *LexerActionExecutor) execute(lexer *Lexer, input CharStream, startIn
input.seek(startIndex + offset)
lexerAction = la.lexerAction
requiresSeek = (startIndex + offset) != stopIndex
} else if (lexerAction.getIsPositionDependent()) {
} else if lexerAction.getIsPositionDependent() {
input.seek(stopIndex)
requiresSeek = false
}
@ -158,14 +158,12 @@ func (this *LexerActionExecutor) hashString() string {
}
func (this *LexerActionExecutor) equals(other interface{}) bool {
if (this == other) {
if this == other {
return true
} else if _, ok := other.(*LexerActionExecutor); !ok {
return false
} else {
return this.cachedHashString == other.(*LexerActionExecutor).cachedHashString &&
&this.lexerActions == &other.(*LexerActionExecutor).lexerActions
&this.lexerActions == &other.(*LexerActionExecutor).lexerActions
}
}

View File

@ -2,7 +2,7 @@ package antlr4
import (
"fmt"
)
)
type TraceListener struct {
parser *Parser
@ -10,7 +10,7 @@ type TraceListener struct {
func NewTraceListener(parser *Parser) *TraceListener {
tl := new(TraceListener)
tl.parser = parser
tl.parser = parser
return tl
}
@ -21,7 +21,7 @@ func (this *TraceListener) enterEveryRule(ctx IParserRuleContext) {
fmt.Println("enter " + this.parser.getRuleNames()[ctx.getRuleIndex()] + ", LT(1)=" + this.parser._input.LT(1).text())
}
func (this *TraceListener) visitTerminal( node TerminalNode ) {
func (this *TraceListener) visitTerminal(node TerminalNode) {
fmt.Println("consume " + fmt.Sprint(node.getSymbol()) + " rule " + this.parser.getRuleNames()[this.parser._ctx.getRuleIndex()])
}
@ -46,23 +46,23 @@ type IParser interface {
isExpectedToken(symbol int) bool
getPrecedence() int
getRuleInvocationStack(IParserRuleContext) []string
}
type Parser struct {
*Recognizer
_input TokenStream
_errHandler IErrorStrategy
_precedenceStack IntStack
_ctx IParserRuleContext
buildParseTrees bool
_tracer *TraceListener
_parseListeners []ParseTreeListener
_syntaxErrors int
_interp *ParserATNSimulator
Interpreter *ParserATNSimulator
literalNames []string
_input TokenStream
_errHandler IErrorStrategy
_precedenceStack IntStack
_ctx IParserRuleContext
buildParseTrees bool
_tracer *TraceListener
_parseListeners []ParseTreeListener
_syntaxErrors int
literalNames []string
symbolicNames []string
}
@ -72,6 +72,13 @@ func NewParser(input TokenStream) *Parser {
p := new(Parser)
p.InitParser(input)
return p
}
func (p *Parser) InitParser(input TokenStream) {
p.InitRecognizer()
// The input stream.
@ -100,8 +107,6 @@ func NewParser(input TokenStream) *Parser {
// incremented each time {@link //notifyErrorListeners} is called.
p._syntaxErrors = 0
p.setInputStream(input)
return p
}
// p.field maps from the serialized ATN string to the deserialized {@link
@ -114,7 +119,7 @@ var bypassAltsAtnCache = make(map[string]int)
// reset the parser's state//
func (p *Parser) reset() {
if (p._input != nil) {
if p._input != nil {
p._input.seek(0)
}
p._errHandler.reset(p)
@ -123,8 +128,8 @@ func (p *Parser) reset() {
p.setTrace(nil)
p._precedenceStack = make([]int, 0)
p._precedenceStack.Push(0)
if (p._interp != nil) {
p._interp.reset()
if p.Interpreter != nil {
p.Interpreter.reset()
}
}
@ -147,12 +152,12 @@ func (p *Parser) reset() {
func (p *Parser) match(ttype int) *Token {
var t = p.getCurrentToken()
if (t.tokenType == ttype) {
if t.tokenType == ttype {
p._errHandler.reportMatch(p)
p.consume()
} else {
t = p._errHandler.recoverInline(p)
if (p.buildParseTrees && t.tokenIndex == -1) {
if p.buildParseTrees && t.tokenIndex == -1 {
// we must have conjured up a Newtoken during single token
// insertion
// if it's not the current symbol
@ -161,6 +166,7 @@ func (p *Parser) match(ttype int) *Token {
}
return t
}
// Match current input symbol as a wildcard. If the symbol type matches
// (i.e. has a value greater than 0), {@link ANTLRErrorStrategy//reportMatch}
// and {@link //consume} are called to complete the match process.
@ -179,12 +185,12 @@ func (p *Parser) match(ttype int) *Token {
func (p *Parser) matchWildcard() *Token {
var t = p.getCurrentToken()
if (t.tokenType > 0) {
if t.tokenType > 0 {
p._errHandler.reportMatch(p)
p.consume()
} else {
t = p._errHandler.recoverInline(p)
if (p.buildParseTrees && t.tokenIndex == -1) {
if p.buildParseTrees && t.tokenIndex == -1 {
// we must have conjured up a Newtoken during single token
// insertion
// if it's not the current symbol
@ -199,8 +205,8 @@ func (p *Parser) getParserRuleContext() IParserRuleContext {
}
func (p *Parser) getParseListeners() []ParseTreeListener {
if (p._parseListeners == nil){
return make([]ParseTreeListener,0)
if p._parseListeners == nil {
return make([]ParseTreeListener, 0)
}
return p._parseListeners
}
@ -234,10 +240,10 @@ func (p *Parser) getParseListeners() []ParseTreeListener {
// @panics nilPointerException if {@code} listener is {@code nil}
//
func (p *Parser) addParseListener(listener ParseTreeListener) {
if (listener == nil) {
if listener == nil {
panic("listener")
}
if (p._parseListeners == nil) {
if p._parseListeners == nil {
p._parseListeners = make([]ParseTreeListener, 0)
}
p._parseListeners = append(p._parseListeners, listener)
@ -252,15 +258,15 @@ func (p *Parser) addParseListener(listener ParseTreeListener) {
//
func (p *Parser) removeParseListener(listener ParseTreeListener) {
panic("Not implemented!")
// if (p._parseListeners != nil) {
// var idx = p._parseListeners.indexOf(listener)
// if (idx >= 0) {
// p._parseListeners.splice(idx, 1)
// }
// if (len(p._parseListeners) == 0) {
// p._parseListeners = nil
// }
// }
// if (p._parseListeners != nil) {
// var idx = p._parseListeners.indexOf(listener)
// if (idx >= 0) {
// p._parseListeners.splice(idx, 1)
// }
// if (len(p._parseListeners) == 0) {
// p._parseListeners = nil
// }
// }
}
// Remove all parse listeners.
@ -270,9 +276,9 @@ func (p *Parser) removeParseListeners() {
// Notify any parse listeners of an enter rule event.
func (p *Parser) triggerEnterRuleEvent() {
if (p._parseListeners != nil) {
var ctx = p._ctx
for _,listener := range p._parseListeners {
if p._parseListeners != nil {
var ctx = p._ctx
for _, listener := range p._parseListeners {
listener.enterEveryRule(ctx)
ctx.enterRule(listener)
}
@ -285,9 +291,9 @@ func (p *Parser) triggerEnterRuleEvent() {
// @see //addParseListener
//
func (p *Parser) triggerExitRuleEvent() {
if (p._parseListeners != nil) {
if p._parseListeners != nil {
// reverse order walk of listeners
ctx := p._ctx
ctx := p._ctx
l := len(p._parseListeners) - 1
for i := range p._parseListeners {
@ -307,11 +313,11 @@ func (this *Parser) getSymbolicNames() []string {
}
func (this *Parser) getInterpreter() *ParserATNSimulator {
return this._interp
return this.Interpreter
}
func (this *Parser) getATN() *ATN {
return this._interp.atn
return this.Interpreter.atn
}
func (p *Parser) getTokenFactory() TokenFactory {
@ -320,7 +326,7 @@ func (p *Parser) getTokenFactory() TokenFactory {
// Tell our token source and error strategy about a Newway to create tokens.//
func (p *Parser) setTokenFactory(factory TokenFactory) {
p._input.getTokenSource().setTokenFactory( factory )
p._input.getTokenSource().setTokenFactory(factory)
}
// The ATN with bypass alternatives is expensive to create so we create it
@ -334,18 +340,18 @@ func (p *Parser) getATNWithBypassAlts() {
// TODO
panic("Not implemented!")
// var serializedAtn = p.getSerializedATN()
// if (serializedAtn == nil) {
// panic("The current parser does not support an ATN with bypass alternatives.")
// }
// var result = p.bypassAltsAtnCache[serializedAtn]
// if (result == nil) {
// var deserializationOptions = NewATNDeserializationOptions(nil)
// deserializationOptions.generateRuleBypassTransitions = true
// result = NewATNDeserializer(deserializationOptions).deserialize(serializedAtn)
// p.bypassAltsAtnCache[serializedAtn] = result
// }
// return result
// var serializedAtn = p.getSerializedATN()
// if (serializedAtn == nil) {
// panic("The current parser does not support an ATN with bypass alternatives.")
// }
// var result = p.bypassAltsAtnCache[serializedAtn]
// if (result == nil) {
// var deserializationOptions = NewATNDeserializationOptions(nil)
// deserializationOptions.generateRuleBypassTransitions = true
// result = NewATNDeserializer(deserializationOptions).deserialize(serializedAtn)
// p.bypassAltsAtnCache[serializedAtn] = result
// }
// return result
}
// The preferred method of getting a tree pattern. For example, here's a
@ -362,21 +368,21 @@ func (p *Parser) getATNWithBypassAlts() {
func (p *Parser) compileParseTreePattern(pattern, patternRuleIndex, lexer ILexer) {
panic("NewParseTreePatternMatcher not implemented!")
//
// if (lexer == nil) {
// if (p.getTokenStream() != nil) {
// var tokenSource = p.getTokenStream().getTokenSource()
// if _, ok := tokenSource.(ILexer); ok {
// lexer = tokenSource
// }
// }
// }
// if (lexer == nil) {
// panic("Parser can't discover a lexer to use")
// }
//
// if (lexer == nil) {
// if (p.getTokenStream() != nil) {
// var tokenSource = p.getTokenStream().getTokenSource()
// if _, ok := tokenSource.(ILexer); ok {
// lexer = tokenSource
// }
// }
// }
// if (lexer == nil) {
// panic("Parser can't discover a lexer to use")
// }
// var m = NewParseTreePatternMatcher(lexer, p)
// return m.compile(pattern, patternRuleIndex)
// var m = NewParseTreePatternMatcher(lexer, p)
// return m.compile(pattern, patternRuleIndex)
}
func (p *Parser) getInputStream() CharStream {
@ -406,7 +412,7 @@ func (p *Parser) getCurrentToken() *Token {
}
func (p *Parser) notifyErrorListeners(msg string, offendingToken *Token, err IRecognitionException) {
if (offendingToken == nil) {
if offendingToken == nil {
offendingToken = p.getCurrentToken()
}
p._syntaxErrors += 1
@ -418,28 +424,28 @@ func (p *Parser) notifyErrorListeners(msg string, offendingToken *Token, err IRe
func (p *Parser) consume() *Token {
var o = p.getCurrentToken()
if (o.tokenType != TokenEOF) {
if o.tokenType != TokenEOF {
p.getInputStream().consume()
}
var hasListener = p._parseListeners != nil && len(p._parseListeners) > 0
if (p.buildParseTrees || hasListener) {
if (p._errHandler.inErrorRecoveryMode(p)) {
if p.buildParseTrees || hasListener {
if p._errHandler.inErrorRecoveryMode(p) {
var node = p._ctx.addErrorNode(o)
if (p._parseListeners != nil) {
if p._parseListeners != nil {
for _, l := range p._parseListeners {
l.visitErrorNode(node);
l.visitErrorNode(node)
}
}
} else {
node := p._ctx.addTokenNode(o);
if (p._parseListeners != nil) {
node := p._ctx.addTokenNode(o)
if p._parseListeners != nil {
for _, l := range p._parseListeners {
l.visitTerminal(node)
}
}
}
// node.invokingState = p.state
// node.invokingState = p.state
}
return o
@ -447,27 +453,27 @@ func (p *Parser) consume() *Token {
func (p *Parser) addContextToParseTree() {
// add current context to parent if we have a parent
if (p._ctx.getParent() != nil) {
p._ctx.getParent().setChildren( append(p._ctx.getParent().getChildren(), p._ctx) )
if p._ctx.getParent() != nil {
p._ctx.getParent().setChildren(append(p._ctx.getParent().getChildren(), p._ctx))
}
}
func (p *Parser) enterRule(localctx IParserRuleContext, state, ruleIndex int) {
p.state = state
p._ctx = localctx
p._ctx.setStart( p._input.LT(1) )
if (p.buildParseTrees) {
p._ctx.setStart(p._input.LT(1))
if p.buildParseTrees {
p.addContextToParseTree()
}
if (p._parseListeners != nil) {
if p._parseListeners != nil {
p.triggerEnterRuleEvent()
}
}
func (p *Parser) exitRule() {
p._ctx.setStop( p._input.LT(-1) )
p._ctx.setStop(p._input.LT(-1))
// trigger event on _ctx, before it reverts to parent
if (p._parseListeners != nil) {
if p._parseListeners != nil {
p.triggerExitRuleEvent()
}
p.state = p._ctx.getInvokingState()
@ -477,8 +483,8 @@ func (p *Parser) exitRule() {
func (p *Parser) enterOuterAlt(localctx IParserRuleContext, altNum int) {
// if we have Newlocalctx, make sure we replace existing ctx
// that is previous child of parse tree
if (p.buildParseTrees && p._ctx != localctx) {
if (p._ctx.getParent() != nil) {
if p.buildParseTrees && p._ctx != localctx {
if p._ctx.getParent() != nil {
p._ctx.getParent().(IParserRuleContext).removeLastChild()
p._ctx.getParent().(IParserRuleContext).addChild(localctx)
}
@ -492,10 +498,10 @@ func (p *Parser) enterOuterAlt(localctx IParserRuleContext, altNum int) {
// the parser context is not nested within a precedence rule.
func (p *Parser) getPrecedence() int {
if ( len(p._precedenceStack) == 0) {
if len(p._precedenceStack) == 0 {
return -1
} else {
return p._precedenceStack[ len(p._precedenceStack) -1]
return p._precedenceStack[len(p._precedenceStack)-1]
}
}
@ -503,10 +509,10 @@ func (p *Parser) enterRecursionRule(localctx IParserRuleContext, state, ruleInde
p.state = state
p._precedenceStack.Push(precedence)
p._ctx = localctx
p._ctx.setStart( p._input.LT(1) )
if (p._parseListeners != nil) {
p._ctx.setStart(p._input.LT(1))
if p._parseListeners != nil {
p.triggerEnterRuleEvent() // simulates rule entry for
// left-recursive rules
// left-recursive rules
}
}
@ -515,28 +521,28 @@ func (p *Parser) enterRecursionRule(localctx IParserRuleContext, state, ruleInde
func (p *Parser) pushNewRecursionContext(localctx IParserRuleContext, state, ruleIndex int) {
var previous = p._ctx
previous.setParent( localctx )
previous.setInvokingState( state )
previous.setStart( p._input.LT(-1) )
previous.setParent(localctx)
previous.setInvokingState(state)
previous.setStart(p._input.LT(-1))
p._ctx = localctx
p._ctx.setStart( previous.getStart() )
if (p.buildParseTrees) {
p._ctx.setStart(previous.getStart())
if p.buildParseTrees {
p._ctx.addChild(previous)
}
if (p._parseListeners != nil) {
if p._parseListeners != nil {
p.triggerEnterRuleEvent() // simulates rule entry for
// left-recursive rules
// left-recursive rules
}
}
func (p *Parser) unrollRecursionContexts(parentCtx IParserRuleContext) {
p._precedenceStack.Pop()
p._ctx.setStop( p._input.LT(-1) )
p._ctx.setStop(p._input.LT(-1))
var retCtx = p._ctx // save current ctx (return value)
// unroll so _ctx is as it was before call to recursive method
if (p._parseListeners != nil) {
for (p._ctx != parentCtx) {
if p._parseListeners != nil {
for p._ctx != parentCtx {
p.triggerExitRuleEvent()
p._ctx = p._ctx.getParent().(IParserRuleContext)
}
@ -544,8 +550,8 @@ func (p *Parser) unrollRecursionContexts(parentCtx IParserRuleContext) {
p._ctx = parentCtx
}
// hook into tree
retCtx.setParent( parentCtx )
if (p.buildParseTrees && parentCtx != nil) {
retCtx.setParent(parentCtx)
if p.buildParseTrees && parentCtx != nil {
// add return ctx into invoking rule's tree
parentCtx.addChild(retCtx)
}
@ -553,8 +559,8 @@ func (p *Parser) unrollRecursionContexts(parentCtx IParserRuleContext) {
func (p *Parser) getInvokingContext(ruleIndex int) IParserRuleContext {
var ctx = p._ctx
for (ctx != nil) {
if (ctx.getRuleIndex() == ruleIndex) {
for ctx != nil {
if ctx.getRuleIndex() == ruleIndex {
return ctx
}
ctx = ctx.getParent().(IParserRuleContext)
@ -563,7 +569,7 @@ func (p *Parser) getInvokingContext(ruleIndex int) IParserRuleContext {
}
func (p *Parser) precpred(localctx IRuleContext, precedence int) bool {
return precedence >= p._precedenceStack[ len(p._precedenceStack) -1]
return precedence >= p._precedenceStack[len(p._precedenceStack)-1]
}
func (p *Parser) inContext(context IParserRuleContext) bool {
@ -586,26 +592,26 @@ func (p *Parser) inContext(context IParserRuleContext) bool {
// the ATN, otherwise {@code false}.
func (p *Parser) isExpectedToken(symbol int) bool {
var atn *ATN = p._interp.atn
var atn *ATN = p.Interpreter.atn
var ctx = p._ctx
var s = atn.states[p.state]
var following = atn.nextTokens(s,nil)
if (following.contains(symbol)) {
var following = atn.nextTokens(s, nil)
if following.contains(symbol) {
return true
}
if (!following.contains(TokenEpsilon)) {
if !following.contains(TokenEpsilon) {
return false
}
for (ctx != nil && ctx.getInvokingState() >= 0 && following.contains(TokenEpsilon)) {
for ctx != nil && ctx.getInvokingState() >= 0 && following.contains(TokenEpsilon) {
var invokingState = atn.states[ctx.getInvokingState()]
var rt = invokingState.getTransitions()[0]
following = atn.nextTokens(rt.(*RuleTransition).followState,nil)
if (following.contains(symbol)) {
following = atn.nextTokens(rt.(*RuleTransition).followState, nil)
if following.contains(symbol) {
return true
}
ctx = ctx.getParent().(IParserRuleContext)
}
if (following.contains(TokenEpsilon) && symbol == TokenEOF) {
if following.contains(TokenEpsilon) && symbol == TokenEOF {
return true
} else {
return false
@ -619,19 +625,19 @@ func (p *Parser) isExpectedToken(symbol int) bool {
// @see ATN//getExpectedTokens(int, RuleContext)
//
func (p *Parser) getExpectedTokens() *IntervalSet {
return p._interp.atn.getExpectedTokens(p.state, p._ctx)
return p.Interpreter.atn.getExpectedTokens(p.state, p._ctx)
}
func (p *Parser) getExpectedTokensWithinCurrentRule() *IntervalSet {
var atn = p._interp.atn
var atn = p.Interpreter.atn
var s = atn.states[p.state]
return atn.nextTokens(s,nil)
return atn.nextTokens(s, nil)
}
// Get a rule's index (i.e., {@code RULE_ruleName} field) or -1 if not found.//
func (p *Parser) getRuleIndex(ruleName string) int {
var ruleIndex, ok = p.getRuleIndexMap()[ruleName]
if (ok) {
if ok {
return ruleIndex
} else {
return -1
@ -646,45 +652,45 @@ func (p *Parser) getRuleIndex(ruleName string) int {
// this very useful for error messages.
func (this *Parser) getRuleInvocationStack(p IParserRuleContext) []string {
if (p == nil) {
p = this._ctx;
if p == nil {
p = this._ctx
}
var stack = make([]string,0)
for (p != nil) {
var stack = make([]string, 0)
for p != nil {
// compute what follows who invoked us
var ruleIndex = p.getRuleIndex();
if (ruleIndex < 0) {
var ruleIndex = p.getRuleIndex()
if ruleIndex < 0 {
stack = append(stack, "n/a")
} else {
stack = append(stack, this.getRuleNames()[ruleIndex]);
stack = append(stack, this.getRuleNames()[ruleIndex])
}
p = p.getParent().(IParserRuleContext);
p = p.getParent().(IParserRuleContext)
}
return stack;
};
return stack
}
// For debugging and other purposes.//
func (p *Parser) getDFAStrings() {
panic("dumpDFA Not implemented!")
// return p._interp.decisionToDFA.toString()
// return p._interp.decisionToDFA.toString()
}
// For debugging and other purposes.//
func (p *Parser) dumpDFA() {
panic("dumpDFA Not implemented!")
// var seenOne = false
// for i := 0; i < p._interp.decisionToDFA.length; i++ {
// var dfa = p._interp.decisionToDFA[i]
// if ( len(dfa.states) > 0) {
// if (seenOne) {
// fmt.Println()
// }
// p.printer.println("Decision " + dfa.decision + ":")
// p.printer.print(dfa.toString(p.literalNames, p.symbolicNames))
// seenOne = true
// }
// }
// var seenOne = false
// for i := 0; i < p._interp.decisionToDFA.length; i++ {
// var dfa = p._interp.decisionToDFA[i]
// if ( len(dfa.states) > 0) {
// if (seenOne) {
// fmt.Println()
// }
// p.printer.println("Decision " + dfa.decision + ":")
// p.printer.print(dfa.toString(p.literalNames, p.symbolicNames))
// seenOne = true
// }
// }
}
/*
@ -702,15 +708,14 @@ func (p *Parser) getSourceName() string {
// events as well as token matches. p.is for quick and dirty debugging.
//
func (p *Parser) setTrace(trace *TraceListener) {
if (trace == nil) {
if trace == nil {
p.removeParseListener(p._tracer)
p._tracer = nil
} else {
if (p._tracer != nil) {
if p._tracer != nil {
p.removeParseListener(p._tracer)
}
p._tracer = NewTraceListener(p)
p.addParseListener(p._tracer)
}
}

File diff suppressed because it is too large Load Diff

View File

@ -21,34 +21,31 @@ type IParserRuleContext interface {
addChild(child IRuleContext) IRuleContext
removeLastChild()
}
type ParserRuleContext struct {
*RuleContext
ruleIndex int
children []ParseTree
children []ParseTree
start, stop *Token
exception IRecognitionException
exception IRecognitionException
}
func NewParserRuleContext(parent IParserRuleContext, invokingStateNumber int) *ParserRuleContext {
prc := new(ParserRuleContext)
prc.InitRuleContext(parent, invokingStateNumber)
prc.InitParserRuleContext(parent, invokingStateNumber)
return prc
}
func (prc *ParserRuleContext) InitParserRuleContext(parent IParserRuleContext, invokingStateNumber int){
func (prc *ParserRuleContext) InitParserRuleContext(parent IParserRuleContext, invokingStateNumber int) {
prc.InitRuleContext(parent, invokingStateNumber)
prc.ruleIndex = -1
prc.RuleIndex = -1
// * If we are debugging or building a parse tree for a visitor,
// we need to track all of the tokens and rule invocations associated
// with prc rule's context. This is empty for parsing w/o tree constr.
@ -101,18 +98,18 @@ func (prc *ParserRuleContext) exitRule(listener ParseTreeListener) {
// * Does not set parent link other add methods do that///
func (prc *ParserRuleContext) addTerminalNodeChild(child TerminalNode) TerminalNode {
if (prc.children == nil) {
if prc.children == nil {
prc.children = make([]ParseTree, 0)
}
prc.children = append( prc.children, child )
prc.children = append(prc.children, child)
return child
}
func (prc *ParserRuleContext) addChild(child IRuleContext) IRuleContext {
if (prc.children == nil) {
if prc.children == nil {
prc.children = make([]ParseTree, 0)
}
prc.children = append( prc.children, child )
prc.children = append(prc.children, child)
return child
}
@ -121,8 +118,8 @@ func (prc *ParserRuleContext) addChild(child IRuleContext) IRuleContext {
// generic ruleContext object.
// /
func (prc *ParserRuleContext) removeLastChild() {
if (prc.children != nil && len(prc.children) > 0) {
prc.children = prc.children[0:len(prc.children)-1]
if prc.children != nil && len(prc.children) > 0 {
prc.children = prc.children[0 : len(prc.children)-1]
}
}
@ -143,7 +140,7 @@ func (prc *ParserRuleContext) addErrorNode(badToken *Token) *ErrorNodeImpl {
}
func (prc *ParserRuleContext) getChild(i int) Tree {
if (prc.children != nil && len(prc.children) >= i){
if prc.children != nil && len(prc.children) >= i {
return prc.children[i]
} else {
return nil
@ -151,13 +148,13 @@ func (prc *ParserRuleContext) getChild(i int) Tree {
}
func (prc *ParserRuleContext) getChildOfType(i int, childType reflect.Type) IRuleContext {
if (childType == nil) {
if childType == nil {
return prc.getChild(i).(IRuleContext)
} else {
for j :=0; j<len(prc.children); j++ {
for j := 0; j < len(prc.children); j++ {
var child = prc.children[j]
if reflect.TypeOf(child) == childType {
if(i==0) {
if i == 0 {
return child.(IRuleContext)
} else {
i -= 1
@ -176,7 +173,7 @@ func (prc *ParserRuleContext) getStart() *Token {
return prc.start
}
func (prc *ParserRuleContext) setStop(t *Token){
func (prc *ParserRuleContext) setStop(t *Token) {
prc.stop = t
}
@ -186,11 +183,11 @@ func (prc *ParserRuleContext) getStop() *Token {
func (prc *ParserRuleContext) getToken(ttype int, i int) TerminalNode {
for j :=0; j<len(prc.children); j++ {
for j := 0; j < len(prc.children); j++ {
var child = prc.children[j]
if c2, ok := child.(TerminalNode); ok {
if (c2.getSymbol().tokenType == ttype) {
if(i==0) {
if c2.getSymbol().tokenType == ttype {
if i == 0 {
return c2
} else {
i -= 1
@ -202,14 +199,14 @@ func (prc *ParserRuleContext) getToken(ttype int, i int) TerminalNode {
}
func (prc *ParserRuleContext) getTokens(ttype int) []TerminalNode {
if (prc.children== nil) {
if prc.children == nil {
return make([]TerminalNode, 0)
} else {
var tokens = make([]TerminalNode, 0)
for j:=0; j<len(prc.children); j++ {
for j := 0; j < len(prc.children); j++ {
var child = prc.children[j]
if tchild, ok := child.(TerminalNode); ok {
if (tchild.getSymbol().tokenType == ttype) {
if tchild.getSymbol().tokenType == ttype {
tokens = append(tokens, tchild)
}
}
@ -240,7 +237,7 @@ func (prc *ParserRuleContext) getTypedRuleContexts(ctxType reflect.Type) []*inte
}
func (prc *ParserRuleContext) getChildCount() int {
if (prc.children== nil) {
if prc.children == nil {
return 0
} else {
return len(prc.children)
@ -248,7 +245,7 @@ func (prc *ParserRuleContext) getChildCount() int {
}
func (prc *ParserRuleContext) getSourceInterval() *Interval {
if( prc.start == nil || prc.stop == nil) {
if prc.start == nil || prc.stop == nil {
return TreeINVALID_INTERVAL
} else {
return NewInterval(prc.start.tokenIndex, prc.stop.tokenIndex)
@ -262,16 +259,16 @@ type IInterpreterRuleContext interface {
}
type InterpreterRuleContext struct {
ParserRuleContext
ruleIndex int
*ParserRuleContext
}
func NewInterpreterRuleContext(parent IInterpreterRuleContext, invokingStateNumber, ruleIndex int) *InterpreterRuleContext {
func NewInterpreterRuleContext(parent InterpreterRuleContext, invokingStateNumber, ruleIndex int) *InterpreterRuleContext {
prc := new(InterpreterRuleContext)
prc.InitParserRuleContext( parent, invokingStateNumber )
prc.ruleIndex = ruleIndex
prc.InitParserRuleContext(parent, invokingStateNumber)
prc.RuleIndex = ruleIndex
return prc
}

View File

@ -73,7 +73,7 @@ func (this *PredictionContext) isEmpty() bool {
}
func (this *PredictionContext) hasEmptyPath() bool {
return this.getReturnState(this.length() - 1) == PredictionContextEMPTY_RETURN_STATE
return this.getReturnState(this.length()-1) == PredictionContextEMPTY_RETURN_STATE
}
func (this *PredictionContext) hashString() string {
@ -104,7 +104,6 @@ func (this *PredictionContext) getReturnState(index int) int {
panic("Not implemented")
}
// Used to cache {@link PredictionContext} objects. Its used for the shared
// context cash associated with contexts in DFA states. This cache
// can be used for both lexers and parsers.
@ -124,11 +123,11 @@ func NewPredictionContextCache() *PredictionContextCache {
// Protect shared cache from unsafe thread access.
//
func (this *PredictionContextCache) add(ctx IPredictionContext) IPredictionContext {
if (ctx == PredictionContextEMPTY) {
if ctx == PredictionContextEMPTY {
return PredictionContextEMPTY
}
var existing = this.cache[ctx]
if (existing != nil) {
if existing != nil {
return existing
}
this.cache[ctx] = ctx
@ -150,7 +149,7 @@ type ISingletonPredictionContext interface {
type SingletonPredictionContext struct {
*PredictionContext
parentCtx IPredictionContext
parentCtx IPredictionContext
returnState int
}
@ -162,7 +161,7 @@ func NewSingletonPredictionContext(parent IPredictionContext, returnState int) *
func (s *SingletonPredictionContext) InitSingletonPredictionContext(parent IPredictionContext, returnState int) {
if (parent != nil){
if parent != nil {
s.cachedHashString = calculateHashString(parent, returnState)
} else {
s.cachedHashString = calculateEmptyHashString()
@ -174,7 +173,7 @@ func (s *SingletonPredictionContext) InitSingletonPredictionContext(parent IPred
}
func SingletonPredictionContextcreate(parent IPredictionContext, returnState int) IPredictionContext {
if (returnState == PredictionContextEMPTY_RETURN_STATE && parent == nil) {
if returnState == PredictionContextEMPTY_RETURN_STATE && parent == nil {
// someone can pass in the bits of an array ctx that mean $
return PredictionContextEMPTY
} else {
@ -195,11 +194,11 @@ func (this *SingletonPredictionContext) getReturnState(index int) int {
}
func (this *SingletonPredictionContext) equals(other IPredictionContext) bool {
if (this == other) {
if this == other {
return true
} else if _, ok := other.(*SingletonPredictionContext); !ok {
return false
} else if (this.hashString() != other.hashString()) {
} else if this.hashString() != other.hashString() {
return false // can't be same if hash is different
} else {
@ -207,7 +206,7 @@ func (this *SingletonPredictionContext) equals(other IPredictionContext) bool {
if this.returnState != other.getReturnState(0) {
return false
} else if(this.parentCtx==nil) {
} else if this.parentCtx == nil {
return otherP.parentCtx == nil
} else {
return this.parentCtx.equals(otherP.parentCtx)
@ -222,20 +221,20 @@ func (this *SingletonPredictionContext) hashString() string {
func (this *SingletonPredictionContext) toString() string {
var up string
if (this.parentCtx == nil){
if this.parentCtx == nil {
up = ""
} else {
up = this.parentCtx.toString()
}
if (len(up) == 0) {
if (this.returnState == PredictionContextEMPTY_RETURN_STATE) {
if len(up) == 0 {
if this.returnState == PredictionContextEMPTY_RETURN_STATE {
return "$"
} else {
return strconv.Itoa( this.returnState )
return strconv.Itoa(this.returnState)
}
} else {
return strconv.Itoa( this.returnState )+ " " + up
return strconv.Itoa(this.returnState) + " " + up
}
}
@ -277,7 +276,7 @@ func (this *EmptyPredictionContext) toString() string {
type ArrayPredictionContext struct {
*PredictionContext
parents []IPredictionContext
parents []IPredictionContext
returnStates []int
}
@ -320,11 +319,11 @@ func (this *ArrayPredictionContext) getReturnState(index int) int {
}
func (this *ArrayPredictionContext) equals(other IPredictionContext) bool {
if (this == other) {
if this == other {
return true
} else if _, ok := other.(*ArrayPredictionContext); !ok {
return false
} else if (this.cachedHashString != other.hashString()) {
} else if this.cachedHashString != other.hashString() {
return false // can't be same if hash is different
} else {
otherP := other.(*ArrayPredictionContext)
@ -333,20 +332,20 @@ func (this *ArrayPredictionContext) equals(other IPredictionContext) bool {
}
func (this *ArrayPredictionContext) toString() string {
if (this.isEmpty()) {
if this.isEmpty() {
return "[]"
} else {
var s = "["
for i := 0; i < len(this.returnStates); i++ {
if (i > 0) {
if i > 0 {
s = s + ", "
}
if (this.returnStates[i] == PredictionContextEMPTY_RETURN_STATE) {
if this.returnStates[i] == PredictionContextEMPTY_RETURN_STATE {
s = s + "$"
continue
}
s = s + strconv.Itoa(this.returnStates[i])
if (this.parents[i] != nil) {
if this.parents[i] != nil {
s = s + " " + this.parents[i].toString()
} else {
s = s + "nil"
@ -360,12 +359,12 @@ func (this *ArrayPredictionContext) toString() string {
// Return {@link //EMPTY} if {@code outerContext} is empty or nil.
// /
func predictionContextFromRuleContext(a *ATN, outerContext IRuleContext) IPredictionContext {
if (outerContext == nil) {
if outerContext == nil {
outerContext = RuleContextEMPTY
}
// if we are in RuleContext of start rule, s, then PredictionContext
// is EMPTY. Nobody called us. (if we are empty, return empty)
if (outerContext.getParent() == nil || outerContext == RuleContextEMPTY) {
if outerContext.getParent() == nil || outerContext == RuleContextEMPTY {
return PredictionContextEMPTY
}
// If we have a parent, convert it to a PredictionContext graph
@ -392,19 +391,19 @@ func calculateListsHashString(parents []PredictionContext, returnStates []int) s
func merge(a, b IPredictionContext, rootIsWildcard bool, mergeCache *DoubleDict) IPredictionContext {
// share same graph if both same
if (a == b) {
if a == b {
return a
}
ac, ok1 := a.(*SingletonPredictionContext)
bc, ok2 := a.(*SingletonPredictionContext)
if (ok1 && ok2) {
if ok1 && ok2 {
return mergeSingletons(ac, bc, rootIsWildcard, mergeCache)
}
// At least one of a or b is array
// If one is $ and rootIsWildcard, return $ as// wildcard
if (rootIsWildcard) {
if rootIsWildcard {
if _, ok := a.(*EmptyPredictionContext); ok {
return a
}
@ -414,10 +413,10 @@ func merge(a, b IPredictionContext, rootIsWildcard bool, mergeCache *DoubleDict)
}
// convert singleton so both are arrays to normalize
if _, ok := a.(*SingletonPredictionContext); ok {
a = NewArrayPredictionContext([]IPredictionContext{ a.getParent(0) }, []int{ a.getReturnState(0) })
a = NewArrayPredictionContext([]IPredictionContext{a.getParent(0)}, []int{a.getReturnState(0)})
}
if _, ok := b.(*SingletonPredictionContext); ok {
b = NewArrayPredictionContext( []IPredictionContext{ b.getParent(0) }, []int{ b.getReturnState(0) })
b = NewArrayPredictionContext([]IPredictionContext{b.getParent(0)}, []int{b.getReturnState(0)})
}
return mergeArrays(a.(*ArrayPredictionContext), b.(*ArrayPredictionContext), rootIsWildcard, mergeCache)
}
@ -454,32 +453,32 @@ func merge(a, b IPredictionContext, rootIsWildcard bool, mergeCache *DoubleDict)
// @param mergeCache
// /
func mergeSingletons(a, b *SingletonPredictionContext, rootIsWildcard bool, mergeCache *DoubleDict) IPredictionContext {
if (mergeCache != nil) {
if mergeCache != nil {
var previous = mergeCache.get(a.hashString(), b.hashString())
if (previous != nil) {
if previous != nil {
return previous.(IPredictionContext)
}
previous = mergeCache.get(b.hashString(), a.hashString())
if (previous != nil) {
if previous != nil {
return previous.(IPredictionContext)
}
}
var rootMerge = mergeRoot(a, b, rootIsWildcard)
if (rootMerge != nil) {
if (mergeCache != nil) {
if rootMerge != nil {
if mergeCache != nil {
mergeCache.set(a.hashString(), b.hashString(), rootMerge)
}
return rootMerge
}
if (a.returnState == b.returnState) {
if a.returnState == b.returnState {
var parent = merge(a.parentCtx, b.parentCtx, rootIsWildcard, mergeCache)
// if parent is same as existing a or b parent or reduced to a parent,
// return it
if (parent == a.parentCtx) {
if parent == a.parentCtx {
return a // ax + bx = ax, if a=b
}
if (parent == b.parentCtx) {
if parent == b.parentCtx {
return b // ax + bx = bx, if a=b
}
// else: ax + ay = a'[x,y]
@ -487,28 +486,28 @@ func mergeSingletons(a, b *SingletonPredictionContext, rootIsWildcard bool, merg
// of those graphs. dup a, a' points at merged array
// Newjoined parent so create Newsingleton pointing to it, a'
var spc = SingletonPredictionContextcreate(parent, a.returnState)
if (mergeCache != nil) {
if mergeCache != nil {
mergeCache.set(a.hashString(), b.hashString(), spc)
}
return spc
} else { // a != b payloads differ
// see if we can collapse parents due to $+x parents if local ctx
var singleParent IPredictionContext = nil
if (a == b || (a.parentCtx != nil && a.parentCtx == b.parentCtx)) { // ax +
if a == b || (a.parentCtx != nil && a.parentCtx == b.parentCtx) { // ax +
// bx =
// [a,b]x
singleParent = a.parentCtx
}
if (singleParent != nil) { // parents are same
if singleParent != nil { // parents are same
// sort payloads and use same parent
var payloads = []int{ a.returnState, b.returnState }
if (a.returnState > b.returnState) {
var payloads = []int{a.returnState, b.returnState}
if a.returnState > b.returnState {
payloads[0] = b.returnState
payloads[1] = a.returnState
}
var parents = []IPredictionContext{ singleParent, singleParent }
var parents = []IPredictionContext{singleParent, singleParent}
var apc = NewArrayPredictionContext(parents, payloads)
if (mergeCache != nil) {
if mergeCache != nil {
mergeCache.set(a.hashString(), b.hashString(), apc)
}
return apc
@ -516,15 +515,15 @@ func mergeSingletons(a, b *SingletonPredictionContext, rootIsWildcard bool, merg
// parents differ and can't merge them. Just pack together
// into array can't merge.
// ax + by = [ax,by]
var payloads = []int{ a.returnState, b.returnState }
var parents = []IPredictionContext{ a.parentCtx, b.parentCtx }
if (a.returnState > b.returnState) { // sort by payload
var payloads = []int{a.returnState, b.returnState}
var parents = []IPredictionContext{a.parentCtx, b.parentCtx}
if a.returnState > b.returnState { // sort by payload
payloads[0] = b.returnState
payloads[1] = a.returnState
parents = []IPredictionContext{ b.parentCtx, a.parentCtx }
parents = []IPredictionContext{b.parentCtx, a.parentCtx}
}
var a_ = NewArrayPredictionContext(parents, payloads)
if (mergeCache != nil) {
if mergeCache != nil {
mergeCache.set(a.hashString(), b.hashString(), a_)
}
return a_
@ -570,23 +569,23 @@ func mergeSingletons(a, b *SingletonPredictionContext, rootIsWildcard bool, merg
// otherwise false to indicate a full-context merge
// /
func mergeRoot(a, b ISingletonPredictionContext, rootIsWildcard bool) IPredictionContext {
if (rootIsWildcard) {
if (a == PredictionContextEMPTY) {
if rootIsWildcard {
if a == PredictionContextEMPTY {
return PredictionContextEMPTY // // + b =//
}
if (b == PredictionContextEMPTY) {
if b == PredictionContextEMPTY {
return PredictionContextEMPTY // a +// =//
}
} else {
if (a == PredictionContextEMPTY && b == PredictionContextEMPTY) {
if a == PredictionContextEMPTY && b == PredictionContextEMPTY {
return PredictionContextEMPTY // $ + $ = $
} else if (a == PredictionContextEMPTY) { // $ + x = [$,x]
var payloads = []int{ b.getReturnState(-1), PredictionContextEMPTY_RETURN_STATE }
var parents = []IPredictionContext{ b.getParent(-1), nil }
} else if a == PredictionContextEMPTY { // $ + x = [$,x]
var payloads = []int{b.getReturnState(-1), PredictionContextEMPTY_RETURN_STATE}
var parents = []IPredictionContext{b.getParent(-1), nil}
return NewArrayPredictionContext(parents, payloads)
} else if (b == PredictionContextEMPTY) { // x + $ = [$,x] ($ is always first if present)
var payloads = []int{ a.getReturnState(-1), PredictionContextEMPTY_RETURN_STATE }
var parents = []IPredictionContext{ a.getParent(-1), nil }
} else if b == PredictionContextEMPTY { // x + $ = [$,x] ($ is always first if present)
var payloads = []int{a.getReturnState(-1), PredictionContextEMPTY_RETURN_STATE}
var parents = []IPredictionContext{a.getParent(-1), nil}
return NewArrayPredictionContext(parents, payloads)
}
}
@ -614,13 +613,13 @@ func mergeRoot(a, b ISingletonPredictionContext, rootIsWildcard bool) IPredictio
// <embed src="images/ArrayMerge_EqualTop.svg" type="image/svg+xml"/></p>
// /
func mergeArrays(a, b *ArrayPredictionContext, rootIsWildcard bool, mergeCache *DoubleDict) IPredictionContext {
if (mergeCache != nil) {
if mergeCache != nil {
var previous = mergeCache.get(a.hashString(), b.hashString())
if (previous != nil) {
if previous != nil {
return previous.(IPredictionContext)
}
previous = mergeCache.get(b.hashString(), a.hashString())
if (previous != nil) {
if previous != nil {
return previous.(IPredictionContext)
}
}
@ -629,13 +628,13 @@ func mergeArrays(a, b *ArrayPredictionContext, rootIsWildcard bool, mergeCache *
var j = 0 // walks b
var k = 0 // walks target M array
var mergedReturnStates = make([]int,0)
var mergedParents = make([]IPredictionContext,0)
var mergedReturnStates = make([]int, 0)
var mergedParents = make([]IPredictionContext, 0)
// walk and merge to yield mergedParents, mergedReturnStates
for i < len(a.returnStates) && j < len(b.returnStates) {
var a_parent = a.parents[i]
var b_parent = b.parents[j]
if (a.returnStates[i] == b.returnStates[j]) {
if a.returnStates[i] == b.returnStates[j] {
// same payload (stack tops are equal), must yield merged singleton
var payload = a.returnStates[i]
// $+$ = $
@ -643,7 +642,7 @@ func mergeArrays(a, b *ArrayPredictionContext, rootIsWildcard bool, mergeCache *
var ax_ax = (a_parent != nil && b_parent != nil && a_parent == b_parent) // ax+ax
// ->
// ax
if (bothDollars || ax_ax) {
if bothDollars || ax_ax {
mergedParents[k] = a_parent // choose left
mergedReturnStates[k] = payload
} else { // ax+ay -> a'[x,y]
@ -653,7 +652,7 @@ func mergeArrays(a, b *ArrayPredictionContext, rootIsWildcard bool, mergeCache *
}
i += 1 // hop over left one as usual
j += 1 // but also skip one in right side since we merge
} else if (a.returnStates[i] < b.returnStates[j]) { // copy a[i] to M
} else if a.returnStates[i] < b.returnStates[j] { // copy a[i] to M
mergedParents[k] = a_parent
mergedReturnStates[k] = a.returnStates[i]
i += 1
@ -665,7 +664,7 @@ func mergeArrays(a, b *ArrayPredictionContext, rootIsWildcard bool, mergeCache *
k += 1
}
// copy over any payloads remaining in either array
if (i < len(a.returnStates)) {
if i < len(a.returnStates) {
for p := i; p < len(a.returnStates); p++ {
mergedParents[k] = a.parents[p]
mergedReturnStates[k] = a.returnStates[p]
@ -679,10 +678,10 @@ func mergeArrays(a, b *ArrayPredictionContext, rootIsWildcard bool, mergeCache *
}
}
// trim merged if we combined a few that had same stack tops
if (k < len(mergedParents)) { // write index < last position trim
if (k == 1) { // for just one merged element, return singleton top
if k < len(mergedParents) { // write index < last position trim
if k == 1 { // for just one merged element, return singleton top
var a_ = SingletonPredictionContextcreate(mergedParents[0], mergedReturnStates[0])
if (mergeCache != nil) {
if mergeCache != nil {
mergeCache.set(a.hashString(), b.hashString(), a_)
}
return a_
@ -695,21 +694,21 @@ func mergeArrays(a, b *ArrayPredictionContext, rootIsWildcard bool, mergeCache *
// if we created same array as a or b, return that instead
// TODO: track whether this is possible above during merge sort for speed
if (M == a) {
if (mergeCache != nil) {
if M == a {
if mergeCache != nil {
mergeCache.set(a.hashString(), b.hashString(), a)
}
return a
}
if (M == b) {
if (mergeCache != nil) {
if M == b {
if mergeCache != nil {
mergeCache.set(a.hashString(), b.hashString(), b)
}
return b
}
combineCommonParents(mergedParents)
if (mergeCache != nil) {
if mergeCache != nil {
mergeCache.set(a.hashString(), b.hashString(), M)
}
return M
@ -805,10 +804,3 @@ func getCachedPredictionContext(context IPredictionContext, contextCache *Predic
// return nodes
// }
//}

View File

@ -1,7 +1,8 @@
package antlr4
import (
"strings"
"strconv"
"strconv"
"strings"
)
//
@ -69,7 +70,6 @@ const (
// behavior for syntactically-incorrect inputs.</p>
//
PredictionModeLL_EXACT_AMBIG_DETECTION = 2
)
//
@ -164,37 +164,37 @@ const (
// the configurations to strip out all of the predicates so that a standard
// {@link ATNConfigSet} will merge everything ignoring predicates.</p>
//
func PredictionModehasSLLConflictTerminatingPrediction( mode int, configs *ATNConfigSet) bool {
// Configs in rule stop states indicate reaching the end of the decision
// rule (local context) or end of start rule (full context). If all
// configs meet this condition, then none of the configurations is able
// to match additional input so we terminate prediction.
//
if (PredictionModeallConfigsInRuleStopStates(configs)) {
return true
}
// pure SLL mode parsing
if (mode == PredictionModeSLL) {
// Don't bother with combining configs from different semantic
// contexts if we can fail over to full LL costs more time
// since we'll often fail over anyway.
if (configs.hasSemanticContext) {
// dup configs, tossing out semantic predicates
var dup = NewATNConfigSet(false)
for i:= 0; i< len(configs.configs); i++ {
var c = configs.configs[i]
func PredictionModehasSLLConflictTerminatingPrediction(mode int, configs *ATNConfigSet) bool {
// Configs in rule stop states indicate reaching the end of the decision
// rule (local context) or end of start rule (full context). If all
// configs meet this condition, then none of the configurations is able
// to match additional input so we terminate prediction.
//
if PredictionModeallConfigsInRuleStopStates(configs) {
return true
}
// pure SLL mode parsing
if mode == PredictionModeSLL {
// Don't bother with combining configs from different semantic
// contexts if we can fail over to full LL costs more time
// since we'll often fail over anyway.
if configs.hasSemanticContext {
// dup configs, tossing out semantic predicates
var dup = NewATNConfigSet(false)
for i := 0; i < len(configs.configs); i++ {
var c = configs.configs[i]
// NewATNConfig({semanticContext:}, c)
c = NewATNConfig2(c, SemanticContextNONE)
dup.add(c, nil)
}
configs = dup
}
// now we have combined contexts for configs with dissimilar preds
}
// pure SLL or combined SLL+LL mode parsing
var altsets = PredictionModegetConflictingAltSubsets(configs)
return PredictionModehasConflictingAltSet(altsets) && !PredictionModehasStateAssociatedWithOneAlt(configs)
c = NewATNConfig2(c, SemanticContextNONE)
dup.add(c, nil)
}
configs = dup
}
// now we have combined contexts for configs with dissimilar preds
}
// pure SLL or combined SLL+LL mode parsing
var altsets = PredictionModegetConflictingAltSubsets(configs)
return PredictionModehasConflictingAltSet(altsets) && !PredictionModehasStateAssociatedWithOneAlt(configs)
}
// Checks if any configuration in {@code configs} is in a
@ -206,13 +206,13 @@ func PredictionModehasSLLConflictTerminatingPrediction( mode int, configs *ATNCo
// @return {@code true} if any configuration in {@code configs} is in a
// {@link RuleStopState}, otherwise {@code false}
func PredictionModehasConfigInRuleStopState(configs *ATNConfigSet) bool {
for i:= 0; i< len(configs.configs); i++ {
for i := 0; i < len(configs.configs); i++ {
var c = configs.configs[i]
if _, ok := c.getState().(*RuleStopState); ok {
return true
}
return true
}
}
return false
return false
}
// Checks if all configurations in {@code configs} are in a
@ -225,14 +225,14 @@ func PredictionModehasConfigInRuleStopState(configs *ATNConfigSet) bool {
// {@link RuleStopState}, otherwise {@code false}
func PredictionModeallConfigsInRuleStopStates(configs *ATNConfigSet) bool {
for i:= 0; i < len(configs.configs); i++ {
for i := 0; i < len(configs.configs); i++ {
var c = configs.configs[i]
if _, ok := c.getState().(*RuleStopState); !ok {
return false
}
if _, ok := c.getState().(*RuleStopState); !ok {
return false
}
}
return true
return true
}
//
@ -377,7 +377,7 @@ func PredictionModeallConfigsInRuleStopStates(configs *ATNConfigSet) bool {
// {@code A={{1,2}}} or {@code {{1,2},{1,2}}}, etc...</p>
//
func PredictionModeresolvesToJustOneViableAlt(altsets []*BitSet) int {
return PredictionModegetSingleViableAlt(altsets)
return PredictionModegetSingleViableAlt(altsets)
}
//
@ -389,8 +389,9 @@ func PredictionModeresolvesToJustOneViableAlt(altsets []*BitSet) int {
// {@link BitSet//cardinality cardinality} &gt 1, otherwise {@code false}
//
func PredictionModeallSubsetsConflict(altsets []*BitSet) bool {
return !PredictionModehasNonConflictingAltSet(altsets)
return !PredictionModehasNonConflictingAltSet(altsets)
}
//
// Determines if any single alternative subset in {@code altsets} contains
// exactly one alternative.
@ -400,13 +401,13 @@ func PredictionModeallSubsetsConflict(altsets []*BitSet) bool {
// {@link BitSet//cardinality cardinality} 1, otherwise {@code false}
//
func PredictionModehasNonConflictingAltSet(altsets []*BitSet) bool {
for i:=0; i<len(altsets); i++{
for i := 0; i < len(altsets); i++ {
var alts = altsets[i]
if (alts.length()==1) {
return true
}
if alts.length() == 1 {
return true
}
}
return false
return false
}
//
@ -418,13 +419,13 @@ func PredictionModehasNonConflictingAltSet(altsets []*BitSet) bool {
// {@link BitSet//cardinality cardinality} &gt 1, otherwise {@code false}
//
func PredictionModehasConflictingAltSet(altsets []*BitSet) bool {
for i:=0; i<len(altsets); i++{
for i := 0; i < len(altsets); i++ {
var alts = altsets[i]
if (alts.length()>1) {
return true
}
if alts.length() > 1 {
return true
}
}
return false
return false
}
//
@ -435,18 +436,18 @@ func PredictionModehasConflictingAltSet(altsets []*BitSet) bool {
// others, otherwise {@code false}
//
func PredictionModeallSubsetsEqual(altsets []*BitSet) bool {
var first *BitSet = nil
var first *BitSet = nil
for i:=0; i<len(altsets); i++{
for i := 0; i < len(altsets); i++ {
var alts = altsets[i]
if (first == nil) {
first = alts
} else if (alts!=first) {
return false
}
if first == nil {
first = alts
} else if alts != first {
return false
}
}
return true
return true
}
//
@ -457,12 +458,12 @@ func PredictionModeallSubsetsEqual(altsets []*BitSet) bool {
// @param altsets a collection of alternative subsets
//
func PredictionModegetUniqueAlt(altsets []*BitSet) int {
var all = PredictionModegetAlts(altsets)
if (all.length()==1) {
return all.minValue()
} else {
return ATNINVALID_ALT_NUMBER
}
var all = PredictionModegetAlts(altsets)
if all.length() == 1 {
return all.minValue()
} else {
return ATNINVALID_ALT_NUMBER
}
}
// Gets the complete set of represented alternatives for a collection of
@ -473,11 +474,11 @@ func PredictionModegetUniqueAlt(altsets []*BitSet) int {
// @return the set of represented alternatives in {@code altsets}
//
func PredictionModegetAlts(altsets []*BitSet) *BitSet {
var all = NewBitSet()
var all = NewBitSet()
for _, alts := range altsets {
all.or(alts)
}
return all
return all
}
//
@ -490,28 +491,28 @@ func PredictionModegetAlts(altsets []*BitSet) *BitSet {
// </pre>
//
func PredictionModegetConflictingAltSubsets(configs *ATNConfigSet) []*BitSet {
var configToAlts = make(map[string]*BitSet)
var configToAlts = make(map[string]*BitSet)
for i :=0; i < len(configs.configs); i++ {
for i := 0; i < len(configs.configs); i++ {
var c = configs.configs[i]
var key = "key_" + strconv.Itoa(c.getState().getStateNumber()) + "/" + c.getContext().toString()
var alts = configToAlts[key]
if (alts != nil) {
alts = NewBitSet()
configToAlts[key] = alts
}
alts.add(c.getAlt())
var key = "key_" + strconv.Itoa(c.getState().getStateNumber()) + "/" + c.getContext().toString()
var alts = configToAlts[key]
if alts != nil {
alts = NewBitSet()
configToAlts[key] = alts
}
alts.add(c.getAlt())
}
var values = make([]*BitSet, 0)
for k,_ := range configToAlts {
if( strings.Index( k, "key_") != 0) {
for k, _ := range configToAlts {
if strings.Index(k, "key_") != 0 {
continue
}
values = append(values, configToAlts[k])
}
return values
return values
}
//
@ -523,41 +524,40 @@ func PredictionModegetConflictingAltSubsets(configs *ATNConfigSet) []*BitSet {
// </pre>
//
func PredictionModegetStateToAltMap(configs *ATNConfigSet) *AltDict {
var m = NewAltDict()
var m = NewAltDict()
for _, c := range configs.configs {
var alts = m.get(c.getState().toString())
if (alts == nil) {
alts = NewBitSet()
m.put(c.getState().toString(), alts)
}
alts.(*BitSet).add(c.getAlt())
}
return m
}
func PredictionModehasStateAssociatedWithOneAlt (configs *ATNConfigSet) bool {
var values = PredictionModegetStateToAltMap(configs).values()
for i:=0; i<len(values); i++ {
if ( values[i].(*BitSet).length() ==1) {
return true
}
}
return false
}
func PredictionModegetSingleViableAlt (altsets []*BitSet) int {
var result = ATNINVALID_ALT_NUMBER
for i:=0; i<len(altsets); i++{
var alts = altsets[i]
var minAlt = alts.minValue()
if(result==ATNINVALID_ALT_NUMBER) {
result = minAlt
} else if(result!=minAlt) { // more than 1 viable alt
return ATNINVALID_ALT_NUMBER
}
var alts = m.get(c.getState().toString())
if alts == nil {
alts = NewBitSet()
m.put(c.getState().toString(), alts)
}
alts.(*BitSet).add(c.getAlt())
}
return result
return m
}
// PredictionModehasStateAssociatedWithOneAlt reports whether any ATN state
// reached by the configurations in configs is associated with exactly one
// alternative, using the state-to-alternatives map built from configs.
func PredictionModehasStateAssociatedWithOneAlt(configs *ATNConfigSet) bool {
	for _, altSet := range PredictionModegetStateToAltMap(configs).values() {
		if altSet.(*BitSet).length() == 1 {
			return true
		}
	}
	return false
}
// PredictionModegetSingleViableAlt returns the single viable alternative
// shared by every subset in altsets (each subset's minimum alternative), or
// ATNINVALID_ALT_NUMBER as soon as two subsets disagree on that minimum.
func PredictionModegetSingleViableAlt(altsets []*BitSet) int {
	result := ATNINVALID_ALT_NUMBER
	for _, alts := range altsets {
		minAlt := alts.minValue()
		switch {
		case result == ATNINVALID_ALT_NUMBER:
			result = minAlt
		case result != minAlt:
			// more than 1 viable alt
			return ATNINVALID_ALT_NUMBER
		}
	}
	return result
}

View File

@ -1,8 +1,8 @@
package antlr4
import (
"fmt"
"strings"
"fmt"
"strings"
"strconv"
)
@ -10,7 +10,7 @@ import (
type IRecognizer interface {
getState() int
getATN() *ATN
action( _localctx IRuleContext, ruleIndex, actionIndex int)
action(_localctx IRuleContext, ruleIndex, actionIndex int)
getRuleNames() []string
getErrorListenerDispatch() IErrorListener
@ -19,35 +19,33 @@ type IRecognizer interface {
}
type Recognizer struct {
_listeners []IErrorListener
state int
_listeners []IErrorListener
state int
}
func NewRecognizer() *Recognizer {
rec := new(Recognizer)
rec.InitRecognizer()
return rec
rec := new(Recognizer)
rec.InitRecognizer()
return rec
}
func (rec *Recognizer) InitRecognizer() {
rec._listeners = []IErrorListener{ ConsoleErrorListenerINSTANCE }
rec.state = -1
rec._listeners = []IErrorListener{ConsoleErrorListenerINSTANCE}
rec.state = -1
}
var tokenTypeMapCache = make(map[string]int)
var ruleIndexMapCache = make(map[string]int)
func (this *Recognizer) checkVersion(toolVersion string) {
var runtimeVersion = "4.5.2"
if (runtimeVersion!=toolVersion) {
fmt.Println("ANTLR runtime and generated code versions disagree: "+runtimeVersion+"!="+toolVersion)
}
var runtimeVersion = "4.5.2"
if runtimeVersion != toolVersion {
fmt.Println("ANTLR runtime and generated code versions disagree: " + runtimeVersion + "!=" + toolVersion)
}
}
func (this *Recognizer) action( context IRuleContext, ruleIndex, actionIndex int) {
panic("action not implemented on Recognizer!")
func (this *Recognizer) action(context IRuleContext, ruleIndex, actionIndex int) {
panic("action not implemented on Recognizer!")
}
func (this *Recognizer) addErrorListener(listener IErrorListener) {
@ -55,15 +53,15 @@ func (this *Recognizer) addErrorListener(listener IErrorListener) {
}
func (this *Recognizer) removeErrorListeners() {
this._listeners = make([]IErrorListener, 0)
this._listeners = make([]IErrorListener, 0)
}
func (this *Recognizer) getRuleNames() []string {
return nil
return nil
}
func (this *Recognizer) getTokenNames() []string {
return nil
return nil
}
func (this *Recognizer) getState() int {
@ -89,28 +87,28 @@ func (this *Recognizer) getState() int {
// <p>Used for XPath and tree pattern compilation.</p>
//
func (this *Recognizer) getRuleIndexMap() map[string]int {
panic("Method not defined!")
// var ruleNames = this.getRuleNames()
// if (ruleNames==nil) {
// panic("The current recognizer does not provide a list of rule names.")
// }
//
// var result = ruleIndexMapCache[ruleNames]
// if(result==nil) {
// result = ruleNames.reduce(function(o, k, i) { o[k] = i })
// ruleIndexMapCache[ruleNames] = result
// }
// return result
panic("Method not defined!")
// var ruleNames = this.getRuleNames()
// if (ruleNames==nil) {
// panic("The current recognizer does not provide a list of rule names.")
// }
//
// var result = ruleIndexMapCache[ruleNames]
// if(result==nil) {
// result = ruleNames.reduce(function(o, k, i) { o[k] = i })
// ruleIndexMapCache[ruleNames] = result
// }
// return result
}
func (this *Recognizer) getTokenType(tokenName string) int {
panic("Method not defined!")
// var ttype = this.getTokenTypeMap()[tokenName]
// if (ttype !=nil) {
// return ttype
// } else {
// return TokenInvalidType
// }
panic("Method not defined!")
// var ttype = this.getTokenTypeMap()[tokenName]
// if (ttype !=nil) {
// return ttype
// } else {
// return TokenInvalidType
// }
}
//func (this *Recognizer) getTokenTypeMap() map[string]int {
@ -143,12 +141,11 @@ func (this *Recognizer) getTokenType(tokenName string) int {
// What is the error header, normally line/character position information?//
func (this *Recognizer) getErrorHeader(e IRecognitionException) string {
var line = e.getOffendingToken().line
var column = e.getOffendingToken().column
return "line " + strconv.Itoa(line) + ":" + strconv.Itoa(column)
var line = e.getOffendingToken().line
var column = e.getOffendingToken().column
return "line " + strconv.Itoa(line) + ":" + strconv.Itoa(column)
}
// How should a token be displayed in an error message? The default
// is to display just the text, but during development you might
// want to have a lot of information spit out. Override in that case
@ -163,34 +160,34 @@ func (this *Recognizer) getErrorHeader(e IRecognitionException) string {
// {@link DefaultErrorStrategy//getTokenErrorDisplay}.
//
func (this *Recognizer) getTokenErrorDisplay(t *Token) string {
if (t==nil) {
return "<no token>"
}
var s = t.text()
if s=="" {
if (t.tokenType==TokenEOF) {
s = "<EOF>"
} else {
s = "<" + strconv.Itoa(t.tokenType) + ">"
}
}
s = strings.Replace(s,"\t","\\t", -1)
s = strings.Replace(s,"\n","\\n", -1)
s = strings.Replace(s,"\r","\\r", -1)
if t == nil {
return "<no token>"
}
var s = t.text()
if s == "" {
if t.tokenType == TokenEOF {
s = "<EOF>"
} else {
s = "<" + strconv.Itoa(t.tokenType) + ">"
}
}
s = strings.Replace(s, "\t", "\\t", -1)
s = strings.Replace(s, "\n", "\\n", -1)
s = strings.Replace(s, "\r", "\\r", -1)
return "'" + s + "'"
return "'" + s + "'"
}
func (this *Recognizer) getErrorListenerDispatch() IErrorListener {
return NewProxyErrorListener(this._listeners)
return NewProxyErrorListener(this._listeners)
}
// subclass needs to override these if there are sempreds or actions
// that the ATN interp needs to execute
func (this *Recognizer) sempred(localctx IRuleContext, ruleIndex int, actionIndex int) bool {
return true
return true
}
func (this *Recognizer) precpred(localctx IRuleContext, precedence int) bool {
return true
}
return true
}

View File

@ -28,23 +28,24 @@ import (
type IRuleContext interface {
RuleNode
getInvokingState()int
getInvokingState() int
setInvokingState(int)
getRuleIndex()int
getRuleIndex() int
isEmpty() bool
toString([]string, IRuleContext) string
}
type RuleContext struct {
parentCtx IRuleContext
parentCtx IRuleContext
invokingState int
ruleIndex int
children []Tree
RuleIndex int
children []Tree
}
func NewRuleContext(parent IRuleContext, invokingState int) *RuleContext {
func NewRuleContext(parent IRuleContext, invokingState int) *RuleContext {
rn := new(RuleContext)
@ -61,22 +62,21 @@ func (rn *RuleContext) InitRuleContext(parent IRuleContext, invokingState int) {
// What state invoked the rule associated with this context?
// The "return address" is the followState of invokingState
// If parent is nil, this should be -1.
if (parent == nil){
if parent == nil {
rn.invokingState = -1
} else {
rn.invokingState = invokingState
}
}
func (this *RuleContext) setChildren(elems []Tree){
func (this *RuleContext) setChildren(elems []Tree) {
this.children = elems
}
func (this *RuleContext) setParent(v Tree){
func (this *RuleContext) setParent(v Tree) {
this.parentCtx = v.(IRuleContext)
}
// getInvokingState returns the ATN state that invoked the rule associated
// with this context; -1 when the context has no parent (see InitRuleContext).
//
// Bug fix: the original body was `return this.getInvokingState()`, which
// calls itself unconditionally and overflows the stack on any invocation.
// It now returns the invokingState field, mirroring setInvokingState.
func (this *RuleContext) getInvokingState() int {
	return this.invokingState
}
@ -85,8 +85,8 @@ func (this *RuleContext) setInvokingState(t int) {
this.invokingState = t
}
func (this *RuleContext) getRuleIndex() int{
return this.ruleIndex
// getRuleIndex returns the rule index stored on this context (the exported
// RuleIndex field), identifying which grammar rule produced this context.
func (this *RuleContext) getRuleIndex() int {
	return this.RuleIndex
}
func (this *RuleContext) getChildren() []Tree {
@ -96,7 +96,7 @@ func (this *RuleContext) getChildren() []Tree {
func (this *RuleContext) depth() int {
var n = 0
var p Tree = this
for (p != nil) {
for p != nil {
p = p.getParent()
n += 1
}
@ -131,7 +131,7 @@ func (this *RuleContext) getPayload() interface{} {
// method.
//
func (this *RuleContext) getText() string {
if (this.getChildCount() == 0) {
if this.getChildCount() == 0 {
return ""
} else {
var s string
@ -173,22 +173,22 @@ func (this *RuleContext) toString(ruleNames []string, stop IRuleContext) string
var p IRuleContext = this
var s = "["
for (p != nil && p != stop) {
if (ruleNames == nil) {
if (!p.isEmpty()) {
for p != nil && p != stop {
if ruleNames == nil {
if !p.isEmpty() {
s += strconv.Itoa(p.getInvokingState())
}
} else {
var ri = p.getRuleIndex()
var ruleName string
if (ri >= 0 && ri < len(ruleNames)) {
if ri >= 0 && ri < len(ruleNames) {
ruleName = ruleNames[ri]
} else {
ruleName = strconv.Itoa(ri)
}
s += ruleName
}
if (p.getParent() != nil && (ruleNames != nil || !p.getParent().(IRuleContext).isEmpty())) {
if p.getParent() != nil && (ruleNames != nil || !p.getParent().(IRuleContext).isEmpty()) {
s += " "
}
p = p.getParent().(IRuleContext)
@ -196,4 +196,3 @@ func (this *RuleContext) toString(ruleNames []string, stop IRuleContext) string
s += "]"
return s
}

View File

@ -1,9 +1,8 @@
package antlr4
import (
"strconv"
"fmt"
"strconv"
)
// A tree structure used to record the semantic context in which
@ -22,14 +21,14 @@ type SemanticContext interface {
}
func SemanticContextandContext(a, b SemanticContext) SemanticContext {
if (a == nil || a == SemanticContextNONE) {
if a == nil || a == SemanticContextNONE {
return b
}
if (b == nil || b == SemanticContextNONE) {
if b == nil || b == SemanticContextNONE {
return a
}
var result = NewAND(a, b)
if ( len(result.opnds) == 1) {
if len(result.opnds) == 1 {
return result.opnds[0]
} else {
return result
@ -37,27 +36,26 @@ func SemanticContextandContext(a, b SemanticContext) SemanticContext {
}
func SemanticContextorContext(a, b SemanticContext) SemanticContext {
if (a == nil) {
if a == nil {
return b
}
if (b == nil) {
if b == nil {
return a
}
if (a == SemanticContextNONE || b == SemanticContextNONE) {
if a == SemanticContextNONE || b == SemanticContextNONE {
return SemanticContextNONE
}
var result = NewOR(a, b)
if ( len(result.opnds) == 1) {
if len(result.opnds) == 1 {
return result.opnds[0]
} else {
return result
}
}
type Predicate struct {
ruleIndex int
predIndex int
ruleIndex int
predIndex int
isCtxDependent bool
}
@ -73,7 +71,7 @@ func NewPredicate(ruleIndex, predIndex int, isCtxDependent bool) *Predicate {
//The default {@link SemanticContext}, which is semantically equivalent to
//a predicate of the form {@code {true}?}.
var SemanticContextNONE SemanticContext = NewPredicate(-1,-1,false)
var SemanticContextNONE SemanticContext = NewPredicate(-1, -1, false)
func (this *Predicate) evalPrecedence(parser IRecognizer, outerContext IRuleContext) SemanticContext {
return this
@ -83,7 +81,7 @@ func (this *Predicate) evaluate(parser IRecognizer, outerContext IRuleContext) b
var localctx IRuleContext = nil
if (this.isCtxDependent){
if this.isCtxDependent {
localctx = outerContext
}
@ -95,14 +93,14 @@ func (this *Predicate) hashString() string {
}
func (this *Predicate) equals(other interface{}) bool {
if (this == other) {
if this == other {
return true
} else if _, ok := other.(*Predicate); !ok {
return false
} else {
return this.ruleIndex == other.(*Predicate).ruleIndex &&
this.predIndex == other.(*Predicate).predIndex &&
this.isCtxDependent == other.(*Predicate).isCtxDependent
this.predIndex == other.(*Predicate).predIndex &&
this.isCtxDependent == other.(*Predicate).isCtxDependent
}
}
@ -127,7 +125,7 @@ func (this *PrecedencePredicate) evaluate(parser IRecognizer, outerContext IRule
}
func (this *PrecedencePredicate) evalPrecedence(parser IRecognizer, outerContext IRuleContext) SemanticContext {
if (parser.precpred(outerContext, this.precedence)) {
if parser.precpred(outerContext, this.precedence) {
return SemanticContextNONE
} else {
return nil
@ -143,7 +141,7 @@ func (this *PrecedencePredicate) hashString() string {
}
func (this *PrecedencePredicate) equals(other interface{}) bool {
if (this == other) {
if this == other {
return true
} else if _, ok := other.(*PrecedencePredicate); !ok {
return false
@ -153,14 +151,13 @@ func (this *PrecedencePredicate) equals(other interface{}) bool {
}
func (this *PrecedencePredicate) toString() string {
return "{"+strconv.Itoa(this.precedence)+">=prec}?"
return "{" + strconv.Itoa(this.precedence) + ">=prec}?"
}
func PrecedencePredicatefilterPrecedencePredicates(set *Set) []*PrecedencePredicate {
var result = make([]*PrecedencePredicate, 0)
for _,v := range set.values() {
for _, v := range set.values() {
if c2, ok := v.(*PrecedencePredicate); ok {
result = append(result, c2)
}
@ -172,14 +169,13 @@ func PrecedencePredicatefilterPrecedencePredicates(set *Set) []*PrecedencePredic
// A semantic context which is true whenever none of the contained contexts
// is false.`
type AND struct {
opnds []SemanticContext
}
func NewAND(a, b SemanticContext) *AND {
var operands = NewSet(nil,nil)
var operands = NewSet(nil, nil)
if aa, ok := a.(*AND); ok {
for _, o := range aa.opnds {
operands.add(o)
@ -196,12 +192,12 @@ func NewAND(a, b SemanticContext) *AND {
operands.add(b)
}
var precedencePredicates = PrecedencePredicatefilterPrecedencePredicates(operands)
if ( len(precedencePredicates) > 0) {
if len(precedencePredicates) > 0 {
// interested in the transition with the lowest precedence
var reduced *PrecedencePredicate = nil
for _,p := range precedencePredicates {
if(reduced==nil || p.precedence < reduced.precedence) {
for _, p := range precedencePredicates {
if reduced == nil || p.precedence < reduced.precedence {
reduced = p
}
}
@ -209,7 +205,6 @@ func NewAND(a, b SemanticContext) *AND {
operands.add(reduced)
}
vs := operands.values()
opnds := make([]SemanticContext, len(vs))
for i, v := range vs {
@ -223,7 +218,7 @@ func NewAND(a, b SemanticContext) *AND {
}
func (this *AND) equals(other interface{}) bool {
if (this == other) {
if this == other {
return true
} else if _, ok := other.(*AND); !ok {
return false
@ -240,6 +235,7 @@ func (this *AND) equals(other interface{}) bool {
func (this *AND) hashString() string {
return fmt.Sprint(this.opnds) + "/AND"
}
//
// {@inheritDoc}
//
@ -249,7 +245,7 @@ func (this *AND) hashString() string {
//
func (this *AND) evaluate(parser IRecognizer, outerContext IRuleContext) bool {
for i := 0; i < len(this.opnds); i++ {
if (!this.opnds[i].evaluate(parser, outerContext)) {
if !this.opnds[i].evaluate(parser, outerContext) {
return false
}
}
@ -264,29 +260,29 @@ func (this *AND) evalPrecedence(parser IRecognizer, outerContext IRuleContext) S
var context = this.opnds[i]
var evaluated = context.evalPrecedence(parser, outerContext)
differs = differs || (evaluated != context)
if (evaluated == nil) {
if evaluated == nil {
// The AND context is false if any element is false
return nil
} else if (evaluated != SemanticContextNONE) {
} else if evaluated != SemanticContextNONE {
// Reduce the result by skipping true elements
operands = append (operands, evaluated)
operands = append(operands, evaluated)
}
}
if (!differs) {
if !differs {
return this
}
if ( len(operands) == 0) {
if len(operands) == 0 {
// all elements were true, so the AND context is true
return SemanticContextNONE
}
var result SemanticContext = nil
for _,o := range operands {
if (result == nil){
for _, o := range operands {
if result == nil {
result = o
} else {
} else {
result = SemanticContextandContext(result, o)
}
}
@ -297,11 +293,11 @@ func (this *AND) evalPrecedence(parser IRecognizer, outerContext IRuleContext) S
func (this *AND) toString() string {
var s = ""
for _,o := range this.opnds {
for _, o := range this.opnds {
s += "&& " + o.toString()
}
if (len(s) > 3){
if len(s) > 3 {
return s[0:3]
} else {
return s
@ -318,7 +314,7 @@ type OR struct {
}
func NewOR(a, b SemanticContext) *OR {
var operands = NewSet(nil,nil)
var operands = NewSet(nil, nil)
if aa, ok := a.(*OR); ok {
for _, o := range aa.opnds {
operands.add(o)
@ -335,12 +331,12 @@ func NewOR(a, b SemanticContext) *OR {
operands.add(b)
}
var precedencePredicates = PrecedencePredicatefilterPrecedencePredicates(operands)
if ( len(precedencePredicates) > 0) {
if len(precedencePredicates) > 0 {
// interested in the transition with the lowest precedence
var reduced *PrecedencePredicate = nil
for _,p := range precedencePredicates {
if(reduced==nil || p.precedence > reduced.precedence) {
for _, p := range precedencePredicates {
if reduced == nil || p.precedence > reduced.precedence {
reduced = p
}
}
@ -360,9 +356,8 @@ func NewOR(a, b SemanticContext) *OR {
return this
}
func (this *OR) equals(other interface{}) bool {
if (this == other) {
if this == other {
return true
} else if _, ok := other.(*OR); !ok {
return false
@ -386,7 +381,7 @@ func (this *OR) hashString() string {
//
func (this *OR) evaluate(parser IRecognizer, outerContext IRuleContext) bool {
for i := 0; i < len(this.opnds); i++ {
if (this.opnds[i].evaluate(parser, outerContext)) {
if this.opnds[i].evaluate(parser, outerContext) {
return true
}
}
@ -400,28 +395,28 @@ func (this *OR) evalPrecedence(parser IRecognizer, outerContext IRuleContext) Se
var context = this.opnds[i]
var evaluated = context.evalPrecedence(parser, outerContext)
differs = differs || (evaluated != context)
if (evaluated == SemanticContextNONE) {
if evaluated == SemanticContextNONE {
// The OR context is true if any element is true
return SemanticContextNONE
} else if (evaluated != nil) {
} else if evaluated != nil {
// Reduce the result by skipping false elements
operands = append(operands, evaluated)
}
}
if (!differs) {
if !differs {
return this
}
if (len(operands) == 0) {
if len(operands) == 0 {
// all elements were false, so the OR context is false
return nil
}
var result SemanticContext = nil
for _,o := range operands {
if (result == nil) {
for _, o := range operands {
if result == nil {
result = o
} else {
result = SemanticContextorContext(result, o);
result = SemanticContextorContext(result, o)
}
}
@ -431,17 +426,13 @@ func (this *OR) evalPrecedence(parser IRecognizer, outerContext IRuleContext) Se
func (this *OR) toString() string {
var s = ""
for _,o := range this.opnds {
for _, o := range this.opnds {
s += "|| " + o.toString()
}
if (len(s) > 3){
if len(s) > 3 {
return s[0:3]
} else {
return s
}
}

View File

@ -1,13 +1,13 @@
package antlr4
import (
"strings"
"strconv"
"strings"
)
type TokenSourceCharStreamPair struct {
tokenSource TokenSource
charStream CharStream
charStream CharStream
}
// A token has properties: text, type, line, character position in the line
@ -15,16 +15,16 @@ type TokenSourceCharStreamPair struct {
// we obtained this token.
type Token struct {
source *TokenSourceCharStreamPair
tokenType int // token type of the token
channel int // The parser ignores everything not on DEFAULT_CHANNEL
start int // optional return -1 if not implemented.
stop int // optional return -1 if not implemented.
tokenIndex int // from 0..n-1 of the token object in the input stream
line int // line=1..n of the 1st character
column int // beginning of the line at which it occurs, 0..n-1
_text string // text of the token.
readOnly bool
source *TokenSourceCharStreamPair
tokenType int // token type of the token
channel int // The parser ignores everything not on DEFAULT_CHANNEL
start int // optional return -1 if not implemented.
stop int // optional return -1 if not implemented.
tokenIndex int // from 0..n-1 of the token object in the input stream
line int // line=1..n of the 1st character
column int // beginning of the line at which it occurs, 0..n-1
_text string // text of the token.
readOnly bool
}
const (
@ -58,7 +58,7 @@ const (
// should be obtained from the input along with the start and stop indexes
// of the token.
func (this *Token) text() string{
func (this *Token) text() string {
return this._text
}
@ -88,7 +88,7 @@ func NewCommonToken(source *TokenSourceCharStreamPair, tokenType, channel, start
t.start = start
t.stop = stop
t.tokenIndex = -1
if (t.source.tokenSource != nil) {
if t.source.tokenSource != nil {
t.line = source.tokenSource.getLine()
t.column = source.tokenSource.getCharPositionInLine()
} else {
@ -116,7 +116,7 @@ func NewCommonToken(source *TokenSourceCharStreamPair, tokenType, channel, start
//
func (ct *CommonToken) clone() *CommonToken {
var t = NewCommonToken(ct.source, ct.tokenType, ct.channel, ct.start,
ct.stop)
ct.stop)
t.tokenIndex = ct.tokenIndex
t.line = ct.line
t.column = ct.column
@ -125,15 +125,15 @@ func (ct *CommonToken) clone() *CommonToken {
}
func (this *CommonToken) text() string {
if (this._text != "") {
if this._text != "" {
return this._text
}
var input = this.getInputStream()
if (input == nil) {
if input == nil {
return ""
}
var n = input.size()
if (this.start < n && this.stop < n) {
if this.start < n && this.stop < n {
return input.getTextFromInterval(NewInterval(this.start, this.stop))
} else {
return "<EOF>"
@ -146,7 +146,7 @@ func (this *CommonToken) setText(text string) {
func (this *CommonToken) toString() string {
var txt = this.text()
if (txt != "") {
if txt != "" {
txt = strings.Replace(txt, "\n", "", -1)
txt = strings.Replace(txt, "\r", "", -1)
txt = strings.Replace(txt, "\t", "", -1)
@ -154,17 +154,14 @@ func (this *CommonToken) toString() string {
txt = "<no text>"
}
var ch string;
if (this.channel > 0){
var ch string
if this.channel > 0 {
ch = ",channel=" + strconv.Itoa(this.channel)
} else {
ch = ""
}
return "[@" + strconv.Itoa(this.tokenIndex) + "," + strconv.Itoa(this.start) + ":" + strconv.Itoa(this.stop) + "='" +
txt + "',<" + strconv.Itoa(this.tokenType) + ">" +
ch + "," + strconv.Itoa(this.line) + ":" + strconv.Itoa(this.column) + "]"
txt + "',<" + strconv.Itoa(this.tokenType) + ">" +
ch + "," + strconv.Itoa(this.line) + ":" + strconv.Itoa(this.column) + "]"
}

View File

@ -1,7 +1,6 @@
package antlr4
type TokenSource interface {
nextToken() *Token
skip()
more()
@ -11,6 +10,4 @@ type TokenSource interface {
getSourceName() string
setTokenFactory(factory TokenFactory)
getTokenFactory() TokenFactory
}

View File

@ -1,4 +1,5 @@
package antlr4
import (
"fmt"
"strconv"
@ -19,19 +20,19 @@ type ITransition interface {
getIsEpsilon() bool
getLabel() *IntervalSet
getSerializationType() int
matches( int, int, int ) bool
matches(int, int, int) bool
}
type Transition struct {
target IATNState
isEpsilon bool
label *IntervalSet
target IATNState
isEpsilon bool
label *IntervalSet
serializationType int
}
func NewTransition (target IATNState) *Transition {
func NewTransition(target IATNState) *Transition {
if (target==nil || target==nil) {
if target == nil || target == nil {
panic("target cannot be nil.")
}
@ -68,24 +69,23 @@ func (t *Transition) getSerializationType() int {
return t.serializationType
}
func (t *Transition) matches( symbol, minVocabSymbol, maxVocabSymbol int ) bool {
func (t *Transition) matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
panic("Not implemented")
}
const(
TransitionEPSILON = 1
TransitionRANGE = 2
TransitionRULE = 3
TransitionPREDICATE = 4 // e.g., {isType(input.LT(1))}?
TransitionATOM = 5
TransitionACTION = 6
TransitionSET = 7 // ~(A|B) or ~atom, wildcard, which convert to next 2
TransitionNOT_SET = 8
TransitionWILDCARD = 9
const (
TransitionEPSILON = 1
TransitionRANGE = 2
TransitionRULE = 3
TransitionPREDICATE = 4 // e.g., {isType(input.LT(1))}?
TransitionATOM = 5
TransitionACTION = 6
TransitionSET = 7 // ~(A|B) or ~atom, wildcard, which convert to next 2
TransitionNOT_SET = 8
TransitionWILDCARD = 9
TransitionPRECEDENCE = 10
)
var TransitionserializationNames = []string{
"INVALID",
"EPSILON",
@ -124,18 +124,17 @@ var TransitionserializationNames = []string{
// TransitionPRECEDENCE
//}
// TODO: make all transitions sets? no, should remove set edges
type AtomTransition struct {
*Transition
label_ int
label *IntervalSet
label *IntervalSet
}
func NewAtomTransition ( target IATNState, label int ) *AtomTransition {
func NewAtomTransition(target IATNState, label int) *AtomTransition {
t := new(AtomTransition)
t.InitTransition( target )
t.InitTransition(target)
t.label_ = label // The token type or character value or, signifies special label.
t.label = t.makeLabel()
@ -150,7 +149,7 @@ func (t *AtomTransition) makeLabel() *IntervalSet {
return s
}
func (t *AtomTransition) matches( symbol, minVocabSymbol, maxVocabSymbol int ) bool {
func (t *AtomTransition) matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
return t.label_ == symbol
}
@ -161,15 +160,14 @@ func (t *AtomTransition) toString() string {
type RuleTransition struct {
*Transition
followState IATNState
followState IATNState
ruleIndex, precedence int
}
func NewRuleTransition ( ruleStart IATNState, ruleIndex, precedence int, followState IATNState ) *RuleTransition {
func NewRuleTransition(ruleStart IATNState, ruleIndex, precedence int, followState IATNState) *RuleTransition {
t := new(RuleTransition)
t.InitTransition( ruleStart )
t.InitTransition(ruleStart)
t.ruleIndex = ruleIndex
t.precedence = precedence
@ -180,23 +178,21 @@ func NewRuleTransition ( ruleStart IATNState, ruleIndex, precedence int, followS
return t
}
func (t *RuleTransition) matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
func (t *RuleTransition) matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
return false
}
type EpsilonTransition struct {
*Transition
isEpsilon bool
isEpsilon bool
outermostPrecedenceReturn int
}
func NewEpsilonTransition ( target IATNState, outermostPrecedenceReturn int ) *EpsilonTransition {
func NewEpsilonTransition(target IATNState, outermostPrecedenceReturn int) *EpsilonTransition {
t := new(EpsilonTransition)
t.InitTransition( target )
t.InitTransition(target)
t.serializationType = TransitionEPSILON
t.isEpsilon = true
@ -204,8 +200,7 @@ func NewEpsilonTransition ( target IATNState, outermostPrecedenceReturn int ) *E
return t
}
func (t *EpsilonTransition) matches( symbol, minVocabSymbol, maxVocabSymbol int ) bool {
func (t *EpsilonTransition) matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
return false
}
@ -219,10 +214,10 @@ type RangeTransition struct {
start, stop int
}
func NewRangeTransition ( target IATNState, start, stop int ) *RangeTransition {
func NewRangeTransition(target IATNState, start, stop int) *RangeTransition {
t := new(RangeTransition)
t.InitTransition( target )
t.InitTransition(target)
t.serializationType = TransitionRANGE
t.start = start
@ -231,14 +226,13 @@ func NewRangeTransition ( target IATNState, start, stop int ) *RangeTransition {
return t
}
func (t *RangeTransition) makeLabel() *IntervalSet {
var s = NewIntervalSet()
s.addRange(t.start, t.stop)
return s
}
func (t *RangeTransition) matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
func (t *RangeTransition) matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
return symbol >= t.start && symbol <= t.stop
}
@ -250,10 +244,10 @@ type AbstractPredicateTransition struct {
*Transition
}
func NewAbstractPredicateTransition ( target IATNState ) *AbstractPredicateTransition {
func NewAbstractPredicateTransition(target IATNState) *AbstractPredicateTransition {
t := new(AbstractPredicateTransition)
t.InitTransition( target )
t.InitTransition(target)
return t
}
@ -261,11 +255,11 @@ func NewAbstractPredicateTransition ( target IATNState ) *AbstractPredicateTrans
type PredicateTransition struct {
*Transition
isCtxDependent bool
isCtxDependent bool
ruleIndex, predIndex int
}
func NewPredicateTransition ( target IATNState, ruleIndex, predIndex int, isCtxDependent bool ) *PredicateTransition {
func NewPredicateTransition(target IATNState, ruleIndex, predIndex int, isCtxDependent bool) *PredicateTransition {
t := new(PredicateTransition)
t.InitTransition(target)
@ -278,8 +272,7 @@ func NewPredicateTransition ( target IATNState, ruleIndex, predIndex int, isCtxD
return t
}
func (t *PredicateTransition) matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
func (t *PredicateTransition) matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
return false
}
@ -294,14 +287,14 @@ func (t *PredicateTransition) toString() string {
type ActionTransition struct {
*Transition
isCtxDependent bool
isCtxDependent bool
ruleIndex, actionIndex, predIndex int
}
func NewActionTransition ( target IATNState, ruleIndex, actionIndex int, isCtxDependent bool ) *ActionTransition {
func NewActionTransition(target IATNState, ruleIndex, actionIndex int, isCtxDependent bool) *ActionTransition {
t := new(ActionTransition)
t.InitTransition( target )
t.InitTransition(target)
t.serializationType = TransitionACTION
t.ruleIndex = ruleIndex
@ -311,9 +304,7 @@ func NewActionTransition ( target IATNState, ruleIndex, actionIndex int, isCtxDe
return t
}
func (t *ActionTransition) matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
func (t *ActionTransition) matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
return false
}
@ -321,24 +312,23 @@ func (t *ActionTransition) toString() string {
return "action_" + strconv.Itoa(t.ruleIndex) + ":" + strconv.Itoa(t.actionIndex)
}
type SetTransition struct {
*Transition
}
func NewSetTransition ( target IATNState, set *IntervalSet ) *SetTransition {
func NewSetTransition(target IATNState, set *IntervalSet) *SetTransition {
t := new(SetTransition)
t.InitTransition( target )
t.InitSetTransition( set )
t.InitTransition(target)
t.InitSetTransition(set)
return t
}
func (t *SetTransition) InitSetTransition( set *IntervalSet ) {
func (t *SetTransition) InitSetTransition(set *IntervalSet) {
t.serializationType = TransitionSET
if (set !=nil && set !=nil) {
if set != nil && set != nil {
t.label = set
} else {
t.label = NewIntervalSet()
@ -347,35 +337,31 @@ func (t *SetTransition) InitSetTransition( set *IntervalSet ) {
}
func (t *SetTransition) matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
func (t *SetTransition) matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
return t.label.contains(symbol)
}
func (t *SetTransition) toString() string {
return t.label.toString()
}
type NotSetTransition struct {
SetTransition
}
func NewNotSetTransition ( target IATNState, set *IntervalSet) *NotSetTransition {
func NewNotSetTransition(target IATNState, set *IntervalSet) *NotSetTransition {
t := new(NotSetTransition)
t.InitTransition( target )
t.InitSetTransition( set )
t.InitTransition(target)
t.InitSetTransition(set)
t.serializationType = TransitionNOT_SET
return t
}
func (t *NotSetTransition) matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
return symbol >= minVocabSymbol && symbol <= maxVocabSymbol && !t.label.contains( symbol)
func (t *NotSetTransition) matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
return symbol >= minVocabSymbol && symbol <= maxVocabSymbol && !t.label.contains(symbol)
}
func (t *NotSetTransition) toString() string {
@ -386,16 +372,16 @@ type WildcardTransition struct {
*Transition
}
func NewWildcardTransition ( target IATNState ) *WildcardTransition {
func NewWildcardTransition(target IATNState) *WildcardTransition {
t := new(WildcardTransition)
t.InitTransition( target )
t.InitTransition(target)
t.serializationType = TransitionWILDCARD
return t
}
func (t *WildcardTransition) matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
func (t *WildcardTransition) matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
return symbol >= minVocabSymbol && symbol <= maxVocabSymbol
}
@ -409,10 +395,10 @@ type PrecedencePredicateTransition struct {
precedence int
}
func NewPrecedencePredicateTransition ( target IATNState, precedence int ) *PrecedencePredicateTransition {
func NewPrecedencePredicateTransition(target IATNState, precedence int) *PrecedencePredicateTransition {
t := new(PrecedencePredicateTransition)
t.InitTransition( target )
t.InitTransition(target)
t.serializationType = TransitionPRECEDENCE
t.precedence = precedence
@ -421,8 +407,7 @@ func NewPrecedencePredicateTransition ( target IATNState, precedence int ) *Prec
return t
}
func (t *PrecedencePredicateTransition) matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
func (t *PrecedencePredicateTransition) matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
return false
}
@ -433,15 +418,3 @@ func (t *PrecedencePredicateTransition) getPredicate() *PrecedencePredicate {
func (t *PrecedencePredicateTransition) toString() string {
return fmt.Sprint(t.precedence) + " >= _p"
}

View File

@ -1,6 +1,5 @@
package antlr4
// The basic notion of a tree has a parent, a payload, and a list of children.
// It is the most abstract interface for all the trees used by ANTLR.
///
@ -15,7 +14,7 @@ type Tree interface {
getChildCount() int
getChildren() []Tree
setChildren([]Tree)
// toStringTree() string
// toStringTree() string
}
type SyntaxTree interface {
@ -27,10 +26,10 @@ type SyntaxTree interface {
type ParseTree interface {
SyntaxTree
// <T> T accept(ParseTreeVisitor<? extends T> visitor);
// <T> T accept(ParseTreeVisitor<? extends T> visitor);
accept(visitor ParseTreeVisitor) interface{}
getText() string
// toStringTree([]string, IRecognizer) string
// toStringTree([]string, IRecognizer) string
}
type RuleNode interface {
@ -116,7 +115,6 @@ func (this *TerminalNodeImpl) setChildren(t []Tree) {
panic("Cannot set children on terminal node")
}
func (this *TerminalNodeImpl) getSymbol() *Token {
return this.symbol
}
@ -134,7 +132,7 @@ func (this *TerminalNodeImpl) getPayload() interface{} {
}
func (this *TerminalNodeImpl) getSourceInterval() *Interval {
if (this.symbol == nil) {
if this.symbol == nil {
return TreeINVALID_INTERVAL
}
var tokenIndex = this.symbol.tokenIndex
@ -145,7 +143,7 @@ func (this *TerminalNodeImpl) getChildCount() int {
return 0
}
func (this *TerminalNodeImpl) accept(visitor ParseTreeVisitor ) interface{} {
func (this *TerminalNodeImpl) accept(visitor ParseTreeVisitor) interface{} {
return visitor.visitTerminal(this)
}
@ -154,14 +152,13 @@ func (this *TerminalNodeImpl) getText() string {
}
func (this *TerminalNodeImpl) toString() string {
if (this.symbol.tokenType == TokenEOF) {
if this.symbol.tokenType == TokenEOF {
return "<EOF>"
} else {
return this.symbol.text()
}
}
// Represents a token that was consumed during resynchronization
// rather than during a valid match operation. For example,
// we will create this kind of a node during single token insertion
@ -182,13 +179,11 @@ func (this *ErrorNodeImpl) isErrorNode() bool {
return true
}
func (this *ErrorNodeImpl) accept( visitor ParseTreeVisitor ) interface{} {
func (this *ErrorNodeImpl) accept(visitor ParseTreeVisitor) interface{} {
return visitor.visitErrorNode(this)
}
type ParseTreeWalker struct {
}
func NewParseTreeWalker() *ParseTreeWalker {
@ -210,6 +205,7 @@ func (this *ParseTreeWalker) walk(listener ParseTreeListener, t Tree) {
this.exitRule(listener, t.(RuleNode))
}
}
//
// The discovery of a rule node, involves sending two events: the generic
// {@link ParseTreeListener//enterEveryRule} and a

View File

@ -1,4 +1,5 @@
package antlr4
import "fmt"
/** A set of utility routines useful for all kinds of ANTLR trees. */
@ -8,62 +9,61 @@ import "fmt"
// parse trees and extract data appropriately.
func TreestoStringTree(tree Tree, ruleNames []string, recog IRecognizer) string {
if(recog!=nil) {
ruleNames = recog.getRuleNames()
}
if recog != nil {
ruleNames = recog.getRuleNames()
}
var s = TreesgetNodeText(tree, ruleNames, nil)
var s = TreesgetNodeText(tree, ruleNames, nil)
s = EscapeWhitespace(s, false)
var c = tree.getChildCount()
if(c==0) {
return s
}
var res = "(" + s + " "
if(c>0) {
s = TreestoStringTree(tree.getChild(0), ruleNames, nil)
res += s
}
for i :=1; i<c; i++ {
s = TreestoStringTree(tree.getChild(i), ruleNames, nil)
res += (" " + s)
}
res += ")"
return res
s = EscapeWhitespace(s, false)
var c = tree.getChildCount()
if c == 0 {
return s
}
var res = "(" + s + " "
if c > 0 {
s = TreestoStringTree(tree.getChild(0), ruleNames, nil)
res += s
}
for i := 1; i < c; i++ {
s = TreestoStringTree(tree.getChild(i), ruleNames, nil)
res += (" " + s)
}
res += ")"
return res
}
func TreesgetNodeText(t Tree, ruleNames []string, recog *Parser) string {
if(recog!=nil) {
ruleNames = recog.getRuleNames()
}
if recog != nil {
ruleNames = recog.getRuleNames()
}
if(ruleNames!=nil) {
if t2, ok := t.(RuleNode); ok {
return ruleNames[t2.getRuleContext().getRuleIndex()]
} else if t2, ok := t.(ErrorNode); ok {
return fmt.Sprint(t2)
} else if t2, ok := t.(TerminalNode); ok {
if(t2.getSymbol()!=nil) {
return t2.getSymbol().text()
}
}
}
if ruleNames != nil {
if t2, ok := t.(RuleNode); ok {
return ruleNames[t2.getRuleContext().getRuleIndex()]
} else if t2, ok := t.(ErrorNode); ok {
return fmt.Sprint(t2)
} else if t2, ok := t.(TerminalNode); ok {
if t2.getSymbol() != nil {
return t2.getSymbol().text()
}
}
}
// no recog for rule names
var payload = t.getPayload()
if p2, ok := payload.(*Token); ok {
return p2.text()
}
// no recog for rule names
var payload = t.getPayload()
if p2, ok := payload.(*Token); ok {
return p2.text()
}
return fmt.Sprint(t.getPayload())
return fmt.Sprint(t.getPayload())
}
// Return ordered list of all children of this node
func TreesgetChildren(t Tree) []Tree {
var list = make([]Tree, 0)
for i := 0;i< t.getChildCount();i++ {
for i := 0; i < t.getChildCount(); i++ {
list = append(list, t.getChild(i))
}
return list
@ -73,21 +73,21 @@ func TreesgetChildren(t Tree) []Tree {
// list is the root and the last is the parent of this node.
//
func TreesgetAncestors(t Tree) []Tree {
var ancestors = make([]Tree, 0)
t = t.getParent()
for(t!=nil) {
f := []Tree { t }
var ancestors = make([]Tree, 0)
t = t.getParent()
for t != nil {
f := []Tree{t}
ancestors = append(f, ancestors...)
t = t.getParent()
}
return ancestors
}
func TreesfindAllTokenNodes(t ParseTree, ttype int) []ParseTree {
return TreesfindAllNodes(t, ttype, true)
t = t.getParent()
}
return ancestors
}
func TreesfindAllRuleNodes(t ParseTree, ruleIndex int) []ParseTree {
func TreesfindAllTokenNodes(t ParseTree, ttype int) []ParseTree {
return TreesfindAllNodes(t, ttype, true)
}
func TreesfindAllRuleNodes(t ParseTree, ruleIndex int) []ParseTree {
return TreesfindAllNodes(t, ruleIndex, false)
}
@ -104,26 +104,24 @@ func Trees_findAllNodes(t ParseTree, index int, findTokens bool, nodes []ParseTr
t3, ok2 := t.(IParserRuleContext)
if findTokens && ok {
if(t2.getSymbol().tokenType==index) {
if t2.getSymbol().tokenType == index {
nodes = append(nodes, t2)
}
} else if(!findTokens && ok2) {
if(t3.getRuleIndex()==index) {
} else if !findTokens && ok2 {
if t3.getRuleIndex() == index {
nodes = append(nodes, t3)
}
}
// check children
for i := 0;i<t.getChildCount(); i++ {
for i := 0; i < t.getChildCount(); i++ {
Trees_findAllNodes(t.getChild(i).(ParseTree), index, findTokens, nodes)
}
}
func Treesdescendants(t ParseTree) []ParseTree {
var nodes = []ParseTree{ t }
for i := 0; i<t.getChildCount(); i++ {
var nodes = []ParseTree{t}
for i := 0; i < t.getChildCount(); i++ {
nodes = append(nodes, Treesdescendants(t.getChild(i).(ParseTree))...)
}
return nodes
}
return nodes
}

View File

@ -1,23 +1,23 @@
package antlr4
import (
"fmt"
"errors"
"strings"
"fmt"
"hash/fnv"
// "regexp"
"strings"
// "regexp"
"bytes"
"encoding/gob"
)
func intMin(a,b int) int {
func intMin(a, b int) int {
if a < b {
return a
}
return b
}
func intMax(a,b int) int {
func intMax(a, b int) int {
if a > b {
return a
}
@ -31,7 +31,7 @@ type IntStack []int
var ErrEmptyStack = errors.New("Stack is empty")
func (s *IntStack) Pop() (int, error) {
l := len(*s)-1
l := len(*s) - 1
if l < 0 {
return 0, ErrEmptyStack
}
@ -44,8 +44,8 @@ func (s *IntStack) Push(e int) {
*s = append(*s, e)
}
func arrayToString(a []interface{}) string{
return fmt.Sprint( a )
func arrayToString(a []interface{}) string {
return fmt.Sprint(a)
}
func hashCode(s string) string {
@ -55,24 +55,24 @@ func hashCode(s string) string {
}
type Set struct {
data map[string][]interface{}
hashFunction func(interface{}) string
equalsFunction func(interface{},interface{}) bool
data map[string][]interface{}
hashFunction func(interface{}) string
equalsFunction func(interface{}, interface{}) bool
}
func NewSet(hashFunction func(interface{}) string, equalsFunction func(interface{},interface{}) bool) *Set {
func NewSet(hashFunction func(interface{}) string, equalsFunction func(interface{}, interface{}) bool) *Set {
s := new(Set)
s.data = make( map[string][]interface{})
s.data = make(map[string][]interface{})
if (hashFunction == nil){
if hashFunction == nil {
s.hashFunction = standardHashFunction
} else {
s.hashFunction = hashFunction
}
if (equalsFunction == nil){
if equalsFunction == nil {
s.equalsFunction = standardEqualsFunction
} else {
s.equalsFunction = equalsFunction
@ -97,7 +97,7 @@ func getBytes(key interface{}) ([]byte, error) {
func standardHashFunction(a interface{}) string {
h := fnv.New32a()
v,_ := getBytes(a)
v, _ := getBytes(a)
h.Write(v)
return fmt.Sprint(h.Sum32())
}
@ -115,16 +115,16 @@ func (this *Set) add(value interface{}) interface{} {
if this.data[key] != nil {
for i := 0; i < len(values); i++ {
if(this.equalsFunction(value, values[i])) {
if this.equalsFunction(value, values[i]) {
return values[i]
}
}
this.data[key] = append( this.data[key], value )
this.data[key] = append(this.data[key], value)
return value
}
this.data[key] = []interface{}{ value }
this.data[key] = []interface{}{value}
return value
}
@ -138,7 +138,7 @@ func (this *Set) contains(value interface{}) bool {
if this.data[key] != nil {
for i := 0; i < len(values); i++ {
if(this.equalsFunction(value, values[i])) {
if this.equalsFunction(value, values[i]) {
return true
}
}
@ -149,7 +149,7 @@ func (this *Set) contains(value interface{}) bool {
func (this *Set) values() []interface{} {
var l = make([]interface{}, len(this.data))
for key,_ := range this.data {
for key, _ := range this.data {
if strings.Index(key, "hash_") == 0 {
l = append(l, this.data[key]...)
}
@ -161,7 +161,6 @@ func (this *Set) toString() string {
return fmt.Sprint(this.data)
}
type BitSet struct {
data map[int]bool
}
@ -181,7 +180,7 @@ func (this *BitSet) clear(index int) {
}
func (this *BitSet) or(set *BitSet) {
for k,_ := range set.data {
for k, _ := range set.data {
this.add(k)
}
}
@ -197,7 +196,7 @@ func (this *BitSet) contains(value int) bool {
func (this *BitSet) values() []int {
ks := make([]int, len(this.data))
i := 0
for k,_ := range this.data {
for k, _ := range this.data {
ks[i] = k
i++
}
@ -207,7 +206,7 @@ func (this *BitSet) values() []int {
func (this *BitSet) minValue() int {
min := 0
for k,_ := range this.data {
for k, _ := range this.data {
if k < min {
min = k
}
@ -218,15 +217,15 @@ func (this *BitSet) minValue() int {
func (this *BitSet) equals(other interface{}) bool {
otherBitSet, ok := other.(*BitSet)
if !ok {
if !ok {
return false
}
if len(this.data) != len(otherBitSet.data){
if len(this.data) != len(otherBitSet.data) {
return false
}
for k,v := range this.data {
for k, v := range this.data {
if otherBitSet.data[k] != v {
return false
}
@ -243,7 +242,6 @@ func (this *BitSet) toString() string {
return fmt.Sprint(this.data)
}
type AltDict struct {
data map[string]interface{}
}
@ -267,15 +265,13 @@ func (this *AltDict) put(key string, value interface{}) {
func (this *AltDict) values() []interface{} {
vs := make([]interface{}, len(this.data))
i := 0
for _,v := range this.data {
for _, v := range this.data {
vs[i] = v
i++
}
return vs
}
type DoubleDict struct {
data map[string]map[string]interface{}
}
@ -289,7 +285,7 @@ func NewDoubleDict() *DoubleDict {
func (this *DoubleDict) get(a string, b string) interface{} {
var d = this.data[a]
if (d == nil){
if d == nil {
return nil
}
@ -299,7 +295,7 @@ func (this *DoubleDict) get(a string, b string) interface{} {
func (this *DoubleDict) set(a, b string, o interface{}) {
var d = this.data[a]
if(d==nil) {
if d == nil {
d = make(map[string]interface{})
this.data[a] = d
}
@ -309,11 +305,11 @@ func (this *DoubleDict) set(a, b string, o interface{}) {
func EscapeWhitespace(s string, escapeSpaces bool) string {
s = strings.Replace(s,"\t","\\t", -1)
s = strings.Replace(s,"\n","\\n", -1)
s = strings.Replace(s,"\r","\\r", -1)
if(escapeSpaces) {
s = strings.Replace(s," ","\u00B7", -1)
s = strings.Replace(s, "\t", "\\t", -1)
s = strings.Replace(s, "\n", "\\n", -1)
s = strings.Replace(s, "\r", "\\r", -1)
if escapeSpaces {
s = strings.Replace(s, " ", "\u00B7", -1)
}
return s
}
@ -325,17 +321,10 @@ func TitleCase(str string) string {
panic("Not implemented")
// re := regexp.MustCompile("\w\S*")
// return re.ReplaceAllStringFunc(str, func(s string) {
// return strings.ToUpper(s[0:1]) + s[1:2]
// })
// re := regexp.MustCompile("\w\S*")
// return re.ReplaceAllStringFunc(str, func(s string) {
// return strings.ToUpper(s[0:1]) + s[1:2]
// })
return ""
}

View File

@ -15,10 +15,7 @@ ParserFile(file, parser, namedActions) ::= <<
<fileHeader(file.grammarFileName, file.ANTLRVersion)>
package parser // <file.grammarName>
import(
"antlr4"
"strings"
)
import "antlr4"
<namedActions.header>
@ -31,11 +28,7 @@ ListenerFile(file, header) ::= <<
<fileHeader(file.grammarFileName, file.ANTLRVersion)>
package parser // <file.grammarName>
// TODO: this should probably be an interface
import(
"antlr4"
)
import "antlr4"
// This class defines a complete listener for a parse tree produced by <file.parserName>.
@ -45,11 +38,11 @@ type <file.grammarName>Listener struct {
<file.listenerNames:{lname |
// Enter a parse tree produced by <file.parserName>#<lname>.
func (l *<file.grammarName>Listener) enter<lname; format="cap">(ctx *ParserRuleContext) {
func (l *<file.grammarName>Listener) enter<lname; format="cap">(ctx antlr4.IParserRuleContext) {
\}
// Exit a parse tree produced by <file.parserName>#<lname>.
func (l *<file.grammarName>Listener) exit<lname; format="cap">(ctx *ParserRuleContext) {
func (l *<file.grammarName>Listener) exit<lname; format="cap">(ctx antlr4.IParserRuleContext) {
\}
}; separator="\n">
@ -61,21 +54,19 @@ VisitorFile(file, header) ::= <<
<fileHeader(file.grammarFileName, file.ANTLRVersion)>
package parser // <file.grammarName>
import(
"antlr4"
)
import "antlr4"
<header>
// This class defines a complete generic visitor for a parse tree produced by <file.parserName>.
type <file.grammarName>Visitor struct {
ParseTreeVisitor
}
<file.visitorNames:{lname |
// Visit a parse tree produced by <file.parserName>#<lname>.
func (l <file.grammarName>Visitor) visit<lname; format="cap">(ctx *ParserRuleContext) {
func (l <file.grammarName>Visitor) visit<lname; format="cap">(ctx IParserRuleContext) {
\}
}; separator="\n">
@ -91,38 +82,36 @@ var <superClass> = require('./<superClass>').<superClass> // TODO
<atn>
type <parser.name> struct {
<superClass; null="Parser">
var deserializer = antlr4.NewATNDeserializer()
var deserializedAtn = deserializer.Deserialize(serializedATN)
var literalNames = []string{ <parser.literalNames:{t | <t>}; null="nil", separator=", ", wrap, anchor> }
var symbolicNames = []string{ <parser.symbolicNames:{t | <t>}; null="nil", separator=", ", wrap, anchor> }
var ruleNames = []string{ <parser.ruleNames:{r | "<r>"}; separator=", ", wrap, anchor> }
type <parser.name> struct {
<superClass; null="*antlr4.Parser">
_interp *ParserATNSimulator
ruleNames []string
literalNames []string
symbolicNames []string
grammarFileName string
}
func New<parser.name>(input) <parser.name> {
func New<parser.name>(input TokenStream) <parser.name> {
// TODO could be package level variable
var decisionToDFA = make([]antlr4.DFA,len(deserializedAtn.DecisionToState))
var sharedContextCache = antlr4.NewPredictionContextCache()
var deserializer = NewATNDeserializer()
var deserializedAtn = deserializer.deserialize(serializedATN)
var decisionToDFA = make([]DFA,len(deserializedAtn.decisionToState))
for index, ds := range deserializedAtn.decisionToState {
decisionToDFA[index] = NewDFA(ds, index)
for index, ds := range deserializedAtn.DecisionToState {
decisionToDFA[index] = antlr4.NewDFA(ds, index)
}
var sharedContextCache = NewPredictionContextCache()
var literalNames = [...]string{ <parser.literalNames:{t | <t>}; null="nil", separator=", ", wrap, anchor> }
var symbolicNames = [...]string{ <parser.symbolicNames:{t | <t>}; null="nil", separator=", ", wrap, anchor> }
var ruleNames = [...]string{ <parser.ruleNames:{r | "<r>"}; separator=", ", wrap, anchor> }
// init the parser
parser := new(<parser.name>)
parser._interp = NewParserATNSimulator(parser, atn, decisionToDFA, sharedContextCache)
parser.InitParser(input)
parser.Interpreter = antlr4.NewParserATNSimulator(parser, deserializedAtn, decisionToDFA, sharedContextCache)
parser.ruleNames = ruleNames
parser.literalNames = literalNames
parser.symbolicNames = symbolicNames
@ -133,7 +122,7 @@ func New<parser.name>(input) <parser.name> {
}
const(
<parser.name>EOF = TokenEOF
<parser.name>EOF = antlr4.TokenEOF
<if(parser.tokens)>
<parser.tokens:{k | <parser.name><k> = <parser.tokens.(k)>}; separator="\n", wrap, anchor>
<endif>
@ -242,7 +231,7 @@ func (p *<parser.name>) <currentRule.name>(<currentRule.args:{a | <a.name>}; sep
<if(exceptions)>
<exceptions; separator="\n"> // TODO not sure how exceptions are passed into clause
<else>
if v, ok = x.(error.RecognitionException); ok {
if v, ok = x.(RecognitionException); ok {
localctx.exception = v
p._errHandler.reportError(p, v)
p._errHandler.recover(p, v)
@ -273,23 +262,27 @@ LeftRecursiveRuleFunction(currentRule,args,code,locals,ruleCtx,altLabelCtxs,
<altLabelCtxs:{l | <altLabelCtxs.(l)>}; separator="\n">
func (p *<parser.name>) <currentRule.name>(_p<if(currentRule.args)>, <args:{a | , <a>}><endif>) {
// if(_p==undefined) {
// _p = 0
//}
_parentctx := p._ctx
_parentState := p.state
_parentctx := p.getParent()
_parentState := p.getState()
localctx := New<currentRule.ctxType>(p, p._ctx, _parentState<args:{a | , <a.name>}>)
_prevctx := localctx
_startState := <currentRule.startState>
p.enterRecursionRule(localctx, <currentRule.startState>, <parser.name>RULE_<currentRule.name>, _p)
<namedActions.init>
<locals; separator="\n">
defer func(){
<finallyAction>
p.unrollRecursionContexts(_parentctx)
}
try {
<code>
<postamble; separator="\n">
<namedActions.after>
} catch( error) {
if(error instanceof error.RecognitionException) {
if(error instanceof IRecognitionException) {
localctx.exception = error
p._errHandler.reportError(p, error)
p._errHandler.recover(p, error)
@ -441,7 +434,7 @@ case <i><if(!choice.ast.greedy)>+1<endif>:
Sync(s) ::= "sync(<s.expecting.name>)"
ThrowNoViableAlt(t) ::= "panic(new error.NoViableAltException(p))"
ThrowNoViableAlt(t) ::= "panic(NewNoViableAltException(p))"
TestSetInline(s) ::= <<
<s.bitsets:{bits | <if(rest(rest(bits.ttypes)))><bitsetBitfieldComparison(s, bits)><else><bitsetInlineComparison(s, bits)><endif>}; separator=" || ">
@ -519,7 +512,7 @@ ArgAction(a, chunks) ::= "<chunks>"
SemPred(p, chunks, failChunks) ::= <<
p.state = <p.stateNumber>
if !( <chunks>) {
panic( error.FailedPredicateException(p, <p.predicate><if(failChunks)>, <failChunks><elseif(p.msg)>, <p.msg><endif>))
panic( FailedPredicateException(p, <p.predicate><if(failChunks)>, <failChunks><elseif(p.msg)>, <p.msg><endif>))
}
>>
@ -564,13 +557,13 @@ TokenPropertyRef_int(t) ::= "(<ctx(t)>.<t.label> == null ? 0 : parseInt(<ctx(t)>
RulePropertyRef_start(r) ::= "(<ctx(r)>.<r.label>==null ? null : <ctx(r)>.<r.label>.start)"
RulePropertyRef_stop(r) ::= "(<ctx(r)>.<r.label>==null ? null : <ctx(r)>.<r.label>.stop)"
RulePropertyRef_text(r) ::= "(<ctx(r)>.<r.label>==null ? null : p._input.getText(new Interval(<ctx(r)>.<r.label>.start,<ctx(r)>.<r.label>.stop)))"
RulePropertyRef_text(r) ::= "(<ctx(r)>.<r.label>==null ? null : p._input.getText(NewInterval(<ctx(r)>.<r.label>.start,<ctx(r)>.<r.label>.stop)))"
RulePropertyRef_ctx(r) ::= "<ctx(r)>.<r.label>"
RulePropertyRef_parser(r) ::= "this"
ThisRulePropertyRef_start(r) ::= "localctx.start"
ThisRulePropertyRef_stop(r) ::= "localctx.stop"
ThisRulePropertyRef_text(r) ::= "p._input.getText(new Interval(localctx.start, p._input.LT(-1)))"
ThisRulePropertyRef_text(r) ::= "p._input.getText(NewInterval(localctx.start, p._input.LT(-1)))"
ThisRulePropertyRef_ctx(r) ::= "localctx"
ThisRulePropertyRef_parser(r) ::= "p"
@ -653,20 +646,19 @@ StructDecl(struct,ctorAttrs,attrs,getters,dispatchMethods,interfaces,extensionMe
superClass={ParserRuleContext}) ::= <<
type <struct.name> struct {
*ParserRuleContext
*antlr4.ParserRuleContext
parent *ParserRuleContext
parser *Parser
ruleIndex
parser antlr4.IParser
}
func New<struct.name>(parser *Parser, parent *ParserRuleContext, invokingState int<struct.ctorAttrs:{a | , <a.name>}>) <struct.name> {
func New<struct.name>(parser antlr4.IParser, parent antlr4.IParserRuleContext, invokingState int<struct.ctorAttrs:{a | , <a.name>}>) <struct.name> {
var p = new(<struct.name>)
p.InitParserRuleContext( parent, invokingState )
p.parser = parser
p.ruleIndex = <parser.name>RULE_<struct.derivedFromName>
p.RuleIndex = <parser.name>RULE_<struct.derivedFromName>
<attrs:{a | <a>}; separator="\n">
<struct.ctorAttrs:{a | p.<a.name> = <a.name> || null;}; separator="\n">
return p
@ -688,18 +680,16 @@ func (s *<struct.name>) copyFrom(ctx <struct.name>) {
AltLabelStructDecl(struct,attrs,getters,dispatchMethods) ::= <<
type <struct.name> struct {
parent *ParserRuleContext
parser *Parser
ruleIndex int
parent antlr4.IParserRuleContext
parser antlr4.IParser
}
func New<struct.name>(parser *Parser, ctx *ParserRuleContext) <struct.name> {
func New<struct.name>(parser antlr4.IParser, ctx antlr4.IParserRuleContext) <struct.name> {
var p = new(<struct.name>)
<currentRule.name; format="cap">Context.call(this, parser)
<attrs:{a | <a>;}; separator="\n">
<currentRule.name; format="cap">Context.prototype.copyFrom.call(this, ctx)
@ -713,25 +703,23 @@ func New<struct.name>(parser *Parser, ctx *ParserRuleContext) <struct.name> {
ListenerDispatchMethod(method) ::= <<
func (s *<struct.name>) <if(method.isEnter)>enter<else>exit<endif>Rule(listener *ParseTreeListener) {
// TODO
switch t := listener.(type) {
case *<parser.grammarName>Listener:
listener.<if(method.isEnter)>enter<else>exit<endif><struct.derivedFromName; format="cap">(s)
}
func (s *<struct.name>) <if(method.isEnter)>enter<else>exit<endif>Rule(listener antlr4.ParseTreeListener) {
listener.(*<parser.grammarName>Listener).<if(method.isEnter)>enter<else>exit<endif><struct.derivedFromName; format="cap">(s)
}
>>
VisitorDispatchMethod(method) ::= <<
func (s *<struct.name>) accept(visitor *ParseTreeVisitor) {
func (s *<struct.name>) accept(visitor antlr4.ParseTreeVisitor) interface{} {
switch t := listener.(type) {
case *<parser.grammarName>Listener:
return visitor.visit<struct.derivedFromName; format="cap">(s)
return t.visit<struct.derivedFromName; format="cap">(s)
default:
return visitor.visitChildren(s)
return t.visitChildren(s)
}
}
@ -754,13 +742,13 @@ recRuleSetReturnAction(src,name) ::= "$<name>=$<src>.<name>"
recRuleSetStopToken() ::= "p._ctx.stop = p._input.LT(-1);"
recRuleAltStartAction(ruleName, ctxName, label) ::= <<
localctx = new <ctxName>Context(this, _parentctx, _parentState)
localctx = New<ctxName>Context(this, _parentctx, _parentState)
<if(label)>localctx.<label> = _prevctx;<endif>
p.pushNewRecursionContext(localctx, _startState, <parser.name>RULE_<ruleName>)
>>
recRuleLabeledAltStartAction(ruleName, currentAltLabel, label, isListLabel) ::= <<
localctx = new <currentAltLabel; format="cap">Context(this, new <ruleName; format="cap">Context(this, _parentctx, _parentState))
localctx = New<currentAltLabel; format="cap">Context(this, New<ruleName; format="cap">Context(this, _parentctx, _parentState))
<if(label)>
<if(isListLabel)>
localctx.<label>.push(_prevctx)
@ -772,7 +760,7 @@ p.pushNewRecursionContext(localctx, _startState, <parser.name>RULE_<ruleName>)
>>
recRuleReplaceContext(ctxName) ::= <<
localctx = new <ctxName>Context(this, localctx)
localctx = New<ctxName>Context(this, localctx)
p._ctx = localctx
_prevctx = localctx
>>
@ -789,10 +777,7 @@ LexerFile(lexerFile, lexer, namedActions) ::= <<
<fileHeader(lexerFile.grammarFileName, lexerFile.ANTLRVersion)>
package parser
import (
"antlr4"
"strings"
)
import "antlr4"
<namedActions.header>
@ -804,9 +789,17 @@ Lexer(lexer, atn, actionFuncs, sempredFuncs, superClass) ::= <<
<atn>
var lexerDeserializer = antlr4.NewATNDeserializer(nil)
var lexerAtn = lexerDeserializer.Deserialize(serializedATN)
var lexerModeNames = []string{ <lexer.modes:{m| "<m>"}; separator=", ", wrap, anchor> }
var lexerLiteralNames = []string{ <lexer.literalNames:{t | <t>}; null="nil", separator=", ", wrap, anchor> }
var lexerSymbolicNames = []string{ <lexer.symbolicNames:{t | <t>}; null="nil", separator=", ", wrap, anchor> }
var lexerRuleNames = []string{ <lexer.ruleNames:{r | "<r>"}; separator=", ", wrap, anchor> }
type <lexer.name> struct {
<if(superClass)><superClass><else>Lexer<endif>
_interp *LexerATNSimulator
<if(superClass)><superClass><else>*antlr4.Lexer<endif>
modeNames []string
literalNames []string
symbolicNames []string
@ -815,30 +808,26 @@ type <lexer.name> struct {
EOF string
}
func New<lexer.name>(input *TokenStream) *<lexer.name> {
func New<lexer.name>(input antlr4.CharStream) *<lexer.name> {
// TODO could be package level variables
var lexerDecisionToDFA = make([]*antlr4.DFA,len(lexerAtn.DecisionToState))
var deserializer = NewATNDeserializer()
var deserializedAtn = deserializer.deserialize(serializedATN)
var decisionToDFA = make([]DFA,len(deserializedAtn.decisionToState))
for index, ds := range deserializedAtn.decisionToState {
decisionToDFA[index] = NewDFA(ds, index)
for index, ds := range lexerAtn.DecisionToState {
lexerDecisionToDFA[index] = antlr4.NewDFA(ds, index)
}
lex := new(<lexer.name>)
this.InitLexer(lex, input);
lex.InitLexer(input)
lex._interp = NewLexerATNSimulator(lex, atn, decisionToDFA, NewPredictionContextCache())
lex.modeNames = [...]string{ <lexer.modes:{m| "<m>"}; separator=", ", wrap, anchor> }
lex.literalNames = [...]string{ <lexer.literalNames:{t | <t>}; null="nil", separator=", ", wrap, anchor> }
lex.symbolicNames = [...]string{ <lexer.symbolicNames:{t | <t>}; null="nil", separator=", ", wrap, anchor> }
lex.ruleNames = [...]string{ <lexer.ruleNames:{r | "<r>"}; separator=", ", wrap, anchor> }
lex.Interpreter = antlr4.NewLexerATNSimulator(lex, lexerAtn, lexerDecisionToDFA, antlr4.NewPredictionContextCache())
lex.modeNames = lexerModeNames
lex.ruleNames = lexerRuleNames
lex.literalNames = lexerLiteralNames
lex.symbolicNames = lexerSymbolicNames
lex.grammarFileName = "<lexer.grammarFileName>"
lex.EOF = TokenEOF
lex.EOF = antlr4.TokenEOF
return lex
}
@ -860,7 +849,7 @@ const (
SerializedATN(model) ::= <<
<! only one segment, can be inlined !>
var serializedATN = strings.Join( [...]string{"<model.serialized; wrap={",<\n> "}>"}, "")
var serializedATN = []rune("<model.serialized>")
>>

View File

@ -1,32 +1,3 @@
/*
* [The "BSD license"]
* Copyright (c) 2012 Terence Parr
* Copyright (c) 2012 Sam Harwell
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package org.antlr.v4.codegen.target;
@ -48,7 +19,6 @@ import java.util.Set;
* */
public class GoTarget extends Target {
/** Source: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Lexical_grammar */
protected static final String[] goKeywords = {
"break","default","func","interface","select",
"case","defer","go","map","struct",
@ -82,99 +52,107 @@ public class GoTarget extends Target {
badWords.add("rule");
badWords.add("parserRule");
}
/**
* {@inheritDoc}
* <p/>
* For Java, this is the translation {@code 'a\n"'} &rarr; {@code "a\n\""}.
* Expect single quotes around the incoming literal. Just flip the quotes
* and replace double quotes with {@code \"}.
* <p/>
* Note that we have decided to allow people to use '\"' without penalty, so
* we must build the target string in a loop as {@link String#replace}
* cannot handle both {@code \"} and {@code "} without a lot of messing
* around.
*/
@Override
public String getTargetStringLiteralFromANTLRStringLiteral(
CodeGenerator generator,
String literal, boolean addQuotes)
{
StringBuilder sb = new StringBuilder();
String is = literal;
if ( addQuotes ) sb.append('"');
for (int i = 1; i < is.length() -1; i++) {
if (is.charAt(i) == '\\') {
// Anything escaped is what it is! We assume that
// people know how to escape characters correctly. However
// we catch anything that does not need an escape in Java (which
// is what the default implementation is dealing with and remove
// the escape. The C target does this for instance.
//
switch (is.charAt(i+1)) {
// Pass through any escapes that Java also needs
//
case '"':
case 'n':
case 'r':
case 't':
case 'b':
case 'f':
case '\\':
// Pass the escape through
sb.append('\\');
break;
case 'u': // Assume unnnn
// Pass the escape through as double \\
// so that Java leaves as \u0000 string not char
sb.append('\\');
sb.append('\\');
break;
default:
// Remove the escape by virtue of not adding it here
// Thus \' becomes ' and so on
break;
}
// Go past the \ character
i++;
} else {
// Characters that don't need \ in ANTLR 'strings' but do in Java
if (is.charAt(i) == '"') {
// We need to escape " in Java
sb.append('\\');
}
}
// Add in the next character, which may have been escaped
sb.append(is.charAt(i));
}
if ( addQuotes ) sb.append('"');
return sb.toString();
}
@Override
public String encodeIntAsCharEscape(int v) {
if (v < Character.MIN_VALUE || v > Character.MAX_VALUE) {
throw new IllegalArgumentException(String.format("Cannot encode the specified value: %d", v));
}
if (v >= 0 && v < targetCharValueEscape.length && targetCharValueEscape[v] != null) {
return targetCharValueEscape[v];
}
if (v >= 0x20 && v < 127) {
return String.valueOf((char)v);
}
String hex = Integer.toHexString(v|0x10000).substring(1,5);
return "\\u"+hex;
}
//
// /**
// * {@inheritDoc}
// * <p/>
// * For Java, this is the translation {@code 'a\n"'} &rarr; {@code "a\n\""}.
// * Expect single quotes around the incoming literal. Just flip the quotes
// * and replace double quotes with {@code \"}.
// * <p/>
// * Note that we have decided to allow people to use '\"' without penalty, so
// * we must build the target string in a loop as {@link String#replace}
// * cannot handle both {@code \"} and {@code "} without a lot of messing
// * around.
// */
// @Override
// public String getTargetStringLiteralFromANTLRStringLiteral(
// CodeGenerator generator,
// String literal, boolean addQuotes)
// {
// System.out.println(literal);
// System.out.println("GO TARGET!");
//
// StringBuilder sb = new StringBuilder();
// String is = literal;
//
// if ( addQuotes ) sb.append('"');
//
// for (int i = 1; i < is.length() -1; i++) {
// if (is.charAt(i) == '\\') {
// // Anything escaped is what it is! We assume that
// // people know how to escape characters correctly. However
// // we catch anything that does not need an escape in Java (which
// // is what the default implementation is dealing with and remove
// // the escape. The C target does this for instance.
// //
// switch (is.charAt(i+1)) {
// // Pass through any escapes that Java also needs
// //
// case '"':
// case 'n':
// case 'r':
// case 't':
// case 'b':
// case 'f':
// case '\\':
// // Pass the escape through
// sb.append('\\');
// break;
//
// case 'u': // Assume unnnn
// // Pass the escape through as double \\
// // so that Java leaves as \u0000 string not char
// sb.append('\\');
// sb.append('\\');
// break;
//
// default:
// // Remove the escape by virtue of not adding it here
// // Thus \' becomes ' and so on
// break;
// }
//
// // Go past the \ character
// i++;
// } else {
// // Characters that don't need \ in ANTLR 'strings' but do in Java
// if (is.charAt(i) == '"') {
// // We need to escape " in Java
// sb.append('\\');
// }
// }
// // Add in the next character, which may have been escaped
// sb.append(is.charAt(i));
// }
//
// if ( addQuotes ) sb.append('"');
//
// String s = sb.toString();
// System.out.println("AfTER: " + s);
// return s;
// }
//
// @Override
// public String encodeIntAsCharEscape(int v) {
// if (v < Character.MIN_VALUE || v > Character.MAX_VALUE) {
// throw new IllegalArgumentException(String.format("Cannot encode the specified value: %d", v));
// }
//
// if (v >= 0 && v < targetCharValueEscape.length && targetCharValueEscape[v] != null) {
// return targetCharValueEscape[v];
// }
//
// if (v >= 0x20 && v < 127) {
// return String.valueOf((char)v);
// }
//
// String hex = Integer.toHexString(v|0x10000).substring(1,5);
// String h2 = "\\u"+hex;
//
// System.out.println("Token : " + h2);
// return h2;
// }
@Override
public int getSerializedATNSegmentLimit() {
@ -202,12 +180,13 @@ public class GoTarget extends Target {
@Override
public String toString(Object o, String formatString, Locale locale) {
if ("java-escape".equals(formatString)) {
// 5C is the hex code for the \ itself
return ((String)o).replace("\\u", "\\u005Cu");
}
return super.toString(o, formatString, locale);
return super.toString(o, formatString, locale).replace("\\'", "\'");
}
}
@ -224,3 +203,4 @@ public class GoTarget extends Target {
return false;
}
}