diff --git a/runtime/Go/src/antlr4/ATN.go b/runtime/Go/src/antlr4/ATN.go index a5e01cd62..e629726b8 100644 --- a/runtime/Go/src/antlr4/ATN.go +++ b/runtime/Go/src/antlr4/ATN.go @@ -1,105 +1,106 @@ package antlr4 type ATN struct { - grammarType int - maxTokenType int - states []IATNState - decisionToState []*DecisionState - ruleToStartState []*RuleStartState - ruleToStopState []*RuleStopState - modeNameToStartState map[string]*TokensStartState - modeToStartState []*TokensStartState - ruleToTokenType []int - lexerActions []ILexerAction + DecisionToState []*DecisionState + + grammarType int + maxTokenType int + states []IATNState + ruleToStartState []*RuleStartState + ruleToStopState []*RuleStopState + modeNameToStartState map[string]*TokensStartState + modeToStartState []*TokensStartState + ruleToTokenType []int + lexerActions []ILexerAction } func NewATN(grammarType int, maxTokenType int) *ATN { - atn := new(ATN) + atn := new(ATN) - // Used for runtime deserialization of ATNs from strings/// - // The type of the ATN. - atn.grammarType = grammarType - // The maximum value for any symbol recognized by a transition in the ATN. - atn.maxTokenType = maxTokenType - atn.states = make([]IATNState,0) - // Each subrule/rule is a decision point and we must track them so we - // can go back later and build DFA predictors for them. This includes - // all the rules, subrules, optional blocks, ()+, ()* etc... - atn.decisionToState = make([]*DecisionState, 0) - // Maps from rule index to starting state number. - atn.ruleToStartState = make([]*RuleStartState, 0) - // Maps from rule index to stop state number. - atn.ruleToStopState = nil - atn.modeNameToStartState = make( map[string]*TokensStartState ) - // For lexer ATNs, atn.maps the rule index to the resulting token type. - // For parser ATNs, atn.maps the rule index to the generated bypass token - // type if the - // {@link ATNDeserializationOptions//isGenerateRuleBypassTransitions} - // deserialization option was specified otherwise, atn.is {@code nil}. - atn.ruleToTokenType = nil - // For lexer ATNs, atn.is an array of {@link LexerAction} objects which may - // be referenced by action transitions in the ATN. - atn.lexerActions = nil - atn.modeToStartState = make([]*TokensStartState, 0) + // Used for runtime deserialization of ATNs from strings/// + // The type of the ATN. + atn.grammarType = grammarType + // The maximum value for any symbol recognized by a transition in the ATN. + atn.maxTokenType = maxTokenType + atn.states = make([]IATNState, 0) + // Each subrule/rule is a decision point and we must track them so we + // can go back later and build DFA predictors for them. This includes + // all the rules, subrules, optional blocks, ()+, ()* etc... + atn.DecisionToState = make([]*DecisionState, 0) + // Maps from rule index to starting state number. + atn.ruleToStartState = make([]*RuleStartState, 0) + // Maps from rule index to stop state number. + atn.ruleToStopState = nil + atn.modeNameToStartState = make(map[string]*TokensStartState) + // For lexer ATNs, atn.maps the rule index to the resulting token type. + // For parser ATNs, atn.maps the rule index to the generated bypass token + // type if the + // {@link ATNDeserializationOptions//isGenerateRuleBypassTransitions} + // deserialization option was specified otherwise, atn.is {@code nil}. + atn.ruleToTokenType = nil + // For lexer ATNs, atn.is an array of {@link LexerAction} objects which may + // be referenced by action transitions in the ATN. 
+ atn.lexerActions = nil + atn.modeToStartState = make([]*TokensStartState, 0) - return atn + return atn } - + // Compute the set of valid tokens that can occur starting in state {@code s}. // If {@code ctx} is nil, the set of tokens will not include what can follow // the rule surrounding {@code s}. In other words, the set will be // restricted to tokens reachable staying within {@code s}'s rule. func (this *ATN) nextTokensInContext(s IATNState, ctx IRuleContext) *IntervalSet { - var anal = NewLL1Analyzer(this) - return anal.LOOK(s, nil, ctx) + var anal = NewLL1Analyzer(this) + return anal.LOOK(s, nil, ctx) } // Compute the set of valid tokens that can occur starting in {@code s} and // staying in same rule. {@link Token//EPSILON} is in set if we reach end of // rule. func (this *ATN) nextTokensNoContext(s IATNState) *IntervalSet { - if (s.getNextTokenWithinRule() != nil ) { - return s.getNextTokenWithinRule() - } - s.setNextTokenWithinRule( this.nextTokensInContext(s, nil) ) - s.getNextTokenWithinRule().readOnly = true - return s.getNextTokenWithinRule() + if s.getNextTokenWithinRule() != nil { + return s.getNextTokenWithinRule() + } + s.setNextTokenWithinRule(this.nextTokensInContext(s, nil)) + s.getNextTokenWithinRule().readOnly = true + return s.getNextTokenWithinRule() } func (this *ATN) nextTokens(s IATNState, ctx IRuleContext) *IntervalSet { - if ( ctx==nil ) { - return this.nextTokensNoContext(s) - } else { - return this.nextTokensInContext(s, ctx) - } + if ctx == nil { + return this.nextTokensNoContext(s) + } else { + return this.nextTokensInContext(s, ctx) + } } -func (this *ATN) addState( state IATNState ) { - if ( state != nil ) { - state.setATN(this) - state.setStateNumber(len(this.states)) - } - this.states = append(this.states, state) +func (this *ATN) addState(state IATNState) { + if state != nil { + state.setATN(this) + state.setStateNumber(len(this.states)) + } + this.states = append(this.states, state) } -func (this *ATN) removeState( state IATNState ) { - this.states[state.getStateNumber()] = nil // just free mem, don't shift states in list +func (this *ATN) removeState(state IATNState) { + this.states[state.getStateNumber()] = nil // just free mem, don't shift states in list } -func (this *ATN) defineDecisionState( s *DecisionState ) int { - this.decisionToState = append( this.decisionToState, s) - s.decision = len(this.decisionToState)-1 - return s.decision +func (this *ATN) defineDecisionState(s *DecisionState) int { + this.DecisionToState = append(this.DecisionToState, s) + s.decision = len(this.DecisionToState) - 1 + return s.decision } -func (this *ATN) getDecisionState( decision int) *DecisionState { - if (len(this.decisionToState)==0) { - return nil - } else { - return this.decisionToState[decision] - } +func (this *ATN) getDecisionState(decision int) *DecisionState { + if len(this.DecisionToState) == 0 { + return nil + } else { + return this.DecisionToState[decision] + } } // Computes the set of input symbols which could follow ATN state number @@ -122,31 +123,30 @@ func (this *ATN) getDecisionState( decision int) *DecisionState { //var Token = require('./../Token').Token -func (this *ATN) getExpectedTokens( stateNumber int, ctx IRuleContext ) *IntervalSet { - if ( stateNumber < 0 || stateNumber >= len(this.states) ) { - panic("Invalid state number.") - } - var s = this.states[stateNumber] - var following = this.nextTokens(s, nil) - if (!following.contains(TokenEpsilon)) { - return following - } - var expected = NewIntervalSet() - expected.addSet(following) - 
expected.removeOne(TokenEpsilon) - for (ctx != nil && ctx.getInvokingState() >= 0 && following.contains(TokenEpsilon)) { - var invokingState = this.states[ctx.getInvokingState()] - var rt = invokingState.getTransitions()[0] - following = this.nextTokens(rt.(*RuleTransition).followState, nil) - expected.addSet(following) - expected.removeOne(TokenEpsilon) - ctx = ctx.getParent().(IRuleContext) - } - if (following.contains(TokenEpsilon)) { - expected.addOne(TokenEOF) - } - return expected +func (this *ATN) getExpectedTokens(stateNumber int, ctx IRuleContext) *IntervalSet { + if stateNumber < 0 || stateNumber >= len(this.states) { + panic("Invalid state number.") + } + var s = this.states[stateNumber] + var following = this.nextTokens(s, nil) + if !following.contains(TokenEpsilon) { + return following + } + var expected = NewIntervalSet() + expected.addSet(following) + expected.removeOne(TokenEpsilon) + for ctx != nil && ctx.getInvokingState() >= 0 && following.contains(TokenEpsilon) { + var invokingState = this.states[ctx.getInvokingState()] + var rt = invokingState.getTransitions()[0] + following = this.nextTokens(rt.(*RuleTransition).followState, nil) + expected.addSet(following) + expected.removeOne(TokenEpsilon) + ctx = ctx.getParent().(IRuleContext) + } + if following.contains(TokenEpsilon) { + expected.addOne(TokenEOF) + } + return expected } var ATNINVALID_ALT_NUMBER = 0 - diff --git a/runtime/Go/src/antlr4/ATNConfig.go b/runtime/Go/src/antlr4/ATNConfig.go index 6fc2b64c9..7f7651e19 100644 --- a/runtime/Go/src/antlr4/ATNConfig.go +++ b/runtime/Go/src/antlr4/ATNConfig.go @@ -1,8 +1,8 @@ package antlr4 import ( - "reflect" "fmt" + "reflect" "strconv" ) @@ -33,25 +33,25 @@ type IATNConfig interface { type ATNConfig struct { precedenceFilterSuppressed bool - state IATNState - alt int - context IPredictionContext - semanticContext SemanticContext - reachesIntoOuterContext int + state IATNState + alt int + context IPredictionContext + semanticContext SemanticContext + reachesIntoOuterContext int } func NewATNConfig7(old *ATNConfig) *ATNConfig { // dup a := new(ATNConfig) - a.state = old.state; - a.alt = old.alt; - a.context = old.context; - a.semanticContext = old.semanticContext; - a.reachesIntoOuterContext = old.reachesIntoOuterContext; + a.state = old.state + a.alt = old.alt + a.context = old.context + a.semanticContext = old.semanticContext + a.reachesIntoOuterContext = old.reachesIntoOuterContext return a } func NewATNConfig6(state IATNState, alt int, context IPredictionContext) *ATNConfig { - return NewATNConfig5(state, alt, context, SemanticContextNONE); + return NewATNConfig5(state, alt, context, SemanticContextNONE) } func NewATNConfig5(state IATNState, alt int, context IPredictionContext, semanticContext SemanticContext) *ATNConfig { @@ -61,23 +61,23 @@ func NewATNConfig5(state IATNState, alt int, context IPredictionContext, semanti return a } -func NewATNConfig4(c IATNConfig , state IATNState) *ATNConfig { - return NewATNConfig(c, state, c.getContext(), c.getSemanticContext()); +func NewATNConfig4(c IATNConfig, state IATNState) *ATNConfig { + return NewATNConfig(c, state, c.getContext(), c.getSemanticContext()) } -func NewATNConfig3(c IATNConfig , state IATNState, semanticContext SemanticContext) *ATNConfig { - return NewATNConfig(c, state, c.getContext(), semanticContext); +func NewATNConfig3(c IATNConfig, state IATNState, semanticContext SemanticContext) *ATNConfig { + return NewATNConfig(c, state, c.getContext(), semanticContext) } -func NewATNConfig2(c IATNConfig , 
semanticContext SemanticContext) *ATNConfig { - return NewATNConfig(c, c.getState(), c.getContext(), semanticContext); +func NewATNConfig2(c IATNConfig, semanticContext SemanticContext) *ATNConfig { + return NewATNConfig(c, c.getState(), c.getContext(), semanticContext) } -func NewATNConfig1(c IATNConfig , state IATNState, context IPredictionContext) *ATNConfig { - return NewATNConfig(c, state, context, c.getSemanticContext()); +func NewATNConfig1(c IATNConfig, state IATNState, context IPredictionContext) *ATNConfig { + return NewATNConfig(c, state, context, c.getSemanticContext()) } -func NewATNConfig(c IATNConfig , state IATNState, context IPredictionContext, semanticContext SemanticContext) *ATNConfig { +func NewATNConfig(c IATNConfig, state IATNState, context IPredictionContext, semanticContext SemanticContext) *ATNConfig { a := new(ATNConfig) a.InitATNConfig(c, state, context, semanticContext) @@ -119,23 +119,22 @@ func (this *ATNConfig) setReachesIntoOuterContext(v int) { this.reachesIntoOuterContext = v } +func (a *ATNConfig) InitATNConfig(c IATNConfig, state IATNState, context IPredictionContext, semanticContext SemanticContext) { -func (a *ATNConfig) InitATNConfig(c IATNConfig, state IATNState, context IPredictionContext, semanticContext SemanticContext) { - - a.state = state; - a.alt = c.getAlt(); - a.context = context; - a.semanticContext = semanticContext; - a.reachesIntoOuterContext = c.getReachesIntoOuterContext(); + a.state = state + a.alt = c.getAlt() + a.context = context + a.semanticContext = semanticContext + a.reachesIntoOuterContext = c.getReachesIntoOuterContext() } func (a *ATNConfig) InitATNConfig2(state IATNState, alt int, context IPredictionContext, semanticContext SemanticContext) { - a.state = state; - a.alt = alt; - a.context = context; - a.semanticContext = semanticContext; + a.state = state + a.alt = alt + a.context = context + a.semanticContext = semanticContext } @@ -144,57 +143,55 @@ func (a *ATNConfig) InitATNConfig2(state IATNState, alt int, context IPrediction // syntactic/semantic contexts are the same. 
/// func (this *ATNConfig) equals(other interface{}) bool { - if (this == other) { - return true - } else if _, ok := other.(*ATNConfig); !ok { - return false - } else { - return reflect.DeepEqual(this, other) - } + if this == other { + return true + } else if _, ok := other.(*ATNConfig); !ok { + return false + } else { + return reflect.DeepEqual(this, other) + } } func (this *ATNConfig) shortHashString() string { - return "" + strconv.Itoa(this.state.getStateNumber()) + "/" + strconv.Itoa(this.alt) + "/" + this.semanticContext.toString() + return "" + strconv.Itoa(this.state.getStateNumber()) + "/" + strconv.Itoa(this.alt) + "/" + this.semanticContext.toString() } func (this *ATNConfig) hashString() string { var c string - if (this.context == nil){ + if this.context == nil { c = "" } else { c = this.context.hashString() } - return "" + strconv.Itoa(this.state.getStateNumber()) + "/" + strconv.Itoa(this.alt) + "/" + c + "/" + this.semanticContext.toString() + return "" + strconv.Itoa(this.state.getStateNumber()) + "/" + strconv.Itoa(this.alt) + "/" + c + "/" + this.semanticContext.toString() } func (this *ATNConfig) toString() string { var a string - if (this.context != nil){ + if this.context != nil { a = ",[" + fmt.Sprint(this.context) + "]" } var b string - if (this.semanticContext != SemanticContextNONE){ + if this.semanticContext != SemanticContextNONE { b = ("," + fmt.Sprint(this.semanticContext)) } var c string - if (this.reachesIntoOuterContext > 0){ + if this.reachesIntoOuterContext > 0 { c = ",up=" + fmt.Sprint(this.reachesIntoOuterContext) } - return "(" + fmt.Sprint(this.state) + "," + strconv.Itoa(this.alt) + a + b + c + ")" + return "(" + fmt.Sprint(this.state) + "," + strconv.Itoa(this.alt) + a + b + c + ")" } - - type LexerATNConfig struct { ATNConfig - lexerActionExecutor *LexerActionExecutor + lexerActionExecutor *LexerActionExecutor passedThroughNonGreedyDecision bool } @@ -219,7 +216,7 @@ func NewLexerATNConfig5(state IATNState, alt int, context IPredictionContext, le return this } -func NewLexerATNConfig4(c *LexerATNConfig, state IATNState) *LexerATNConfig { +func NewLexerATNConfig4(c *LexerATNConfig, state IATNState) *LexerATNConfig { this := new(LexerATNConfig) @@ -239,7 +236,7 @@ func NewLexerATNConfig3(c *LexerATNConfig, state IATNState, lexerActionExecutor return this } -func NewLexerATNConfig2(c *LexerATNConfig, state IATNState, context IPredictionContext) *LexerATNConfig { +func NewLexerATNConfig2(c *LexerATNConfig, state IATNState, context IPredictionContext) *LexerATNConfig { this := new(LexerATNConfig) @@ -249,21 +246,19 @@ func NewLexerATNConfig2(c *LexerATNConfig, state IATNState, context IPrediction return this } - -func NewLexerATNConfig1( state IATNState, alt int, context IPredictionContext) *LexerATNConfig { +func NewLexerATNConfig1(state IATNState, alt int, context IPredictionContext) *LexerATNConfig { this := new(LexerATNConfig) // c IATNConfig , state IATNState, context IPredictionContext, semanticContext SemanticContext this.InitATNConfig2(state, alt, context, SemanticContextNONE) - this.lexerActionExecutor = nil - this.passedThroughNonGreedyDecision = false + this.lexerActionExecutor = nil + this.passedThroughNonGreedyDecision = false - return this + return this } - func (this *LexerATNConfig) hashString() string { var f string @@ -273,7 +268,7 @@ func (this *LexerATNConfig) hashString() string { f = "0" } - return "" + strconv.Itoa(this.state.getStateNumber()) + strconv.Itoa(this.alt) + fmt.Sprint(this.context) + + return "" + 
strconv.Itoa(this.state.getStateNumber()) + strconv.Itoa(this.alt) + fmt.Sprint(this.context) + fmt.Sprint(this.semanticContext) + f + fmt.Sprint(this.lexerActionExecutor) } @@ -281,27 +276,27 @@ func (this *LexerATNConfig) equals(other interface{}) bool { othert, ok := other.(*LexerATNConfig) - if (this == other) { - return true - } else if !ok { - return false - } else if (this.passedThroughNonGreedyDecision != othert.passedThroughNonGreedyDecision) { - return false - } + if this == other { + return true + } else if !ok { + return false + } else if this.passedThroughNonGreedyDecision != othert.passedThroughNonGreedyDecision { + return false + } var b bool - if (this.lexerActionExecutor != nil){ - b = !this.lexerActionExecutor.equals(othert.lexerActionExecutor) + if this.lexerActionExecutor != nil { + b = !this.lexerActionExecutor.equals(othert.lexerActionExecutor) } else { b = othert.lexerActionExecutor != nil } - if (b) { - return false - } else { + if b { + return false + } else { panic("Not implemented") -// return ATNConfig.prototype.equals.call(this, other) - } + // return ATNConfig.prototype.equals.call(this, other) + } } func checkNonGreedyDecision(source *LexerATNConfig, target IATNState) bool { diff --git a/runtime/Go/src/antlr4/ATNConfigSet.go b/runtime/Go/src/antlr4/ATNConfigSet.go index 45d1f5add..76956645b 100644 --- a/runtime/Go/src/antlr4/ATNConfigSet.go +++ b/runtime/Go/src/antlr4/ATNConfigSet.go @@ -1,4 +1,5 @@ package antlr4 + import ( "fmt" ) @@ -14,27 +15,27 @@ func hashATNConfig(c interface{}) string { } func equalATNConfigs(a, b interface{}) bool { - if ( a==b ) { + if a == b { return true } - if ( a==nil || b==nil ) { + if a == nil || b == nil { return false } - return a.(*ATNConfig).state.getStateNumber()==b.(*ATNConfig).state.getStateNumber() && - a.(*ATNConfig).alt==b.(*ATNConfig).alt && + return a.(*ATNConfig).state.getStateNumber() == b.(*ATNConfig).state.getStateNumber() && + a.(*ATNConfig).alt == b.(*ATNConfig).alt && a.(*ATNConfig).semanticContext.equals(b.(*ATNConfig).semanticContext) } type ATNConfigSet struct { - readOnly bool - fullCtx bool - configLookup *Set - conflictingAlts *BitSet - cachedHashString string - hasSemanticContext bool + readOnly bool + fullCtx bool + configLookup *Set + conflictingAlts *BitSet + cachedHashString string + hasSemanticContext bool dipsIntoOuterContext bool - configs []IATNConfig - uniqueAlt int + configs []IATNConfig + uniqueAlt int } func NewATNConfigSet(fullCtx bool) *ATNConfigSet { @@ -98,19 +99,19 @@ func (a *ATNConfigSet) InitATNConfigSet(fullCtx bool) { // / func (this *ATNConfigSet) add(config IATNConfig, mergeCache *DoubleDict) bool { - if (this.readOnly) { + if this.readOnly { panic("This set is readonly") } - if (config.getSemanticContext() != SemanticContextNONE) { + if config.getSemanticContext() != SemanticContextNONE { this.hasSemanticContext = true } - if (config.getReachesIntoOuterContext() > 0) { + if config.getReachesIntoOuterContext() > 0 { this.dipsIntoOuterContext = true } var existing = this.configLookup.add(config).(IATNConfig) - if (existing == config) { + if existing == config { this.cachedHashString = "-1" - this.configs = append( this.configs, config )// track order here + this.configs = append(this.configs, config) // track order here return true } // a previous (s,i,pi,_), merge with it and save result @@ -119,17 +120,17 @@ func (this *ATNConfigSet) add(config IATNConfig, mergeCache *DoubleDict) bool { // no need to check for existing.context, config.context in cache // since only way to 
create Newgraphs is "call rule" and here. We // cache at both places. - existing.setReachesIntoOuterContext( intMax( existing.getReachesIntoOuterContext(), config.getReachesIntoOuterContext()) ) + existing.setReachesIntoOuterContext(intMax(existing.getReachesIntoOuterContext(), config.getReachesIntoOuterContext())) // make sure to preserve the precedence filter suppression during the merge - if (config.getPrecedenceFilterSuppressed()) { - existing.setPrecedenceFilterSuppressed( true ) + if config.getPrecedenceFilterSuppressed() { + existing.setPrecedenceFilterSuppressed(true) } - existing.setContext( merged )// replace context no need to alt mapping + existing.setContext(merged) // replace context no need to alt mapping return true } func (this *ATNConfigSet) getStates() *Set { - var states = NewSet(nil,nil) + var states = NewSet(nil, nil) for i := 0; i < len(this.configs); i++ { states.add(this.configs[i].getState()) } @@ -137,10 +138,10 @@ func (this *ATNConfigSet) getStates() *Set { } func (this *ATNConfigSet) getPredicates() []SemanticContext { - var preds = make([]SemanticContext,0) + var preds = make([]SemanticContext, 0) for i := 0; i < len(this.configs); i++ { c := this.configs[i].getSemanticContext() - if (c != SemanticContextNONE) { + if c != SemanticContextNONE { preds = append(preds, c) } } @@ -152,10 +153,10 @@ func (this *ATNConfigSet) getItems() []IATNConfig { } func (this *ATNConfigSet) optimizeConfigs(interpreter *ATNSimulator) { - if (this.readOnly) { + if this.readOnly { panic("This set is readonly") } - if (this.configLookup.length() == 0) { + if this.configLookup.length() == 0 { return } for i := 0; i < len(this.configs); i++ { @@ -164,15 +165,15 @@ func (this *ATNConfigSet) optimizeConfigs(interpreter *ATNSimulator) { } } -func (this *ATNConfigSet) addAll(coll []*ATNConfig) bool{ +func (this *ATNConfigSet) addAll(coll []*ATNConfig) bool { for i := 0; i < len(coll); i++ { - this.add(coll[i],nil) + this.add(coll[i], nil) } return false } func (this *ATNConfigSet) equals(other interface{}) bool { - if (this == other) { + if this == other { return true } else if _, ok := other.(*ATNConfigSet); !ok { return false @@ -181,17 +182,17 @@ func (this *ATNConfigSet) equals(other interface{}) bool { other2 := other.(*ATNConfigSet) return this.configs != nil && -// this.configs.equals(other2.configs) && // TODO is this necessary? - this.fullCtx == other2.fullCtx && - this.uniqueAlt == other2.uniqueAlt && - this.conflictingAlts == other2.conflictingAlts && - this.hasSemanticContext == other2.hasSemanticContext && - this.dipsIntoOuterContext == other2.dipsIntoOuterContext + // this.configs.equals(other2.configs) && // TODO is this necessary? 
+ this.fullCtx == other2.fullCtx && + this.uniqueAlt == other2.uniqueAlt && + this.conflictingAlts == other2.conflictingAlts && + this.hasSemanticContext == other2.hasSemanticContext && + this.dipsIntoOuterContext == other2.dipsIntoOuterContext } func (this *ATNConfigSet) hashString() string { - if (this.readOnly) { - if (this.cachedHashString == "-1") { + if this.readOnly { + if this.cachedHashString == "-1" { this.cachedHashString = this.hashConfigs() } return this.cachedHashString @@ -216,22 +217,22 @@ func (this *ATNConfigSet) isEmpty() bool { return len(this.configs) == 0 } -func (this *ATNConfigSet) contains(item *ATNConfig ) bool { - if (this.configLookup == nil) { +func (this *ATNConfigSet) contains(item *ATNConfig) bool { + if this.configLookup == nil { panic("This method is not implemented for readonly sets.") } return this.configLookup.contains(item) } -func (this *ATNConfigSet) containsFast(item *ATNConfig ) bool { - if (this.configLookup == nil) { +func (this *ATNConfigSet) containsFast(item *ATNConfig) bool { + if this.configLookup == nil { panic("This method is not implemented for readonly sets.") } return this.configLookup.contains(item) // TODO containsFast is not implemented for Set } func (this *ATNConfigSet) clear() { - if (this.readOnly) { + if this.readOnly { panic("This set is readonly") } this.configs = make([]IATNConfig, 0) @@ -241,7 +242,7 @@ func (this *ATNConfigSet) clear() { func (this *ATNConfigSet) setReadonly(readOnly bool) { this.readOnly = readOnly - if (readOnly) { + if readOnly { this.configLookup = nil // can't mod, no need for lookup cache } } @@ -249,18 +250,17 @@ func (this *ATNConfigSet) setReadonly(readOnly bool) { func (this *ATNConfigSet) toString() string { panic("not implemented") return "" -// return Utils.arrayToString(this.configs) + -// (this.hasSemanticContext ? ",hasSemanticContext=" + this.hasSemanticContext : "") + -// (this.uniqueAlt != ATN.INVALID_ALT_NUMBER ? ",uniqueAlt=" + this.uniqueAlt : "") + -// (this.conflictingAlts != nil ? ",conflictingAlts=" + this.conflictingAlts : "") + -// (this.dipsIntoOuterContext ? ",dipsIntoOuterContext" : "") + // return Utils.arrayToString(this.configs) + + // (this.hasSemanticContext ? ",hasSemanticContext=" + this.hasSemanticContext : "") + + // (this.uniqueAlt != ATN.INVALID_ALT_NUMBER ? ",uniqueAlt=" + this.uniqueAlt : "") + + // (this.conflictingAlts != nil ? ",conflictingAlts=" + this.conflictingAlts : "") + + // (this.dipsIntoOuterContext ? 
",dipsIntoOuterContext" : "") } type OrderedATNConfigSet struct { *ATNConfigSet } - func NewOrderedATNConfigSet() *OrderedATNConfigSet { this := new(OrderedATNConfigSet) @@ -270,6 +270,3 @@ func NewOrderedATNConfigSet() *OrderedATNConfigSet { return this } - - - diff --git a/runtime/Go/src/antlr4/ATNDeserializationOptions.go b/runtime/Go/src/antlr4/ATNDeserializationOptions.go index 8bf4b4852..e021cfb20 100644 --- a/runtime/Go/src/antlr4/ATNDeserializationOptions.go +++ b/runtime/Go/src/antlr4/ATNDeserializationOptions.go @@ -1,23 +1,21 @@ package antlr4 type ATNDeserializationOptions struct { - readOnly bool - verifyATN bool + readOnly bool + verifyATN bool generateRuleBypassTransitions bool } func NewATNDeserializationOptions(copyFrom *ATNDeserializationOptions) *ATNDeserializationOptions { o := new(ATNDeserializationOptions) - if (copyFrom != nil){ + if copyFrom != nil { o.readOnly = copyFrom.readOnly o.verifyATN = copyFrom.verifyATN o.generateRuleBypassTransitions = copyFrom.generateRuleBypassTransitions } - return o + return o } -var ATNDeserializationOptionsdefaultOptions = &ATNDeserializationOptions{true,false,false} - - +var ATNDeserializationOptionsdefaultOptions = &ATNDeserializationOptions{true, false, false} diff --git a/runtime/Go/src/antlr4/ATNDeserializer.go b/runtime/Go/src/antlr4/ATNDeserializer.go index a4c450f02..a33ed5073 100644 --- a/runtime/Go/src/antlr4/ATNDeserializer.go +++ b/runtime/Go/src/antlr4/ATNDeserializer.go @@ -1,9 +1,10 @@ package antlr4 + import ( - "strings" - "fmt" - "encoding/hex" - "strconv" + "encoding/hex" + "fmt" + "strconv" + "strings" ) // This is the earliest supported serialized UUID. @@ -12,43 +13,41 @@ var BASE_SERIALIZED_UUID = "AADB8D7E-AEEF-4415-AD2B-8204D6CF042E" // This list contains all of the currently supported UUIDs, ordered by when // the feature first appeared in this branch. -var SUPPORTED_UUIDS = []string{ BASE_SERIALIZED_UUID } +var SUPPORTED_UUIDS = []string{BASE_SERIALIZED_UUID} var SERIALIZED_VERSION = 3 // This is the current serialized UUID. var SERIALIZED_UUID = BASE_SERIALIZED_UUID -func initIntArray( length, value int) []int { +func initIntArray(length, value int) []int { var tmp = make([]int, length) - for i := range tmp { - tmp[i] = value - } + for i := range tmp { + tmp[i] = value + } return tmp } type ATNDeserializer struct { - - deserializationOptions *ATNDeserializationOptions - data []rune - pos int - uuid string - + deserializationOptions *ATNDeserializationOptions + data []rune + pos int + uuid string } -func NewATNDeserializer (options *ATNDeserializationOptions) *ATNDeserializer { - - if ( options == nil ) { - options = ATNDeserializationOptionsdefaultOptions - } +func NewATNDeserializer(options *ATNDeserializationOptions) *ATNDeserializer { - this := new(ATNDeserializer) + if options == nil { + options = ATNDeserializationOptionsdefaultOptions + } - this.deserializationOptions = options - - return this + this := new(ATNDeserializer) + + this.deserializationOptions = options + + return this } // Determines if a particular serialized representation of an ATN supports @@ -64,411 +63,411 @@ func NewATNDeserializer (options *ATNDeserializationOptions) *ATNDeserializer { // introduced otherwise, {@code false}. 
func stringInSlice(a string, list []string) int { - for i, b := range list { - if b == a { - return i - } - } - return -1 + for i, b := range list { + if b == a { + return i + } + } + return -1 } func (this *ATNDeserializer) isFeatureSupported(feature, actualUuid string) bool { - var idx1 = stringInSlice( feature, SUPPORTED_UUIDS ) - if (idx1 < 0) { - return false - } - var idx2 = stringInSlice( actualUuid, SUPPORTED_UUIDS ) - return idx2 >= idx1 + var idx1 = stringInSlice(feature, SUPPORTED_UUIDS) + if idx1 < 0 { + return false + } + var idx2 = stringInSlice(actualUuid, SUPPORTED_UUIDS) + return idx2 >= idx1 } -func (this *ATNDeserializer) deserialize(data []rune) *ATN { +func (this *ATNDeserializer) Deserialize(data []rune) *ATN { - this.reset(data) - this.checkVersion() - this.checkUUID() - var atn = this.readATN() - this.readStates(atn) - this.readRules(atn) - this.readModes(atn) - var sets = this.readSets(atn) - this.readEdges(atn, sets) - this.readDecisions(atn) - this.readLexerActions(atn) - this.markPrecedenceDecisions(atn) - this.verifyATN(atn) - if (this.deserializationOptions.generateRuleBypassTransitions && atn.grammarType == ATNTypeParser ) { - this.generateRuleBypassTransitions(atn) - // re-verify after modification - this.verifyATN(atn) - } - return atn + this.reset(data) + this.checkVersion() + this.checkUUID() + var atn = this.readATN() + this.readStates(atn) + this.readRules(atn) + this.readModes(atn) + var sets = this.readSets(atn) + this.readEdges(atn, sets) + this.readDecisions(atn) + this.readLexerActions(atn) + this.markPrecedenceDecisions(atn) + this.verifyATN(atn) + if this.deserializationOptions.generateRuleBypassTransitions && atn.grammarType == ATNTypeParser { + this.generateRuleBypassTransitions(atn) + // re-verify after modification + this.verifyATN(atn) + } + return atn } func (this *ATNDeserializer) reset(data []rune) { - // TODO not sure the copy is necessary here - temp := make([]rune, len(data)) + // TODO not sure the copy is necessary here + temp := make([]rune, len(data)) - for i, c := range data { - // don't adjust the first value since that's the version number - if (i == 0) { - temp[i] = c - } else if c > 1 { - temp[i] = c-2 - } else { - temp[i] = -1 - } - } + for i, c := range data { + // don't adjust the first value since that's the version number + if i == 0 { + temp[i] = c + } else if c > 1 { + temp[i] = c - 2 + } else { + temp[i] = -1 + } + } -// var adjust = func(c) { -// var v = c.charCodeAt(0) -// return v>1 ? v-2 : -1 -// } + // var adjust = func(c) { + // var v = c.charCodeAt(0) + // return v>1 ? 
v-2 : -1 + // } -// var temp = data.split("").map(adjust) -// // don't adjust the first value since that's the version number -// temp[0] = data.charCodeAt(0) + // var temp = data.split("").map(adjust) + // // don't adjust the first value since that's the version number + // temp[0] = data.charCodeAt(0) - this.data = temp - this.pos = 0 + this.data = temp + this.pos = 0 } func (this *ATNDeserializer) checkVersion() { - var version = this.readInt() - if ( version != SERIALIZED_VERSION ) { - panic("Could not deserialize ATN with version " + strconv.Itoa(version) + " (expected " + strconv.Itoa(SERIALIZED_VERSION) + ").") - } + var version = this.readInt() + if version != SERIALIZED_VERSION { + panic("Could not deserialize ATN with version " + strconv.Itoa(version) + " (expected " + strconv.Itoa(SERIALIZED_VERSION) + ").") + } } func (this *ATNDeserializer) checkUUID() { - var uuid = this.readUUID() - if ( stringInSlice(uuid, SUPPORTED_UUIDS ) <0 ) { - panic("Could not deserialize ATN with UUID: " + uuid + " (expected " + SERIALIZED_UUID + " or a legacy UUID).") - } - this.uuid = uuid + var uuid = this.readUUID() + if stringInSlice(uuid, SUPPORTED_UUIDS) < 0 { + panic("Could not deserialize ATN with UUID: " + uuid + " (expected " + SERIALIZED_UUID + " or a legacy UUID).") + } + this.uuid = uuid } func (this *ATNDeserializer) readATN() *ATN { - var grammarType = this.readInt() - var maxTokenType = this.readInt() - return NewATN(grammarType, maxTokenType) + var grammarType = this.readInt() + var maxTokenType = this.readInt() + return NewATN(grammarType, maxTokenType) } type LoopEndStateIntPair struct { - item0 *LoopEndState - item1 int + item0 *LoopEndState + item1 int } type BlockStartStateIntPair struct { - item0 *BlockStartState - item1 int + item0 *BlockStartState + item1 int } func (this *ATNDeserializer) readStates(atn *ATN) { - var loopBackStateNumbers = make([]LoopEndStateIntPair,0) - var endStateNumbers = make([]BlockStartStateIntPair,0) + var loopBackStateNumbers = make([]LoopEndStateIntPair, 0) + var endStateNumbers = make([]BlockStartStateIntPair, 0) - var nstates = this.readInt() - for i :=0; i 0) { - bypassStart.addTransition(ruleToStartState.getTransitions()[count-1],-1) - ruleToStartState.setTransitions( []ITransition{ ruleToStartState.getTransitions()[len(ruleToStartState.getTransitions()) - 1] }) - } - // link the new states - atn.ruleToStartState[idx].addTransition(NewEpsilonTransition(bypassStart,-1), -1) - bypassStop.addTransition(NewEpsilonTransition(endState, -1), -1) + var ruleToStartState = atn.ruleToStartState[idx] + var count = len(ruleToStartState.getTransitions()) + for count > 0 { + bypassStart.addTransition(ruleToStartState.getTransitions()[count-1], -1) + ruleToStartState.setTransitions([]ITransition{ruleToStartState.getTransitions()[len(ruleToStartState.getTransitions())-1]}) + } + // link the new states + atn.ruleToStartState[idx].addTransition(NewEpsilonTransition(bypassStart, -1), -1) + bypassStop.addTransition(NewEpsilonTransition(endState, -1), -1) - var matchState = NewBasicState() - atn.addState(matchState) - matchState.addTransition(NewAtomTransition(bypassStop, atn.ruleToTokenType[idx]), -1) - bypassStart.addTransition(NewEpsilonTransition(matchState, -1), -1) + var matchState = NewBasicState() + atn.addState(matchState) + matchState.addTransition(NewAtomTransition(bypassStop, atn.ruleToTokenType[idx]), -1) + bypassStart.addTransition(NewEpsilonTransition(matchState, -1), -1) } func (this *ATNDeserializer) stateIsEndStateFor(state IATNState, idx int) 
IATNState { - if ( state.getRuleIndex() != idx) { - return nil - } - if _,ok := state.(*StarLoopEntryState); !ok { - return nil - } - var maybeLoopEndState = state.getTransitions()[len(state.getTransitions()) - 1].getTarget() - if _,ok := maybeLoopEndState.(*LoopEndState); !ok { - return nil - } + if state.getRuleIndex() != idx { + return nil + } + if _, ok := state.(*StarLoopEntryState); !ok { + return nil + } + var maybeLoopEndState = state.getTransitions()[len(state.getTransitions())-1].getTarget() + if _, ok := maybeLoopEndState.(*LoopEndState); !ok { + return nil + } - _,ok := maybeLoopEndState.getTransitions()[0].getTarget().(*RuleStopState) + _, ok := maybeLoopEndState.getTransitions()[0].getTarget().(*RuleStopState) - if (maybeLoopEndState.(*LoopEndState).epsilonOnlyTransitions && ok) { - return state - } else { - return nil - } + if maybeLoopEndState.(*LoopEndState).epsilonOnlyTransitions && ok { + return state + } else { + return nil + } } // @@ -479,103 +478,103 @@ func (this *ATNDeserializer) stateIsEndStateFor(state IATNState, idx int) IATNSt // @param atn The ATN. // func (this *ATNDeserializer) markPrecedenceDecisions(atn *ATN) { - for i :=0; i< len(atn.states); i++ { + for i := 0; i < len(atn.states); i++ { var state = atn.states[i] - if _,ok := state.(*StarLoopEntryState); !ok { - continue - } - // We analyze the ATN to determine if this ATN decision state is the - // decision for the closure block that determines whether a - // precedence rule should continue or complete. - // - if ( atn.ruleToStartState[state.getRuleIndex()].isPrecedenceRule) { - var maybeLoopEndState = state.getTransitions()[len(state.getTransitions()) - 1].getTarget() - if s3, ok := maybeLoopEndState.(*LoopEndState); ok { - s := maybeLoopEndState.getTransitions()[0].getTarget() - _,ok2 := s.(*RuleStopState) - if ( s3.epsilonOnlyTransitions && ok2) { - s.(*StarLoopEntryState).precedenceRuleDecision = true - } - } - } + if _, ok := state.(*StarLoopEntryState); !ok { + continue + } + // We analyze the ATN to determine if this ATN decision state is the + // decision for the closure block that determines whether a + // precedence rule should continue or complete. 
+ // + if atn.ruleToStartState[state.getRuleIndex()].isPrecedenceRule { + var maybeLoopEndState = state.getTransitions()[len(state.getTransitions())-1].getTarget() + if s3, ok := maybeLoopEndState.(*LoopEndState); ok { + s := maybeLoopEndState.getTransitions()[0].getTarget() + _, ok2 := s.(*RuleStopState) + if s3.epsilonOnlyTransitions && ok2 { + s.(*StarLoopEntryState).precedenceRuleDecision = true + } + } + } } } func (this *ATNDeserializer) verifyATN(atn *ATN) { - if (!this.deserializationOptions.verifyATN) { - return - } - // verify assumptions - for i:=0; i= 0, "") - default: - _, ok := s2.(*RuleStopState) - this.checkCondition(len(s2.getTransitions()) <= 1 || ok, "") - } + case *StarLoopbackState: + this.checkCondition(len(state.getTransitions()) == 1, "") + _, ok2 := state.getTransitions()[0].getTarget().(*StarLoopEntryState) + this.checkCondition(ok2, "") + case *LoopEndState: + this.checkCondition(s2.loopBackState != nil, "") + case *RuleStartState: + this.checkCondition(s2.stopState != nil, "") + case *BlockStartState: + this.checkCondition(s2.endState != nil, "") + case *BlockEndState: + this.checkCondition(s2.startState != nil, "") + case *DecisionState: + this.checkCondition(len(s2.getTransitions()) <= 1 || s2.decision >= 0, "") + default: + _, ok := s2.(*RuleStopState) + this.checkCondition(len(s2.getTransitions()) <= 1 || ok, "") + } } } func (this *ATNDeserializer) checkCondition(condition bool, message string) { - if (!condition) { - if (message=="") { - message = "IllegalState" - } - panic(message) - } + if !condition { + if message == "" { + message = "IllegalState" + } + panic(message) + } } func (this *ATNDeserializer) readInt() int { - v := this.data[this.pos] - this.pos += 1 - return int(v) + v := this.data[this.pos] + this.pos += 1 + return int(v) } func (this *ATNDeserializer) readInt32() int { - var low = this.readInt() - var high = this.readInt() - return low | (high << 16) + var low = this.readInt() + var high = this.readInt() + return low | (high << 16) } //func (this *ATNDeserializer) readLong() int64 { @@ -587,129 +586,128 @@ func (this *ATNDeserializer) readInt32() int { func createByteToHex() []string { var bth = make([]string, 256) - for i:= 0; i < 256; i++ { - bth[i] = strings.ToUpper(hex.EncodeToString( []byte{ byte(i) } )) - } + for i := 0; i < 256; i++ { + bth[i] = strings.ToUpper(hex.EncodeToString([]byte{byte(i)})) + } return bth } var byteToHex = createByteToHex() - + func (this *ATNDeserializer) readUUID() string { var bb = make([]int, 16) - for i:=7; i>=0 ;i-- { + for i := 7; i >= 0; i-- { var integer = this.readInt() bb[(2*i)+1] = integer & 0xFF bb[2*i] = (integer >> 8) & 0xFF } - return byteToHex[bb[0]] + byteToHex[bb[1]] + - byteToHex[bb[2]] + byteToHex[bb[3]] + "-" + - byteToHex[bb[4]] + byteToHex[bb[5]] + "-" + - byteToHex[bb[6]] + byteToHex[bb[7]] + "-" + - byteToHex[bb[8]] + byteToHex[bb[9]] + "-" + - byteToHex[bb[10]] + byteToHex[bb[11]] + - byteToHex[bb[12]] + byteToHex[bb[13]] + - byteToHex[bb[14]] + byteToHex[bb[15]] + return byteToHex[bb[0]] + byteToHex[bb[1]] + + byteToHex[bb[2]] + byteToHex[bb[3]] + "-" + + byteToHex[bb[4]] + byteToHex[bb[5]] + "-" + + byteToHex[bb[6]] + byteToHex[bb[7]] + "-" + + byteToHex[bb[8]] + byteToHex[bb[9]] + "-" + + byteToHex[bb[10]] + byteToHex[bb[11]] + + byteToHex[bb[12]] + byteToHex[bb[13]] + + byteToHex[bb[14]] + byteToHex[bb[15]] } - func (this *ATNDeserializer) edgeFactory(atn *ATN, typeIndex, src, trg, arg1, arg2, arg3 int, sets []*IntervalSet) ITransition { - var target = atn.states[trg] + var 
target = atn.states[trg] - switch (typeIndex) { - case TransitionEPSILON : - return NewEpsilonTransition(target, -1) - case TransitionRANGE : - if (arg3 != 0) { - return NewRangeTransition(target, TokenEOF, arg2) - } else { - return NewRangeTransition(target, arg1, arg2) - } - case TransitionRULE : - return NewRuleTransition(atn.states[arg1], arg2, arg3, target) - case TransitionPREDICATE : - return NewPredicateTransition(target, arg1, arg2, arg3 != 0) - case TransitionPRECEDENCE: - return NewPrecedencePredicateTransition(target, arg1) - case TransitionATOM : - if (arg3 != 0) { - return NewAtomTransition(target, TokenEOF) - } else { - return NewAtomTransition(target, arg1) - } - case TransitionACTION : - return NewActionTransition(target, arg1, arg2, arg3 != 0) - case TransitionSET : - return NewSetTransition(target, sets[arg1]) - case TransitionNOT_SET : - return NewNotSetTransition(target, sets[arg1]) - case TransitionWILDCARD : - return NewWildcardTransition(target) - } + switch typeIndex { + case TransitionEPSILON: + return NewEpsilonTransition(target, -1) + case TransitionRANGE: + if arg3 != 0 { + return NewRangeTransition(target, TokenEOF, arg2) + } else { + return NewRangeTransition(target, arg1, arg2) + } + case TransitionRULE: + return NewRuleTransition(atn.states[arg1], arg2, arg3, target) + case TransitionPREDICATE: + return NewPredicateTransition(target, arg1, arg2, arg3 != 0) + case TransitionPRECEDENCE: + return NewPrecedencePredicateTransition(target, arg1) + case TransitionATOM: + if arg3 != 0 { + return NewAtomTransition(target, TokenEOF) + } else { + return NewAtomTransition(target, arg1) + } + case TransitionACTION: + return NewActionTransition(target, arg1, arg2, arg3 != 0) + case TransitionSET: + return NewSetTransition(target, sets[arg1]) + case TransitionNOT_SET: + return NewNotSetTransition(target, sets[arg1]) + case TransitionWILDCARD: + return NewWildcardTransition(target) + } - panic("The specified transition type is not valid.") + panic("The specified transition type is not valid.") } func (this *ATNDeserializer) stateFactory(typeIndex, ruleIndex int) IATNState { - var s IATNState - switch (typeIndex) { - case ATNStateInvalidType: - return nil; - case ATNStateBASIC : - s = NewBasicState() - case ATNStateRULE_START : - s = NewRuleStartState() - case ATNStateBLOCK_START : - s = NewBasicBlockStartState() - case ATNStatePLUS_BLOCK_START : - s = NewPlusBlockStartState() - case ATNStateSTAR_BLOCK_START : - s = NewStarBlockStartState() - case ATNStateTOKEN_START : - s = NewTokensStartState() - case ATNStateRULE_STOP : - s = NewRuleStopState() - case ATNStateBLOCK_END : - s = NewBlockEndState() - case ATNStateSTAR_LOOP_BACK : - s = NewStarLoopbackState() - case ATNStateSTAR_LOOP_ENTRY : - s = NewStarLoopEntryState() - case ATNStatePLUS_LOOP_BACK : - s = NewPlusLoopbackState() - case ATNStateLOOP_END : - s = NewLoopEndState() - default : - message := fmt.Sprintf("The specified state type %d is not valid.", typeIndex) - panic(message) - } + var s IATNState + switch typeIndex { + case ATNStateInvalidType: + return nil + case ATNStateBASIC: + s = NewBasicState() + case ATNStateRULE_START: + s = NewRuleStartState() + case ATNStateBLOCK_START: + s = NewBasicBlockStartState() + case ATNStatePLUS_BLOCK_START: + s = NewPlusBlockStartState() + case ATNStateSTAR_BLOCK_START: + s = NewStarBlockStartState() + case ATNStateTOKEN_START: + s = NewTokensStartState() + case ATNStateRULE_STOP: + s = NewRuleStopState() + case ATNStateBLOCK_END: + s = NewBlockEndState() + case 
ATNStateSTAR_LOOP_BACK: + s = NewStarLoopbackState() + case ATNStateSTAR_LOOP_ENTRY: + s = NewStarLoopEntryState() + case ATNStatePLUS_LOOP_BACK: + s = NewPlusLoopbackState() + case ATNStateLOOP_END: + s = NewLoopEndState() + default: + message := fmt.Sprintf("The specified state type %d is not valid.", typeIndex) + panic(message) + } - s.setRuleIndex(ruleIndex) - return s; + s.setRuleIndex(ruleIndex) + return s } func (this *ATNDeserializer) lexerActionFactory(typeIndex, data1, data2 int) ILexerAction { - switch (typeIndex) { - case LexerActionTypeCHANNEL: - return NewLexerChannelAction(data1) - case LexerActionTypeCUSTOM: - return NewLexerCustomAction(data1, data2) - case LexerActionTypeMODE: - return NewLexerModeAction(data1) - case LexerActionTypeMORE: - return LexerMoreActionINSTANCE - case LexerActionTypePOP_MODE: - return LexerPopModeActionINSTANCE - case LexerActionTypePUSH_MODE: - return NewLexerPushModeAction(data1) - case LexerActionTypeSKIP: - return LexerSkipActionINSTANCE - case LexerActionTypeTYPE: - return NewLexerTypeAction(data1) - default: - message := fmt.Sprintf("The specified lexer action typeIndex%d is not valid.", typeIndex) - panic(message) - } - return nil + switch typeIndex { + case LexerActionTypeCHANNEL: + return NewLexerChannelAction(data1) + case LexerActionTypeCUSTOM: + return NewLexerCustomAction(data1, data2) + case LexerActionTypeMODE: + return NewLexerModeAction(data1) + case LexerActionTypeMORE: + return LexerMoreActionINSTANCE + case LexerActionTypePOP_MODE: + return LexerPopModeActionINSTANCE + case LexerActionTypePUSH_MODE: + return NewLexerPushModeAction(data1) + case LexerActionTypeSKIP: + return LexerSkipActionINSTANCE + case LexerActionTypeTYPE: + return NewLexerTypeAction(data1) + default: + message := fmt.Sprintf("The specified lexer action typeIndex%d is not valid.", typeIndex) + panic(message) + } + return nil } diff --git a/runtime/Go/src/antlr4/ATNSimulator.go b/runtime/Go/src/antlr4/ATNSimulator.go index 25871c344..985bbb543 100644 --- a/runtime/Go/src/antlr4/ATNSimulator.go +++ b/runtime/Go/src/antlr4/ATNSimulator.go @@ -1,53 +1,51 @@ package antlr4 type ATNSimulator struct { - atn *ATN - sharedContextCache *PredictionContextCache + atn *ATN + sharedContextCache *PredictionContextCache } func NewATNSimulator(atn *ATN, sharedContextCache *PredictionContextCache) *ATNSimulator { - - // The context cache maps all PredictionContext objects that are == - // to a single cached copy. This cache is shared across all contexts - // in all ATNConfigs in all DFA states. We rebuild each ATNConfigSet - // to use only cached nodes/graphs in addDFAState(). We don't want to - // fill this during closure() since there are lots of contexts that - // pop up but are not used ever again. It also greatly slows down closure(). - // - //
This cache makes a huge difference in memory and a little bit in speed. - // For the Java grammar on java.*, it dropped the memory requirements - // at the end from 25M to 16M. We don't store any of the full context - // graphs in the DFA because they are limited to local context only, - // but apparently there's a lot of repetition there as well. We optimize - // the config contexts before storing the config set in the DFA states - // by literally rebuilding them with cached subgraphs only.
- // - //
I tried a cache for use during closure operations, that was - // whacked after each adaptivePredict(). It cost a little bit - // more time I think and doesn't save on the overall footprint - // so it's not worth the complexity.
- this := new(ATNSimulator) + // The context cache maps all PredictionContext objects that are == + // to a single cached copy. This cache is shared across all contexts + // in all ATNConfigs in all DFA states. We rebuild each ATNConfigSet + // to use only cached nodes/graphs in addDFAState(). We don't want to + // fill this during closure() since there are lots of contexts that + // pop up but are not used ever again. It also greatly slows down closure(). + // + //
This cache makes a huge difference in memory and a little bit in speed. + // For the Java grammar on java.*, it dropped the memory requirements + // at the end from 25M to 16M. We don't store any of the full context + // graphs in the DFA because they are limited to local context only, + // but apparently there's a lot of repetition there as well. We optimize + // the config contexts before storing the config set in the DFA states + // by literally rebuilding them with cached subgraphs only.
+ // + //
I tried a cache for use during closure operations, that was + // whacked after each adaptivePredict(). It cost a little bit + // more time I think and doesn't save on the overall footprint + // so it's not worth the complexity.
- this.InitATNSimulator(atn, sharedContextCache) + this := new(ATNSimulator) - return this + this.InitATNSimulator(atn, sharedContextCache) + + return this } func (this *ATNSimulator) InitATNSimulator(atn *ATN, sharedContextCache *PredictionContextCache) { - this.atn = atn - this.sharedContextCache = sharedContextCache + this.atn = atn + this.sharedContextCache = sharedContextCache } // Must distinguish between missing edge and edge we know leads nowhere/// var ATNSimulatorERROR = NewDFAState(0x7FFFFFFF, NewATNConfigSet(false)) func (this *ATNSimulator) getCachedContext(context IPredictionContext) IPredictionContext { - if (this.sharedContextCache == nil) { - return context - } - var visited = make(map[IPredictionContext]IPredictionContext) - return getCachedPredictionContext(context, this.sharedContextCache, visited) + if this.sharedContextCache == nil { + return context + } + var visited = make(map[IPredictionContext]IPredictionContext) + return getCachedPredictionContext(context, this.sharedContextCache, visited) } - - diff --git a/runtime/Go/src/antlr4/ATNState.go b/runtime/Go/src/antlr4/ATNState.go index b49fd0800..2d2f61ecc 100644 --- a/runtime/Go/src/antlr4/ATNState.go +++ b/runtime/Go/src/antlr4/ATNState.go @@ -1,22 +1,22 @@ package antlr4 + import "strconv" - const ( -// constants for serialization - ATNStateInvalidType = 0 - ATNStateBASIC = 1 - ATNStateRULE_START = 2 - ATNStateBLOCK_START = 3 + // constants for serialization + ATNStateInvalidType = 0 + ATNStateBASIC = 1 + ATNStateRULE_START = 2 + ATNStateBLOCK_START = 3 ATNStatePLUS_BLOCK_START = 4 ATNStateSTAR_BLOCK_START = 5 - ATNStateTOKEN_START = 6 - ATNStateRULE_STOP = 7 - ATNStateBLOCK_END = 8 - ATNStateSTAR_LOOP_BACK = 9 - ATNStateSTAR_LOOP_ENTRY = 10 - ATNStatePLUS_LOOP_BACK = 11 - ATNStateLOOP_END = 12 + ATNStateTOKEN_START = 6 + ATNStateRULE_STOP = 7 + ATNStateBLOCK_END = 8 + ATNStateSTAR_LOOP_BACK = 9 + ATNStateSTAR_LOOP_ENTRY = 10 + ATNStatePLUS_LOOP_BACK = 11 + ATNStateLOOP_END = 12 ATNStateINVALID_STATE_NUMBER = -1 ) @@ -36,11 +36,9 @@ const ( // "PLUS_LOOP_BACK", // "LOOP_END" ] - var INITIAL_NUM_TRANSITIONS = 4 type IATNState interface { - getEpsilonOnlyTransitions() bool getRuleIndex() int @@ -58,7 +56,7 @@ type IATNState interface { setStateNumber(int) getTransitions() []ITransition - setTransitions( []ITransition ) + setTransitions([]ITransition) addTransition(ITransition, int) toString() string @@ -66,10 +64,10 @@ type IATNState interface { type ATNState struct { // Which ATN are we in? - atn *ATN - stateNumber int - stateType int - ruleIndex int + atn *ATN + stateNumber int + stateType int + ruleIndex int epsilonOnlyTransitions bool // Track the transitions emanating from this ATN state. transitions []ITransition @@ -85,7 +83,7 @@ func NewATNState() *ATNState { return as } -func (as *ATNState) InitATNState(){ +func (as *ATNState) InitATNState() { // Which ATN are we in? 
as.atn = nil @@ -143,7 +141,7 @@ func (as *ATNState) getNextTokenWithinRule() *IntervalSet { return as.nextTokenWithinRule } -func (as *ATNState) setNextTokenWithinRule(v *IntervalSet) { +func (as *ATNState) setNextTokenWithinRule(v *IntervalSet) { as.nextTokenWithinRule = v } @@ -164,15 +162,15 @@ func (this *ATNState) isNonGreedyExitState() bool { } func (this *ATNState) addTransition(trans ITransition, index int) { - if ( len(this.transitions) == 0 ) { + if len(this.transitions) == 0 { this.epsilonOnlyTransitions = trans.getIsEpsilon() - } else if(this.epsilonOnlyTransitions != trans.getIsEpsilon()) { + } else if this.epsilonOnlyTransitions != trans.getIsEpsilon() { this.epsilonOnlyTransitions = false } - if (index==-1) { + if index == -1 { this.transitions = append(this.transitions, trans) } else { - this.transitions = append(this.transitions[:index], append([]ITransition{ trans }, this.transitions[index:]...)...) + this.transitions = append(this.transitions[:index], append([]ITransition{trans}, this.transitions[index:]...)...) // this.transitions.splice(index, 1, trans) } } @@ -192,7 +190,7 @@ func NewBasicState() *BasicState { type DecisionState struct { *ATNState - decision int + decision int nonGreedy bool } @@ -252,7 +250,6 @@ func NewBasicBlockStartState() *BasicBlockStartState { return this } - // Terminal node of a simple {@code (a|b|c)} block. type BlockEndState struct { ATNState @@ -291,7 +288,7 @@ func NewRuleStopState() *RuleStopState { type RuleStartState struct { ATNState - stopState IATNState + stopState IATNState isPrecedenceRule bool } @@ -369,7 +366,6 @@ func NewStarBlockStartState() *StarBlockStartState { return this } - type StarLoopbackState struct { *ATNState } @@ -384,11 +380,10 @@ func NewStarLoopbackState() *StarLoopbackState { return this } - type StarLoopEntryState struct { *DecisionState - loopBackState IATNState + loopBackState IATNState precedenceRuleDecision bool } @@ -408,7 +403,6 @@ func NewStarLoopEntryState() *StarLoopEntryState { return this } - // Mark the end of a * or + loop. type LoopEndState struct { *ATNState @@ -442,18 +436,3 @@ func NewTokensStartState() *TokensStartState { this.stateType = ATNStateTOKEN_START return this } - - - - - - - - - - - - - - - diff --git a/runtime/Go/src/antlr4/ATNType.go b/runtime/Go/src/antlr4/ATNType.go index 69e77450b..b5c990856 100644 --- a/runtime/Go/src/antlr4/ATNType.go +++ b/runtime/Go/src/antlr4/ATNType.go @@ -3,8 +3,6 @@ package antlr4 // Represents the type of recognizer an ATN applies to. const ( - ATNTypeLexer = 0 + ATNTypeLexer = 0 ATNTypeParser = 1 ) - - diff --git a/runtime/Go/src/antlr4/BufferedTokenStream.go b/runtime/Go/src/antlr4/BufferedTokenStream.go index fc45880ef..9cba9f7cf 100644 --- a/runtime/Go/src/antlr4/BufferedTokenStream.go +++ b/runtime/Go/src/antlr4/BufferedTokenStream.go @@ -1,4 +1,3 @@ - // This implementation of {@link TokenStream} loads tokens from a // {@link TokenSource} on-demand, and places the tokens in a buffer to provide // access to any previous token by index. @@ -11,16 +10,17 @@ // {@link CommonTokenStream}.

package antlr4 + import "strconv" // bt is just to keep meaningful parameter types to Parser type BufferedTokenStream struct { tokenSource TokenSource - tokens []*Token - index int + tokens []*Token + index int fetchedEOF bool - channel int + channel int } func NewBufferedTokenStream(tokenSource TokenSource) *BufferedTokenStream { @@ -30,7 +30,7 @@ func NewBufferedTokenStream(tokenSource TokenSource) *BufferedTokenStream { return ts } -func (ts *BufferedTokenStream) InitBufferedTokenStream(tokenSource TokenSource){ +func (ts *BufferedTokenStream) InitBufferedTokenStream(tokenSource TokenSource) { // The {@link TokenSource} from which tokens for bt stream are fetched. ts.tokenSource = tokenSource @@ -94,11 +94,11 @@ func (bt *BufferedTokenStream) get(index int) *Token { func (bt *BufferedTokenStream) consume() { var skipEofCheck = false - if (bt.index >= 0) { - if (bt.fetchedEOF) { + if bt.index >= 0 { + if bt.fetchedEOF { // the last token in tokens is EOF. skip check if p indexes any // fetched token except the last. - skipEofCheck = bt.index < len(bt.tokens) - 1 + skipEofCheck = bt.index < len(bt.tokens)-1 } else { // no EOF token in tokens. skip check if p indexes a fetched token. skipEofCheck = bt.index < len(bt.tokens) @@ -107,10 +107,10 @@ func (bt *BufferedTokenStream) consume() { // not yet initialized skipEofCheck = false } - if (!skipEofCheck && bt.LA(1) == TokenEOF) { - panic( "cannot consume EOF" ) + if !skipEofCheck && bt.LA(1) == TokenEOF { + panic("cannot consume EOF") } - if (bt.sync(bt.index + 1)) { + if bt.sync(bt.index + 1) { bt.index = bt.adjustSeekIndex(bt.index + 1) } } @@ -123,7 +123,7 @@ func (bt *BufferedTokenStream) consume() { // / func (bt *BufferedTokenStream) sync(i int) bool { var n = i - len(bt.tokens) + 1 // how many more elements we need? 
- if (n > 0) { + if n > 0 { var fetched = bt.fetch(n) return fetched >= n } @@ -143,7 +143,7 @@ func (bt *BufferedTokenStream) fetch(n int) int { var t *Token = bt.tokenSource.nextToken() t.tokenIndex = len(bt.tokens) bt.tokens = append(bt.tokens, t) - if (t.tokenType == TokenEOF) { + if t.tokenType == TokenEOF { bt.fetchedEOF = true return i + 1 } @@ -154,20 +154,20 @@ func (bt *BufferedTokenStream) fetch(n int) int { // Get all tokens from start..stop inclusively/// func (bt *BufferedTokenStream) getTokens(start int, stop int, types *IntervalSet) []*Token { - if (start < 0 || stop < 0) { + if start < 0 || stop < 0 { return nil } bt.lazyInit() var subset = make([]*Token, 0) - if (stop >= len(bt.tokens)) { + if stop >= len(bt.tokens) { stop = len(bt.tokens) - 1 } for i := start; i < stop; i++ { var t = bt.tokens[i] - if (t.tokenType == TokenEOF) { + if t.tokenType == TokenEOF { break } - if (types == nil || types.contains(t.tokenType)) { + if types == nil || types.contains(t.tokenType) { subset = append(subset, t) } } @@ -179,25 +179,25 @@ func (bt *BufferedTokenStream) LA(i int) int { } func (bt *BufferedTokenStream) LB(k int) *Token { - if (bt.index - k < 0) { + if bt.index-k < 0 { return nil } - return bt.tokens[bt.index - k] + return bt.tokens[bt.index-k] } func (bt *BufferedTokenStream) LT(k int) *Token { bt.lazyInit() - if (k == 0) { + if k == 0 { return nil } - if (k < 0) { + if k < 0 { return bt.LB(-k) } var i = bt.index + k - 1 bt.sync(i) - if (i >= len(bt.tokens)) { // return EOF token + if i >= len(bt.tokens) { // return EOF token // EOF must be last token - return bt.tokens[len(bt.tokens) - 1] + return bt.tokens[len(bt.tokens)-1] } return bt.tokens[i] } @@ -220,7 +220,7 @@ func (bt *BufferedTokenStream) adjustSeekIndex(i int) int { } func (bt *BufferedTokenStream) lazyInit() { - if (bt.index == -1) { + if bt.index == -1 { bt.setup() } } @@ -247,12 +247,12 @@ func (bt *BufferedTokenStream) setTokenSource(tokenSource TokenSource) { // / func (bt *BufferedTokenStream) nextTokenOnChannel(i, channel int) int { bt.sync(i) - if (i >= len(bt.tokens)) { + if i >= len(bt.tokens) { return -1 } var token = bt.tokens[i] - for (token.channel != bt.channel) { - if (token.tokenType == TokenEOF) { + for token.channel != bt.channel { + if token.tokenType == TokenEOF { return -1 } i += 1 @@ -266,7 +266,7 @@ func (bt *BufferedTokenStream) nextTokenOnChannel(i, channel int) int { // Return i if tokens[i] is on channel. Return -1 if there are no tokens // on channel between i and 0. func (bt *BufferedTokenStream) previousTokenOnChannel(i, channel int) int { - for (i >= 0 && bt.tokens[i].channel != channel) { + for i >= 0 && bt.tokens[i].channel != channel { i -= 1 } return i @@ -277,14 +277,14 @@ func (bt *BufferedTokenStream) previousTokenOnChannel(i, channel int) int { // EOF. If channel is -1, find any non default channel token. func (bt *BufferedTokenStream) getHiddenTokensToRight(tokenIndex, channel int) []*Token { bt.lazyInit() - if (tokenIndex < 0 || tokenIndex >= len(bt.tokens)) { - panic( strconv.Itoa(tokenIndex) + " not in 0.." + strconv.Itoa(len(bt.tokens) - 1) ) + if tokenIndex < 0 || tokenIndex >= len(bt.tokens) { + panic(strconv.Itoa(tokenIndex) + " not in 0.." 
+ strconv.Itoa(len(bt.tokens)-1)) } - var nextOnChannel = bt.nextTokenOnChannel(tokenIndex + 1, LexerDefaultTokenChannel) + var nextOnChannel = bt.nextTokenOnChannel(tokenIndex+1, LexerDefaultTokenChannel) var from_ = tokenIndex + 1 // if none onchannel to right, nextOnChannel=-1 so set to = last token var to int - if (nextOnChannel == -1){ + if nextOnChannel == -1 { to = len(bt.tokens) - 1 } else { to = nextOnChannel @@ -297,11 +297,11 @@ func (bt *BufferedTokenStream) getHiddenTokensToRight(tokenIndex, channel int) [ // If channel is -1, find any non default channel token. func (bt *BufferedTokenStream) getHiddenTokensToLeft(tokenIndex, channel int) []*Token { bt.lazyInit() - if (tokenIndex < 0 || tokenIndex >= len(bt.tokens)) { - panic( strconv.Itoa(tokenIndex) + " not in 0.." + strconv.Itoa(len(bt.tokens) - 1) ) + if tokenIndex < 0 || tokenIndex >= len(bt.tokens) { + panic(strconv.Itoa(tokenIndex) + " not in 0.." + strconv.Itoa(len(bt.tokens)-1)) } - var prevOnChannel = bt.previousTokenOnChannel(tokenIndex - 1, LexerDefaultTokenChannel) - if (prevOnChannel == tokenIndex - 1) { + var prevOnChannel = bt.previousTokenOnChannel(tokenIndex-1, LexerDefaultTokenChannel) + if prevOnChannel == tokenIndex-1 { return nil } // if none on channel to left, prevOnChannel=-1 then from=0 @@ -311,18 +311,18 @@ func (bt *BufferedTokenStream) getHiddenTokensToLeft(tokenIndex, channel int) [] } func (bt *BufferedTokenStream) filterForChannel(left, right, channel int) []*Token { - var hidden = make([]*Token,0) - for i := left; i < right + 1; i++ { + var hidden = make([]*Token, 0) + for i := left; i < right+1; i++ { var t = bt.tokens[i] - if (channel == -1) { - if (t.channel != LexerDefaultTokenChannel) { + if channel == -1 { + if t.channel != LexerDefaultTokenChannel { hidden = append(hidden, t) } - } else if (t.channel == channel) { + } else if t.channel == channel { hidden = append(hidden, t) } } - if (len(hidden) == 0) { + if len(hidden) == 0 { return nil } return hidden @@ -336,27 +336,27 @@ func (bt *BufferedTokenStream) getSourceName() string { func (bt *BufferedTokenStream) getText(interval *Interval) string { bt.lazyInit() bt.fill() - if (interval == nil) { - interval = NewInterval(0, len(bt.tokens) - 1) + if interval == nil { + interval = NewInterval(0, len(bt.tokens)-1) } var start = interval.start -// if s2, ok := start.(*Token); ok { -// start = s2.tokenIndex -// } + // if s2, ok := start.(*Token); ok { + // start = s2.tokenIndex + // } var stop = interval.stop -// if s2, ok := stop.(*Token); ok { -// stop = s2.tokenIndex -// } - if (start < 0 || stop < 0) { + // if s2, ok := stop.(*Token); ok { + // stop = s2.tokenIndex + // } + if start < 0 || stop < 0 { return "" } - if (stop >= len(bt.tokens)) { + if stop >= len(bt.tokens) { stop = len(bt.tokens) - 1 } var s = "" - for i := start; i < stop + 1; i++ { + for i := start; i < stop+1; i++ { var t = bt.tokens[i] - if (t.tokenType == TokenEOF) { + if t.tokenType == TokenEOF { break } s += t.text() @@ -367,9 +367,7 @@ func (bt *BufferedTokenStream) getText(interval *Interval) string { // Get all tokens from lexer until EOF/// func (bt *BufferedTokenStream) fill() { bt.lazyInit() - for (bt.fetch(1000) == 1000) { + for bt.fetch(1000) == 1000 { continue } } - - diff --git a/runtime/Go/src/antlr4/CharStream.go b/runtime/Go/src/antlr4/CharStream.go index 209be2053..fed348385 100644 --- a/runtime/Go/src/antlr4/CharStream.go +++ b/runtime/Go/src/antlr4/CharStream.go @@ -4,4 +4,4 @@ type CharStream interface { IntStream getTextFromInterval(*Interval) string 
-} \ No newline at end of file +} diff --git a/runtime/Go/src/antlr4/CommonTokenFactory.go b/runtime/Go/src/antlr4/CommonTokenFactory.go index 6515fcf7b..54861b02c 100644 --- a/runtime/Go/src/antlr4/CommonTokenFactory.go +++ b/runtime/Go/src/antlr4/CommonTokenFactory.go @@ -6,32 +6,32 @@ package antlr4 type TokenFactory interface { - create(source *TokenSourceCharStreamPair, ttype int, text string, channel, start, stop, line, column int) *Token + create(source *TokenSourceCharStreamPair, ttype int, text string, channel, start, stop, line, column int) *Token } type CommonTokenFactory struct { - copyText bool + copyText bool } func NewCommonTokenFactory(copyText bool) *CommonTokenFactory { - tf := new(CommonTokenFactory) + tf := new(CommonTokenFactory) - // Indicates whether {@link CommonToken//setText} should be called after - // constructing tokens to explicitly set the text. This is useful for cases - // where the input stream might not be able to provide arbitrary substrings - // of text from the input after the lexer creates a token (e.g. the - // implementation of {@link CharStream//getText} in - // {@link UnbufferedCharStream} panics an - // {@link UnsupportedOperationException}). Explicitly setting the token text - // allows {@link Token//getText} to be called at any time regardless of the - // input stream implementation. - // - //

- // The default value is {@code false} to avoid the performance and memory
- // overhead of copying text for every token unless explicitly requested.

- // - tf.copyText = copyText + // Indicates whether {@link CommonToken//setText} should be called after + // constructing tokens to explicitly set the text. This is useful for cases + // where the input stream might not be able to provide arbitrary substrings + // of text from the input after the lexer creates a token (e.g. the + // implementation of {@link CharStream//getText} in + // {@link UnbufferedCharStream} panics an + // {@link UnsupportedOperationException}). Explicitly setting the token text + // allows {@link Token//getText} to be called at any time regardless of the + // input stream implementation. + // + //

+ // The default value is {@code false} to avoid the performance and memory
+ // overhead of copying text for every token unless explicitly requested.

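A usage sketch for the flag documented above, written as if inside package antlr4 (the factory's fields and methods are unexported in this port) with an invented helper name: the shared default factory leaves copyText off, while a copying factory captures token text eagerly for streams that cannot be re-read later.

package antlr4

// copyTextFactories contrasts the shared default factory with a copying one.
// Hypothetical helper, not part of the patch.
func copyTextFactories() (*CommonTokenFactory, *CommonTokenFactory) {
	// Default: copyText is false, so token text is fetched lazily from the
	// CharStream when it is asked for.
	lazy := CommonTokenFactoryDEFAULT

	// copyText true: the text is copied into the token at creation time,
	// useful when the underlying stream cannot serve substrings later.
	eager := NewCommonTokenFactory(true)

	return lazy, eager
}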
+ // + tf.copyText = copyText return tf } @@ -46,21 +46,19 @@ func NewCommonTokenFactory(copyText bool) *CommonTokenFactory { var CommonTokenFactoryDEFAULT = NewCommonTokenFactory(false) func (this *CommonTokenFactory) create(source *TokenSourceCharStreamPair, ttype int, text string, channel, start, stop, line, column int) *Token { - var t = NewCommonToken(source, ttype, channel, start, stop) - t.line = line - t.column = column - if (text != "") { - t.setText( text ) - } else if (this.copyText && source.charStream != nil) { - t.setText( source.charStream.getTextFromInterval(NewInterval(start,stop))) - } - return t.Token + var t = NewCommonToken(source, ttype, channel, start, stop) + t.line = line + t.column = column + if text != "" { + t.setText(text) + } else if this.copyText && source.charStream != nil { + t.setText(source.charStream.getTextFromInterval(NewInterval(start, stop))) + } + return t.Token } func (this *CommonTokenFactory) createThin(ttype int, text string) *Token { - var t = NewCommonToken(nil, ttype, TokenDefaultChannel, -1, -1) - t.setText( text ) - return t.Token + var t = NewCommonToken(nil, ttype, TokenDefaultChannel, -1, -1) + t.setText(text) + return t.Token } - - diff --git a/runtime/Go/src/antlr4/CommonTokenStream.go b/runtime/Go/src/antlr4/CommonTokenStream.go index e6e24ca69..f360b0b0f 100644 --- a/runtime/Go/src/antlr4/CommonTokenStream.go +++ b/runtime/Go/src/antlr4/CommonTokenStream.go @@ -26,75 +26,74 @@ package antlr4 type CommonTokenStream struct { - *BufferedTokenStream + *BufferedTokenStream } func NewCommonTokenStream(lexer ILexer, channel int) *CommonTokenStream { - ts := new(CommonTokenStream) - ts.InitBufferedTokenStream(lexer) + ts := new(CommonTokenStream) + ts.InitBufferedTokenStream(lexer) - ts.channel = channel + ts.channel = channel - return ts + return ts } func (ts *CommonTokenStream) adjustSeekIndex(i int) int { - return ts.nextTokenOnChannel(i, ts.channel) + return ts.nextTokenOnChannel(i, ts.channel) } func (ts *CommonTokenStream) LB(k int) *Token { - if (k==0 || ts.index-k<0) { - return nil - } - var i = ts.index - var n = 1 - // find k good tokens looking backwards - for (n <= k) { - // skip off-channel tokens - i = ts.previousTokenOnChannel(i - 1, ts.channel) - n += 1 - } - if (i < 0) { - return nil - } - return ts.tokens[i] + if k == 0 || ts.index-k < 0 { + return nil + } + var i = ts.index + var n = 1 + // find k good tokens looking backwards + for n <= k { + // skip off-channel tokens + i = ts.previousTokenOnChannel(i-1, ts.channel) + n += 1 + } + if i < 0 { + return nil + } + return ts.tokens[i] } func (ts *CommonTokenStream) LT(k int) *Token { - ts.lazyInit() - if (k == 0) { - return nil - } - if (k < 0) { - return ts.LB(-k) - } - var i = ts.index - var n = 1 // we know tokens[pos] is a good one - // find k good tokens - for n < k { - // skip off-channel tokens, but make sure to not look past EOF - if (ts.sync(i + 1)) { - i = ts.nextTokenOnChannel(i + 1, ts.channel) - } - n += 1 - } - return ts.tokens[i] + ts.lazyInit() + if k == 0 { + return nil + } + if k < 0 { + return ts.LB(-k) + } + var i = ts.index + var n = 1 // we know tokens[pos] is a good one + // find k good tokens + for n < k { + // skip off-channel tokens, but make sure to not look past EOF + if ts.sync(i + 1) { + i = ts.nextTokenOnChannel(i+1, ts.channel) + } + n += 1 + } + return ts.tokens[i] } // Count EOF just once./// func (ts *CommonTokenStream) getNumberOfOnChannelTokens() int { - var n = 0 - ts.fill() - for i := 0; i < len(ts.tokens); i++ { - var t = ts.tokens[i] 
- if t.channel==ts.channel { - n += 1 - } - if t.tokenType==TokenEOF { - break - } - } - return n + var n = 0 + ts.fill() + for i := 0; i < len(ts.tokens); i++ { + var t = ts.tokens[i] + if t.channel == ts.channel { + n += 1 + } + if t.tokenType == TokenEOF { + break + } + } + return n } - diff --git a/runtime/Go/src/antlr4/DFA.go b/runtime/Go/src/antlr4/DFA.go index e28efb406..37a56f695 100644 --- a/runtime/Go/src/antlr4/DFA.go +++ b/runtime/Go/src/antlr4/DFA.go @@ -2,9 +2,9 @@ package antlr4 type DFA struct { atnStartState *DecisionState - decision int - _states map[string]*DFAState - s0 *DFAState + decision int + _states map[string]*DFAState + s0 *DFAState precedenceDfa bool } @@ -37,11 +37,11 @@ func NewDFA(atnStartState *DecisionState, decision int) *DFA { // @see //isPrecedenceDfa() func (this *DFA) getPrecedenceStartState(precedence int) *DFAState { - if (!(this.precedenceDfa)) { + if !(this.precedenceDfa) { panic("Only precedence DFAs may contain a precedence start state.") } // s0.edges is never nil for a precedence DFA - if (precedence < 0 || precedence >= len(this.s0.edges)) { + if precedence < 0 || precedence >= len(this.s0.edges) { return nil } return this.s0.edges[precedence] @@ -56,11 +56,11 @@ func (this *DFA) getPrecedenceStartState(precedence int) *DFAState { // @panics IllegalStateException if this is not a precedence DFA. // @see //isPrecedenceDfa() // -func (this *DFA) setPrecedenceStartState(precedence int, startState *DFAState) { - if (!(this.precedenceDfa)) { - panic ("Only precedence DFAs may contain a precedence start state.") +func (this *DFA) setPrecedenceStartState(precedence int, startState *DFAState) { + if !(this.precedenceDfa) { + panic("Only precedence DFAs may contain a precedence start state.") } - if (precedence < 0) { + if precedence < 0 { return } @@ -88,11 +88,11 @@ func (this *DFA) setPrecedenceStartState(precedence int, startState *DFAState) // {@code false} func (this *DFA) setPrecedenceDfa(precedenceDfa bool) { - if (this.precedenceDfa!=precedenceDfa) { + if this.precedenceDfa != precedenceDfa { this._states = make(map[string]*DFAState) - if (precedenceDfa) { + if precedenceDfa { var precedenceState = NewDFAState(-1, NewATNConfigSet(false)) - precedenceState.edges = make([]*DFAState,0) + precedenceState.edges = make([]*DFAState, 0) precedenceState.isAcceptState = false precedenceState.requiresFullContext = false this.s0 = precedenceState @@ -114,18 +114,18 @@ func (this *DFA) sortedStates() []*DFAState { return nil // states_ is a map of state/state, where key=value -// var keys = Object.keys(this._states) -// var list = [] -// for i:=0; i" + fmt.Sprint(s.predicates) - } else { - return baseStateStr + "=>" + fmt.Sprint(s.prediction) - } - } else { - return baseStateStr - } + var baseStateStr = a + "s" + strconv.Itoa(s.stateNumber) + b + if s.isAcceptState { + if s.predicates != nil { + return baseStateStr + "=>" + fmt.Sprint(s.predicates) + } else { + return baseStateStr + "=>" + fmt.Sprint(s.prediction) + } + } else { + return baseStateStr + } } type LexerDFASerializer struct { - DFASerializer + DFASerializer } func NewLexerDFASerializer(dfa *DFA) *LexerDFASerializer { - this := new(LexerDFASerializer) + this := new(LexerDFASerializer) - this.InitDFASerializer(dfa, nil, nil) + this.InitDFASerializer(dfa, nil, nil) return this } @@ -121,7 +121,3 @@ func NewLexerDFASerializer(dfa *DFA) *LexerDFASerializer { func (this *LexerDFASerializer) getEdgeLabel(i int) string { return "'" + string(i) + "'" } - - - - diff --git 
a/runtime/Go/src/antlr4/DFAState.go b/runtime/Go/src/antlr4/DFAState.go index 6bd8e9d33..215c81e99 100644 --- a/runtime/Go/src/antlr4/DFAState.go +++ b/runtime/Go/src/antlr4/DFAState.go @@ -8,7 +8,7 @@ import ( // Map a predicate to a predicted alternative./// type PredPrediction struct { - alt int + alt int pred SemanticContext } @@ -51,19 +51,19 @@ func (this *PredPrediction) toString() string { // / type DFAState struct { - stateNumber int - configs *ATNConfigSet - edges []*DFAState - isAcceptState bool - prediction int + stateNumber int + configs *ATNConfigSet + edges []*DFAState + isAcceptState bool + prediction int lexerActionExecutor *LexerActionExecutor requiresFullContext bool - predicates []*PredPrediction + predicates []*PredPrediction } func NewDFAState(stateNumber int, configs *ATNConfigSet) *DFAState { - if (configs == nil) { + if configs == nil { configs = NewATNConfigSet(false) } @@ -107,14 +107,14 @@ func NewDFAState(stateNumber int, configs *ATNConfigSet) *DFAState { // Get the set of all alts mentioned by all ATN configurations in this // DFA state. func (this *DFAState) getAltSet() *Set { - var alts = NewSet(nil,nil) - if (this.configs != nil) { + var alts = NewSet(nil, nil) + if this.configs != nil { for i := 0; i < len(this.configs.configs); i++ { var c = this.configs.configs[i] alts.add(c.getAlt()) } } - if (alts.length() == 0) { + if alts.length() == 0 { return nil } else { return alts @@ -134,7 +134,7 @@ func (this *DFAState) getAltSet() *Set { // {@link //stateNumber} is irrelevant.

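A small sketch of the contract described above, written as if inside package antlr4; the helper is hypothetical and only illustrates that the state number is not part of a DFA state's identity.

package antlr4

// dfaStateIdentity builds two states with different state numbers but the
// same (empty) configuration set; per the contract above, equals should
// ignore stateNumber and compare only the configurations.
func dfaStateIdentity() bool {
	configs := NewATNConfigSet(false)
	a := NewDFAState(1, configs)
	b := NewDFAState(2, configs)
	return a.equals(b)
}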
func (this *DFAState) equals(other interface{}) bool { - if (this == other) { + if this == other { return true } else if _, ok := other.(*DFAState); !ok { return false @@ -150,17 +150,15 @@ func (this *DFAState) toString() string { func (this *DFAState) hashString() string { panic("Not implementd") -// var s string -// if (this.acceptState){ -// -// } -// -// return "" + this.configs + -// (this.isAcceptState ? -// "=>" + (this.predicates != nil ? -// this.predicates : -// this.prediction) : -// "") + // var s string + // if (this.acceptState){ + // + // } + // + // return "" + this.configs + + // (this.isAcceptState ? + // "=>" + (this.predicates != nil ? + // this.predicates : + // this.prediction) : + // "") } - - diff --git a/runtime/Go/src/antlr4/DiagnosticErrorListener.go b/runtime/Go/src/antlr4/DiagnosticErrorListener.go index d4bcefca4..137572f8f 100644 --- a/runtime/Go/src/antlr4/DiagnosticErrorListener.go +++ b/runtime/Go/src/antlr4/DiagnosticErrorListener.go @@ -1,4 +1,5 @@ package antlr4 + import ( "strconv" ) @@ -38,32 +39,32 @@ func NewDiagnosticErrorListener(exactOnly bool) *DiagnosticErrorListener { } func (this *DiagnosticErrorListener) reportAmbiguity(recognizer *Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs *ATNConfigSet) { - if (this.exactOnly && !exact) { + if this.exactOnly && !exact { return } var msg = "reportAmbiguity d=" + - this.getDecisionDescription(recognizer, dfa) + - ": ambigAlts=" + - this.getConflictingAlts(ambigAlts, configs).toString() + - ", input='" + - recognizer.getTokenStream().getTextFromInterval(NewInterval(startIndex, stopIndex)) + "'" + this.getDecisionDescription(recognizer, dfa) + + ": ambigAlts=" + + this.getConflictingAlts(ambigAlts, configs).toString() + + ", input='" + + recognizer.getTokenStream().getTextFromInterval(NewInterval(startIndex, stopIndex)) + "'" recognizer.notifyErrorListeners(msg, nil, nil) } func (this *DiagnosticErrorListener) reportAttemptingFullContext(recognizer *Parser, dfa *DFA, startIndex, stopIndex int, conflictingAlts *BitSet, configs *ATNConfigSet) { var msg = "reportAttemptingFullContext d=" + - this.getDecisionDescription(recognizer, dfa) + - ", input='" + - recognizer.getTokenStream().getTextFromInterval(NewInterval(startIndex, stopIndex)) + "'" + this.getDecisionDescription(recognizer, dfa) + + ", input='" + + recognizer.getTokenStream().getTextFromInterval(NewInterval(startIndex, stopIndex)) + "'" recognizer.notifyErrorListeners(msg, nil, nil) } func (this *DiagnosticErrorListener) reportContextSensitivity(recognizer *Parser, dfa *DFA, startIndex, stopIndex, prediction int, configs *ATNConfigSet) { var msg = "reportContextSensitivity d=" + - this.getDecisionDescription(recognizer, dfa) + - ", input='" + - recognizer.getTokenStream().getTextFromInterval(NewInterval(startIndex, stopIndex)) + "'" + this.getDecisionDescription(recognizer, dfa) + + ", input='" + + recognizer.getTokenStream().getTextFromInterval(NewInterval(startIndex, stopIndex)) + "'" recognizer.notifyErrorListeners(msg, nil, nil) } @@ -72,11 +73,11 @@ func (this *DiagnosticErrorListener) getDecisionDescription(recognizer *Parser, var ruleIndex = dfa.atnStartState.ruleIndex var ruleNames = recognizer.getRuleNames() - if (ruleIndex < 0 || ruleIndex >= len(ruleNames)) { + if ruleIndex < 0 || ruleIndex >= len(ruleNames) { return strconv.Itoa(decision) } var ruleName = ruleNames[ruleIndex] - if (ruleName == "") { + if ruleName == "" { return strconv.Itoa(decision) } return strconv.Itoa(decision) + " (" + ruleName 
+ ")" @@ -94,7 +95,7 @@ func (this *DiagnosticErrorListener) getDecisionDescription(recognizer *Parser, // returns the set of alternatives represented in {@code configs}. // func (this *DiagnosticErrorListener) getConflictingAlts(reportedAlts *BitSet, set *ATNConfigSet) *BitSet { - if (reportedAlts != nil) { + if reportedAlts != nil { return reportedAlts } var result = NewBitSet() @@ -104,10 +105,10 @@ func (this *DiagnosticErrorListener) getConflictingAlts(reportedAlts *BitSet, se return result -// valuestrings := make([]string, len(result.values())) -// for i,v := range result.values() { -// valuestrings[i] = strconv.Itoa(v) -// } -// -// return "{" + strings.Join(valuestrings, ", ") + "}" -} \ No newline at end of file + // valuestrings := make([]string, len(result.values())) + // for i,v := range result.values() { + // valuestrings[i] = strconv.Itoa(v) + // } + // + // return "{" + strings.Join(valuestrings, ", ") + "}" +} diff --git a/runtime/Go/src/antlr4/ErrorListener.go b/runtime/Go/src/antlr4/ErrorListener.go index 3411aece4..9ff4ae913 100644 --- a/runtime/Go/src/antlr4/ErrorListener.go +++ b/runtime/Go/src/antlr4/ErrorListener.go @@ -1,7 +1,7 @@ package antlr4 import ( - "fmt" + "fmt" "strconv" ) @@ -9,7 +9,6 @@ import ( // default implementation of each method does nothing, but can be overridden as // necessary. - type IErrorListener interface { syntaxError(recognizer IRecognizer, offendingSymbol interface{}, line, column int, msg string, e IRecognitionException) reportAmbiguity(recognizer IParser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs *ATNConfigSet) @@ -18,7 +17,6 @@ type IErrorListener interface { } type DefaultErrorListener struct { - } func NewErrorListener() *DefaultErrorListener { @@ -63,7 +61,7 @@ var ConsoleErrorListenerINSTANCE = NewConsoleErrorListener() // // func (this *ConsoleErrorListener) syntaxError(recognizer IRecognizer, offendingSymbol interface{}, line, column int, msg string, e IRecognitionException) { - fmt.Errorf("line " + strconv.Itoa(line) + ":" + strconv.Itoa(column) + " " + msg) + fmt.Errorf("line " + strconv.Itoa(line) + ":" + strconv.Itoa(column) + " " + msg) } type ProxyErrorListener struct { @@ -72,41 +70,34 @@ type ProxyErrorListener struct { } func NewProxyErrorListener(delegates []IErrorListener) *ProxyErrorListener { - if (delegates==nil) { - panic("delegates is not provided") - } + if delegates == nil { + panic("delegates is not provided") + } l := new(ProxyErrorListener) - l.delegates = delegates + l.delegates = delegates return l } func (this *ProxyErrorListener) syntaxError(recognizer IRecognizer, offendingSymbol interface{}, line, column int, msg string, e IRecognitionException) { - for _,d := range this.delegates { + for _, d := range this.delegates { d.syntaxError(recognizer, offendingSymbol, line, column, msg, e) } } func (this *ProxyErrorListener) reportAmbiguity(recognizer IParser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs *ATNConfigSet) { - for _,d := range this.delegates { + for _, d := range this.delegates { d.reportAmbiguity(recognizer, dfa, startIndex, stopIndex, exact, ambigAlts, configs) } } func (this *ProxyErrorListener) reportAttemptingFullContext(recognizer IParser, dfa *DFA, startIndex, stopIndex int, conflictingAlts *BitSet, configs *ATNConfigSet) { - for _,d := range this.delegates { + for _, d := range this.delegates { d.reportAttemptingFullContext(recognizer, dfa, startIndex, stopIndex, conflictingAlts, configs) } } func (this *ProxyErrorListener) 
reportContextSensitivity(recognizer IParser, dfa *DFA, startIndex, stopIndex, prediction int, configs *ATNConfigSet) { - for _,d := range this.delegates { + for _, d := range this.delegates { d.reportContextSensitivity(recognizer, dfa, startIndex, stopIndex, prediction, configs) } } - - - - - - - diff --git a/runtime/Go/src/antlr4/ErrorStrategy.go b/runtime/Go/src/antlr4/ErrorStrategy.go index 583ecfb04..2ef3e6a2e 100644 --- a/runtime/Go/src/antlr4/ErrorStrategy.go +++ b/runtime/Go/src/antlr4/ErrorStrategy.go @@ -1,41 +1,41 @@ package antlr4 import ( - "fmt" - "strings" - "reflect" - "strconv" + "fmt" + "reflect" + "strconv" + "strings" ) type IErrorStrategy interface { - reset(IParser) - recoverInline(IParser) *Token - recover(IParser, IRecognitionException) - sync(IParser) - inErrorRecoveryMode(IParser) bool - reportError(IParser, IRecognitionException) - reportMatch(IParser) + reset(IParser) + recoverInline(IParser) *Token + recover(IParser, IRecognitionException) + sync(IParser) + inErrorRecoveryMode(IParser) bool + reportError(IParser, IRecognitionException) + reportMatch(IParser) } type ErrorStrategy struct { } -func (this *ErrorStrategy) reset(recognizer IParser){ +func (this *ErrorStrategy) reset(recognizer IParser) { } -func (this *ErrorStrategy) recoverInline(recognizer IParser){ +func (this *ErrorStrategy) recoverInline(recognizer IParser) { } -func (this *ErrorStrategy) recover(recognizer IParser, e IRecognitionException){ +func (this *ErrorStrategy) recover(recognizer IParser, e IRecognitionException) { } -func (this *ErrorStrategy) sync(recognizer IParser){ +func (this *ErrorStrategy) sync(recognizer IParser) { } -func (this *ErrorStrategy) inErrorRecoveryMode(recognizer IParser){ +func (this *ErrorStrategy) inErrorRecoveryMode(recognizer IParser) { } -func (this *ErrorStrategy) reportError(recognizer IParser, e IRecognitionException){ +func (this *ErrorStrategy) reportError(recognizer IParser, e IRecognitionException) { } func (this *ErrorStrategy) reportMatch(recognizer IParser) { @@ -46,45 +46,45 @@ func (this *ErrorStrategy) reportMatch(recognizer IParser) { // error reporting and recovery in ANTLR parsers. // type DefaultErrorStrategy struct { - *ErrorStrategy + *ErrorStrategy - errorRecoveryMode bool - lastErrorIndex int - lastErrorStates *IntervalSet + errorRecoveryMode bool + lastErrorIndex int + lastErrorStates *IntervalSet } func NewDefaultErrorStrategy() *DefaultErrorStrategy { d := new(DefaultErrorStrategy) - d.InitDefaultErrorStrategy() - return d + d.InitDefaultErrorStrategy() + return d } func (d *DefaultErrorStrategy) InitDefaultErrorStrategy() { - // Indicates whether the error strategy is currently "recovering from an - // error". This is used to suppress reporting multiple error messages while - // attempting to recover from a detected syntax error. - // - // @see //inErrorRecoveryMode - // - d.errorRecoveryMode = false + // Indicates whether the error strategy is currently "recovering from an + // error". This is used to suppress reporting multiple error messages while + // attempting to recover from a detected syntax error. + // + // @see //inErrorRecoveryMode + // + d.errorRecoveryMode = false - // The index into the input stream where the last error occurred. - // This is used to prevent infinite loops where an error is found - // but no token is consumed during recovery...another error is found, - // ad nauseum. This is a failsafe mechanism to guarantee that at least - // one token/tree node is consumed for two errors. 
- // - d.lastErrorIndex = -1 - d.lastErrorStates = nil + // The index into the input stream where the last error occurred. + // This is used to prevent infinite loops where an error is found + // but no token is consumed during recovery...another error is found, + // ad nauseum. This is a failsafe mechanism to guarantee that at least + // one token/tree node is consumed for two errors. + // + d.lastErrorIndex = -1 + d.lastErrorStates = nil } //

The default implementation simply calls {@link //endErrorCondition} to
// ensure that the handler is not in error recovery mode.

func (this *DefaultErrorStrategy) reset(recognizer IParser) { - this.endErrorCondition(recognizer) + this.endErrorCondition(recognizer) } // @@ -94,11 +94,11 @@ func (this *DefaultErrorStrategy) reset(recognizer IParser) { // @param recognizer the parser instance // func (this *DefaultErrorStrategy) beginErrorCondition(recognizer IParser) { - this.errorRecoveryMode = true + this.errorRecoveryMode = true } func (this *DefaultErrorStrategy) inErrorRecoveryMode(recognizer IParser) bool { - return this.errorRecoveryMode + return this.errorRecoveryMode } // @@ -108,9 +108,9 @@ func (this *DefaultErrorStrategy) inErrorRecoveryMode(recognizer IParser) bool { // @param recognizer // func (this *DefaultErrorStrategy) endErrorCondition(recognizer IParser) { - this.errorRecoveryMode = false - this.lastErrorStates = nil - this.lastErrorIndex = -1 + this.errorRecoveryMode = false + this.lastErrorStates = nil + this.lastErrorIndex = -1 } // @@ -119,7 +119,7 @@ func (this *DefaultErrorStrategy) endErrorCondition(recognizer IParser) { //

The default implementation simply calls {@link //endErrorCondition}.

// func (this *DefaultErrorStrategy) reportMatch(recognizer IParser) { - this.endErrorCondition(recognizer) + this.endErrorCondition(recognizer) } // @@ -142,25 +142,25 @@ func (this *DefaultErrorStrategy) reportMatch(recognizer IParser) { // // func (this *DefaultErrorStrategy) reportError(recognizer IParser, e IRecognitionException) { - // if we've already reported an error and have not matched a token - // yet successfully, don't report any errors. - if(this.inErrorRecoveryMode(recognizer)) { - return // don't report spurious errors - } - this.beginErrorCondition(recognizer) + // if we've already reported an error and have not matched a token + // yet successfully, don't report any errors. + if this.inErrorRecoveryMode(recognizer) { + return // don't report spurious errors + } + this.beginErrorCondition(recognizer) - switch t := e.(type) { - default: - fmt.Println("unknown recognition error type: " + reflect.TypeOf(e).Name()) -// fmt.Println(e.stack) - recognizer.notifyErrorListeners(e.getMessage(), e.getOffendingToken(), e) - case *NoViableAltException: - this.reportNoViableAlternative(recognizer, t) - case *InputMismatchException: - this.reportInputMismatch(recognizer, t) - case *FailedPredicateException: - this.reportFailedPredicate(recognizer, t) - } + switch t := e.(type) { + default: + fmt.Println("unknown recognition error type: " + reflect.TypeOf(e).Name()) + // fmt.Println(e.stack) + recognizer.notifyErrorListeners(e.getMessage(), e.getOffendingToken(), e) + case *NoViableAltException: + this.reportNoViableAlternative(recognizer, t) + case *InputMismatchException: + this.reportInputMismatch(recognizer, t) + case *FailedPredicateException: + this.reportFailedPredicate(recognizer, t) + } } // @@ -172,21 +172,21 @@ func (this *DefaultErrorStrategy) reportError(recognizer IParser, e IRecognition // func (this *DefaultErrorStrategy) recover(recognizer IParser, e IRecognitionException) { - if (this.lastErrorIndex==recognizer.getInputStream().index() && - this.lastErrorStates != nil && this.lastErrorStates.contains(recognizer.getState())) { + if this.lastErrorIndex == recognizer.getInputStream().index() && + this.lastErrorStates != nil && this.lastErrorStates.contains(recognizer.getState()) { // uh oh, another error at same token index and previously-visited // state in ATN must be a case where LT(1) is in the recovery // token set so nothing got consumed. Consume a single token // at least to prevent an infinite loop this is a failsafe. recognizer.consume() - } - this.lastErrorIndex = recognizer.getInputStream().index() - if (this.lastErrorStates == nil) { - this.lastErrorStates = NewIntervalSet() - } - this.lastErrorStates.addOne(recognizer.getState()) - var followSet = this.getErrorRecoverySet(recognizer) - this.consumeUntil(recognizer, followSet) + } + this.lastErrorIndex = recognizer.getInputStream().index() + if this.lastErrorStates == nil { + this.lastErrorStates = NewIntervalSet() + } + this.lastErrorStates.addOne(recognizer.getState()) + var followSet = this.getErrorRecoverySet(recognizer) + this.consumeUntil(recognizer, followSet) } // The default implementation of {@link ANTLRErrorStrategy//sync} makes sure @@ -235,43 +235,43 @@ func (this *DefaultErrorStrategy) recover(recognizer IParser, e IRecognitionExce // functionality by simply overriding this method as a blank { }.

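As the comment above notes, the sync behaviour can be switched off by overriding the method with an empty body; a minimal sketch of such a strategy, modelled on the BailErrorStrategy later in this patch (NoSyncErrorStrategy is an invented name, not part of the patch):

package antlr4

// NoSyncErrorStrategy keeps the default recovery behaviour but disables the
// pre-loop/pre-subrule sync described above, mirroring what
// BailErrorStrategy.sync does further down in this patch.
type NoSyncErrorStrategy struct {
	DefaultErrorStrategy
}

func NewNoSyncErrorStrategy() *NoSyncErrorStrategy {
	s := new(NoSyncErrorStrategy)
	s.InitDefaultErrorStrategy()
	return s
}

// sync intentionally does nothing, so the parser never conjures errors at
// loop or subrule entry points.
func (this *NoSyncErrorStrategy) sync(recognizer IParser) {
	// pass
}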
// func (this *DefaultErrorStrategy) sync(recognizer IParser) { - // If already recovering, don't try to sync - if (this.inErrorRecoveryMode(recognizer)) { - return - } - var s = recognizer.getInterpreter().atn.states[recognizer.getState()] - var la = recognizer.getTokenStream().LA(1) - // try cheaper subset first might get lucky. seems to shave a wee bit off - if (la==TokenEOF || recognizer.getATN().nextTokens(s,nil).contains(la)) { - return - } - // Return but don't end recovery. only do that upon valid token match - if(recognizer.isExpectedToken(la)) { - return - } - switch (s.getStateType()) { - case ATNStateBLOCK_START: - case ATNStateSTAR_BLOCK_START: - case ATNStatePLUS_BLOCK_START: - case ATNStateSTAR_LOOP_ENTRY: - // report error and recover if possible - if( this.singleTokenDeletion(recognizer) != nil) { - return - } else { - panic(NewInputMismatchException(recognizer)) - } - break - case ATNStatePLUS_LOOP_BACK: - case ATNStateSTAR_LOOP_BACK: - this.reportUnwantedToken(recognizer) - var expecting = NewIntervalSet() - expecting.addSet(recognizer.getExpectedTokens()) - var whatFollowsLoopIterationOrRule = expecting.addSet(this.getErrorRecoverySet(recognizer)) - this.consumeUntil(recognizer, whatFollowsLoopIterationOrRule) - break - default: - // do nothing if we can't identify the exact kind of ATN state - } + // If already recovering, don't try to sync + if this.inErrorRecoveryMode(recognizer) { + return + } + var s = recognizer.getInterpreter().atn.states[recognizer.getState()] + var la = recognizer.getTokenStream().LA(1) + // try cheaper subset first might get lucky. seems to shave a wee bit off + if la == TokenEOF || recognizer.getATN().nextTokens(s, nil).contains(la) { + return + } + // Return but don't end recovery. only do that upon valid token match + if recognizer.isExpectedToken(la) { + return + } + switch s.getStateType() { + case ATNStateBLOCK_START: + case ATNStateSTAR_BLOCK_START: + case ATNStatePLUS_BLOCK_START: + case ATNStateSTAR_LOOP_ENTRY: + // report error and recover if possible + if this.singleTokenDeletion(recognizer) != nil { + return + } else { + panic(NewInputMismatchException(recognizer)) + } + break + case ATNStatePLUS_LOOP_BACK: + case ATNStateSTAR_LOOP_BACK: + this.reportUnwantedToken(recognizer) + var expecting = NewIntervalSet() + expecting.addSet(recognizer.getExpectedTokens()) + var whatFollowsLoopIterationOrRule = expecting.addSet(this.getErrorRecoverySet(recognizer)) + this.consumeUntil(recognizer, whatFollowsLoopIterationOrRule) + break + default: + // do nothing if we can't identify the exact kind of ATN state + } } // This is called by {@link //reportError} when the exception is a @@ -283,19 +283,19 @@ func (this *DefaultErrorStrategy) sync(recognizer IParser) { // @param e the recognition exception // func (this *DefaultErrorStrategy) reportNoViableAlternative(recognizer IParser, e *NoViableAltException) { - var tokens = recognizer.getTokenStream() - var input string - if(tokens != nil) { - if (e.startToken.tokenType==TokenEOF) { - input = "" - } else { - input = tokens.getTextFromTokens(e.startToken, e.offendingToken) - } - } else { - input = "" - } - var msg = "no viable alternative at input " + this.escapeWSAndQuote(input) - recognizer.notifyErrorListeners(msg, e.offendingToken, e) + var tokens = recognizer.getTokenStream() + var input string + if tokens != nil { + if e.startToken.tokenType == TokenEOF { + input = "" + } else { + input = tokens.getTextFromTokens(e.startToken, e.offendingToken) + } + } else { + input = "" + } + var msg = 
"no viable alternative at input " + this.escapeWSAndQuote(input) + recognizer.notifyErrorListeners(msg, e.offendingToken, e) } // @@ -308,9 +308,9 @@ func (this *DefaultErrorStrategy) reportNoViableAlternative(recognizer IParser, // @param e the recognition exception // func (this *DefaultErrorStrategy) reportInputMismatch(recognizer IParser, e *InputMismatchException) { - var msg = "mismatched input " + this.getTokenErrorDisplay(e.offendingToken) + - " expecting " + e.getExpectedTokens().toStringVerbose(recognizer.getLiteralNames(), recognizer.getSymbolicNames(), false) - recognizer.notifyErrorListeners(msg, e.offendingToken, e) + var msg = "mismatched input " + this.getTokenErrorDisplay(e.offendingToken) + + " expecting " + e.getExpectedTokens().toStringVerbose(recognizer.getLiteralNames(), recognizer.getSymbolicNames(), false) + recognizer.notifyErrorListeners(msg, e.offendingToken, e) } // @@ -323,9 +323,9 @@ func (this *DefaultErrorStrategy) reportInputMismatch(recognizer IParser, e *Inp // @param e the recognition exception // func (this *DefaultErrorStrategy) reportFailedPredicate(recognizer IParser, e *FailedPredicateException) { - var ruleName = recognizer.getRuleNames()[recognizer.getParserRuleContext().getRuleIndex()] - var msg = "rule " + ruleName + " " + e.message - recognizer.notifyErrorListeners(msg, e.offendingToken, e) + var ruleName = recognizer.getRuleNames()[recognizer.getParserRuleContext().getRuleIndex()] + var msg = "rule " + ruleName + " " + e.message + recognizer.notifyErrorListeners(msg, e.offendingToken, e) } // This method is called to report a syntax error which requires the removal @@ -346,17 +346,18 @@ func (this *DefaultErrorStrategy) reportFailedPredicate(recognizer IParser, e *F // @param recognizer the parser instance // func (this *DefaultErrorStrategy) reportUnwantedToken(recognizer IParser) { - if (this.inErrorRecoveryMode(recognizer)) { - return - } - this.beginErrorCondition(recognizer) - var t = recognizer.getCurrentToken() - var tokenName = this.getTokenErrorDisplay(t) - var expecting = this.getExpectedTokens(recognizer) - var msg = "extraneous input " + tokenName + " expecting " + - expecting.toStringVerbose(recognizer.getLiteralNames(), recognizer.getSymbolicNames(), false) - recognizer.notifyErrorListeners(msg, t, nil) + if this.inErrorRecoveryMode(recognizer) { + return + } + this.beginErrorCondition(recognizer) + var t = recognizer.getCurrentToken() + var tokenName = this.getTokenErrorDisplay(t) + var expecting = this.getExpectedTokens(recognizer) + var msg = "extraneous input " + tokenName + " expecting " + + expecting.toStringVerbose(recognizer.getLiteralNames(), recognizer.getSymbolicNames(), false) + recognizer.notifyErrorListeners(msg, t, nil) } + // This method is called to report a syntax error which requires the // insertion of a missing token into the input stream. At the time this // method is called, the missing token has not yet been inserted. 
When this @@ -374,15 +375,15 @@ func (this *DefaultErrorStrategy) reportUnwantedToken(recognizer IParser) { // @param recognizer the parser instance // func (this *DefaultErrorStrategy) reportMissingToken(recognizer IParser) { - if ( this.inErrorRecoveryMode(recognizer)) { - return - } - this.beginErrorCondition(recognizer) - var t = recognizer.getCurrentToken() - var expecting = this.getExpectedTokens(recognizer) - var msg = "missing " + expecting.toStringVerbose(recognizer.getLiteralNames(), recognizer.getSymbolicNames(), false) + - " at " + this.getTokenErrorDisplay(t) - recognizer.notifyErrorListeners(msg, t, nil) + if this.inErrorRecoveryMode(recognizer) { + return + } + this.beginErrorCondition(recognizer) + var t = recognizer.getCurrentToken() + var expecting = this.getExpectedTokens(recognizer) + var msg = "missing " + expecting.toStringVerbose(recognizer.getLiteralNames(), recognizer.getSymbolicNames(), false) + + " at " + this.getTokenErrorDisplay(t) + recognizer.notifyErrorListeners(msg, t, nil) } //

The default implementation attempts to recover from the mismatched input @@ -435,20 +436,20 @@ func (this *DefaultErrorStrategy) reportMissingToken(recognizer IParser) { // in rule {@code atom}. It can assume that you forgot the {@code ')'}. // func (this *DefaultErrorStrategy) recoverInline(recognizer IParser) *Token { - // SINGLE TOKEN DELETION - var matchedSymbol = this.singleTokenDeletion(recognizer) - if (matchedSymbol != nil) { - // we have deleted the extra token. - // now, move past ttype token as if all were ok - recognizer.consume() - return matchedSymbol - } - // SINGLE TOKEN INSERTION - if (this.singleTokenInsertion(recognizer)) { - return this.getMissingSymbol(recognizer) - } - // even that didn't work must panic the exception - panic(NewInputMismatchException(recognizer)) + // SINGLE TOKEN DELETION + var matchedSymbol = this.singleTokenDeletion(recognizer) + if matchedSymbol != nil { + // we have deleted the extra token. + // now, move past ttype token as if all were ok + recognizer.consume() + return matchedSymbol + } + // SINGLE TOKEN INSERTION + if this.singleTokenInsertion(recognizer) { + return this.getMissingSymbol(recognizer) + } + // even that didn't work must panic the exception + panic(NewInputMismatchException(recognizer)) } // @@ -469,20 +470,20 @@ func (this *DefaultErrorStrategy) recoverInline(recognizer IParser) *Token { // strategy for the current mismatched input, otherwise {@code false} // func (this *DefaultErrorStrategy) singleTokenInsertion(recognizer IParser) bool { - var currentSymbolType = recognizer.getTokenStream().LA(1) - // if current token is consistent with what could come after current - // ATN state, then we know we're missing a token error recovery - // is free to conjure up and insert the missing token - var atn = recognizer.getInterpreter().atn - var currentState = atn.states[recognizer.getState()] - var next = currentState.getTransitions()[0].getTarget() - var expectingAtLL2 = atn.nextTokens(next, recognizer.getParserRuleContext()) - if (expectingAtLL2.contains(currentSymbolType) ){ - this.reportMissingToken(recognizer) - return true - } else { - return false - } + var currentSymbolType = recognizer.getTokenStream().LA(1) + // if current token is consistent with what could come after current + // ATN state, then we know we're missing a token error recovery + // is free to conjure up and insert the missing token + var atn = recognizer.getInterpreter().atn + var currentState = atn.states[recognizer.getState()] + var next = currentState.getTransitions()[0].getTarget() + var expectingAtLL2 = atn.nextTokens(next, recognizer.getParserRuleContext()) + if expectingAtLL2.contains(currentSymbolType) { + this.reportMissingToken(recognizer) + return true + } else { + return false + } } // This method implements the single-token deletion inline error recovery @@ -504,22 +505,22 @@ func (this *DefaultErrorStrategy) singleTokenInsertion(recognizer IParser) bool // {@code nil} // func (this *DefaultErrorStrategy) singleTokenDeletion(recognizer IParser) *Token { - var nextTokenType = recognizer.getTokenStream().LA(2) - var expecting = this.getExpectedTokens(recognizer) - if (expecting.contains(nextTokenType)) { - this.reportUnwantedToken(recognizer) - // print("recoverFromMismatchedToken deleting " \ - // + str(recognizer.getTokenStream().LT(1)) \ - // + " since " + str(recognizer.getTokenStream().LT(2)) \ - // + " is what we want", file=sys.stderr) - recognizer.consume() // simply delete extra token - // we want to return the token we're actually matching 
- var matchedSymbol = recognizer.getCurrentToken() - this.reportMatch(recognizer) // we know current token is correct - return matchedSymbol - } else { - return nil - } + var nextTokenType = recognizer.getTokenStream().LA(2) + var expecting = this.getExpectedTokens(recognizer) + if expecting.contains(nextTokenType) { + this.reportUnwantedToken(recognizer) + // print("recoverFromMismatchedToken deleting " \ + // + str(recognizer.getTokenStream().LT(1)) \ + // + " since " + str(recognizer.getTokenStream().LT(2)) \ + // + " is what we want", file=sys.stderr) + recognizer.consume() // simply delete extra token + // we want to return the token we're actually matching + var matchedSymbol = recognizer.getCurrentToken() + this.reportMatch(recognizer) // we know current token is correct + return matchedSymbol + } else { + return nil + } } // Conjure up a missing token during error recovery. @@ -542,27 +543,27 @@ func (this *DefaultErrorStrategy) singleTokenDeletion(recognizer IParser) *Token // override this method to create the appropriate tokens. // func (this *DefaultErrorStrategy) getMissingSymbol(recognizer IParser) *Token { - var currentSymbol = recognizer.getCurrentToken() - var expecting = this.getExpectedTokens(recognizer) - var expectedTokenType = expecting.first() - var tokenText string - if (expectedTokenType==TokenEOF) { - tokenText = "" - } else { - tokenText = "" - } - var current = currentSymbol - var lookback = recognizer.getTokenStream().LT(-1) - if (current.tokenType==TokenEOF && lookback != nil) { - current = lookback - } + var currentSymbol = recognizer.getCurrentToken() + var expecting = this.getExpectedTokens(recognizer) + var expectedTokenType = expecting.first() + var tokenText string + if expectedTokenType == TokenEOF { + tokenText = "" + } else { + tokenText = "" + } + var current = currentSymbol + var lookback = recognizer.getTokenStream().LT(-1) + if current.tokenType == TokenEOF && lookback != nil { + current = lookback + } - tf := recognizer.getTokenFactory() - return tf.create(current.source, expectedTokenType, tokenText, TokenDefaultChannel, -1, -1, current.line, current.column) + tf := recognizer.getTokenFactory() + return tf.create(current.source, expectedTokenType, tokenText, TokenDefaultChannel, -1, -1, current.line, current.column) } func (this *DefaultErrorStrategy) getExpectedTokens(recognizer IParser) *IntervalSet { - return recognizer.getExpectedTokens() + return recognizer.getExpectedTokens() } // How should a token be displayed in an error message? The default @@ -574,25 +575,25 @@ func (this *DefaultErrorStrategy) getExpectedTokens(recognizer IParser) *Interva // so that it creates a NewJava type. 
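A sketch of the kind of override the comment above has in mind: a development-time strategy whose token display includes the token type and position rather than just the text. VerboseErrorStrategy is an invented name; the Token fields and the escapeWSAndQuote helper are the ones used elsewhere in this patch.

package antlr4

import "strconv"

// VerboseErrorStrategy is a hypothetical development aid that reports the
// token type and line/column alongside the escaped token text.
type VerboseErrorStrategy struct {
	DefaultErrorStrategy
}

func (this *VerboseErrorStrategy) getTokenErrorDisplay(t *Token) string {
	if t == nil {
		return "<no token>"
	}
	return this.escapeWSAndQuote(t.text()) +
		" (type " + strconv.Itoa(t.tokenType) +
		" at " + strconv.Itoa(t.line) + ":" + strconv.Itoa(t.column) + ")"
}

Note that Go struct embedding does not give dynamic dispatch, so the DefaultErrorStrategy methods in this patch that call getTokenErrorDisplay internally would still use their own version; the override only takes effect for callers that go through the concrete *VerboseErrorStrategy.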
// func (this *DefaultErrorStrategy) getTokenErrorDisplay(t *Token) string { - if (t == nil) { - return "" - } - var s = t.text() - if (s == "") { - if (t.tokenType==TokenEOF) { - s = "" - } else { - s = "<" + strconv.Itoa(t.tokenType) + ">" - } - } - return this.escapeWSAndQuote(s) + if t == nil { + return "" + } + var s = t.text() + if s == "" { + if t.tokenType == TokenEOF { + s = "" + } else { + s = "<" + strconv.Itoa(t.tokenType) + ">" + } + } + return this.escapeWSAndQuote(s) } func (this *DefaultErrorStrategy) escapeWSAndQuote(s string) string { - s = strings.Replace(s,"\t","\\t", -1) - s = strings.Replace(s,"\n","\\n", -1) - s = strings.Replace(s,"\r","\\r", -1) - return "'" + s + "'" + s = strings.Replace(s, "\t", "\\t", -1) + s = strings.Replace(s, "\n", "\\n", -1) + s = strings.Replace(s, "\r", "\\r", -1) + return "'" + s + "'" } // Compute the error recovery set for the current rule. During @@ -688,28 +689,28 @@ func (this *DefaultErrorStrategy) escapeWSAndQuote(s string) string { // at run-time upon error to avoid overhead during parsing. // func (this *DefaultErrorStrategy) getErrorRecoverySet(recognizer IParser) *IntervalSet { - var atn = recognizer.getInterpreter().atn - var ctx = recognizer.getParserRuleContext() - var recoverSet = NewIntervalSet() - for (ctx != nil && ctx.getInvokingState()>=0) { - // compute what follows who invoked us - var invokingState = atn.states[ctx.getInvokingState()] - var rt = invokingState.getTransitions()[0] - var follow = atn.nextTokens(rt.(*RuleTransition).followState, nil) - recoverSet.addSet(follow) - ctx = ctx.getParent().(IParserRuleContext) - } - recoverSet.removeOne(TokenEpsilon) - return recoverSet + var atn = recognizer.getInterpreter().atn + var ctx = recognizer.getParserRuleContext() + var recoverSet = NewIntervalSet() + for ctx != nil && ctx.getInvokingState() >= 0 { + // compute what follows who invoked us + var invokingState = atn.states[ctx.getInvokingState()] + var rt = invokingState.getTransitions()[0] + var follow = atn.nextTokens(rt.(*RuleTransition).followState, nil) + recoverSet.addSet(follow) + ctx = ctx.getParent().(IParserRuleContext) + } + recoverSet.removeOne(TokenEpsilon) + return recoverSet } // Consume tokens until one matches the given token set.// func (this *DefaultErrorStrategy) consumeUntil(recognizer IParser, set *IntervalSet) { - var ttype = recognizer.getTokenStream().LA(1) - for( ttype != TokenEOF && !set.contains(ttype)) { - recognizer.consume() - ttype = recognizer.getTokenStream().LA(1) - } + var ttype = recognizer.getTokenStream().LA(1) + for ttype != TokenEOF && !set.contains(ttype) { + recognizer.consume() + ttype = recognizer.getTokenStream().LA(1) + } } // @@ -741,13 +742,13 @@ func (this *DefaultErrorStrategy) consumeUntil(recognizer IParser, set *Interval // @see Parser//setErrorHandler(ANTLRErrorStrategy) type BailErrorStrategy struct { - DefaultErrorStrategy + DefaultErrorStrategy } func NewBailErrorStrategy() *BailErrorStrategy { - this := new(BailErrorStrategy) - this.InitDefaultErrorStrategy() + this := new(BailErrorStrategy) + this.InitDefaultErrorStrategy() return this } @@ -758,24 +759,22 @@ func NewBailErrorStrategy() *BailErrorStrategy { // original {@link RecognitionException}. 
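For context, the ParseCancellationException raised here is a panic in this port, so callers typically convert it back into an ordinary error at the parse entry point. A sketch follows; the wrapper name is invented, and wiring the strategy into a parser is omitted because the parser's setter is not part of this hunk.

package antlr4

import "errors"

// bailOnFirstError is a hypothetical wrapper around a parse call made with
// BailErrorStrategy installed: the strategy panics with a
// ParseCancellationException at the first syntax error, and this helper
// turns that panic back into a normal Go error.
func bailOnFirstError(parse func()) (err error) {
	defer func() {
		if r := recover(); r != nil {
			if _, ok := r.(*ParseCancellationException); ok {
				err = errors.New("parse aborted at first syntax error")
				return
			}
			panic(r) // some other panic; do not swallow it
		}
	}()
	parse()
	return nil
}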
// func (this *BailErrorStrategy) recover(recognizer IParser, e IRecognitionException) { - var context = recognizer.getParserRuleContext() - for (context != nil) { - context.setException(e) - context = context.getParent().(IParserRuleContext) - } - panic(NewParseCancellationException()) // TODO we don't emit e properly + var context = recognizer.getParserRuleContext() + for context != nil { + context.setException(e) + context = context.getParent().(IParserRuleContext) + } + panic(NewParseCancellationException()) // TODO we don't emit e properly } - + // Make sure we don't attempt to recover inline if the parser // successfully recovers, it won't panic an exception. // func (this *BailErrorStrategy) recoverInline(recognizer IParser) { - this.recover(recognizer, NewInputMismatchException(recognizer)) + this.recover(recognizer, NewInputMismatchException(recognizer)) } // Make sure we don't attempt to recover from problems in subrules.// func (this *BailErrorStrategy) sync(recognizer IParser) { - // pass + // pass } - - diff --git a/runtime/Go/src/antlr4/Errors.go b/runtime/Go/src/antlr4/Errors.go index 74bd8756f..5856af3e7 100644 --- a/runtime/Go/src/antlr4/Errors.go +++ b/runtime/Go/src/antlr4/Errors.go @@ -1,7 +1,6 @@ package antlr4 -import ( - ) +import () // The root of the ANTLR exception hierarchy. In general, ANTLR tracks just // 3 kinds of errors: prediction errors, failed predicate errors, and @@ -9,42 +8,39 @@ import ( // in the input, where it is in the ATN, the rule invocation stack, // and what kind of problem occurred. - type IRecognitionException interface { getOffendingToken() *Token getMessage() string } type RecognitionException struct { - - message string - recognizer IRecognizer + message string + recognizer IRecognizer offendingToken *Token offendingState int - ctx IRuleContext - input CharStream - + ctx IRuleContext + input CharStream } func NewRecognitionException(message string, recognizer IRecognizer, input CharStream, ctx IRuleContext) *RecognitionException { -// todo -// Error.call(this) -// -// if (!!Error.captureStackTrace) { -// Error.captureStackTrace(this, RecognitionException) -// } else { -// var stack = NewError().stack -// } + // todo + // Error.call(this) + // + // if (!!Error.captureStackTrace) { + // Error.captureStackTrace(this, RecognitionException) + // } else { + // var stack = NewError().stack + // } // TODO may be able to use - "runtime" func Stack(buf []byte, all bool) int t := new(RecognitionException) t.InitRecognitionException(message, recognizer, input, ctx) - return t + return t } -func (t *RecognitionException) InitRecognitionException(message string, recognizer IRecognizer, input CharStream, ctx IRuleContext){ +func (t *RecognitionException) InitRecognitionException(message string, recognizer IRecognizer, input CharStream, ctx IRuleContext) { t.message = message t.recognizer = recognizer @@ -60,7 +56,7 @@ func (t *RecognitionException) InitRecognitionException(message string, recogniz // {@link DecisionState} number. For others, it is the state whose outgoing // edge we couldn't match. t.offendingState = -1 - if (t.recognizer!=nil) { + if t.recognizer != nil { t.offendingState = t.recognizer.getState() } } @@ -86,58 +82,52 @@ func (this *RecognitionException) getOffendingToken() *Token { // state in the ATN, or {@code nil} if the information is not available. 
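Given the interface above, a reported error can be summarised roughly as follows (a sketch inside package antlr4; describeError is an invented helper). Note that getExpectedTokens is defined on the concrete *RecognitionException rather than on the interface, so reaching it requires a type assertion.

package antlr4

// describeError pulls out the pieces an error listener usually wants from a
// recognition error: the message and, if recorded, the offending token text.
func describeError(e IRecognitionException) string {
	msg := e.getMessage()
	if tok := e.getOffendingToken(); tok != nil {
		msg += " near '" + tok.text() + "'"
	}
	return msg
}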
// / func (this *RecognitionException) getExpectedTokens() *IntervalSet { - if (this.recognizer!=nil) { - return this.recognizer.getATN().getExpectedTokens(this.offendingState, this.ctx) - } else { - return nil - } + if this.recognizer != nil { + return this.recognizer.getATN().getExpectedTokens(this.offendingState, this.ctx) + } else { + return nil + } } func (this *RecognitionException) toString() string { - return this.message + return this.message } - type LexerNoViableAltException struct { - RecognitionException - startIndex int + startIndex int deadEndConfigs *ATNConfigSet - } func NewLexerNoViableAltException(lexer *Lexer, input CharStream, startIndex int, deadEndConfigs *ATNConfigSet) *LexerNoViableAltException { - this := new (LexerNoViableAltException) + this := new(LexerNoViableAltException) this.InitRecognitionException("", lexer, input, nil) this.startIndex = startIndex - this.deadEndConfigs = deadEndConfigs + this.deadEndConfigs = deadEndConfigs - return this + return this } func (this *LexerNoViableAltException) toString() string { - var symbol = "" - if (this.startIndex >= 0 && this.startIndex < this.input.size()) { - symbol = this.input.getTextFromInterval(NewInterval(this.startIndex,this.startIndex)) - } - return "LexerNoViableAltException" + symbol + var symbol = "" + if this.startIndex >= 0 && this.startIndex < this.input.size() { + symbol = this.input.getTextFromInterval(NewInterval(this.startIndex, this.startIndex)) + } + return "LexerNoViableAltException" + symbol } - type NoViableAltException struct { - RecognitionException - startToken *Token + startToken *Token offendingToken *Token - ctx IParserRuleContext + ctx IParserRuleContext deadEndConfigs *ATNConfigSet - } // Indicates that the parser could not decide which of two or more paths @@ -147,34 +137,34 @@ type NoViableAltException struct { // func NewNoViableAltException(recognizer IParser, input CharStream, startToken *Token, offendingToken *Token, deadEndConfigs *ATNConfigSet, ctx IParserRuleContext) *NoViableAltException { - if (ctx == nil){ + if ctx == nil { ctx = recognizer.getParserRuleContext() } - if (offendingToken == nil){ + if offendingToken == nil { offendingToken = recognizer.getCurrentToken() } - if (startToken == nil){ + if startToken == nil { startToken = recognizer.getCurrentToken() } - if (input == nil){ + if input == nil { input = recognizer.getInputStream() } this := new(NoViableAltException) this.InitRecognitionException("", recognizer, input, ctx) - // Which configurations did we try at input.index() that couldn't match + // Which configurations did we try at input.index() that couldn't match // input.LT(1)?// - this.deadEndConfigs = deadEndConfigs - // The token object at the start index the input stream might - // not be buffering tokens so get a reference to it. (At the - // time the error occurred, of course the stream needs to keep a - // buffer all of the tokens but later we might not have access to those.) - this.startToken = startToken - this.offendingToken = offendingToken + this.deadEndConfigs = deadEndConfigs + // The token object at the start index the input stream might + // not be buffering tokens so get a reference to it. (At the + // time the error occurred, of course the stream needs to keep a + // buffer all of the tokens but later we might not have access to those.) + this.startToken = startToken + this.offendingToken = offendingToken return this } @@ -203,13 +193,11 @@ func NewInputMismatchException(recognizer IParser) *InputMismatchException { // prediction. 
type FailedPredicateException struct { - RecognitionException - ruleIndex int + ruleIndex int predicateIndex int - predicate string - + predicate string } func NewFailedPredicateException(recognizer *Parser, predicate string, message string) *FailedPredicateException { @@ -218,38 +206,34 @@ func NewFailedPredicateException(recognizer *Parser, predicate string, message s this.InitRecognitionException(this.formatMessage(predicate, message), recognizer, recognizer.getInputStream(), recognizer._ctx) - var s = recognizer._interp.atn.states[recognizer.state] - var trans = s.getTransitions()[0] - if trans2, ok := trans.(*PredicateTransition); ok { - this.ruleIndex = trans2.ruleIndex - this.predicateIndex = trans2.predIndex - } else { - this.ruleIndex = 0 - this.predicateIndex = 0 - } - this.predicate = predicate - this.offendingToken = recognizer.getCurrentToken() + var s = recognizer.Interpreter.atn.states[recognizer.state] + var trans = s.getTransitions()[0] + if trans2, ok := trans.(*PredicateTransition); ok { + this.ruleIndex = trans2.ruleIndex + this.predicateIndex = trans2.predIndex + } else { + this.ruleIndex = 0 + this.predicateIndex = 0 + } + this.predicate = predicate + this.offendingToken = recognizer.getCurrentToken() - return this + return this } func (this *FailedPredicateException) formatMessage(predicate, message string) string { - if (message != "") { - return message - } else { - return "failed predicate: {" + predicate + "}?" - } + if message != "" { + return message + } else { + return "failed predicate: {" + predicate + "}?" + } } type ParseCancellationException struct { - } func NewParseCancellationException() *ParseCancellationException { -// Error.call(this) -// Error.captureStackTrace(this, ParseCancellationException) + // Error.call(this) + // Error.captureStackTrace(this, ParseCancellationException) return new(ParseCancellationException) } - - - diff --git a/runtime/Go/src/antlr4/FileStream.go b/runtime/Go/src/antlr4/FileStream.go index 6e6693cd2..85c26417e 100644 --- a/runtime/Go/src/antlr4/FileStream.go +++ b/runtime/Go/src/antlr4/FileStream.go @@ -2,8 +2,8 @@ package antlr4 import ( "bytes" - "os" "io" + "os" ) // @@ -25,7 +25,4 @@ func NewFileStream(fileName string) { io.Copy(buf, f) // Error handling elided for brevity. 
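The NewFileStream stub above elides its error handling and does not yet return anything; below is a sketch of a fuller version, assuming the intent is to hand the file's contents to NewInputStream. The checked variant's name and return type are assumptions, not part of the patch.

package antlr4

import (
	"bytes"
	"io"
	"os"
)

// newFileStreamChecked is a hypothetical variant of NewFileStream that
// propagates I/O errors instead of eliding them and builds an InputStream
// over the file's contents.
func newFileStreamChecked(fileName string) (*InputStream, error) {
	f, err := os.Open(fileName)
	if err != nil {
		return nil, err
	}
	defer f.Close()

	buf := new(bytes.Buffer)
	if _, err := io.Copy(buf, f); err != nil {
		return nil, err
	}
	return NewInputStream(buf.String()), nil
}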
f.Close() - } - - diff --git a/runtime/Go/src/antlr4/InputStream.go b/runtime/Go/src/antlr4/InputStream.go index e3d0a9a7e..2059f6274 100644 --- a/runtime/Go/src/antlr4/InputStream.go +++ b/runtime/Go/src/antlr4/InputStream.go @@ -1,10 +1,10 @@ package antlr4 type InputStream struct { - name string - index int - data []rune - size int + name string + index int + data []rune + size int } func NewInputStream(data string) *InputStream { @@ -73,7 +73,7 @@ func (is *InputStream) getText(start int, stop int) string { if start >= is.size { return "" } else { - return string(is.data[start:stop+1]) + return string(is.data[start : stop+1]) } } diff --git a/runtime/Go/src/antlr4/IntStream.go b/runtime/Go/src/antlr4/IntStream.go index 637fb2eac..bfe1b37e5 100644 --- a/runtime/Go/src/antlr4/IntStream.go +++ b/runtime/Go/src/antlr4/IntStream.go @@ -1,7 +1,6 @@ package antlr4 type IntStream interface { - consume() LA(int) int mark() int @@ -10,5 +9,4 @@ type IntStream interface { seek(index int) size() int getSourceName() string - } diff --git a/runtime/Go/src/antlr4/IntervalSet.go b/runtime/Go/src/antlr4/IntervalSet.go index f6b5d3f36..68852b280 100644 --- a/runtime/Go/src/antlr4/IntervalSet.go +++ b/runtime/Go/src/antlr4/IntervalSet.go @@ -1,17 +1,17 @@ package antlr4 import ( - "strings" "strconv" + "strings" ) type Interval struct { start int - stop int + stop int } /* stop is not included! */ -func NewInterval(start, stop int) *Interval{ +func NewInterval(start, stop int) *Interval { i := new(Interval) i.start = start @@ -24,7 +24,7 @@ func (i *Interval) contains(item int) bool { } func (i *Interval) toString() string { - if(i.start==i.stop-1) { + if i.start == i.stop-1 { return strconv.Itoa(i.start) } else { return strconv.Itoa(i.start) + ".." + strconv.Itoa(i.stop-1) @@ -37,7 +37,7 @@ func (i *Interval) length() int { type IntervalSet struct { intervals []*Interval - readOnly bool + readOnly bool } func NewIntervalSet() *IntervalSet { @@ -51,7 +51,7 @@ func NewIntervalSet() *IntervalSet { } func (i *IntervalSet) first() int { - if (len(i.intervals)==0) { + if len(i.intervals) == 0 { return TokenInvalidType } else { return i.intervals[0].start @@ -59,30 +59,30 @@ func (i *IntervalSet) first() int { } func (i *IntervalSet) addOne(v int) { - i.addInterval(NewInterval(v, v + 1)) + i.addInterval(NewInterval(v, v+1)) } func (i *IntervalSet) addRange(l, h int) { - i.addInterval(NewInterval(l, h + 1)) + i.addInterval(NewInterval(l, h+1)) } func (is *IntervalSet) addInterval(v *Interval) { - if (is.intervals == nil) { + if is.intervals == nil { is.intervals = make([]*Interval, 0) - is.intervals = append( is.intervals, v ) + is.intervals = append(is.intervals, v) } else { // find insert pos for k := 0; k < len(is.intervals); k++ { var i = is.intervals[k] // distinct range -> insert - if (v.stop < i.start) { + if v.stop < i.start { // is.intervals = splice(k, 0, v) is.intervals = append(is.intervals[0:k], append([]*Interval{v}, is.intervals[k:]...)...) 
return - } else if (v.stop == i.start) { + } else if v.stop == i.start { is.intervals[k].start = v.start return - } else if (v.start <= i.stop) { + } else if v.start <= i.stop { is.intervals[k] = NewInterval(intMin(i.start, v.start), intMax(i.stop, v.stop)) is.reduce(k) return @@ -94,7 +94,7 @@ func (is *IntervalSet) addInterval(v *Interval) { } func (i *IntervalSet) addSet(other *IntervalSet) *IntervalSet { - if (other.intervals != nil) { + if other.intervals != nil { for k := 0; k < len(other.intervals); k++ { var i2 = other.intervals[k] i.addInterval(NewInterval(i2.start, i2.stop)) @@ -105,35 +105,35 @@ func (i *IntervalSet) addSet(other *IntervalSet) *IntervalSet { func (i *IntervalSet) reduce(k int) { // only need to reduce if k is not the last - if (k < len(i.intervals) - 1) { + if k < len(i.intervals)-1 { var l = i.intervals[k] - var r = i.intervals[k + 1] + var r = i.intervals[k+1] // if r contained in l - if (l.stop >= r.stop) { - i.intervals = i.intervals[0:len(i.intervals)-1] // pop(k + 1) + if l.stop >= r.stop { + i.intervals = i.intervals[0 : len(i.intervals)-1] // pop(k + 1) i.reduce(k) - } else if (l.stop >= r.start) { + } else if l.stop >= r.start { i.intervals[k] = NewInterval(l.start, r.stop) - i.intervals = i.intervals[0:len(i.intervals)-1] // i.intervals.pop(k + 1) + i.intervals = i.intervals[0 : len(i.intervals)-1] // i.intervals.pop(k + 1) } } } func (is *IntervalSet) complement(start int, stop int) *IntervalSet { - var result = NewIntervalSet() - result.addInterval(NewInterval(start,stop+1)) - for i := 0; i< len(is.intervals); i++ { - result.removeRange(is.intervals[i]) - } - return result + var result = NewIntervalSet() + result.addInterval(NewInterval(start, stop+1)) + for i := 0; i < len(is.intervals); i++ { + result.removeRange(is.intervals[i]) + } + return result } func (i *IntervalSet) contains(item int) bool { - if (i.intervals == nil) { + if i.intervals == nil { return false } else { for k := 0; k < len(i.intervals); k++ { - if(i.intervals[k].contains(item)) { + if i.intervals[k].contains(item) { return true } } @@ -144,7 +144,7 @@ func (i *IntervalSet) contains(item int) bool { func (is *IntervalSet) length() int { len := 0 - for _,v := range is.intervals { + for _, v := range is.intervals { len += v.length() } @@ -152,74 +152,74 @@ func (is *IntervalSet) length() int { } func (is *IntervalSet) removeRange(v *Interval) { - if v.start==v.stop-1 { - is.removeOne(v.start) - } else if (is.intervals!=nil) { - k:= 0 - for n :=0; ni.start && v.stop i.start && v.stop < i.stop { + is.intervals[k] = NewInterval(i.start, v.start) + var x = NewInterval(v.stop, i.stop) // is.intervals.splice(k, 0, x) is.intervals = append(is.intervals[0:k], append([]*Interval{x}, is.intervals[k:]...)...) - return - } else if(v.start<=i.start && v.stop>=i.stop) { -// is.intervals.splice(k, 1) + return + } else if v.start <= i.start && v.stop >= i.stop { + // is.intervals.splice(k, 1) is.intervals = append(is.intervals[0:k], is.intervals[k+1:]...) 
- k = k - 1 // need another pass - } else if(v.start") } else { names = append(names, ("'" + string(v.start) + "'")) } } else { - names = append(names, "'" + string(v.start) + "'..'" + string(v.stop-1) + "'") + names = append(names, "'"+string(v.start)+"'..'"+string(v.stop-1)+"'") } } - if (len(names) > 1) { + if len(names) > 1 { return "{" + strings.Join(names, ", ") + "}" } else { return names[0] } } - func (is *IntervalSet) toIndexString() string { var names = make([]string, 0) - for i := 0; i < len( is.intervals ); i++ { + for i := 0; i < len(is.intervals); i++ { var v = is.intervals[i] - if(v.stop==v.start+1) { - if ( v.start==TokenEOF ) { - names = append( names, "") + if v.stop == v.start+1 { + if v.start == TokenEOF { + names = append(names, "") } else { - names = append( names, string(v.start)) + names = append(names, string(v.start)) } } else { - names = append( names, string(v.start) + ".." + string(v.stop-1)) + names = append(names, string(v.start)+".."+string(v.stop-1)) } } - if (len(names) > 1) { + if len(names) > 1 { return "{" + strings.Join(names, ", ") + "}" } else { return names[0] } } - func (is *IntervalSet) toTokenString(literalNames []string, symbolicNames []string) string { var names = make([]string, 0) - for i := 0; i < len( is.intervals ); i++ { + for i := 0; i < len(is.intervals); i++ { var v = is.intervals[i] for j := v.start; j < v.stop; j++ { names = append(names, is.elementName(literalNames, symbolicNames, j)) } } - if (len(names) > 1) { - return "{" + strings.Join(names,", ") + "}" + if len(names) > 1 { + return "{" + strings.Join(names, ", ") + "}" } else { return names[0] } } func (i *IntervalSet) elementName(literalNames []string, symbolicNames []string, a int) string { - if (a == TokenEOF) { + if a == TokenEOF { return "" - } else if (a == TokenEpsilon) { + } else if a == TokenEpsilon { return "" } else { - if (literalNames[a] != ""){ + if literalNames[a] != "" { return literalNames[a] } else { return symbolicNames[a] } } } - - - diff --git a/runtime/Go/src/antlr4/LL1Analyzer.go b/runtime/Go/src/antlr4/LL1Analyzer.go index 56b96e539..a39805bb5 100644 --- a/runtime/Go/src/antlr4/LL1Analyzer.go +++ b/runtime/Go/src/antlr4/LL1Analyzer.go @@ -1,23 +1,22 @@ package antlr4 -import ( - ) +import () type LL1Analyzer struct { - atn *ATN + atn *ATN } -func NewLL1Analyzer (atn *ATN) *LL1Analyzer { - la := new(LL1Analyzer) - la.atn = atn - return la +func NewLL1Analyzer(atn *ATN) *LL1Analyzer { + la := new(LL1Analyzer) + la.atn = atn + return la } //* Special value added to the lookahead sets to indicate that we hit // a predicate during analysis if {@code seeThruPreds==false}. /// const ( - LL1AnalyzerHIT_PRED = TokenInvalidType + LL1AnalyzerHIT_PRED = TokenInvalidType ) //* @@ -30,23 +29,23 @@ const ( // @param s the ATN state // @return the expected symbols for each outgoing transition of {@code s}. 
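Before moving on to the LL(1) analyzer, a small usage sketch of the IntervalSet API above may help. It is hypothetical (exampleIntervalSet does not exist in the runtime) and assumes it is compiled inside the antlr4 package, since addOne, addRange and contains are unexported.

package antlr4

// exampleIntervalSet exercises the half-open convention used above:
// Interval.stop is exclusive, so addOne(v) stores [v, v+1) and
// addRange(l, h) stores [l, h+1).
func exampleIntervalSet() bool {
	s := NewIntervalSet()
	s.addOne(5)        // set now contains 5
	s.addRange(10, 12) // set now contains 10, 11 and 12
	s.addOne(13)       // extends [10,13) to [10,14) via the v.start <= i.stop branch
	return s.contains(11) && s.contains(13) && !s.contains(9)
}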
func (la *LL1Analyzer) getDecisionLookahead(s IATNState) []*IntervalSet { - if (s == nil) { - return nil - } - var count = len(s.getTransitions()) - var look = make([]*IntervalSet, count) - for alt := 0; alt < count; alt++ { - look[alt] = NewIntervalSet() - var lookBusy = NewSet(nil,nil) - var seeThruPreds = false // fail to get lookahead upon pred - la._LOOK(s.getTransitions()[alt].getTarget(), nil, PredictionContextEMPTY, look[alt], lookBusy, NewBitSet(), seeThruPreds, false) - // Wipe out lookahead for la alternative if we found nothing - // or we had a predicate when we !seeThruPreds - if (look[alt].length()==0 || look[alt].contains(LL1AnalyzerHIT_PRED)) { - look[alt] = nil - } - } - return look + if s == nil { + return nil + } + var count = len(s.getTransitions()) + var look = make([]*IntervalSet, count) + for alt := 0; alt < count; alt++ { + look[alt] = NewIntervalSet() + var lookBusy = NewSet(nil, nil) + var seeThruPreds = false // fail to get lookahead upon pred + la._LOOK(s.getTransitions()[alt].getTarget(), nil, PredictionContextEMPTY, look[alt], lookBusy, NewBitSet(), seeThruPreds, false) + // Wipe out lookahead for la alternative if we found nothing + // or we had a predicate when we !seeThruPreds + if look[alt].length() == 0 || look[alt].contains(LL1AnalyzerHIT_PRED) { + look[alt] = nil + } + } + return look } //* @@ -68,16 +67,16 @@ func (la *LL1Analyzer) getDecisionLookahead(s IATNState) []*IntervalSet { // specified {@code ctx}. /// func (la *LL1Analyzer) LOOK(s, stopState IATNState, ctx IRuleContext) *IntervalSet { - var r = NewIntervalSet() - var seeThruPreds = true // ignore preds get all lookahead - var lookContext IPredictionContext - if (ctx != nil){ - predictionContextFromRuleContext(s.getATN(), ctx) - } - la._LOOK(s, stopState, lookContext, r, NewSet(nil, nil), NewBitSet(), seeThruPreds, true) - return r + var r = NewIntervalSet() + var seeThruPreds = true // ignore preds get all lookahead + var lookContext IPredictionContext + if ctx != nil { + predictionContextFromRuleContext(s.getATN(), ctx) + } + la._LOOK(s, stopState, lookContext, r, NewSet(nil, nil), NewBitSet(), seeThruPreds, true) + return r } - + //* // Compute set of tokens that can follow {@code s} in the ATN in the // specified {@code ctx}. @@ -108,98 +107,97 @@ func (la *LL1Analyzer) LOOK(s, stopState IATNState, ctx IRuleContext) *IntervalS // outermost context is reached. This parameter has no effect if {@code ctx} // is {@code nil}. 
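getDecisionLookahead returns one IntervalSet per outgoing transition (one per alternative), with nil marking alternatives whose lookahead could not be established. A hedged sketch of how that result might be inspected; printDecisionLookahead is an invented diagnostic helper, not runtime API.

package antlr4

import "fmt"

// printDecisionLookahead dumps the per-alternative lookahead sets computed by
// getDecisionLookahead. Alternatives are reported 1-based, matching the ATN.
func printDecisionLookahead(atn *ATN, s IATNState) {
	la := NewLL1Analyzer(atn)
	for alt, set := range la.getDecisionLookahead(s) {
		if set == nil {
			fmt.Println("alt", alt+1, "-> no usable lookahead (empty or blocked by a predicate)")
		} else {
			fmt.Println("alt", alt+1, "->", set.toIndexString())
		}
	}
}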
- func (la *LL1Analyzer) _LOOK(s, stopState IATNState, ctx IPredictionContext, look *IntervalSet, lookBusy *Set, calledRuleStack *BitSet, seeThruPreds, addEOF bool) { - c := NewATNConfig6(s, 0, ctx) + c := NewATNConfig6(s, 0, ctx) - if lookBusy.add(c) == nil { - return - } + if lookBusy.add(c) == nil { + return + } - if (s == stopState) { - if (ctx == nil) { - look.addOne(TokenEpsilon) - return - } else if (ctx.isEmpty() && addEOF) { - look.addOne(TokenEOF) - return - } - } + if s == stopState { + if ctx == nil { + look.addOne(TokenEpsilon) + return + } else if ctx.isEmpty() && addEOF { + look.addOne(TokenEOF) + return + } + } - _,ok := s.(*RuleStopState) + _, ok := s.(*RuleStopState) - if ok { - if ( ctx==nil ) { - look.addOne(TokenEpsilon) - return - } else if (ctx.isEmpty() && addEOF) { - look.addOne(TokenEOF) - return - } + if ok { + if ctx == nil { + look.addOne(TokenEpsilon) + return + } else if ctx.isEmpty() && addEOF { + look.addOne(TokenEOF) + return + } - if ( ctx != PredictionContextEMPTY ) { + if ctx != PredictionContextEMPTY { - // run thru all possible stack tops in ctx - for i := 0; i < ctx.length(); i++ { + // run thru all possible stack tops in ctx + for i := 0; i < ctx.length(); i++ { - returnState := la.atn.states[ctx.getReturnState(i)] -// System.out.println("popping back to "+retState) + returnState := la.atn.states[ctx.getReturnState(i)] + // System.out.println("popping back to "+retState) - removed := calledRuleStack.contains(returnState.getRuleIndex()) + removed := calledRuleStack.contains(returnState.getRuleIndex()) - // TODO this is incorrect - defer func(){ - if (removed) { - calledRuleStack.add(returnState.getRuleIndex()) - } - }() + // TODO this is incorrect + defer func() { + if removed { + calledRuleStack.add(returnState.getRuleIndex()) + } + }() - calledRuleStack.clear(returnState.getRuleIndex()) - la._LOOK(returnState, stopState, ctx.getParent(i), look, lookBusy, calledRuleStack, seeThruPreds, addEOF) + calledRuleStack.clear(returnState.getRuleIndex()) + la._LOOK(returnState, stopState, ctx.getParent(i), look, lookBusy, calledRuleStack, seeThruPreds, addEOF) - } - return - } - } + } + return + } + } - n := len(s.getTransitions()) + n := len(s.getTransitions()) - for i:=0; i" - } else if (c == '\n') { + } else if c == '\n' { return "\\n" - } else if (c == '\t') { + } else if c == '\t' { return "\\t" - } else if (c == '\r') { + } else if c == '\r' { return "\\r" } else { return string(c) @@ -395,15 +394,13 @@ func (l *Lexer) getCharErrorDisplay(c rune) string { // to do sophisticated error recovery if you are in a fragment rule. // / func (l *Lexer) recover(re IRecognitionException) { - if (l._input.LA(1) != TokenEOF) { + if l._input.LA(1) != TokenEOF { if _, ok := re.(*LexerNoViableAltException); ok { // skip a char and try again - l._interp.consume(l._input) + l.Interpreter.consume(l._input) } else { // TODO: Do we lose character or line position information? 
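The "TODO this is incorrect" note in _LOOK above refers to the defer placed inside the loop over stack tops: deferred calls only run when _LOOK itself returns, not at the end of each iteration, so the calledRuleStack bit is not restored between iterations. A sketch of the usual shape of the fix, on a simplified, hypothetical signature rather than the runtime's exact code:

package antlr4

// restorePerIteration shows the intended bookkeeping: clear the bit, recurse,
// then restore it immediately in the same iteration instead of deferring.
func restorePerIteration(returnStates []int, calledRuleStack *BitSet, recurse func(state int)) {
	for _, rs := range returnStates {
		removed := calledRuleStack.contains(rs)
		calledRuleStack.clear(rs)

		recurse(rs) // stands in for the recursive la._LOOK call

		// Restore once per iteration; a defer here would only fire when the
		// enclosing function returns, after every iteration has already run.
		if removed {
			calledRuleStack.add(rs)
		}
	}
}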
l._input.consume() } } } - - diff --git a/runtime/Go/src/antlr4/LexerATNSimulator.go b/runtime/Go/src/antlr4/LexerATNSimulator.go index 7631bb23f..82a3fc05f 100644 --- a/runtime/Go/src/antlr4/LexerATNSimulator.go +++ b/runtime/Go/src/antlr4/LexerATNSimulator.go @@ -1,6 +1,7 @@ package antlr4 + import ( - "fmt" + "fmt" "strconv" ) @@ -28,9 +29,9 @@ func resetSimState(sim *SimState) { } type SimState struct { - index int - line int - column int + index int + line int + column int dfaState *DFAState } @@ -49,17 +50,16 @@ func (this *SimState) reset() { type LexerATNSimulator struct { ATNSimulator - recog *Lexer + recog *Lexer predictionMode int - decisionToDFA []*DFA - mergeCache DoubleDict - startIndex int - line int - column int - mode int - prevAccept *SimState - match_calls int - + decisionToDFA []*DFA + mergeCache DoubleDict + startIndex int + line int + column int + mode int + prevAccept *SimState + match_calls int } func NewLexerATNSimulator(recog *Lexer, atn *ATN, decisionToDFA []*DFA, sharedContextCache *PredictionContextCache) *LexerATNSimulator { @@ -109,14 +109,14 @@ func (this *LexerATNSimulator) match(input CharStream, mode int) int { this.mode = mode var mark = input.mark() - defer func(){ + defer func() { input.release(mark) }() this.startIndex = input.index() this.prevAccept.reset() var dfa = this.decisionToDFA[mode] - if (dfa.s0 == nil) { + if dfa.s0 == nil { return this.matchATN(input) } else { return this.execATN(input, dfa.s0) @@ -134,7 +134,7 @@ func (this *LexerATNSimulator) reset() { func (this *LexerATNSimulator) matchATN(input CharStream) int { var startState = this.atn.modeToStartState[this.mode] - if (LexerATNSimulatordebug) { + if LexerATNSimulatordebug { fmt.Println("matchATN mode " + strconv.Itoa(this.mode) + " start: " + startState.toString()) } var old_mode = this.mode @@ -144,31 +144,31 @@ func (this *LexerATNSimulator) matchATN(input CharStream) int { var next = this.addDFAState(s0_closure.ATNConfigSet) - if (!suppressEdge) { + if !suppressEdge { this.decisionToDFA[this.mode].s0 = next } var predict = this.execATN(input, next) - if (LexerATNSimulatordebug) { + if LexerATNSimulatordebug { fmt.Println("DFA after matchATN: " + this.decisionToDFA[old_mode].toLexerString()) } return predict } func (this *LexerATNSimulator) execATN(input CharStream, ds0 *DFAState) int { - if (LexerATNSimulatordebug) { + if LexerATNSimulatordebug { fmt.Println("start state closure=" + ds0.configs.toString()) } - if (ds0.isAcceptState) { + if ds0.isAcceptState { // allow zero-length tokens this.captureSimState(this.prevAccept, input, ds0) } var t = input.LA(1) var s = ds0 // s is current/from DFA state - for (true) { // while more work - if (LexerATNSimulatordebug) { + for true { // while more work + if LexerATNSimulatordebug { fmt.Println("execATN loop starting closure: " + s.configs.toString()) } @@ -192,23 +192,23 @@ func (this *LexerATNSimulator) execATN(input CharStream, ds0 *DFAState) int { // print("Target for:" + str(s) + " and:" + str(t)) var target = this.getExistingTargetState(s, t) // print("Existing:" + str(target)) - if (target == nil) { + if target == nil { target = this.computeTargetState(input, s, t) // print("Computed:" + str(target)) } - if (target == ATNSimulatorERROR) { + if target == ATNSimulatorERROR { break } // If this is a consumable input element, make sure to consume before // capturing the accept state so the input index, line, and char // position accurately reflect the state of the interpreter at the // end of the token. 
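The execATN loop that continues below follows a DFA-first, ATN-as-fallback shape. A stripped-down paraphrase of one iteration's target lookup, with a hypothetical method name and none of the accept-state bookkeeping:

package antlr4

// dfaFirstStep (illustrative only) reuses a cached DFA edge when one exists
// and otherwise falls back to the ATN computation, which caches the new edge
// via addDFAEdge inside computeTargetState.
func (this *LexerATNSimulator) dfaFirstStep(input CharStream, s *DFAState, t int) *DFAState {
	target := this.getExistingTargetState(s, t) // nil if the edge is not cached
	if target == nil {
		target = this.computeTargetState(input, s, t)
	}
	return target
}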
- if (t != TokenEOF) { + if t != TokenEOF { this.consume(input) } - if (target.isAcceptState) { + if target.isAcceptState { this.captureSimState(this.prevAccept, input, target) - if (t == TokenEOF) { + if t == TokenEOF { break } } @@ -228,15 +228,15 @@ func (this *LexerATNSimulator) execATN(input CharStream, ds0 *DFAState) int { // {@code t}, or {@code nil} if the target state for this edge is not // already cached func (this *LexerATNSimulator) getExistingTargetState(s *DFAState, t int) *DFAState { - if (s.edges == nil || t < LexerATNSimulatorMIN_DFA_EDGE || t > LexerATNSimulatorMAX_DFA_EDGE) { + if s.edges == nil || t < LexerATNSimulatorMIN_DFA_EDGE || t > LexerATNSimulatorMAX_DFA_EDGE { return nil } - var target = s.edges[t - LexerATNSimulatorMIN_DFA_EDGE] - if(target==nil) { + var target = s.edges[t-LexerATNSimulatorMIN_DFA_EDGE] + if target == nil { target = nil } - if (LexerATNSimulatordebug && target != nil) { + if LexerATNSimulatordebug && target != nil { fmt.Println("reuse state " + strconv.Itoa(s.stateNumber) + " edge to " + strconv.Itoa(target.stateNumber)) } return target @@ -258,8 +258,8 @@ func (this *LexerATNSimulator) computeTargetState(input CharStream, s *DFAState, // Fill reach starting from closure, following t transitions this.getReachableConfigSet(input, s.configs, reach.ATNConfigSet, t) - if (len( reach.configs) == 0) { // we got nowhere on t from s - if (!reach.hasSemanticContext) { + if len(reach.configs) == 0 { // we got nowhere on t from s + if !reach.hasSemanticContext { // we got nowhere on t, don't panic out this knowledge it'd // cause a failover from DFA later. this.addDFAEdge(s, t, ATNSimulatorERROR, nil) @@ -272,14 +272,14 @@ func (this *LexerATNSimulator) computeTargetState(input CharStream, s *DFAState, } func (this *LexerATNSimulator) failOrAccept(prevAccept *SimState, input CharStream, reach *ATNConfigSet, t int) int { - if (this.prevAccept.dfaState != nil) { + if this.prevAccept.dfaState != nil { var lexerActionExecutor = prevAccept.dfaState.lexerActionExecutor this.accept(input, lexerActionExecutor, this.startIndex, - prevAccept.index, prevAccept.line, prevAccept.column) + prevAccept.index, prevAccept.line, prevAccept.column) return prevAccept.dfaState.prediction } else { // if no accept and EOF is first char, return EOF - if (t == TokenEOF && input.index() == this.startIndex) { + if t == TokenEOF && input.index() == this.startIndex { return TokenEOF } panic(NewLexerNoViableAltException(this.recog, input, this.startIndex, reach)) @@ -296,24 +296,24 @@ func (this *LexerATNSimulator) getReachableConfigSet(input CharStream, closure * for i := 0; i < len(closure.configs); i++ { var cfg = closure.configs[i] var currentAltReachedAcceptState = (cfg.getAlt() == skipAlt) - if (currentAltReachedAcceptState && cfg.(*LexerATNConfig).passedThroughNonGreedyDecision) { + if currentAltReachedAcceptState && cfg.(*LexerATNConfig).passedThroughNonGreedyDecision { continue } - if (LexerATNSimulatordebug) { + if LexerATNSimulatordebug { fmt.Printf("testing %s at %s\n", this.getTokenName(t), cfg.toString()) // this.recog, true)) } for j := 0; j < len(cfg.getState().getTransitions()); j++ { var trans = cfg.getState().getTransitions()[j] // for each transition var target = this.getReachableTarget(trans, t) - if (target != nil) { + if target != nil { var lexerActionExecutor = cfg.(*LexerATNConfig).lexerActionExecutor - if (lexerActionExecutor != nil) { + if lexerActionExecutor != nil { lexerActionExecutor = lexerActionExecutor.fixOffsetBeforeMatch(input.index() - 
this.startIndex) } var treatEofAsEpsilon = (t == TokenEOF) var config = NewLexerATNConfig3(cfg.(*LexerATNConfig), target, lexerActionExecutor) - if (this.closure(input, config, reach, - currentAltReachedAcceptState, true, treatEofAsEpsilon)) { + if this.closure(input, config, reach, + currentAltReachedAcceptState, true, treatEofAsEpsilon) { // any remaining configs for this alt have a lower priority // than the one that just reached an accept state. skipAlt = cfg.getAlt() @@ -324,32 +324,32 @@ func (this *LexerATNSimulator) getReachableConfigSet(input CharStream, closure * } func (this *LexerATNSimulator) accept(input CharStream, lexerActionExecutor *LexerActionExecutor, startIndex, index, line, charPos int) { - if (LexerATNSimulatordebug) { + if LexerATNSimulatordebug { fmt.Println("ACTION %s\n", lexerActionExecutor) } // seek to after last char in token input.seek(index) this.line = line this.column = charPos - if (lexerActionExecutor != nil && this.recog != nil) { + if lexerActionExecutor != nil && this.recog != nil { lexerActionExecutor.execute(this.recog, input, startIndex) } } func (this *LexerATNSimulator) getReachableTarget(trans ITransition, t int) IATNState { - if (trans.matches(t, 0, 0xFFFE)) { + if trans.matches(t, 0, 0xFFFE) { return trans.getTarget() } else { return nil } } -func (this *LexerATNSimulator) computeStartState(input CharStream, p IATNState ) *OrderedATNConfigSet { +func (this *LexerATNSimulator) computeStartState(input CharStream, p IATNState) *OrderedATNConfigSet { var configs = NewOrderedATNConfigSet() for i := 0; i < len(p.getTransitions()); i++ { var target = p.getTransitions()[i].getTarget() - var cfg = NewLexerATNConfig6(target, i+1, PredictionContextEMPTY) + var cfg = NewLexerATNConfig6(target, i+1, PredictionContextEMPTY) this.closure(input, cfg, configs.ATNConfigSet, false, false, false) } return configs @@ -366,21 +366,21 @@ func (this *LexerATNSimulator) computeStartState(input CharStream, p IATNState ) func (this *LexerATNSimulator) closure(input CharStream, config *LexerATNConfig, configs *ATNConfigSet, currentAltReachedAcceptState, speculative, treatEofAsEpsilon bool) bool { - if (LexerATNSimulatordebug) { - fmt.Println("closure(" + config.toString() + ")") // config.toString(this.recog, true) + ")") + if LexerATNSimulatordebug { + fmt.Println("closure(" + config.toString() + ")") // config.toString(this.recog, true) + ")") } - _, ok :=config.state.(*RuleStopState) - if (ok) { - if (LexerATNSimulatordebug) { - if (this.recog != nil) { + _, ok := config.state.(*RuleStopState) + if ok { + if LexerATNSimulatordebug { + if this.recog != nil { fmt.Println("closure at %s rule stop %s\n", this.recog.getRuleNames()[config.state.getRuleIndex()], config) } else { fmt.Println("closure at rule stop %s\n", config) } } - if (config.context == nil || config.context.hasEmptyPath()) { - if (config.context == nil || config.context.isEmpty()) { + if config.context == nil || config.context.hasEmptyPath() { + if config.context == nil || config.context.isEmpty() { configs.add(config, nil) return true } else { @@ -388,9 +388,9 @@ func (this *LexerATNSimulator) closure(input CharStream, config *LexerATNConfig, currentAltReachedAcceptState = true } } - if (config.context != nil && !config.context.isEmpty()) { + if config.context != nil && !config.context.isEmpty() { for i := 0; i < config.context.length(); i++ { - if (config.context.getReturnState(i) != PredictionContextEMPTY_RETURN_STATE) { + if config.context.getReturnState(i) != PredictionContextEMPTY_RETURN_STATE { var 
newContext = config.context.getParent(i) // "pop" return state var returnState = this.atn.states[config.context.getReturnState(i)] cfg := NewLexerATNConfig2(config, returnState, newContext) @@ -401,17 +401,17 @@ func (this *LexerATNSimulator) closure(input CharStream, config *LexerATNConfig, return currentAltReachedAcceptState } // optimization - if (!config.state.getEpsilonOnlyTransitions()) { - if (!currentAltReachedAcceptState || !config.passedThroughNonGreedyDecision) { + if !config.state.getEpsilonOnlyTransitions() { + if !currentAltReachedAcceptState || !config.passedThroughNonGreedyDecision { configs.add(config, nil) } } for j := 0; j < len(config.state.getTransitions()); j++ { var trans = config.state.getTransitions()[j] cfg := this.getEpsilonTarget(input, config, trans, configs, speculative, treatEofAsEpsilon) - if (cfg != nil) { + if cfg != nil { currentAltReachedAcceptState = this.closure(input, cfg, configs, - currentAltReachedAcceptState, speculative, treatEofAsEpsilon) + currentAltReachedAcceptState, speculative, treatEofAsEpsilon) } } return currentAltReachedAcceptState @@ -419,19 +419,19 @@ func (this *LexerATNSimulator) closure(input CharStream, config *LexerATNConfig, // side-effect: can alter configs.hasSemanticContext func (this *LexerATNSimulator) getEpsilonTarget(input CharStream, config *LexerATNConfig, trans ITransition, - configs *ATNConfigSet, speculative, treatEofAsEpsilon bool) *LexerATNConfig { + configs *ATNConfigSet, speculative, treatEofAsEpsilon bool) *LexerATNConfig { var cfg *LexerATNConfig - if (trans.getSerializationType() == TransitionRULE) { + if trans.getSerializationType() == TransitionRULE { rt := trans.(*RuleTransition) var newContext = SingletonPredictionContextcreate(config.context, rt.followState.getStateNumber()) - cfg = NewLexerATNConfig2(config, trans.getTarget(), newContext ) + cfg = NewLexerATNConfig2(config, trans.getTarget(), newContext) - } else if (trans.getSerializationType() == TransitionPRECEDENCE) { + } else if trans.getSerializationType() == TransitionPRECEDENCE { panic("Precedence predicates are not supported in lexers.") - } else if (trans.getSerializationType() == TransitionPREDICATE) { + } else if trans.getSerializationType() == TransitionPREDICATE { // Track traversing semantic predicates. If we traverse, // we cannot add a DFA state for this "reach" computation // because the DFA would not test the predicate again in the @@ -452,15 +452,15 @@ func (this *LexerATNSimulator) getEpsilonTarget(input CharStream, config *LexerA pt := trans.(*PredicateTransition) - if (LexerATNSimulatordebug) { + if LexerATNSimulatordebug { fmt.Println("EVAL rule " + strconv.Itoa(trans.(*PredicateTransition).ruleIndex) + ":" + strconv.Itoa(pt.predIndex)) } configs.hasSemanticContext = true - if (this.evaluatePredicate(input, pt.ruleIndex, pt.predIndex, speculative)) { + if this.evaluatePredicate(input, pt.ruleIndex, pt.predIndex, speculative) { cfg = NewLexerATNConfig4(config, trans.getTarget()) } - } else if (trans.getSerializationType() == TransitionACTION) { - if (config.context == nil || config.context.hasEmptyPath()) { + } else if trans.getSerializationType() == TransitionACTION { + if config.context == nil || config.context.hasEmptyPath() { // execute actions anywhere in the start rule for a token. 
// // TODO: if the entry rule is invoked recursively, some @@ -479,13 +479,13 @@ func (this *LexerATNSimulator) getEpsilonTarget(input CharStream, config *LexerA // ignore actions in referenced rules cfg = NewLexerATNConfig4(config, trans.getTarget()) } - } else if (trans.getSerializationType() == TransitionEPSILON) { + } else if trans.getSerializationType() == TransitionEPSILON { cfg = NewLexerATNConfig4(config, trans.getTarget()) - } else if (trans.getSerializationType() == TransitionATOM || - trans.getSerializationType() == TransitionRANGE || - trans.getSerializationType() == TransitionSET) { - if (treatEofAsEpsilon) { - if (trans.matches(TokenEOF, 0, 0xFFFF)) { + } else if trans.getSerializationType() == TransitionATOM || + trans.getSerializationType() == TransitionRANGE || + trans.getSerializationType() == TransitionSET { + if treatEofAsEpsilon { + if trans.matches(TokenEOF, 0, 0xFFFF) { cfg = NewLexerATNConfig4(config, trans.getTarget()) } } @@ -513,12 +513,12 @@ func (this *LexerATNSimulator) getEpsilonTarget(input CharStream, config *LexerA // @return {@code true} if the specified predicate evaluates to // {@code true}. // / -func (this *LexerATNSimulator) evaluatePredicate(input CharStream, ruleIndex, predIndex int, speculative bool) bool { +func (this *LexerATNSimulator) evaluatePredicate(input CharStream, ruleIndex, predIndex int, speculative bool) bool { // assume true if no recognizer was provided - if (this.recog == nil) { + if this.recog == nil { return true } - if (!speculative) { + if !speculative { return this.recog.sempred(nil, ruleIndex, predIndex) } var savedcolumn = this.column @@ -526,7 +526,7 @@ func (this *LexerATNSimulator) evaluatePredicate(input CharStream, ruleIndex, p var index = input.index() var marker = input.mark() - defer func(){ + defer func() { this.column = savedcolumn this.line = savedLine input.seek(index) @@ -544,8 +544,8 @@ func (this *LexerATNSimulator) captureSimState(settings *SimState, input CharStr settings.dfaState = dfaState } -func (this *LexerATNSimulator) addDFAEdge(from_ *DFAState, tk int, to *DFAState , cfgs *ATNConfigSet) *DFAState { - if (to == nil && cfgs != nil) { +func (this *LexerATNSimulator) addDFAEdge(from_ *DFAState, tk int, to *DFAState, cfgs *ATNConfigSet) *DFAState { + if to == nil && cfgs != nil { // leading to this call, ATNConfigSet.hasSemanticContext is used as a // marker indicating dynamic predicate evaluation makes this edge // dependent on the specific input sequence, so the static edge in the @@ -562,23 +562,23 @@ func (this *LexerATNSimulator) addDFAEdge(from_ *DFAState, tk int, to *DFAState to = this.addDFAState(cfgs) - if (suppressEdge) { + if suppressEdge { return to } } // add the edge - if (tk < LexerATNSimulatorMIN_DFA_EDGE || tk > LexerATNSimulatorMAX_DFA_EDGE) { + if tk < LexerATNSimulatorMIN_DFA_EDGE || tk > LexerATNSimulatorMAX_DFA_EDGE { // Only track edges within the DFA bounds return to } - if (LexerATNSimulatordebug) { + if LexerATNSimulatordebug { fmt.Println("EDGE " + from_.toString() + " -> " + to.toString() + " upon " + strconv.Itoa(tk)) } - if (from_.edges == nil) { + if from_.edges == nil { // make room for tokens 1..n and -1 masquerading as index 0 from_.edges = make([]*DFAState, LexerATNSimulatorMAX_DFA_EDGE-LexerATNSimulatorMIN_DFA_EDGE+1) } - from_.edges[tk - LexerATNSimulatorMIN_DFA_EDGE] = to // connect + from_.edges[tk-LexerATNSimulatorMIN_DFA_EDGE] = to // connect return to } @@ -597,12 +597,12 @@ func (this *LexerATNSimulator) addDFAState(configs *ATNConfigSet) *DFAState { _, ok := 
cfg.getState().(*RuleStopState) - if (ok) { + if ok { firstConfigWithRuleStopState = cfg break } } - if (firstConfigWithRuleStopState != nil) { + if firstConfigWithRuleStopState != nil { proposed.isAcceptState = true proposed.lexerActionExecutor = firstConfigWithRuleStopState.(*LexerATNConfig).lexerActionExecutor proposed.prediction = this.atn.ruleToTokenType[firstConfigWithRuleStopState.getState().getRuleIndex()] @@ -610,7 +610,7 @@ func (this *LexerATNSimulator) addDFAState(configs *ATNConfigSet) *DFAState { var hash = proposed.hashString() var dfa = this.decisionToDFA[this.mode] var existing = dfa.getStates()[hash] - if (existing != nil) { + if existing != nil { return existing } var newState = proposed @@ -628,12 +628,12 @@ func (this *LexerATNSimulator) getDFA(mode int) *DFA { // Get the text matched so far for the current token. func (this *LexerATNSimulator) getText(input CharStream) string { // index is first lookahead char, don't include. - return input.getTextFromInterval(NewInterval(this.startIndex, input.index() - 1)) + return input.getTextFromInterval(NewInterval(this.startIndex, input.index()-1)) } func (this *LexerATNSimulator) consume(input CharStream) { var curChar = input.LA(1) - if (curChar == int('\n')) { + if curChar == int('\n') { this.line += 1 this.column = 0 } else { @@ -643,11 +643,9 @@ func (this *LexerATNSimulator) consume(input CharStream) { } func (this *LexerATNSimulator) getTokenName(tt int) string { - if (tt == -1) { + if tt == -1 { return "EOF" } else { return "'" + string(tt) + "'" } } - - diff --git a/runtime/Go/src/antlr4/LexerAction.go b/runtime/Go/src/antlr4/LexerAction.go index e989306fb..f52388337 100644 --- a/runtime/Go/src/antlr4/LexerAction.go +++ b/runtime/Go/src/antlr4/LexerAction.go @@ -1,59 +1,60 @@ package antlr4 + import "strconv" const ( - LexerActionTypeCHANNEL = 0 //The type of a {@link LexerChannelAction} action. - LexerActionTypeCUSTOM = 1 //The type of a {@link LexerCustomAction} action. - LexerActionTypeMODE = 2 //The type of a {@link LexerModeAction} action. - LexerActionTypeMORE = 3 //The type of a {@link LexerMoreAction} action. - LexerActionTypePOP_MODE = 4 //The type of a {@link LexerPopModeAction} action. - LexerActionTypePUSH_MODE = 5 //The type of a {@link LexerPushModeAction} action. - LexerActionTypeSKIP = 6 //The type of a {@link LexerSkipAction} action. - LexerActionTypeTYPE = 7 //The type of a {@link LexerTypeAction} action. + LexerActionTypeCHANNEL = 0 //The type of a {@link LexerChannelAction} action. + LexerActionTypeCUSTOM = 1 //The type of a {@link LexerCustomAction} action. + LexerActionTypeMODE = 2 //The type of a {@link LexerModeAction} action. + LexerActionTypeMORE = 3 //The type of a {@link LexerMoreAction} action. + LexerActionTypePOP_MODE = 4 //The type of a {@link LexerPopModeAction} action. + LexerActionTypePUSH_MODE = 5 //The type of a {@link LexerPushModeAction} action. + LexerActionTypeSKIP = 6 //The type of a {@link LexerSkipAction} action. + LexerActionTypeTYPE = 7 //The type of a {@link LexerTypeAction} action. 
) type ILexerAction interface { - getActionType() int - getIsPositionDependent() bool - execute(lexer ILexer) - hashString() string - equals(other ILexerAction) bool + getActionType() int + getIsPositionDependent() bool + execute(lexer ILexer) + hashString() string + equals(other ILexerAction) bool } -type LexerAction struct { - actionType int - isPositionDependent bool +type LexerAction struct { + actionType int + isPositionDependent bool } func NewLexerAction(action int) *LexerAction { - la := new(LexerAction) - la.InitLexerAction(action) - return la + la := new(LexerAction) + la.InitLexerAction(action) + return la } -func (la *LexerAction) InitLexerAction(action int){ - la.actionType = action - la.isPositionDependent = false +func (la *LexerAction) InitLexerAction(action int) { + la.actionType = action + la.isPositionDependent = false } func (this *LexerAction) execute(lexer ILexer) { - panic("Not implemented") + panic("Not implemented") } func (this *LexerAction) getActionType() int { - return this.actionType + return this.actionType } func (this *LexerAction) getIsPositionDependent() bool { - return this.isPositionDependent + return this.isPositionDependent } func (this *LexerAction) hashString() string { - return strconv.Itoa(this.actionType) + return strconv.Itoa(this.actionType) } func (this *LexerAction) equals(other ILexerAction) bool { - return this == other + return this == other } // @@ -62,12 +63,12 @@ func (this *LexerAction) equals(other ILexerAction) bool { //
The {@code skip} command does not have any parameters, so this action is // implemented as a singleton instance exposed by {@link //INSTANCE}.
type LexerSkipAction struct { - *LexerAction + *LexerAction } func NewLexerSkipAction() *LexerSkipAction { - la := new(LexerSkipAction) - la.InitLexerAction(LexerActionTypeSKIP) + la := new(LexerSkipAction) + la.InitLexerAction(LexerActionTypeSKIP) return la } @@ -75,7 +76,7 @@ func NewLexerSkipAction() *LexerSkipAction { var LexerSkipActionINSTANCE = NewLexerSkipAction() func (this *LexerSkipAction) execute(lexer ILexer) { - lexer.skip() + lexer.skip() } func (this *LexerSkipAction) toString() string { @@ -87,18 +88,18 @@ func (this *LexerSkipAction) toString() string { type LexerTypeAction struct { *LexerAction - _type int + _type int } func NewLexerTypeAction(_type int) *LexerTypeAction { this := new(LexerTypeAction) - this.InitLexerAction( LexerActionTypeTYPE ) + this.InitLexerAction(LexerActionTypeTYPE) this._type = _type return this } func (this *LexerTypeAction) execute(lexer ILexer) { - lexer.setType( this._type ) + lexer.setType(this._type) } func (this *LexerTypeAction) hashString() string { @@ -106,17 +107,17 @@ func (this *LexerTypeAction) hashString() string { } func (this *LexerTypeAction) equals(other ILexerAction) bool { - if(this == other) { - return true - } else if _, ok := other.(*LexerTypeAction); !ok { - return false - } else { - return this._type == other.(*LexerTypeAction)._type - } + if this == other { + return true + } else if _, ok := other.(*LexerTypeAction); !ok { + return false + } else { + return this._type == other.(*LexerTypeAction)._type + } } func (this *LexerTypeAction) toString() string { - return "actionType(" + strconv.Itoa(this._type) + ")" + return "actionType(" + strconv.Itoa(this._type) + ")" } // Implements the {@code pushMode} lexer action by calling @@ -124,36 +125,36 @@ func (this *LexerTypeAction) toString() string { type LexerPushModeAction struct { *LexerAction - mode int + mode int } func NewLexerPushModeAction(mode int) *LexerPushModeAction { - this := new(LexerPushModeAction) - this.InitLexerAction( LexerActionTypePUSH_MODE ) + this := new(LexerPushModeAction) + this.InitLexerAction(LexerActionTypePUSH_MODE) - this.mode = mode - return this + this.mode = mode + return this } //
This action is implemented by calling {@link Lexer//pushMode} with the // value provided by {@link //getMode}.
func (this *LexerPushModeAction) execute(lexer ILexer) { - lexer.pushMode(this.mode) + lexer.pushMode(this.mode) } func (this *LexerPushModeAction) hashString() string { - return strconv.Itoa(this.actionType) + strconv.Itoa(this.mode) + return strconv.Itoa(this.actionType) + strconv.Itoa(this.mode) } func (this *LexerPushModeAction) equals(other ILexerAction) bool { - if (this == other) { - return true - } else if _, ok := other.(*LexerPushModeAction); !ok { - return false - } else { - return this.mode == other.(*LexerPushModeAction).mode - } + if this == other { + return true + } else if _, ok := other.(*LexerPushModeAction); !ok { + return false + } else { + return this.mode == other.(*LexerPushModeAction).mode + } } func (this *LexerPushModeAction) toString() string { @@ -165,14 +166,14 @@ func (this *LexerPushModeAction) toString() string { //
The {@code popMode} command does not have any parameters, so this action is // implemented as a singleton instance exposed by {@link //INSTANCE}.
type LexerPopModeAction struct { - *LexerAction + *LexerAction } func NewLexerPopModeAction() *LexerPopModeAction { - this := new(LexerPopModeAction) + this := new(LexerPopModeAction) - this.InitLexerAction( LexerActionTypePOP_MODE ) + this.InitLexerAction(LexerActionTypePOP_MODE) return this } @@ -181,7 +182,7 @@ var LexerPopModeActionINSTANCE = NewLexerPopModeAction() //
This action is implemented by calling {@link Lexer//popMode}.
func (this *LexerPopModeAction) execute(lexer ILexer) { - lexer.popMode() + lexer.popMode() } func (this *LexerPopModeAction) toString() string { @@ -194,12 +195,12 @@ func (this *LexerPopModeAction) toString() string { // implemented as a singleton instance exposed by {@link //INSTANCE}.
type LexerMoreAction struct { - *LexerAction + *LexerAction } func NewLexerMoreAction() *LexerModeAction { - this := new(LexerModeAction) - this.InitLexerAction( LexerActionTypeMORE ) + this := new(LexerModeAction) + this.InitLexerAction(LexerActionTypeMORE) return this } @@ -208,33 +209,32 @@ var LexerMoreActionINSTANCE = NewLexerMoreAction() //
This action is implemented by calling {@link Lexer//more}.
func (this *LexerMoreAction) execute(lexer ILexer) { - lexer.more() + lexer.more() } func (this *LexerMoreAction) toString() string { - return "more" + return "more" } - // Implements the {@code mode} lexer action by calling {@link Lexer//mode} with // the assigned mode. type LexerModeAction struct { *LexerAction - mode int + mode int } func NewLexerModeAction(mode int) *LexerModeAction { this := new(LexerModeAction) - this.InitLexerAction( LexerActionTypeMODE ) - this.mode = mode - return this + this.InitLexerAction(LexerActionTypeMODE) + this.mode = mode + return this } //
This action is implemented by calling {@link Lexer//mode} with the // value provided by {@link //getMode}.
func (this *LexerModeAction) execute(lexer ILexer) { - lexer.mode(this.mode) + lexer.mode(this.mode) } func (this *LexerModeAction) hashString() string { @@ -242,17 +242,17 @@ func (this *LexerModeAction) hashString() string { } func (this *LexerModeAction) equals(other ILexerAction) bool { - if (this == other) { - return true - } else if _, ok := other.(*LexerModeAction); !ok { - return false - } else { - return this.mode == other.(*LexerModeAction).mode - } + if this == other { + return true + } else if _, ok := other.(*LexerModeAction); !ok { + return false + } else { + return this.mode == other.(*LexerModeAction).mode + } } func (this *LexerModeAction) toString() string { - return "mode(" + strconv.Itoa(this.mode) + ")" + return "mode(" + strconv.Itoa(this.mode) + ")" } // Executes a custom lexer action by calling {@link Recognizer//action} with the @@ -274,36 +274,36 @@ func (this *LexerModeAction) toString() string { type LexerCustomAction struct { *LexerAction - ruleIndex, actionIndex int + ruleIndex, actionIndex int } func NewLexerCustomAction(ruleIndex, actionIndex int) *LexerCustomAction { this := new(LexerCustomAction) - this.InitLexerAction( LexerActionTypeCUSTOM ) - this.ruleIndex = ruleIndex - this.actionIndex = actionIndex - this.isPositionDependent = true - return this + this.InitLexerAction(LexerActionTypeCUSTOM) + this.ruleIndex = ruleIndex + this.actionIndex = actionIndex + this.isPositionDependent = true + return this } //
Custom actions are implemented by calling {@link Lexer//action} with the // appropriate rule and action indexes.
func (this *LexerCustomAction) execute(lexer ILexer) { - lexer.action(nil, this.ruleIndex, this.actionIndex) + lexer.action(nil, this.ruleIndex, this.actionIndex) } func (this *LexerCustomAction) hashString() string { - return strconv.Itoa(this.actionType) + strconv.Itoa(this.ruleIndex) + strconv.Itoa(this.actionIndex) + return strconv.Itoa(this.actionType) + strconv.Itoa(this.ruleIndex) + strconv.Itoa(this.actionIndex) } func (this *LexerCustomAction) equals(other ILexerAction) bool { - if (this == other) { - return true - } else if _, ok := other.(*LexerCustomAction); !ok { - return false - } else { - return this.ruleIndex == other.(*LexerCustomAction).ruleIndex && this.actionIndex == other.(*LexerCustomAction).actionIndex - } + if this == other { + return true + } else if _, ok := other.(*LexerCustomAction); !ok { + return false + } else { + return this.ruleIndex == other.(*LexerCustomAction).ruleIndex && this.actionIndex == other.(*LexerCustomAction).actionIndex + } } // Implements the {@code channel} lexer action by calling @@ -313,38 +313,38 @@ func (this *LexerCustomAction) equals(other ILexerAction) bool { type LexerChannelAction struct { *LexerAction - channel int + channel int } func NewLexerChannelAction(channel int) *LexerChannelAction { this := new(LexerChannelAction) - this.InitLexerAction( LexerActionTypeCHANNEL ) - this.channel = channel - return this + this.InitLexerAction(LexerActionTypeCHANNEL) + this.channel = channel + return this } //
This action is implemented by calling {@link Lexer//setChannel} with the // value provided by {@link //getChannel}.
func (this *LexerChannelAction) execute(lexer ILexer) { - lexer.setChannel(this.channel) + lexer.setChannel(this.channel) } func (this *LexerChannelAction) hashString() string { - return strconv.Itoa(this.actionType) + strconv.Itoa(this.channel) + return strconv.Itoa(this.actionType) + strconv.Itoa(this.channel) } func (this *LexerChannelAction) equals(other ILexerAction) bool { - if (this == other) { - return true - } else if _, ok := other.(*LexerChannelAction); !ok { - return false - } else { - return this.channel == other.(*LexerChannelAction).channel - } + if this == other { + return true + } else if _, ok := other.(*LexerChannelAction); !ok { + return false + } else { + return this.channel == other.(*LexerChannelAction).channel + } } func (this *LexerChannelAction) toString() string { - return "channel(" + strconv.Itoa(this.channel) + ")" + return "channel(" + strconv.Itoa(this.channel) + ")" } // This implementation of {@link LexerAction} is used for tracking input offsets @@ -370,51 +370,40 @@ func (this *LexerChannelAction) toString() string { type LexerIndexedCustomAction struct { *LexerAction - offset int - lexerAction ILexerAction - isPositionDependent bool + offset int + lexerAction ILexerAction + isPositionDependent bool } func NewLexerIndexedCustomAction(offset int, lexerAction ILexerAction) *LexerIndexedCustomAction { - this := new(LexerIndexedCustomAction) - this.InitLexerAction( lexerAction.getActionType() ) + this := new(LexerIndexedCustomAction) + this.InitLexerAction(lexerAction.getActionType()) - this.offset = offset - this.lexerAction = lexerAction - this.isPositionDependent = true + this.offset = offset + this.lexerAction = lexerAction + this.isPositionDependent = true - return this + return this } //
This method calls {@link //execute} on the result of {@link //getAction} // using the provided {@code lexer}.
func (this *LexerIndexedCustomAction) execute(lexer ILexer) { - // assume the input stream position was properly set by the calling code - this.lexerAction.execute(lexer) + // assume the input stream position was properly set by the calling code + this.lexerAction.execute(lexer) } func (this *LexerIndexedCustomAction) hashString() string { - return strconv.Itoa(this.actionType) + strconv.Itoa(this.offset) + this.lexerAction.hashString() + return strconv.Itoa(this.actionType) + strconv.Itoa(this.offset) + this.lexerAction.hashString() } func (this *LexerIndexedCustomAction) equals(other ILexerAction) bool { - if (this == other) { - return true - } else if _, ok := other.(*LexerIndexedCustomAction); !ok { - return false - } else { - return this.offset == other.(*LexerIndexedCustomAction).offset && this.lexerAction == other.(*LexerIndexedCustomAction).lexerAction - } + if this == other { + return true + } else if _, ok := other.(*LexerIndexedCustomAction); !ok { + return false + } else { + return this.offset == other.(*LexerIndexedCustomAction).offset && this.lexerAction == other.(*LexerIndexedCustomAction).lexerAction + } } - - - - - - - - - - - diff --git a/runtime/Go/src/antlr4/LexerActionExecutor.go b/runtime/Go/src/antlr4/LexerActionExecutor.go index acc57af01..4fe8486d7 100644 --- a/runtime/Go/src/antlr4/LexerActionExecutor.go +++ b/runtime/Go/src/antlr4/LexerActionExecutor.go @@ -8,13 +8,13 @@ package antlr4 // not cause bloating of the {@link DFA} created for the lexer.
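Before the executor defined below, a minimal sketch tying the ILexerAction pieces above together: LexerNoOpAction is an invented type used only to illustrate the contract (type tag, position dependence, execute, hash/equality), and it allocates the embedded *LexerAction explicitly before initializing it.

package antlr4

// LexerNoOpAction is hypothetical and exists only as an illustration of a
// concrete ILexerAction: it inherits getActionType, getIsPositionDependent,
// hashString and equals from the embedded *LexerAction and overrides execute.
type LexerNoOpAction struct {
	*LexerAction
}

func NewLexerNoOpAction() *LexerNoOpAction {
	a := new(LexerNoOpAction)
	a.LexerAction = NewLexerAction(LexerActionTypeCUSTOM) // reuse an existing type tag
	return a
}

// execute deliberately does nothing; real actions call back into the lexer,
// e.g. lexer.skip() or lexer.setChannel(...).
func (this *LexerNoOpAction) execute(lexer ILexer) {}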
type LexerActionExecutor struct { - lexerActions []ILexerAction + lexerActions []ILexerAction cachedHashString string } func NewLexerActionExecutor(lexerActions []ILexerAction) *LexerActionExecutor { - if (lexerActions == nil){ + if lexerActions == nil { lexerActions = make([]ILexerAction, 0) } @@ -49,13 +49,13 @@ func NewLexerActionExecutor(lexerActions []ILexerAction) *LexerActionExecutor { // @return A {@link LexerActionExecutor} for executing the combine actions // of {@code lexerActionExecutor} and {@code lexerAction}. func LexerActionExecutorappend(lexerActionExecutor *LexerActionExecutor, lexerAction ILexerAction) *LexerActionExecutor { - if (lexerActionExecutor == nil) { + if lexerActionExecutor == nil { return NewLexerActionExecutor([]ILexerAction{lexerAction}) } - var lexerActions = append(lexerActionExecutor.lexerActions, lexerAction ) + var lexerActions = append(lexerActionExecutor.lexerActions, lexerAction) -// var lexerActions = lexerActionExecutor.lexerActions.concat([ lexerAction ]) + // var lexerActions = lexerActionExecutor.lexerActions.concat([ lexerAction ]) return NewLexerActionExecutor(lexerActions) } @@ -91,11 +91,11 @@ func (this *LexerActionExecutor) fixOffsetBeforeMatch(offset int) *LexerActionEx var updatedLexerActions []ILexerAction = nil for i := 0; i < len(this.lexerActions); i++ { _, ok := this.lexerActions[i].(*LexerIndexedCustomAction) - if (this.lexerActions[i].getIsPositionDependent() && !ok){ - if (updatedLexerActions == nil) { - updatedLexerActions = make([]ILexerAction,0) + if this.lexerActions[i].getIsPositionDependent() && !ok { + if updatedLexerActions == nil { + updatedLexerActions = make([]ILexerAction, 0) - for _,a:= range this.lexerActions { + for _, a := range this.lexerActions { updatedLexerActions = append(updatedLexerActions, a) } } @@ -103,7 +103,7 @@ func (this *LexerActionExecutor) fixOffsetBeforeMatch(offset int) *LexerActionEx updatedLexerActions[i] = NewLexerIndexedCustomAction(offset, this.lexerActions[i]) } } - if (updatedLexerActions == nil) { + if updatedLexerActions == nil { return this } else { return NewLexerActionExecutor(updatedLexerActions) @@ -132,8 +132,8 @@ func (this *LexerActionExecutor) execute(lexer *Lexer, input CharStream, startIn var requiresSeek = false var stopIndex = input.index() - defer func(){ - if (requiresSeek) { + defer func() { + if requiresSeek { input.seek(stopIndex) } }() @@ -145,7 +145,7 @@ func (this *LexerActionExecutor) execute(lexer *Lexer, input CharStream, startIn input.seek(startIndex + offset) lexerAction = la.lexerAction requiresSeek = (startIndex + offset) != stopIndex - } else if (lexerAction.getIsPositionDependent()) { + } else if lexerAction.getIsPositionDependent() { input.seek(stopIndex) requiresSeek = false } @@ -158,14 +158,12 @@ func (this *LexerActionExecutor) hashString() string { } func (this *LexerActionExecutor) equals(other interface{}) bool { - if (this == other) { + if this == other { return true } else if _, ok := other.(*LexerActionExecutor); !ok { return false } else { return this.cachedHashString == other.(*LexerActionExecutor).cachedHashString && - &this.lexerActions == &other.(*LexerActionExecutor).lexerActions + &this.lexerActions == &other.(*LexerActionExecutor).lexerActions } } - - diff --git a/runtime/Go/src/antlr4/Parser.go b/runtime/Go/src/antlr4/Parser.go index bd4b25093..42a101977 100644 --- a/runtime/Go/src/antlr4/Parser.go +++ b/runtime/Go/src/antlr4/Parser.go @@ -2,7 +2,7 @@ package antlr4 import ( "fmt" - ) +) type TraceListener struct { parser *Parser @@ 
-10,7 +10,7 @@ type TraceListener struct { func NewTraceListener(parser *Parser) *TraceListener { tl := new(TraceListener) - tl.parser = parser + tl.parser = parser return tl } @@ -21,7 +21,7 @@ func (this *TraceListener) enterEveryRule(ctx IParserRuleContext) { fmt.Println("enter " + this.parser.getRuleNames()[ctx.getRuleIndex()] + ", LT(1)=" + this.parser._input.LT(1).text()) } -func (this *TraceListener) visitTerminal( node TerminalNode ) { +func (this *TraceListener) visitTerminal(node TerminalNode) { fmt.Println("consume " + fmt.Sprint(node.getSymbol()) + " rule " + this.parser.getRuleNames()[this.parser._ctx.getRuleIndex()]) } @@ -46,23 +46,23 @@ type IParser interface { isExpectedToken(symbol int) bool getPrecedence() int getRuleInvocationStack(IParserRuleContext) []string - } type Parser struct { *Recognizer - _input TokenStream - _errHandler IErrorStrategy - _precedenceStack IntStack - _ctx IParserRuleContext - buildParseTrees bool - _tracer *TraceListener - _parseListeners []ParseTreeListener - _syntaxErrors int - _interp *ParserATNSimulator + Interpreter *ParserATNSimulator - literalNames []string + _input TokenStream + _errHandler IErrorStrategy + _precedenceStack IntStack + _ctx IParserRuleContext + buildParseTrees bool + _tracer *TraceListener + _parseListeners []ParseTreeListener + _syntaxErrors int + + literalNames []string symbolicNames []string } @@ -72,6 +72,13 @@ func NewParser(input TokenStream) *Parser { p := new(Parser) + p.InitParser(input) + + return p +} + +func (p *Parser) InitParser(input TokenStream) { + p.InitRecognizer() // The input stream. @@ -100,8 +107,6 @@ func NewParser(input TokenStream) *Parser { // incremented each time {@link //notifyErrorListeners} is called. p._syntaxErrors = 0 p.setInputStream(input) - - return p } // p.field maps from the serialized ATN string to the deserialized {@link @@ -114,7 +119,7 @@ var bypassAltsAtnCache = make(map[string]int) // reset the parser's state// func (p *Parser) reset() { - if (p._input != nil) { + if p._input != nil { p._input.seek(0) } p._errHandler.reset(p) @@ -123,8 +128,8 @@ func (p *Parser) reset() { p.setTrace(nil) p._precedenceStack = make([]int, 0) p._precedenceStack.Push(0) - if (p._interp != nil) { - p._interp.reset() + if p.Interpreter != nil { + p.Interpreter.reset() } } @@ -147,12 +152,12 @@ func (p *Parser) reset() { func (p *Parser) match(ttype int) *Token { var t = p.getCurrentToken() - if (t.tokenType == ttype) { + if t.tokenType == ttype { p._errHandler.reportMatch(p) p.consume() } else { t = p._errHandler.recoverInline(p) - if (p.buildParseTrees && t.tokenIndex == -1) { + if p.buildParseTrees && t.tokenIndex == -1 { // we must have conjured up a Newtoken during single token // insertion // if it's not the current symbol @@ -161,6 +166,7 @@ func (p *Parser) match(ttype int) *Token { } return t } + // Match current input symbol as a wildcard. If the symbol type matches // (i.e. has a value greater than 0), {@link ANTLRErrorStrategy//reportMatch} // and {@link //consume} are called to complete the match process. 
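The match/matchWildcard pair above is what generated rule methods call. A schematic of such a rule body, assuming it is compiled inside the antlr4 package (the methods are unexported in this snapshot); the state number 10, rule index 2 and token type 5 are placeholders, not real grammar values.

package antlr4

// exampleRule mimics the calls a generated rule method makes around its body.
func exampleRule(p *Parser, localctx IParserRuleContext) {
	p.enterRule(localctx, 10, 2) // set p.state, install localctx, record the start token
	defer p.exitRule()           // record the stop token and pop back to the parent context

	p.match(5)        // consume the expected token type or defer to the error strategy
	p.matchWildcard() // consume any token with a type greater than 0
}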
@@ -179,12 +185,12 @@ func (p *Parser) match(ttype int) *Token { func (p *Parser) matchWildcard() *Token { var t = p.getCurrentToken() - if (t.tokenType > 0) { + if t.tokenType > 0 { p._errHandler.reportMatch(p) p.consume() } else { t = p._errHandler.recoverInline(p) - if (p.buildParseTrees && t.tokenIndex == -1) { + if p.buildParseTrees && t.tokenIndex == -1 { // we must have conjured up a Newtoken during single token // insertion // if it's not the current symbol @@ -199,8 +205,8 @@ func (p *Parser) getParserRuleContext() IParserRuleContext { } func (p *Parser) getParseListeners() []ParseTreeListener { - if (p._parseListeners == nil){ - return make([]ParseTreeListener,0) + if p._parseListeners == nil { + return make([]ParseTreeListener, 0) } return p._parseListeners } @@ -234,10 +240,10 @@ func (p *Parser) getParseListeners() []ParseTreeListener { // @panics nilPointerException if {@code} listener is {@code nil} // func (p *Parser) addParseListener(listener ParseTreeListener) { - if (listener == nil) { + if listener == nil { panic("listener") } - if (p._parseListeners == nil) { + if p._parseListeners == nil { p._parseListeners = make([]ParseTreeListener, 0) } p._parseListeners = append(p._parseListeners, listener) @@ -252,15 +258,15 @@ func (p *Parser) addParseListener(listener ParseTreeListener) { // func (p *Parser) removeParseListener(listener ParseTreeListener) { panic("Not implemented!") -// if (p._parseListeners != nil) { -// var idx = p._parseListeners.indexOf(listener) -// if (idx >= 0) { -// p._parseListeners.splice(idx, 1) -// } -// if (len(p._parseListeners) == 0) { -// p._parseListeners = nil -// } -// } + // if (p._parseListeners != nil) { + // var idx = p._parseListeners.indexOf(listener) + // if (idx >= 0) { + // p._parseListeners.splice(idx, 1) + // } + // if (len(p._parseListeners) == 0) { + // p._parseListeners = nil + // } + // } } // Remove all parse listeners. @@ -270,9 +276,9 @@ func (p *Parser) removeParseListeners() { // Notify any parse listeners of an enter rule event. 
func (p *Parser) triggerEnterRuleEvent() { - if (p._parseListeners != nil) { - var ctx = p._ctx - for _,listener := range p._parseListeners { + if p._parseListeners != nil { + var ctx = p._ctx + for _, listener := range p._parseListeners { listener.enterEveryRule(ctx) ctx.enterRule(listener) } @@ -285,9 +291,9 @@ func (p *Parser) triggerEnterRuleEvent() { // @see //addParseListener // func (p *Parser) triggerExitRuleEvent() { - if (p._parseListeners != nil) { + if p._parseListeners != nil { // reverse order walk of listeners - ctx := p._ctx + ctx := p._ctx l := len(p._parseListeners) - 1 for i := range p._parseListeners { @@ -307,11 +313,11 @@ func (this *Parser) getSymbolicNames() []string { } func (this *Parser) getInterpreter() *ParserATNSimulator { - return this._interp + return this.Interpreter } func (this *Parser) getATN() *ATN { - return this._interp.atn + return this.Interpreter.atn } func (p *Parser) getTokenFactory() TokenFactory { @@ -320,7 +326,7 @@ func (p *Parser) getTokenFactory() TokenFactory { // Tell our token source and error strategy about a Newway to create tokens.// func (p *Parser) setTokenFactory(factory TokenFactory) { - p._input.getTokenSource().setTokenFactory( factory ) + p._input.getTokenSource().setTokenFactory(factory) } // The ATN with bypass alternatives is expensive to create so we create it @@ -334,18 +340,18 @@ func (p *Parser) getATNWithBypassAlts() { // TODO panic("Not implemented!") -// var serializedAtn = p.getSerializedATN() -// if (serializedAtn == nil) { -// panic("The current parser does not support an ATN with bypass alternatives.") -// } -// var result = p.bypassAltsAtnCache[serializedAtn] -// if (result == nil) { -// var deserializationOptions = NewATNDeserializationOptions(nil) -// deserializationOptions.generateRuleBypassTransitions = true -// result = NewATNDeserializer(deserializationOptions).deserialize(serializedAtn) -// p.bypassAltsAtnCache[serializedAtn] = result -// } -// return result + // var serializedAtn = p.getSerializedATN() + // if (serializedAtn == nil) { + // panic("The current parser does not support an ATN with bypass alternatives.") + // } + // var result = p.bypassAltsAtnCache[serializedAtn] + // if (result == nil) { + // var deserializationOptions = NewATNDeserializationOptions(nil) + // deserializationOptions.generateRuleBypassTransitions = true + // result = NewATNDeserializer(deserializationOptions).deserialize(serializedAtn) + // p.bypassAltsAtnCache[serializedAtn] = result + // } + // return result } // The preferred method of getting a tree pattern. 
For example, here's a @@ -362,21 +368,21 @@ func (p *Parser) getATNWithBypassAlts() { func (p *Parser) compileParseTreePattern(pattern, patternRuleIndex, lexer ILexer) { panic("NewParseTreePatternMatcher not implemented!") -// -// if (lexer == nil) { -// if (p.getTokenStream() != nil) { -// var tokenSource = p.getTokenStream().getTokenSource() -// if _, ok := tokenSource.(ILexer); ok { -// lexer = tokenSource -// } -// } -// } -// if (lexer == nil) { -// panic("Parser can't discover a lexer to use") -// } + // + // if (lexer == nil) { + // if (p.getTokenStream() != nil) { + // var tokenSource = p.getTokenStream().getTokenSource() + // if _, ok := tokenSource.(ILexer); ok { + // lexer = tokenSource + // } + // } + // } + // if (lexer == nil) { + // panic("Parser can't discover a lexer to use") + // } -// var m = NewParseTreePatternMatcher(lexer, p) -// return m.compile(pattern, patternRuleIndex) + // var m = NewParseTreePatternMatcher(lexer, p) + // return m.compile(pattern, patternRuleIndex) } func (p *Parser) getInputStream() CharStream { @@ -406,7 +412,7 @@ func (p *Parser) getCurrentToken() *Token { } func (p *Parser) notifyErrorListeners(msg string, offendingToken *Token, err IRecognitionException) { - if (offendingToken == nil) { + if offendingToken == nil { offendingToken = p.getCurrentToken() } p._syntaxErrors += 1 @@ -418,28 +424,28 @@ func (p *Parser) notifyErrorListeners(msg string, offendingToken *Token, err IRe func (p *Parser) consume() *Token { var o = p.getCurrentToken() - if (o.tokenType != TokenEOF) { + if o.tokenType != TokenEOF { p.getInputStream().consume() } var hasListener = p._parseListeners != nil && len(p._parseListeners) > 0 - if (p.buildParseTrees || hasListener) { - if (p._errHandler.inErrorRecoveryMode(p)) { + if p.buildParseTrees || hasListener { + if p._errHandler.inErrorRecoveryMode(p) { var node = p._ctx.addErrorNode(o) - if (p._parseListeners != nil) { + if p._parseListeners != nil { for _, l := range p._parseListeners { - l.visitErrorNode(node); + l.visitErrorNode(node) } } } else { - node := p._ctx.addTokenNode(o); - if (p._parseListeners != nil) { + node := p._ctx.addTokenNode(o) + if p._parseListeners != nil { for _, l := range p._parseListeners { l.visitTerminal(node) } } } -// node.invokingState = p.state + // node.invokingState = p.state } return o @@ -447,27 +453,27 @@ func (p *Parser) consume() *Token { func (p *Parser) addContextToParseTree() { // add current context to parent if we have a parent - if (p._ctx.getParent() != nil) { - p._ctx.getParent().setChildren( append(p._ctx.getParent().getChildren(), p._ctx) ) + if p._ctx.getParent() != nil { + p._ctx.getParent().setChildren(append(p._ctx.getParent().getChildren(), p._ctx)) } } func (p *Parser) enterRule(localctx IParserRuleContext, state, ruleIndex int) { p.state = state p._ctx = localctx - p._ctx.setStart( p._input.LT(1) ) - if (p.buildParseTrees) { + p._ctx.setStart(p._input.LT(1)) + if p.buildParseTrees { p.addContextToParseTree() } - if (p._parseListeners != nil) { + if p._parseListeners != nil { p.triggerEnterRuleEvent() } } func (p *Parser) exitRule() { - p._ctx.setStop( p._input.LT(-1) ) + p._ctx.setStop(p._input.LT(-1)) // trigger event on _ctx, before it reverts to parent - if (p._parseListeners != nil) { + if p._parseListeners != nil { p.triggerExitRuleEvent() } p.state = p._ctx.getInvokingState() @@ -477,8 +483,8 @@ func (p *Parser) exitRule() { func (p *Parser) enterOuterAlt(localctx IParserRuleContext, altNum int) { // if we have Newlocalctx, make sure we replace existing ctx // 
that is previous child of parse tree - if (p.buildParseTrees && p._ctx != localctx) { - if (p._ctx.getParent() != nil) { + if p.buildParseTrees && p._ctx != localctx { + if p._ctx.getParent() != nil { p._ctx.getParent().(IParserRuleContext).removeLastChild() p._ctx.getParent().(IParserRuleContext).addChild(localctx) } @@ -492,10 +498,10 @@ func (p *Parser) enterOuterAlt(localctx IParserRuleContext, altNum int) { // the parser context is not nested within a precedence rule. func (p *Parser) getPrecedence() int { - if ( len(p._precedenceStack) == 0) { + if len(p._precedenceStack) == 0 { return -1 } else { - return p._precedenceStack[ len(p._precedenceStack) -1] + return p._precedenceStack[len(p._precedenceStack)-1] } } @@ -503,10 +509,10 @@ func (p *Parser) enterRecursionRule(localctx IParserRuleContext, state, ruleInde p.state = state p._precedenceStack.Push(precedence) p._ctx = localctx - p._ctx.setStart( p._input.LT(1) ) - if (p._parseListeners != nil) { + p._ctx.setStart(p._input.LT(1)) + if p._parseListeners != nil { p.triggerEnterRuleEvent() // simulates rule entry for - // left-recursive rules + // left-recursive rules } } @@ -515,28 +521,28 @@ func (p *Parser) enterRecursionRule(localctx IParserRuleContext, state, ruleInde func (p *Parser) pushNewRecursionContext(localctx IParserRuleContext, state, ruleIndex int) { var previous = p._ctx - previous.setParent( localctx ) - previous.setInvokingState( state ) - previous.setStart( p._input.LT(-1) ) + previous.setParent(localctx) + previous.setInvokingState(state) + previous.setStart(p._input.LT(-1)) p._ctx = localctx - p._ctx.setStart( previous.getStart() ) - if (p.buildParseTrees) { + p._ctx.setStart(previous.getStart()) + if p.buildParseTrees { p._ctx.addChild(previous) } - if (p._parseListeners != nil) { + if p._parseListeners != nil { p.triggerEnterRuleEvent() // simulates rule entry for - // left-recursive rules + // left-recursive rules } } func (p *Parser) unrollRecursionContexts(parentCtx IParserRuleContext) { p._precedenceStack.Pop() - p._ctx.setStop( p._input.LT(-1) ) + p._ctx.setStop(p._input.LT(-1)) var retCtx = p._ctx // save current ctx (return value) // unroll so _ctx is as it was before call to recursive method - if (p._parseListeners != nil) { - for (p._ctx != parentCtx) { + if p._parseListeners != nil { + for p._ctx != parentCtx { p.triggerExitRuleEvent() p._ctx = p._ctx.getParent().(IParserRuleContext) } @@ -544,8 +550,8 @@ func (p *Parser) unrollRecursionContexts(parentCtx IParserRuleContext) { p._ctx = parentCtx } // hook into tree - retCtx.setParent( parentCtx ) - if (p.buildParseTrees && parentCtx != nil) { + retCtx.setParent(parentCtx) + if p.buildParseTrees && parentCtx != nil { // add return ctx into invoking rule's tree parentCtx.addChild(retCtx) } @@ -553,8 +559,8 @@ func (p *Parser) unrollRecursionContexts(parentCtx IParserRuleContext) { func (p *Parser) getInvokingContext(ruleIndex int) IParserRuleContext { var ctx = p._ctx - for (ctx != nil) { - if (ctx.getRuleIndex() == ruleIndex) { + for ctx != nil { + if ctx.getRuleIndex() == ruleIndex { return ctx } ctx = ctx.getParent().(IParserRuleContext) @@ -563,7 +569,7 @@ func (p *Parser) getInvokingContext(ruleIndex int) IParserRuleContext { } func (p *Parser) precpred(localctx IRuleContext, precedence int) bool { - return precedence >= p._precedenceStack[ len(p._precedenceStack) -1] + return precedence >= p._precedenceStack[len(p._precedenceStack)-1] } func (p *Parser) inContext(context IParserRuleContext) bool { @@ -586,26 +592,26 @@ func (p *Parser) 
inContext(context IParserRuleContext) bool { // the ATN, otherwise {@code false}. func (p *Parser) isExpectedToken(symbol int) bool { - var atn *ATN = p._interp.atn + var atn *ATN = p.Interpreter.atn var ctx = p._ctx var s = atn.states[p.state] - var following = atn.nextTokens(s,nil) - if (following.contains(symbol)) { + var following = atn.nextTokens(s, nil) + if following.contains(symbol) { return true } - if (!following.contains(TokenEpsilon)) { + if !following.contains(TokenEpsilon) { return false } - for (ctx != nil && ctx.getInvokingState() >= 0 && following.contains(TokenEpsilon)) { + for ctx != nil && ctx.getInvokingState() >= 0 && following.contains(TokenEpsilon) { var invokingState = atn.states[ctx.getInvokingState()] var rt = invokingState.getTransitions()[0] - following = atn.nextTokens(rt.(*RuleTransition).followState,nil) - if (following.contains(symbol)) { + following = atn.nextTokens(rt.(*RuleTransition).followState, nil) + if following.contains(symbol) { return true } ctx = ctx.getParent().(IParserRuleContext) } - if (following.contains(TokenEpsilon) && symbol == TokenEOF) { + if following.contains(TokenEpsilon) && symbol == TokenEOF { return true } else { return false @@ -619,19 +625,19 @@ func (p *Parser) isExpectedToken(symbol int) bool { // @see ATN//getExpectedTokens(int, RuleContext) // func (p *Parser) getExpectedTokens() *IntervalSet { - return p._interp.atn.getExpectedTokens(p.state, p._ctx) + return p.Interpreter.atn.getExpectedTokens(p.state, p._ctx) } func (p *Parser) getExpectedTokensWithinCurrentRule() *IntervalSet { - var atn = p._interp.atn + var atn = p.Interpreter.atn var s = atn.states[p.state] - return atn.nextTokens(s,nil) + return atn.nextTokens(s, nil) } // Get a rule's index (i.e., {@code RULE_ruleName} field) or -1 if not found.// func (p *Parser) getRuleIndex(ruleName string) int { var ruleIndex, ok = p.getRuleIndexMap()[ruleName] - if (ok) { + if ok { return ruleIndex } else { return -1 @@ -646,45 +652,45 @@ func (p *Parser) getRuleIndex(ruleName string) int { // this very useful for error messages. 
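The rule-invocation stack computed in getRuleInvocationStack below is just a walk up the parent pointers of the current rule context, mapping each rule index to its name. A minimal standalone sketch of that walk, using a stub ruleCtx type in place of IParserRuleContext and an invented three-rule name table (both purely illustrative, not runtime types):

package main

import "fmt"

// ruleCtx is a stand-in for IParserRuleContext: just enough to show the walk.
type ruleCtx struct {
	ruleIndex int
	parent    *ruleCtx
}

// ruleInvocationStack walks the parent chain, mapping each rule index to its
// name ("n/a" when the index is unknown); the bounds check is added for the toy.
func ruleInvocationStack(ctx *ruleCtx, ruleNames []string) []string {
	stack := make([]string, 0)
	for ctx != nil {
		if ctx.ruleIndex < 0 || ctx.ruleIndex >= len(ruleNames) {
			stack = append(stack, "n/a")
		} else {
			stack = append(stack, ruleNames[ctx.ruleIndex])
		}
		ctx = ctx.parent
	}
	return stack
}

func main() {
	names := []string{"prog", "stat", "expr"}
	// expr was invoked from stat, which was invoked from prog
	ctx := &ruleCtx{ruleIndex: 2, parent: &ruleCtx{ruleIndex: 1, parent: &ruleCtx{ruleIndex: 0}}}
	fmt.Println(ruleInvocationStack(ctx, names)) // [expr stat prog]
}

The innermost rule comes first, which is what makes the stack read naturally in error messages.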
func (this *Parser) getRuleInvocationStack(p IParserRuleContext) []string { - if (p == nil) { - p = this._ctx; + if p == nil { + p = this._ctx } - var stack = make([]string,0) - for (p != nil) { + var stack = make([]string, 0) + for p != nil { // compute what follows who invoked us - var ruleIndex = p.getRuleIndex(); - if (ruleIndex < 0) { + var ruleIndex = p.getRuleIndex() + if ruleIndex < 0 { stack = append(stack, "n/a") } else { - stack = append(stack, this.getRuleNames()[ruleIndex]); + stack = append(stack, this.getRuleNames()[ruleIndex]) } - p = p.getParent().(IParserRuleContext); + p = p.getParent().(IParserRuleContext) } - return stack; -}; + return stack +} // For debugging and other purposes.// func (p *Parser) getDFAStrings() { panic("dumpDFA Not implemented!") -// return p._interp.decisionToDFA.toString() + // return p._interp.decisionToDFA.toString() } // For debugging and other purposes.// func (p *Parser) dumpDFA() { panic("dumpDFA Not implemented!") -// var seenOne = false -// for i := 0; i < p._interp.decisionToDFA.length; i++ { -// var dfa = p._interp.decisionToDFA[i] -// if ( len(dfa.states) > 0) { -// if (seenOne) { -// fmt.Println() -// } -// p.printer.println("Decision " + dfa.decision + ":") -// p.printer.print(dfa.toString(p.literalNames, p.symbolicNames)) -// seenOne = true -// } -// } + // var seenOne = false + // for i := 0; i < p._interp.decisionToDFA.length; i++ { + // var dfa = p._interp.decisionToDFA[i] + // if ( len(dfa.states) > 0) { + // if (seenOne) { + // fmt.Println() + // } + // p.printer.println("Decision " + dfa.decision + ":") + // p.printer.print(dfa.toString(p.literalNames, p.symbolicNames)) + // seenOne = true + // } + // } } /* @@ -702,15 +708,14 @@ func (p *Parser) getSourceName() string { // events as well as token matches. p.is for quick and dirty debugging. 
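What such a tracer prints is easiest to see with a toy: one line per rule entry/exit and one per matched token. The toyTracer type and its method names below are purely illustrative; they are not the runtime's TraceListener API.

package main

import "fmt"

// toyTracer illustrates the kind of output a trace listener produces:
// one line per rule entry/exit and one per consumed token.
type toyTracer struct{ ruleNames []string }

func (t *toyTracer) enterRule(ruleIndex int)  { fmt.Println("enter", t.ruleNames[ruleIndex]) }
func (t *toyTracer) exitRule(ruleIndex int)   { fmt.Println("exit ", t.ruleNames[ruleIndex]) }
func (t *toyTracer) consume(tokenText string) { fmt.Println("consume", tokenText) }

func main() {
	tr := &toyTracer{ruleNames: []string{"prog", "stat", "expr"}}
	// hand-simulated fragment of a parse of "x = 1":
	tr.enterRule(0)
	tr.enterRule(1)
	tr.consume("x")
	tr.consume("=")
	tr.enterRule(2)
	tr.consume("1")
	tr.exitRule(2)
	tr.exitRule(1)
	tr.exitRule(0)
}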
// func (p *Parser) setTrace(trace *TraceListener) { - if (trace == nil) { + if trace == nil { p.removeParseListener(p._tracer) p._tracer = nil } else { - if (p._tracer != nil) { + if p._tracer != nil { p.removeParseListener(p._tracer) } p._tracer = NewTraceListener(p) p.addParseListener(p._tracer) } } - diff --git a/runtime/Go/src/antlr4/ParserATNSimulator.go b/runtime/Go/src/antlr4/ParserATNSimulator.go index 8c9c690a8..62fd3382b 100644 --- a/runtime/Go/src/antlr4/ParserATNSimulator.go +++ b/runtime/Go/src/antlr4/ParserATNSimulator.go @@ -1,55 +1,55 @@ package antlr4 import ( - "fmt" - "strconv" - "strings" + "fmt" + "strconv" + "strings" ) type ParserATNSimulator struct { - *ATNSimulator + *ATNSimulator - parser IParser - predictionMode int - _input TokenStream - _startIndex int - _dfa *DFA - decisionToDFA []*DFA - mergeCache *DoubleDict - _outerContext IParserRuleContext + parser IParser + predictionMode int + _input TokenStream + _startIndex int + _dfa *DFA + decisionToDFA []*DFA + mergeCache *DoubleDict + _outerContext IParserRuleContext } func NewParserATNSimulator(parser IParser, atn *ATN, decisionToDFA []*DFA, sharedContextCache *PredictionContextCache) *ParserATNSimulator { - this := new(ParserATNSimulator) + this := new(ParserATNSimulator) - this.InitParserATNSimulator(parser, atn, decisionToDFA, sharedContextCache) + this.InitParserATNSimulator(parser, atn, decisionToDFA, sharedContextCache) - return this + return this } func (this *ParserATNSimulator) InitParserATNSimulator(parser IParser, atn *ATN, decisionToDFA []*DFA, sharedContextCache *PredictionContextCache) { - this.InitATNSimulator(atn, sharedContextCache) + this.InitATNSimulator(atn, sharedContextCache) - this.parser = parser - this.decisionToDFA = decisionToDFA - // SLL, LL, or LL + exact ambig detection?// - this.predictionMode = PredictionModeLL - // LAME globals to avoid parameters!!!!! I need these down deep in predTransition - this._input = nil - this._startIndex = 0 - this._outerContext = nil - this._dfa = nil - // Each prediction operation uses a cache for merge of prediction contexts. - // Don't keep around as it wastes huge amounts of memory. DoubleKeyMap - // isn't synchronized but we're ok since two threads shouldn't reuse same - // parser/atnsim object because it can only handle one input at a time. - // This maps graphs a and b to merged result c. (a,b)&rarrc. We can avoid - // the merge if we ever see a and b again. Note that (b,a)&rarrc should - // also be examined during cache lookup. - // - this.mergeCache = nil + this.parser = parser + this.decisionToDFA = decisionToDFA + // SLL, LL, or LL + exact ambig detection?// + this.predictionMode = PredictionModeLL + // LAME globals to avoid parameters!!!!! I need these down deep in predTransition + this._input = nil + this._startIndex = 0 + this._outerContext = nil + this._dfa = nil + // Each prediction operation uses a cache for merge of prediction contexts. + // Don't keep around as it wastes huge amounts of memory. DoubleKeyMap + // isn't synchronized but we're ok since two threads shouldn't reuse same + // parser/atnsim object because it can only handle one input at a time. + // This maps graphs a and b to merged result c. (a,b)&rarrc. We can avoid + // the merge if we ever see a and b again. Note that (b,a)&rarrc should + // also be examined during cache lookup. 
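The DoubleDict behind mergeCache is not part of this hunk; the property the comment describes is a symmetric two-key memo table, where a result stored under (a,b) is also found when queried as (b,a). A rough sketch under that assumption, with ints standing in for prediction-context identities and a string for the merged result:

package main

import "fmt"

// pairCache memoises a symmetric binary operation: a value stored under
// (a,b) is also returned for the query (b,a), so the merge never runs twice.
type pairCache struct {
	m map[[2]int]string
}

func newPairCache() *pairCache { return &pairCache{m: make(map[[2]int]string)} }

func (c *pairCache) get(a, b int) (string, bool) {
	if v, ok := c.m[[2]int{a, b}]; ok {
		return v, true
	}
	v, ok := c.m[[2]int{b, a}] // the (b,a)->c entry counts as a hit too
	return v, ok
}

func (c *pairCache) put(a, b int, v string) { c.m[[2]int{a, b}] = v }

func main() {
	cache := newPairCache()
	cache.put(7, 3, "merged(7,3)")
	fmt.Println(cache.get(3, 7)) // merged(7,3) true
}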
+ // + this.mergeCache = nil } @@ -63,85 +63,85 @@ func (this *ParserATNSimulator) reset() { func (this *ParserATNSimulator) adaptivePredict(input TokenStream, decision int, outerContext IParserRuleContext) int { - if (ParserATNSimulatorprototypedebug || ParserATNSimulatorprototypedebug_list_atn_decisions) { - fmt.Println("adaptivePredict decision " + strconv.Itoa(decision) + - " exec LA(1)==" + this.getLookaheadName(input) + - " line " + strconv.Itoa(input.LT(1).line) + ":" + - strconv.Itoa( input.LT(1).column) ) - } + if ParserATNSimulatorprototypedebug || ParserATNSimulatorprototypedebug_list_atn_decisions { + fmt.Println("adaptivePredict decision " + strconv.Itoa(decision) + + " exec LA(1)==" + this.getLookaheadName(input) + + " line " + strconv.Itoa(input.LT(1).line) + ":" + + strconv.Itoa(input.LT(1).column)) + } - this._input = input - this._startIndex = input.index() - this._outerContext = outerContext - - var dfa = this.decisionToDFA[decision] - this._dfa = dfa - var m = input.mark() - var index = input.index() + this._input = input + this._startIndex = input.index() + this._outerContext = outerContext - defer func(){ - this._dfa = nil - this.mergeCache = nil // wack cache after each prediction - input.seek(index) - input.release(m) - }() + var dfa = this.decisionToDFA[decision] + this._dfa = dfa + var m = input.mark() + var index = input.index() - // Now we are certain to have a specific decision's DFA - // But, do we still need an initial state? - var s0 *DFAState - if (dfa.precedenceDfa) { - // the start state for a precedence DFA depends on the current - // parser precedence, and is provided by a DFA method. - s0 = dfa.getPrecedenceStartState(this.parser.getPrecedence()) - } else { - // the start state for a "regular" DFA is just s0 - s0 = dfa.s0 - } + defer func() { + this._dfa = nil + this.mergeCache = nil // wack cache after each prediction + input.seek(index) + input.release(m) + }() - if (s0==nil) { - if (outerContext==nil) { - outerContext = RuleContextEMPTY - } - if (ParserATNSimulatorprototypedebug || ParserATNSimulatorprototypedebug_list_atn_decisions) { - fmt.Println("predictATN decision " + strconv.Itoa(dfa.decision) + - " exec LA(1)==" + this.getLookaheadName(input) + - ", outerContext=" + outerContext.toString(this.parser.getRuleNames(), nil)) - } - // If this is not a precedence DFA, we check the ATN start state - // to determine if this ATN start state is the decision for the - // closure block that determines whether a precedence rule - // should continue or complete. + // Now we are certain to have a specific decision's DFA + // But, do we still need an initial state? + var s0 *DFAState + if dfa.precedenceDfa { + // the start state for a precedence DFA depends on the current + // parser precedence, and is provided by a DFA method. 
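dfa.getPrecedenceStartState is not shown in this diff; conceptually a precedence DFA keeps one cached start state per parser precedence level instead of a single s0, and the caller computes and caches one when the level has not been seen yet. A toy rendering of that lookup (toyDFA and its fields are hypothetical names, not the runtime's DFA type):

package main

import "fmt"

// toyDFA sketches the precedence-DFA idea: one start state per parser
// precedence level, chosen at prediction time, versus a single s0 otherwise.
type toyDFA struct {
	s0               string // start state of a regular DFA
	precedenceDfa    bool
	precedenceStarts map[int]string // precedence level -> cached start state
}

func (d *toyDFA) startState(currentPrecedence int) (string, bool) {
	if d.precedenceDfa {
		s, ok := d.precedenceStarts[currentPrecedence]
		return s, ok // ok == false: caller computes and caches a new start state
	}
	return d.s0, d.s0 != ""
}

func main() {
	d := &toyDFA{precedenceDfa: true, precedenceStarts: map[int]string{0: "s0@prec0", 2: "s0@prec2"}}
	fmt.Println(d.startState(2)) // s0@prec2 true
	fmt.Println(d.startState(5)) //  false
}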
+ s0 = dfa.getPrecedenceStartState(this.parser.getPrecedence()) + } else { + // the start state for a "regular" DFA is just s0 + s0 = dfa.s0 + } - var t2 IATNState = dfa.atnStartState - t, ok := t2.(*StarLoopEntryState) - if (!dfa.precedenceDfa && ok) { - if (t.precedenceRuleDecision) { - dfa.setPrecedenceDfa(true) - } - } - var fullCtx = false - var s0_closure = this.computeStartState(dfa.atnStartState, RuleContextEMPTY, fullCtx) + if s0 == nil { + if outerContext == nil { + outerContext = RuleContextEMPTY + } + if ParserATNSimulatorprototypedebug || ParserATNSimulatorprototypedebug_list_atn_decisions { + fmt.Println("predictATN decision " + strconv.Itoa(dfa.decision) + + " exec LA(1)==" + this.getLookaheadName(input) + + ", outerContext=" + outerContext.toString(this.parser.getRuleNames(), nil)) + } + // If this is not a precedence DFA, we check the ATN start state + // to determine if this ATN start state is the decision for the + // closure block that determines whether a precedence rule + // should continue or complete. - if( dfa.precedenceDfa) { - // If this is a precedence DFA, we use applyPrecedenceFilter - // to convert the computed start state to a precedence start - // state. We then use DFA.setPrecedenceStartState to set the - // appropriate start state for the precedence level rather - // than simply setting DFA.s0. - // - s0_closure = this.applyPrecedenceFilter(s0_closure) - s0 = this.addDFAState(dfa, NewDFAState(-1, s0_closure)) - dfa.setPrecedenceStartState(this.parser.getPrecedence(), s0) - } else { - s0 = this.addDFAState(dfa, NewDFAState(-1, s0_closure)) - dfa.s0 = s0 - } - } - var alt = this.execATN(dfa, s0, input, index, outerContext) - if (ParserATNSimulatorprototypedebug) { - fmt.Println("DFA after predictATN: " + dfa.toString(this.parser.getLiteralNames(), nil)) - } - return alt + var t2 IATNState = dfa.atnStartState + t, ok := t2.(*StarLoopEntryState) + if !dfa.precedenceDfa && ok { + if t.precedenceRuleDecision { + dfa.setPrecedenceDfa(true) + } + } + var fullCtx = false + var s0_closure = this.computeStartState(dfa.atnStartState, RuleContextEMPTY, fullCtx) + + if dfa.precedenceDfa { + // If this is a precedence DFA, we use applyPrecedenceFilter + // to convert the computed start state to a precedence start + // state. We then use DFA.setPrecedenceStartState to set the + // appropriate start state for the precedence level rather + // than simply setting DFA.s0. + // + s0_closure = this.applyPrecedenceFilter(s0_closure) + s0 = this.addDFAState(dfa, NewDFAState(-1, s0_closure)) + dfa.setPrecedenceStartState(this.parser.getPrecedence(), s0) + } else { + s0 = this.addDFAState(dfa, NewDFAState(-1, s0_closure)) + dfa.s0 = s0 + } + } + var alt = this.execATN(dfa, s0, input, index, outerContext) + if ParserATNSimulatorprototypedebug { + fmt.Println("DFA after predictATN: " + dfa.toString(this.parser.getLiteralNames(), nil)) + } + return alt } @@ -151,22 +151,22 @@ func (this *ParserATNSimulator) adaptivePredict(input TokenStream, decision int, // There are some key conditions we're looking for after computing a new // set of ATN configs (proposed DFA state): - // if the set is empty, there is no viable alternative for current symbol - // does the state uniquely predict an alternative? - // does the state have a conflict that would prevent us from - // putting it on the work list? +// if the set is empty, there is no viable alternative for current symbol +// does the state uniquely predict an alternative? 
+// does the state have a conflict that would prevent us from +// putting it on the work list? // We also have some key operations to do: - // add an edge from previous DFA state to potentially NewDFA state, D, - // upon current symbol but only if adding to work list, which means in all - // cases except no viable alternative (and possibly non-greedy decisions?) - // collecting predicates and adding semantic context to DFA accept states - // adding rule context to context-sensitive DFA accept states - // consuming an input symbol - // reporting a conflict - // reporting an ambiguity - // reporting a context sensitivity - // reporting insufficient predicates +// add an edge from previous DFA state to potentially NewDFA state, D, +// upon current symbol but only if adding to work list, which means in all +// cases except no viable alternative (and possibly non-greedy decisions?) +// collecting predicates and adding semantic context to DFA accept states +// adding rule context to context-sensitive DFA accept states +// consuming an input symbol +// reporting a conflict +// reporting an ambiguity +// reporting a context sensitivity +// reporting insufficient predicates // cover these cases: // dead end @@ -175,103 +175,103 @@ func (this *ParserATNSimulator) adaptivePredict(input TokenStream, decision int, // conflict // conflict + preds // -func (this *ParserATNSimulator) execATN(dfa *DFA, s0 *DFAState, input TokenStream, startIndex int, outerContext IParserRuleContext ) int { +func (this *ParserATNSimulator) execATN(dfa *DFA, s0 *DFAState, input TokenStream, startIndex int, outerContext IParserRuleContext) int { - if (ParserATNSimulatorprototypedebug || ParserATNSimulatorprototypedebug_list_atn_decisions) { - fmt.Println("execATN decision " + strconv.Itoa(dfa.decision) + - " exec LA(1)==" + this.getLookaheadName(input) + - " line " + strconv.Itoa(input.LT(1).line) + ":" + strconv.Itoa(input.LT(1).column)) - } + if ParserATNSimulatorprototypedebug || ParserATNSimulatorprototypedebug_list_atn_decisions { + fmt.Println("execATN decision " + strconv.Itoa(dfa.decision) + + " exec LA(1)==" + this.getLookaheadName(input) + + " line " + strconv.Itoa(input.LT(1).line) + ":" + strconv.Itoa(input.LT(1).column)) + } - var previousD = s0 + var previousD = s0 - if (ParserATNSimulatorprototypedebug) { - fmt.Println("s0 = " + s0.toString()) - } - var t = input.LA(1) - for(true) { // for more work - var D = this.getExistingTargetState(previousD, t) - if(D==nil) { - D = this.computeTargetState(dfa, previousD, t) - } - if(D==ATNSimulatorERROR) { - // if any configs in previous dipped into outer context, that - // means that input up to t actually finished entry rule - // at least for SLL decision. Full LL doesn't dip into outer - // so don't need special case. - // We will get an error no matter what so delay until after - // decision better error message. Also, no reachable target - // ATN states in SLL implies LL will also get nowhere. - // If conflict in states that dip out, choose min since we - // will get error no matter what. 
- e := this.noViableAlt(input, outerContext, previousD.configs, startIndex) - input.seek(startIndex) - alt := this.getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule(previousD.configs, outerContext) - if(alt != ATNINVALID_ALT_NUMBER) { - return alt - } else { - panic(e) - } - } - if(D.requiresFullContext && this.predictionMode != PredictionModeSLL) { - // IF PREDS, MIGHT RESOLVE TO SINGLE ALT => SLL (or syntax error) - var conflictingAlts *BitSet = D.configs.conflictingAlts - if (D.predicates!=nil) { - if (ParserATNSimulatorprototypedebug) { - fmt.Println("DFA state has preds in DFA sim LL failover") - } - var conflictIndex = input.index() - if(conflictIndex != startIndex) { - input.seek(startIndex) - } - conflictingAlts = this.evalSemanticContext(D.predicates, outerContext, true) - if (conflictingAlts.length()==1) { - if(ParserATNSimulatorprototypedebug) { - fmt.Println("Full LL avoided") - } - return conflictingAlts.minValue() - } - if (conflictIndex != startIndex) { - // restore the index so reporting the fallback to full - // context occurs with the index at the correct spot - input.seek(conflictIndex) - } - } - if (ParserATNSimulatorprototypedfa_debug) { - fmt.Println("ctx sensitive state " + outerContext.toString(nil,nil) +" in " + D.toString()) - } - var fullCtx = true - var s0_closure = this.computeStartState(dfa.atnStartState, outerContext, fullCtx) - this.reportAttemptingFullContext(dfa, conflictingAlts, D.configs, startIndex, input.index()) - alt := this.execATNWithFullContext(dfa, D, s0_closure, input, startIndex, outerContext) - return alt - } - if (D.isAcceptState) { - if (D.predicates==nil) { - return D.prediction - } - var stopIndex = input.index() - input.seek(startIndex) - var alts = this.evalSemanticContext(D.predicates, outerContext, true) - if (alts.length()==0) { - panic(this.noViableAlt(input, outerContext, D.configs, startIndex)) - } else if (alts.length()==1) { - return alts.minValue() - } else { - // report ambiguity after predicate evaluation to make sure the correct set of ambig alts is reported. - this.reportAmbiguity(dfa, D, startIndex, stopIndex, false, alts, D.configs) - return alts.minValue() - } - } - previousD = D + if ParserATNSimulatorprototypedebug { + fmt.Println("s0 = " + s0.toString()) + } + var t = input.LA(1) + for true { // for more work + var D = this.getExistingTargetState(previousD, t) + if D == nil { + D = this.computeTargetState(dfa, previousD, t) + } + if D == ATNSimulatorERROR { + // if any configs in previous dipped into outer context, that + // means that input up to t actually finished entry rule + // at least for SLL decision. Full LL doesn't dip into outer + // so don't need special case. + // We will get an error no matter what so delay until after + // decision better error message. Also, no reachable target + // ATN states in SLL implies LL will also get nowhere. + // If conflict in states that dip out, choose min since we + // will get error no matter what. 
+ e := this.noViableAlt(input, outerContext, previousD.configs, startIndex) + input.seek(startIndex) + alt := this.getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule(previousD.configs, outerContext) + if alt != ATNINVALID_ALT_NUMBER { + return alt + } else { + panic(e) + } + } + if D.requiresFullContext && this.predictionMode != PredictionModeSLL { + // IF PREDS, MIGHT RESOLVE TO SINGLE ALT => SLL (or syntax error) + var conflictingAlts *BitSet = D.configs.conflictingAlts + if D.predicates != nil { + if ParserATNSimulatorprototypedebug { + fmt.Println("DFA state has preds in DFA sim LL failover") + } + var conflictIndex = input.index() + if conflictIndex != startIndex { + input.seek(startIndex) + } + conflictingAlts = this.evalSemanticContext(D.predicates, outerContext, true) + if conflictingAlts.length() == 1 { + if ParserATNSimulatorprototypedebug { + fmt.Println("Full LL avoided") + } + return conflictingAlts.minValue() + } + if conflictIndex != startIndex { + // restore the index so reporting the fallback to full + // context occurs with the index at the correct spot + input.seek(conflictIndex) + } + } + if ParserATNSimulatorprototypedfa_debug { + fmt.Println("ctx sensitive state " + outerContext.toString(nil, nil) + " in " + D.toString()) + } + var fullCtx = true + var s0_closure = this.computeStartState(dfa.atnStartState, outerContext, fullCtx) + this.reportAttemptingFullContext(dfa, conflictingAlts, D.configs, startIndex, input.index()) + alt := this.execATNWithFullContext(dfa, D, s0_closure, input, startIndex, outerContext) + return alt + } + if D.isAcceptState { + if D.predicates == nil { + return D.prediction + } + var stopIndex = input.index() + input.seek(startIndex) + var alts = this.evalSemanticContext(D.predicates, outerContext, true) + if alts.length() == 0 { + panic(this.noViableAlt(input, outerContext, D.configs, startIndex)) + } else if alts.length() == 1 { + return alts.minValue() + } else { + // report ambiguity after predicate evaluation to make sure the correct set of ambig alts is reported. + this.reportAmbiguity(dfa, D, startIndex, stopIndex, false, alts, D.configs) + return alts.minValue() + } + } + previousD = D - if (t != TokenEOF) { - input.consume() - t = input.LA(1) - } - } + if t != TokenEOF { + input.consume() + t = input.LA(1) + } + } - panic("Should not have reached this state") + panic("Should not have reached this state") } // Get an existing target state for an edge in the DFA. If the target state @@ -285,12 +285,12 @@ func (this *ParserATNSimulator) execATN(dfa *DFA, s0 *DFAState, input TokenStrea // already cached func (this *ParserATNSimulator) getExistingTargetState(previousD *DFAState, t int) *DFAState { - var edges = previousD.edges - if (edges==nil) { - return nil - } else { - return edges[t + 1] - } + var edges = previousD.edges + if edges == nil { + return nil + } else { + return edges[t+1] + } } // Compute a target state for an edge in the DFA, and attempt to add the @@ -305,310 +305,311 @@ func (this *ParserATNSimulator) getExistingTargetState(previousD *DFAState, t in // returns {@link //ERROR}. 
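The edges[t+1] lookup in getExistingTargetState above offsets the input symbol by one so that Token.EOF (-1) can be cached in slot 0. A self-contained toy of just that indexing convention; the bounds checks are added for the standalone example, and the dfaState type here is a stand-in, not the runtime's DFAState:

package main

import "fmt"

const tokenEOF = -1 // ANTLR's Token.EOF

// dfaState (toy): outgoing edges are indexed by input symbol + 1 so that
// EOF (-1) occupies slot 0, matching the edges[t+1] lookup above.
type dfaState struct {
	name  string
	edges []*dfaState
}

func existingTarget(s *dfaState, t int) *dfaState {
	if s.edges == nil || t+1 < 0 || t+1 >= len(s.edges) {
		return nil // no cached edge: the caller falls back to computing the target
	}
	return s.edges[t+1]
}

func main() {
	accept := &dfaState{name: "accept"}
	s0 := &dfaState{name: "s0", edges: make([]*dfaState, 3)}
	s0.edges[tokenEOF+1] = accept // the edge taken on EOF lives in slot 0

	fmt.Println(existingTarget(s0, tokenEOF).name) // accept
	fmt.Println(existingTarget(s0, 1))             // <nil>: not cached yet
}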
func (this *ParserATNSimulator) computeTargetState(dfa *DFA, previousD *DFAState, t int) *DFAState { - var reach = this.computeReachSet(previousD.configs, t, false) + var reach = this.computeReachSet(previousD.configs, t, false) - if(reach==nil) { - this.addDFAEdge(dfa, previousD, t, ATNSimulatorERROR) - return ATNSimulatorERROR - } - // create Newtarget state we'll add to DFA after it's complete - var D = NewDFAState(-1, reach) + if reach == nil { + this.addDFAEdge(dfa, previousD, t, ATNSimulatorERROR) + return ATNSimulatorERROR + } + // create Newtarget state we'll add to DFA after it's complete + var D = NewDFAState(-1, reach) - var predictedAlt = this.getUniqueAlt(reach) + var predictedAlt = this.getUniqueAlt(reach) - if (ParserATNSimulatorprototypedebug) { - var altSubSets = PredictionModegetConflictingAltSubsets(reach) - fmt.Println("SLL altSubSets=" + fmt.Sprint(altSubSets) + - ", previous=" + previousD.configs.toString() + - ", configs=" + reach.toString() + - ", predict=" + strconv.Itoa(predictedAlt) + - ", allSubsetsConflict=" + - fmt.Sprint(PredictionModeallSubsetsConflict(altSubSets)) + - ", conflictingAlts=" + this.getConflictingAlts(reach).toString()) - } - if (predictedAlt!=ATNINVALID_ALT_NUMBER) { - // NO CONFLICT, UNIQUELY PREDICTED ALT - D.isAcceptState = true - D.configs.uniqueAlt = predictedAlt - D.prediction = predictedAlt - } else if (PredictionModehasSLLConflictTerminatingPrediction(this.predictionMode, reach)) { - // MORE THAN ONE VIABLE ALTERNATIVE - D.configs.conflictingAlts = this.getConflictingAlts(reach) - D.requiresFullContext = true - // in SLL-only mode, we will stop at this state and return the minimum alt - D.isAcceptState = true - D.prediction = D.configs.conflictingAlts.minValue() - } - if (D.isAcceptState && D.configs.hasSemanticContext) { - this.predicateDFAState(D, this.atn.getDecisionState(dfa.decision)) - if( D.predicates!=nil) { - D.prediction = ATNINVALID_ALT_NUMBER - } - } - // all adds to dfa are done after we've created full D state - D = this.addDFAEdge(dfa, previousD, t, D) - return D + if ParserATNSimulatorprototypedebug { + var altSubSets = PredictionModegetConflictingAltSubsets(reach) + fmt.Println("SLL altSubSets=" + fmt.Sprint(altSubSets) + + ", previous=" + previousD.configs.toString() + + ", configs=" + reach.toString() + + ", predict=" + strconv.Itoa(predictedAlt) + + ", allSubsetsConflict=" + + fmt.Sprint(PredictionModeallSubsetsConflict(altSubSets)) + + ", conflictingAlts=" + this.getConflictingAlts(reach).toString()) + } + if predictedAlt != ATNINVALID_ALT_NUMBER { + // NO CONFLICT, UNIQUELY PREDICTED ALT + D.isAcceptState = true + D.configs.uniqueAlt = predictedAlt + D.prediction = predictedAlt + } else if PredictionModehasSLLConflictTerminatingPrediction(this.predictionMode, reach) { + // MORE THAN ONE VIABLE ALTERNATIVE + D.configs.conflictingAlts = this.getConflictingAlts(reach) + D.requiresFullContext = true + // in SLL-only mode, we will stop at this state and return the minimum alt + D.isAcceptState = true + D.prediction = D.configs.conflictingAlts.minValue() + } + if D.isAcceptState && D.configs.hasSemanticContext { + this.predicateDFAState(D, this.atn.getDecisionState(dfa.decision)) + if D.predicates != nil { + D.prediction = ATNINVALID_ALT_NUMBER + } + } + // all adds to dfa are done after we've created full D state + D = this.addDFAEdge(dfa, previousD, t, D) + return D } func (this *ParserATNSimulator) predicateDFAState(dfaState *DFAState, decisionState *DecisionState) { - // We need to test all predicates, even in DFA 
states that - // uniquely predict alternative. - var nalts = len(decisionState.getTransitions()) - // Update DFA so reach becomes accept state with (predicate,alt) - // pairs if preds found for conflicting alts - var altsToCollectPredsFrom = this.getConflictingAltsOrUniqueAlt(dfaState.configs) - var altToPred = this.getPredsForAmbigAlts(altsToCollectPredsFrom, dfaState.configs, nalts) - if (altToPred!=nil) { - dfaState.predicates = this.getPredicatePredictions(altsToCollectPredsFrom, altToPred) - dfaState.prediction = ATNINVALID_ALT_NUMBER // make sure we use preds - } else { - // There are preds in configs but they might go away - // when OR'd together like {p}? || NONE == NONE. If neither - // alt has preds, resolve to min alt - dfaState.prediction = altsToCollectPredsFrom.minValue() - } + // We need to test all predicates, even in DFA states that + // uniquely predict alternative. + var nalts = len(decisionState.getTransitions()) + // Update DFA so reach becomes accept state with (predicate,alt) + // pairs if preds found for conflicting alts + var altsToCollectPredsFrom = this.getConflictingAltsOrUniqueAlt(dfaState.configs) + var altToPred = this.getPredsForAmbigAlts(altsToCollectPredsFrom, dfaState.configs, nalts) + if altToPred != nil { + dfaState.predicates = this.getPredicatePredictions(altsToCollectPredsFrom, altToPred) + dfaState.prediction = ATNINVALID_ALT_NUMBER // make sure we use preds + } else { + // There are preds in configs but they might go away + // when OR'd together like {p}? || NONE == NONE. If neither + // alt has preds, resolve to min alt + dfaState.prediction = altsToCollectPredsFrom.minValue() + } } // comes back with reach.uniqueAlt set to a valid alt -func (this *ParserATNSimulator) execATNWithFullContext(dfa *DFA, D *DFAState, s0 *ATNConfigSet, input TokenStream, startIndex int, outerContext IParserRuleContext) int{ +func (this *ParserATNSimulator) execATNWithFullContext(dfa *DFA, D *DFAState, s0 *ATNConfigSet, input TokenStream, startIndex int, outerContext IParserRuleContext) int { - if (ParserATNSimulatorprototypedebug || ParserATNSimulatorprototypedebug_list_atn_decisions) { - fmt.Println("execATNWithFullContext "+ s0.toString()) - } + if ParserATNSimulatorprototypedebug || ParserATNSimulatorprototypedebug_list_atn_decisions { + fmt.Println("execATNWithFullContext " + s0.toString()) + } - var fullCtx = true - var foundExactAmbig = false - var reach *ATNConfigSet = nil - var previous = s0 - input.seek(startIndex) - var t = input.LA(1) - var predictedAlt = -1 + var fullCtx = true + var foundExactAmbig = false + var reach *ATNConfigSet = nil + var previous = s0 + input.seek(startIndex) + var t = input.LA(1) + var predictedAlt = -1 - for (true) { // for more work - reach = this.computeReachSet(previous, t, fullCtx) - if (reach==nil) { - // if any configs in previous dipped into outer context, that - // means that input up to t actually finished entry rule - // at least for LL decision. Full LL doesn't dip into outer - // so don't need special case. - // We will get an error no matter what so delay until after - // decision better error message. Also, no reachable target - // ATN states in SLL implies LL will also get nowhere. - // If conflict in states that dip out, choose min since we - // will get error no matter what. 
- var e = this.noViableAlt(input, outerContext, previous, startIndex) - input.seek(startIndex) - var alt = this.getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule(previous, outerContext) - if(alt!=ATNINVALID_ALT_NUMBER) { - return alt - } else { - panic(e) - } - } - var altSubSets = PredictionModegetConflictingAltSubsets(reach) - if(ParserATNSimulatorprototypedebug) { - fmt.Println("LL altSubSets=" + fmt.Sprint(altSubSets) + ", predict=" + - strconv.Itoa(PredictionModegetUniqueAlt(altSubSets)) + ", resolvesToJustOneViableAlt=" + - fmt.Sprint(PredictionModeresolvesToJustOneViableAlt(altSubSets))) - } - reach.uniqueAlt = this.getUniqueAlt(reach) - // unique prediction? - if(reach.uniqueAlt!=ATNINVALID_ALT_NUMBER) { - predictedAlt = reach.uniqueAlt - break - } else if (this.predictionMode != PredictionModeLL_EXACT_AMBIG_DETECTION) { - predictedAlt = PredictionModeresolvesToJustOneViableAlt(altSubSets) - if(predictedAlt != ATNINVALID_ALT_NUMBER) { - break - } - } else { - // In exact ambiguity mode, we never try to terminate early. - // Just keeps scarfing until we know what the conflict is - if (PredictionModeallSubsetsConflict(altSubSets) && PredictionModeallSubsetsEqual(altSubSets)) { - foundExactAmbig = true - predictedAlt = PredictionModegetSingleViableAlt(altSubSets) - break - } - // else there are multiple non-conflicting subsets or - // we're not sure what the ambiguity is yet. - // So, keep going. - } - previous = reach - if( t != TokenEOF) { - input.consume() - t = input.LA(1) - } - } - // If the configuration set uniquely predicts an alternative, - // without conflict, then we know that it's a full LL decision - // not SLL. - if (reach.uniqueAlt != ATNINVALID_ALT_NUMBER ) { - this.reportContextSensitivity(dfa, predictedAlt, reach, startIndex, input.index()) - return predictedAlt - } - // We do not check predicates here because we have checked them - // on-the-fly when doing full context prediction. + for true { // for more work + reach = this.computeReachSet(previous, t, fullCtx) + if reach == nil { + // if any configs in previous dipped into outer context, that + // means that input up to t actually finished entry rule + // at least for LL decision. Full LL doesn't dip into outer + // so don't need special case. + // We will get an error no matter what so delay until after + // decision better error message. Also, no reachable target + // ATN states in SLL implies LL will also get nowhere. + // If conflict in states that dip out, choose min since we + // will get error no matter what. + var e = this.noViableAlt(input, outerContext, previous, startIndex) + input.seek(startIndex) + var alt = this.getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule(previous, outerContext) + if alt != ATNINVALID_ALT_NUMBER { + return alt + } else { + panic(e) + } + } + var altSubSets = PredictionModegetConflictingAltSubsets(reach) + if ParserATNSimulatorprototypedebug { + fmt.Println("LL altSubSets=" + fmt.Sprint(altSubSets) + ", predict=" + + strconv.Itoa(PredictionModegetUniqueAlt(altSubSets)) + ", resolvesToJustOneViableAlt=" + + fmt.Sprint(PredictionModeresolvesToJustOneViableAlt(altSubSets))) + } + reach.uniqueAlt = this.getUniqueAlt(reach) + // unique prediction? 
+ if reach.uniqueAlt != ATNINVALID_ALT_NUMBER { + predictedAlt = reach.uniqueAlt + break + } else if this.predictionMode != PredictionModeLL_EXACT_AMBIG_DETECTION { + predictedAlt = PredictionModeresolvesToJustOneViableAlt(altSubSets) + if predictedAlt != ATNINVALID_ALT_NUMBER { + break + } + } else { + // In exact ambiguity mode, we never try to terminate early. + // Just keeps scarfing until we know what the conflict is + if PredictionModeallSubsetsConflict(altSubSets) && PredictionModeallSubsetsEqual(altSubSets) { + foundExactAmbig = true + predictedAlt = PredictionModegetSingleViableAlt(altSubSets) + break + } + // else there are multiple non-conflicting subsets or + // we're not sure what the ambiguity is yet. + // So, keep going. + } + previous = reach + if t != TokenEOF { + input.consume() + t = input.LA(1) + } + } + // If the configuration set uniquely predicts an alternative, + // without conflict, then we know that it's a full LL decision + // not SLL. + if reach.uniqueAlt != ATNINVALID_ALT_NUMBER { + this.reportContextSensitivity(dfa, predictedAlt, reach, startIndex, input.index()) + return predictedAlt + } + // We do not check predicates here because we have checked them + // on-the-fly when doing full context prediction. - // - // In non-exact ambiguity detection mode, we might actually be able to - // detect an exact ambiguity, but I'm not going to spend the cycles - // needed to check. We only emit ambiguity warnings in exact ambiguity - // mode. - // - // For example, we might know that we have conflicting configurations. - // But, that does not mean that there is no way forward without a - // conflict. It's possible to have nonconflicting alt subsets as in: + // + // In non-exact ambiguity detection mode, we might actually be able to + // detect an exact ambiguity, but I'm not going to spend the cycles + // needed to check. We only emit ambiguity warnings in exact ambiguity + // mode. + // + // For example, we might know that we have conflicting configurations. + // But, that does not mean that there is no way forward without a + // conflict. It's possible to have nonconflicting alt subsets as in: - // altSubSets=[{1, 2}, {1, 2}, {1}, {1, 2}] + // altSubSets=[{1, 2}, {1, 2}, {1}, {1, 2}] - // from - // - // [(17,1,[5 $]), (13,1,[5 10 $]), (21,1,[5 10 $]), (11,1,[$]), - // (13,2,[5 10 $]), (21,2,[5 10 $]), (11,2,[$])] - // - // In this case, (17,1,[5 $]) indicates there is some next sequence that - // would resolve this without conflict to alternative 1. Any other viable - // next sequence, however, is associated with a conflict. We stop - // looking for input because no amount of further lookahead will alter - // the fact that we should predict alternative 1. We just can't say for - // sure that there is an ambiguity without looking further. + // from + // + // [(17,1,[5 $]), (13,1,[5 10 $]), (21,1,[5 10 $]), (11,1,[$]), + // (13,2,[5 10 $]), (21,2,[5 10 $]), (11,2,[$])] + // + // In this case, (17,1,[5 $]) indicates there is some next sequence that + // would resolve this without conflict to alternative 1. Any other viable + // next sequence, however, is associated with a conflict. We stop + // looking for input because no amount of further lookahead will alter + // the fact that we should predict alternative 1. We just can't say for + // sure that there is an ambiguity without looking further. 
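PredictionMode's subset analysis is not part of this hunk, but the altSubSets example in the comment above ([{1, 2}, {1, 2}, {1}, {1, 2}]) can be acted out in a few lines: take the minimum alternative of each subset and check whether they all agree. This mirrors the spirit of resolvesToJustOneViableAlt rather than its exact implementation:

package main

import "fmt"

// altSet plays the role of one per-ATN-state alternative subset.
type altSet map[int]bool

func minAlt(s altSet) int {
	min := -1
	for a := range s {
		if min == -1 || a < min {
			min = a
		}
	}
	return min
}

// singleViableAlt returns the lone alternative every subset would resolve to
// (the minimum of each subset), or -1 if the subsets disagree.
func singleViableAlt(subsets []altSet) int {
	viable := make(altSet)
	for _, s := range subsets {
		viable[minAlt(s)] = true
	}
	if len(viable) != 1 {
		return -1
	}
	for a := range viable {
		return a
	}
	return -1
}

func main() {
	subsets := []altSet{{1: true, 2: true}, {1: true, 2: true}, {1: true}, {1: true, 2: true}}
	// Prediction can stop on alt 1 even though some subsets still conflict.
	fmt.Println(singleViableAlt(subsets)) // 1
}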
- this.reportAmbiguity(dfa, D, startIndex, input.index(), foundExactAmbig, nil, reach) + this.reportAmbiguity(dfa, D, startIndex, input.index(), foundExactAmbig, nil, reach) - return predictedAlt + return predictedAlt } func (this *ParserATNSimulator) computeReachSet(closure *ATNConfigSet, t int, fullCtx bool) *ATNConfigSet { - if (ParserATNSimulatorprototypedebug) { - fmt.Println("in computeReachSet, starting closure: " + closure.toString()) - } - if( this.mergeCache==nil) { - this.mergeCache = NewDoubleDict() - } - var intermediate = NewATNConfigSet(fullCtx) + if ParserATNSimulatorprototypedebug { + fmt.Println("in computeReachSet, starting closure: " + closure.toString()) + } + if this.mergeCache == nil { + this.mergeCache = NewDoubleDict() + } + var intermediate = NewATNConfigSet(fullCtx) - // Configurations already in a rule stop state indicate reaching the end - // of the decision rule (local context) or end of the start rule (full - // context). Once reached, these configurations are never updated by a - // closure operation, so they are handled separately for the performance - // advantage of having a smaller intermediate set when calling closure. - // - // For full-context reach operations, separate handling is required to - // ensure that the alternative matching the longest overall sequence is - // chosen when multiple such configurations can match the input. - - var skippedStopStates []*ATNConfig = nil + // Configurations already in a rule stop state indicate reaching the end + // of the decision rule (local context) or end of the start rule (full + // context). Once reached, these configurations are never updated by a + // closure operation, so they are handled separately for the performance + // advantage of having a smaller intermediate set when calling closure. + // + // For full-context reach operations, separate handling is required to + // ensure that the alternative matching the longest overall sequence is + // chosen when multiple such configurations can match the input. - // First figure out where we can reach on input t - for i:=0; i1 - // (basically a graph subtraction algorithm). - if (!config.getPrecedenceFilterSuppressed()) { - var context = statesFromAlt1[config.getState().getStateNumber()] - if (context!=nil && context.equals(config.getContext())) { - // eliminated - continue - } + for i := 0; i < len(configs.configs); i++ { + config := configs.configs[i] + // handle alt 1 first + if config.getAlt() != 1 { + continue } - configSet.add(config, this.mergeCache) - } - return configSet + var updatedContext = config.getSemanticContext().evalPrecedence(this.parser, this._outerContext) + if updatedContext == nil { + // the configuration was eliminated + continue + } + statesFromAlt1[config.getState().getStateNumber()] = config.getContext() + if updatedContext != config.getSemanticContext() { + configSet.add(NewATNConfig2(config, updatedContext), this.mergeCache) + } else { + configSet.add(config, this.mergeCache) + } + } + for i := 0; i < len(configs.configs); i++ { + config := configs.configs[i] + if config.getAlt() == 1 { + // already handled + continue + } + // In the future, this elimination step could be updated to also + // filter the prediction context for alternatives predicting alt>1 + // (basically a graph subtraction algorithm). 
+ if !config.getPrecedenceFilterSuppressed() { + var context = statesFromAlt1[config.getState().getStateNumber()] + if context != nil && context.equals(config.getContext()) { + // eliminated + continue + } + } + configSet.add(config, this.mergeCache) + } + return configSet } func (this *ParserATNSimulator) getReachableTarget(trans ITransition, ttype int) IATNState { - if (trans.matches(ttype, 0, this.atn.maxTokenType)) { - return trans.getTarget() - } else { - return nil - } + if trans.matches(ttype, 0, this.atn.maxTokenType) { + return trans.getTarget() + } else { + return nil + } } func (this *ParserATNSimulator) getPredsForAmbigAlts(ambigAlts *BitSet, configs *ATNConfigSet, nalts int) []SemanticContext { - var altToPred = make([]SemanticContext, nalts + 1) - for i:=0; i0) { - alt = this.getAltThatFinishedDecisionEntryRule(semInvalidConfigs) - if (alt!=ATNINVALID_ALT_NUMBER) { // syntactically viable path exists - return alt - } - } - return ATNINVALID_ALT_NUMBER + var cfgs = this.splitAccordingToSemanticValidity(configs, outerContext) + var semValidConfigs = cfgs[0] + var semInvalidConfigs = cfgs[1] + var alt = this.getAltThatFinishedDecisionEntryRule(semValidConfigs) + if alt != ATNINVALID_ALT_NUMBER { // semantically/syntactically viable path exists + return alt + } + // Is there a syntactically valid path with a failed pred? + if len(semInvalidConfigs.configs) > 0 { + alt = this.getAltThatFinishedDecisionEntryRule(semInvalidConfigs) + if alt != ATNINVALID_ALT_NUMBER { // syntactically viable path exists + return alt + } + } + return ATNINVALID_ALT_NUMBER } - + func (this *ParserATNSimulator) getAltThatFinishedDecisionEntryRule(configs *ATNConfigSet) int { - var alts = NewIntervalSet() + var alts = NewIntervalSet() - for i:=0; i0 || (ok && c.getContext().hasEmptyPath())) { - alts.addOne(c.getAlt()) - } - } - if (alts.length()==0) { - return ATNINVALID_ALT_NUMBER - } else { - return alts.first() - } + if c.getReachesIntoOuterContext() > 0 || (ok && c.getContext().hasEmptyPath()) { + alts.addOne(c.getAlt()) + } + } + if alts.length() == 0 { + return ATNINVALID_ALT_NUMBER + } else { + return alts.first() + } } + // Walk the list of configurations and split them according to // those that have preds evaluating to true/false. If no pred, assume // true pred and include in succeeded set. Returns Pair of sets. @@ -913,27 +915,27 @@ func (this *ParserATNSimulator) getAltThatFinishedDecisionEntryRule(configs *ATN // prediction, which is where predicates need to evaluate. type ATNConfigSetPair struct { - item0, item1 *ATNConfigSet + item0, item1 *ATNConfigSet } -func (this *ParserATNSimulator) splitAccordingToSemanticValidity( configs *ATNConfigSet, outerContext IParserRuleContext) []*ATNConfigSet { - var succeeded = NewATNConfigSet(configs.fullCtx) - var failed = NewATNConfigSet(configs.fullCtx) +func (this *ParserATNSimulator) splitAccordingToSemanticValidity(configs *ATNConfigSet, outerContext IParserRuleContext) []*ATNConfigSet { + var succeeded = NewATNConfigSet(configs.fullCtx) + var failed = NewATNConfigSet(configs.fullCtx) - for i:=0; i50) { - panic("problem") - } - } + if ParserATNSimulatorprototypedebug { + fmt.Println("closure(" + config.toString() + ")") //config.toString(this.parser,true) + ")") + fmt.Println("configs(" + configs.toString() + ")") + if config.getReachesIntoOuterContext() > 50 { + panic("problem") + } + } - _, ok := config.getState().(*RuleStopState) - if (ok) { - // We hit rule end. 
If we have context info, use it - // run thru all possible stack tops in ctx - if (!config.getContext().isEmpty()) { - for i :=0; i< config.getContext().length(); i++ { - if (config.getContext().getReturnState(i) == PredictionContextEMPTY_RETURN_STATE) { - if (fullCtx) { - configs.add(NewATNConfig1(config, config.getState(), PredictionContextEMPTY), this.mergeCache) - continue - } else { - // we have no context info, just chase follow links (if greedy) - if (ParserATNSimulatorprototypedebug) { - fmt.Println("FALLING off rule " + this.getRuleName(config.getState().getRuleIndex())) - } - this.closure_(config, configs, closureBusy, collectPredicates, fullCtx, depth, treatEofAsEpsilon) - } - continue - } - returnState := this.atn.states[config.getContext().getReturnState(i)] - newContext := config.getContext().getParent(i) // "pop" return state + _, ok := config.getState().(*RuleStopState) + if ok { + // We hit rule end. If we have context info, use it + // run thru all possible stack tops in ctx + if !config.getContext().isEmpty() { + for i := 0; i < config.getContext().length(); i++ { + if config.getContext().getReturnState(i) == PredictionContextEMPTY_RETURN_STATE { + if fullCtx { + configs.add(NewATNConfig1(config, config.getState(), PredictionContextEMPTY), this.mergeCache) + continue + } else { + // we have no context info, just chase follow links (if greedy) + if ParserATNSimulatorprototypedebug { + fmt.Println("FALLING off rule " + this.getRuleName(config.getState().getRuleIndex())) + } + this.closure_(config, configs, closureBusy, collectPredicates, fullCtx, depth, treatEofAsEpsilon) + } + continue + } + returnState := this.atn.states[config.getContext().getReturnState(i)] + newContext := config.getContext().getParent(i) // "pop" return state - c := NewATNConfig5(returnState, config.getAlt(), newContext, config.getSemanticContext()) - // While we have context to pop back from, we may have - // gotten that context AFTER having falling off a rule. - // Make sure we track that we are now out of context. - c.setReachesIntoOuterContext( config.getReachesIntoOuterContext() ) - this.closureCheckingStopState(c, configs, closureBusy, collectPredicates, fullCtx, depth - 1, treatEofAsEpsilon) - } - return - } else if( fullCtx) { - // reached end of start rule - configs.add(config, this.mergeCache) - return - } else { - // else if we have no context info, just chase follow links (if greedy) - if (ParserATNSimulatorprototypedebug) { - fmt.Println("FALLING off rule " + this.getRuleName(config.getState().getRuleIndex())) - } - } - } - this.closure_(config, configs, closureBusy, collectPredicates, fullCtx, depth, treatEofAsEpsilon) + c := NewATNConfig5(returnState, config.getAlt(), newContext, config.getSemanticContext()) + // While we have context to pop back from, we may have + // gotten that context AFTER having falling off a rule. + // Make sure we track that we are now out of context. 
+ c.setReachesIntoOuterContext(config.getReachesIntoOuterContext()) + this.closureCheckingStopState(c, configs, closureBusy, collectPredicates, fullCtx, depth-1, treatEofAsEpsilon) + } + return + } else if fullCtx { + // reached end of start rule + configs.add(config, this.mergeCache) + return + } else { + // else if we have no context info, just chase follow links (if greedy) + if ParserATNSimulatorprototypedebug { + fmt.Println("FALLING off rule " + this.getRuleName(config.getState().getRuleIndex())) + } + } + } + this.closure_(config, configs, closureBusy, collectPredicates, fullCtx, depth, treatEofAsEpsilon) } // Do the actual work of walking epsilon edges// func (this *ParserATNSimulator) closure_(config IATNConfig, configs *ATNConfigSet, closureBusy *Set, collectPredicates, fullCtx bool, depth int, treatEofAsEpsilon bool) { - var p = config.getState() - // optimization - if (! p.getEpsilonOnlyTransitions()) { - configs.add(config, this.mergeCache) - // make sure to not return here, because EOF transitions can act as - // both epsilon transitions and non-epsilon transitions. - } - for i := 0; i 0. + var newDepth = depth + t2, ok := t.(*EpsilonTransition) + if ok { + // target fell off end of rule mark resulting c as having dipped into outer context + // We can't get here if incoming config was rule stop and we had context + // track how far we dip into outer context. Might + // come in handy and we avoid evaluating context dependent + // preds if this is > 0. - if (closureBusy.add(c)!=c) { - // avoid infinite recursion for right-recursive rules - continue - } + if closureBusy.add(c) != c { + // avoid infinite recursion for right-recursive rules + continue + } - if (this._dfa != nil && this._dfa.precedenceDfa) { - if (t2.outermostPrecedenceReturn == this._dfa.atnStartState.getRuleIndex()) { + if this._dfa != nil && this._dfa.precedenceDfa { + if t2.outermostPrecedenceReturn == this._dfa.atnStartState.getRuleIndex() { c.precedenceFilterSuppressed = true } } - c.setReachesIntoOuterContext( c.getReachesIntoOuterContext() + 1 ) - configs.dipsIntoOuterContext = true // TODO: can remove? only care when we add to set per middle of this method - newDepth -= 1 - if (ParserATNSimulatorprototypedebug) { - fmt.Println("dips into outer ctx: " + c.toString()) - } - } else if _, ok := t.(*RuleTransition); ok { - // latch when newDepth goes negative - once we step out of the entry context we can't return - if (newDepth >= 0) { - newDepth += 1 - } - } - this.closureCheckingStopState(c, configs, closureBusy, continueCollecting, fullCtx, newDepth, treatEofAsEpsilon) - } - } + c.setReachesIntoOuterContext(c.getReachesIntoOuterContext() + 1) + configs.dipsIntoOuterContext = true // TODO: can remove? 
only care when we add to set per middle of this method + newDepth -= 1 + if ParserATNSimulatorprototypedebug { + fmt.Println("dips into outer ctx: " + c.toString()) + } + } else if _, ok := t.(*RuleTransition); ok { + // latch when newDepth goes negative - once we step out of the entry context we can't return + if newDepth >= 0 { + newDepth += 1 + } + } + this.closureCheckingStopState(c, configs, closureBusy, continueCollecting, fullCtx, newDepth, treatEofAsEpsilon) + } + } } -func (this *ParserATNSimulator) getRuleName( index int ) string { - if (this.parser!=nil && index >=0) { - return this.parser.getRuleNames()[index] - } else { - return "" - } +func (this *ParserATNSimulator) getRuleName(index int) string { + if this.parser != nil && index >= 0 { + return this.parser.getRuleNames()[index] + } else { + return "" + } } func (this *ParserATNSimulator) getEpsilonTarget(config IATNConfig, t ITransition, collectPredicates, inContext, fullCtx, treatEofAsEpsilon bool) *ATNConfig { - switch(t.getSerializationType()) { - case TransitionRULE: - return this.ruleTransition(config, t.(*RuleTransition)) - case TransitionPRECEDENCE: - return this.precedenceTransition(config, t.(*PrecedencePredicateTransition), collectPredicates, inContext, fullCtx) - case TransitionPREDICATE: - return this.predTransition(config, t.(*PredicateTransition), collectPredicates, inContext, fullCtx) - case TransitionACTION: - return this.actionTransition(config, t.(*ActionTransition)) - case TransitionEPSILON: - return NewATNConfig4( config, t.getTarget() ) - case TransitionATOM: - // EOF transitions act like epsilon transitions after the first EOF - // transition is traversed - if (treatEofAsEpsilon) { - if (t.matches(TokenEOF, 0, 1)) { - return NewATNConfig4(config, t.getTarget()) - } - } - return nil - case TransitionRANGE: - // EOF transitions act like epsilon transitions after the first EOF - // transition is traversed - if (treatEofAsEpsilon) { - if (t.matches(TokenEOF, 0, 1)) { - return NewATNConfig4(config, t.getTarget()) - } - } - return nil - case TransitionSET: - // EOF transitions act like epsilon transitions after the first EOF - // transition is traversed - if (treatEofAsEpsilon) { - if (t.matches(TokenEOF, 0, 1)) { - return NewATNConfig4(config, t.getTarget()) - } - } - return nil - default: - return nil - } + switch t.getSerializationType() { + case TransitionRULE: + return this.ruleTransition(config, t.(*RuleTransition)) + case TransitionPRECEDENCE: + return this.precedenceTransition(config, t.(*PrecedencePredicateTransition), collectPredicates, inContext, fullCtx) + case TransitionPREDICATE: + return this.predTransition(config, t.(*PredicateTransition), collectPredicates, inContext, fullCtx) + case TransitionACTION: + return this.actionTransition(config, t.(*ActionTransition)) + case TransitionEPSILON: + return NewATNConfig4(config, t.getTarget()) + case TransitionATOM: + // EOF transitions act like epsilon transitions after the first EOF + // transition is traversed + if treatEofAsEpsilon { + if t.matches(TokenEOF, 0, 1) { + return NewATNConfig4(config, t.getTarget()) + } + } + return nil + case TransitionRANGE: + // EOF transitions act like epsilon transitions after the first EOF + // transition is traversed + if treatEofAsEpsilon { + if t.matches(TokenEOF, 0, 1) { + return NewATNConfig4(config, t.getTarget()) + } + } + return nil + case TransitionSET: + // EOF transitions act like epsilon transitions after the first EOF + // transition is traversed + if treatEofAsEpsilon { + if t.matches(TokenEOF, 0, 1) 
{ + return NewATNConfig4(config, t.getTarget()) + } + } + return nil + default: + return nil + } } func (this *ParserATNSimulator) actionTransition(config IATNConfig, t *ActionTransition) *ATNConfig { - if (ParserATNSimulatorprototypedebug) { - fmt.Println("ACTION edge " + strconv.Itoa(t.ruleIndex) + ":" + strconv.Itoa( t.actionIndex) ) - } - return NewATNConfig4(config, t.getTarget()) + if ParserATNSimulatorprototypedebug { + fmt.Println("ACTION edge " + strconv.Itoa(t.ruleIndex) + ":" + strconv.Itoa(t.actionIndex)) + } + return NewATNConfig4(config, t.getTarget()) } func (this *ParserATNSimulator) precedenceTransition(config IATNConfig, - pt *PrecedencePredicateTransition, collectPredicates, inContext, fullCtx bool) *ATNConfig { + pt *PrecedencePredicateTransition, collectPredicates, inContext, fullCtx bool) *ATNConfig { - if (ParserATNSimulatorprototypedebug) { - fmt.Println("PRED (collectPredicates=" + fmt.Sprint(collectPredicates) + ") " + - strconv.Itoa(pt.precedence) + ">=_p, ctx dependent=true") - if (this.parser!=nil) { - fmt.Println("context surrounding pred is " + fmt.Sprint(this.parser.getRuleInvocationStack(nil))) - } - } - var c *ATNConfig = nil - if (collectPredicates && inContext) { - if (fullCtx) { - // In full context mode, we can evaluate predicates on-the-fly - // during closure, which dramatically reduces the size of - // the config sets. It also obviates the need to test predicates - // later during conflict resolution. - var currentPosition = this._input.index() - this._input.seek(this._startIndex) - var predSucceeds = pt.getPredicate().evaluate(this.parser, this._outerContext) - this._input.seek(currentPosition) - if (predSucceeds) { - c = NewATNConfig4(config, pt.getTarget()) // no pred context - } - } else { - newSemCtx := SemanticContextandContext(config.getSemanticContext(), pt.getPredicate()) - c = NewATNConfig3(config, pt.getTarget(), newSemCtx) - } - } else { - c = NewATNConfig4(config, pt.getTarget()) - } - if (ParserATNSimulatorprototypedebug) { - fmt.Println("config from pred transition=" + c.toString()) - } - return c + if ParserATNSimulatorprototypedebug { + fmt.Println("PRED (collectPredicates=" + fmt.Sprint(collectPredicates) + ") " + + strconv.Itoa(pt.precedence) + ">=_p, ctx dependent=true") + if this.parser != nil { + fmt.Println("context surrounding pred is " + fmt.Sprint(this.parser.getRuleInvocationStack(nil))) + } + } + var c *ATNConfig = nil + if collectPredicates && inContext { + if fullCtx { + // In full context mode, we can evaluate predicates on-the-fly + // during closure, which dramatically reduces the size of + // the config sets. It also obviates the need to test predicates + // later during conflict resolution. 
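The full-context predicate evaluation just below brackets the test with a seek back to the decision's start index and a restore afterwards, so the predicate sees the input position the decision started at. A toy of just that bracketing, with toyStream standing in for the token stream (illustrative names only):

package main

import "fmt"

// toyStream stands in for the token stream: only index/seek matter here.
type toyStream struct{ pos int }

func (s *toyStream) index() int { return s.pos }
func (s *toyStream) seek(i int) { s.pos = i }

// evalAtStart rewinds to the decision's start index, evaluates the predicate
// there, then restores the stream to where prediction had advanced it.
func evalAtStart(s *toyStream, startIndex int, pred func() bool) bool {
	saved := s.index()
	s.seek(startIndex)
	ok := pred()
	s.seek(saved)
	return ok
}

func main() {
	s := &toyStream{pos: 7}
	ok := evalAtStart(s, 2, func() bool { return true })
	fmt.Println(ok, s.index()) // true 7 - the position is restored after evaluation
}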
+ var currentPosition = this._input.index() + this._input.seek(this._startIndex) + var predSucceeds = pt.getPredicate().evaluate(this.parser, this._outerContext) + this._input.seek(currentPosition) + if predSucceeds { + c = NewATNConfig4(config, pt.getTarget()) // no pred context + } + } else { + newSemCtx := SemanticContextandContext(config.getSemanticContext(), pt.getPredicate()) + c = NewATNConfig3(config, pt.getTarget(), newSemCtx) + } + } else { + c = NewATNConfig4(config, pt.getTarget()) + } + if ParserATNSimulatorprototypedebug { + fmt.Println("config from pred transition=" + c.toString()) + } + return c } func (this *ParserATNSimulator) predTransition(config IATNConfig, pt *PredicateTransition, collectPredicates, inContext, fullCtx bool) *ATNConfig { - if (ParserATNSimulatorprototypedebug) { - fmt.Println("PRED (collectPredicates=" + fmt.Sprint(collectPredicates) + ") " + strconv.Itoa(pt.ruleIndex) + - ":" + strconv.Itoa(pt.predIndex) + ", ctx dependent=" + fmt.Sprint(pt.isCtxDependent)) - if (this.parser!=nil) { - fmt.Println("context surrounding pred is " + fmt.Sprint(this.parser.getRuleInvocationStack(nil))) - } - } - var c *ATNConfig = nil - if (collectPredicates && ((pt.isCtxDependent && inContext) || ! pt.isCtxDependent)) { - if (fullCtx) { - // In full context mode, we can evaluate predicates on-the-fly - // during closure, which dramatically reduces the size of - // the config sets. It also obviates the need to test predicates - // later during conflict resolution. - var currentPosition = this._input.index() - this._input.seek(this._startIndex) - var predSucceeds = pt.getPredicate().evaluate(this.parser, this._outerContext) - this._input.seek(currentPosition) - if (predSucceeds) { - c = NewATNConfig4(config, pt.getTarget()) // no pred context - } - } else { - var newSemCtx = SemanticContextandContext(config.getSemanticContext(), pt.getPredicate()) - c = NewATNConfig3(config, pt.getTarget(), newSemCtx) - } - } else { - c = NewATNConfig4(config, pt.getTarget() ) - } - if (ParserATNSimulatorprototypedebug) { - fmt.Println("config from pred transition=" + c.toString()) - } - return c + if ParserATNSimulatorprototypedebug { + fmt.Println("PRED (collectPredicates=" + fmt.Sprint(collectPredicates) + ") " + strconv.Itoa(pt.ruleIndex) + + ":" + strconv.Itoa(pt.predIndex) + ", ctx dependent=" + fmt.Sprint(pt.isCtxDependent)) + if this.parser != nil { + fmt.Println("context surrounding pred is " + fmt.Sprint(this.parser.getRuleInvocationStack(nil))) + } + } + var c *ATNConfig = nil + if collectPredicates && ((pt.isCtxDependent && inContext) || !pt.isCtxDependent) { + if fullCtx { + // In full context mode, we can evaluate predicates on-the-fly + // during closure, which dramatically reduces the size of + // the config sets. It also obviates the need to test predicates + // later during conflict resolution. 
+ var currentPosition = this._input.index() + this._input.seek(this._startIndex) + var predSucceeds = pt.getPredicate().evaluate(this.parser, this._outerContext) + this._input.seek(currentPosition) + if predSucceeds { + c = NewATNConfig4(config, pt.getTarget()) // no pred context + } + } else { + var newSemCtx = SemanticContextandContext(config.getSemanticContext(), pt.getPredicate()) + c = NewATNConfig3(config, pt.getTarget(), newSemCtx) + } + } else { + c = NewATNConfig4(config, pt.getTarget()) + } + if ParserATNSimulatorprototypedebug { + fmt.Println("config from pred transition=" + c.toString()) + } + return c } func (this *ParserATNSimulator) ruleTransition(config IATNConfig, t *RuleTransition) *ATNConfig { - if (ParserATNSimulatorprototypedebug) { - fmt.Println("CALL rule " + this.getRuleName(t.getTarget().getRuleIndex()) + ", ctx=" + config.getContext().toString()) - } - var returnState = t.followState - var newContext = SingletonPredictionContextcreate(config.getContext(), returnState.getStateNumber()) - return NewATNConfig1( config, t.getTarget(), newContext ) + if ParserATNSimulatorprototypedebug { + fmt.Println("CALL rule " + this.getRuleName(t.getTarget().getRuleIndex()) + ", ctx=" + config.getContext().toString()) + } + var returnState = t.followState + var newContext = SingletonPredictionContextcreate(config.getContext(), returnState.getStateNumber()) + return NewATNConfig1(config, t.getTarget(), newContext) } func (this *ParserATNSimulator) getConflictingAlts(configs *ATNConfigSet) *BitSet { - var altsets = PredictionModegetConflictingAltSubsets(configs) - return PredictionModegetAlts(altsets) + var altsets = PredictionModegetConflictingAltSubsets(configs) + return PredictionModegetAlts(altsets) } - // Sam pointed out a problem with the previous definition, v3, of - // ambiguous states. If we have another state associated with conflicting - // alternatives, we should keep going. For example, the following grammar - // - // s : (ID | ID ID?) '' - // - // When the ATN simulation reaches the state before '', it has a DFA - // state that looks like: [12|1|[], 6|2|[], 12|2|[]]. Naturally - // 12|1|[] and 12|2|[] conflict, but we cannot stop processing this node - // because alternative to has another way to continue, via [6|2|[]]. - // The key is that we have a single state that has config's only associated - // with a single alternative, 2, and crucially the state transitions - // among the configurations are all non-epsilon transitions. That means - // we don't consider any conflicts that include alternative 2. So, we - // ignore the conflict between alts 1 and 2. We ignore a set of - // conflicting alts when there is an intersection with an alternative - // associated with a single alt state in the state&rarrconfig-list map. - // - // It's also the case that we might have two conflicting configurations but - // also a 3rd nonconflicting configuration for a different alternative: - // [1|1|[], 1|2|[], 8|3|[]]. This can come about from grammar: - // - // a : A | A | A B - // - // After matching input A, we reach the stop state for rule A, state 1. - // State 8 is the state right before B. Clearly alternatives 1 and 2 - // conflict and no amount of further lookahead will separate the two. - // However, alternative 3 will be able to continue and so we do not - // stop working on this state. In the previous example, we're concerned - // with states associated with the conflicting alternatives. 
Here alt
- // 3 is not associated with the conflicting configs, but since we can continue
- // looking for input reasonably, I don't declare the state done. We
- // ignore a set of conflicting alts when we have an alternative
- // that we still need to pursue.
+// Sam pointed out a problem with the previous definition, v3, of
+// ambiguous states. If we have another state associated with conflicting
+// alternatives, we should keep going. For example, the following grammar
+//
+// s : (ID | ID ID?) ';'
+//
+// When the ATN simulation reaches the state before ';', it has a DFA
+// state that looks like: [12|1|[], 6|2|[], 12|2|[]]. Naturally
+// 12|1|[] and 12|2|[] conflict, but we cannot stop processing this node
+// because alternative two has another way to continue, via [6|2|[]].
+// The key is that we have a single state that has configs only associated
+// with a single alternative, 2, and crucially the state transitions
+// among the configurations are all non-epsilon transitions. That means
+// we don't consider any conflicts that include alternative 2. So, we
+// ignore the conflict between alts 1 and 2. We ignore a set of
+// conflicting alts when there is an intersection with an alternative
+// associated with a single alt state in the state->config-list map.
+//
+// It's also the case that we might have two conflicting configurations but
+// also a 3rd nonconflicting configuration for a different alternative:
+// [1|1|[], 1|2|[], 8|3|[]]. This can come about from grammar:
+//
+// a : A | A | A B
+//
+// After matching input A, we reach the stop state for rule A, state 1.
+// State 8 is the state right before B. Clearly alternatives 1 and 2
+// conflict and no amount of further lookahead will separate the two.
+// However, alternative 3 will be able to continue and so we do not
+// stop working on this state. In the previous example, we're concerned
+// with states associated with the conflicting alternatives. Here alt
+// 3 is not associated with the conflicting configs, but since we can continue
+// looking for input reasonably, I don't declare the state done. We
+// ignore a set of conflicting alts when we have an alternative
+// that we still need to pursue.
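// A standalone sketch of the "keep going" test the comment above motivates
// (the per-state check lives in PredictionModehasStateAssociatedWithOneAlt
// elsewhere in this diff). A configuration is reduced to just (state, alt)
// here; the real ATNConfigSet also carries contexts and semantic predicates.
// For the example set [12|1, 6|2, 12|2], state 6 is reached only by
// alternative 2, so prediction keeps going instead of declaring ambiguity.
type stateAltPair struct{ state, alt int }

func someStatePinnedToOneAlt(configs []stateAltPair) bool {
	altsByState := make(map[int]map[int]bool)
	for _, c := range configs {
		if altsByState[c.state] == nil {
			altsByState[c.state] = make(map[int]bool)
		}
		altsByState[c.state][c.alt] = true
	}
	for _, alts := range altsByState {
		if len(alts) == 1 {
			return true // this alternative can still be pursued on its own
		}
	}
	return false
}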
// func (this *ParserATNSimulator) getConflictingAltsOrUniqueAlt(configs *ATNConfigSet) *BitSet { - var conflictingAlts *BitSet = nil - if (configs.uniqueAlt!= ATNINVALID_ALT_NUMBER) { - conflictingAlts = NewBitSet() - conflictingAlts.add(configs.uniqueAlt) - } else { - conflictingAlts = configs.conflictingAlts - } - return conflictingAlts + var conflictingAlts *BitSet = nil + if configs.uniqueAlt != ATNINVALID_ALT_NUMBER { + conflictingAlts = NewBitSet() + conflictingAlts.add(configs.uniqueAlt) + } else { + conflictingAlts = configs.conflictingAlts + } + return conflictingAlts } -func (this *ParserATNSimulator) getTokenName( t int ) string { - if (t==TokenEOF) { - return "EOF" - } - if( this.parser!=nil && this.parser.getLiteralNames()!=nil) { - if (t >= len(this.parser.getLiteralNames())) { - fmt.Println(strconv.Itoa(t) + " ttype out of range: " + strings.Join(this.parser.getLiteralNames(), ",")) -// fmt.Println(this.parser.getInputStream().getTokens()) - } else { - return this.parser.getLiteralNames()[t] + "<" + strconv.Itoa(t) + ">" - } - } - return "" + strconv.Itoa(t) +func (this *ParserATNSimulator) getTokenName(t int) string { + if t == TokenEOF { + return "EOF" + } + if this.parser != nil && this.parser.getLiteralNames() != nil { + if t >= len(this.parser.getLiteralNames()) { + fmt.Println(strconv.Itoa(t) + " ttype out of range: " + strings.Join(this.parser.getLiteralNames(), ",")) + // fmt.Println(this.parser.getInputStream().getTokens()) + } else { + return this.parser.getLiteralNames()[t] + "<" + strconv.Itoa(t) + ">" + } + } + return "" + strconv.Itoa(t) } func (this *ParserATNSimulator) getLookaheadName(input TokenStream) string { - return this.getTokenName(input.LA(1)) + return this.getTokenName(input.LA(1)) } // Used for debugging in adaptivePredict around execATN but I cut @@ -1313,49 +1314,49 @@ func (this *ParserATNSimulator) getLookaheadName(input TokenStream) string { // func (this *ParserATNSimulator) dumpDeadEndConfigs(nvae *NoViableAltException) { - panic("Not implemented") + panic("Not implemented") -// fmt.Println("dead end configs: ") -// var decs = nvae.deadEndConfigs -// -// for i:=0; i0) { -// var t = c.state.getTransitions()[0] -// if t2, ok := t.(*AtomTransition); ok { -// trans = "Atom "+ this.getTokenName(t2.label) -// } else if t3, ok := t.(SetTransition); ok { -// _, ok := t.(*NotSetTransition) -// -// var s string -// if (ok){ -// s = "~" -// } -// -// trans = s + "Set " + t3.set -// } -// } -// fmt.Errorf(c.toString(this.parser, true) + ":" + trans) -// } + // fmt.Println("dead end configs: ") + // var decs = nvae.deadEndConfigs + // + // for i:=0; i0) { + // var t = c.state.getTransitions()[0] + // if t2, ok := t.(*AtomTransition); ok { + // trans = "Atom "+ this.getTokenName(t2.label) + // } else if t3, ok := t.(SetTransition); ok { + // _, ok := t.(*NotSetTransition) + // + // var s string + // if (ok){ + // s = "~" + // } + // + // trans = s + "Set " + t3.set + // } + // } + // fmt.Errorf(c.toString(this.parser, true) + ":" + trans) + // } } func (this *ParserATNSimulator) noViableAlt(input TokenStream, outerContext IParserRuleContext, configs *ATNConfigSet, startIndex int) *NoViableAltException { - return NewNoViableAltException(this.parser, input, input.get(startIndex), input.LT(1), configs, outerContext) + return NewNoViableAltException(this.parser, input, input.get(startIndex), input.LT(1), configs, outerContext) } func (this *ParserATNSimulator) getUniqueAlt(configs *ATNConfigSet) int { - var alt = ATNINVALID_ALT_NUMBER - for i:=0; i " + 
to.toString() + " upon " + this.getTokenName(t)) - } - if (to==nil) { - return nil - } - to = this.addDFAState(dfa, to) // used existing if possible not incoming - if (from_==nil || t < -1 || t > this.atn.maxTokenType) { - return to - } - if (from_.edges==nil) { - from_.edges = make([]*DFAState, this.atn.maxTokenType+1+1) - } - from_.edges[t+1] = to // connect + if ParserATNSimulatorprototypedebug { + fmt.Println("EDGE " + from_.toString() + " -> " + to.toString() + " upon " + this.getTokenName(t)) + } + if to == nil { + return nil + } + to = this.addDFAState(dfa, to) // used existing if possible not incoming + if from_ == nil || t < -1 || t > this.atn.maxTokenType { + return to + } + if from_.edges == nil { + from_.edges = make([]*DFAState, this.atn.maxTokenType+1+1) + } + from_.edges[t+1] = to // connect - if (ParserATNSimulatorprototypedebug) { - var names []string - if (this.parser != nil){ - names = this.parser.getLiteralNames() - } + if ParserATNSimulatorprototypedebug { + var names []string + if this.parser != nil { + names = this.parser.getLiteralNames() + } - fmt.Println("DFA=\n" + dfa.toString(names, nil)) - } - return to + fmt.Println("DFA=\n" + dfa.toString(names, nil)) + } + return to } + // // Add state {@code D} to the DFA if it is not already present, and return // the actual instance stored in the DFA. If a state equivalent to {@code D} @@ -1420,58 +1422,57 @@ func (this *ParserATNSimulator) addDFAEdge(dfa *DFA, from_ *DFAState, t int, to // state was not already present. // func (this *ParserATNSimulator) addDFAState(dfa *DFA, D *DFAState) *DFAState { - if (D == ATNSimulatorERROR) { - return D - } - var hash = D.hashString() - var existing, ok = dfa.getStates()[hash] - if(ok) { - return existing - } - D.stateNumber = len(dfa.getStates()) - if (! 
D.configs.readOnly) { - D.configs.optimizeConfigs(this.ATNSimulator) - D.configs.setReadonly(true) - } - dfa.getStates()[hash] = D - if (ParserATNSimulatorprototypedebug) { - fmt.Println("adding NewDFA state: " + D.toString()) - } - return D + if D == ATNSimulatorERROR { + return D + } + var hash = D.hashString() + var existing, ok = dfa.getStates()[hash] + if ok { + return existing + } + D.stateNumber = len(dfa.getStates()) + if !D.configs.readOnly { + D.configs.optimizeConfigs(this.ATNSimulator) + D.configs.setReadonly(true) + } + dfa.getStates()[hash] = D + if ParserATNSimulatorprototypedebug { + fmt.Println("adding NewDFA state: " + D.toString()) + } + return D } func (this *ParserATNSimulator) reportAttemptingFullContext(dfa *DFA, conflictingAlts *BitSet, configs *ATNConfigSet, startIndex, stopIndex int) { - if (ParserATNSimulatorprototypedebug || ParserATNSimulatorprototyperetry_debug) { - var interval = NewInterval(startIndex, stopIndex + 1) - fmt.Println("reportAttemptingFullContext decision=" + strconv.Itoa(dfa.decision) + ":" + configs.toString() + - ", input=" + this.parser.getTokenStream().getTextFromInterval(interval)) - } - if (this.parser!=nil) { - this.parser.getErrorListenerDispatch().reportAttemptingFullContext(this.parser, dfa, startIndex, stopIndex, conflictingAlts, configs) - } + if ParserATNSimulatorprototypedebug || ParserATNSimulatorprototyperetry_debug { + var interval = NewInterval(startIndex, stopIndex+1) + fmt.Println("reportAttemptingFullContext decision=" + strconv.Itoa(dfa.decision) + ":" + configs.toString() + + ", input=" + this.parser.getTokenStream().getTextFromInterval(interval)) + } + if this.parser != nil { + this.parser.getErrorListenerDispatch().reportAttemptingFullContext(this.parser, dfa, startIndex, stopIndex, conflictingAlts, configs) + } } func (this *ParserATNSimulator) reportContextSensitivity(dfa *DFA, prediction int, configs *ATNConfigSet, startIndex, stopIndex int) { - if (ParserATNSimulatorprototypedebug || ParserATNSimulatorprototyperetry_debug) { - var interval = NewInterval(startIndex, stopIndex + 1) - fmt.Println("reportContextSensitivity decision=" + strconv.Itoa(dfa.decision) + ":" + configs.toString() + - ", input=" + this.parser.getTokenStream().getTextFromInterval(interval)) - } - if (this.parser!=nil) { - this.parser.getErrorListenerDispatch().reportContextSensitivity(this.parser, dfa, startIndex, stopIndex, prediction, configs) - } + if ParserATNSimulatorprototypedebug || ParserATNSimulatorprototyperetry_debug { + var interval = NewInterval(startIndex, stopIndex+1) + fmt.Println("reportContextSensitivity decision=" + strconv.Itoa(dfa.decision) + ":" + configs.toString() + + ", input=" + this.parser.getTokenStream().getTextFromInterval(interval)) + } + if this.parser != nil { + this.parser.getErrorListenerDispatch().reportContextSensitivity(this.parser, dfa, startIndex, stopIndex, prediction, configs) + } } - + // If context sensitive parsing, we know it's ambiguity not conflict// func (this *ParserATNSimulator) reportAmbiguity(dfa *DFA, D *DFAState, startIndex, stopIndex int, - exact bool, ambigAlts *BitSet, configs *ATNConfigSet ) { - if (ParserATNSimulatorprototypedebug || ParserATNSimulatorprototyperetry_debug) { - var interval = NewInterval(startIndex, stopIndex + 1) - fmt.Println("reportAmbiguity " + ambigAlts.toString() + ":" + configs.toString() + - ", input=" + this.parser.getTokenStream().getTextFromInterval(interval)) - } - if (this.parser!=nil) { - this.parser.getErrorListenerDispatch().reportAmbiguity(this.parser, 
dfa, startIndex, stopIndex, exact, ambigAlts, configs) - } + exact bool, ambigAlts *BitSet, configs *ATNConfigSet) { + if ParserATNSimulatorprototypedebug || ParserATNSimulatorprototyperetry_debug { + var interval = NewInterval(startIndex, stopIndex+1) + fmt.Println("reportAmbiguity " + ambigAlts.toString() + ":" + configs.toString() + + ", input=" + this.parser.getTokenStream().getTextFromInterval(interval)) + } + if this.parser != nil { + this.parser.getErrorListenerDispatch().reportAmbiguity(this.parser, dfa, startIndex, stopIndex, exact, ambigAlts, configs) + } } - diff --git a/runtime/Go/src/antlr4/ParserRuleContext.go b/runtime/Go/src/antlr4/ParserRuleContext.go index 9313d40cb..56e55c1c3 100644 --- a/runtime/Go/src/antlr4/ParserRuleContext.go +++ b/runtime/Go/src/antlr4/ParserRuleContext.go @@ -21,34 +21,31 @@ type IParserRuleContext interface { addChild(child IRuleContext) IRuleContext removeLastChild() - } type ParserRuleContext struct { *RuleContext - ruleIndex int - children []ParseTree + children []ParseTree start, stop *Token - exception IRecognitionException + exception IRecognitionException } func NewParserRuleContext(parent IParserRuleContext, invokingStateNumber int) *ParserRuleContext { prc := new(ParserRuleContext) - prc.InitRuleContext(parent, invokingStateNumber) prc.InitParserRuleContext(parent, invokingStateNumber) return prc } -func (prc *ParserRuleContext) InitParserRuleContext(parent IParserRuleContext, invokingStateNumber int){ +func (prc *ParserRuleContext) InitParserRuleContext(parent IParserRuleContext, invokingStateNumber int) { prc.InitRuleContext(parent, invokingStateNumber) - prc.ruleIndex = -1 + prc.RuleIndex = -1 // * If we are debugging or building a parse tree for a visitor, // we need to track all of the tokens and rule invocations associated // with prc rule's context. This is empty for parsing w/o tree constr. @@ -101,18 +98,18 @@ func (prc *ParserRuleContext) exitRule(listener ParseTreeListener) { // * Does not set parent link other add methods do that/// func (prc *ParserRuleContext) addTerminalNodeChild(child TerminalNode) TerminalNode { - if (prc.children == nil) { + if prc.children == nil { prc.children = make([]ParseTree, 0) } - prc.children = append( prc.children, child ) + prc.children = append(prc.children, child) return child } func (prc *ParserRuleContext) addChild(child IRuleContext) IRuleContext { - if (prc.children == nil) { + if prc.children == nil { prc.children = make([]ParseTree, 0) } - prc.children = append( prc.children, child ) + prc.children = append(prc.children, child) return child } @@ -121,8 +118,8 @@ func (prc *ParserRuleContext) addChild(child IRuleContext) IRuleContext { // generic ruleContext object. 
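// A standalone sketch of the child-list handling ParserRuleContext implements
// around here: children are appended as the parse proceeds, and
// removeLastChild trims the slice when a generic context added on rule entry
// is discarded. The treeNode type is a simplification, not this runtime's
// ParseTree.
type treeNode struct {
	children []*treeNode
}

func (n *treeNode) addChild(child *treeNode) *treeNode {
	if n.children == nil {
		n.children = make([]*treeNode, 0)
	}
	n.children = append(n.children, child)
	return child
}

func (n *treeNode) removeLastChild() {
	if len(n.children) > 0 {
		n.children = n.children[:len(n.children)-1]
	}
}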
// / func (prc *ParserRuleContext) removeLastChild() { - if (prc.children != nil && len(prc.children) > 0) { - prc.children = prc.children[0:len(prc.children)-1] + if prc.children != nil && len(prc.children) > 0 { + prc.children = prc.children[0 : len(prc.children)-1] } } @@ -143,7 +140,7 @@ func (prc *ParserRuleContext) addErrorNode(badToken *Token) *ErrorNodeImpl { } func (prc *ParserRuleContext) getChild(i int) Tree { - if (prc.children != nil && len(prc.children) >= i){ + if prc.children != nil && len(prc.children) >= i { return prc.children[i] } else { return nil @@ -151,13 +148,13 @@ func (prc *ParserRuleContext) getChild(i int) Tree { } func (prc *ParserRuleContext) getChildOfType(i int, childType reflect.Type) IRuleContext { - if (childType == nil) { + if childType == nil { return prc.getChild(i).(IRuleContext) } else { - for j :=0; j 0) { + if i > 0 { s = s + ", " } - if (this.returnStates[i] == PredictionContextEMPTY_RETURN_STATE) { + if this.returnStates[i] == PredictionContextEMPTY_RETURN_STATE { s = s + "$" continue } s = s + strconv.Itoa(this.returnStates[i]) - if (this.parents[i] != nil) { + if this.parents[i] != nil { s = s + " " + this.parents[i].toString() } else { s = s + "nil" @@ -360,12 +359,12 @@ func (this *ArrayPredictionContext) toString() string { // Return {@link //EMPTY} if {@code outerContext} is empty or nil. // / func predictionContextFromRuleContext(a *ATN, outerContext IRuleContext) IPredictionContext { - if (outerContext == nil) { + if outerContext == nil { outerContext = RuleContextEMPTY } // if we are in RuleContext of start rule, s, then PredictionContext // is EMPTY. Nobody called us. (if we are empty, return empty) - if (outerContext.getParent() == nil || outerContext == RuleContextEMPTY) { + if outerContext.getParent() == nil || outerContext == RuleContextEMPTY { return PredictionContextEMPTY } // If we have a parent, convert it to a PredictionContext graph @@ -392,19 +391,19 @@ func calculateListsHashString(parents []PredictionContext, returnStates []int) s func merge(a, b IPredictionContext, rootIsWildcard bool, mergeCache *DoubleDict) IPredictionContext { // share same graph if both same - if (a == b) { + if a == b { return a } ac, ok1 := a.(*SingletonPredictionContext) bc, ok2 := a.(*SingletonPredictionContext) - if (ok1 && ok2) { + if ok1 && ok2 { return mergeSingletons(ac, bc, rootIsWildcard, mergeCache) } // At least one of a or b is array // If one is $ and rootIsWildcard, return $ as// wildcard - if (rootIsWildcard) { + if rootIsWildcard { if _, ok := a.(*EmptyPredictionContext); ok { return a } @@ -414,10 +413,10 @@ func merge(a, b IPredictionContext, rootIsWildcard bool, mergeCache *DoubleDict) } // convert singleton so both are arrays to normalize if _, ok := a.(*SingletonPredictionContext); ok { - a = NewArrayPredictionContext([]IPredictionContext{ a.getParent(0) }, []int{ a.getReturnState(0) }) + a = NewArrayPredictionContext([]IPredictionContext{a.getParent(0)}, []int{a.getReturnState(0)}) } if _, ok := b.(*SingletonPredictionContext); ok { - b = NewArrayPredictionContext( []IPredictionContext{ b.getParent(0) }, []int{ b.getReturnState(0) }) + b = NewArrayPredictionContext([]IPredictionContext{b.getParent(0)}, []int{b.getReturnState(0)}) } return mergeArrays(a.(*ArrayPredictionContext), b.(*ArrayPredictionContext), rootIsWildcard, mergeCache) } @@ -454,32 +453,32 @@ func merge(a, b IPredictionContext, rootIsWildcard bool, mergeCache *DoubleDict) // @param mergeCache // / func mergeSingletons(a, b *SingletonPredictionContext, 
rootIsWildcard bool, mergeCache *DoubleDict) IPredictionContext { - if (mergeCache != nil) { + if mergeCache != nil { var previous = mergeCache.get(a.hashString(), b.hashString()) - if (previous != nil) { + if previous != nil { return previous.(IPredictionContext) } previous = mergeCache.get(b.hashString(), a.hashString()) - if (previous != nil) { + if previous != nil { return previous.(IPredictionContext) } } var rootMerge = mergeRoot(a, b, rootIsWildcard) - if (rootMerge != nil) { - if (mergeCache != nil) { + if rootMerge != nil { + if mergeCache != nil { mergeCache.set(a.hashString(), b.hashString(), rootMerge) } return rootMerge } - if (a.returnState == b.returnState) { + if a.returnState == b.returnState { var parent = merge(a.parentCtx, b.parentCtx, rootIsWildcard, mergeCache) // if parent is same as existing a or b parent or reduced to a parent, // return it - if (parent == a.parentCtx) { + if parent == a.parentCtx { return a // ax + bx = ax, if a=b } - if (parent == b.parentCtx) { + if parent == b.parentCtx { return b // ax + bx = bx, if a=b } // else: ax + ay = a'[x,y] @@ -487,28 +486,28 @@ func mergeSingletons(a, b *SingletonPredictionContext, rootIsWildcard bool, merg // of those graphs. dup a, a' points at merged array // Newjoined parent so create Newsingleton pointing to it, a' var spc = SingletonPredictionContextcreate(parent, a.returnState) - if (mergeCache != nil) { + if mergeCache != nil { mergeCache.set(a.hashString(), b.hashString(), spc) } return spc } else { // a != b payloads differ // see if we can collapse parents due to $+x parents if local ctx var singleParent IPredictionContext = nil - if (a == b || (a.parentCtx != nil && a.parentCtx == b.parentCtx)) { // ax + + if a == b || (a.parentCtx != nil && a.parentCtx == b.parentCtx) { // ax + // bx = // [a,b]x singleParent = a.parentCtx } - if (singleParent != nil) { // parents are same + if singleParent != nil { // parents are same // sort payloads and use same parent - var payloads = []int{ a.returnState, b.returnState } - if (a.returnState > b.returnState) { + var payloads = []int{a.returnState, b.returnState} + if a.returnState > b.returnState { payloads[0] = b.returnState payloads[1] = a.returnState } - var parents = []IPredictionContext{ singleParent, singleParent } + var parents = []IPredictionContext{singleParent, singleParent} var apc = NewArrayPredictionContext(parents, payloads) - if (mergeCache != nil) { + if mergeCache != nil { mergeCache.set(a.hashString(), b.hashString(), apc) } return apc @@ -516,15 +515,15 @@ func mergeSingletons(a, b *SingletonPredictionContext, rootIsWildcard bool, merg // parents differ and can't merge them. Just pack together // into array can't merge. 
// ax + by = [ax,by] - var payloads = []int{ a.returnState, b.returnState } - var parents = []IPredictionContext{ a.parentCtx, b.parentCtx } - if (a.returnState > b.returnState) { // sort by payload + var payloads = []int{a.returnState, b.returnState} + var parents = []IPredictionContext{a.parentCtx, b.parentCtx} + if a.returnState > b.returnState { // sort by payload payloads[0] = b.returnState payloads[1] = a.returnState - parents = []IPredictionContext{ b.parentCtx, a.parentCtx } + parents = []IPredictionContext{b.parentCtx, a.parentCtx} } var a_ = NewArrayPredictionContext(parents, payloads) - if (mergeCache != nil) { + if mergeCache != nil { mergeCache.set(a.hashString(), b.hashString(), a_) } return a_ @@ -570,23 +569,23 @@ func mergeSingletons(a, b *SingletonPredictionContext, rootIsWildcard bool, merg // otherwise false to indicate a full-context merge // / func mergeRoot(a, b ISingletonPredictionContext, rootIsWildcard bool) IPredictionContext { - if (rootIsWildcard) { - if (a == PredictionContextEMPTY) { + if rootIsWildcard { + if a == PredictionContextEMPTY { return PredictionContextEMPTY // // + b =// } - if (b == PredictionContextEMPTY) { + if b == PredictionContextEMPTY { return PredictionContextEMPTY // a +// =// } } else { - if (a == PredictionContextEMPTY && b == PredictionContextEMPTY) { + if a == PredictionContextEMPTY && b == PredictionContextEMPTY { return PredictionContextEMPTY // $ + $ = $ - } else if (a == PredictionContextEMPTY) { // $ + x = [$,x] - var payloads = []int{ b.getReturnState(-1), PredictionContextEMPTY_RETURN_STATE } - var parents = []IPredictionContext{ b.getParent(-1), nil } + } else if a == PredictionContextEMPTY { // $ + x = [$,x] + var payloads = []int{b.getReturnState(-1), PredictionContextEMPTY_RETURN_STATE} + var parents = []IPredictionContext{b.getParent(-1), nil} return NewArrayPredictionContext(parents, payloads) - } else if (b == PredictionContextEMPTY) { // x + $ = [$,x] ($ is always first if present) - var payloads = []int{ a.getReturnState(-1), PredictionContextEMPTY_RETURN_STATE } - var parents = []IPredictionContext{ a.getParent(-1), nil } + } else if b == PredictionContextEMPTY { // x + $ = [$,x] ($ is always first if present) + var payloads = []int{a.getReturnState(-1), PredictionContextEMPTY_RETURN_STATE} + var parents = []IPredictionContext{a.getParent(-1), nil} return NewArrayPredictionContext(parents, payloads) } } @@ -614,13 +613,13 @@ func mergeRoot(a, b ISingletonPredictionContext, rootIsWildcard bool) IPredictio //
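// A standalone sketch of the walk that mergeArrays performs below: both
// return-state lists are kept sorted, so a single two-pointer pass merges
// them while collapsing entries whose payloads (stack tops) are equal. The
// real merge also joins the parent contexts; this sketch merges only the ints.
func mergeReturnStates(a, b []int) []int {
	merged := make([]int, 0, len(a)+len(b))
	i, j := 0, 0
	for i < len(a) && j < len(b) {
		switch {
		case a[i] == b[j]: // equal stack tops yield one merged entry
			merged = append(merged, a[i])
			i++
			j++
		case a[i] < b[j]: // copy the smaller payload first to keep the result sorted
			merged = append(merged, a[i])
			i++
		default:
			merged = append(merged, b[j])
			j++
		}
	}
	merged = append(merged, a[i:]...) // copy whatever remains on either side
	merged = append(merged, b[j:]...)
	return merged
}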

// / func mergeArrays(a, b *ArrayPredictionContext, rootIsWildcard bool, mergeCache *DoubleDict) IPredictionContext { - if (mergeCache != nil) { + if mergeCache != nil { var previous = mergeCache.get(a.hashString(), b.hashString()) - if (previous != nil) { + if previous != nil { return previous.(IPredictionContext) } previous = mergeCache.get(b.hashString(), a.hashString()) - if (previous != nil) { + if previous != nil { return previous.(IPredictionContext) } } @@ -629,13 +628,13 @@ func mergeArrays(a, b *ArrayPredictionContext, rootIsWildcard bool, mergeCache * var j = 0 // walks b var k = 0 // walks target M array - var mergedReturnStates = make([]int,0) - var mergedParents = make([]IPredictionContext,0) + var mergedReturnStates = make([]int, 0) + var mergedParents = make([]IPredictionContext, 0) // walk and merge to yield mergedParents, mergedReturnStates for i < len(a.returnStates) && j < len(b.returnStates) { var a_parent = a.parents[i] var b_parent = b.parents[j] - if (a.returnStates[i] == b.returnStates[j]) { + if a.returnStates[i] == b.returnStates[j] { // same payload (stack tops are equal), must yield merged singleton var payload = a.returnStates[i] // $+$ = $ @@ -643,7 +642,7 @@ func mergeArrays(a, b *ArrayPredictionContext, rootIsWildcard bool, mergeCache * var ax_ax = (a_parent != nil && b_parent != nil && a_parent == b_parent) // ax+ax // -> // ax - if (bothDollars || ax_ax) { + if bothDollars || ax_ax { mergedParents[k] = a_parent // choose left mergedReturnStates[k] = payload } else { // ax+ay -> a'[x,y] @@ -653,7 +652,7 @@ func mergeArrays(a, b *ArrayPredictionContext, rootIsWildcard bool, mergeCache * } i += 1 // hop over left one as usual j += 1 // but also skip one in right side since we merge - } else if (a.returnStates[i] < b.returnStates[j]) { // copy a[i] to M + } else if a.returnStates[i] < b.returnStates[j] { // copy a[i] to M mergedParents[k] = a_parent mergedReturnStates[k] = a.returnStates[i] i += 1 @@ -665,7 +664,7 @@ func mergeArrays(a, b *ArrayPredictionContext, rootIsWildcard bool, mergeCache * k += 1 } // copy over any payloads remaining in either array - if (i < len(a.returnStates)) { + if i < len(a.returnStates) { for p := i; p < len(a.returnStates); p++ { mergedParents[k] = a.parents[p] mergedReturnStates[k] = a.returnStates[p] @@ -679,10 +678,10 @@ func mergeArrays(a, b *ArrayPredictionContext, rootIsWildcard bool, mergeCache * } } // trim merged if we combined a few that had same stack tops - if (k < len(mergedParents)) { // write index < last position trim - if (k == 1) { // for just one merged element, return singleton top + if k < len(mergedParents) { // write index < last position trim + if k == 1 { // for just one merged element, return singleton top var a_ = SingletonPredictionContextcreate(mergedParents[0], mergedReturnStates[0]) - if (mergeCache != nil) { + if mergeCache != nil { mergeCache.set(a.hashString(), b.hashString(), a_) } return a_ @@ -695,21 +694,21 @@ func mergeArrays(a, b *ArrayPredictionContext, rootIsWildcard bool, mergeCache * // if we created same array as a or b, return that instead // TODO: track whether this is possible above during merge sort for speed - if (M == a) { - if (mergeCache != nil) { + if M == a { + if mergeCache != nil { mergeCache.set(a.hashString(), b.hashString(), a) } return a } - if (M == b) { - if (mergeCache != nil) { + if M == b { + if mergeCache != nil { mergeCache.set(a.hashString(), b.hashString(), b) } return b } combineCommonParents(mergedParents) - if (mergeCache != nil) { + if mergeCache != nil { 
mergeCache.set(a.hashString(), b.hashString(), M) } return M @@ -805,10 +804,3 @@ func getCachedPredictionContext(context IPredictionContext, contextCache *Predic // return nodes // } //} - - - - - - - diff --git a/runtime/Go/src/antlr4/PredictionMode.go b/runtime/Go/src/antlr4/PredictionMode.go index 807b49d7f..2140d8896 100644 --- a/runtime/Go/src/antlr4/PredictionMode.go +++ b/runtime/Go/src/antlr4/PredictionMode.go @@ -1,7 +1,8 @@ package antlr4 + import ( - "strings" - "strconv" + "strconv" + "strings" ) // @@ -69,7 +70,6 @@ const ( // behavior for syntactically-incorrect inputs.

// PredictionModeLL_EXACT_AMBIG_DETECTION = 2 - ) // @@ -164,37 +164,37 @@ const ( // the configurations to strip out all of the predicates so that a standard // {@link ATNConfigSet} will merge everything ignoring predicates.
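// A standalone sketch of the conflict check used by the SLL termination test
// below (ignoring its rule-stop-state and predicate-stripping special cases):
// configurations are bucketed by (state, context), and any bucket holding
// more than one alternative is a conflicting subset. Types are simplified
// stand-ins, not this runtime's ATNConfig.
type sllConfig struct {
	state, alt int
	ctx        string // stands in for the prediction context
}

func hasConflictingSubset(configs []sllConfig) bool {
	type bucket struct {
		state int
		ctx   string
	}
	altsPerBucket := make(map[bucket]map[int]bool)
	for _, c := range configs {
		b := bucket{c.state, c.ctx}
		if altsPerBucket[b] == nil {
			altsPerBucket[b] = make(map[int]bool)
		}
		altsPerBucket[b][c.alt] = true
	}
	for _, alts := range altsPerBucket {
		if len(alts) > 1 {
			return true // same state and context reachable via several alternatives
		}
	}
	return false
}

// hasSLLConflictTerminatingPrediction combines this with the
// single-alternative check sketched earlier: it stops SLL prediction only
// when a conflict exists and no state is still pinned to one alternative.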

// -func PredictionModehasSLLConflictTerminatingPrediction( mode int, configs *ATNConfigSet) bool { - // Configs in rule stop states indicate reaching the end of the decision - // rule (local context) or end of start rule (full context). If all - // configs meet this condition, then none of the configurations is able - // to match additional input so we terminate prediction. - // - if (PredictionModeallConfigsInRuleStopStates(configs)) { - return true - } - // pure SLL mode parsing - if (mode == PredictionModeSLL) { - // Don't bother with combining configs from different semantic - // contexts if we can fail over to full LL costs more time - // since we'll often fail over anyway. - if (configs.hasSemanticContext) { - // dup configs, tossing out semantic predicates - var dup = NewATNConfigSet(false) - for i:= 0; i< len(configs.configs); i++ { - var c = configs.configs[i] +func PredictionModehasSLLConflictTerminatingPrediction(mode int, configs *ATNConfigSet) bool { + // Configs in rule stop states indicate reaching the end of the decision + // rule (local context) or end of start rule (full context). If all + // configs meet this condition, then none of the configurations is able + // to match additional input so we terminate prediction. + // + if PredictionModeallConfigsInRuleStopStates(configs) { + return true + } + // pure SLL mode parsing + if mode == PredictionModeSLL { + // Don't bother with combining configs from different semantic + // contexts if we can fail over to full LL costs more time + // since we'll often fail over anyway. + if configs.hasSemanticContext { + // dup configs, tossing out semantic predicates + var dup = NewATNConfigSet(false) + for i := 0; i < len(configs.configs); i++ { + var c = configs.configs[i] // NewATNConfig({semanticContext:}, c) - c = NewATNConfig2(c, SemanticContextNONE) - dup.add(c, nil) - } - configs = dup - } - // now we have combined contexts for configs with dissimilar preds - } - // pure SLL or combined SLL+LL mode parsing - var altsets = PredictionModegetConflictingAltSubsets(configs) - return PredictionModehasConflictingAltSet(altsets) && !PredictionModehasStateAssociatedWithOneAlt(configs) + c = NewATNConfig2(c, SemanticContextNONE) + dup.add(c, nil) + } + configs = dup + } + // now we have combined contexts for configs with dissimilar preds + } + // pure SLL or combined SLL+LL mode parsing + var altsets = PredictionModegetConflictingAltSubsets(configs) + return PredictionModehasConflictingAltSet(altsets) && !PredictionModehasStateAssociatedWithOneAlt(configs) } // Checks if any configuration in {@code configs} is in a @@ -206,13 +206,13 @@ func PredictionModehasSLLConflictTerminatingPrediction( mode int, configs *ATNCo // @return {@code true} if any configuration in {@code configs} is in a // {@link RuleStopState}, otherwise {@code false} func PredictionModehasConfigInRuleStopState(configs *ATNConfigSet) bool { - for i:= 0; i< len(configs.configs); i++ { + for i := 0; i < len(configs.configs); i++ { var c = configs.configs[i] if _, ok := c.getState().(*RuleStopState); ok { - return true - } + return true + } } - return false + return false } // Checks if all configurations in {@code configs} are in a @@ -225,14 +225,14 @@ func PredictionModehasConfigInRuleStopState(configs *ATNConfigSet) bool { // {@link RuleStopState}, otherwise {@code false} func PredictionModeallConfigsInRuleStopStates(configs *ATNConfigSet) bool { - for i:= 0; i < len(configs.configs); i++ { + for i := 0; i < len(configs.configs); i++ { var c = configs.configs[i] - if _, 
ok := c.getState().(*RuleStopState); !ok { - return false - } + if _, ok := c.getState().(*RuleStopState); !ok { + return false + } } - return true + return true } // @@ -377,7 +377,7 @@ func PredictionModeallConfigsInRuleStopStates(configs *ATNConfigSet) bool { // {@code A={{1,2}}} or {@code {{1,2},{1,2}}}, etc...
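// A standalone sketch of the check PredictionModeresolvesToJustOneViableAlt
// below delegates to (PredictionModegetSingleViableAlt): every conflicting
// subset must nominate the same minimum alternative, otherwise more lookahead
// is needed. BitSets are modelled as int slices here for simplicity.
func singleViableAlt(altsets [][]int) int {
	const invalid = 0 // stands in for ATNINVALID_ALT_NUMBER
	result := invalid
	for _, alts := range altsets {
		if len(alts) == 0 {
			continue
		}
		lowest := alts[0]
		for _, a := range alts {
			if a < lowest {
				lowest = a
			}
		}
		if result == invalid {
			result = lowest
		} else if result != lowest {
			return invalid // the subsets disagree, so no single viable alt
		}
	}
	return result
}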

// func PredictionModeresolvesToJustOneViableAlt(altsets []*BitSet) int { - return PredictionModegetSingleViableAlt(altsets) + return PredictionModegetSingleViableAlt(altsets) } // @@ -389,8 +389,9 @@ func PredictionModeresolvesToJustOneViableAlt(altsets []*BitSet) int { // {@link BitSet//cardinality cardinality} > 1, otherwise {@code false} // func PredictionModeallSubsetsConflict(altsets []*BitSet) bool { - return !PredictionModehasNonConflictingAltSet(altsets) + return !PredictionModehasNonConflictingAltSet(altsets) } + // // Determines if any single alternative subset in {@code altsets} contains // exactly one alternative. @@ -400,13 +401,13 @@ func PredictionModeallSubsetsConflict(altsets []*BitSet) bool { // {@link BitSet//cardinality cardinality} 1, otherwise {@code false} // func PredictionModehasNonConflictingAltSet(altsets []*BitSet) bool { - for i:=0; i1) { - return true - } + if alts.length() > 1 { + return true + } } - return false + return false } // @@ -435,18 +436,18 @@ func PredictionModehasConflictingAltSet(altsets []*BitSet) bool { // others, otherwise {@code false} // func PredictionModeallSubsetsEqual(altsets []*BitSet) bool { - var first *BitSet = nil + var first *BitSet = nil - for i:=0; i // func PredictionModegetConflictingAltSubsets(configs *ATNConfigSet) []*BitSet { - var configToAlts = make(map[string]*BitSet) + var configToAlts = make(map[string]*BitSet) - for i :=0; i < len(configs.configs); i++ { + for i := 0; i < len(configs.configs); i++ { var c = configs.configs[i] - var key = "key_" + strconv.Itoa(c.getState().getStateNumber()) + "/" + c.getContext().toString() - var alts = configToAlts[key] - if (alts != nil) { - alts = NewBitSet() - configToAlts[key] = alts - } - alts.add(c.getAlt()) + var key = "key_" + strconv.Itoa(c.getState().getStateNumber()) + "/" + c.getContext().toString() + var alts = configToAlts[key] + if alts != nil { + alts = NewBitSet() + configToAlts[key] = alts + } + alts.add(c.getAlt()) } var values = make([]*BitSet, 0) - for k,_ := range configToAlts { - if( strings.Index( k, "key_") != 0) { + for k, _ := range configToAlts { + if strings.Index(k, "key_") != 0 { continue } values = append(values, configToAlts[k]) } - return values + return values } // @@ -523,41 +524,40 @@ func PredictionModegetConflictingAltSubsets(configs *ATNConfigSet) []*BitSet { // // func PredictionModegetStateToAltMap(configs *ATNConfigSet) *AltDict { - var m = NewAltDict() + var m = NewAltDict() for _, c := range configs.configs { - var alts = m.get(c.getState().toString()) - if (alts == nil) { - alts = NewBitSet() - m.put(c.getState().toString(), alts) - } - alts.(*BitSet).add(c.getAlt()) - } - return m -} - -func PredictionModehasStateAssociatedWithOneAlt (configs *ATNConfigSet) bool { - var values = PredictionModegetStateToAltMap(configs).values() - for i:=0; iUsed for XPath and tree pattern compilation.

// func (this *Recognizer) getRuleIndexMap() map[string]int { - panic("Method not defined!") -// var ruleNames = this.getRuleNames() -// if (ruleNames==nil) { -// panic("The current recognizer does not provide a list of rule names.") -// } -// -// var result = ruleIndexMapCache[ruleNames] -// if(result==nil) { -// result = ruleNames.reduce(function(o, k, i) { o[k] = i }) -// ruleIndexMapCache[ruleNames] = result -// } -// return result + panic("Method not defined!") + // var ruleNames = this.getRuleNames() + // if (ruleNames==nil) { + // panic("The current recognizer does not provide a list of rule names.") + // } + // + // var result = ruleIndexMapCache[ruleNames] + // if(result==nil) { + // result = ruleNames.reduce(function(o, k, i) { o[k] = i }) + // ruleIndexMapCache[ruleNames] = result + // } + // return result } func (this *Recognizer) getTokenType(tokenName string) int { - panic("Method not defined!") -// var ttype = this.getTokenTypeMap()[tokenName] -// if (ttype !=nil) { -// return ttype -// } else { -// return TokenInvalidType -// } + panic("Method not defined!") + // var ttype = this.getTokenTypeMap()[tokenName] + // if (ttype !=nil) { + // return ttype + // } else { + // return TokenInvalidType + // } } //func (this *Recognizer) getTokenTypeMap() map[string]int { @@ -143,12 +141,11 @@ func (this *Recognizer) getTokenType(tokenName string) int { // What is the error header, normally line/character position information?// func (this *Recognizer) getErrorHeader(e IRecognitionException) string { - var line = e.getOffendingToken().line - var column = e.getOffendingToken().column - return "line " + strconv.Itoa(line) + ":" + strconv.Itoa(column) + var line = e.getOffendingToken().line + var column = e.getOffendingToken().column + return "line " + strconv.Itoa(line) + ":" + strconv.Itoa(column) } - // How should a token be displayed in an error message? The default // is to display just the text, but during development you might // want to have a lot of information spit out. Override in that case @@ -163,34 +160,34 @@ func (this *Recognizer) getErrorHeader(e IRecognitionException) string { // {@link DefaultErrorStrategy//getTokenErrorDisplay}. 
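// A standalone sketch of the escaping getTokenErrorDisplay performs below:
// control characters in the token text are made visible so an error message
// stays on one line. Hand-rolled here to stay dependency-free; the method
// itself uses strings.Replace.
func escapeTokenText(s string) string {
	out := make([]rune, 0, len(s))
	for _, r := range s {
		switch r {
		case '\n':
			out = append(out, '\\', 'n')
		case '\t':
			out = append(out, '\\', 't')
		case '\r':
			out = append(out, '\\', 'r')
		default:
			out = append(out, r)
		}
	}
	return "'" + string(out) + "'"
}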
// func (this *Recognizer) getTokenErrorDisplay(t *Token) string { - if (t==nil) { - return "" - } - var s = t.text() - if s=="" { - if (t.tokenType==TokenEOF) { - s = "" - } else { - s = "<" + strconv.Itoa(t.tokenType) + ">" - } - } - s = strings.Replace(s,"\t","\\t", -1) - s = strings.Replace(s,"\n","\\n", -1) - s = strings.Replace(s,"\r","\\r", -1) + if t == nil { + return "" + } + var s = t.text() + if s == "" { + if t.tokenType == TokenEOF { + s = "" + } else { + s = "<" + strconv.Itoa(t.tokenType) + ">" + } + } + s = strings.Replace(s, "\t", "\\t", -1) + s = strings.Replace(s, "\n", "\\n", -1) + s = strings.Replace(s, "\r", "\\r", -1) - return "'" + s + "'" + return "'" + s + "'" } func (this *Recognizer) getErrorListenerDispatch() IErrorListener { - return NewProxyErrorListener(this._listeners) + return NewProxyErrorListener(this._listeners) } // subclass needs to override these if there are sempreds or actions // that the ATN interp needs to execute func (this *Recognizer) sempred(localctx IRuleContext, ruleIndex int, actionIndex int) bool { - return true + return true } func (this *Recognizer) precpred(localctx IRuleContext, precedence int) bool { - return true -} \ No newline at end of file + return true +} diff --git a/runtime/Go/src/antlr4/RuleContext.go b/runtime/Go/src/antlr4/RuleContext.go index 1b4a565ed..93e6c811a 100644 --- a/runtime/Go/src/antlr4/RuleContext.go +++ b/runtime/Go/src/antlr4/RuleContext.go @@ -28,23 +28,24 @@ import ( type IRuleContext interface { RuleNode - getInvokingState()int + getInvokingState() int setInvokingState(int) - getRuleIndex()int + getRuleIndex() int + isEmpty() bool toString([]string, IRuleContext) string } type RuleContext struct { - parentCtx IRuleContext + parentCtx IRuleContext invokingState int - ruleIndex int - children []Tree + RuleIndex int + children []Tree } -func NewRuleContext(parent IRuleContext, invokingState int) *RuleContext { +func NewRuleContext(parent IRuleContext, invokingState int) *RuleContext { rn := new(RuleContext) @@ -61,22 +62,21 @@ func (rn *RuleContext) InitRuleContext(parent IRuleContext, invokingState int) { // What state invoked the rule associated with this context? // The "return address" is the followState of invokingState // If parent is nil, this should be -1. - if (parent == nil){ + if parent == nil { rn.invokingState = -1 } else { rn.invokingState = invokingState } } -func (this *RuleContext) setChildren(elems []Tree){ +func (this *RuleContext) setChildren(elems []Tree) { this.children = elems } -func (this *RuleContext) setParent(v Tree){ +func (this *RuleContext) setParent(v Tree) { this.parentCtx = v.(IRuleContext) } - func (this *RuleContext) getInvokingState() int { return this.getInvokingState() } @@ -85,8 +85,8 @@ func (this *RuleContext) setInvokingState(t int) { this.invokingState = t } -func (this *RuleContext) getRuleIndex() int{ - return this.ruleIndex +func (this *RuleContext) getRuleIndex() int { + return this.RuleIndex } func (this *RuleContext) getChildren() []Tree { @@ -96,7 +96,7 @@ func (this *RuleContext) getChildren() []Tree { func (this *RuleContext) depth() int { var n = 0 var p Tree = this - for (p != nil) { + for p != nil { p = p.getParent() n += 1 } @@ -131,7 +131,7 @@ func (this *RuleContext) getPayload() interface{} { // method. 
// func (this *RuleContext) getText() string { - if (this.getChildCount() == 0) { + if this.getChildCount() == 0 { return "" } else { var s string @@ -173,22 +173,22 @@ func (this *RuleContext) toString(ruleNames []string, stop IRuleContext) string var p IRuleContext = this var s = "[" - for (p != nil && p != stop) { - if (ruleNames == nil) { - if (!p.isEmpty()) { + for p != nil && p != stop { + if ruleNames == nil { + if !p.isEmpty() { s += strconv.Itoa(p.getInvokingState()) } } else { var ri = p.getRuleIndex() var ruleName string - if (ri >= 0 && ri < len(ruleNames)) { + if ri >= 0 && ri < len(ruleNames) { ruleName = ruleNames[ri] } else { ruleName = strconv.Itoa(ri) } s += ruleName } - if (p.getParent() != nil && (ruleNames != nil || !p.getParent().(IRuleContext).isEmpty())) { + if p.getParent() != nil && (ruleNames != nil || !p.getParent().(IRuleContext).isEmpty()) { s += " " } p = p.getParent().(IRuleContext) @@ -196,4 +196,3 @@ func (this *RuleContext) toString(ruleNames []string, stop IRuleContext) string s += "]" return s } - diff --git a/runtime/Go/src/antlr4/SemanticContext.go b/runtime/Go/src/antlr4/SemanticContext.go index 52641604a..1934c7341 100644 --- a/runtime/Go/src/antlr4/SemanticContext.go +++ b/runtime/Go/src/antlr4/SemanticContext.go @@ -1,9 +1,8 @@ package antlr4 import ( - - "strconv" "fmt" + "strconv" ) // A tree structure used to record the semantic context in which @@ -22,14 +21,14 @@ type SemanticContext interface { } func SemanticContextandContext(a, b SemanticContext) SemanticContext { - if (a == nil || a == SemanticContextNONE) { + if a == nil || a == SemanticContextNONE { return b } - if (b == nil || b == SemanticContextNONE) { + if b == nil || b == SemanticContextNONE { return a } var result = NewAND(a, b) - if ( len(result.opnds) == 1) { + if len(result.opnds) == 1 { return result.opnds[0] } else { return result @@ -37,27 +36,26 @@ func SemanticContextandContext(a, b SemanticContext) SemanticContext { } func SemanticContextorContext(a, b SemanticContext) SemanticContext { - if (a == nil) { + if a == nil { return b } - if (b == nil) { + if b == nil { return a } - if (a == SemanticContextNONE || b == SemanticContextNONE) { + if a == SemanticContextNONE || b == SemanticContextNONE { return SemanticContextNONE } var result = NewOR(a, b) - if ( len(result.opnds) == 1) { + if len(result.opnds) == 1 { return result.opnds[0] } else { return result } } - type Predicate struct { - ruleIndex int - predIndex int + ruleIndex int + predIndex int isCtxDependent bool } @@ -73,7 +71,7 @@ func NewPredicate(ruleIndex, predIndex int, isCtxDependent bool) *Predicate { //The default {@link SemanticContext}, which is semantically equivalent to //a predicate of the form {@code {true}?}. 
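// A standalone sketch of the reduction rules in SemanticContextandContext and
// SemanticContextorContext above: NONE (an always-true predicate) is the
// neutral element for AND and the absorbing element for OR, so trivially-true
// operands never end up wrapped in a combined context. Types are simplified
// stand-ins for SemanticContext.
type boolCtx interface{ eval() bool }

type alwaysTrue struct{}

func (alwaysTrue) eval() bool { return true }

var noneCtx boolCtx = alwaysTrue{}

type andPair struct{ a, b boolCtx }

func (p andPair) eval() bool { return p.a.eval() && p.b.eval() }

func andCombine(a, b boolCtx) boolCtx {
	if a == nil || a == noneCtx {
		return b // true AND b == b
	}
	if b == nil || b == noneCtx {
		return a // a AND true == a
	}
	return andPair{a, b}
}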
-var SemanticContextNONE SemanticContext = NewPredicate(-1,-1,false) +var SemanticContextNONE SemanticContext = NewPredicate(-1, -1, false) func (this *Predicate) evalPrecedence(parser IRecognizer, outerContext IRuleContext) SemanticContext { return this @@ -83,7 +81,7 @@ func (this *Predicate) evaluate(parser IRecognizer, outerContext IRuleContext) b var localctx IRuleContext = nil - if (this.isCtxDependent){ + if this.isCtxDependent { localctx = outerContext } @@ -95,14 +93,14 @@ func (this *Predicate) hashString() string { } func (this *Predicate) equals(other interface{}) bool { - if (this == other) { + if this == other { return true } else if _, ok := other.(*Predicate); !ok { return false } else { return this.ruleIndex == other.(*Predicate).ruleIndex && - this.predIndex == other.(*Predicate).predIndex && - this.isCtxDependent == other.(*Predicate).isCtxDependent + this.predIndex == other.(*Predicate).predIndex && + this.isCtxDependent == other.(*Predicate).isCtxDependent } } @@ -127,7 +125,7 @@ func (this *PrecedencePredicate) evaluate(parser IRecognizer, outerContext IRule } func (this *PrecedencePredicate) evalPrecedence(parser IRecognizer, outerContext IRuleContext) SemanticContext { - if (parser.precpred(outerContext, this.precedence)) { + if parser.precpred(outerContext, this.precedence) { return SemanticContextNONE } else { return nil @@ -143,7 +141,7 @@ func (this *PrecedencePredicate) hashString() string { } func (this *PrecedencePredicate) equals(other interface{}) bool { - if (this == other) { + if this == other { return true } else if _, ok := other.(*PrecedencePredicate); !ok { return false @@ -153,14 +151,13 @@ func (this *PrecedencePredicate) equals(other interface{}) bool { } func (this *PrecedencePredicate) toString() string { - return "{"+strconv.Itoa(this.precedence)+">=prec}?" + return "{" + strconv.Itoa(this.precedence) + ">=prec}?" 
} - func PrecedencePredicatefilterPrecedencePredicates(set *Set) []*PrecedencePredicate { var result = make([]*PrecedencePredicate, 0) - for _,v := range set.values() { + for _, v := range set.values() { if c2, ok := v.(*PrecedencePredicate); ok { result = append(result, c2) } @@ -172,14 +169,13 @@ func PrecedencePredicatefilterPrecedencePredicates(set *Set) []*PrecedencePredic // A semantic context which is true whenever none of the contained contexts // is false.` - type AND struct { opnds []SemanticContext } func NewAND(a, b SemanticContext) *AND { - var operands = NewSet(nil,nil) + var operands = NewSet(nil, nil) if aa, ok := a.(*AND); ok { for _, o := range aa.opnds { operands.add(o) @@ -196,12 +192,12 @@ func NewAND(a, b SemanticContext) *AND { operands.add(b) } var precedencePredicates = PrecedencePredicatefilterPrecedencePredicates(operands) - if ( len(precedencePredicates) > 0) { + if len(precedencePredicates) > 0 { // interested in the transition with the lowest precedence var reduced *PrecedencePredicate = nil - for _,p := range precedencePredicates { - if(reduced==nil || p.precedence < reduced.precedence) { + for _, p := range precedencePredicates { + if reduced == nil || p.precedence < reduced.precedence { reduced = p } } @@ -209,7 +205,6 @@ func NewAND(a, b SemanticContext) *AND { operands.add(reduced) } - vs := operands.values() opnds := make([]SemanticContext, len(vs)) for i, v := range vs { @@ -223,7 +218,7 @@ func NewAND(a, b SemanticContext) *AND { } func (this *AND) equals(other interface{}) bool { - if (this == other) { + if this == other { return true } else if _, ok := other.(*AND); !ok { return false @@ -240,6 +235,7 @@ func (this *AND) equals(other interface{}) bool { func (this *AND) hashString() string { return fmt.Sprint(this.opnds) + "/AND" } + // // {@inheritDoc} // @@ -249,7 +245,7 @@ func (this *AND) hashString() string { // func (this *AND) evaluate(parser IRecognizer, outerContext IRuleContext) bool { for i := 0; i < len(this.opnds); i++ { - if (!this.opnds[i].evaluate(parser, outerContext)) { + if !this.opnds[i].evaluate(parser, outerContext) { return false } } @@ -264,29 +260,29 @@ func (this *AND) evalPrecedence(parser IRecognizer, outerContext IRuleContext) S var context = this.opnds[i] var evaluated = context.evalPrecedence(parser, outerContext) differs = differs || (evaluated != context) - if (evaluated == nil) { + if evaluated == nil { // The AND context is false if any element is false return nil - } else if (evaluated != SemanticContextNONE) { + } else if evaluated != SemanticContextNONE { // Reduce the result by skipping true elements - operands = append (operands, evaluated) + operands = append(operands, evaluated) } } - if (!differs) { + if !differs { return this } - if ( len(operands) == 0) { + if len(operands) == 0 { // all elements were true, so the AND context is true return SemanticContextNONE } var result SemanticContext = nil - for _,o := range operands { - if (result == nil){ + for _, o := range operands { + if result == nil { result = o - } else { + } else { result = SemanticContextandContext(result, o) } } @@ -297,11 +293,11 @@ func (this *AND) evalPrecedence(parser IRecognizer, outerContext IRuleContext) S func (this *AND) toString() string { var s = "" - for _,o := range this.opnds { + for _, o := range this.opnds { s += "&& " + o.toString() } - if (len(s) > 3){ + if len(s) > 3 { return s[0:3] } else { return s @@ -318,7 +314,7 @@ type OR struct { } func NewOR(a, b SemanticContext) *OR { - var operands = NewSet(nil,nil) + var 
operands = NewSet(nil, nil) if aa, ok := a.(*OR); ok { for _, o := range aa.opnds { operands.add(o) @@ -335,12 +331,12 @@ func NewOR(a, b SemanticContext) *OR { operands.add(b) } var precedencePredicates = PrecedencePredicatefilterPrecedencePredicates(operands) - if ( len(precedencePredicates) > 0) { + if len(precedencePredicates) > 0 { // interested in the transition with the lowest precedence var reduced *PrecedencePredicate = nil - for _,p := range precedencePredicates { - if(reduced==nil || p.precedence > reduced.precedence) { + for _, p := range precedencePredicates { + if reduced == nil || p.precedence > reduced.precedence { reduced = p } } @@ -360,9 +356,8 @@ func NewOR(a, b SemanticContext) *OR { return this } - func (this *OR) equals(other interface{}) bool { - if (this == other) { + if this == other { return true } else if _, ok := other.(*OR); !ok { return false @@ -386,7 +381,7 @@ func (this *OR) hashString() string { // func (this *OR) evaluate(parser IRecognizer, outerContext IRuleContext) bool { for i := 0; i < len(this.opnds); i++ { - if (this.opnds[i].evaluate(parser, outerContext)) { + if this.opnds[i].evaluate(parser, outerContext) { return true } } @@ -400,28 +395,28 @@ func (this *OR) evalPrecedence(parser IRecognizer, outerContext IRuleContext) Se var context = this.opnds[i] var evaluated = context.evalPrecedence(parser, outerContext) differs = differs || (evaluated != context) - if (evaluated == SemanticContextNONE) { + if evaluated == SemanticContextNONE { // The OR context is true if any element is true return SemanticContextNONE - } else if (evaluated != nil) { + } else if evaluated != nil { // Reduce the result by skipping false elements operands = append(operands, evaluated) } } - if (!differs) { + if !differs { return this } - if (len(operands) == 0) { + if len(operands) == 0 { // all elements were false, so the OR context is false return nil } var result SemanticContext = nil - for _,o := range operands { - if (result == nil) { + for _, o := range operands { + if result == nil { result = o } else { - result = SemanticContextorContext(result, o); + result = SemanticContextorContext(result, o) } } @@ -431,17 +426,13 @@ func (this *OR) evalPrecedence(parser IRecognizer, outerContext IRuleContext) Se func (this *OR) toString() string { var s = "" - for _,o := range this.opnds { + for _, o := range this.opnds { s += "|| " + o.toString() } - if (len(s) > 3){ + if len(s) > 3 { return s[0:3] } else { return s } } - - - - diff --git a/runtime/Go/src/antlr4/Token.go b/runtime/Go/src/antlr4/Token.go index 5deee4584..4ef75efb7 100644 --- a/runtime/Go/src/antlr4/Token.go +++ b/runtime/Go/src/antlr4/Token.go @@ -1,13 +1,13 @@ package antlr4 import ( - "strings" "strconv" + "strings" ) type TokenSourceCharStreamPair struct { tokenSource TokenSource - charStream CharStream + charStream CharStream } // A token has properties: text, type, line, character position in the line @@ -15,16 +15,16 @@ type TokenSourceCharStreamPair struct { // we obtained this token. type Token struct { - source *TokenSourceCharStreamPair - tokenType int // token type of the token - channel int // The parser ignores everything not on DEFAULT_CHANNEL - start int // optional return -1 if not implemented. - stop int // optional return -1 if not implemented. - tokenIndex int // from 0..n-1 of the token object in the input stream - line int // line=1..n of the 1st character - column int // beginning of the line at which it occurs, 0..n-1 - _text string // text of the token. 
- readOnly bool + source *TokenSourceCharStreamPair + tokenType int // token type of the token + channel int // The parser ignores everything not on DEFAULT_CHANNEL + start int // optional return -1 if not implemented. + stop int // optional return -1 if not implemented. + tokenIndex int // from 0..n-1 of the token object in the input stream + line int // line=1..n of the 1st character + column int // beginning of the line at which it occurs, 0..n-1 + _text string // text of the token. + readOnly bool } const ( @@ -58,7 +58,7 @@ const ( // should be obtained from the input along with the start and stop indexes // of the token. -func (this *Token) text() string{ +func (this *Token) text() string { return this._text } @@ -88,7 +88,7 @@ func NewCommonToken(source *TokenSourceCharStreamPair, tokenType, channel, start t.start = start t.stop = stop t.tokenIndex = -1 - if (t.source.tokenSource != nil) { + if t.source.tokenSource != nil { t.line = source.tokenSource.getLine() t.column = source.tokenSource.getCharPositionInLine() } else { @@ -116,7 +116,7 @@ func NewCommonToken(source *TokenSourceCharStreamPair, tokenType, channel, start // func (ct *CommonToken) clone() *CommonToken { var t = NewCommonToken(ct.source, ct.tokenType, ct.channel, ct.start, - ct.stop) + ct.stop) t.tokenIndex = ct.tokenIndex t.line = ct.line t.column = ct.column @@ -125,15 +125,15 @@ func (ct *CommonToken) clone() *CommonToken { } func (this *CommonToken) text() string { - if (this._text != "") { + if this._text != "" { return this._text } var input = this.getInputStream() - if (input == nil) { + if input == nil { return "" } var n = input.size() - if (this.start < n && this.stop < n) { + if this.start < n && this.stop < n { return input.getTextFromInterval(NewInterval(this.start, this.stop)) } else { return "" @@ -146,7 +146,7 @@ func (this *CommonToken) setText(text string) { func (this *CommonToken) toString() string { var txt = this.text() - if (txt != "") { + if txt != "" { txt = strings.Replace(txt, "\n", "", -1) txt = strings.Replace(txt, "\r", "", -1) txt = strings.Replace(txt, "\t", "", -1) @@ -154,17 +154,14 @@ func (this *CommonToken) toString() string { txt = "" } - var ch string; - if (this.channel > 0){ + var ch string + if this.channel > 0 { ch = ",channel=" + strconv.Itoa(this.channel) } else { ch = "" } return "[@" + strconv.Itoa(this.tokenIndex) + "," + strconv.Itoa(this.start) + ":" + strconv.Itoa(this.stop) + "='" + - txt + "',<" + strconv.Itoa(this.tokenType) + ">" + - ch + "," + strconv.Itoa(this.line) + ":" + strconv.Itoa(this.column) + "]" + txt + "',<" + strconv.Itoa(this.tokenType) + ">" + + ch + "," + strconv.Itoa(this.line) + ":" + strconv.Itoa(this.column) + "]" } - - - diff --git a/runtime/Go/src/antlr4/TokenSource.go b/runtime/Go/src/antlr4/TokenSource.go index 4688199df..7aad12513 100644 --- a/runtime/Go/src/antlr4/TokenSource.go +++ b/runtime/Go/src/antlr4/TokenSource.go @@ -1,7 +1,6 @@ package antlr4 type TokenSource interface { - nextToken() *Token skip() more() @@ -11,6 +10,4 @@ type TokenSource interface { getSourceName() string setTokenFactory(factory TokenFactory) getTokenFactory() TokenFactory - } - diff --git a/runtime/Go/src/antlr4/Transition.go b/runtime/Go/src/antlr4/Transition.go index 987d3e210..a12e42162 100644 --- a/runtime/Go/src/antlr4/Transition.go +++ b/runtime/Go/src/antlr4/Transition.go @@ -1,4 +1,5 @@ package antlr4 + import ( "fmt" "strconv" @@ -19,19 +20,19 @@ type ITransition interface { getIsEpsilon() bool getLabel() *IntervalSet getSerializationType() int - 
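The CommonToken.toString change above keeps the familiar token display format. A standalone sketch of that formatting, with a trimmed-down token struct standing in for CommonToken (the field names here are illustrative):

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// tok carries just the fields that feed the display string; CommonToken has more.
type tok struct {
	index, start, stop, ttype, channel, line, column int
	text                                             string
}

// display follows the same shape as CommonToken.toString above:
// [@index,start:stop='text',<type>,channel=n,line:column], with the channel
// part omitted for the default channel and newlines/tabs stripped from text.
func display(t tok) string {
	txt := strings.NewReplacer("\n", "", "\r", "", "\t", "").Replace(t.text)
	ch := ""
	if t.channel > 0 {
		ch = ",channel=" + strconv.Itoa(t.channel)
	}
	return "[@" + strconv.Itoa(t.index) + "," + strconv.Itoa(t.start) + ":" +
		strconv.Itoa(t.stop) + "='" + txt + "',<" + strconv.Itoa(t.ttype) + ">" +
		ch + "," + strconv.Itoa(t.line) + ":" + strconv.Itoa(t.column) + "]"
}

func main() {
	fmt.Println(display(tok{index: 0, start: 0, stop: 2, ttype: 5, line: 1, column: 0, text: "foo"}))
	// prints: [@0,0:2='foo',<5>,1:0]
}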
matches( int, int, int ) bool + matches(int, int, int) bool } type Transition struct { - target IATNState - isEpsilon bool - label *IntervalSet + target IATNState + isEpsilon bool + label *IntervalSet serializationType int } -func NewTransition (target IATNState) *Transition { +func NewTransition(target IATNState) *Transition { - if (target==nil || target==nil) { + if target == nil || target == nil { panic("target cannot be nil.") } @@ -68,24 +69,23 @@ func (t *Transition) getSerializationType() int { return t.serializationType } -func (t *Transition) matches( symbol, minVocabSymbol, maxVocabSymbol int ) bool { +func (t *Transition) matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { panic("Not implemented") } -const( - TransitionEPSILON = 1 - TransitionRANGE = 2 - TransitionRULE = 3 - TransitionPREDICATE = 4 // e.g., {isType(input.LT(1))}? - TransitionATOM = 5 - TransitionACTION = 6 - TransitionSET = 7 // ~(A|B) or ~atom, wildcard, which convert to next 2 - TransitionNOT_SET = 8 - TransitionWILDCARD = 9 +const ( + TransitionEPSILON = 1 + TransitionRANGE = 2 + TransitionRULE = 3 + TransitionPREDICATE = 4 // e.g., {isType(input.LT(1))}? + TransitionATOM = 5 + TransitionACTION = 6 + TransitionSET = 7 // ~(A|B) or ~atom, wildcard, which convert to next 2 + TransitionNOT_SET = 8 + TransitionWILDCARD = 9 TransitionPRECEDENCE = 10 ) - var TransitionserializationNames = []string{ "INVALID", "EPSILON", @@ -124,18 +124,17 @@ var TransitionserializationNames = []string{ // TransitionPRECEDENCE //} - // TODO: make all transitions sets? no, should remove set edges type AtomTransition struct { *Transition label_ int - label *IntervalSet + label *IntervalSet } -func NewAtomTransition ( target IATNState, label int ) *AtomTransition { +func NewAtomTransition(target IATNState, label int) *AtomTransition { t := new(AtomTransition) - t.InitTransition( target ) + t.InitTransition(target) t.label_ = label // The token type or character value or, signifies special label. 
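Each transition type overrides matches with its own test, while the base Transition deliberately panics. A hedged sketch of the two simplest cases, AtomTransition above and RangeTransition a little further down (plain functions here stand in for the methods):

package main

import "fmt"

// atomMatches mirrors AtomTransition.matches: the edge is taken only on an
// exact symbol (token type or character value) match.
func atomMatches(label, symbol int) bool {
	return label == symbol
}

// rangeMatches mirrors RangeTransition.matches: an inclusive interval test
// over the symbol value.
func rangeMatches(start, stop, symbol int) bool {
	return symbol >= start && symbol <= stop
}

// Epsilon, rule, predicate and action transitions never consume input, so
// their matches() implementations simply return false.

func main() {
	fmt.Println(atomMatches('a', 'a'))       // true
	fmt.Println(rangeMatches('a', 'z', 'q')) // true
	fmt.Println(rangeMatches('a', 'z', '0')) // false
}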
t.label = t.makeLabel() @@ -150,7 +149,7 @@ func (t *AtomTransition) makeLabel() *IntervalSet { return s } -func (t *AtomTransition) matches( symbol, minVocabSymbol, maxVocabSymbol int ) bool { +func (t *AtomTransition) matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { return t.label_ == symbol } @@ -161,15 +160,14 @@ func (t *AtomTransition) toString() string { type RuleTransition struct { *Transition - followState IATNState + followState IATNState ruleIndex, precedence int - } -func NewRuleTransition ( ruleStart IATNState, ruleIndex, precedence int, followState IATNState ) *RuleTransition { +func NewRuleTransition(ruleStart IATNState, ruleIndex, precedence int, followState IATNState) *RuleTransition { t := new(RuleTransition) - t.InitTransition( ruleStart ) + t.InitTransition(ruleStart) t.ruleIndex = ruleIndex t.precedence = precedence @@ -180,23 +178,21 @@ func NewRuleTransition ( ruleStart IATNState, ruleIndex, precedence int, followS return t } - -func (t *RuleTransition) matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { +func (t *RuleTransition) matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { return false } - type EpsilonTransition struct { *Transition - isEpsilon bool + isEpsilon bool outermostPrecedenceReturn int } -func NewEpsilonTransition ( target IATNState, outermostPrecedenceReturn int ) *EpsilonTransition { +func NewEpsilonTransition(target IATNState, outermostPrecedenceReturn int) *EpsilonTransition { t := new(EpsilonTransition) - t.InitTransition( target ) + t.InitTransition(target) t.serializationType = TransitionEPSILON t.isEpsilon = true @@ -204,8 +200,7 @@ func NewEpsilonTransition ( target IATNState, outermostPrecedenceReturn int ) *E return t } - -func (t *EpsilonTransition) matches( symbol, minVocabSymbol, maxVocabSymbol int ) bool { +func (t *EpsilonTransition) matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { return false } @@ -219,10 +214,10 @@ type RangeTransition struct { start, stop int } -func NewRangeTransition ( target IATNState, start, stop int ) *RangeTransition { +func NewRangeTransition(target IATNState, start, stop int) *RangeTransition { t := new(RangeTransition) - t.InitTransition( target ) + t.InitTransition(target) t.serializationType = TransitionRANGE t.start = start @@ -231,14 +226,13 @@ func NewRangeTransition ( target IATNState, start, stop int ) *RangeTransition { return t } - func (t *RangeTransition) makeLabel() *IntervalSet { var s = NewIntervalSet() s.addRange(t.start, t.stop) return s } -func (t *RangeTransition) matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { +func (t *RangeTransition) matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { return symbol >= t.start && symbol <= t.stop } @@ -250,10 +244,10 @@ type AbstractPredicateTransition struct { *Transition } -func NewAbstractPredicateTransition ( target IATNState ) *AbstractPredicateTransition { +func NewAbstractPredicateTransition(target IATNState) *AbstractPredicateTransition { t := new(AbstractPredicateTransition) - t.InitTransition( target ) + t.InitTransition(target) return t } @@ -261,11 +255,11 @@ func NewAbstractPredicateTransition ( target IATNState ) *AbstractPredicateTrans type PredicateTransition struct { *Transition - isCtxDependent bool + isCtxDependent bool ruleIndex, predIndex int } -func NewPredicateTransition ( target IATNState, ruleIndex, predIndex int, isCtxDependent bool ) *PredicateTransition { +func NewPredicateTransition(target IATNState, ruleIndex, predIndex int, isCtxDependent bool) *PredicateTransition { t 
:= new(PredicateTransition) t.InitTransition(target) @@ -278,8 +272,7 @@ func NewPredicateTransition ( target IATNState, ruleIndex, predIndex int, isCtxD return t } - -func (t *PredicateTransition) matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { +func (t *PredicateTransition) matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { return false } @@ -294,14 +287,14 @@ func (t *PredicateTransition) toString() string { type ActionTransition struct { *Transition - isCtxDependent bool + isCtxDependent bool ruleIndex, actionIndex, predIndex int } -func NewActionTransition ( target IATNState, ruleIndex, actionIndex int, isCtxDependent bool ) *ActionTransition { +func NewActionTransition(target IATNState, ruleIndex, actionIndex int, isCtxDependent bool) *ActionTransition { t := new(ActionTransition) - t.InitTransition( target ) + t.InitTransition(target) t.serializationType = TransitionACTION t.ruleIndex = ruleIndex @@ -311,9 +304,7 @@ func NewActionTransition ( target IATNState, ruleIndex, actionIndex int, isCtxDe return t } - - -func (t *ActionTransition) matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { +func (t *ActionTransition) matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { return false } @@ -321,24 +312,23 @@ func (t *ActionTransition) toString() string { return "action_" + strconv.Itoa(t.ruleIndex) + ":" + strconv.Itoa(t.actionIndex) } - type SetTransition struct { *Transition } -func NewSetTransition ( target IATNState, set *IntervalSet ) *SetTransition { +func NewSetTransition(target IATNState, set *IntervalSet) *SetTransition { t := new(SetTransition) - t.InitTransition( target ) - t.InitSetTransition( set ) + t.InitTransition(target) + t.InitSetTransition(set) return t } -func (t *SetTransition) InitSetTransition( set *IntervalSet ) { +func (t *SetTransition) InitSetTransition(set *IntervalSet) { t.serializationType = TransitionSET - if (set !=nil && set !=nil) { + if set != nil && set != nil { t.label = set } else { t.label = NewIntervalSet() @@ -347,35 +337,31 @@ func (t *SetTransition) InitSetTransition( set *IntervalSet ) { } - -func (t *SetTransition) matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { +func (t *SetTransition) matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { return t.label.contains(symbol) } - func (t *SetTransition) toString() string { return t.label.toString() } - type NotSetTransition struct { SetTransition } -func NewNotSetTransition ( target IATNState, set *IntervalSet) *NotSetTransition { +func NewNotSetTransition(target IATNState, set *IntervalSet) *NotSetTransition { t := new(NotSetTransition) - t.InitTransition( target ) - t.InitSetTransition( set ) + t.InitTransition(target) + t.InitSetTransition(set) t.serializationType = TransitionNOT_SET return t } - -func (t *NotSetTransition) matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { - return symbol >= minVocabSymbol && symbol <= maxVocabSymbol && !t.label.contains( symbol) +func (t *NotSetTransition) matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { + return symbol >= minVocabSymbol && symbol <= maxVocabSymbol && !t.label.contains(symbol) } func (t *NotSetTransition) toString() string { @@ -386,16 +372,16 @@ type WildcardTransition struct { *Transition } -func NewWildcardTransition ( target IATNState ) *WildcardTransition { +func NewWildcardTransition(target IATNState) *WildcardTransition { t := new(WildcardTransition) - t.InitTransition( target ) + t.InitTransition(target) t.serializationType = TransitionWILDCARD return t } -func (t 
*WildcardTransition) matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { +func (t *WildcardTransition) matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { return symbol >= minVocabSymbol && symbol <= maxVocabSymbol } @@ -409,10 +395,10 @@ type PrecedencePredicateTransition struct { precedence int } -func NewPrecedencePredicateTransition ( target IATNState, precedence int ) *PrecedencePredicateTransition { +func NewPrecedencePredicateTransition(target IATNState, precedence int) *PrecedencePredicateTransition { t := new(PrecedencePredicateTransition) - t.InitTransition( target ) + t.InitTransition(target) t.serializationType = TransitionPRECEDENCE t.precedence = precedence @@ -421,8 +407,7 @@ func NewPrecedencePredicateTransition ( target IATNState, precedence int ) *Prec return t } - -func (t *PrecedencePredicateTransition) matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { +func (t *PrecedencePredicateTransition) matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { return false } @@ -433,15 +418,3 @@ func (t *PrecedencePredicateTransition) getPredicate() *PrecedencePredicate { func (t *PrecedencePredicateTransition) toString() string { return fmt.Sprint(t.precedence) + " >= _p" } - - - - - - - - - - - - diff --git a/runtime/Go/src/antlr4/Tree.go b/runtime/Go/src/antlr4/Tree.go index e2341438f..9e61eb8cd 100644 --- a/runtime/Go/src/antlr4/Tree.go +++ b/runtime/Go/src/antlr4/Tree.go @@ -1,6 +1,5 @@ package antlr4 - // The basic notion of a tree has a parent, a payload, and a list of children. // It is the most abstract interface for all the trees used by ANTLR. /// @@ -15,7 +14,7 @@ type Tree interface { getChildCount() int getChildren() []Tree setChildren([]Tree) -// toStringTree() string + // toStringTree() string } type SyntaxTree interface { @@ -27,10 +26,10 @@ type SyntaxTree interface { type ParseTree interface { SyntaxTree -// T accept(ParseTreeVisitor visitor); + // T accept(ParseTreeVisitor visitor); accept(visitor ParseTreeVisitor) interface{} getText() string -// toStringTree([]string, IRecognizer) string + // toStringTree([]string, IRecognizer) string } type RuleNode interface { @@ -116,7 +115,6 @@ func (this *TerminalNodeImpl) setChildren(t []Tree) { panic("Cannot set children on terminal node") } - func (this *TerminalNodeImpl) getSymbol() *Token { return this.symbol } @@ -134,7 +132,7 @@ func (this *TerminalNodeImpl) getPayload() interface{} { } func (this *TerminalNodeImpl) getSourceInterval() *Interval { - if (this.symbol == nil) { + if this.symbol == nil { return TreeINVALID_INTERVAL } var tokenIndex = this.symbol.tokenIndex @@ -145,7 +143,7 @@ func (this *TerminalNodeImpl) getChildCount() int { return 0 } -func (this *TerminalNodeImpl) accept(visitor ParseTreeVisitor ) interface{} { +func (this *TerminalNodeImpl) accept(visitor ParseTreeVisitor) interface{} { return visitor.visitTerminal(this) } @@ -154,14 +152,13 @@ func (this *TerminalNodeImpl) getText() string { } func (this *TerminalNodeImpl) toString() string { - if (this.symbol.tokenType == TokenEOF) { + if this.symbol.tokenType == TokenEOF { return "" } else { return this.symbol.text() } } - // Represents a token that was consumed during resynchronization // rather than during a valid match operation. 
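Rounding out the Transition.go hunks above, the set-based transitions differ only in how they test the label. A self-contained sketch, with a plain int slice standing in for IntervalSet:

package main

import "fmt"

// contains stands in for IntervalSet.contains; a plain slice keeps the
// sketch self-contained.
func contains(label []int, symbol int) bool {
	for _, s := range label {
		if s == symbol {
			return true
		}
	}
	return false
}

// setMatches mirrors SetTransition.matches: the symbol must be in the label set.
func setMatches(label []int, symbol int) bool {
	return contains(label, symbol)
}

// notSetMatches mirrors NotSetTransition.matches: the symbol must be a valid
// vocabulary symbol and must not be in the label set.
func notSetMatches(label []int, symbol, minVocab, maxVocab int) bool {
	return symbol >= minVocab && symbol <= maxVocab && !contains(label, symbol)
}

// wildcardMatches mirrors WildcardTransition.matches: any in-vocabulary symbol.
func wildcardMatches(symbol, minVocab, maxVocab int) bool {
	return symbol >= minVocab && symbol <= maxVocab
}

func main() {
	label := []int{'a', 'b'}
	fmt.Println(setMatches(label, 'a'))            // true
	fmt.Println(notSetMatches(label, 'c', 0, 127)) // true
	fmt.Println(notSetMatches(label, 'a', 0, 127)) // false
	fmt.Println(wildcardMatches('z', 0, 127))      // true
}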
For example, // we will create this kind of a node during single token insertion @@ -182,13 +179,11 @@ func (this *ErrorNodeImpl) isErrorNode() bool { return true } -func (this *ErrorNodeImpl) accept( visitor ParseTreeVisitor ) interface{} { +func (this *ErrorNodeImpl) accept(visitor ParseTreeVisitor) interface{} { return visitor.visitErrorNode(this) } - type ParseTreeWalker struct { - } func NewParseTreeWalker() *ParseTreeWalker { @@ -210,6 +205,7 @@ func (this *ParseTreeWalker) walk(listener ParseTreeListener, t Tree) { this.exitRule(listener, t.(RuleNode)) } } + // // The discovery of a rule node, involves sending two events: the generic // {@link ParseTreeListener//enterEveryRule} and a diff --git a/runtime/Go/src/antlr4/Trees.go b/runtime/Go/src/antlr4/Trees.go index 8bd008e67..67a78587c 100644 --- a/runtime/Go/src/antlr4/Trees.go +++ b/runtime/Go/src/antlr4/Trees.go @@ -1,4 +1,5 @@ package antlr4 + import "fmt" /** A set of utility routines useful for all kinds of ANTLR trees. */ @@ -8,62 +9,61 @@ import "fmt" // parse trees and extract data appropriately. func TreestoStringTree(tree Tree, ruleNames []string, recog IRecognizer) string { - if(recog!=nil) { - ruleNames = recog.getRuleNames() - } + if recog != nil { + ruleNames = recog.getRuleNames() + } - var s = TreesgetNodeText(tree, ruleNames, nil) + var s = TreesgetNodeText(tree, ruleNames, nil) - s = EscapeWhitespace(s, false) - var c = tree.getChildCount() - if(c==0) { - return s - } - var res = "(" + s + " " - if(c>0) { - s = TreestoStringTree(tree.getChild(0), ruleNames, nil) - res += s - } - for i :=1; i 0 { + s = TreestoStringTree(tree.getChild(0), ruleNames, nil) + res += s + } + for i := 1; i < c; i++ { + s = TreestoStringTree(tree.getChild(i), ruleNames, nil) + res += (" " + s) + } + res += ")" + return res } func TreesgetNodeText(t Tree, ruleNames []string, recog *Parser) string { - if(recog!=nil) { - ruleNames = recog.getRuleNames() - } + if recog != nil { + ruleNames = recog.getRuleNames() + } - if(ruleNames!=nil) { - if t2, ok := t.(RuleNode); ok { - return ruleNames[t2.getRuleContext().getRuleIndex()] - } else if t2, ok := t.(ErrorNode); ok { - return fmt.Sprint(t2) - } else if t2, ok := t.(TerminalNode); ok { - if(t2.getSymbol()!=nil) { - return t2.getSymbol().text() - } - } - } + if ruleNames != nil { + if t2, ok := t.(RuleNode); ok { + return ruleNames[t2.getRuleContext().getRuleIndex()] + } else if t2, ok := t.(ErrorNode); ok { + return fmt.Sprint(t2) + } else if t2, ok := t.(TerminalNode); ok { + if t2.getSymbol() != nil { + return t2.getSymbol().text() + } + } + } - // no recog for rule names - var payload = t.getPayload() - if p2, ok := payload.(*Token); ok { - return p2.text() - } + // no recog for rule names + var payload = t.getPayload() + if p2, ok := payload.(*Token); ok { + return p2.text() + } - return fmt.Sprint(t.getPayload()) + return fmt.Sprint(t.getPayload()) } - // Return ordered list of all children of this node func TreesgetChildren(t Tree) []Tree { var list = make([]Tree, 0) - for i := 0;i< t.getChildCount();i++ { + for i := 0; i < t.getChildCount(); i++ { list = append(list, t.getChild(i)) } return list @@ -73,21 +73,21 @@ func TreesgetChildren(t Tree) []Tree { // list is the root and the last is the parent of this node. // func TreesgetAncestors(t Tree) []Tree { - var ancestors = make([]Tree, 0) - t = t.getParent() - for(t!=nil) { - f := []Tree { t } + var ancestors = make([]Tree, 0) + t = t.getParent() + for t != nil { + f := []Tree{t} ancestors = append(f, ancestors...) 
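ParseTreeWalker.walk above visits terminal and error nodes directly and brackets the recursion over a rule node's children with enter and exit events. A minimal standalone sketch of that traversal over a toy tree type (the node type and callbacks are illustrative, not the runtime's):

package main

import "fmt"

// node is a toy stand-in for a parse-tree node: leaves play the role of
// terminal nodes, interior nodes the role of rule nodes.
type node struct {
	name     string
	children []*node
}

// walk follows the same depth-first pattern as ParseTreeWalker.walk above:
// leaves are visited directly, and the recursion over an interior node's
// children is bracketed by enter and exit events.
func walk(n *node, enter, exit, visitLeaf func(string)) {
	if len(n.children) == 0 {
		visitLeaf(n.name)
		return
	}
	enter(n.name)
	for _, c := range n.children {
		walk(c, enter, exit, visitLeaf)
	}
	exit(n.name)
}

func main() {
	tree := &node{name: "expr", children: []*node{{name: "1"}, {name: "+"}, {name: "2"}}}
	walk(tree,
		func(r string) { fmt.Println("enter", r) },
		func(r string) { fmt.Println("exit", r) },
		func(t string) { fmt.Println("visit", t) },
	)
	// enter expr, visit 1, visit +, visit 2, exit expr
}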
- t = t.getParent() - } - return ancestors -} - -func TreesfindAllTokenNodes(t ParseTree, ttype int) []ParseTree { - return TreesfindAllNodes(t, ttype, true) + t = t.getParent() + } + return ancestors } -func TreesfindAllRuleNodes(t ParseTree, ruleIndex int) []ParseTree { +func TreesfindAllTokenNodes(t ParseTree, ttype int) []ParseTree { + return TreesfindAllNodes(t, ttype, true) +} + +func TreesfindAllRuleNodes(t ParseTree, ruleIndex int) []ParseTree { return TreesfindAllNodes(t, ruleIndex, false) } @@ -104,26 +104,24 @@ func Trees_findAllNodes(t ParseTree, index int, findTokens bool, nodes []ParseTr t3, ok2 := t.(IParserRuleContext) if findTokens && ok { - if(t2.getSymbol().tokenType==index) { + if t2.getSymbol().tokenType == index { nodes = append(nodes, t2) } - } else if(!findTokens && ok2) { - if(t3.getRuleIndex()==index) { + } else if !findTokens && ok2 { + if t3.getRuleIndex() == index { nodes = append(nodes, t3) } } // check children - for i := 0;i b { return a } @@ -31,7 +31,7 @@ type IntStack []int var ErrEmptyStack = errors.New("Stack is empty") func (s *IntStack) Pop() (int, error) { - l := len(*s)-1 + l := len(*s) - 1 if l < 0 { return 0, ErrEmptyStack } @@ -44,8 +44,8 @@ func (s *IntStack) Push(e int) { *s = append(*s, e) } -func arrayToString(a []interface{}) string{ - return fmt.Sprint( a ) +func arrayToString(a []interface{}) string { + return fmt.Sprint(a) } func hashCode(s string) string { @@ -55,24 +55,24 @@ func hashCode(s string) string { } type Set struct { - data map[string][]interface{} - hashFunction func(interface{}) string - equalsFunction func(interface{},interface{}) bool + data map[string][]interface{} + hashFunction func(interface{}) string + equalsFunction func(interface{}, interface{}) bool } -func NewSet(hashFunction func(interface{}) string, equalsFunction func(interface{},interface{}) bool) *Set { +func NewSet(hashFunction func(interface{}) string, equalsFunction func(interface{}, interface{}) bool) *Set { s := new(Set) - s.data = make( map[string][]interface{}) + s.data = make(map[string][]interface{}) - if (hashFunction == nil){ + if hashFunction == nil { s.hashFunction = standardHashFunction } else { s.hashFunction = hashFunction } - if (equalsFunction == nil){ + if equalsFunction == nil { s.equalsFunction = standardEqualsFunction } else { s.equalsFunction = equalsFunction @@ -97,7 +97,7 @@ func getBytes(key interface{}) ([]byte, error) { func standardHashFunction(a interface{}) string { h := fnv.New32a() - v,_ := getBytes(a) + v, _ := getBytes(a) h.Write(v) return fmt.Sprint(h.Sum32()) } @@ -115,16 +115,16 @@ func (this *Set) add(value interface{}) interface{} { if this.data[key] != nil { for i := 0; i < len(values); i++ { - if(this.equalsFunction(value, values[i])) { + if this.equalsFunction(value, values[i]) { return values[i] } } - this.data[key] = append( this.data[key], value ) + this.data[key] = append(this.data[key], value) return value } - this.data[key] = []interface{}{ value } + this.data[key] = []interface{}{value} return value } @@ -138,7 +138,7 @@ func (this *Set) contains(value interface{}) bool { if this.data[key] != nil { for i := 0; i < len(values); i++ { - if(this.equalsFunction(value, values[i])) { + if this.equalsFunction(value, values[i]) { return true } } @@ -149,7 +149,7 @@ func (this *Set) contains(value interface{}) bool { func (this *Set) values() []interface{} { var l = make([]interface{}, len(this.data)) - for key,_ := range this.data { + for key, _ := range this.data { if strings.Index(key, "hash_") == 0 { l = append(l, 
this.data[key]...) } @@ -161,7 +161,6 @@ func (this *Set) toString() string { return fmt.Sprint(this.data) } - type BitSet struct { data map[int]bool } @@ -181,7 +180,7 @@ func (this *BitSet) clear(index int) { } func (this *BitSet) or(set *BitSet) { - for k,_ := range set.data { + for k, _ := range set.data { this.add(k) } } @@ -197,7 +196,7 @@ func (this *BitSet) contains(value int) bool { func (this *BitSet) values() []int { ks := make([]int, len(this.data)) i := 0 - for k,_ := range this.data { + for k, _ := range this.data { ks[i] = k i++ } @@ -207,7 +206,7 @@ func (this *BitSet) values() []int { func (this *BitSet) minValue() int { min := 0 - for k,_ := range this.data { + for k, _ := range this.data { if k < min { min = k } @@ -218,15 +217,15 @@ func (this *BitSet) minValue() int { func (this *BitSet) equals(other interface{}) bool { otherBitSet, ok := other.(*BitSet) - if !ok { + if !ok { return false } - if len(this.data) != len(otherBitSet.data){ + if len(this.data) != len(otherBitSet.data) { return false } - for k,v := range this.data { + for k, v := range this.data { if otherBitSet.data[k] != v { return false } @@ -243,7 +242,6 @@ func (this *BitSet) toString() string { return fmt.Sprint(this.data) } - type AltDict struct { data map[string]interface{} } @@ -267,15 +265,13 @@ func (this *AltDict) put(key string, value interface{}) { func (this *AltDict) values() []interface{} { vs := make([]interface{}, len(this.data)) i := 0 - for _,v := range this.data { + for _, v := range this.data { vs[i] = v i++ } return vs } - - type DoubleDict struct { data map[string]map[string]interface{} } @@ -289,7 +285,7 @@ func NewDoubleDict() *DoubleDict { func (this *DoubleDict) get(a string, b string) interface{} { var d = this.data[a] - if (d == nil){ + if d == nil { return nil } @@ -299,7 +295,7 @@ func (this *DoubleDict) get(a string, b string) interface{} { func (this *DoubleDict) set(a, b string, o interface{}) { var d = this.data[a] - if(d==nil) { + if d == nil { d = make(map[string]interface{}) this.data[a] = d } @@ -309,11 +305,11 @@ func (this *DoubleDict) set(a, b string, o interface{}) { func EscapeWhitespace(s string, escapeSpaces bool) string { - s = strings.Replace(s,"\t","\\t", -1) - s = strings.Replace(s,"\n","\\n", -1) - s = strings.Replace(s,"\r","\\r", -1) - if(escapeSpaces) { - s = strings.Replace(s," ","\u00B7", -1) + s = strings.Replace(s, "\t", "\\t", -1) + s = strings.Replace(s, "\n", "\\n", -1) + s = strings.Replace(s, "\r", "\\r", -1) + if escapeSpaces { + s = strings.Replace(s, " ", "\u00B7", -1) } return s } @@ -325,17 +321,10 @@ func TitleCase(str string) string { panic("Not implemented") -// re := regexp.MustCompile("\w\S*") -// return re.ReplaceAllStringFunc(str, func(s string) { -// return strings.ToUpper(s[0:1]) + s[1:2] -// }) + // re := regexp.MustCompile("\w\S*") + // return re.ReplaceAllStringFunc(str, func(s string) { + // return strings.ToUpper(s[0:1]) + s[1:2] + // }) return "" } - - - - - - - diff --git a/tool/resources/org/antlr/v4/tool/templates/codegen/Go/Go.stg b/tool/resources/org/antlr/v4/tool/templates/codegen/Go/Go.stg index e55868b59..5ec1db678 100644 --- a/tool/resources/org/antlr/v4/tool/templates/codegen/Go/Go.stg +++ b/tool/resources/org/antlr/v4/tool/templates/codegen/Go/Go.stg @@ -15,10 +15,7 @@ ParserFile(file, parser, namedActions) ::= << package parser // -import( - "antlr4" - "strings" -) +import "antlr4" @@ -31,11 +28,7 @@ ListenerFile(file, header) ::= << package parser // -// TODO: this should probably be an interface - -import( - 
"antlr4" -) +import "antlr4" // This class defines a complete listener for a parse tree produced by . @@ -45,11 +38,11 @@ type Listener struct { #. -func (l *Listener) enter(ctx *ParserRuleContext) { +func (l *Listener) enter(ctx antlr4.IParserRuleContext) { \} // Exit a parse tree produced by #. -func (l *Listener) exit(ctx *ParserRuleContext) { +func (l *Listener) exit(ctx antlr4.IParserRuleContext) { \} }; separator="\n"> @@ -61,21 +54,19 @@ VisitorFile(file, header) ::= << package parser // -import( - "antlr4" -) +import "antlr4"
// This class defines a complete generic visitor for a parse tree produced by . type Visitor struct { - ParseTreeVisitor + } #. -func (l Visitor) visit(ctx *ParserRuleContext) { +func (l Visitor) visit(ctx IParserRuleContext) { \} }; separator="\n"> @@ -91,38 +82,36 @@ var = require('./'). // TODO -type struct { - +var deserializer = antlr4.NewATNDeserializer() +var deserializedAtn = deserializer.Deserialize(serializedATN) + +var literalNames = []string{ }; null="nil", separator=", ", wrap, anchor> } +var symbolicNames = []string{ }; null="nil", separator=", ", wrap, anchor> } +var ruleNames = []string{ "}; separator=", ", wrap, anchor> } + +type struct { + - _interp *ParserATNSimulator ruleNames []string literalNames []string symbolicNames []string grammarFileName string } -func New(input) { +func New(input TokenStream) { - // TODO could be package level variable + var decisionToDFA = make([]antlr4.DFA,len(deserializedAtn.DecisionToState)) + var sharedContextCache = antlr4.NewPredictionContextCache() - var deserializer = NewATNDeserializer() - var deserializedAtn = deserializer.deserialize(serializedATN) - var decisionToDFA = make([]DFA,len(deserializedAtn.decisionToState)) - - for index, ds := range deserializedAtn.decisionToState { - decisionToDFA[index] = NewDFA(ds, index) + for index, ds := range deserializedAtn.DecisionToState { + decisionToDFA[index] = antlr4.NewDFA(ds, index) } - var sharedContextCache = NewPredictionContextCache() - - var literalNames = [...]string{ }; null="nil", separator=", ", wrap, anchor> } - var symbolicNames = [...]string{ }; null="nil", separator=", ", wrap, anchor> } - var ruleNames = [...]string{ "}; separator=", ", wrap, anchor> } - - // init the parser parser := new() - parser._interp = NewParserATNSimulator(parser, atn, decisionToDFA, sharedContextCache) + parser.InitParser(input) + + parser.Interpreter = antlr4.NewParserATNSimulator(parser, deserializedAtn, decisionToDFA, sharedContextCache) parser.ruleNames = ruleNames parser.literalNames = literalNames parser.symbolicNames = symbolicNames @@ -133,7 +122,7 @@ func New(input) { } const( - EOF = TokenEOF + EOF = antlr4.TokenEOF = }; separator="\n", wrap, anchor> @@ -242,7 +231,7 @@ func (p *) (}; sep // TODO not sure how exceptions are passed into clause - if v, ok = x.(error.RecognitionException); ok { + if v, ok = x.(RecognitionException); ok { localctx.exception = v p._errHandler.reportError(p, v) p._errHandler.recover(p, v) @@ -273,23 +262,27 @@ LeftRecursiveRuleFunction(currentRule,args,code,locals,ruleCtx,altLabelCtxs, }; separator="\n"> func (p *) (_p, }>) { - // if(_p==undefined) { - // _p = 0 - //} - _parentctx := p._ctx - _parentState := p.state + + _parentctx := p.getParent() + _parentState := p.getState() localctx := New(p, p._ctx, _parentState}>) _prevctx := localctx _startState := p.enterRecursionRule(localctx, , RULE_, _p) + + defer func(){ + + p.unrollRecursionContexts(_parentctx) + } + try { } catch( error) { - if(error instanceof error.RecognitionException) { + if(error instanceof IRecognitionException) { localctx.exception = error p._errHandler.reportError(p, error) p._errHandler.recover(p, error) @@ -441,7 +434,7 @@ case +1: Sync(s) ::= "sync()" -ThrowNoViableAlt(t) ::= "panic(new error.NoViableAltException(p))" +ThrowNoViableAlt(t) ::= "panic(NewNoViableAltException(p))" TestSetInline(s) ::= << }; separator=" || "> @@ -519,7 +512,7 @@ ArgAction(a, chunks) ::= "" SemPred(p, chunks, failChunks) ::= << p.state = if !( ) { - panic( error.FailedPredicateException(p, , , )) + panic( 
FailedPredicateException(p, , , )) } >> @@ -564,13 +557,13 @@ TokenPropertyRef_int(t) ::= "(. == null ? 0 : parseInt( RulePropertyRef_start(r) ::= "(.==null ? null : ..start)" RulePropertyRef_stop(r) ::= "(.==null ? null : ..stop)" -RulePropertyRef_text(r) ::= "(.==null ? null : p._input.getText(new Interval(..start,..stop)))" +RulePropertyRef_text(r) ::= "(.==null ? null : p._input.getText(NewInterval(..start,..stop)))" RulePropertyRef_ctx(r) ::= "." RulePropertyRef_parser(r) ::= "this" ThisRulePropertyRef_start(r) ::= "localctx.start" ThisRulePropertyRef_stop(r) ::= "localctx.stop" -ThisRulePropertyRef_text(r) ::= "p._input.getText(new Interval(localctx.start, p._input.LT(-1)))" +ThisRulePropertyRef_text(r) ::= "p._input.getText(NewInterval(localctx.start, p._input.LT(-1)))" ThisRulePropertyRef_ctx(r) ::= "localctx" ThisRulePropertyRef_parser(r) ::= "p" @@ -653,20 +646,19 @@ StructDecl(struct,ctorAttrs,attrs,getters,dispatchMethods,interfaces,extensionMe superClass={ParserRuleContext}) ::= << type struct { - *ParserRuleContext + *antlr4.ParserRuleContext - parent *ParserRuleContext - parser *Parser - ruleIndex + parser antlr4.IParser } -func New(parser *Parser, parent *ParserRuleContext, invokingState int}>) { +func New(parser antlr4.IParser, parent antlr4.IParserRuleContext, invokingState int}>) { var p = new() + p.InitParserRuleContext( parent, invokingState ) p.parser = parser - p.ruleIndex = RULE_ + p.RuleIndex = RULE_ }; separator="\n"> = || null;}; separator="\n"> return p @@ -688,18 +680,16 @@ func (s *) copyFrom(ctx ) { AltLabelStructDecl(struct,attrs,getters,dispatchMethods) ::= << type struct { - parent *ParserRuleContext - parser *Parser - ruleIndex int + parent antlr4.IParserRuleContext + parser antlr4.IParser } -func New(parser *Parser, ctx *ParserRuleContext) { +func New(parser antlr4.IParser, ctx antlr4.IParserRuleContext) { var p = new() Context.call(this, parser) - ;}; separator="\n"> Context.prototype.copyFrom.call(this, ctx) @@ -713,25 +703,23 @@ func New(parser *Parser, ctx *ParserRuleContext) { ListenerDispatchMethod(method) ::= << -func (s *) enterexitRule(listener *ParseTreeListener) { - // TODO - switch t := listener.(type) { - case *Listener: - listener.enterexit(s) - } +func (s *) enterexitRule(listener antlr4.ParseTreeListener) { + + listener.(*Listener).enterexit(s) + } >> VisitorDispatchMethod(method) ::= << -func (s *) accept(visitor *ParseTreeVisitor) { +func (s *) accept(visitor antlr4.ParseTreeVisitor) interface{} { switch t := listener.(type) { case *Listener: - return visitor.visit(s) + return t.visit(s) default: - return visitor.visitChildren(s) + return t.visitChildren(s) } } @@ -754,13 +742,13 @@ recRuleSetReturnAction(src,name) ::= "$=$." recRuleSetStopToken() ::= "p._ctx.stop = p._input.LT(-1);" recRuleAltStartAction(ruleName, ctxName, label) ::= << -localctx = new Context(this, _parentctx, _parentState) +localctx = NewContext(this, _parentctx, _parentState) localctx.
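The ListenerDispatchMethod template above generates enterRule/exitRule methods that type-assert the generic listener down to the grammar-specific listener and call the per-rule hook. A toy, self-contained sketch of that dispatch pattern (exprListener and exprContext are hypothetical names, not generated code):

package main

import "fmt"

// parseTreeListener is an illustrative stand-in for the generic listener
// interface the generated contexts receive.
type parseTreeListener interface {
	enterEveryRule(ruleName string)
}

type exprListener struct{}

func (exprListener) enterEveryRule(ruleName string) {}
func (exprListener) enterExpr(text string)          { fmt.Println("enterExpr:", text) }

type exprContext struct{ text string }

// enterRule mirrors the generated pattern: assert to the concrete listener
// type, then invoke the rule-specific callback (this panics if a different
// listener is passed, just as the generated assertion would).
func (c *exprContext) enterRule(l parseTreeListener) {
	l.(exprListener).enterExpr(c.text)
}

func main() {
	(&exprContext{text: "1+2"}).enterRule(exprListener{})
}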