Various refactorings to properly support package export

This commit is contained in:
Peter Boyer 2015-12-23 11:00:30 -06:00
parent ff70876ae8
commit f5cf1cbf68
43 changed files with 1538 additions and 574 deletions

View File

@ -0,0 +1,861 @@
/** ANTLR tool checks that output templates are compatible with tool code generation.
 * For now, a simple string match is used on the x.y of the x.y.z version scheme.
 * Must match Tool.VERSION when the templates are loaded.
 *
 * REQUIRED.
 */
// fileHeader: the "Generated from ..." banner placed at the top of every emitted file.
fileHeader(grammarFileName, ANTLRVersion) ::= <<
// Generated from <grammarFileName; format="java-escape"> by ANTLR <ANTLRVersion>
>>
// args must be <object-model-object>, <fields-resulting-in-STs>
// ParserFile: whole parser .go file — banner, package clause, imports, the user's
// "header" named action, then the Parser template output.  "strings" is imported
// because SerializedATN emits a strings.Join call.
ParserFile(file, parser, namedActions) ::= <<
<fileHeader(file.grammarFileName, file.ANTLRVersion)>
package parser // <file.grammarName>
import (
"antlr4"
"strings"
)
<namedActions.header>
<parser>
>>
// ListenerFile: whole <file.parserName>Listener .go file with one enter/exit
// method pair per listener name.  Renders the user's "header" named action
// (the `header` argument was previously declared but never emitted, unlike
// VisitorFile which does emit it).
ListenerFile(file, header) ::= <<
<fileHeader(file.grammarFileName, file.ANTLRVersion)>
package parser // <file.grammarName>
import "antlr4"
<header>
// This class defines a complete listener for a parse tree produced by <file.parserName>.
type <file.grammarName>Listener struct {
}
<file.listenerNames:{lname |
// Enter a parse tree produced by <file.parserName>#<lname>.
func (l *<file.grammarName>Listener) enter<lname; format="cap">(ctx antlr4.IParserRuleContext) {
\}
// Exit a parse tree produced by <file.parserName>#<lname>.
func (l *<file.grammarName>Listener) exit<lname; format="cap">(ctx antlr4.IParserRuleContext) {
\}
}; separator="\n">
>>
// VisitorFile: whole <file.parserName>Visitor .go file with one visit method per
// visitor name.  The context parameter is now qualified as antlr4.IParserRuleContext
// (the generated file imports only "antlr4", so the previously-unqualified
// IParserRuleContext would not resolve; this also matches ListenerFile).
VisitorFile(file, header) ::= <<
<fileHeader(file.grammarFileName, file.ANTLRVersion)>
package parser // <file.grammarName>
import "antlr4"
<header>
// This class defines a complete generic visitor for a parse tree produced by <file.parserName>.
type <file.grammarName>Visitor struct {
}
<file.visitorNames:{lname |
// Visit a parse tree produced by <file.parserName>#<lname>.
func (l <file.grammarName>Visitor) visit<lname; format="cap">(ctx antlr4.IParserRuleContext) {
\}
}; separator="\n">
>>
// Parser: emits the parser struct, its constructor (ATN deserialization, DFA and
// prediction-cache setup), token/rule constant blocks, the generated rule
// functions, and the Sempred dispatcher.  Several lines carry review notes on
// leftovers from the JavaScript target this file was ported from.
Parser(parser, funcs, atn, sempredFuncs, superClass) ::= <<
<if(superClass)>
var <superClass> = require('./<superClass>').<superClass> // TODO
<endif>
var parserATN = <atn>
var deserializer = antlr4.NewATNDeserializer()
var deserializedATN = deserializer.Deserialize( []rune( parserATN ) )
var literalNames = []string{ <parser.literalNames:{t | <t>}; null="nil", separator=", ", wrap, anchor> }
var symbolicNames = []string{ <parser.symbolicNames:{t | <t>}; null="nil", separator=", ", wrap, anchor> }
var ruleNames = []string{ <parser.ruleNames:{r | "<r>"}; separator=", ", wrap, anchor> }
type <parser.name> struct {
<superClass; null="*antlr4.Parser">
ruleNames []string
literalNames []string
symbolicNames []string
grammarFileName string
}
func New<parser.name>(input antlr4.TokenStream) *<parser.name> {
var decisionToDFA = make([]*antlr4.DFA,len(deserializedATN.DecisionToState))
var sharedContextCache = antlr4.NewPredictionContextCache()
for index, ds := range deserializedATN.DecisionToState {
decisionToDFA[index] = antlr4.NewDFA(ds, index)
}
parser := new(<parser.name>)
parser.InitParser(input)
parser.Interpreter = antlr4.NewParserATNSimulator(parser, deserializedATN, decisionToDFA, sharedContextCache)
parser.ruleNames = ruleNames
parser.literalNames = literalNames
parser.symbolicNames = symbolicNames
<namedActions.members><! NOTE(review): namedActions is not a declared argument of this template — confirm it resolves at render time !>
parser.grammarFileName = "<parser.grammarFileName; format="java-escape">"
return parser
}
const(
<parser.name>EOF = antlr4.TokenEOF
<if(parser.tokens)>
<parser.tokens:{k | <parser.name><k> = <parser.tokens.(k)>}; separator="\n", wrap, anchor>
<endif>
)
const (
<parser.rules:{r | <parser.name>RULE_<r.name> = <r.index>}; separator="\n", wrap, anchor>
)
<funcs; separator="\n">
<if(sempredFuncs)>
func (p *<parser.name>) Sempred(localctx, ruleIndex int, predIndex int) {
switch ruleIndex {
<parser.sempredFuncs.values:{f | case <f.ruleIndex>:
return p.<f.name>_Sempred(localctx, predIndex);}; separator="\n">
default:
panic("No predicate with index:" + ruleIndex)<! NOTE(review): string + int does not compile in Go; needs strconv.Itoa !>
}
}
<sempredFuncs.values; separator="\n">
<endif>
>>
// dumpActions: emits the lexer's Action and Sempred dispatch methods.  Fixes:
// the two switch headers previously carried a stray ")" (Go syntax error), the
// action dispatch called through "p." although the receiver is "l", and the
// Sempred parameters were untyped.
dumpActions(recog, argFuncs, actionFuncs, sempredFuncs) ::= <<
<if(actionFuncs)>
func (l *<lexer.name>) Action(localctx, ruleIndex int, actionIndex int) {
switch ruleIndex {
<recog.actionFuncs.values:{f|
case <f.ruleIndex>:
l.<f.name>_Action(localctx, actionIndex)
}; separator="\n">
default:
panic("No registered action for:" + ruleIndex)<! TODO: string + int does not compile in Go; needs strconv.Itoa !>
}
}
<actionFuncs.values; separator="\n">
<endif>
<if(sempredFuncs)>
func (l *<lexer.name>) Sempred(localctx, ruleIndex, predIndex int) {
switch ruleIndex {
<recog.sempredFuncs.values:{f| case <f.ruleIndex>:
return l.<f.name>_Sempred(localctx, predIndex);}; separator="\n">
default:
panic("No registered predicate for:" + ruleIndex)<! TODO: string + int does not compile in Go; needs strconv.Itoa !>
}
}
<sempredFuncs.values; separator="\n">
<endif>
>>
/* This generates a private method since the actionIndex is generated, making an
* overriding implementation impossible to maintain.
*/
// RuleActionFunction: per-rule action runner dispatched to by Action.  Fixes the
// stray ")" in the switch header (Go syntax error) and types the actionIndex
// parameter to match the call site emitted by dumpActions.
RuleActionFunction(r, actions) ::= <<
func (l *<lexer.name>) <r.name>_Action(localctx, actionIndex int) {
switch actionIndex {
<actions:{index|
case <index>:
<actions.(index)>
}; separator="\n">
default:
panic("No registered action for:" + actionIndex)<! TODO: string + int does not compile in Go; needs strconv.Itoa !>
}
}
>>
/* This generates a private method since the predIndex is generated, making an
* overriding implementation impossible to maintain.
*/
// RuleSempredFunction: per-rule predicate evaluator dispatched to by Sempred.
// Receiver type is the parser when rendering a parser, else the lexer.
RuleSempredFunction(r, actions) ::= <<
func (s *<if(parser)><parser.name><else><lexer.name><endif>) <r.name>_Sempred(localctx, predIndex int) {
switch predIndex {
<actions:{index| case <index>:
return <actions.(index)>;}; separator="\n">
default:
panic("No predicate with index:" + predIndex)<! NOTE(review): string + int does not compile in Go; needs strconv.Itoa !>
}
}
>>
// RuleFunction: emits one parser rule method.  The deferred cleanup function is
// now actually invoked (the original emitted "}" instead of "}()"), the recover
// branch type-asserts the recovered value "err" (the original referenced an
// undefined "x" and repanicked an undefined "re"), and the finally action plus
// ExitRule now run unconditionally rather than only on panic.
RuleFunction(currentRule,args,code,locals,ruleCtx,altLabelCtxs,namedActions,finallyAction,postamble,exceptions) ::= <<
<ruleCtx>
<altLabelCtxs:{l | <altLabelCtxs.(l)>}; separator="\n">
func (p *<parser.name>) <currentRule.name>(<currentRule.args:{a | <a.name>}; separator=", ">) {
localctx := New<currentRule.ctxType>(p, p.GetParserRuleContext(), p.GetState()<currentRule.args:{a | , <a.name>}>)
p.EnterRule(localctx, <currentRule.startState>, <parser.name>RULE_<currentRule.name>)
<namedActions.init>
<locals; separator="\n">
defer func() {
if err := recover(); err != nil {
<if(exceptions)>
<exceptions; separator="\n"> // TODO not sure how exceptions are passed into clause
<else>
if v, ok := err.(antlr4.RecognitionException); ok {
localctx.SetException( v )
p.GetErrorHandler().ReportError(p, v)
p.GetErrorHandler().Recover(p, v)
} else {
panic(err)
}
<endif>
}
<finallyAction>
p.ExitRule()
}()
<code>
<postamble; separator="\n">
<namedActions.after>
return localctx
}
>>
// LeftRecursiveRuleFunction: like RuleFunction but enters a recursion rule and
// unrolls recursion contexts on exit.  The original emitted JavaScript
// try/catch/finally (invalid Go), never invoked its deferred function, and ran
// finallyAction/UnrollRecursionContexts twice; error recovery and cleanup are
// now handled once, in a single invoked deferred function, mirroring RuleFunction.
// The argument list also dropped the doubled comma the original produced.
LeftRecursiveRuleFunction(currentRule,args,code,locals,ruleCtx,altLabelCtxs,
namedActions,finallyAction,postamble) ::=
<<
<ruleCtx>
<altLabelCtxs:{l | <altLabelCtxs.(l)>}; separator="\n">
func (p *<parser.name>) <currentRule.name>(_p<args:{a | , <a>}>) {
_parentctx := p.GetParent().(IParserRuleContext)
_parentState := p.GetState()
localctx := New<currentRule.ctxType>(p, p.GetParserRuleContext(), _parentState<args:{a | , <a.name>}>)
_prevctx := localctx <! used by the recRule* templates expanded inside <code> !>
_startState := <currentRule.startState>
p.EnterRecursionRule(localctx, <currentRule.startState>, <parser.name>RULE_<currentRule.name>, _p)
<namedActions.init>
<locals; separator="\n">
defer func() {
if err := recover(); err != nil {
if v, ok := err.(antlr4.RecognitionException); ok {
localctx.SetException(v)
p.GetErrorHandler().ReportError(p, v)
p.GetErrorHandler().Recover(p, v)
} else {
panic(err)
}
}
<finallyAction>
p.UnrollRecursionContexts(_parentctx)
}()
<code>
<postamble; separator="\n">
<namedActions.after>
return localctx
}
>>
// CodeBlockForOuterMostAlt: wraps an outermost alternative; swaps in the labeled
// alt context (if any) before entering the alt, then delegates to CodeBlockForAlt.
CodeBlockForOuterMostAlt(currentOuterMostAltCodeBlock, locals, preamble, ops) ::= <<
<if(currentOuterMostAltCodeBlock.altLabel)>localctx = New<currentOuterMostAltCodeBlock.altLabel; format="cap">Context(p, localctx)<endif>
p.EnterOuterAlt(localctx, <currentOuterMostAltCodeBlock.alt.altNum>)
<CodeBlockForAlt(currentAltCodeBlock=currentOuterMostAltCodeBlock, ...)>
>>
// CodeBlockForAlt: locals, preamble, then the alt's operations, one per line.
CodeBlockForAlt(currentAltCodeBlock, locals, preamble, ops) ::= <<
<locals; separator="\n">
<preamble; separator="\n">
<ops; separator="\n">
>>
// LL1AltBlock: LL(1) decision — switch on the single lookahead token.
LL1AltBlock(choice, preamble, alts, error) ::= <<
p.SetState(<choice.stateNumber>)
<if(choice.label)><labelref(choice.label)> = p.GetTokenStream().LT(1)<endif>
<preamble; separator="\n">
switch p.GetTokenStream().LA(1) {
<choice.altLook,alts:{look,alt| <cases(ttypes=look)>
<alt>
break;}; separator="\n">
default:
<error>
}
>>
// LL1OptionalBlock: like LL1AltBlock but for an optional (?) subrule.
LL1OptionalBlock(choice, alts, error) ::= <<
p.SetState(<choice.stateNumber>)
switch p.GetTokenStream().LA(1) {
<choice.altLook,alts:{look,alt| <cases(ttypes=look)>
<alt>
break;}; separator="\n">
default:
<error>
}
>>
// LL1OptionalBlockSingleAlt: single-alt optional subrule guarded by one lookahead test.
LL1OptionalBlockSingleAlt(choice, expr, alts, preamble, error, followExpr) ::= <<
p.SetState(<choice.stateNumber>)
<preamble; separator="\n">
if <expr> {
<alts; separator="\n">
}
<!else if ( !(<followExpr>) ) <error>!>
>>
// LL1StarBlockSingleAlt: single-alt (...)* loop driven by an LL(1) lookahead test.
LL1StarBlockSingleAlt(choice, loopExpr, alts, preamble, iteration) ::= <<
p.SetState(<choice.stateNumber>)
p.GetErrorHandler().Sync(p)
<preamble; separator="\n">
for <loopExpr> {
<alts; separator="\n">
p.SetState(<choice.loopBackStateNumber>)
p.GetErrorHandler().Sync(p)
<iteration>
}
>>
// LL1PlusBlockSingleAlt: single-alt (...)+ loop; the do-while shape is expressed
// with Go's "for ok := true; ok; ok = <cond>" idiom.
LL1PlusBlockSingleAlt(choice, loopExpr, alts, preamble, iteration) ::= <<
p.SetState(<choice.blockStartStateNumber>) <! alt block decision !>
p.GetErrorHandler().Sync(p)
<preamble; separator="\n">
for ok := true; ok; ok = <loopExpr> {
<alts; separator="\n">
p.SetState(<choice.stateNumber>); <! loopback/exit decision !>
p.GetErrorHandler().Sync(p)
<iteration>
}
>>
// LL(*) stuff
// AltBlock: adaptive (LL(*)) decision — switch on the alt chosen by
// AdaptivePredict.  Fixes the stray ")" in the switch header (Go syntax error)
// and replaces the JavaScript-target leftover "_input" with the
// p.GetTokenStream() accessor used everywhere else in this file.
AltBlock(choice, preamble, alts, error) ::= <<
p.SetState(<choice.stateNumber>)
p.GetErrorHandler().Sync(p)
<if(choice.label)><labelref(choice.label)> = p.GetTokenStream().LT(1)<endif>
<preamble; separator="\n">
la_ := p.GetInterpreter().AdaptivePredict(p.GetTokenStream(),<choice.decision>,p.GetParserRuleContext())
switch la_ {
<alts:{alt |
case <i>:
<alt>
}; separator="\n">
}
>>
// OptionalBlock: adaptive optional subrule; emits an if / "} else " chain keyed
// on the predicted alt (the separator supplies the joining "} else ").
OptionalBlock(choice, alts, error) ::= <<
p.SetState(<choice.stateNumber>)
p.GetErrorHandler().Sync(p)
la_ := p.GetInterpreter().AdaptivePredict(p.GetTokenStream(),<choice.decision>,p.GetParserRuleContext())
<alts:{alt |
if la_==<i><if(!choice.ast.greedy)>+1<endif> {
<alt>
}; separator="\n} else ">
}
>>
// StarBlock: adaptive (...)* loop; keeps predicting until the exit alt or an
// invalid alt number is returned.
StarBlock(choice, alts, Sync, iteration) ::= <<
p.SetState(<choice.stateNumber>)
p.GetErrorHandler().Sync(p)
_alt := p.GetInterpreter().AdaptivePredict(p.GetTokenStream(),<choice.decision>,p.GetParserRuleContext())
for _alt!=<choice.exitAlt> && _alt!= antlr4.ATNINVALID_ALT_NUMBER {
if(_alt==1<if(!choice.ast.greedy)>+1<endif>) {
<iteration>
<alts> <! should only be one !>
}
p.SetState(<choice.loopBackStateNumber>)
p.GetErrorHandler().Sync(p)
_alt = p.GetInterpreter().AdaptivePredict(p.GetTokenStream(),<choice.decision>,p.GetParserRuleContext())
}
>>
// PlusBlock: adaptive (...)+ loop expressed with Go's do-while idiom.  Fixes the
// stray ")" in the switch header (Go syntax error) and removes the accidental
// "//" left in front of the subtemplate terminator, which made every generated
// case end with a stray comment line.
PlusBlock(choice, alts, error) ::= <<
p.SetState(<choice.blockStartStateNumber>) <! alt block decision !>
p.GetErrorHandler().Sync(p)
_alt := 1<if(!choice.ast.greedy)>+1<endif>
for ok := true; ok; ok = _alt!=<choice.exitAlt> && _alt!= antlr4.ATNINVALID_ALT_NUMBER {
switch _alt {
<alts:{alt|
case <i><if(!choice.ast.greedy)>+1<endif>:
<alt>
}; separator="\n">
default:
<error>
}
p.SetState(<choice.loopBackStateNumber>) <! loopback/exit decision !>
p.GetErrorHandler().Sync(p)
_alt = p.GetInterpreter().AdaptivePredict(p.GetTokenStream(),<choice.decision>, p.GetParserRuleContext())
}
>>
// Sync: error-handler sync point for the expected set.
Sync(s) ::= "Sync(<s.expecting.name>)"
// NOTE(review): NewNoViableAltException is probably in package antlr4 — confirm the prefix.
ThrowNoViableAlt(t) ::= "panic(NewNoViableAltException(p))"
// TestSetInline: lookahead membership test; bitfield form for 3+ types, inline form otherwise.
TestSetInline(s) ::= <<
<s.bitsets:{bits | <if(rest(rest(bits.ttypes)))><bitsetBitfieldComparison(s, bits)><else><bitsetInlineComparison(s, bits)><endif>}; separator=" || ">
>>
// Javascript language spec - shift operators are 32 bits long max
testShiftInRange(shiftAmount) ::= <<
((<shiftAmount>) & ~0x1f) == 0
>>
// produces smaller bytecode only when bits.ttypes contains more than two items
// NOTE(review): this still emits JavaScript operators ("!==", unary "~") that are
// not valid Go — needs porting before it can be used.
bitsetBitfieldComparison(s, bits) ::= <%
(<testShiftInRange({<offsetShiftVar(s.varName, bits.shift)>})> && ((1 \<\< <offsetShiftVar(s.varName, bits.shift)>) & (<bits.ttypes:{ttype | (1 \<\< <offsetShiftType(ttype, bits.shift)>)}; separator=" | ">)) !== 0)
%>
// isZero: string-keyed map used to elide a "- 0" offset in the shift helpers.
isZero ::= [
"0":true,
default:false
]
offsetShiftVar(shiftAmount, offset) ::= <%
<if(!isZero.(offset))>(<shiftAmount> - <offset>)<else><shiftAmount><endif>
%>
offsetShiftType(shiftAmount, offset) ::= <%
<if(!isZero.(offset))>(<parser.name>.<shiftAmount> - <offset>)<else><parser.name>.<shiftAmount><endif>
%>
// produces more efficient bytecode when bits.ttypes contains at most two items
bitsetInlineComparison(s, bits) ::= <%
<bits.ttypes:{ttype | <s.varName>==<parser.name><ttype>}; separator=" || ">
%>
// cases: one "case <TokenConst>:" label per token type.
cases(ttypes) ::= <<
<ttypes:{t | case <parser.name><t>:}; separator="\n">
>>
// InvokeRule: call another rule method, capturing its result into any labels.
InvokeRule(r, argExprsChunks) ::= <<
p.SetState(<r.stateNumber>)
<if(r.labels)><r.labels:{l | <labelref(l)> = }><endif>p.<r.name>(<if(r.ast.options.p)><r.ast.options.p><if(argExprsChunks)>,<endif><endif><argExprsChunks>)
>>
// MatchToken: match a single token type, capturing it into any labels.
MatchToken(m) ::= <<
p.SetState(<m.stateNumber>)
<if(m.labels)><m.labels:{l | <labelref(l)> = }><endif>p.Match(<parser.name><m.name>)
>>
MatchSet(m, expr, capture) ::= "<CommonSetStuff(m, expr, capture, false)>"
MatchNotSet(m, expr, capture) ::= "<CommonSetStuff(m, expr, capture, true)>"
// CommonSetStuff: shared body of MatchSet/MatchNotSet — test set membership,
// recover inline on mismatch, otherwise consume.  Fixes the emitted
// "}" / "else {" split across two lines, which is a Go syntax error
// (Go requires "} else {" on one line).
CommonSetStuff(m, expr, capture, invert) ::= <<
p.SetState(<m.stateNumber>)
<if(m.labels)><m.labels:{l | <labelref(l)> = }>p.GetTokenStream().LT(1);<endif>
<capture>
<if(invert)>if <m.varName>\<=0 || <expr> <else>if !(<expr>)<endif> {
<if(m.labels)><m.labels:{l | <labelref(l)> = }><endif>p.GetErrorHandler().RecoverInline(p)
} else {
p.Consume()
}
>>
// Wildcard: match any token.
// NOTE(review): MatchWildcard() has no "p." receiver — confirm it resolves.
Wildcard(w) ::= <<
p.SetState(<w.stateNumber>)
<if(w.labels)><w.labels:{l | <labelref(l)> = }><endif>MatchWildcard()
>>
// ACTION STUFF
Action(a, foo, chunks) ::= "<chunks>"
ArgAction(a, chunks) ::= "<chunks>"
// SemPred: evaluate a semantic predicate; panic on failure.
// NOTE(review): FailedPredicateException likely needs an antlr4.New... constructor — confirm.
SemPred(p, chunks, failChunks) ::= <<
p.SetState(<p.stateNumber>)
if !( <chunks>) {
panic( FailedPredicateException(p, <p.predicate><if(failChunks)>, <failChunks><elseif(p.msg)>, <p.msg><endif>))
}
>>
// NOTE(review): still emits a JavaScript "catch" clause — invalid Go; referenced
// by RuleFunction's exceptions path, which is itself marked TODO.
ExceptionClause(e, catchArg, catchAction) ::= <<
catch (<catchArg>) {
<catchAction>
}
>>
// lexer actions are not associated with model objects
// NOTE(review): the lowercase method/field names below (skip, more, _type, ...)
// are JavaScript-target leftovers; confirm against the Go runtime's exported API.
LexerSkipCommand() ::= "p.skip()"
LexerMoreCommand() ::= "p.more()"
LexerPopModeCommand() ::= "p.popMode()"
LexerTypeCommand(arg) ::= "p._type = <arg>"
LexerChannelCommand(arg) ::= "p._channel = <arg>"
LexerModeCommand(arg) ::= "p._mode = <arg>"
LexerPushModeCommand(arg) ::= "p.pushMode(<arg>)"
// Action-chunk translations: how grammar action references ($x, $x.y, ...) are
// rendered into Go expressions against the current context struct.
ActionText(t) ::= "<t.text>"
ActionTemplate(t) ::= "<t.st>"
ArgRef(a) ::= "localctx.<a.name>"
LocalRef(a) ::= "localctx.<a.name>"
RetValueRef(a) ::= "localctx.<a.name>"
QRetValueRef(a) ::= "<ctx(a)>.<a.dict>.<a.name>"
/** How to translate $tokenLabel */
TokenRef(t) ::= "<ctx(t)>.<t.name>"
LabelRef(t) ::= "<ctx(t)>.<t.name>"
ListLabelRef(t) ::= "<ctx(t)>.<ListLabelName(t.name)>"
SetAttr(s,rhsChunks) ::= "<ctx(s)>.<s.name> = <rhsChunks>"
TokenLabelType() ::= "<file.TokenLabelType; null={Token}>"
InputSymbolType() ::= "<file.InputSymbolType; null={Token}>"
// NOTE(review): the property-ref templates below still emit JavaScript ternaries
// ("cond ? a : b") and parseInt(...), which are not valid Go — they need Go
// helper functions or if/else rewrites before use.
TokenPropertyRef_text(t) ::= "(<ctx(t)>.<t.label>==null ? null : <ctx(t)>.<t.label>.text)"
TokenPropertyRef_type(t) ::= "(<ctx(t)>.<t.label> == null ? 0 : <ctx(t)>.<t.label>.type)"
TokenPropertyRef_line(t) ::= "(<ctx(t)>.<t.label> == null ? 0 : <ctx(t)>.<t.label>.line)"
TokenPropertyRef_pos(t) ::= "(<ctx(t)>.<t.label> == null ? 0 : <ctx(t)>.<t.label>.column)"
TokenPropertyRef_channel(t) ::= "(<ctx(t)>.<t.label> == null ? 0 : <ctx(t)>.<t.label>.channel)"
TokenPropertyRef_index(t) ::= "(<ctx(t)>.<t.label> == null ? 0 : <ctx(t)>.<t.label>.tokenIndex)"
TokenPropertyRef_int(t) ::= "(<ctx(t)>.<t.label> == null ? 0 : parseInt(<ctx(t)>.<t.label>.text))"
RulePropertyRef_start(r) ::= "(<ctx(r)>.<r.label>==null ? null : <ctx(r)>.<r.label>.start)"
RulePropertyRef_stop(r) ::= "(<ctx(r)>.<r.label>==null ? null : <ctx(r)>.<r.label>.stop)"
RulePropertyRef_text(r) ::= "(<ctx(r)>.<r.label>==null ? null : p.GetTokenStream().GetTextFromInterval(NewInterval(<ctx(r)>.<r.label>.GetStart(),<ctx(r)>.<r.label>.GetStop())))"
RulePropertyRef_ctx(r) ::= "<ctx(r)>.<r.label>"
// NOTE(review): "this" is a JavaScript leftover; the Go receiver is "p" — confirm.
RulePropertyRef_parser(r) ::= "this"
ThisRulePropertyRef_start(r) ::= "localctx.start"
ThisRulePropertyRef_stop(r) ::= "localctx.stop"
ThisRulePropertyRef_text(r) ::= "p.GetTokenStream().GetTextFromInterval(NewInterval(localctx.GetStart(), p.GetTokenStream().LT(-1)))"
ThisRulePropertyRef_ctx(r) ::= "localctx"
ThisRulePropertyRef_parser(r) ::= "p"
NonLocalAttrRef(s) ::= "getInvokingContext(<s.ruleIndex>).<s.name>"
SetNonLocalAttr(s, rhsChunks) ::= "getInvokingContext(<s.ruleIndex>).<s.name> = <rhsChunks>"
// NOTE(review): unbalanced parentheses and a leftover "push(" below — verify before use.
AddToLabelList(a) ::= "<ctx(a.label)>.<a.listName> = append(<ctx(a.label)>.<a.listName>, push(<labelref(a.label)>)"
// NOTE(review): "= nil" / "= []" on these decls are JS-style initializers, not Go declarations — verify.
TokenDecl(t) ::= "p.<t.name> = nil // <TokenLabelType()>"
TokenTypeDecl(t) ::= "<t.name> := 0 // <TokenLabelType()> type"
TokenListDecl(t) ::= "p.<t.name> = [] // of <TokenLabelType()>s"
RuleContextDecl(r) ::= "p.<r.name> = nil // <r.ctxName>"
RuleContextListDecl(rdecl) ::= "p.<rdecl.name> = [] // of <rdecl.ctxName>s"
// Context getter declarations, spliced into each context struct via StructDecl's
// <getters> (each body is prefixed there with "func (s *<struct.name>) ").
ContextTokenGetterDecl(t) ::= <<
<t.name>() {
return s.GetToken(<parser.name><t.name>, 0)
}
>>
// should never be called
// NOTE(review): Python-target leftover ("def ... self"), kept only as a placeholder.
ContextTokenListGetterDecl(t) ::= <<
def <t.name>_list(self):
return self.GetTokens(<parser.name><t.name>)
>>
// Indexed token getter: i < 0 returns the whole list, otherwise the i-th token.
ContextTokenListIndexedGetterDecl(t) ::= <<
<t.name>(i int) {
if 0 > i {
return s.GetTokens(<parser.name><t.name>)
} else {
return s.GetToken(<parser.name><t.name>, i)
}
}
>>
ContextRuleGetterDecl(r) ::= <<
<r.name>() {
return s.GetTypedRuleContext(<r.ctxName>,0)
}
>>
// should never be called
// NOTE(review): mixed Go/Python leftover, kept only as a placeholder.
ContextRuleListGetterDecl(r) ::= <<
func <r.name>_list(self):
return s.GetTypedRuleContexts(<r.ctxName>)
>>
// Indexed rule-context getter: i < 0 returns the whole list, otherwise the i-th context.
ContextRuleListIndexedGetterDecl(r) ::= <<
<r.name>(i int) {
if 0 > i {
return s.GetTypedRuleContexts(<r.ctxName>)
} else {
return s.GetTypedRuleContext(<r.ctxName>,i)
}
}
>>
LexerRuleContext() ::= "RuleContext"
/** The rule context name is the rule followed by a suffix; e.g.,
 * r becomes rContext.
 */
RuleContextNameSuffix() ::= "Context"
ImplicitTokenLabel(tokenName) ::= "_<tokenName>"
ImplicitRuleLabel(ruleName) ::= "_<ruleName>"
ImplicitSetLabel(id) ::= "_tset<id>"
ListLabelName(label) ::= "<label>"
CaptureNextToken(d) ::= "<d.varName> = p.GetTokenStream().LT(1)"
CaptureNextTokenType(d) ::= "<d.varName> = p.GetTokenStream().LA(1);"
// StructDecl: emits one context struct, its constructor, getters, the optional
// copyFrom (only when alt-label subclasses exist), and dispatch methods.
StructDecl(struct,ctorAttrs,attrs,getters,dispatchMethods,interfaces,extensionMembers,
superClass={ParserRuleContext}) ::= <<
type <struct.name> struct {
*antlr4.ParserRuleContext
parser antlr4.IParser
}
func New<struct.name>(parser antlr4.IParser, parent antlr4.IParserRuleContext, invokingState int<struct.ctorAttrs:{a | , <a.name>}>) *<struct.name> {
var p = new(<struct.name>)
p.InitParserRuleContext( parent, invokingState )
p.parser = parser
p.RuleIndex = <parser.name>RULE_<struct.derivedFromName>
<attrs:{a | <a>}; separator="\n">
<struct.ctorAttrs:{a | p.<a.name> = <a.name> || null;}; separator="\n"><! NOTE(review): "|| null" is a JavaScript default-value leftover — invalid Go !>
return p
}
<getters:{g | func (s *<struct.name>) <g>}; separator="\n\n">
<if(struct.provideCopyFrom)> <! don't need copy unless we have subclasses !>
func (s *<struct.name>) copyFrom(ctx <struct.name>) {
<superClass>.prototype.copyFrom.call(s, ctx)<! NOTE(review): JavaScript prototype call leftover — invalid Go !>
<struct.attrs:{a | s.<a.name> = ctx.<a.name>;}; separator="\n">
}
<endif>
<dispatchMethods; separator="\n">
<extensionMembers; separator="\n">
>>
// AltLabelStructDecl: context struct for a labeled alternative; conceptually a
// subclass of the rule's base context.
AltLabelStructDecl(struct,attrs,getters,dispatchMethods) ::= <<
type <struct.name> struct {
parent antlr4.IParserRuleContext
parser antlr4.IParser
}
func New<struct.name>(parser antlr4.IParser, ctx antlr4.IParserRuleContext) *<struct.name> {
var p = new(<struct.name>)
<currentRule.name; format="cap">Context.call(this, parser)<! NOTE(review): JavaScript constructor-call leftover — invalid Go !>
<attrs:{a | <a>;}; separator="\n">
<currentRule.name; format="cap">Context.prototype.copyFrom.call(this, ctx)<! NOTE(review): JavaScript prototype call leftover — invalid Go !>
return p
}
<getters:{g | func (s *<struct.name>) <g>}; separator="\n\n">
<dispatchMethods; separator="\n">
>>
// ListenerDispatchMethod: enter/exit hook installed on each context type.
// NOTE(review): the unconditional type assertion panics for any listener that is
// not *<parser.grammarName>Listener — confirm a type switch is not needed here.
ListenerDispatchMethod(method) ::= <<
func (s *<struct.name>) <if(method.isEnter)>enter<else>exit<endif>Rule(listener antlr4.ParseTreeListener) {
listener.(*<parser.grammarName>Listener).<if(method.isEnter)>enter<else>exit<endif><struct.derivedFromName; format="cap">(s)
}
>>
// VisitorDispatchMethod: accept() installed on each context type.  Fixes the
// switch operand, which referenced an undefined "listener" (the parameter is
// "visitor"), and the type case, which named the Listener type inside a visitor
// dispatch; it now cases on the generated Visitor type.
VisitorDispatchMethod(method) ::= <<
func (s *<struct.name>) accept(visitor antlr4.ParseTreeVisitor) interface{} {
switch t := visitor.(type) {
case *<parser.grammarName>Visitor:
return t.visit<struct.derivedFromName; format="cap">(s)
default:
return t.visitChildren(s)
}
}
>>
// NOTE(review): JS-style initializer ("= null") rather than a Go declaration — verify.
AttributeDecl(d) ::= "p.<d.name> = <if(d.InitValue)><d.InitValue><else>null<endif>"
/** If we don't know location of label def x, use this template */
labelref(x) ::= "<if(!x.isLocal)>localctx.<endif><x.name>"
/** For any action chunk, what is correctly-typed context struct ptr? */
ctx(actionChunk) ::= "localctx"
// used for left-recursive rules
recRuleAltPredicate(ruleName,opPrec) ::= "p.Precpred(p.GetParserRuleContext(), <opPrec>)"
recRuleSetReturnAction(src,name) ::= "$<name>=$<src>.<name>"
recRuleSetStopToken() ::= "p.GetParserRuleContext().stop = p.GetTokenStream().LT(-1);"
// recRuleAltStartAction: start a new recursion context for an unlabeled alt.
// NOTE(review): "this" and lowercase pushNewRecursionContext look like JS leftovers — confirm.
recRuleAltStartAction(ruleName, ctxName, label) ::= <<
localctx = New<ctxName>Context(this, _parentctx, _parentState)
<if(label)>localctx.<label> = _prevctx;<endif>
p.pushNewRecursionContext(localctx, _startState, <parser.name>RULE_<ruleName>)
>>
// recRuleLabeledAltStartAction: same, for a labeled alt; ".push(" is a JS array leftover.
recRuleLabeledAltStartAction(ruleName, currentAltLabel, label, isListLabel) ::= <<
localctx = New<currentAltLabel; format="cap">Context(this, New<ruleName; format="cap">Context(this, _parentctx, _parentState))
<if(label)>
<if(isListLabel)>
localctx.<label>.push(_prevctx)
<else>
localctx.<label> = _prevctx
<endif>
<endif>
p.pushNewRecursionContext(localctx, _startState, <parser.name>RULE_<ruleName>)
>>
// recRuleReplaceContext: swap the current context for an alt-labeled one.
recRuleReplaceContext(ctxName) ::= <<
localctx = New<ctxName>Context(this, localctx)
p.GetParserRuleContext() = localctx<! NOTE(review): assigning to a call is invalid Go; needs a SetParserRuleContext-style setter !>
_prevctx = localctx
>>
// recRuleSetPrevCtx: fire exit events before looping; "_parseListeners" is a JS field name.
recRuleSetPrevCtx() ::= <<
if(p._parseListeners!=nil) {
p.triggerExitRuleEvent()
}
_prevctx = localctx
>>
// LexerFile: whole lexer .go file — banner, package clause, imports, the user's
// "header" named action, then the Lexer template output.  "strings" is imported
// because SerializedATN emits a strings.Join call.
LexerFile(lexerFile, lexer, namedActions) ::= <<
<fileHeader(lexerFile.grammarFileName, lexerFile.ANTLRVersion)>
package parser
import (
"antlr4"
"strings"
)
<namedActions.header>
<lexer>
>>
// Lexer: emits the lexer struct, its constructor (ATN deserialization, DFA and
// LexerATNSimulator setup), token and mode constant blocks, then the action and
// predicate dispatchers via dumpActions.
Lexer(lexer, atn, actionFuncs, sempredFuncs, superClass) ::= <<
var serializedLexerAtn = <atn>
var lexerDeserializer = antlr4.NewATNDeserializer(nil)
var lexerAtn = lexerDeserializer.Deserialize( []rune( serializedLexerAtn ) )
var lexerModeNames = []string{ <lexer.modes:{m| "<m>"}; separator=", ", wrap, anchor> }
var lexerLiteralNames = []string{ <lexer.literalNames:{t | <t>}; null="nil", separator=", ", wrap, anchor> }
var lexerSymbolicNames = []string{ <lexer.symbolicNames:{t | <t>}; null="nil", separator=", ", wrap, anchor> }
var lexerRuleNames = []string{ <lexer.ruleNames:{r | "<r>"}; separator=", ", wrap, anchor> }
type <lexer.name> struct {
<if(superClass)><superClass><else>*antlr4.Lexer<endif>
modeNames []string
literalNames []string
symbolicNames []string
ruleNames []string
grammarFileName string
EOF string
}
func New<lexer.name>(input antlr4.CharStream) *<lexer.name> {
var lexerDecisionToDFA = make([]*antlr4.DFA,len(lexerAtn.DecisionToState))
for index, ds := range lexerAtn.DecisionToState {
lexerDecisionToDFA[index] = antlr4.NewDFA(ds, index)
}
lex := new(<lexer.name>)
lex.InitLexer(input)
lex.Interpreter = antlr4.NewLexerATNSimulator(lex, lexerAtn, lexerDecisionToDFA, antlr4.NewPredictionContextCache())
lex.modeNames = lexerModeNames
lex.ruleNames = lexerRuleNames
lex.literalNames = lexerLiteralNames
lex.symbolicNames = lexerSymbolicNames
lex.grammarFileName = "<lexer.grammarFileName>"
lex.EOF = antlr4.TokenEOF<! NOTE(review): EOF is declared string but antlr4.TokenEOF appears to be numeric elsewhere in this file — confirm the field type !>
return lex
}
const (
<lexer.tokens:{k | <lexer.name><k> = <lexer.tokens.(k)>}; separator="\n", wrap, anchor>
)
const (
<rest(lexer.modes):{m| <lexer.name><m> = <i>}; separator="\n"><! skips the default mode; <i> is the 1-based position within rest() !>
)
<namedActions.members>
<dumpActions(lexer, "", actionFuncs, sempredFuncs)>
>>
// SerializedATN: the serialized ATN as one Go string expression; this is the
// reason ParserFile/LexerFile import "strings".
SerializedATN(model) ::= <<
<! only one segment, can be inlined !>
strings.Join( []string{ "<model.serialized; wrap={",<\n> "}>" }, "" )
>>
/** Using a type to init value map, try to init a type; if not in table
 * must be an object, default value is "nil".
 */
InitValue(typeName) ::= <<
<javaTypeInitMap.(typeName)>
>>
// codeFileExtension: extension for all generated files in this target.
codeFileExtension() ::= ".go"

View File

@ -2,7 +2,6 @@ package antlr4
type ATN struct { type ATN struct {
DecisionToState []*DecisionState DecisionToState []*DecisionState
grammarType int grammarType int
maxTokenType int maxTokenType int
states []IATNState states []IATNState
@ -80,13 +79,13 @@ func (this *ATN) nextTokens(s IATNState, ctx IRuleContext) *IntervalSet {
func (this *ATN) addState(state IATNState) { func (this *ATN) addState(state IATNState) {
if state != nil { if state != nil {
state.setATN(this) state.setATN(this)
state.setStateNumber(len(this.states)) state.SetStateNumber(len(this.states))
} }
this.states = append(this.states, state) this.states = append(this.states, state)
} }
func (this *ATN) removeState(state IATNState) { func (this *ATN) removeState(state IATNState) {
this.states[state.getStateNumber()] = nil // just free mem, don't shift states in list this.states[state.GetStateNumber()] = nil // just free mem, don't shift states in list
} }
func (this *ATN) defineDecisionState(s *DecisionState) int { func (this *ATN) defineDecisionState(s *DecisionState) int {
@ -108,7 +107,7 @@ func (this *ATN) getDecisionState(decision int) *DecisionState {
// considers the complete parser context, but does not evaluate semantic // considers the complete parser context, but does not evaluate semantic
// predicates (i.e. all predicates encountered during the calculation are // predicates (i.e. all predicates encountered during the calculation are
// assumed true). If a path in the ATN exists from the starting state to the // assumed true). If a path in the ATN exists from the starting state to the
// {@link RuleStopState} of the outermost context without matching any // {@link RuleStopState} of the outermost context without Matching any
// symbols, {@link Token//EOF} is added to the returned set. // symbols, {@link Token//EOF} is added to the returned set.
// //
// <p>If {@code context} is {@code nil}, it is treated as // <p>If {@code context} is {@code nil}, it is treated as
@ -141,7 +140,7 @@ func (this *ATN) getExpectedTokens(stateNumber int, ctx IRuleContext) *IntervalS
following = this.nextTokens(rt.(*RuleTransition).followState, nil) following = this.nextTokens(rt.(*RuleTransition).followState, nil)
expected.addSet(following) expected.addSet(following)
expected.removeOne(TokenEpsilon) expected.removeOne(TokenEpsilon)
ctx = ctx.getParent().(IRuleContext) ctx = ctx.GetParent().(IRuleContext)
} }
if following.contains(TokenEpsilon) { if following.contains(TokenEpsilon) {
expected.addOne(TokenEOF) expected.addOne(TokenEOF)

View File

@ -18,7 +18,7 @@ type IATNConfig interface {
getPrecedenceFilterSuppressed() bool getPrecedenceFilterSuppressed() bool
setPrecedenceFilterSuppressed(bool) setPrecedenceFilterSuppressed(bool)
getState() IATNState GetState() IATNState
getAlt() int getAlt() int
getSemanticContext() SemanticContext getSemanticContext() SemanticContext
@ -70,7 +70,7 @@ func NewATNConfig3(c IATNConfig, state IATNState, semanticContext SemanticContex
} }
func NewATNConfig2(c IATNConfig, semanticContext SemanticContext) *ATNConfig { func NewATNConfig2(c IATNConfig, semanticContext SemanticContext) *ATNConfig {
return NewATNConfig(c, c.getState(), c.getContext(), semanticContext) return NewATNConfig(c, c.GetState(), c.getContext(), semanticContext)
} }
func NewATNConfig1(c IATNConfig, state IATNState, context IPredictionContext) *ATNConfig { func NewATNConfig1(c IATNConfig, state IATNState, context IPredictionContext) *ATNConfig {
@ -92,7 +92,7 @@ func (this *ATNConfig) setPrecedenceFilterSuppressed(v bool) {
this.precedenceFilterSuppressed = v this.precedenceFilterSuppressed = v
} }
func (this *ATNConfig) getState() IATNState { func (this *ATNConfig) GetState() IATNState {
return this.state return this.state
} }
@ -153,7 +153,7 @@ func (this *ATNConfig) equals(other interface{}) bool {
} }
func (this *ATNConfig) shortHashString() string { func (this *ATNConfig) shortHashString() string {
return "" + strconv.Itoa(this.state.getStateNumber()) + "/" + strconv.Itoa(this.alt) + "/" + this.semanticContext.toString() return "" + strconv.Itoa(this.state.GetStateNumber()) + "/" + strconv.Itoa(this.alt) + "/" + this.semanticContext.toString()
} }
func (this *ATNConfig) hashString() string { func (this *ATNConfig) hashString() string {
@ -165,7 +165,7 @@ func (this *ATNConfig) hashString() string {
c = this.context.hashString() c = this.context.hashString()
} }
return "" + strconv.Itoa(this.state.getStateNumber()) + "/" + strconv.Itoa(this.alt) + "/" + c + "/" + this.semanticContext.toString() return "" + strconv.Itoa(this.state.GetStateNumber()) + "/" + strconv.Itoa(this.alt) + "/" + c + "/" + this.semanticContext.toString()
} }
func (this *ATNConfig) toString() string { func (this *ATNConfig) toString() string {
@ -268,7 +268,7 @@ func (this *LexerATNConfig) hashString() string {
f = "0" f = "0"
} }
return "" + strconv.Itoa(this.state.getStateNumber()) + strconv.Itoa(this.alt) + fmt.Sprint(this.context) + return "" + strconv.Itoa(this.state.GetStateNumber()) + strconv.Itoa(this.alt) + fmt.Sprint(this.context) +
fmt.Sprint(this.semanticContext) + f + fmt.Sprint(this.lexerActionExecutor) fmt.Sprint(this.semanticContext) + f + fmt.Sprint(this.lexerActionExecutor)
} }

View File

@ -21,7 +21,7 @@ func equalATNConfigs(a, b interface{}) bool {
if a == nil || b == nil { if a == nil || b == nil {
return false return false
} }
return a.(*ATNConfig).state.getStateNumber() == b.(*ATNConfig).state.getStateNumber() && return a.(*ATNConfig).state.GetStateNumber() == b.(*ATNConfig).state.GetStateNumber() &&
a.(*ATNConfig).alt == b.(*ATNConfig).alt && a.(*ATNConfig).alt == b.(*ATNConfig).alt &&
a.(*ATNConfig).semanticContext.equals(b.(*ATNConfig).semanticContext) a.(*ATNConfig).semanticContext.equals(b.(*ATNConfig).semanticContext)
} }
@ -129,10 +129,10 @@ func (this *ATNConfigSet) add(config IATNConfig, mergeCache *DoubleDict) bool {
return true return true
} }
func (this *ATNConfigSet) getStates() *Set { func (this *ATNConfigSet) GetStates() *Set {
var states = NewSet(nil, nil) var states = NewSet(nil, nil)
for i := 0; i < len(this.configs); i++ { for i := 0; i < len(this.configs); i++ {
states.add(this.configs[i].getState()) states.add(this.configs[i].GetState())
} }
return states return states
} }

View File

@ -443,10 +443,10 @@ func (this *ATNDeserializer) generateRuleBypassTransition(atn *ATN, idx int) {
atn.ruleToStartState[idx].addTransition(NewEpsilonTransition(bypassStart, -1), -1) atn.ruleToStartState[idx].addTransition(NewEpsilonTransition(bypassStart, -1), -1)
bypassStop.addTransition(NewEpsilonTransition(endState, -1), -1) bypassStop.addTransition(NewEpsilonTransition(endState, -1), -1)
var matchState = NewBasicState() var MatchState = NewBasicState()
atn.addState(matchState) atn.addState(MatchState)
matchState.addTransition(NewAtomTransition(bypassStop, atn.ruleToTokenType[idx]), -1) MatchState.addTransition(NewAtomTransition(bypassStop, atn.ruleToTokenType[idx]), -1)
bypassStart.addTransition(NewEpsilonTransition(matchState, -1), -1) bypassStart.addTransition(NewEpsilonTransition(MatchState, -1), -1)
} }
func (this *ATNDeserializer) stateIsEndStateFor(state IATNState, idx int) IATNState { func (this *ATNDeserializer) stateIsEndStateFor(state IATNState, idx int) IATNState {

View File

@ -23,7 +23,7 @@ func NewATNSimulator(atn *ATN, sharedContextCache *PredictionContextCache) *ATNS
// by literally rebuilding them with cached subgraphs only.</p> // by literally rebuilding them with cached subgraphs only.</p>
// //
// <p>I tried a cache for use during closure operations, that was // <p>I tried a cache for use during closure operations, that was
// whacked after each adaptivePredict(). It cost a little bit // whacked after each AdaptivePredict(). It cost a little bit
// more time I think and doesn't save on the overall footprint // more time I think and doesn't save on the overall footprint
// so it's not worth the complexity.</p> // so it's not worth the complexity.</p>

View File

@ -50,10 +50,10 @@ type IATNState interface {
getATN() *ATN getATN() *ATN
setATN(*ATN) setATN(*ATN)
getStateType() int GetStateType() int
getStateNumber() int GetStateNumber() int
setStateNumber(int) SetStateNumber(int)
getTransitions() []ITransition getTransitions() []ITransition
setTransitions([]ITransition) setTransitions([]ITransition)
@ -125,15 +125,15 @@ func (as *ATNState) setTransitions(t []ITransition) {
as.transitions = t as.transitions = t
} }
func (as *ATNState) getStateType() int { func (as *ATNState) GetStateType() int {
return as.stateType return as.stateType
} }
func (as *ATNState) getStateNumber() int { func (as *ATNState) GetStateNumber() int {
return as.stateNumber return as.stateNumber
} }
func (as *ATNState) setStateNumber(stateNumber int) { func (as *ATNState) SetStateNumber(stateNumber int) {
as.stateNumber = stateNumber as.stateNumber = stateNumber
} }
@ -151,7 +151,7 @@ func (this *ATNState) toString() string {
func (this *ATNState) equals(other interface{}) bool { func (this *ATNState) equals(other interface{}) bool {
if ot, ok := other.(IATNState); ok { if ot, ok := other.(IATNState); ok {
return this.stateNumber == ot.getStateNumber() return this.stateNumber == ot.GetStateNumber()
} else { } else {
return false return false
} }

View File

@ -92,7 +92,7 @@ func (bt *BufferedTokenStream) get(index int) *Token {
return bt.tokens[index] return bt.tokens[index]
} }
func (bt *BufferedTokenStream) consume() { func (bt *BufferedTokenStream) Consume() {
var skipEofCheck = false var skipEofCheck = false
if bt.index >= 0 { if bt.index >= 0 {
if bt.fetchedEOF { if bt.fetchedEOF {
@ -110,7 +110,7 @@ func (bt *BufferedTokenStream) consume() {
if !skipEofCheck && bt.LA(1) == TokenEOF { if !skipEofCheck && bt.LA(1) == TokenEOF {
panic("cannot consume EOF") panic("cannot consume EOF")
} }
if bt.sync(bt.index + 1) { if bt.Sync(bt.index + 1) {
bt.index = bt.adjustSeekIndex(bt.index + 1) bt.index = bt.adjustSeekIndex(bt.index + 1)
} }
} }
@ -121,7 +121,7 @@ func (bt *BufferedTokenStream) consume() {
// {@code false}. // {@code false}.
// @see //get(int i) // @see //get(int i)
// / // /
func (bt *BufferedTokenStream) sync(i int) bool { func (bt *BufferedTokenStream) Sync(i int) bool {
var n = i - len(bt.tokens) + 1 // how many more elements we need? var n = i - len(bt.tokens) + 1 // how many more elements we need?
if n > 0 { if n > 0 {
var fetched = bt.fetch(n) var fetched = bt.fetch(n)
@ -152,7 +152,7 @@ func (bt *BufferedTokenStream) fetch(n int) int {
} }
// Get all tokens from start..stop inclusively/// // Get all tokens from start..stop inclusively///
func (bt *BufferedTokenStream) getTokens(start int, stop int, types *IntervalSet) []*Token { func (bt *BufferedTokenStream) GetTokens(start int, stop int, types *IntervalSet) []*Token {
if start < 0 || stop < 0 { if start < 0 || stop < 0 {
return nil return nil
@ -194,7 +194,7 @@ func (bt *BufferedTokenStream) LT(k int) *Token {
return bt.LB(-k) return bt.LB(-k)
} }
var i = bt.index + k - 1 var i = bt.index + k - 1
bt.sync(i) bt.Sync(i)
if i >= len(bt.tokens) { // return EOF token if i >= len(bt.tokens) { // return EOF token
// EOF must be last token // EOF must be last token
return bt.tokens[len(bt.tokens)-1] return bt.tokens[len(bt.tokens)-1]
@ -226,11 +226,11 @@ func (bt *BufferedTokenStream) lazyInit() {
} }
func (bt *BufferedTokenStream) setup() { func (bt *BufferedTokenStream) setup() {
bt.sync(0) bt.Sync(0)
bt.index = bt.adjustSeekIndex(0) bt.index = bt.adjustSeekIndex(0)
} }
func (bt *BufferedTokenStream) getTokenSource() TokenSource { func (bt *BufferedTokenStream) GetTokenSource() TokenSource {
return bt.tokenSource return bt.tokenSource
} }
@ -246,7 +246,7 @@ func (bt *BufferedTokenStream) setTokenSource(tokenSource TokenSource) {
// on channel between i and EOF. // on channel between i and EOF.
// / // /
func (bt *BufferedTokenStream) nextTokenOnChannel(i, channel int) int { func (bt *BufferedTokenStream) nextTokenOnChannel(i, channel int) int {
bt.sync(i) bt.Sync(i)
if i >= len(bt.tokens) { if i >= len(bt.tokens) {
return -1 return -1
} }
@ -256,7 +256,7 @@ func (bt *BufferedTokenStream) nextTokenOnChannel(i, channel int) int {
return -1 return -1
} }
i += 1 i += 1
bt.sync(i) bt.Sync(i)
token = bt.tokens[i] token = bt.tokens[i]
} }
return i return i
@ -333,7 +333,7 @@ func (bt *BufferedTokenStream) getSourceName() string {
} }
// Get the text of all tokens in bt buffer./// // Get the text of all tokens in bt buffer.///
func (bt *BufferedTokenStream) getText(interval *Interval) string { func (bt *BufferedTokenStream) GetText(interval *Interval) string {
bt.lazyInit() bt.lazyInit()
bt.fill() bt.fill()
if interval == nil { if interval == nil {

View File

@ -3,5 +3,5 @@ package antlr4
type CharStream interface { type CharStream interface {
IntStream IntStream
getTextFromInterval(*Interval) string GetTextFromInterval(*Interval) string
} }

View File

@ -21,10 +21,10 @@ func NewCommonTokenFactory(copyText bool) *CommonTokenFactory {
// constructing tokens to explicitly set the text. This is useful for cases // constructing tokens to explicitly set the text. This is useful for cases
// where the input stream might not be able to provide arbitrary substrings // where the input stream might not be able to provide arbitrary substrings
// of text from the input after the lexer creates a token (e.g. the // of text from the input after the lexer creates a token (e.g. the
// implementation of {@link CharStream//getText} in // implementation of {@link CharStream//GetText} in
// {@link UnbufferedCharStream} panics an // {@link UnbufferedCharStream} panics an
// {@link UnsupportedOperationException}). Explicitly setting the token text // {@link UnsupportedOperationException}). Explicitly setting the token text
// allows {@link Token//getText} to be called at any time regardless of the // allows {@link Token//GetText} to be called at any time regardless of the
// input stream implementation. // input stream implementation.
// //
// <p> // <p>
@ -52,7 +52,7 @@ func (this *CommonTokenFactory) create(source *TokenSourceCharStreamPair, ttype
if text != "" { if text != "" {
t.setText(text) t.setText(text)
} else if this.copyText && source.charStream != nil { } else if this.copyText && source.charStream != nil {
t.setText(source.charStream.getTextFromInterval(NewInterval(start, stop))) t.setText(source.charStream.GetTextFromInterval(NewInterval(start, stop)))
} }
return t.Token return t.Token
} }

View File

@ -5,7 +5,7 @@
// //
// <p> // <p>
// This token stream provides access to all tokens by index or when calling // This token stream provides access to all tokens by index or when calling
// methods like {@link //getText}. The channel filtering is only used for code // methods like {@link //GetText}. The channel filtering is only used for code
// accessing tokens via the lookahead methods {@link //LA}, {@link //LT}, and // accessing tokens via the lookahead methods {@link //LA}, {@link //LT}, and
// {@link //LB}.</p> // {@link //LB}.</p>
// //
@ -18,7 +18,7 @@
// //
// <p> // <p>
// Note: lexer rules which use the {@code ->skip} lexer command or call // Note: lexer rules which use the {@code ->skip} lexer command or call
// {@link Lexer//skip} do not produce tokens at all, so input text matched by // {@link Lexer//skip} do not produce tokens at all, so input text Matched by
// such a rule will not be available as part of the token stream, regardless of // such a rule will not be available as part of the token stream, regardless of
// channel.</p> // channel.</p>
/// ///
@ -74,7 +74,7 @@ func (ts *CommonTokenStream) LT(k int) *Token {
// find k good tokens // find k good tokens
for n < k { for n < k {
// skip off-channel tokens, but make sure to not look past EOF // skip off-channel tokens, but make sure to not look past EOF
if ts.sync(i + 1) { if ts.Sync(i + 1) {
i = ts.nextTokenOnChannel(i+1, ts.channel) i = ts.nextTokenOnChannel(i+1, ts.channel)
} }
n += 1 n += 1

View File

@ -64,7 +64,7 @@ func (this *DFA) setPrecedenceStartState(precedence int, startState *DFAState) {
return return
} }
// synchronization on s0 here is ok. when the DFA is turned into a // Synchronization on s0 here is ok. when the DFA is turned into a
// precedence DFA, s0 will be initialized once and not updated again // precedence DFA, s0 will be initialized once and not updated again
// s0.edges is never nil for a precedence DFA // s0.edges is never nil for a precedence DFA
this.s0.edges[precedence] = startState this.s0.edges[precedence] = startState
@ -103,7 +103,7 @@ func (this *DFA) setPrecedenceDfa(precedenceDfa bool) {
} }
} }
func (this *DFA) getStates() map[string]*DFAState { func (this *DFA) GetStates() map[string]*DFAState {
return this._states return this._states
} }

View File

@ -50,11 +50,11 @@ func (this *DFASerializer) toString() string {
for j := 0; j < n; j++ { for j := 0; j < n; j++ {
var t = s.edges[j] var t = s.edges[j]
if t != nil && t.stateNumber != 0x7FFFFFFF { if t != nil && t.stateNumber != 0x7FFFFFFF {
buf += this.getStateString(s) buf += this.GetStateString(s)
buf += "-" buf += "-"
buf += this.getEdgeLabel(j) buf += this.getEdgeLabel(j)
buf += "->" buf += "->"
buf += this.getStateString(t) buf += this.GetStateString(t)
buf += "\n" buf += "\n"
} }
} }
@ -81,7 +81,7 @@ func (this *DFASerializer) getEdgeLabel(i int) string {
} }
} }
func (this *DFASerializer) getStateString(s *DFAState) string { func (this *DFASerializer) GetStateString(s *DFAState) string {
var a, b string var a, b string

View File

@ -75,7 +75,7 @@ func NewDFAState(stateNumber int, configs *ATNConfigSet) *DFAState {
// {@link Token//EOF} maps to {@code edges[0]}. // {@link Token//EOF} maps to {@code edges[0]}.
this.edges = nil this.edges = nil
this.isAcceptState = false this.isAcceptState = false
// if accept state, what ttype do we match or alt do we predict? // if accept state, what ttype do we Match or alt do we predict?
// This is set to {@link ATN//INVALID_ALT_NUMBER} when {@link // This is set to {@link ATN//INVALID_ALT_NUMBER} when {@link
// //predicates}{@code !=nil} or // //predicates}{@code !=nil} or
// {@link //requiresFullContext}. // {@link //requiresFullContext}.

View File

@ -12,7 +12,7 @@ import (
// //
// <ul> // <ul>
// <li><b>Ambiguities</b>: These are cases where more than one path through the // <li><b>Ambiguities</b>: These are cases where more than one path through the
// grammar can match the input.</li> // grammar can Match the input.</li>
// <li><b>Weak context sensitivity</b>: These are cases where full-context // <li><b>Weak context sensitivity</b>: These are cases where full-context
// prediction resolved an SLL conflict to a unique alternative which equaled the // prediction resolved an SLL conflict to a unique alternative which equaled the
// minimum alternative of the SLL conflict.</li> // minimum alternative of the SLL conflict.</li>
@ -47,7 +47,7 @@ func (this *DiagnosticErrorListener) reportAmbiguity(recognizer *Parser, dfa *DF
": ambigAlts=" + ": ambigAlts=" +
this.getConflictingAlts(ambigAlts, configs).toString() + this.getConflictingAlts(ambigAlts, configs).toString() +
", input='" + ", input='" +
recognizer.getTokenStream().getTextFromInterval(NewInterval(startIndex, stopIndex)) + "'" recognizer.GetTokenStream().GetTextFromInterval(NewInterval(startIndex, stopIndex)) + "'"
recognizer.notifyErrorListeners(msg, nil, nil) recognizer.notifyErrorListeners(msg, nil, nil)
} }
@ -56,7 +56,7 @@ func (this *DiagnosticErrorListener) reportAttemptingFullContext(recognizer *Par
var msg = "reportAttemptingFullContext d=" + var msg = "reportAttemptingFullContext d=" +
this.getDecisionDescription(recognizer, dfa) + this.getDecisionDescription(recognizer, dfa) +
", input='" + ", input='" +
recognizer.getTokenStream().getTextFromInterval(NewInterval(startIndex, stopIndex)) + "'" recognizer.GetTokenStream().GetTextFromInterval(NewInterval(startIndex, stopIndex)) + "'"
recognizer.notifyErrorListeners(msg, nil, nil) recognizer.notifyErrorListeners(msg, nil, nil)
} }
@ -64,7 +64,7 @@ func (this *DiagnosticErrorListener) reportContextSensitivity(recognizer *Parser
var msg = "reportContextSensitivity d=" + var msg = "reportContextSensitivity d=" +
this.getDecisionDescription(recognizer, dfa) + this.getDecisionDescription(recognizer, dfa) +
", input='" + ", input='" +
recognizer.getTokenStream().getTextFromInterval(NewInterval(startIndex, stopIndex)) + "'" recognizer.GetTokenStream().GetTextFromInterval(NewInterval(startIndex, stopIndex)) + "'"
recognizer.notifyErrorListeners(msg, nil, nil) recognizer.notifyErrorListeners(msg, nil, nil)
} }

View File

@ -9,11 +9,11 @@ import (
type IErrorStrategy interface { type IErrorStrategy interface {
reset(IParser) reset(IParser)
recoverInline(IParser) *Token RecoverInline(IParser) *Token
recover(IParser, IRecognitionException) Recover(IParser, IRecognitionException)
sync(IParser) Sync(IParser)
inErrorRecoveryMode(IParser) bool inErrorRecoveryMode(IParser) bool
reportError(IParser, IRecognitionException) ReportError(IParser, IRecognitionException)
reportMatch(IParser) reportMatch(IParser)
} }
@ -23,19 +23,19 @@ type ErrorStrategy struct {
func (this *ErrorStrategy) reset(recognizer IParser) { func (this *ErrorStrategy) reset(recognizer IParser) {
} }
func (this *ErrorStrategy) recoverInline(recognizer IParser) { func (this *ErrorStrategy) RecoverInline(recognizer IParser) {
} }
func (this *ErrorStrategy) recover(recognizer IParser, e IRecognitionException) { func (this *ErrorStrategy) Recover(recognizer IParser, e IRecognitionException) {
} }
func (this *ErrorStrategy) sync(recognizer IParser) { func (this *ErrorStrategy) Sync(recognizer IParser) {
} }
func (this *ErrorStrategy) inErrorRecoveryMode(recognizer IParser) { func (this *ErrorStrategy) inErrorRecoveryMode(recognizer IParser) {
} }
func (this *ErrorStrategy) reportError(recognizer IParser, e IRecognitionException) { func (this *ErrorStrategy) ReportError(recognizer IParser, e IRecognitionException) {
} }
func (this *ErrorStrategy) reportMatch(recognizer IParser) { func (this *ErrorStrategy) reportMatch(recognizer IParser) {
@ -133,16 +133,16 @@ func (this *DefaultErrorStrategy) reportMatch(recognizer IParser) {
// <ul> // <ul>
// <li>{@link NoViableAltException}: Dispatches the call to // <li>{@link NoViableAltException}: Dispatches the call to
// {@link //reportNoViableAlternative}</li> // {@link //reportNoViableAlternative}</li>
// <li>{@link InputMismatchException}: Dispatches the call to // <li>{@link InputMisMatchException}: Dispatches the call to
// {@link //reportInputMismatch}</li> // {@link //reportInputMisMatch}</li>
// <li>{@link FailedPredicateException}: Dispatches the call to // <li>{@link FailedPredicateException}: Dispatches the call to
// {@link //reportFailedPredicate}</li> // {@link //reportFailedPredicate}</li>
// <li>All other types: calls {@link Parser//notifyErrorListeners} to report // <li>All other types: calls {@link Parser//notifyErrorListeners} to report
// the exception</li> // the exception</li>
// </ul> // </ul>
// //
func (this *DefaultErrorStrategy) reportError(recognizer IParser, e IRecognitionException) { func (this *DefaultErrorStrategy) ReportError(recognizer IParser, e IRecognitionException) {
// if we've already reported an error and have not matched a token // if we've already reported an error and have not Matched a token
// yet successfully, don't report any errors. // yet successfully, don't report any errors.
if this.inErrorRecoveryMode(recognizer) { if this.inErrorRecoveryMode(recognizer) {
return // don't report spurious errors return // don't report spurious errors
@ -156,8 +156,8 @@ func (this *DefaultErrorStrategy) reportError(recognizer IParser, e IRecognition
recognizer.notifyErrorListeners(e.getMessage(), e.getOffendingToken(), e) recognizer.notifyErrorListeners(e.getMessage(), e.getOffendingToken(), e)
case *NoViableAltException: case *NoViableAltException:
this.reportNoViableAlternative(recognizer, t) this.reportNoViableAlternative(recognizer, t)
case *InputMismatchException: case *InputMisMatchException:
this.reportInputMismatch(recognizer, t) this.reportInputMisMatch(recognizer, t)
case *FailedPredicateException: case *FailedPredicateException:
this.reportFailedPredicate(recognizer, t) this.reportFailedPredicate(recognizer, t)
} }
@ -166,46 +166,46 @@ func (this *DefaultErrorStrategy) reportError(recognizer IParser, e IRecognition
// //
// {@inheritDoc} // {@inheritDoc}
// //
// <p>The default implementation resynchronizes the parser by consuming tokens // <p>The default implementation reSynchronizes the parser by consuming tokens
// until we find one in the resynchronization set--loosely the set of tokens // until we find one in the reSynchronization set--loosely the set of tokens
// that can follow the current rule.</p> // that can follow the current rule.</p>
// //
func (this *DefaultErrorStrategy) recover(recognizer IParser, e IRecognitionException) { func (this *DefaultErrorStrategy) Recover(recognizer IParser, e IRecognitionException) {
if this.lastErrorIndex == recognizer.getInputStream().index() && if this.lastErrorIndex == recognizer.getInputStream().index() &&
this.lastErrorStates != nil && this.lastErrorStates.contains(recognizer.getState()) { this.lastErrorStates != nil && this.lastErrorStates.contains(recognizer.GetState()) {
// uh oh, another error at same token index and previously-visited // uh oh, another error at same token index and previously-visited
// state in ATN must be a case where LT(1) is in the recovery // state in ATN must be a case where LT(1) is in the recovery
// token set so nothing got consumed. Consume a single token // token set so nothing got consumed. Consume a single token
// at least to prevent an infinite loop this is a failsafe. // at least to prevent an infinite loop this is a failsafe.
recognizer.consume() recognizer.Consume()
} }
this.lastErrorIndex = recognizer.getInputStream().index() this.lastErrorIndex = recognizer.getInputStream().index()
if this.lastErrorStates == nil { if this.lastErrorStates == nil {
this.lastErrorStates = NewIntervalSet() this.lastErrorStates = NewIntervalSet()
} }
this.lastErrorStates.addOne(recognizer.getState()) this.lastErrorStates.addOne(recognizer.GetState())
var followSet = this.getErrorRecoverySet(recognizer) var followSet = this.getErrorRecoverySet(recognizer)
this.consumeUntil(recognizer, followSet) this.consumeUntil(recognizer, followSet)
} }
// The default implementation of {@link ANTLRErrorStrategy//sync} makes sure // The default implementation of {@link ANTLRErrorStrategy//Sync} makes sure
// that the current lookahead symbol is consistent with what were expecting // that the current lookahead symbol is consistent with what were expecting
// at this point in the ATN. You can call this anytime but ANTLR only // at this point in the ATN. You can call this anytime but ANTLR only
// generates code to check before subrules/loops and each iteration. // generates code to check before subrules/loops and each iteration.
// //
// <p>Implements Jim Idle's magic sync mechanism in closures and optional // <p>Implements Jim Idle's magic Sync mechanism in closures and optional
// subrules. E.g.,</p> // subrules. E.g.,</p>
// //
// <pre> // <pre>
// a : sync ( stuff sync )* // a : Sync ( stuff Sync )*
// sync : {consume to what can follow sync} // Sync : {consume to what can follow Sync}
// </pre> // </pre>
// //
// At the start of a sub rule upon error, {@link //sync} performs single // At the start of a sub rule upon error, {@link //Sync} performs single
// token deletion, if possible. If it can't do that, it bails on the current // token deletion, if possible. If it can't do that, it bails on the current
// rule and uses the default error recovery, which consumes until the // rule and uses the default error recovery, which consumes until the
// resynchronization set of the current rule. // reSynchronization set of the current rule.
// //
// <p>If the sub rule is optional ({@code (...)?}, {@code (...)*}, or block // <p>If the sub rule is optional ({@code (...)?}, {@code (...)*}, or block
// with an empty alternative), then the expected set includes what follows // with an empty alternative), then the expected set includes what follows
@ -218,7 +218,7 @@ func (this *DefaultErrorStrategy) recover(recognizer IParser, e IRecognitionExce
// <p><strong>ORIGINS</strong></p> // <p><strong>ORIGINS</strong></p>
// //
// <p>Previous versions of ANTLR did a poor job of their recovery within loops. // <p>Previous versions of ANTLR did a poor job of their recovery within loops.
// A single mismatch token or missing token would force the parser to bail // A single misMatch token or missing token would force the parser to bail
// out of the entire rules surrounding the loop. So, for rule</p> // out of the entire rules surrounding the loop. So, for rule</p>
// //
// <pre> // <pre>
@ -234,22 +234,22 @@ func (this *DefaultErrorStrategy) recover(recognizer IParser, e IRecognitionExce
// some reason speed is suffering for you, you can turn off this // some reason speed is suffering for you, you can turn off this
// functionality by simply overriding this method as a blank { }.</p> // functionality by simply overriding this method as a blank { }.</p>
// //
func (this *DefaultErrorStrategy) sync(recognizer IParser) { func (this *DefaultErrorStrategy) Sync(recognizer IParser) {
// If already recovering, don't try to sync // If already recovering, don't try to Sync
if this.inErrorRecoveryMode(recognizer) { if this.inErrorRecoveryMode(recognizer) {
return return
} }
var s = recognizer.getInterpreter().atn.states[recognizer.getState()] var s = recognizer.GetInterpreter().atn.states[recognizer.GetState()]
var la = recognizer.getTokenStream().LA(1) var la = recognizer.GetTokenStream().LA(1)
// try cheaper subset first might get lucky. seems to shave a wee bit off // try cheaper subset first might get lucky. seems to shave a wee bit off
if la == TokenEOF || recognizer.getATN().nextTokens(s, nil).contains(la) { if la == TokenEOF || recognizer.getATN().nextTokens(s, nil).contains(la) {
return return
} }
// Return but don't end recovery. only do that upon valid token match // Return but don't end recovery. only do that upon valid token Match
if recognizer.isExpectedToken(la) { if recognizer.isExpectedToken(la) {
return return
} }
switch s.getStateType() { switch s.GetStateType() {
case ATNStateBLOCK_START: case ATNStateBLOCK_START:
case ATNStateSTAR_BLOCK_START: case ATNStateSTAR_BLOCK_START:
case ATNStatePLUS_BLOCK_START: case ATNStatePLUS_BLOCK_START:
@ -258,7 +258,7 @@ func (this *DefaultErrorStrategy) sync(recognizer IParser) {
if this.singleTokenDeletion(recognizer) != nil { if this.singleTokenDeletion(recognizer) != nil {
return return
} else { } else {
panic(NewInputMismatchException(recognizer)) panic(NewInputMisMatchException(recognizer))
} }
break break
case ATNStatePLUS_LOOP_BACK: case ATNStatePLUS_LOOP_BACK:
@ -274,22 +274,22 @@ func (this *DefaultErrorStrategy) sync(recognizer IParser) {
} }
} }
// This is called by {@link //reportError} when the exception is a // This is called by {@link //ReportError} when the exception is a
// {@link NoViableAltException}. // {@link NoViableAltException}.
// //
// @see //reportError // @see //ReportError
// //
// @param recognizer the parser instance // @param recognizer the parser instance
// @param e the recognition exception // @param e the recognition exception
// //
func (this *DefaultErrorStrategy) reportNoViableAlternative(recognizer IParser, e *NoViableAltException) { func (this *DefaultErrorStrategy) reportNoViableAlternative(recognizer IParser, e *NoViableAltException) {
var tokens = recognizer.getTokenStream() var tokens = recognizer.GetTokenStream()
var input string var input string
if tokens != nil { if tokens != nil {
if e.startToken.tokenType == TokenEOF { if e.startToken.tokenType == TokenEOF {
input = "<EOF>" input = "<EOF>"
} else { } else {
input = tokens.getTextFromTokens(e.startToken, e.offendingToken) input = tokens.GetTextFromTokens(e.startToken, e.offendingToken)
} }
} else { } else {
input = "<unknown input>" input = "<unknown input>"
@ -299,31 +299,31 @@ func (this *DefaultErrorStrategy) reportNoViableAlternative(recognizer IParser,
} }
// //
// This is called by {@link //reportError} when the exception is an // This is called by {@link //ReportError} when the exception is an
// {@link InputMismatchException}. // {@link InputMisMatchException}.
// //
// @see //reportError // @see //ReportError
// //
// @param recognizer the parser instance // @param recognizer the parser instance
// @param e the recognition exception // @param e the recognition exception
// //
func (this *DefaultErrorStrategy) reportInputMismatch(recognizer IParser, e *InputMismatchException) { func (this *DefaultErrorStrategy) reportInputMisMatch(recognizer IParser, e *InputMisMatchException) {
var msg = "mismatched input " + this.getTokenErrorDisplay(e.offendingToken) + var msg = "misMatched input " + this.GetTokenErrorDisplay(e.offendingToken) +
" expecting " + e.getExpectedTokens().toStringVerbose(recognizer.getLiteralNames(), recognizer.getSymbolicNames(), false) " expecting " + e.getExpectedTokens().toStringVerbose(recognizer.getLiteralNames(), recognizer.getSymbolicNames(), false)
recognizer.notifyErrorListeners(msg, e.offendingToken, e) recognizer.notifyErrorListeners(msg, e.offendingToken, e)
} }
// //
// This is called by {@link //reportError} when the exception is a // This is called by {@link //ReportError} when the exception is a
// {@link FailedPredicateException}. // {@link FailedPredicateException}.
// //
// @see //reportError // @see //ReportError
// //
// @param recognizer the parser instance // @param recognizer the parser instance
// @param e the recognition exception // @param e the recognition exception
// //
func (this *DefaultErrorStrategy) reportFailedPredicate(recognizer IParser, e *FailedPredicateException) { func (this *DefaultErrorStrategy) reportFailedPredicate(recognizer IParser, e *FailedPredicateException) {
var ruleName = recognizer.getRuleNames()[recognizer.getParserRuleContext().getRuleIndex()] var ruleName = recognizer.getRuleNames()[recognizer.GetParserRuleContext().getRuleIndex()]
var msg = "rule " + ruleName + " " + e.message var msg = "rule " + ruleName + " " + e.message
recognizer.notifyErrorListeners(msg, e.offendingToken, e) recognizer.notifyErrorListeners(msg, e.offendingToken, e)
} }
@ -335,7 +335,7 @@ func (this *DefaultErrorStrategy) reportFailedPredicate(recognizer IParser, e *F
// {@code recognizer} is in error recovery mode. // {@code recognizer} is in error recovery mode.
// //
// <p>This method is called when {@link //singleTokenDeletion} identifies // <p>This method is called when {@link //singleTokenDeletion} identifies
// single-token deletion as a viable recovery strategy for a mismatched // single-token deletion as a viable recovery strategy for a misMatched
// input error.</p> // input error.</p>
// //
// <p>The default implementation simply returns if the handler is already in // <p>The default implementation simply returns if the handler is already in
@ -351,7 +351,7 @@ func (this *DefaultErrorStrategy) reportUnwantedToken(recognizer IParser) {
} }
this.beginErrorCondition(recognizer) this.beginErrorCondition(recognizer)
var t = recognizer.getCurrentToken() var t = recognizer.getCurrentToken()
var tokenName = this.getTokenErrorDisplay(t) var tokenName = this.GetTokenErrorDisplay(t)
var expecting = this.getExpectedTokens(recognizer) var expecting = this.getExpectedTokens(recognizer)
var msg = "extraneous input " + tokenName + " expecting " + var msg = "extraneous input " + tokenName + " expecting " +
expecting.toStringVerbose(recognizer.getLiteralNames(), recognizer.getSymbolicNames(), false) expecting.toStringVerbose(recognizer.getLiteralNames(), recognizer.getSymbolicNames(), false)
@ -364,7 +364,7 @@ func (this *DefaultErrorStrategy) reportUnwantedToken(recognizer IParser) {
// method returns, {@code recognizer} is in error recovery mode. // method returns, {@code recognizer} is in error recovery mode.
// //
// <p>This method is called when {@link //singleTokenInsertion} identifies // <p>This method is called when {@link //singleTokenInsertion} identifies
// single-token insertion as a viable recovery strategy for a mismatched // single-token insertion as a viable recovery strategy for a misMatched
// input error.</p> // input error.</p>
// //
// <p>The default implementation simply returns if the handler is already in // <p>The default implementation simply returns if the handler is already in
@ -382,21 +382,21 @@ func (this *DefaultErrorStrategy) reportMissingToken(recognizer IParser) {
var t = recognizer.getCurrentToken() var t = recognizer.getCurrentToken()
var expecting = this.getExpectedTokens(recognizer) var expecting = this.getExpectedTokens(recognizer)
var msg = "missing " + expecting.toStringVerbose(recognizer.getLiteralNames(), recognizer.getSymbolicNames(), false) + var msg = "missing " + expecting.toStringVerbose(recognizer.getLiteralNames(), recognizer.getSymbolicNames(), false) +
" at " + this.getTokenErrorDisplay(t) " at " + this.GetTokenErrorDisplay(t)
recognizer.notifyErrorListeners(msg, t, nil) recognizer.notifyErrorListeners(msg, t, nil)
} }
// <p>The default implementation attempts to recover from the mismatched input // <p>The default implementation attempts to recover from the misMatched input
// by using single token insertion and deletion as described below. If the // by using single token insertion and deletion as described below. If the
// recovery attempt fails, this method panics an // recovery attempt fails, this method panics an
// {@link InputMismatchException}.</p> // {@link InputMisMatchException}.</p>
// //
// <p><strong>EXTRA TOKEN</strong> (single token deletion)</p> // <p><strong>EXTRA TOKEN</strong> (single token deletion)</p>
// //
// <p>{@code LA(1)} is not what we are looking for. If {@code LA(2)} has the // <p>{@code LA(1)} is not what we are looking for. If {@code LA(2)} has the
// right token, however, then assume {@code LA(1)} is some extra spurious // right token, however, then assume {@code LA(1)} is some extra spurious
// token and delete it. Then consume and return the next token (which was // token and delete it. Then consume and return the next token (which was
// the {@code LA(2)} token) as the successful result of the match operation.</p> // the {@code LA(2)} token) as the successful result of the Match operation.</p>
// //
// <p>This recovery strategy is implemented by {@link // <p>This recovery strategy is implemented by {@link
// //singleTokenDeletion}.</p> // //singleTokenDeletion}.</p>
@ -407,7 +407,7 @@ func (this *DefaultErrorStrategy) reportMissingToken(recognizer IParser) {
// after the expected {@code LA(1)} token, then assume the token is missing // after the expected {@code LA(1)} token, then assume the token is missing
// and use the parser's {@link TokenFactory} to create it on the fly. The // and use the parser's {@link TokenFactory} to create it on the fly. The
// "insertion" is performed by returning the created token as the successful // "insertion" is performed by returning the created token as the successful
// result of the match operation.</p> // result of the Match operation.</p>
// //
// <p>This recovery strategy is implemented by {@link // <p>This recovery strategy is implemented by {@link
// //singleTokenInsertion}.</p> // //singleTokenInsertion}.</p>
@ -422,7 +422,7 @@ func (this *DefaultErrorStrategy) reportMissingToken(recognizer IParser) {
// stat &rarr expr &rarr atom // stat &rarr expr &rarr atom
// </pre> // </pre>
// //
// and it will be trying to match the {@code ')'} at this point in the // and it will be trying to Match the {@code ')'} at this point in the
// derivation: // derivation:
// //
// <pre> // <pre>
@ -430,54 +430,54 @@ func (this *DefaultErrorStrategy) reportMissingToken(recognizer IParser) {
// ^ // ^
// </pre> // </pre>
// //
// The attempt to match {@code ')'} will fail when it sees {@code ''} and // The attempt to Match {@code ')'} will fail when it sees {@code ''} and
// call {@link //recoverInline}. To recover, it sees that {@code LA(1)==''} // call {@link //recoverInline}. To recover, it sees that {@code LA(1)==''}
// is in the set of tokens that can follow the {@code ')'} token reference // is in the set of tokens that can follow the {@code ')'} token reference
// in rule {@code atom}. It can assume that you forgot the {@code ')'}. // in rule {@code atom}. It can assume that you forgot the {@code ')'}.
// //
func (this *DefaultErrorStrategy) recoverInline(recognizer IParser) *Token { func (this *DefaultErrorStrategy) RecoverInline(recognizer IParser) *Token {
// SINGLE TOKEN DELETION // SINGLE TOKEN DELETION
var matchedSymbol = this.singleTokenDeletion(recognizer) var MatchedSymbol = this.singleTokenDeletion(recognizer)
if matchedSymbol != nil { if MatchedSymbol != nil {
// we have deleted the extra token. // we have deleted the extra token.
// now, move past ttype token as if all were ok // now, move past ttype token as if all were ok
recognizer.consume() recognizer.Consume()
return matchedSymbol return MatchedSymbol
} }
// SINGLE TOKEN INSERTION // SINGLE TOKEN INSERTION
if this.singleTokenInsertion(recognizer) { if this.singleTokenInsertion(recognizer) {
return this.getMissingSymbol(recognizer) return this.getMissingSymbol(recognizer)
} }
// even that didn't work must panic the exception // even that didn't work must panic the exception
panic(NewInputMismatchException(recognizer)) panic(NewInputMisMatchException(recognizer))
} }
// //
// This method implements the single-token insertion inline error recovery // This method implements the single-token insertion inline error recovery
// strategy. It is called by {@link //recoverInline} if the single-token // strategy. It is called by {@link //recoverInline} if the single-token
// deletion strategy fails to recover from the mismatched input. If this // deletion strategy fails to recover from the misMatched input. If this
// method returns {@code true}, {@code recognizer} will be in error recovery // method returns {@code true}, {@code recognizer} will be in error recovery
// mode. // mode.
// //
// <p>This method determines whether or not single-token insertion is viable by // <p>This method determines whether or not single-token insertion is viable by
// checking if the {@code LA(1)} input symbol could be successfully matched // checking if the {@code LA(1)} input symbol could be successfully Matched
// if it were instead the {@code LA(2)} symbol. If this method returns // if it were instead the {@code LA(2)} symbol. If this method returns
// {@code true}, the caller is responsible for creating and inserting a // {@code true}, the caller is responsible for creating and inserting a
// token with the correct type to produce this behavior.</p> // token with the correct type to produce this behavior.</p>
// //
// @param recognizer the parser instance // @param recognizer the parser instance
// @return {@code true} if single-token insertion is a viable recovery // @return {@code true} if single-token insertion is a viable recovery
// strategy for the current mismatched input, otherwise {@code false} // strategy for the current misMatched input, otherwise {@code false}
// //
func (this *DefaultErrorStrategy) singleTokenInsertion(recognizer IParser) bool { func (this *DefaultErrorStrategy) singleTokenInsertion(recognizer IParser) bool {
var currentSymbolType = recognizer.getTokenStream().LA(1) var currentSymbolType = recognizer.GetTokenStream().LA(1)
// if current token is consistent with what could come after current // if current token is consistent with what could come after current
// ATN state, then we know we're missing a token error recovery // ATN state, then we know we're missing a token error recovery
// is free to conjure up and insert the missing token // is free to conjure up and insert the missing token
var atn = recognizer.getInterpreter().atn var atn = recognizer.GetInterpreter().atn
var currentState = atn.states[recognizer.getState()] var currentState = atn.states[recognizer.GetState()]
var next = currentState.getTransitions()[0].getTarget() var next = currentState.getTransitions()[0].getTarget()
var expectingAtLL2 = atn.nextTokens(next, recognizer.getParserRuleContext()) var expectingAtLL2 = atn.nextTokens(next, recognizer.GetParserRuleContext())
if expectingAtLL2.contains(currentSymbolType) { if expectingAtLL2.contains(currentSymbolType) {
this.reportMissingToken(recognizer) this.reportMissingToken(recognizer)
return true return true
@ -488,36 +488,36 @@ func (this *DefaultErrorStrategy) singleTokenInsertion(recognizer IParser) bool
// This method implements the single-token deletion inline error recovery // This method implements the single-token deletion inline error recovery
// strategy. It is called by {@link //recoverInline} to attempt to recover // strategy. It is called by {@link //recoverInline} to attempt to recover
// from mismatched input. If this method returns nil, the parser and error // from misMatched input. If this method returns nil, the parser and error
// handler state will not have changed. If this method returns non-nil, // handler state will not have changed. If this method returns non-nil,
// {@code recognizer} will <em>not</em> be in error recovery mode since the // {@code recognizer} will <em>not</em> be in error recovery mode since the
// returned token was a successful match. // returned token was a successful Match.
// //
// <p>If the single-token deletion is successful, this method calls // <p>If the single-token deletion is successful, this method calls
// {@link //reportUnwantedToken} to report the error, followed by // {@link //reportUnwantedToken} to report the error, followed by
// {@link Parser//consume} to actually "delete" the extraneous token. Then, // {@link Parser//consume} to actually "delete" the extraneous token. Then,
// before returning {@link //reportMatch} is called to signal a successful // before returning {@link //reportMatch} is called to signal a successful
// match.</p> // Match.</p>
// //
// @param recognizer the parser instance // @param recognizer the parser instance
// @return the successfully matched {@link Token} instance if single-token // @return the successfully Matched {@link Token} instance if single-token
// deletion successfully recovers from the mismatched input, otherwise // deletion successfully recovers from the misMatched input, otherwise
// {@code nil} // {@code nil}
// //
func (this *DefaultErrorStrategy) singleTokenDeletion(recognizer IParser) *Token { func (this *DefaultErrorStrategy) singleTokenDeletion(recognizer IParser) *Token {
var nextTokenType = recognizer.getTokenStream().LA(2) var nextTokenType = recognizer.GetTokenStream().LA(2)
var expecting = this.getExpectedTokens(recognizer) var expecting = this.getExpectedTokens(recognizer)
if expecting.contains(nextTokenType) { if expecting.contains(nextTokenType) {
this.reportUnwantedToken(recognizer) this.reportUnwantedToken(recognizer)
// print("recoverFromMismatchedToken deleting " \ // print("recoverFromMisMatchedToken deleting " \
// + str(recognizer.getTokenStream().LT(1)) \ // + str(recognizer.GetTokenStream().LT(1)) \
// + " since " + str(recognizer.getTokenStream().LT(2)) \ // + " since " + str(recognizer.GetTokenStream().LT(2)) \
// + " is what we want", file=sys.stderr) // + " is what we want", file=sys.stderr)
recognizer.consume() // simply delete extra token recognizer.Consume() // simply delete extra token
// we want to return the token we're actually matching // we want to return the token we're actually Matching
var matchedSymbol = recognizer.getCurrentToken() var MatchedSymbol = recognizer.getCurrentToken()
this.reportMatch(recognizer) // we know current token is correct this.reportMatch(recognizer) // we know current token is correct
return matchedSymbol return MatchedSymbol
} else { } else {
return nil return nil
} }
@ -528,7 +528,7 @@ func (this *DefaultErrorStrategy) singleTokenDeletion(recognizer IParser) *Token
// The recognizer attempts to recover from single missing // The recognizer attempts to recover from single missing
// symbols. But, actions might refer to that missing symbol. // symbols. But, actions might refer to that missing symbol.
// For example, x=ID {f($x)}. The action clearly assumes // For example, x=ID {f($x)}. The action clearly assumes
// that there has been an identifier matched previously and that // that there has been an identifier Matched previously and that
// $x points at that token. If that token is missing, but // $x points at that token. If that token is missing, but
// the next token in the stream is what we want we assume that // the next token in the stream is what we want we assume that
// this token is missing and we keep going. Because we // this token is missing and we keep going. Because we
@ -553,12 +553,12 @@ func (this *DefaultErrorStrategy) getMissingSymbol(recognizer IParser) *Token {
tokenText = "<missing " + recognizer.getLiteralNames()[expectedTokenType] + ">" tokenText = "<missing " + recognizer.getLiteralNames()[expectedTokenType] + ">"
} }
var current = currentSymbol var current = currentSymbol
var lookback = recognizer.getTokenStream().LT(-1) var lookback = recognizer.GetTokenStream().LT(-1)
if current.tokenType == TokenEOF && lookback != nil { if current.tokenType == TokenEOF && lookback != nil {
current = lookback current = lookback
} }
tf := recognizer.getTokenFactory() tf := recognizer.GetTokenFactory()
return tf.create(current.source, expectedTokenType, tokenText, TokenDefaultChannel, -1, -1, current.line, current.column) return tf.create(current.source, expectedTokenType, tokenText, TokenDefaultChannel, -1, -1, current.line, current.column)
} }
@ -574,7 +574,7 @@ func (this *DefaultErrorStrategy) getExpectedTokens(recognizer IParser) *Interva
// your token objects because you don't have to go modify your lexer // your token objects because you don't have to go modify your lexer
// so that it creates a NewJava type. // so that it creates a NewJava type.
// //
func (this *DefaultErrorStrategy) getTokenErrorDisplay(t *Token) string { func (this *DefaultErrorStrategy) GetTokenErrorDisplay(t *Token) string {
if t == nil { if t == nil {
return "<no token>" return "<no token>"
} }
@ -652,18 +652,18 @@ func (this *DefaultErrorStrategy) escapeWSAndQuote(s string) string {
// (context-sensitive or otherwise). We need the combined set of // (context-sensitive or otherwise). We need the combined set of
// all context-sensitive FOLLOW sets--the set of all tokens that // all context-sensitive FOLLOW sets--the set of all tokens that
// could follow any reference in the call chain. We need to // could follow any reference in the call chain. We need to
// resync to one of those tokens. Note that FOLLOW(c)='^' and if // reSync to one of those tokens. Note that FOLLOW(c)='^' and if
// we resync'd to that token, we'd consume until EOF. We need to // we reSync'd to that token, we'd consume until EOF. We need to
// sync to context-sensitive FOLLOWs for a, b, and c: {']','^'}. // Sync to context-sensitive FOLLOWs for a, b, and c: {']','^'}.
// In this case, for input "[]", LA(1) is ']' and in the set, so we would // In this case, for input "[]", LA(1) is ']' and in the set, so we would
// not consume anything. After printing an error, rule c would // not consume anything. After printing an error, rule c would
// return normally. Rule b would not find the required '^' though. // return normally. Rule b would not find the required '^' though.
// At this point, it gets a mismatched token error and panics an // At this point, it gets a misMatched token error and panics an
// exception (since LA(1) is not in the viable following token // exception (since LA(1) is not in the viable following token
// set). The rule exception handler tries to recover, but finds // set). The rule exception handler tries to recover, but finds
// the same recovery set and doesn't consume anything. Rule b // the same recovery set and doesn't consume anything. Rule b
// exits normally returning to rule a. Now it finds the ']' (and // exits normally returning to rule a. Now it finds the ']' (and
// with the successful match exits errorRecovery mode). // with the successful Match exits errorRecovery mode).
// //
// So, you can see that the parser walks up the call chain looking // So, you can see that the parser walks up the call chain looking
// for the token that was a member of the recovery set. // for the token that was a member of the recovery set.
@ -689,8 +689,8 @@ func (this *DefaultErrorStrategy) escapeWSAndQuote(s string) string {
// at run-time upon error to avoid overhead during parsing. // at run-time upon error to avoid overhead during parsing.
// //
func (this *DefaultErrorStrategy) getErrorRecoverySet(recognizer IParser) *IntervalSet { func (this *DefaultErrorStrategy) getErrorRecoverySet(recognizer IParser) *IntervalSet {
var atn = recognizer.getInterpreter().atn var atn = recognizer.GetInterpreter().atn
var ctx = recognizer.getParserRuleContext() var ctx = recognizer.GetParserRuleContext()
var recoverSet = NewIntervalSet() var recoverSet = NewIntervalSet()
for ctx != nil && ctx.getInvokingState() >= 0 { for ctx != nil && ctx.getInvokingState() >= 0 {
// compute what follows who invoked us // compute what follows who invoked us
@ -698,18 +698,18 @@ func (this *DefaultErrorStrategy) getErrorRecoverySet(recognizer IParser) *Inter
var rt = invokingState.getTransitions()[0] var rt = invokingState.getTransitions()[0]
var follow = atn.nextTokens(rt.(*RuleTransition).followState, nil) var follow = atn.nextTokens(rt.(*RuleTransition).followState, nil)
recoverSet.addSet(follow) recoverSet.addSet(follow)
ctx = ctx.getParent().(IParserRuleContext) ctx = ctx.GetParent().(IParserRuleContext)
} }
recoverSet.removeOne(TokenEpsilon) recoverSet.removeOne(TokenEpsilon)
return recoverSet return recoverSet
} }
// Consume tokens until one matches the given token set.// // Consume tokens until one Matches the given token set.//
func (this *DefaultErrorStrategy) consumeUntil(recognizer IParser, set *IntervalSet) { func (this *DefaultErrorStrategy) consumeUntil(recognizer IParser, set *IntervalSet) {
var ttype = recognizer.getTokenStream().LA(1) var ttype = recognizer.GetTokenStream().LA(1)
for ttype != TokenEOF && !set.contains(ttype) { for ttype != TokenEOF && !set.contains(ttype) {
recognizer.consume() recognizer.Consume()
ttype = recognizer.getTokenStream().LA(1) ttype = recognizer.GetTokenStream().LA(1)
} }
} }
@ -728,7 +728,7 @@ func (this *DefaultErrorStrategy) consumeUntil(recognizer IParser, set *Interval
// stage of two-stage parsing to immediately terminate if an error is // stage of two-stage parsing to immediately terminate if an error is
// encountered, and immediately fall back to the second stage. In addition to // encountered, and immediately fall back to the second stage. In addition to
// avoiding wasted work by attempting to recover from errors here, the empty // avoiding wasted work by attempting to recover from errors here, the empty
// implementation of {@link BailErrorStrategy//sync} improves the performance of // implementation of {@link BailErrorStrategy//Sync} improves the performance of
// the first stage.</li> // the first stage.</li>
// <li><strong>Silent validation:</strong> When syntax errors are not being // <li><strong>Silent validation:</strong> When syntax errors are not being
// reported or logged, and the parse result is simply ignored if errors occur, // reported or logged, and the parse result is simply ignored if errors occur,
@ -758,11 +758,11 @@ func NewBailErrorStrategy() *BailErrorStrategy {
// rule func catches. Use {@link Exception//getCause()} to get the // rule func catches. Use {@link Exception//getCause()} to get the
// original {@link RecognitionException}. // original {@link RecognitionException}.
// //
func (this *BailErrorStrategy) recover(recognizer IParser, e IRecognitionException) { func (this *BailErrorStrategy) Recover(recognizer IParser, e IRecognitionException) {
var context = recognizer.getParserRuleContext() var context = recognizer.GetParserRuleContext()
for context != nil { for context != nil {
context.setException(e) context.SetException(e)
context = context.getParent().(IParserRuleContext) context = context.GetParent().(IParserRuleContext)
} }
panic(NewParseCancellationException()) // TODO we don't emit e properly panic(NewParseCancellationException()) // TODO we don't emit e properly
} }
@ -770,11 +770,11 @@ func (this *BailErrorStrategy) recover(recognizer IParser, e IRecognitionExcepti
// Make sure we don't attempt to recover inline if the parser // Make sure we don't attempt to recover inline if the parser
// successfully recovers, it won't panic an exception. // successfully recovers, it won't panic an exception.
// //
func (this *BailErrorStrategy) recoverInline(recognizer IParser) { func (this *BailErrorStrategy) RecoverInline(recognizer IParser) {
this.recover(recognizer, NewInputMismatchException(recognizer)) this.Recover(recognizer, NewInputMisMatchException(recognizer))
} }
// Make sure we don't attempt to recover from problems in subrules.// // Make sure we don't attempt to recover from problems in subrules.//
func (this *BailErrorStrategy) sync(recognizer IParser) { func (this *BailErrorStrategy) Sync(recognizer IParser) {
// pass // pass
} }

View File

@ -4,7 +4,7 @@ import ()
// The root of the ANTLR exception hierarchy. In general, ANTLR tracks just // The root of the ANTLR exception hierarchy. In general, ANTLR tracks just
// 3 kinds of errors: prediction errors, failed predicate errors, and // 3 kinds of errors: prediction errors, failed predicate errors, and
// mismatched input errors. In each case, the parser knows where it is // misMatched input errors. In each case, the parser knows where it is
// in the input, where it is in the ATN, the rule invocation stack, // in the input, where it is in the ATN, the rule invocation stack,
// and what kind of problem occurred. // and what kind of problem occurred.
@ -54,10 +54,10 @@ func (t *RecognitionException) InitRecognitionException(message string, recogniz
// occurred. For {@link NoViableAltException} and // occurred. For {@link NoViableAltException} and
// {@link LexerNoViableAltException} exceptions, this is the // {@link LexerNoViableAltException} exceptions, this is the
// {@link DecisionState} number. For others, it is the state whose outgoing // {@link DecisionState} number. For others, it is the state whose outgoing
// edge we couldn't match. // edge we couldn't Match.
t.offendingState = -1 t.offendingState = -1
if t.recognizer != nil { if t.recognizer != nil {
t.offendingState = t.recognizer.getState() t.offendingState = t.recognizer.GetState()
} }
} }
@ -73,7 +73,7 @@ func (this *RecognitionException) getOffendingToken() *Token {
// //
// Gets the set of input symbols which could potentially follow the // Gets the set of input symbols which could potentially follow the
// previously matched symbol at the time this exception was panicn. // previously Matched symbol at the time this exception was panicn.
// //
// <p>If the set of expected tokens is not known and could not be computed, // <p>If the set of expected tokens is not known and could not be computed,
// this method returns {@code nil}.</p> // this method returns {@code nil}.</p>
@ -116,7 +116,7 @@ func NewLexerNoViableAltException(lexer *Lexer, input CharStream, startIndex int
func (this *LexerNoViableAltException) toString() string { func (this *LexerNoViableAltException) toString() string {
var symbol = "" var symbol = ""
if this.startIndex >= 0 && this.startIndex < this.input.size() { if this.startIndex >= 0 && this.startIndex < this.input.size() {
symbol = this.input.getTextFromInterval(NewInterval(this.startIndex, this.startIndex)) symbol = this.input.GetTextFromInterval(NewInterval(this.startIndex, this.startIndex))
} }
return "LexerNoViableAltException" + symbol return "LexerNoViableAltException" + symbol
} }
@ -138,7 +138,7 @@ type NoViableAltException struct {
func NewNoViableAltException(recognizer IParser, input CharStream, startToken *Token, offendingToken *Token, deadEndConfigs *ATNConfigSet, ctx IParserRuleContext) *NoViableAltException { func NewNoViableAltException(recognizer IParser, input CharStream, startToken *Token, offendingToken *Token, deadEndConfigs *ATNConfigSet, ctx IParserRuleContext) *NoViableAltException {
if ctx == nil { if ctx == nil {
ctx = recognizer.getParserRuleContext() ctx = recognizer.GetParserRuleContext()
} }
if offendingToken == nil { if offendingToken == nil {
@ -156,7 +156,7 @@ func NewNoViableAltException(recognizer IParser, input CharStream, startToken *T
this := new(NoViableAltException) this := new(NoViableAltException)
this.InitRecognitionException("", recognizer, input, ctx) this.InitRecognitionException("", recognizer, input, ctx)
// Which configurations did we try at input.index() that couldn't match // Which configurations did we try at input.index() that couldn't Match
// input.LT(1)?// // input.LT(1)?//
this.deadEndConfigs = deadEndConfigs this.deadEndConfigs = deadEndConfigs
// The token object at the start index the input stream might // The token object at the start index the input stream might
@ -169,17 +169,17 @@ func NewNoViableAltException(recognizer IParser, input CharStream, startToken *T
return this return this
} }
type InputMismatchException struct { type InputMisMatchException struct {
RecognitionException RecognitionException
} }
// This signifies any kind of mismatched input exceptions such as // This signifies any kind of misMatched input exceptions such as
// when the current input does not match the expected token. // when the current input does not Match the expected token.
// //
func NewInputMismatchException(recognizer IParser) *InputMismatchException { func NewInputMisMatchException(recognizer IParser) *InputMisMatchException {
this := new(InputMismatchException) this := new(InputMisMatchException)
this.InitRecognitionException("", recognizer, recognizer.getInputStream(), recognizer.getParserRuleContext()) this.InitRecognitionException("", recognizer, recognizer.getInputStream(), recognizer.GetParserRuleContext())
this.offendingToken = recognizer.getCurrentToken() this.offendingToken = recognizer.getCurrentToken()
@ -188,7 +188,7 @@ func NewInputMismatchException(recognizer IParser) *InputMismatchException {
} }
// A semantic predicate failed during validation. Validation of predicates // A semantic predicate failed during validation. Validation of predicates
// occurs when normally parsing the alternative just like matching a token. // occurs when normally parsing the alternative just like Matching a token.
// Disambiguating predicate evaluation occurs when we test a predicate during // Disambiguating predicate evaluation occurs when we test a predicate during
// prediction. // prediction.

View File

@ -23,7 +23,7 @@ func (is *InputStream) reset() {
is.index = 0 is.index = 0
} }
func (is *InputStream) consume() { func (is *InputStream) Consume() {
if is.index >= is.size { if is.index >= is.size {
// assert is.LA(1) == TokenEOF // assert is.LA(1) == TokenEOF
panic("cannot consume EOF") panic("cannot consume EOF")
@ -66,7 +66,7 @@ func (is *InputStream) seek(index int) {
is.index = intMin(index, is.size) is.index = intMin(index, is.size)
} }
func (is *InputStream) getText(start int, stop int) string { func (is *InputStream) GetText(start int, stop int) string {
if stop >= is.size { if stop >= is.size {
stop = is.size - 1 stop = is.size - 1
} }

View File

@ -1,7 +1,7 @@
package antlr4 package antlr4
type IntStream interface { type IntStream interface {
consume() Consume()
LA(int) int LA(int) int
mark() int mark() int
release(marker int) release(marker int)

View File

@ -23,7 +23,7 @@ const (
// Calculates the SLL(1) expected lookahead set for each outgoing transition // Calculates the SLL(1) expected lookahead set for each outgoing transition
// of an {@link ATNState}. The returned array has one element for each // of an {@link ATNState}. The returned array has one element for each
// outgoing transition in {@code s}. If the closure from transition // outgoing transition in {@code s}. If the closure from transition
// <em>i</em> leads to a semantic predicate before matching a symbol, the // <em>i</em> leads to a semantic predicate before Matching a symbol, the
// element at index <em>i</em> of the result will be {@code nil}. // element at index <em>i</em> of the result will be {@code nil}.
// //
// @param s the ATN state // @param s the ATN state
@ -154,7 +154,7 @@ func (la *LL1Analyzer) _LOOK(s, stopState IATNState, ctx IPredictionContext, loo
}() }()
calledRuleStack.clear(returnState.getRuleIndex()) calledRuleStack.clear(returnState.getRuleIndex())
la._LOOK(returnState, stopState, ctx.getParent(i), look, lookBusy, calledRuleStack, seeThruPreds, addEOF) la._LOOK(returnState, stopState, ctx.GetParent(i), look, lookBusy, calledRuleStack, seeThruPreds, addEOF)
} }
return return
@ -172,7 +172,7 @@ func (la *LL1Analyzer) _LOOK(s, stopState IATNState, ctx IPredictionContext, loo
continue continue
} }
newContext := SingletonPredictionContextcreate(ctx, t1.followState.getStateNumber()) newContext := SingletonPredictionContextcreate(ctx, t1.followState.GetStateNumber())
defer func() { defer func() {
calledRuleStack.remove(t1.getTarget().getRuleIndex()) calledRuleStack.remove(t1.getTarget().getRuleIndex())

View File

@ -7,7 +7,7 @@ import (
// A lexer is recognizer that draws input symbols from a character stream. // A lexer is recognizer that draws input symbols from a character stream.
// lexer grammars result in a subclass of this object. A Lexer object // lexer grammars result in a subclass of this object. A Lexer object
// uses simplified match() and error recovery mechanisms in the interest // uses simplified Match() and error recovery mechanisms in the interest
// of speed. // of speed.
/// ///
@ -23,7 +23,7 @@ type ILexer interface {
} }
type Lexer struct { type Lexer struct {
Recognizer *Recognizer
Interpreter *LexerATNSimulator Interpreter *LexerATNSimulator
@ -64,8 +64,8 @@ func (l *Lexer) InitLexer(input CharStream) {
// The goal of all lexer rules/methods is to create a token object. // The goal of all lexer rules/methods is to create a token object.
// l is an instance variable as multiple rules may collaborate to // l is an instance variable as multiple rules may collaborate to
// create a single token. nextToken will return l object after // create a single token. nextToken will return l object after
// matching lexer rule(s). If you subclass to allow multiple token // Matching lexer rule(s). If you subclass to allow multiple token
// emissions, then set l to the last token to be matched or // emissions, then set l to the last token to be Matched or
// something nonnil so that the auto token emit mechanism will not // something nonnil so that the auto token emit mechanism will not
// emit another token. // emit another token.
l._token = nil l._token = nil
@ -139,14 +139,14 @@ func (l *Lexer) getInputStream() CharStream {
} }
func (l *Lexer) getSourceName() string { func (l *Lexer) getSourceName() string {
return l._input.getSourceName() return l.grammarFileName
} }
func (l *Lexer) setChannel(v int) { func (l *Lexer) setChannel(v int) {
l._channel = v l._channel = v
} }
func (l *Lexer) getTokenFactory() TokenFactory { func (l *Lexer) GetTokenFactory() TokenFactory {
return l._factory return l._factory
} }
@ -161,16 +161,16 @@ func (l *Lexer) safeMatch() (ret int) {
if e := recover(); e != nil { if e := recover(); e != nil {
if re, ok := e.(IRecognitionException); ok { if re, ok := e.(IRecognitionException); ok {
l.notifyListeners(re) // report error l.notifyListeners(re) // report error
l.recover(re) l.Recover(re)
ret = LexerSkip // default ret = LexerSkip // default
} }
} }
}() }()
return l.Interpreter.match(l._input, l._mode) return l.Interpreter.Match(l._input, l._mode)
} }
// Return a token from l source i.e., match a token on the char stream. // Return a token from l source i.e., Match a token on the char stream.
func (l *Lexer) nextToken() *Token { func (l *Lexer) nextToken() *Token {
if l._input == nil { if l._input == nil {
panic("nextToken requires a non-nil input stream.") panic("nextToken requires a non-nil input stream.")
@ -181,7 +181,7 @@ func (l *Lexer) nextToken() *Token {
// previously in finally block // previously in finally block
defer func() { defer func() {
// make sure we release marker after match or // make sure we release marker after Match or
// unbuffered char stream will keep buffering // unbuffered char stream will keep buffering
l._input.release(tokenStartMarker) l._input.release(tokenStartMarker)
}() }()
@ -282,7 +282,7 @@ func (l *Lexer) setInputStream(input CharStream) {
// By default does not support multiple emits per nextToken invocation // By default does not support multiple emits per nextToken invocation
// for efficiency reasons. Subclass and override l method, nextToken, // for efficiency reasons. Subclass and override l method, nextToken,
// and getToken (to push tokens into a list and pull from that list // and GetToken (to push tokens into a list and pull from that list
// rather than a single variable as l implementation does). // rather than a single variable as l implementation does).
// / // /
func (l *Lexer) emitToken(token *Token) { func (l *Lexer) emitToken(token *Token) {
@ -330,13 +330,13 @@ func (l *Lexer) getCharIndex() int {
return l._input.index() return l._input.index()
} }
// Return the text matched so far for the current token or any text override. // Return the text Matched so far for the current token or any text override.
//Set the complete text of l token it wipes any previous changes to the text. //Set the complete text of l token it wipes any previous changes to the text.
func (l *Lexer) text() string { func (l *Lexer) text() string {
if l._text != nil { if l._text != nil {
return *l._text return *l._text
} else { } else {
return l.Interpreter.getText(l._input) return l.Interpreter.GetText(l._input)
} }
} }
@ -364,7 +364,7 @@ func (l *Lexer) getAllTokens() []*Token {
func (l *Lexer) notifyListeners(e IRecognitionException) { func (l *Lexer) notifyListeners(e IRecognitionException) {
var start = l._tokenStartCharIndex var start = l._tokenStartCharIndex
var stop = l._input.index() var stop = l._input.index()
var text = l._input.getTextFromInterval(NewInterval(start, stop)) var text = l._input.GetTextFromInterval(NewInterval(start, stop))
var msg = "token recognition error at: '" + text + "'" var msg = "token recognition error at: '" + text + "'"
var listener = l.getErrorListenerDispatch() var listener = l.getErrorListenerDispatch()
listener.syntaxError(l, nil, l._tokenStartLine, l._tokenStartColumn, msg, e) listener.syntaxError(l, nil, l._tokenStartLine, l._tokenStartColumn, msg, e)
@ -388,19 +388,19 @@ func (l *Lexer) getCharErrorDisplay(c rune) string {
return "'" + l.getErrorDisplayForChar(c) + "'" return "'" + l.getErrorDisplayForChar(c) + "'"
} }
// Lexers can normally match any char in it's vocabulary after matching // Lexers can normally Match any char in it's vocabulary after Matching
// a token, so do the easy thing and just kill a character and hope // a token, so do the easy thing and just kill a character and hope
// it all works out. You can instead use the rule invocation stack // it all works out. You can instead use the rule invocation stack
// to do sophisticated error recovery if you are in a fragment rule. // to do sophisticated error recovery if you are in a fragment rule.
// / // /
func (l *Lexer) recover(re IRecognitionException) { func (l *Lexer) Recover(re IRecognitionException) {
if l._input.LA(1) != TokenEOF { if l._input.LA(1) != TokenEOF {
if _, ok := re.(*LexerNoViableAltException); ok { if _, ok := re.(*LexerNoViableAltException); ok {
// skip a char and try again // skip a char and try again
l.Interpreter.consume(l._input) l.Interpreter.consume(l._input)
} else { } else {
// TODO: Do we lose character or line position information? // TODO: Do we lose character or line position information?
l._input.consume() l._input.Consume()
} }
} }
} }

View File

@ -59,7 +59,7 @@ type LexerATNSimulator struct {
column int column int
mode int mode int
prevAccept *SimState prevAccept *SimState
match_calls int Match_calls int
} }
func NewLexerATNSimulator(recog *Lexer, atn *ATN, decisionToDFA []*DFA, sharedContextCache *PredictionContextCache) *LexerATNSimulator { func NewLexerATNSimulator(recog *Lexer, atn *ATN, decisionToDFA []*DFA, sharedContextCache *PredictionContextCache) *LexerATNSimulator {
@ -94,7 +94,7 @@ var LexerATNSimulatordfa_debug = false
var LexerATNSimulatorMIN_DFA_EDGE = 0 var LexerATNSimulatorMIN_DFA_EDGE = 0
var LexerATNSimulatorMAX_DFA_EDGE = 127 // forces unicode to stay in ATN var LexerATNSimulatorMAX_DFA_EDGE = 127 // forces unicode to stay in ATN
var LexerATNSimulatormatch_calls = 0 var LexerATNSimulatorMatch_calls = 0
func (this *LexerATNSimulator) copyState(simulator *LexerATNSimulator) { func (this *LexerATNSimulator) copyState(simulator *LexerATNSimulator) {
this.column = simulator.column this.column = simulator.column
@ -103,9 +103,9 @@ func (this *LexerATNSimulator) copyState(simulator *LexerATNSimulator) {
this.startIndex = simulator.startIndex this.startIndex = simulator.startIndex
} }
func (this *LexerATNSimulator) match(input CharStream, mode int) int { func (this *LexerATNSimulator) Match(input CharStream, mode int) int {
this.match_calls += 1 this.Match_calls += 1
this.mode = mode this.mode = mode
var mark = input.mark() var mark = input.mark()
@ -117,7 +117,7 @@ func (this *LexerATNSimulator) match(input CharStream, mode int) int {
this.prevAccept.reset() this.prevAccept.reset()
var dfa = this.decisionToDFA[mode] var dfa = this.decisionToDFA[mode]
if dfa.s0 == nil { if dfa.s0 == nil {
return this.matchATN(input) return this.MatchATN(input)
} else { } else {
return this.execATN(input, dfa.s0) return this.execATN(input, dfa.s0)
} }
@ -131,11 +131,11 @@ func (this *LexerATNSimulator) reset() {
this.mode = LexerDefaultMode this.mode = LexerDefaultMode
} }
func (this *LexerATNSimulator) matchATN(input CharStream) int { func (this *LexerATNSimulator) MatchATN(input CharStream) int {
var startState = this.atn.modeToStartState[this.mode] var startState = this.atn.modeToStartState[this.mode]
if LexerATNSimulatordebug { if LexerATNSimulatordebug {
fmt.Println("matchATN mode " + strconv.Itoa(this.mode) + " start: " + startState.toString()) fmt.Println("MatchATN mode " + strconv.Itoa(this.mode) + " start: " + startState.toString())
} }
var old_mode = this.mode var old_mode = this.mode
var s0_closure = this.computeStartState(input, startState) var s0_closure = this.computeStartState(input, startState)
@ -151,7 +151,7 @@ func (this *LexerATNSimulator) matchATN(input CharStream) int {
var predict = this.execATN(input, next) var predict = this.execATN(input, next)
if LexerATNSimulatordebug { if LexerATNSimulatordebug {
fmt.Println("DFA after matchATN: " + this.decisionToDFA[old_mode].toLexerString()) fmt.Println("DFA after MatchATN: " + this.decisionToDFA[old_mode].toLexerString())
} }
return predict return predict
} }
@ -190,10 +190,10 @@ func (this *LexerATNSimulator) execATN(input CharStream, ds0 *DFAState) int {
// A character will take us back to an existing DFA state // A character will take us back to an existing DFA state
// that already has lots of edges out of it. e.g., .* in comments. // that already has lots of edges out of it. e.g., .* in comments.
// print("Target for:" + str(s) + " and:" + str(t)) // print("Target for:" + str(s) + " and:" + str(t))
var target = this.getExistingTargetState(s, t) var target = this.getExistingTarGetState(s, t)
// print("Existing:" + str(target)) // print("Existing:" + str(target))
if target == nil { if target == nil {
target = this.computeTargetState(input, s, t) target = this.computeTarGetState(input, s, t)
// print("Computed:" + str(target)) // print("Computed:" + str(target))
} }
if target == ATNSimulatorERROR { if target == ATNSimulatorERROR {
@ -227,7 +227,7 @@ func (this *LexerATNSimulator) execATN(input CharStream, ds0 *DFAState) int {
// @return The existing target DFA state for the given input symbol // @return The existing target DFA state for the given input symbol
// {@code t}, or {@code nil} if the target state for this edge is not // {@code t}, or {@code nil} if the target state for this edge is not
// already cached // already cached
func (this *LexerATNSimulator) getExistingTargetState(s *DFAState, t int) *DFAState { func (this *LexerATNSimulator) getExistingTarGetState(s *DFAState, t int) *DFAState {
if s.edges == nil || t < LexerATNSimulatorMIN_DFA_EDGE || t > LexerATNSimulatorMAX_DFA_EDGE { if s.edges == nil || t < LexerATNSimulatorMIN_DFA_EDGE || t > LexerATNSimulatorMAX_DFA_EDGE {
return nil return nil
} }
@ -252,7 +252,7 @@ func (this *LexerATNSimulator) getExistingTargetState(s *DFAState, t int) *DFASt
// @return The computed target DFA state for the given input symbol // @return The computed target DFA state for the given input symbol
// {@code t}. If {@code t} does not lead to a valid DFA state, this method // {@code t}. If {@code t} does not lead to a valid DFA state, this method
// returns {@link //ERROR}. // returns {@link //ERROR}.
func (this *LexerATNSimulator) computeTargetState(input CharStream, s *DFAState, t int) *DFAState { func (this *LexerATNSimulator) computeTarGetState(input CharStream, s *DFAState, t int) *DFAState {
var reach = NewOrderedATNConfigSet() var reach = NewOrderedATNConfigSet()
// if we don't find an existing DFA state // if we don't find an existing DFA state
// Fill reach starting from closure, following t transitions // Fill reach starting from closure, following t transitions
@ -264,7 +264,7 @@ func (this *LexerATNSimulator) computeTargetState(input CharStream, s *DFAState,
// cause a failover from DFA later. // cause a failover from DFA later.
this.addDFAEdge(s, t, ATNSimulatorERROR, nil) this.addDFAEdge(s, t, ATNSimulatorERROR, nil)
} }
// stop when we can't match any more char // stop when we can't Match any more char
return ATNSimulatorERROR return ATNSimulatorERROR
} }
// Add an edge from s to target DFA found/created for reach // Add an edge from s to target DFA found/created for reach
@ -300,10 +300,10 @@ func (this *LexerATNSimulator) getReachableConfigSet(input CharStream, closure *
continue continue
} }
if LexerATNSimulatordebug { if LexerATNSimulatordebug {
fmt.Printf("testing %s at %s\n", this.getTokenName(t), cfg.toString()) // this.recog, true)) fmt.Printf("testing %s at %s\n", this.GetTokenName(t), cfg.toString()) // this.recog, true))
} }
for j := 0; j < len(cfg.getState().getTransitions()); j++ { for j := 0; j < len(cfg.GetState().getTransitions()); j++ {
var trans = cfg.getState().getTransitions()[j] // for each transition var trans = cfg.GetState().getTransitions()[j] // for each transition
var target = this.getReachableTarget(trans, t) var target = this.getReachableTarget(trans, t)
if target != nil { if target != nil {
var lexerActionExecutor = cfg.(*LexerATNConfig).lexerActionExecutor var lexerActionExecutor = cfg.(*LexerATNConfig).lexerActionExecutor
@ -337,7 +337,7 @@ func (this *LexerATNSimulator) accept(input CharStream, lexerActionExecutor *Lex
} }
func (this *LexerATNSimulator) getReachableTarget(trans ITransition, t int) IATNState { func (this *LexerATNSimulator) getReachableTarget(trans ITransition, t int) IATNState {
if trans.matches(t, 0, 0xFFFE) { if trans.Matches(t, 0, 0xFFFE) {
return trans.getTarget() return trans.getTarget()
} else { } else {
return nil return nil
@ -391,7 +391,7 @@ func (this *LexerATNSimulator) closure(input CharStream, config *LexerATNConfig,
if config.context != nil && !config.context.isEmpty() { if config.context != nil && !config.context.isEmpty() {
for i := 0; i < config.context.length(); i++ { for i := 0; i < config.context.length(); i++ {
if config.context.getReturnState(i) != PredictionContextEMPTY_RETURN_STATE { if config.context.getReturnState(i) != PredictionContextEMPTY_RETURN_STATE {
var newContext = config.context.getParent(i) // "pop" return state var newContext = config.context.GetParent(i) // "pop" return state
var returnState = this.atn.states[config.context.getReturnState(i)] var returnState = this.atn.states[config.context.getReturnState(i)]
cfg := NewLexerATNConfig2(config, returnState, newContext) cfg := NewLexerATNConfig2(config, returnState, newContext)
currentAltReachedAcceptState = this.closure(input, cfg, configs, currentAltReachedAcceptState, speculative, treatEofAsEpsilon) currentAltReachedAcceptState = this.closure(input, cfg, configs, currentAltReachedAcceptState, speculative, treatEofAsEpsilon)
@ -426,7 +426,7 @@ func (this *LexerATNSimulator) getEpsilonTarget(input CharStream, config *LexerA
if trans.getSerializationType() == TransitionRULE { if trans.getSerializationType() == TransitionRULE {
rt := trans.(*RuleTransition) rt := trans.(*RuleTransition)
var newContext = SingletonPredictionContextcreate(config.context, rt.followState.getStateNumber()) var newContext = SingletonPredictionContextcreate(config.context, rt.followState.GetStateNumber())
cfg = NewLexerATNConfig2(config, trans.getTarget(), newContext) cfg = NewLexerATNConfig2(config, trans.getTarget(), newContext)
} else if trans.getSerializationType() == TransitionPRECEDENCE { } else if trans.getSerializationType() == TransitionPRECEDENCE {
@ -485,7 +485,7 @@ func (this *LexerATNSimulator) getEpsilonTarget(input CharStream, config *LexerA
trans.getSerializationType() == TransitionRANGE || trans.getSerializationType() == TransitionRANGE ||
trans.getSerializationType() == TransitionSET { trans.getSerializationType() == TransitionSET {
if treatEofAsEpsilon { if treatEofAsEpsilon {
if trans.matches(TokenEOF, 0, 0xFFFF) { if trans.Matches(TokenEOF, 0, 0xFFFF) {
cfg = NewLexerATNConfig4(config, trans.getTarget()) cfg = NewLexerATNConfig4(config, trans.getTarget())
} }
} }
@ -496,9 +496,9 @@ func (this *LexerATNSimulator) getEpsilonTarget(input CharStream, config *LexerA
// Evaluate a predicate specified in the lexer. // Evaluate a predicate specified in the lexer.
// //
// <p>If {@code speculative} is {@code true}, this method was called before // <p>If {@code speculative} is {@code true}, this method was called before
// {@link //consume} for the matched character. This method should call // {@link //consume} for the Matched character. This method should call
// {@link //consume} before evaluating the predicate to ensure position // {@link //consume} before evaluating the predicate to ensure position
// sensitive values, including {@link Lexer//getText}, {@link Lexer//getLine}, // sensitive values, including {@link Lexer//GetText}, {@link Lexer//getLine},
// and {@link Lexer//getcolumn}, properly reflect the current // and {@link Lexer//getcolumn}, properly reflect the current
// lexer state. This method should restore {@code input} and the simulator // lexer state. This method should restore {@code input} and the simulator
// to the original state before returning (i.e. undo the actions made by the // to the original state before returning (i.e. undo the actions made by the
@ -519,7 +519,7 @@ func (this *LexerATNSimulator) evaluatePredicate(input CharStream, ruleIndex, pr
return true return true
} }
if !speculative { if !speculative {
return this.recog.sempred(nil, ruleIndex, predIndex) return this.recog.Sempred(nil, ruleIndex, predIndex)
} }
var savedcolumn = this.column var savedcolumn = this.column
var savedLine = this.line var savedLine = this.line
@ -534,7 +534,7 @@ func (this *LexerATNSimulator) evaluatePredicate(input CharStream, ruleIndex, pr
}() }()
this.consume(input) this.consume(input)
return this.recog.sempred(nil, ruleIndex, predIndex) return this.recog.Sempred(nil, ruleIndex, predIndex)
} }
func (this *LexerATNSimulator) captureSimState(settings *SimState, input CharStream, dfaState *DFAState) { func (this *LexerATNSimulator) captureSimState(settings *SimState, input CharStream, dfaState *DFAState) {
@ -550,7 +550,7 @@ func (this *LexerATNSimulator) addDFAEdge(from_ *DFAState, tk int, to *DFAState,
// marker indicating dynamic predicate evaluation makes this edge // marker indicating dynamic predicate evaluation makes this edge
// dependent on the specific input sequence, so the static edge in the // dependent on the specific input sequence, so the static edge in the
// DFA should be omitted. The target DFAState is still created since // DFA should be omitted. The target DFAState is still created since
// execATN has the ability to resynchronize with the DFA state cache // execATN has the ability to reSynchronize with the DFA state cache
// following the predicate evaluation step. // following the predicate evaluation step.
// //
// TJP notes: next time through the DFA, we see a pred again and eval. // TJP notes: next time through the DFA, we see a pred again and eval.
@ -595,7 +595,7 @@ func (this *LexerATNSimulator) addDFAState(configs *ATNConfigSet) *DFAState {
for i := 0; i < len(configs.configs); i++ { for i := 0; i < len(configs.configs); i++ {
var cfg = configs.configs[i] var cfg = configs.configs[i]
_, ok := cfg.getState().(*RuleStopState) _, ok := cfg.GetState().(*RuleStopState)
if ok { if ok {
firstConfigWithRuleStopState = cfg firstConfigWithRuleStopState = cfg
@ -605,19 +605,19 @@ func (this *LexerATNSimulator) addDFAState(configs *ATNConfigSet) *DFAState {
if firstConfigWithRuleStopState != nil { if firstConfigWithRuleStopState != nil {
proposed.isAcceptState = true proposed.isAcceptState = true
proposed.lexerActionExecutor = firstConfigWithRuleStopState.(*LexerATNConfig).lexerActionExecutor proposed.lexerActionExecutor = firstConfigWithRuleStopState.(*LexerATNConfig).lexerActionExecutor
proposed.prediction = this.atn.ruleToTokenType[firstConfigWithRuleStopState.getState().getRuleIndex()] proposed.prediction = this.atn.ruleToTokenType[firstConfigWithRuleStopState.GetState().getRuleIndex()]
} }
var hash = proposed.hashString() var hash = proposed.hashString()
var dfa = this.decisionToDFA[this.mode] var dfa = this.decisionToDFA[this.mode]
var existing = dfa.getStates()[hash] var existing = dfa.GetStates()[hash]
if existing != nil { if existing != nil {
return existing return existing
} }
var newState = proposed var newState = proposed
newState.stateNumber = len(dfa.getStates()) newState.stateNumber = len(dfa.GetStates())
configs.setReadonly(true) configs.setReadonly(true)
newState.configs = configs newState.configs = configs
dfa.getStates()[hash] = newState dfa.GetStates()[hash] = newState
return newState return newState
} }
@ -625,10 +625,10 @@ func (this *LexerATNSimulator) getDFA(mode int) *DFA {
return this.decisionToDFA[mode] return this.decisionToDFA[mode]
} }
// Get the text matched so far for the current token. // Get the text Matched so far for the current token.
func (this *LexerATNSimulator) getText(input CharStream) string { func (this *LexerATNSimulator) GetText(input CharStream) string {
// index is first lookahead char, don't include. // index is first lookahead char, don't include.
return input.getTextFromInterval(NewInterval(this.startIndex, input.index()-1)) return input.GetTextFromInterval(NewInterval(this.startIndex, input.index()-1))
} }
func (this *LexerATNSimulator) consume(input CharStream) { func (this *LexerATNSimulator) consume(input CharStream) {
@ -639,10 +639,10 @@ func (this *LexerATNSimulator) consume(input CharStream) {
} else { } else {
this.column += 1 this.column += 1
} }
input.consume() input.Consume()
} }
func (this *LexerATNSimulator) getTokenName(tt int) string { func (this *LexerATNSimulator) GetTokenName(tt int) string {
if tt == -1 { if tt == -1 {
return "EOF" return "EOF"
} else { } else {

View File

@ -289,7 +289,7 @@ func NewLexerCustomAction(ruleIndex, actionIndex int) *LexerCustomAction {
// <p>Custom actions are implemented by calling {@link Lexer//action} with the // <p>Custom actions are implemented by calling {@link Lexer//action} with the
// appropriate rule and action indexes.</p> // appropriate rule and action indexes.</p>
func (this *LexerCustomAction) execute(lexer ILexer) { func (this *LexerCustomAction) execute(lexer ILexer) {
lexer.action(nil, this.ruleIndex, this.actionIndex) lexer.Action(nil, this.ruleIndex, this.actionIndex)
} }
func (this *LexerCustomAction) hashString() string { func (this *LexerCustomAction) hashString() string {

View File

@ -1,7 +1,7 @@
package antlr4 package antlr4
// Represents an executor for a sequence of lexer actions which traversed during // Represents an executor for a sequence of lexer actions which traversed during
// the matching operation of a lexer rule (token). // the Matching operation of a lexer rule (token).
// //
// <p>The executor tracks position information for position-dependent lexer actions // <p>The executor tracks position information for position-dependent lexer actions
// efficiently, ensuring that actions appearing only at the end of the rule do // efficiently, ensuring that actions appearing only at the end of the rule do
@ -40,7 +40,7 @@ func NewLexerActionExecutor(lexerActions []ILexerAction) *LexerActionExecutor {
// {@code lexerAction}. // {@code lexerAction}.
// //
// @param lexerActionExecutor The executor for actions already traversed by // @param lexerActionExecutor The executor for actions already traversed by
// the lexer while matching a token within a particular // the lexer while Matching a token within a particular
// {@link LexerATNConfig}. If this is {@code nil}, the method behaves as // {@link LexerATNConfig}. If this is {@code nil}, the method behaves as
// though it were an empty executor. // though it were an empty executor.
// @param lexerAction The lexer action to execute after the actions // @param lexerAction The lexer action to execute after the actions
@ -67,10 +67,10 @@ func LexerActionExecutorappend(lexerActionExecutor *LexerActionExecutor, lexerAc
// {@link IntStream//seek} on the input {@link CharStream} to set the input // {@link IntStream//seek} on the input {@link CharStream} to set the input
// position to the <em>end</em> of the current token. This behavior provides // position to the <em>end</em> of the current token. This behavior provides
// for efficient DFA representation of lexer actions which appear at the end // for efficient DFA representation of lexer actions which appear at the end
// of a lexer rule, even when the lexer rule matches a variable number of // of a lexer rule, even when the lexer rule Matches a variable number of
// characters.</p> // characters.</p>
// //
// <p>Prior to traversing a match transition in the ATN, the current offset // <p>Prior to traversing a Match transition in the ATN, the current offset
// from the token start index is assigned to all position-dependent lexer // from the token start index is assigned to all position-dependent lexer
// actions which have not already been assigned a fixed offset. By storing // actions which have not already been assigned a fixed offset. By storing
// the offsets relative to the token start index, the DFA representation of // the offsets relative to the token start index, the DFA representation of

View File

@ -32,20 +32,23 @@ func (this *TraceListener) exitEveryRule(ctx IParserRuleContext) {
type IParser interface { type IParser interface {
IRecognizer IRecognizer
getInterpreter() *ParserATNSimulator GetInterpreter() *ParserATNSimulator
GetErrorHandler() IErrorStrategy
GetTokenStream() TokenStream
GetTokenFactory() TokenFactory
GetParserRuleContext() IParserRuleContext
getInputStream() CharStream getInputStream() CharStream
consume() *Token Consume() *Token
getCurrentToken() *Token getCurrentToken() *Token
getTokenStream() TokenStream
getTokenFactory() TokenFactory
getLiteralNames() []string getLiteralNames() []string
getSymbolicNames() []string getSymbolicNames() []string
getExpectedTokens() *IntervalSet getExpectedTokens() *IntervalSet
getParserRuleContext() IParserRuleContext
notifyErrorListeners(msg string, offendingToken *Token, err IRecognitionException) notifyErrorListeners(msg string, offendingToken *Token, err IRecognitionException)
isExpectedToken(symbol int) bool isExpectedToken(symbol int) bool
getPrecedence() int getPrecedence() int
getRuleInvocationStack(IParserRuleContext) []string getRuleInvocationStack(IParserRuleContext) []string
} }
type Parser struct { type Parser struct {
@ -133,30 +136,34 @@ func (p *Parser) reset() {
} }
} }
func (p *Parser) GetErrorHandler() IErrorStrategy {
return p._errHandler
}
// Match current input symbol against {@code ttype}. If the symbol type // Match current input symbol against {@code ttype}. If the symbol type
// matches, {@link ANTLRErrorStrategy//reportMatch} and {@link //consume} are // Matches, {@link ANTLRErrorStrategy//reportMatch} and {@link //consume} are
// called to complete the match process. // called to complete the Match process.
// //
// <p>If the symbol type does not match, // <p>If the symbol type does not Match,
// {@link ANTLRErrorStrategy//recoverInline} is called on the current error // {@link ANTLRErrorStrategy//recoverInline} is called on the current error
// strategy to attempt recovery. If {@link //getBuildParseTree} is // strategy to attempt recovery. If {@link //getBuildParseTree} is
// {@code true} and the token index of the symbol returned by // {@code true} and the token index of the symbol returned by
// {@link ANTLRErrorStrategy//recoverInline} is -1, the symbol is added to // {@link ANTLRErrorStrategy//recoverInline} is -1, the symbol is added to
// the parse tree by calling {@link ParserRuleContext//addErrorNode}.</p> // the parse tree by calling {@link ParserRuleContext//addErrorNode}.</p>
// //
// @param ttype the token type to match // @param ttype the token type to Match
// @return the matched symbol // @return the Matched symbol
// @panics RecognitionException if the current input symbol did not match // @panics RecognitionException if the current input symbol did not Match
// {@code ttype} and the error strategy could not recover from the // {@code ttype} and the error strategy could not recover from the
// mismatched symbol // misMatched symbol
func (p *Parser) match(ttype int) *Token { func (p *Parser) Match(ttype int) *Token {
var t = p.getCurrentToken() var t = p.getCurrentToken()
if t.tokenType == ttype { if t.tokenType == ttype {
p._errHandler.reportMatch(p) p._errHandler.reportMatch(p)
p.consume() p.Consume()
} else { } else {
t = p._errHandler.recoverInline(p) t = p._errHandler.RecoverInline(p)
if p.buildParseTrees && t.tokenIndex == -1 { if p.buildParseTrees && t.tokenIndex == -1 {
// we must have conjured up a Newtoken during single token // we must have conjured up a Newtoken during single token
// insertion // insertion
@ -167,29 +174,29 @@ func (p *Parser) match(ttype int) *Token {
return t return t
} }
// Match current input symbol as a wildcard. If the symbol type matches // Match current input symbol as a wildcard. If the symbol type Matches
// (i.e. has a value greater than 0), {@link ANTLRErrorStrategy//reportMatch} // (i.e. has a value greater than 0), {@link ANTLRErrorStrategy//reportMatch}
// and {@link //consume} are called to complete the match process. // and {@link //consume} are called to complete the Match process.
// //
// <p>If the symbol type does not match, // <p>If the symbol type does not Match,
// {@link ANTLRErrorStrategy//recoverInline} is called on the current error // {@link ANTLRErrorStrategy//recoverInline} is called on the current error
// strategy to attempt recovery. If {@link //getBuildParseTree} is // strategy to attempt recovery. If {@link //getBuildParseTree} is
// {@code true} and the token index of the symbol returned by // {@code true} and the token index of the symbol returned by
// {@link ANTLRErrorStrategy//recoverInline} is -1, the symbol is added to // {@link ANTLRErrorStrategy//recoverInline} is -1, the symbol is added to
// the parse tree by calling {@link ParserRuleContext//addErrorNode}.</p> // the parse tree by calling {@link ParserRuleContext//addErrorNode}.</p>
// //
// @return the matched symbol // @return the Matched symbol
// @panics RecognitionException if the current input symbol did not match // @panics RecognitionException if the current input symbol did not Match
// a wildcard and the error strategy could not recover from the mismatched // a wildcard and the error strategy could not recover from the misMatched
// symbol // symbol
func (p *Parser) matchWildcard() *Token { func (p *Parser) MatchWildcard() *Token {
var t = p.getCurrentToken() var t = p.getCurrentToken()
if t.tokenType > 0 { if t.tokenType > 0 {
p._errHandler.reportMatch(p) p._errHandler.reportMatch(p)
p.consume() p.Consume()
} else { } else {
t = p._errHandler.recoverInline(p) t = p._errHandler.RecoverInline(p)
if p.buildParseTrees && t.tokenIndex == -1 { if p.buildParseTrees && t.tokenIndex == -1 {
// we must have conjured up a Newtoken during single token // we must have conjured up a Newtoken during single token
// insertion // insertion
@ -200,7 +207,7 @@ func (p *Parser) matchWildcard() *Token {
return t return t
} }
func (p *Parser) getParserRuleContext() IParserRuleContext { func (p *Parser) GetParserRuleContext() IParserRuleContext {
return p._ctx return p._ctx
} }
@ -280,7 +287,7 @@ func (p *Parser) triggerEnterRuleEvent() {
var ctx = p._ctx var ctx = p._ctx
for _, listener := range p._parseListeners { for _, listener := range p._parseListeners {
listener.enterEveryRule(ctx) listener.enterEveryRule(ctx)
ctx.enterRule(listener) ctx.EnterRule(listener)
} }
} }
} }
@ -312,7 +319,7 @@ func (this *Parser) getSymbolicNames() []string {
return this.symbolicNames return this.symbolicNames
} }
func (this *Parser) getInterpreter() *ParserATNSimulator { func (this *Parser) GetInterpreter() *ParserATNSimulator {
return this.Interpreter return this.Interpreter
} }
@ -320,13 +327,13 @@ func (this *Parser) getATN() *ATN {
return this.Interpreter.atn return this.Interpreter.atn
} }
func (p *Parser) getTokenFactory() TokenFactory { func (p *Parser) GetTokenFactory() TokenFactory {
return p._input.getTokenSource().getTokenFactory() return p._input.GetTokenSource().GetTokenFactory()
} }
// Tell our token source and error strategy about a Newway to create tokens.// // Tell our token source and error strategy about a Newway to create tokens.//
func (p *Parser) setTokenFactory(factory TokenFactory) { func (p *Parser) setTokenFactory(factory TokenFactory) {
p._input.getTokenSource().setTokenFactory(factory) p._input.GetTokenSource().setTokenFactory(factory)
} }
// The ATN with bypass alternatives is expensive to create so we create it // The ATN with bypass alternatives is expensive to create so we create it
@ -361,7 +368,7 @@ func (p *Parser) getATNWithBypassAlts() {
// ParseTree t = parser.expr() // ParseTree t = parser.expr()
// ParseTreePattern p = parser.compileParseTreePattern("&ltID&gt+0", // ParseTreePattern p = parser.compileParseTreePattern("&ltID&gt+0",
// MyParser.RULE_expr) // MyParser.RULE_expr)
// ParseTreeMatch m = p.match(t) // ParseTreeMatch m = p.Match(t)
// String id = m.get("ID") // String id = m.get("ID")
// </pre> // </pre>
@ -370,8 +377,8 @@ func (p *Parser) compileParseTreePattern(pattern, patternRuleIndex, lexer ILexer
panic("NewParseTreePatternMatcher not implemented!") panic("NewParseTreePatternMatcher not implemented!")
// //
// if (lexer == nil) { // if (lexer == nil) {
// if (p.getTokenStream() != nil) { // if (p.GetTokenStream() != nil) {
// var tokenSource = p.getTokenStream().getTokenSource() // var tokenSource = p.GetTokenStream().GetTokenSource()
// if _, ok := tokenSource.(ILexer); ok { // if _, ok := tokenSource.(ILexer); ok {
// lexer = tokenSource // lexer = tokenSource
// } // }
@ -386,14 +393,14 @@ func (p *Parser) compileParseTreePattern(pattern, patternRuleIndex, lexer ILexer
} }
func (p *Parser) getInputStream() CharStream { func (p *Parser) getInputStream() CharStream {
return p.getTokenStream().(CharStream) return p.GetTokenStream().(CharStream)
} }
func (p *Parser) setInputStream(input TokenStream) { func (p *Parser) setInputStream(input TokenStream) {
p.setTokenStream(input) p.setTokenStream(input)
} }
func (p *Parser) getTokenStream() TokenStream { func (p *Parser) GetTokenStream() TokenStream {
return p._input return p._input
} }
@ -422,10 +429,10 @@ func (p *Parser) notifyErrorListeners(msg string, offendingToken *Token, err IRe
listener.syntaxError(p, offendingToken, line, column, msg, err) listener.syntaxError(p, offendingToken, line, column, msg, err)
} }
func (p *Parser) consume() *Token { func (p *Parser) Consume() *Token {
var o = p.getCurrentToken() var o = p.getCurrentToken()
if o.tokenType != TokenEOF { if o.tokenType != TokenEOF {
p.getInputStream().consume() p.getInputStream().Consume()
} }
var hasListener = p._parseListeners != nil && len(p._parseListeners) > 0 var hasListener = p._parseListeners != nil && len(p._parseListeners) > 0
if p.buildParseTrees || hasListener { if p.buildParseTrees || hasListener {
@ -453,12 +460,12 @@ func (p *Parser) consume() *Token {
func (p *Parser) addContextToParseTree() { func (p *Parser) addContextToParseTree() {
// add current context to parent if we have a parent // add current context to parent if we have a parent
if p._ctx.getParent() != nil { if p._ctx.GetParent() != nil {
p._ctx.getParent().setChildren(append(p._ctx.getParent().getChildren(), p._ctx)) p._ctx.GetParent().setChildren(append(p._ctx.GetParent().getChildren(), p._ctx))
} }
} }
func (p *Parser) enterRule(localctx IParserRuleContext, state, ruleIndex int) { func (p *Parser) EnterRule(localctx IParserRuleContext, state, ruleIndex int) {
p.state = state p.state = state
p._ctx = localctx p._ctx = localctx
p._ctx.setStart(p._input.LT(1)) p._ctx.setStart(p._input.LT(1))
@ -477,16 +484,16 @@ func (p *Parser) exitRule() {
p.triggerExitRuleEvent() p.triggerExitRuleEvent()
} }
p.state = p._ctx.getInvokingState() p.state = p._ctx.getInvokingState()
p._ctx = p._ctx.getParent().(IParserRuleContext) p._ctx = p._ctx.GetParent().(IParserRuleContext)
} }
func (p *Parser) enterOuterAlt(localctx IParserRuleContext, altNum int) { func (p *Parser) EnterOuterAlt(localctx IParserRuleContext, altNum int) {
// if we have Newlocalctx, make sure we replace existing ctx // if we have Newlocalctx, make sure we replace existing ctx
// that is previous child of parse tree // that is previous child of parse tree
if p.buildParseTrees && p._ctx != localctx { if p.buildParseTrees && p._ctx != localctx {
if p._ctx.getParent() != nil { if p._ctx.GetParent() != nil {
p._ctx.getParent().(IParserRuleContext).removeLastChild() p._ctx.GetParent().(IParserRuleContext).removeLastChild()
p._ctx.getParent().(IParserRuleContext).addChild(localctx) p._ctx.GetParent().(IParserRuleContext).addChild(localctx)
} }
} }
p._ctx = localctx p._ctx = localctx
@ -505,7 +512,7 @@ func (p *Parser) getPrecedence() int {
} }
} }
func (p *Parser) enterRecursionRule(localctx IParserRuleContext, state, ruleIndex, precedence int) { func (p *Parser) EnterRecursionRule(localctx IParserRuleContext, state, ruleIndex, precedence int) {
p.state = state p.state = state
p._precedenceStack.Push(precedence) p._precedenceStack.Push(precedence)
p._ctx = localctx p._ctx = localctx
@ -517,7 +524,7 @@ func (p *Parser) enterRecursionRule(localctx IParserRuleContext, state, ruleInde
} }
// //
// Like {@link //enterRule} but for recursive rules. // Like {@link //EnterRule} but for recursive rules.
func (p *Parser) pushNewRecursionContext(localctx IParserRuleContext, state, ruleIndex int) { func (p *Parser) pushNewRecursionContext(localctx IParserRuleContext, state, ruleIndex int) {
var previous = p._ctx var previous = p._ctx
@ -536,7 +543,7 @@ func (p *Parser) pushNewRecursionContext(localctx IParserRuleContext, state, rul
} }
} }
func (p *Parser) unrollRecursionContexts(parentCtx IParserRuleContext) { func (p *Parser) UnrollRecursionContexts(parentCtx IParserRuleContext) {
p._precedenceStack.Pop() p._precedenceStack.Pop()
p._ctx.setStop(p._input.LT(-1)) p._ctx.setStop(p._input.LT(-1))
var retCtx = p._ctx // save current ctx (return value) var retCtx = p._ctx // save current ctx (return value)
@ -544,7 +551,7 @@ func (p *Parser) unrollRecursionContexts(parentCtx IParserRuleContext) {
if p._parseListeners != nil { if p._parseListeners != nil {
for p._ctx != parentCtx { for p._ctx != parentCtx {
p.triggerExitRuleEvent() p.triggerExitRuleEvent()
p._ctx = p._ctx.getParent().(IParserRuleContext) p._ctx = p._ctx.GetParent().(IParserRuleContext)
} }
} else { } else {
p._ctx = parentCtx p._ctx = parentCtx
@ -563,12 +570,12 @@ func (p *Parser) getInvokingContext(ruleIndex int) IParserRuleContext {
if ctx.getRuleIndex() == ruleIndex { if ctx.getRuleIndex() == ruleIndex {
return ctx return ctx
} }
ctx = ctx.getParent().(IParserRuleContext) ctx = ctx.GetParent().(IParserRuleContext)
} }
return nil return nil
} }
func (p *Parser) precpred(localctx IRuleContext, precedence int) bool { func (p *Parser) Precpred(localctx IRuleContext, precedence int) bool {
return precedence >= p._precedenceStack[len(p._precedenceStack)-1] return precedence >= p._precedenceStack[len(p._precedenceStack)-1]
} }
@ -609,7 +616,7 @@ func (p *Parser) isExpectedToken(symbol int) bool {
if following.contains(symbol) { if following.contains(symbol) {
return true return true
} }
ctx = ctx.getParent().(IParserRuleContext) ctx = ctx.GetParent().(IParserRuleContext)
} }
if following.contains(TokenEpsilon) && symbol == TokenEOF { if following.contains(TokenEpsilon) && symbol == TokenEOF {
return true return true
@ -619,7 +626,7 @@ func (p *Parser) isExpectedToken(symbol int) bool {
} }
// Computes the set of input symbols which could follow the current parser // Computes the set of input symbols which could follow the current parser
// state and context, as given by {@link //getState} and {@link //getContext}, // state and context, as given by {@link //GetState} and {@link //getContext},
// respectively. // respectively.
// //
// @see ATN//getExpectedTokens(int, RuleContext) // @see ATN//getExpectedTokens(int, RuleContext)
@ -664,7 +671,7 @@ func (this *Parser) getRuleInvocationStack(p IParserRuleContext) []string {
} else { } else {
stack = append(stack, this.getRuleNames()[ruleIndex]) stack = append(stack, this.getRuleNames()[ruleIndex])
} }
p = p.getParent().(IParserRuleContext) p = p.GetParent().(IParserRuleContext)
} }
return stack return stack
} }
@ -705,7 +712,7 @@ func (p *Parser) getSourceName() string {
} }
// During a parse is sometimes useful to listen in on the rule entry and exit // During a parse is sometimes useful to listen in on the rule entry and exit
// events as well as token matches. p.is for quick and dirty debugging. // events as well as token Matches. p.is for quick and dirty debugging.
// //
func (p *Parser) setTrace(trace *TraceListener) { func (p *Parser) setTrace(trace *TraceListener) {
if trace == nil { if trace == nil {

View File

@ -43,7 +43,7 @@ func (this *ParserATNSimulator) InitParserATNSimulator(parser IParser, atn *ATN,
this._dfa = nil this._dfa = nil
// Each prediction operation uses a cache for merge of prediction contexts. // Each prediction operation uses a cache for merge of prediction contexts.
// Don't keep around as it wastes huge amounts of memory. DoubleKeyMap // Don't keep around as it wastes huge amounts of memory. DoubleKeyMap
// isn't synchronized but we're ok since two threads shouldn't reuse same // isn't Synchronized but we're ok since two threads shouldn't reuse same
// parser/atnsim object because it can only handle one input at a time. // parser/atnsim object because it can only handle one input at a time.
// This maps graphs a and b to merged result c. (a,b)&rarrc. We can avoid // This maps graphs a and b to merged result c. (a,b)&rarrc. We can avoid
// the merge if we ever see a and b again. Note that (b,a)&rarrc should // the merge if we ever see a and b again. Note that (b,a)&rarrc should
@ -61,10 +61,10 @@ var ParserATNSimulatorprototyperetry_debug = false
func (this *ParserATNSimulator) reset() { func (this *ParserATNSimulator) reset() {
} }
func (this *ParserATNSimulator) adaptivePredict(input TokenStream, decision int, outerContext IParserRuleContext) int { func (this *ParserATNSimulator) AdaptivePredict(input TokenStream, decision int, outerContext IParserRuleContext) int {
if ParserATNSimulatorprototypedebug || ParserATNSimulatorprototypedebug_list_atn_decisions { if ParserATNSimulatorprototypedebug || ParserATNSimulatorprototypedebug_list_atn_decisions {
fmt.Println("adaptivePredict decision " + strconv.Itoa(decision) + fmt.Println("AdaptivePredict decision " + strconv.Itoa(decision) +
" exec LA(1)==" + this.getLookaheadName(input) + " exec LA(1)==" + this.getLookaheadName(input) +
" line " + strconv.Itoa(input.LT(1).line) + ":" + " line " + strconv.Itoa(input.LT(1).line) + ":" +
strconv.Itoa(input.LT(1).column)) strconv.Itoa(input.LT(1).column))
@ -190,9 +190,9 @@ func (this *ParserATNSimulator) execATN(dfa *DFA, s0 *DFAState, input TokenStrea
} }
var t = input.LA(1) var t = input.LA(1)
for true { // for more work for true { // for more work
var D = this.getExistingTargetState(previousD, t) var D = this.getExistingTarGetState(previousD, t)
if D == nil { if D == nil {
D = this.computeTargetState(dfa, previousD, t) D = this.computeTarGetState(dfa, previousD, t)
} }
if D == ATNSimulatorERROR { if D == ATNSimulatorERROR {
// if any configs in previous dipped into outer context, that // if any configs in previous dipped into outer context, that
@ -266,7 +266,7 @@ func (this *ParserATNSimulator) execATN(dfa *DFA, s0 *DFAState, input TokenStrea
previousD = D previousD = D
if t != TokenEOF { if t != TokenEOF {
input.consume() input.Consume()
t = input.LA(1) t = input.LA(1)
} }
} }
@ -284,7 +284,7 @@ func (this *ParserATNSimulator) execATN(dfa *DFA, s0 *DFAState, input TokenStrea
// {@code t}, or {@code nil} if the target state for this edge is not // {@code t}, or {@code nil} if the target state for this edge is not
// already cached // already cached
func (this *ParserATNSimulator) getExistingTargetState(previousD *DFAState, t int) *DFAState { func (this *ParserATNSimulator) getExistingTarGetState(previousD *DFAState, t int) *DFAState {
var edges = previousD.edges var edges = previousD.edges
if edges == nil { if edges == nil {
return nil return nil
@ -304,7 +304,7 @@ func (this *ParserATNSimulator) getExistingTargetState(previousD *DFAState, t in
// {@code t}. If {@code t} does not lead to a valid DFA state, this method // {@code t}. If {@code t} does not lead to a valid DFA state, this method
// returns {@link //ERROR}. // returns {@link //ERROR}.
func (this *ParserATNSimulator) computeTargetState(dfa *DFA, previousD *DFAState, t int) *DFAState { func (this *ParserATNSimulator) computeTarGetState(dfa *DFA, previousD *DFAState, t int) *DFAState {
var reach = this.computeReachSet(previousD.configs, t, false) var reach = this.computeReachSet(previousD.configs, t, false)
if reach == nil { if reach == nil {
@ -435,7 +435,7 @@ func (this *ParserATNSimulator) execATNWithFullContext(dfa *DFA, D *DFAState, s0
} }
previous = reach previous = reach
if t != TokenEOF { if t != TokenEOF {
input.consume() input.Consume()
t = input.LA(1) t = input.LA(1)
} }
} }
@ -494,8 +494,8 @@ func (this *ParserATNSimulator) computeReachSet(closure *ATNConfigSet, t int, fu
// advantage of having a smaller intermediate set when calling closure. // advantage of having a smaller intermediate set when calling closure.
// //
// For full-context reach operations, separate handling is required to // For full-context reach operations, separate handling is required to
// ensure that the alternative matching the longest overall sequence is // ensure that the alternative Matching the longest overall sequence is
// chosen when multiple such configurations can match the input. // chosen when multiple such configurations can Match the input.
var skippedStopStates []*ATNConfig = nil var skippedStopStates []*ATNConfig = nil
@ -504,10 +504,10 @@ func (this *ParserATNSimulator) computeReachSet(closure *ATNConfigSet, t int, fu
var c = closure.configs[i] var c = closure.configs[i]
if ParserATNSimulatorprototypedebug { if ParserATNSimulatorprototypedebug {
fmt.Println("testing " + this.getTokenName(t) + " at " + c.toString()) fmt.Println("testing " + this.GetTokenName(t) + " at " + c.toString())
} }
_, ok := c.getState().(*RuleStopState) _, ok := c.GetState().(*RuleStopState)
if ok { if ok {
if fullCtx || t == TokenEOF { if fullCtx || t == TokenEOF {
@ -522,8 +522,8 @@ func (this *ParserATNSimulator) computeReachSet(closure *ATNConfigSet, t int, fu
continue continue
} }
for j := 0; j < len(c.getState().getTransitions()); j++ { for j := 0; j < len(c.GetState().getTransitions()); j++ {
var trans = c.getState().getTransitions()[j] var trans = c.GetState().getTransitions()[j]
var target = this.getReachableTarget(trans, t) var target = this.getReachableTarget(trans, t)
if target != nil { if target != nil {
var cfg = NewATNConfig4(c, target) var cfg = NewATNConfig4(c, target)
@ -539,7 +539,7 @@ func (this *ParserATNSimulator) computeReachSet(closure *ATNConfigSet, t int, fu
// This block optimizes the reach operation for intermediate sets which // This block optimizes the reach operation for intermediate sets which
// trivially indicate a termination state for the overall // trivially indicate a termination state for the overall
// adaptivePredict operation. // AdaptivePredict operation.
// //
// The conditions assume that intermediate // The conditions assume that intermediate
// contains all configurations relevant to the reach set, but this // contains all configurations relevant to the reach set, but this
@ -594,8 +594,8 @@ func (this *ParserATNSimulator) computeReachSet(closure *ATNConfigSet, t int, fu
// configuration. For full-context reach operations, these // configuration. For full-context reach operations, these
// configurations reached the end of the start rule, in which case we // configurations reached the end of the start rule, in which case we
// only add them back to reach if no configuration during the current // only add them back to reach if no configuration during the current
// closure operation reached such a state. This ensures adaptivePredict // closure operation reached such a state. This ensures AdaptivePredict
// chooses an alternative matching the longest overall sequence when // chooses an alternative Matching the longest overall sequence when
// multiple alternatives are viable. // multiple alternatives are viable.
// //
if skippedStopStates != nil && ((!fullCtx) || (!PredictionModehasConfigInRuleStopState(reach))) { if skippedStopStates != nil && ((!fullCtx) || (!PredictionModehasConfigInRuleStopState(reach))) {
@ -638,16 +638,16 @@ func (this *ParserATNSimulator) removeAllConfigsNotInRuleStopState(configs *ATNC
for i := 0; i < len(configs.configs); i++ { for i := 0; i < len(configs.configs); i++ {
var config = configs.configs[i] var config = configs.configs[i]
_, ok := config.getState().(*RuleStopState) _, ok := config.GetState().(*RuleStopState)
if ok { if ok {
result.add(config, this.mergeCache) result.add(config, this.mergeCache)
continue continue
} }
if lookToEndOfRule && config.getState().getEpsilonOnlyTransitions() { if lookToEndOfRule && config.GetState().getEpsilonOnlyTransitions() {
var nextTokens = this.atn.nextTokens(config.getState(), nil) var nextTokens = this.atn.nextTokens(config.GetState(), nil)
if nextTokens.contains(TokenEpsilon) { if nextTokens.contains(TokenEpsilon) {
var endOfRuleState = this.atn.ruleToStopState[config.getState().getRuleIndex()] var endOfRuleState = this.atn.ruleToStopState[config.GetState().getRuleIndex()]
result.add(NewATNConfig4(config, endOfRuleState), this.mergeCache) result.add(NewATNConfig4(config, endOfRuleState), this.mergeCache)
} }
} }
@ -740,7 +740,7 @@ func (this *ParserATNSimulator) applyPrecedenceFilter(configs *ATNConfigSet) *AT
// the configuration was eliminated // the configuration was eliminated
continue continue
} }
statesFromAlt1[config.getState().getStateNumber()] = config.getContext() statesFromAlt1[config.GetState().GetStateNumber()] = config.getContext()
if updatedContext != config.getSemanticContext() { if updatedContext != config.getSemanticContext() {
configSet.add(NewATNConfig2(config, updatedContext), this.mergeCache) configSet.add(NewATNConfig2(config, updatedContext), this.mergeCache)
} else { } else {
@ -757,7 +757,7 @@ func (this *ParserATNSimulator) applyPrecedenceFilter(configs *ATNConfigSet) *AT
// filter the prediction context for alternatives predicting alt>1 // filter the prediction context for alternatives predicting alt>1
// (basically a graph subtraction algorithm). // (basically a graph subtraction algorithm).
if !config.getPrecedenceFilterSuppressed() { if !config.getPrecedenceFilterSuppressed() {
var context = statesFromAlt1[config.getState().getStateNumber()] var context = statesFromAlt1[config.GetState().GetStateNumber()]
if context != nil && context.equals(config.getContext()) { if context != nil && context.equals(config.getContext()) {
// eliminated // eliminated
continue continue
@ -769,7 +769,7 @@ func (this *ParserATNSimulator) applyPrecedenceFilter(configs *ATNConfigSet) *AT
} }
func (this *ParserATNSimulator) getReachableTarget(trans ITransition, ttype int) IATNState { func (this *ParserATNSimulator) getReachableTarget(trans ITransition, ttype int) IATNState {
if trans.matches(ttype, 0, this.atn.maxTokenType) { if trans.Matches(ttype, 0, this.atn.maxTokenType) {
return trans.getTarget() return trans.getTarget()
} else { } else {
return nil return nil
@ -852,7 +852,7 @@ func (this *ParserATNSimulator) getPredicatePredictions(ambigAlts *BitSet, altTo
// the parser. Specifically, this could occur if the <em>only</em> configuration // the parser. Specifically, this could occur if the <em>only</em> configuration
// capable of successfully parsing to the end of the decision rule is // capable of successfully parsing to the end of the decision rule is
// blocked by a semantic predicate. By choosing this alternative within // blocked by a semantic predicate. By choosing this alternative within
// {@link //adaptivePredict} instead of panicing a // {@link //AdaptivePredict} instead of panicing a
// {@link NoViableAltException}, the resulting // {@link NoViableAltException}, the resulting
// {@link FailedPredicateException} in the parser will identify the specific // {@link FailedPredicateException} in the parser will identify the specific
// predicate which is preventing the parser from successfully parsing the // predicate which is preventing the parser from successfully parsing the
@ -865,9 +865,9 @@ func (this *ParserATNSimulator) getPredicatePredictions(ambigAlts *BitSet, altTo
// @param outerContext The is the \gamma_0 initial parser context from the paper // @param outerContext The is the \gamma_0 initial parser context from the paper
// or the parser stack at the instant before prediction commences. // or the parser stack at the instant before prediction commences.
// //
// @return The value to return from {@link //adaptivePredict}, or // @return The value to return from {@link //AdaptivePredict}, or
// {@link ATN//INVALID_ALT_NUMBER} if a suitable alternative was not // {@link ATN//INVALID_ALT_NUMBER} if a suitable alternative was not
// identified and {@link //adaptivePredict} should report an error instead. // identified and {@link //AdaptivePredict} should report an error instead.
// //
func (this *ParserATNSimulator) getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule(configs *ATNConfigSet, outerContext IParserRuleContext) int { func (this *ParserATNSimulator) getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule(configs *ATNConfigSet, outerContext IParserRuleContext) int {
var cfgs = this.splitAccordingToSemanticValidity(configs, outerContext) var cfgs = this.splitAccordingToSemanticValidity(configs, outerContext)
@ -892,7 +892,7 @@ func (this *ParserATNSimulator) getAltThatFinishedDecisionEntryRule(configs *ATN
for i := 0; i < len(configs.configs); i++ { for i := 0; i < len(configs.configs); i++ {
var c = configs.configs[i] var c = configs.configs[i]
_, ok := c.getState().(*RuleStopState) _, ok := c.GetState().(*RuleStopState)
if c.getReachesIntoOuterContext() > 0 || (ok && c.getContext().hasEmptyPath()) { if c.getReachesIntoOuterContext() > 0 || (ok && c.getContext().hasEmptyPath()) {
alts.addOne(c.getAlt()) alts.addOne(c.getAlt())
@ -995,7 +995,7 @@ func (this *ParserATNSimulator) closureCheckingStopState(config IATNConfig, conf
} }
} }
_, ok := config.getState().(*RuleStopState) _, ok := config.GetState().(*RuleStopState)
if ok { if ok {
// We hit rule end. If we have context info, use it // We hit rule end. If we have context info, use it
// run thru all possible stack tops in ctx // run thru all possible stack tops in ctx
@ -1003,19 +1003,19 @@ func (this *ParserATNSimulator) closureCheckingStopState(config IATNConfig, conf
for i := 0; i < config.getContext().length(); i++ { for i := 0; i < config.getContext().length(); i++ {
if config.getContext().getReturnState(i) == PredictionContextEMPTY_RETURN_STATE { if config.getContext().getReturnState(i) == PredictionContextEMPTY_RETURN_STATE {
if fullCtx { if fullCtx {
configs.add(NewATNConfig1(config, config.getState(), PredictionContextEMPTY), this.mergeCache) configs.add(NewATNConfig1(config, config.GetState(), PredictionContextEMPTY), this.mergeCache)
continue continue
} else { } else {
// we have no context info, just chase follow links (if greedy) // we have no context info, just chase follow links (if greedy)
if ParserATNSimulatorprototypedebug { if ParserATNSimulatorprototypedebug {
fmt.Println("FALLING off rule " + this.getRuleName(config.getState().getRuleIndex())) fmt.Println("FALLING off rule " + this.getRuleName(config.GetState().getRuleIndex()))
} }
this.closure_(config, configs, closureBusy, collectPredicates, fullCtx, depth, treatEofAsEpsilon) this.closure_(config, configs, closureBusy, collectPredicates, fullCtx, depth, treatEofAsEpsilon)
} }
continue continue
} }
returnState := this.atn.states[config.getContext().getReturnState(i)] returnState := this.atn.states[config.getContext().getReturnState(i)]
newContext := config.getContext().getParent(i) // "pop" return state newContext := config.getContext().GetParent(i) // "pop" return state
c := NewATNConfig5(returnState, config.getAlt(), newContext, config.getSemanticContext()) c := NewATNConfig5(returnState, config.getAlt(), newContext, config.getSemanticContext())
// While we have context to pop back from, we may have // While we have context to pop back from, we may have
@ -1032,7 +1032,7 @@ func (this *ParserATNSimulator) closureCheckingStopState(config IATNConfig, conf
} else { } else {
// else if we have no context info, just chase follow links (if greedy) // else if we have no context info, just chase follow links (if greedy)
if ParserATNSimulatorprototypedebug { if ParserATNSimulatorprototypedebug {
fmt.Println("FALLING off rule " + this.getRuleName(config.getState().getRuleIndex())) fmt.Println("FALLING off rule " + this.getRuleName(config.GetState().getRuleIndex()))
} }
} }
} }
@ -1041,7 +1041,7 @@ func (this *ParserATNSimulator) closureCheckingStopState(config IATNConfig, conf
// Do the actual work of walking epsilon edges// // Do the actual work of walking epsilon edges//
func (this *ParserATNSimulator) closure_(config IATNConfig, configs *ATNConfigSet, closureBusy *Set, collectPredicates, fullCtx bool, depth int, treatEofAsEpsilon bool) { func (this *ParserATNSimulator) closure_(config IATNConfig, configs *ATNConfigSet, closureBusy *Set, collectPredicates, fullCtx bool, depth int, treatEofAsEpsilon bool) {
var p = config.getState() var p = config.GetState()
// optimization // optimization
if !p.getEpsilonOnlyTransitions() { if !p.getEpsilonOnlyTransitions() {
configs.add(config, this.mergeCache) configs.add(config, this.mergeCache)
@ -1120,7 +1120,7 @@ func (this *ParserATNSimulator) getEpsilonTarget(config IATNConfig, t ITransitio
// EOF transitions act like epsilon transitions after the first EOF // EOF transitions act like epsilon transitions after the first EOF
// transition is traversed // transition is traversed
if treatEofAsEpsilon { if treatEofAsEpsilon {
if t.matches(TokenEOF, 0, 1) { if t.Matches(TokenEOF, 0, 1) {
return NewATNConfig4(config, t.getTarget()) return NewATNConfig4(config, t.getTarget())
} }
} }
@ -1129,7 +1129,7 @@ func (this *ParserATNSimulator) getEpsilonTarget(config IATNConfig, t ITransitio
// EOF transitions act like epsilon transitions after the first EOF // EOF transitions act like epsilon transitions after the first EOF
// transition is traversed // transition is traversed
if treatEofAsEpsilon { if treatEofAsEpsilon {
if t.matches(TokenEOF, 0, 1) { if t.Matches(TokenEOF, 0, 1) {
return NewATNConfig4(config, t.getTarget()) return NewATNConfig4(config, t.getTarget())
} }
} }
@ -1138,7 +1138,7 @@ func (this *ParserATNSimulator) getEpsilonTarget(config IATNConfig, t ITransitio
// EOF transitions act like epsilon transitions after the first EOF // EOF transitions act like epsilon transitions after the first EOF
// transition is traversed // transition is traversed
if treatEofAsEpsilon { if treatEofAsEpsilon {
if t.matches(TokenEOF, 0, 1) { if t.Matches(TokenEOF, 0, 1) {
return NewATNConfig4(config, t.getTarget()) return NewATNConfig4(config, t.getTarget())
} }
} }
@ -1233,7 +1233,7 @@ func (this *ParserATNSimulator) ruleTransition(config IATNConfig, t *RuleTransit
fmt.Println("CALL rule " + this.getRuleName(t.getTarget().getRuleIndex()) + ", ctx=" + config.getContext().toString()) fmt.Println("CALL rule " + this.getRuleName(t.getTarget().getRuleIndex()) + ", ctx=" + config.getContext().toString())
} }
var returnState = t.followState var returnState = t.followState
var newContext = SingletonPredictionContextcreate(config.getContext(), returnState.getStateNumber()) var newContext = SingletonPredictionContextcreate(config.getContext(), returnState.GetStateNumber())
return NewATNConfig1(config, t.getTarget(), newContext) return NewATNConfig1(config, t.getTarget(), newContext)
} }
@ -1266,7 +1266,7 @@ func (this *ParserATNSimulator) getConflictingAlts(configs *ATNConfigSet) *BitSe
// //
// a : A | A | A B // a : A | A | A B
// //
// After matching input A, we reach the stop state for rule A, state 1. // After Matching input A, we reach the stop state for rule A, state 1.
// State 8 is the state right before B. Clearly alternatives 1 and 2 // State 8 is the state right before B. Clearly alternatives 1 and 2
// conflict and no amount of further lookahead will separate the two. // conflict and no amount of further lookahead will separate the two.
// However, alternative 3 will be able to continue and so we do not // However, alternative 3 will be able to continue and so we do not
@ -1289,14 +1289,14 @@ func (this *ParserATNSimulator) getConflictingAltsOrUniqueAlt(configs *ATNConfig
return conflictingAlts return conflictingAlts
} }
func (this *ParserATNSimulator) getTokenName(t int) string { func (this *ParserATNSimulator) GetTokenName(t int) string {
if t == TokenEOF { if t == TokenEOF {
return "EOF" return "EOF"
} }
if this.parser != nil && this.parser.getLiteralNames() != nil { if this.parser != nil && this.parser.getLiteralNames() != nil {
if t >= len(this.parser.getLiteralNames()) { if t >= len(this.parser.getLiteralNames()) {
fmt.Println(strconv.Itoa(t) + " ttype out of range: " + strings.Join(this.parser.getLiteralNames(), ",")) fmt.Println(strconv.Itoa(t) + " ttype out of range: " + strings.Join(this.parser.getLiteralNames(), ","))
// fmt.Println(this.parser.getInputStream().getTokens()) // fmt.Println(this.parser.getInputStream().GetTokens())
} else { } else {
return this.parser.getLiteralNames()[t] + "<" + strconv.Itoa(t) + ">" return this.parser.getLiteralNames()[t] + "<" + strconv.Itoa(t) + ">"
} }
@ -1305,10 +1305,10 @@ func (this *ParserATNSimulator) getTokenName(t int) string {
} }
func (this *ParserATNSimulator) getLookaheadName(input TokenStream) string { func (this *ParserATNSimulator) getLookaheadName(input TokenStream) string {
return this.getTokenName(input.LA(1)) return this.GetTokenName(input.LA(1))
} }
// Used for debugging in adaptivePredict around execATN but I cut // Used for debugging in AdaptivePredict around execATN but I cut
// it out for clarity now that alg. works well. We can leave this // it out for clarity now that alg. works well. We can leave this
// "dead" code for a bit. // "dead" code for a bit.
// //
@ -1326,7 +1326,7 @@ func (this *ParserATNSimulator) dumpDeadEndConfigs(nvae *NoViableAltException) {
// if (len(c.state.getTransitions())>0) { // if (len(c.state.getTransitions())>0) {
// var t = c.state.getTransitions()[0] // var t = c.state.getTransitions()[0]
// if t2, ok := t.(*AtomTransition); ok { // if t2, ok := t.(*AtomTransition); ok {
// trans = "Atom "+ this.getTokenName(t2.label) // trans = "Atom "+ this.GetTokenName(t2.label)
// } else if t3, ok := t.(SetTransition); ok { // } else if t3, ok := t.(SetTransition); ok {
// _, ok := t.(*NotSetTransition) // _, ok := t.(*NotSetTransition)
// //
@ -1381,7 +1381,7 @@ func (this *ParserATNSimulator) getUniqueAlt(configs *ATNConfigSet) int {
// //
func (this *ParserATNSimulator) addDFAEdge(dfa *DFA, from_ *DFAState, t int, to *DFAState) *DFAState { func (this *ParserATNSimulator) addDFAEdge(dfa *DFA, from_ *DFAState, t int, to *DFAState) *DFAState {
if ParserATNSimulatorprototypedebug { if ParserATNSimulatorprototypedebug {
fmt.Println("EDGE " + from_.toString() + " -> " + to.toString() + " upon " + this.getTokenName(t)) fmt.Println("EDGE " + from_.toString() + " -> " + to.toString() + " upon " + this.GetTokenName(t))
} }
if to == nil { if to == nil {
return nil return nil
@ -1426,16 +1426,16 @@ func (this *ParserATNSimulator) addDFAState(dfa *DFA, D *DFAState) *DFAState {
return D return D
} }
var hash = D.hashString() var hash = D.hashString()
var existing, ok = dfa.getStates()[hash] var existing, ok = dfa.GetStates()[hash]
if ok { if ok {
return existing return existing
} }
D.stateNumber = len(dfa.getStates()) D.stateNumber = len(dfa.GetStates())
if !D.configs.readOnly { if !D.configs.readOnly {
D.configs.optimizeConfigs(this.ATNSimulator) D.configs.optimizeConfigs(this.ATNSimulator)
D.configs.setReadonly(true) D.configs.setReadonly(true)
} }
dfa.getStates()[hash] = D dfa.GetStates()[hash] = D
if ParserATNSimulatorprototypedebug { if ParserATNSimulatorprototypedebug {
fmt.Println("adding NewDFA state: " + D.toString()) fmt.Println("adding NewDFA state: " + D.toString())
} }
@ -1446,7 +1446,7 @@ func (this *ParserATNSimulator) reportAttemptingFullContext(dfa *DFA, conflictin
if ParserATNSimulatorprototypedebug || ParserATNSimulatorprototyperetry_debug { if ParserATNSimulatorprototypedebug || ParserATNSimulatorprototyperetry_debug {
var interval = NewInterval(startIndex, stopIndex+1) var interval = NewInterval(startIndex, stopIndex+1)
fmt.Println("reportAttemptingFullContext decision=" + strconv.Itoa(dfa.decision) + ":" + configs.toString() + fmt.Println("reportAttemptingFullContext decision=" + strconv.Itoa(dfa.decision) + ":" + configs.toString() +
", input=" + this.parser.getTokenStream().getTextFromInterval(interval)) ", input=" + this.parser.GetTokenStream().GetTextFromInterval(interval))
} }
if this.parser != nil { if this.parser != nil {
this.parser.getErrorListenerDispatch().reportAttemptingFullContext(this.parser, dfa, startIndex, stopIndex, conflictingAlts, configs) this.parser.getErrorListenerDispatch().reportAttemptingFullContext(this.parser, dfa, startIndex, stopIndex, conflictingAlts, configs)
@ -1457,7 +1457,7 @@ func (this *ParserATNSimulator) reportContextSensitivity(dfa *DFA, prediction in
if ParserATNSimulatorprototypedebug || ParserATNSimulatorprototyperetry_debug { if ParserATNSimulatorprototypedebug || ParserATNSimulatorprototyperetry_debug {
var interval = NewInterval(startIndex, stopIndex+1) var interval = NewInterval(startIndex, stopIndex+1)
fmt.Println("reportContextSensitivity decision=" + strconv.Itoa(dfa.decision) + ":" + configs.toString() + fmt.Println("reportContextSensitivity decision=" + strconv.Itoa(dfa.decision) + ":" + configs.toString() +
", input=" + this.parser.getTokenStream().getTextFromInterval(interval)) ", input=" + this.parser.GetTokenStream().GetTextFromInterval(interval))
} }
if this.parser != nil { if this.parser != nil {
this.parser.getErrorListenerDispatch().reportContextSensitivity(this.parser, dfa, startIndex, stopIndex, prediction, configs) this.parser.getErrorListenerDispatch().reportContextSensitivity(this.parser, dfa, startIndex, stopIndex, prediction, configs)
@ -1470,7 +1470,7 @@ func (this *ParserATNSimulator) reportAmbiguity(dfa *DFA, D *DFAState, startInde
if ParserATNSimulatorprototypedebug || ParserATNSimulatorprototyperetry_debug { if ParserATNSimulatorprototypedebug || ParserATNSimulatorprototyperetry_debug {
var interval = NewInterval(startIndex, stopIndex+1) var interval = NewInterval(startIndex, stopIndex+1)
fmt.Println("reportAmbiguity " + ambigAlts.toString() + ":" + configs.toString() + fmt.Println("reportAmbiguity " + ambigAlts.toString() + ":" + configs.toString() +
", input=" + this.parser.getTokenStream().getTextFromInterval(interval)) ", input=" + this.parser.GetTokenStream().GetTextFromInterval(interval))
} }
if this.parser != nil { if this.parser != nil {
this.parser.getErrorListenerDispatch().reportAmbiguity(this.parser, dfa, startIndex, stopIndex, exact, ambigAlts, configs) this.parser.getErrorListenerDispatch().reportAmbiguity(this.parser, dfa, startIndex, stopIndex, exact, ambigAlts, configs)

View File

@ -7,10 +7,10 @@ import (
type IParserRuleContext interface { type IParserRuleContext interface {
IRuleContext IRuleContext
setException(IRecognitionException) SetException(IRecognitionException)
addTokenNode(token *Token) *TerminalNodeImpl addTokenNode(token *Token) *TerminalNodeImpl
addErrorNode(badToken *Token) *ErrorNodeImpl addErrorNode(badToken *Token) *ErrorNodeImpl
enterRule(listener ParseTreeListener) EnterRule(listener ParseTreeListener)
exitRule(listener ParseTreeListener) exitRule(listener ParseTreeListener)
setStart(*Token) setStart(*Token)
@ -61,11 +61,11 @@ func (prc *ParserRuleContext) InitParserRuleContext(parent IParserRuleContext, i
} }
func (prc *ParserRuleContext) setException(e IRecognitionException) { func (prc *ParserRuleContext) SetException(e IRecognitionException) {
prc.exception = e prc.exception = e
} }
func (prc *ParserRuleContext) getParent() Tree { func (prc *ParserRuleContext) GetParent() Tree {
return prc.parentCtx return prc.parentCtx
} }
@ -90,7 +90,7 @@ func (prc *ParserRuleContext) copyFrom(ctx *ParserRuleContext) {
} }
// Double dispatch methods for listeners // Double dispatch methods for listeners
func (prc *ParserRuleContext) enterRule(listener ParseTreeListener) { func (prc *ParserRuleContext) EnterRule(listener ParseTreeListener) {
} }
func (prc *ParserRuleContext) exitRule(listener ParseTreeListener) { func (prc *ParserRuleContext) exitRule(listener ParseTreeListener) {
@ -113,7 +113,7 @@ func (prc *ParserRuleContext) addChild(child IRuleContext) IRuleContext {
return child return child
} }
// * Used by enterOuterAlt to toss out a RuleContext previously added as // * Used by EnterOuterAlt to toss out a RuleContext previously added as
// we entered a rule. If we have // label, we will need to remove // we entered a rule. If we have // label, we will need to remove
// generic ruleContext object. // generic ruleContext object.
// / // /
@ -181,7 +181,7 @@ func (prc *ParserRuleContext) getStop() *Token {
return prc.stop return prc.stop
} }
func (prc *ParserRuleContext) getToken(ttype int, i int) TerminalNode { func (prc *ParserRuleContext) GetToken(ttype int, i int) TerminalNode {
for j := 0; j < len(prc.children); j++ { for j := 0; j < len(prc.children); j++ {
var child = prc.children[j] var child = prc.children[j]
@ -198,7 +198,7 @@ func (prc *ParserRuleContext) getToken(ttype int, i int) TerminalNode {
return nil return nil
} }
func (prc *ParserRuleContext) getTokens(ttype int) []TerminalNode { func (prc *ParserRuleContext) GetTokens(ttype int) []TerminalNode {
if prc.children == nil { if prc.children == nil {
return make([]TerminalNode, 0) return make([]TerminalNode, 0)
} else { } else {
@ -215,13 +215,13 @@ func (prc *ParserRuleContext) getTokens(ttype int) []TerminalNode {
} }
} }
func (prc *ParserRuleContext) getTypedRuleContext(ctxType reflect.Type, i int) *interface{} { func (prc *ParserRuleContext) GetTypedRuleContext(ctxType reflect.Type, i int) *interface{} {
panic("getTypedRuleContexts not implemented") panic("GetTypedRuleContexts not implemented")
// return prc.getChild(i, ctxType) // return prc.getChild(i, ctxType)
} }
func (prc *ParserRuleContext) getTypedRuleContexts(ctxType reflect.Type) []*interface{} { func (prc *ParserRuleContext) GetTypedRuleContexts(ctxType reflect.Type) []*interface{} {
panic("getTypedRuleContexts not implemented") panic("GetTypedRuleContexts not implemented")
// if (prc.children== nil) { // if (prc.children== nil) {
// return [] // return []
// } else { // } else {

View File

@ -7,7 +7,7 @@ import (
type IPredictionContext interface { type IPredictionContext interface {
hashString() string hashString() string
getParent(int) IPredictionContext GetParent(int) IPredictionContext
getReturnState(int) int getReturnState(int) int
equals(IPredictionContext) bool equals(IPredictionContext) bool
length() int length() int
@ -44,7 +44,7 @@ var PredictionContextglobalNodeCount = 1
var PredictionContextid = PredictionContextglobalNodeCount var PredictionContextid = PredictionContextglobalNodeCount
// Stores the computed hash code of this {@link PredictionContext}. The hash // Stores the computed hash code of this {@link PredictionContext}. The hash
// code is computed in parts to match the following reference algorithm. // code is computed in parts to Match the following reference algorithm.
// //
// <pre> // <pre>
// private int referenceHashCode() { // private int referenceHashCode() {
@ -52,8 +52,8 @@ var PredictionContextid = PredictionContextglobalNodeCount
// //INITIAL_HASH}) // //INITIAL_HASH})
// //
// for (int i = 0 i &lt {@link //size()} i++) { // for (int i = 0 i &lt {@link //size()} i++) {
// hash = {@link MurmurHash//update MurmurHash.update}(hash, {@link //getParent // hash = {@link MurmurHash//update MurmurHash.update}(hash, {@link //GetParent
// getParent}(i)) // GetParent}(i))
// } // }
// //
// for (int i = 0 i &lt {@link //size()} i++) { // for (int i = 0 i &lt {@link //size()} i++) {
@ -92,7 +92,7 @@ func (this *PredictionContext) toString() string {
panic("Not implemented") panic("Not implemented")
} }
func (this *PredictionContext) getParent(index int) IPredictionContext { func (this *PredictionContext) GetParent(index int) IPredictionContext {
panic("Not implemented") panic("Not implemented")
} }
@ -185,7 +185,7 @@ func (this *SingletonPredictionContext) length() int {
return 1 return 1
} }
func (this *SingletonPredictionContext) getParent(index int) IPredictionContext { func (this *SingletonPredictionContext) GetParent(index int) IPredictionContext {
return this.parentCtx return this.parentCtx
} }
@ -257,7 +257,7 @@ func (this *EmptyPredictionContext) isEmpty() bool {
return true return true
} }
func (this *EmptyPredictionContext) getParent(index int) IPredictionContext { func (this *EmptyPredictionContext) GetParent(index int) IPredictionContext {
return nil return nil
} }
@ -310,7 +310,7 @@ func (this *ArrayPredictionContext) length() int {
return len(this.returnStates) return len(this.returnStates)
} }
func (this *ArrayPredictionContext) getParent(index int) IPredictionContext { func (this *ArrayPredictionContext) GetParent(index int) IPredictionContext {
return this.parents[index] return this.parents[index]
} }
@ -364,15 +364,15 @@ func predictionContextFromRuleContext(a *ATN, outerContext IRuleContext) IPredic
} }
// if we are in RuleContext of start rule, s, then PredictionContext // if we are in RuleContext of start rule, s, then PredictionContext
// is EMPTY. Nobody called us. (if we are empty, return empty) // is EMPTY. Nobody called us. (if we are empty, return empty)
if outerContext.getParent() == nil || outerContext == RuleContextEMPTY { if outerContext.GetParent() == nil || outerContext == RuleContextEMPTY {
return PredictionContextEMPTY return PredictionContextEMPTY
} }
// If we have a parent, convert it to a PredictionContext graph // If we have a parent, convert it to a PredictionContext graph
var parent = predictionContextFromRuleContext(a, outerContext.getParent().(IRuleContext)) var parent = predictionContextFromRuleContext(a, outerContext.GetParent().(IRuleContext))
var state = a.states[outerContext.getInvokingState()] var state = a.states[outerContext.getInvokingState()]
var transition = state.getTransitions()[0] var transition = state.getTransitions()[0]
return SingletonPredictionContextcreate(parent, transition.(*RuleTransition).followState.getStateNumber()) return SingletonPredictionContextcreate(parent, transition.(*RuleTransition).followState.GetStateNumber())
} }
func calculateListsHashString(parents []PredictionContext, returnStates []int) string { func calculateListsHashString(parents []PredictionContext, returnStates []int) string {
@ -413,10 +413,10 @@ func merge(a, b IPredictionContext, rootIsWildcard bool, mergeCache *DoubleDict)
} }
// convert singleton so both are arrays to normalize // convert singleton so both are arrays to normalize
if _, ok := a.(*SingletonPredictionContext); ok { if _, ok := a.(*SingletonPredictionContext); ok {
a = NewArrayPredictionContext([]IPredictionContext{a.getParent(0)}, []int{a.getReturnState(0)}) a = NewArrayPredictionContext([]IPredictionContext{a.GetParent(0)}, []int{a.getReturnState(0)})
} }
if _, ok := b.(*SingletonPredictionContext); ok { if _, ok := b.(*SingletonPredictionContext); ok {
b = NewArrayPredictionContext([]IPredictionContext{b.getParent(0)}, []int{b.getReturnState(0)}) b = NewArrayPredictionContext([]IPredictionContext{b.GetParent(0)}, []int{b.getReturnState(0)})
} }
return mergeArrays(a.(*ArrayPredictionContext), b.(*ArrayPredictionContext), rootIsWildcard, mergeCache) return mergeArrays(a.(*ArrayPredictionContext), b.(*ArrayPredictionContext), rootIsWildcard, mergeCache)
} }
@ -581,11 +581,11 @@ func mergeRoot(a, b ISingletonPredictionContext, rootIsWildcard bool) IPredictio
return PredictionContextEMPTY // $ + $ = $ return PredictionContextEMPTY // $ + $ = $
} else if a == PredictionContextEMPTY { // $ + x = [$,x] } else if a == PredictionContextEMPTY { // $ + x = [$,x]
var payloads = []int{b.getReturnState(-1), PredictionContextEMPTY_RETURN_STATE} var payloads = []int{b.getReturnState(-1), PredictionContextEMPTY_RETURN_STATE}
var parents = []IPredictionContext{b.getParent(-1), nil} var parents = []IPredictionContext{b.GetParent(-1), nil}
return NewArrayPredictionContext(parents, payloads) return NewArrayPredictionContext(parents, payloads)
} else if b == PredictionContextEMPTY { // x + $ = [$,x] ($ is always first if present) } else if b == PredictionContextEMPTY { // x + $ = [$,x] ($ is always first if present)
var payloads = []int{a.getReturnState(-1), PredictionContextEMPTY_RETURN_STATE} var payloads = []int{a.getReturnState(-1), PredictionContextEMPTY_RETURN_STATE}
var parents = []IPredictionContext{a.getParent(-1), nil} var parents = []IPredictionContext{a.GetParent(-1), nil}
return NewArrayPredictionContext(parents, payloads) return NewArrayPredictionContext(parents, payloads)
} }
} }
@ -752,12 +752,12 @@ func getCachedPredictionContext(context IPredictionContext, contextCache *Predic
// var changed = false // var changed = false
// var parents = [] // var parents = []
// for i := 0; i < len(parents); i++ { // for i := 0; i < len(parents); i++ {
// var parent = getCachedPredictionContext(context.getParent(i), contextCache, visited) // var parent = getCachedPredictionContext(context.GetParent(i), contextCache, visited)
// if (changed || parent != context.getParent(i)) { // if (changed || parent != context.GetParent(i)) {
// if (!changed) { // if (!changed) {
// parents = [] // parents = []
// for j := 0; j < len(context); j++ { // for j := 0; j < len(context); j++ {
// parents[j] = context.getParent(j) // parents[j] = context.GetParent(j)
// } // }
// changed = true // changed = true
// } // }
@ -799,7 +799,7 @@ func getCachedPredictionContext(context IPredictionContext, contextCache *Predic
// visited[context] = context // visited[context] = context
// nodes.push(context) // nodes.push(context)
// for i := 0; i < len(context); i++ { // for i := 0; i < len(context); i++ {
// getAllContextNodes(context.getParent(i), nodes, visited) // getAllContextNodes(context.GetParent(i), nodes, visited)
// } // }
// return nodes // return nodes
// } // }

View File

@ -122,7 +122,7 @@ const (
// //
// <p>{@code [1|1|[], 1|2|[], 8|3|[]] a : A | A | A B }</p> // <p>{@code [1|1|[], 1|2|[], 8|3|[]] a : A | A | A B }</p>
// //
// <p>After matching input A, we reach the stop state for rule A, state 1. // <p>After Matching input A, we reach the stop state for rule A, state 1.
// State 8 is the state right before B. Clearly alternatives 1 and 2 // State 8 is the state right before B. Clearly alternatives 1 and 2
// conflict and no amount of further lookahead will separate the two. // conflict and no amount of further lookahead will separate the two.
// However, alternative 3 will be able to continue and so we do not stop // However, alternative 3 will be able to continue and so we do not stop
@ -168,7 +168,7 @@ func PredictionModehasSLLConflictTerminatingPrediction(mode int, configs *ATNCon
// Configs in rule stop states indicate reaching the end of the decision // Configs in rule stop states indicate reaching the end of the decision
// rule (local context) or end of start rule (full context). If all // rule (local context) or end of start rule (full context). If all
// configs meet this condition, then none of the configurations is able // configs meet this condition, then none of the configurations is able
// to match additional input so we terminate prediction. // to Match additional input so we terminate prediction.
// //
if PredictionModeallConfigsInRuleStopStates(configs) { if PredictionModeallConfigsInRuleStopStates(configs) {
return true return true
@ -208,7 +208,7 @@ func PredictionModehasSLLConflictTerminatingPrediction(mode int, configs *ATNCon
func PredictionModehasConfigInRuleStopState(configs *ATNConfigSet) bool { func PredictionModehasConfigInRuleStopState(configs *ATNConfigSet) bool {
for i := 0; i < len(configs.configs); i++ { for i := 0; i < len(configs.configs); i++ {
var c = configs.configs[i] var c = configs.configs[i]
if _, ok := c.getState().(*RuleStopState); ok { if _, ok := c.GetState().(*RuleStopState); ok {
return true return true
} }
} }
@ -228,7 +228,7 @@ func PredictionModeallConfigsInRuleStopStates(configs *ATNConfigSet) bool {
for i := 0; i < len(configs.configs); i++ { for i := 0; i < len(configs.configs); i++ {
var c = configs.configs[i] var c = configs.configs[i]
if _, ok := c.getState().(*RuleStopState); !ok { if _, ok := c.GetState().(*RuleStopState); !ok {
return false return false
} }
} }
@ -495,7 +495,7 @@ func PredictionModegetConflictingAltSubsets(configs *ATNConfigSet) []*BitSet {
for i := 0; i < len(configs.configs); i++ { for i := 0; i < len(configs.configs); i++ {
var c = configs.configs[i] var c = configs.configs[i]
var key = "key_" + strconv.Itoa(c.getState().getStateNumber()) + "/" + c.getContext().toString() var key = "key_" + strconv.Itoa(c.GetState().GetStateNumber()) + "/" + c.getContext().toString()
var alts = configToAlts[key] var alts = configToAlts[key]
if alts != nil { if alts != nil {
alts = NewBitSet() alts = NewBitSet()
@ -523,14 +523,14 @@ func PredictionModegetConflictingAltSubsets(configs *ATNConfigSet) []*BitSet {
// map[c.{@link ATNConfig//state state}] U= c.{@link ATNConfig//alt alt} // map[c.{@link ATNConfig//state state}] U= c.{@link ATNConfig//alt alt}
// </pre> // </pre>
// //
func PredictionModegetStateToAltMap(configs *ATNConfigSet) *AltDict { func PredictionModeGetStateToAltMap(configs *ATNConfigSet) *AltDict {
var m = NewAltDict() var m = NewAltDict()
for _, c := range configs.configs { for _, c := range configs.configs {
var alts = m.get(c.getState().toString()) var alts = m.get(c.GetState().toString())
if alts == nil { if alts == nil {
alts = NewBitSet() alts = NewBitSet()
m.put(c.getState().toString(), alts) m.put(c.GetState().toString(), alts)
} }
alts.(*BitSet).add(c.getAlt()) alts.(*BitSet).add(c.getAlt())
} }
@ -538,7 +538,7 @@ func PredictionModegetStateToAltMap(configs *ATNConfigSet) *AltDict {
} }
func PredictionModehasStateAssociatedWithOneAlt(configs *ATNConfigSet) bool { func PredictionModehasStateAssociatedWithOneAlt(configs *ATNConfigSet) bool {
var values = PredictionModegetStateToAltMap(configs).values() var values = PredictionModeGetStateToAltMap(configs).values()
for i := 0; i < len(values); i++ { for i := 0; i < len(values); i++ {
if values[i].(*BitSet).length() == 1 { if values[i].(*BitSet).length() == 1 {
return true return true

View File

@ -8,14 +8,15 @@ import (
) )
type IRecognizer interface { type IRecognizer interface {
getState() int GetState() int
SetState(int)
getATN() *ATN getATN() *ATN
action(_localctx IRuleContext, ruleIndex, actionIndex int) Action(_localctx IRuleContext, ruleIndex, actionIndex int)
getRuleNames() []string getRuleNames() []string
getErrorListenerDispatch() IErrorListener getErrorListenerDispatch() IErrorListener
sempred(localctx IRuleContext, ruleIndex int, actionIndex int) bool Sempred(localctx IRuleContext, ruleIndex int, actionIndex int) bool
precpred(localctx IRuleContext, precedence int) bool Precpred(localctx IRuleContext, precedence int) bool
} }
type Recognizer struct { type Recognizer struct {
@ -44,7 +45,7 @@ func (this *Recognizer) checkVersion(toolVersion string) {
} }
} }
func (this *Recognizer) action(context IRuleContext, ruleIndex, actionIndex int) { func (this *Recognizer) Action(context IRuleContext, ruleIndex, actionIndex int) {
panic("action not implemented on Recognizer!") panic("action not implemented on Recognizer!")
} }
@ -60,16 +61,20 @@ func (this *Recognizer) getRuleNames() []string {
return nil return nil
} }
func (this *Recognizer) getTokenNames() []string { func (this *Recognizer) GetTokenNames() []string {
return nil return nil
} }
func (this *Recognizer) getState() int { func (this *Recognizer) GetState() int {
return this.state return this.state
} }
//func (this *Recognizer) getTokenTypeMap() { func (this *Recognizer) SetState(v int) {
// var tokenNames = this.getTokenNames() this.state = v
}
//func (this *Recognizer) GetTokenTypeMap() {
// var tokenNames = this.GetTokenNames()
// if (tokenNames==nil) { // if (tokenNames==nil) {
// panic("The current recognizer does not provide a list of token names.") // panic("The current recognizer does not provide a list of token names.")
// } // }
@ -101,9 +106,9 @@ func (this *Recognizer) getRuleIndexMap() map[string]int {
// return result // return result
} }
func (this *Recognizer) getTokenType(tokenName string) int { func (this *Recognizer) GetTokenType(tokenName string) int {
panic("Method not defined!") panic("Method not defined!")
// var ttype = this.getTokenTypeMap()[tokenName] // var ttype = this.GetTokenTypeMap()[tokenName]
// if (ttype !=nil) { // if (ttype !=nil) {
// return ttype // return ttype
// } else { // } else {
@ -111,10 +116,10 @@ func (this *Recognizer) getTokenType(tokenName string) int {
// } // }
} }
//func (this *Recognizer) getTokenTypeMap() map[string]int { //func (this *Recognizer) GetTokenTypeMap() map[string]int {
// Vocabulary vocabulary = getVocabulary(); // Vocabulary vocabulary = getVocabulary();
// //
// synchronized (tokenTypeMapCache) { // Synchronized (tokenTypeMapCache) {
// Map<String, Integer> result = tokenTypeMapCache.get(vocabulary); // Map<String, Integer> result = tokenTypeMapCache.get(vocabulary);
// if (result == null) { // if (result == null) {
// result = new HashMap<String, Integer>(); // result = new HashMap<String, Integer>();
@ -157,9 +162,9 @@ func (this *Recognizer) getErrorHeader(e IRecognitionException) string {
// @deprecated This method is not called by the ANTLR 4 Runtime. Specific // @deprecated This method is not called by the ANTLR 4 Runtime. Specific
// implementations of {@link ANTLRErrorStrategy} may provide a similar // implementations of {@link ANTLRErrorStrategy} may provide a similar
// feature when necessary. For example, see // feature when necessary. For example, see
// {@link DefaultErrorStrategy//getTokenErrorDisplay}. // {@link DefaultErrorStrategy//GetTokenErrorDisplay}.
// //
func (this *Recognizer) getTokenErrorDisplay(t *Token) string { func (this *Recognizer) GetTokenErrorDisplay(t *Token) string {
if t == nil { if t == nil {
return "<no token>" return "<no token>"
} }
@ -184,10 +189,10 @@ func (this *Recognizer) getErrorListenerDispatch() IErrorListener {
// subclass needs to override these if there are sempreds or actions // subclass needs to override these if there are sempreds or actions
// that the ATN interp needs to execute // that the ATN interp needs to execute
func (this *Recognizer) sempred(localctx IRuleContext, ruleIndex int, actionIndex int) bool { func (this *Recognizer) Sempred(localctx IRuleContext, ruleIndex int, actionIndex int) bool {
return true return true
} }
func (this *Recognizer) precpred(localctx IRuleContext, precedence int) bool { func (this *Recognizer) Precpred(localctx IRuleContext, precedence int) bool {
return true return true
} }

View File

@ -97,7 +97,7 @@ func (this *RuleContext) depth() int {
var n = 0 var n = 0
var p Tree = this var p Tree = this
for p != nil { for p != nil {
p = p.getParent() p = p.GetParent()
n += 1 n += 1
} }
return n return n
@ -130,13 +130,13 @@ func (this *RuleContext) getPayload() interface{} {
// added to the parse trees, they will not appear in the output of this // added to the parse trees, they will not appear in the output of this
// method. // method.
// //
func (this *RuleContext) getText() string { func (this *RuleContext) GetText() string {
if this.getChildCount() == 0 { if this.getChildCount() == 0 {
return "" return ""
} else { } else {
var s string var s string
for _, child := range this.children { for _, child := range this.children {
s += child.(IRuleContext).getText() s += child.(IRuleContext).GetText()
} }
return s return s
@ -147,7 +147,7 @@ func (this *RuleContext) getChild(i int) Tree {
return nil return nil
} }
func (this *RuleContext) getParent() Tree { func (this *RuleContext) GetParent() Tree {
return this.parentCtx return this.parentCtx
} }
@ -188,10 +188,10 @@ func (this *RuleContext) toString(ruleNames []string, stop IRuleContext) string
} }
s += ruleName s += ruleName
} }
if p.getParent() != nil && (ruleNames != nil || !p.getParent().(IRuleContext).isEmpty()) { if p.GetParent() != nil && (ruleNames != nil || !p.GetParent().(IRuleContext).isEmpty()) {
s += " " s += " "
} }
p = p.getParent().(IRuleContext) p = p.GetParent().(IRuleContext)
} }
s += "]" s += "]"
return s return s

View File

@ -85,7 +85,7 @@ func (this *Predicate) evaluate(parser IRecognizer, outerContext IRuleContext) b
localctx = outerContext localctx = outerContext
} }
return parser.sempred(localctx, this.ruleIndex, this.predIndex) return parser.Sempred(localctx, this.ruleIndex, this.predIndex)
} }
func (this *Predicate) hashString() string { func (this *Predicate) hashString() string {
@ -121,11 +121,11 @@ func NewPrecedencePredicate(precedence int) *PrecedencePredicate {
} }
func (this *PrecedencePredicate) evaluate(parser IRecognizer, outerContext IRuleContext) bool { func (this *PrecedencePredicate) evaluate(parser IRecognizer, outerContext IRuleContext) bool {
return parser.precpred(outerContext, this.precedence) return parser.Precpred(outerContext, this.precedence)
} }
func (this *PrecedencePredicate) evalPrecedence(parser IRecognizer, outerContext IRuleContext) SemanticContext { func (this *PrecedencePredicate) evalPrecedence(parser IRecognizer, outerContext IRuleContext) SemanticContext {
if parser.precpred(outerContext, this.precedence) { if parser.Precpred(outerContext, this.precedence) {
return SemanticContextNONE return SemanticContextNONE
} else { } else {
return nil return nil

View File

@ -51,7 +51,7 @@ const (
) )
// Explicitly set the text for this token. If {code text} is not // Explicitly set the text for this token. If {code text} is not
// {@code nil}, then {@link //getText} will return this value rather than // {@code nil}, then {@link //GetText} will return this value rather than
// extracting the text from the input. // extracting the text from the input.
// //
// @param text The explicit text of the token, or {@code nil} if the text // @param text The explicit text of the token, or {@code nil} if the text
@ -66,7 +66,7 @@ func (this *Token) setText(s string) {
this._text = s this._text = s
} }
func (this *Token) getTokenSource() TokenSource { func (this *Token) GetTokenSource() TokenSource {
return this.source.tokenSource return this.source.tokenSource
} }
@ -108,8 +108,8 @@ func NewCommonToken(source *TokenSourceCharStreamPair, tokenType, channel, start
// If {@code oldToken} is also a {@link CommonToken} instance, the newly // If {@code oldToken} is also a {@link CommonToken} instance, the newly
// constructed token will share a reference to the {@link //text} field and // constructed token will share a reference to the {@link //text} field and
// the {@link Pair} stored in {@link //source}. Otherwise, {@link //text} will // the {@link Pair} stored in {@link //source}. Otherwise, {@link //text} will
// be assigned the result of calling {@link //getText}, and {@link //source} // be assigned the result of calling {@link //GetText}, and {@link //source}
// will be constructed from the result of {@link Token//getTokenSource} and // will be constructed from the result of {@link Token//GetTokenSource} and
// {@link Token//getInputStream}.</p> // {@link Token//getInputStream}.</p>
// //
// @param oldToken The token to copy. // @param oldToken The token to copy.
@ -134,7 +134,7 @@ func (this *CommonToken) text() string {
} }
var n = input.size() var n = input.size()
if this.start < n && this.stop < n { if this.start < n && this.stop < n {
return input.getTextFromInterval(NewInterval(this.start, this.stop)) return input.GetTextFromInterval(NewInterval(this.start, this.stop))
} else { } else {
return "<EOF>" return "<EOF>"
} }

View File

@ -9,5 +9,5 @@ type TokenSource interface {
getInputStream() CharStream getInputStream() CharStream
getSourceName() string getSourceName() string
setTokenFactory(factory TokenFactory) setTokenFactory(factory TokenFactory)
getTokenFactory() TokenFactory GetTokenFactory() TokenFactory
} }

View File

@ -6,11 +6,11 @@ type TokenStream interface {
LT(k int) *Token LT(k int) *Token
get(index int) *Token get(index int) *Token
getTokenSource() TokenSource GetTokenSource() TokenSource
setTokenSource(TokenSource) setTokenSource(TokenSource)
getText() string GetText() string
getTextFromInterval(*Interval) string GetTextFromInterval(*Interval) string
getTextFromRuleContext(IRuleContext) string GetTextFromRuleContext(IRuleContext) string
getTextFromTokens(*Token, *Token) string GetTextFromTokens(*Token, *Token) string
} }

View File

@ -20,7 +20,7 @@ type ITransition interface {
getIsEpsilon() bool getIsEpsilon() bool
getLabel() *IntervalSet getLabel() *IntervalSet
getSerializationType() int getSerializationType() int
matches(int, int, int) bool Matches(int, int, int) bool
} }
type Transition struct { type Transition struct {
@ -69,7 +69,7 @@ func (t *Transition) getSerializationType() int {
return t.serializationType return t.serializationType
} }
func (t *Transition) matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { func (t *Transition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
panic("Not implemented") panic("Not implemented")
} }
@ -149,7 +149,7 @@ func (t *AtomTransition) makeLabel() *IntervalSet {
return s return s
} }
func (t *AtomTransition) matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { func (t *AtomTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
return t.label_ == symbol return t.label_ == symbol
} }
@ -178,7 +178,7 @@ func NewRuleTransition(ruleStart IATNState, ruleIndex, precedence int, followSta
return t return t
} }
func (t *RuleTransition) matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { func (t *RuleTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
return false return false
} }
@ -200,7 +200,7 @@ func NewEpsilonTransition(target IATNState, outermostPrecedenceReturn int) *Epsi
return t return t
} }
func (t *EpsilonTransition) matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { func (t *EpsilonTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
return false return false
} }
@ -232,7 +232,7 @@ func (t *RangeTransition) makeLabel() *IntervalSet {
return s return s
} }
func (t *RangeTransition) matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { func (t *RangeTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
return symbol >= t.start && symbol <= t.stop return symbol >= t.start && symbol <= t.stop
} }
@ -272,7 +272,7 @@ func NewPredicateTransition(target IATNState, ruleIndex, predIndex int, isCtxDep
return t return t
} }
func (t *PredicateTransition) matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { func (t *PredicateTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
return false return false
} }
@ -304,7 +304,7 @@ func NewActionTransition(target IATNState, ruleIndex, actionIndex int, isCtxDepe
return t return t
} }
func (t *ActionTransition) matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { func (t *ActionTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
return false return false
} }
@ -337,7 +337,7 @@ func (t *SetTransition) InitSetTransition(set *IntervalSet) {
} }
func (t *SetTransition) matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { func (t *SetTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
return t.label.contains(symbol) return t.label.contains(symbol)
} }
@ -360,7 +360,7 @@ func NewNotSetTransition(target IATNState, set *IntervalSet) *NotSetTransition {
return t return t
} }
func (t *NotSetTransition) matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { func (t *NotSetTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
return symbol >= minVocabSymbol && symbol <= maxVocabSymbol && !t.label.contains(symbol) return symbol >= minVocabSymbol && symbol <= maxVocabSymbol && !t.label.contains(symbol)
} }
@ -381,7 +381,7 @@ func NewWildcardTransition(target IATNState) *WildcardTransition {
return t return t
} }
func (t *WildcardTransition) matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { func (t *WildcardTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
return symbol >= minVocabSymbol && symbol <= maxVocabSymbol return symbol >= minVocabSymbol && symbol <= maxVocabSymbol
} }
@ -407,7 +407,7 @@ func NewPrecedencePredicateTransition(target IATNState, precedence int) *Precede
return t return t
} }
func (t *PrecedencePredicateTransition) matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { func (t *PrecedencePredicateTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
return false return false
} }

View File

@ -7,7 +7,7 @@ package antlr4
var TreeINVALID_INTERVAL = NewInterval(-1, -2) var TreeINVALID_INTERVAL = NewInterval(-1, -2)
type Tree interface { type Tree interface {
getParent() Tree GetParent() Tree
setParent(Tree) setParent(Tree)
getPayload() interface{} getPayload() interface{}
getChild(i int) Tree getChild(i int) Tree
@ -28,7 +28,7 @@ type ParseTree interface {
// <T> T accept(ParseTreeVisitor<? extends T> visitor); // <T> T accept(ParseTreeVisitor<? extends T> visitor);
accept(visitor ParseTreeVisitor) interface{} accept(visitor ParseTreeVisitor) interface{}
getText() string GetText() string
// toStringTree([]string, IRecognizer) string // toStringTree([]string, IRecognizer) string
} }
@ -119,7 +119,7 @@ func (this *TerminalNodeImpl) getSymbol() *Token {
return this.symbol return this.symbol
} }
func (this *TerminalNodeImpl) getParent() Tree { func (this *TerminalNodeImpl) GetParent() Tree {
return this.parentCtx return this.parentCtx
} }
@ -147,7 +147,7 @@ func (this *TerminalNodeImpl) accept(visitor ParseTreeVisitor) interface{} {
return visitor.visitTerminal(this) return visitor.visitTerminal(this)
} }
func (this *TerminalNodeImpl) getText() string { func (this *TerminalNodeImpl) GetText() string {
return this.symbol.text() return this.symbol.text()
} }
@ -159,8 +159,8 @@ func (this *TerminalNodeImpl) toString() string {
} }
} }
// Represents a token that was consumed during resynchronization // Represents a token that was consumed during reSynchronization
// rather than during a valid match operation. For example, // rather than during a valid Match operation. For example,
// we will create this kind of a node during single token insertion // we will create this kind of a node during single token insertion
// and deletion as well as during "consume until error recovery set" // and deletion as well as during "consume until error recovery set"
// upon no viable alternative exceptions. // upon no viable alternative exceptions.
@ -197,7 +197,7 @@ func (this *ParseTreeWalker) walk(listener ParseTreeListener, t Tree) {
} else if term, ok := t.(TerminalNode); ok { } else if term, ok := t.(TerminalNode); ok {
listener.visitTerminal(term) listener.visitTerminal(term)
} else { } else {
this.enterRule(listener, t.(RuleNode)) this.EnterRule(listener, t.(RuleNode))
for i := 0; i < t.getChildCount(); i++ { for i := 0; i < t.getChildCount(); i++ {
var child = t.getChild(i) var child = t.getChild(i)
this.walk(listener, child) this.walk(listener, child)
@ -212,10 +212,10 @@ func (this *ParseTreeWalker) walk(listener ParseTreeListener, t Tree) {
// {@link RuleContext}-specific event. First we trigger the generic and then // {@link RuleContext}-specific event. First we trigger the generic and then
// the rule specific. We to them in reverse order upon finishing the node. // the rule specific. We to them in reverse order upon finishing the node.
// //
func (this *ParseTreeWalker) enterRule(listener ParseTreeListener, r RuleNode) { func (this *ParseTreeWalker) EnterRule(listener ParseTreeListener, r RuleNode) {
var ctx = r.getRuleContext().(IParserRuleContext) var ctx = r.getRuleContext().(IParserRuleContext)
listener.enterEveryRule(ctx) listener.enterEveryRule(ctx)
ctx.enterRule(listener) ctx.EnterRule(listener)
} }
func (this *ParseTreeWalker) exitRule(listener ParseTreeListener, r RuleNode) { func (this *ParseTreeWalker) exitRule(listener ParseTreeListener, r RuleNode) {

View File

@ -74,11 +74,11 @@ func TreesgetChildren(t Tree) []Tree {
// //
func TreesgetAncestors(t Tree) []Tree { func TreesgetAncestors(t Tree) []Tree {
var ancestors = make([]Tree, 0) var ancestors = make([]Tree, 0)
t = t.getParent() t = t.GetParent()
for t != nil { for t != nil {
f := []Tree{t} f := []Tree{t}
ancestors = append(f, ancestors...) ancestors = append(f, ancestors...)
t = t.getParent() t = t.GetParent()
} }
return ancestors return ancestors
} }

3
runtime/Go/src/scratch/.gitignore vendored Normal file
View File

@ -0,0 +1,3 @@
*.js
*.tokens
*.go

View File

@ -0,0 +1,101 @@
grammar Arithmetic;
options { language = Go; }
equation
: expression relop expression
;
expression
: multiplyingExpression ((PLUS|MINUS) multiplyingExpression)*
;
multiplyingExpression
: powExpression ((TIMES|DIV) powExpression)*
;
powExpression
: atom (POW expression)?
;
atom
: scientific
| variable
| LPAREN expression RPAREN
;
scientific
: number (E number)?
;
relop
: EQ | GT | LT
;
number
: MINUS? DIGIT+ (POINT DIGIT+)?
;
variable
: MINUS? LETTER (LETTER | DIGIT)*;
LPAREN
: '('
;
RPAREN
: ')'
;
PLUS
: '+'
;
MINUS
: '-'
;
TIMES
: '*'
;
DIV
: '/'
;
GT
: '>'
;
LT
: '<'
;
EQ
: '='
;
POINT
: '.'
;
E
: 'e'
| 'E'
;
POW
: '^'
;
LETTER
: ('a'..'z') | ('A'..'Z')
;
DIGIT
: ('0'..'9')
;
WS
: [ \r\n\t]+ -> channel(HIDDEN)
;

View File

@ -0,0 +1,3 @@
lexer grammar T;
options { language = Go; }
ZERO: '0';

View File

@ -1,29 +1,22 @@
/** ANTLR tool checks output templates are compatible with tool code generation.
* For now, a simple string match used on x.y of x.y.z scheme.
* Must match Tool.VERSION during load to templates.
*
* REQUIRED.
*/
fileHeader(grammarFileName, ANTLRVersion) ::= << fileHeader(grammarFileName, ANTLRVersion) ::= <<
// Generated from <grammarFileName; format="java-escape"> by ANTLR <ANTLRVersion> // Generated from <grammarFileName; format="java-escape"> by ANTLR <ANTLRVersion>
>> >>
// args must be <object-model-object>, <fields-resulting-in-STs>
ParserFile(file, parser, namedActions) ::= << ParserFile(file, parser, namedActions) ::= <<
<fileHeader(file.grammarFileName, file.ANTLRVersion)> <fileHeader(file.grammarFileName, file.ANTLRVersion)>
package parser // <file.grammarName> package parser // <file.grammarName>
import "antlr4" import (
"antlr4"
"strings"
)
<namedActions.header> <namedActions.header>
<parser> <parser>
>> >>
ListenerFile(file, header) ::= << ListenerFile(file, header) ::= <<
<fileHeader(file.grammarFileName, file.ANTLRVersion)> <fileHeader(file.grammarFileName, file.ANTLRVersion)>
package parser // <file.grammarName> package parser // <file.grammarName>
@ -33,7 +26,7 @@ import "antlr4"
// This class defines a complete listener for a parse tree produced by <file.parserName>. // This class defines a complete listener for a parse tree produced by <file.parserName>.
type <file.grammarName>Listener struct { type <file.grammarName>Listener struct {
ParseTreeListener
} }
<file.listenerNames:{lname | <file.listenerNames:{lname |
@ -49,7 +42,6 @@ func (l *<file.grammarName>Listener) exit<lname; format="cap">(ctx antlr4.IParse
>> >>
VisitorFile(file, header) ::= << VisitorFile(file, header) ::= <<
<fileHeader(file.grammarFileName, file.ANTLRVersion)> <fileHeader(file.grammarFileName, file.ANTLRVersion)>
package parser // <file.grammarName> package parser // <file.grammarName>
@ -73,24 +65,22 @@ func (l <file.grammarName>Visitor) visit<lname; format="cap">(ctx IParserRuleCon
>> >>
Parser(parser, funcs, atn, sempredFuncs, superClass) ::= << Parser(parser, funcs, atn, sempredFuncs, superClass) ::= <<
<if(superClass)> <if(superClass)>
var <superClass> = require('./<superClass>').<superClass> // TODO var <superClass> = require('./<superClass>').<superClass> // TODO
<endif> <endif>
<atn> var parserATN = <atn>
var deserializer = antlr4.NewATNDeserializer() var deserializer = antlr4.NewATNDeserializer()
var deserializedAtn = deserializer.Deserialize(serializedATN) var deserializedATN = deserializer.Deserialize( []rune( parserATN ) )
var literalNames = []string{ <parser.literalNames:{t | <t>}; null="nil", separator=", ", wrap, anchor> } var literalNames = []string{ <parser.literalNames:{t | <t>}; null="nil", separator=", ", wrap, anchor> }
var symbolicNames = []string{ <parser.symbolicNames:{t | <t>}; null="nil", separator=", ", wrap, anchor> } var symbolicNames = []string{ <parser.symbolicNames:{t | <t>}; null="nil", separator=", ", wrap, anchor> }
var ruleNames = []string{ <parser.ruleNames:{r | "<r>"}; separator=", ", wrap, anchor> } var ruleNames = []string{ <parser.ruleNames:{r | "<r>"}; separator=", ", wrap, anchor> }
type <parser.name> struct { type <parser.name> struct {
<superClass; null="*antlr4.Parser"> *<superClass; null="antlr4.Parser">
ruleNames []string ruleNames []string
literalNames []string literalNames []string
@ -100,10 +90,10 @@ type <parser.name> struct {
func New<parser.name>(input antlr4.TokenStream) *<parser.name> { func New<parser.name>(input antlr4.TokenStream) *<parser.name> {
var decisionToDFA = make([]antlr4.DFA,len(deserializedAtn.DecisionToState)) var decisionToDFA = make([]*antlr4.DFA,len(deserializedATN.DecisionToState))
var sharedContextCache = antlr4.NewPredictionContextCache() var sharedContextCache = antlr4.NewPredictionContextCache()
for index, ds := range deserializedAtn.DecisionToState { for index, ds := range deserializedATN.DecisionToState {
decisionToDFA[index] = antlr4.NewDFA(ds, index) decisionToDFA[index] = antlr4.NewDFA(ds, index)
} }
@ -111,11 +101,11 @@ func New<parser.name>(input antlr4.TokenStream) *<parser.name> {
parser.InitParser(input) parser.InitParser(input)
parser.Interpreter = antlr4.NewParserATNSimulator(parser, deserializedAtn, decisionToDFA, sharedContextCache) parser.Interpreter = antlr4.NewParserATNSimulator(parser, deserializedATN, decisionToDFA, sharedContextCache)
parser.ruleNames = ruleNames parser.ruleNames = ruleNames
parser.literalNames = literalNames parser.literalNames = literalNames
parser.symbolicNames = symbolicNames parser.symbolicNames = symbolicNames
<namedActions.members> // TODO <namedActions.members>
parser.grammarFileName = "<parser.grammarFileName; format="java-escape">" parser.grammarFileName = "<parser.grammarFileName; format="java-escape">"
return parser return parser
@ -135,10 +125,10 @@ const (
<funcs; separator="\n"> <funcs; separator="\n">
<if(sempredFuncs)> <if(sempredFuncs)>
func (p *<parser.name>) sempred(localctx, ruleIndex int, predIndex int) { func (p *<parser.name>) Sempred(localctx, ruleIndex int, predIndex int) {
switch ruleIndex { switch ruleIndex {
<parser.sempredFuncs.values:{f | case <f.ruleIndex>: <parser.sempredFuncs.values:{f | case <f.ruleIndex>:
return p.<f.name>_sempred(localctx, predIndex);}; separator="\n"> return p.<f.name>_Sempred(localctx, predIndex);}; separator="\n">
default: default:
panic("No predicate with index:" + ruleIndex) panic("No predicate with index:" + ruleIndex)
} }
@ -151,12 +141,12 @@ func (p *<parser.name>) sempred(localctx, ruleIndex int, predIndex int) {
dumpActions(recog, argFuncs, actionFuncs, sempredFuncs) ::= << dumpActions(recog, argFuncs, actionFuncs, sempredFuncs) ::= <<
<if(actionFuncs)> <if(actionFuncs)>
func (l *<lexer.name>) action(localctx, ruleIndex int, actionIndex int) { func (l *<lexer.name>) Action(localctx, ruleIndex int, actionIndex int) {
switch ruleIndex) { switch ruleIndex) {
<recog.actionFuncs.values:{f| <recog.actionFuncs.values:{f|
case <f.ruleIndex>: case <f.ruleIndex>:
p.<f.name>_action(localctx, actionIndex) p.<f.name>_Action(localctx, actionIndex)
break;}; separator="\n"> }; separator="\n">
default: default:
panic("No registered action for:" + ruleIndex) panic("No registered action for:" + ruleIndex)
} }
@ -165,10 +155,10 @@ case <f.ruleIndex>:
<actionFuncs.values; separator="\n"> <actionFuncs.values; separator="\n">
<endif> <endif>
<if(sempredFuncs)> <if(sempredFuncs)>
func (l *<lexer.name>) sempred(localctx, ruleIndex, predIndex) { func (l *<lexer.name>) Sempred(localctx, ruleIndex, predIndex) {
switch ruleIndex) { switch ruleIndex) {
<recog.sempredFuncs.values:{f| case <f.ruleIndex>: <recog.sempredFuncs.values:{f| case <f.ruleIndex>:
return l.<f.name>_sempred(localctx, predIndex);}; separator="\n"> return l.<f.name>_Sempred(localctx, predIndex);}; separator="\n">
default: default:
panic("No registered predicate for:" + ruleIndex) panic("No registered predicate for:" + ruleIndex)
} }
@ -184,12 +174,12 @@ func (l *<lexer.name>) sempred(localctx, ruleIndex, predIndex) {
*/ */
RuleActionFunction(r, actions) ::= << RuleActionFunction(r, actions) ::= <<
func (l *<lexer.name>) <r.name>_action(localctx , actionIndex) { func (l *<lexer.name>) <r.name>_Action(localctx , actionIndex) {
switch actionIndex) { switch actionIndex) {
<actions:{index| <actions:{index|
case <index>: case <index>:
<actions.(index)> <actions.(index)>
break;}; separator="\n"> }; separator="\n">
default: default:
panic("No registered action for:" + actionIndex) panic("No registered action for:" + actionIndex)
} }
@ -200,7 +190,7 @@ case <index>:
* overriding implementation impossible to maintain. * overriding implementation impossible to maintain.
*/ */
RuleSempredFunction(r, actions) ::= << RuleSempredFunction(r, actions) ::= <<
func (s *<if(parser)><parser.name><else><lexer.name><endif>) <r.name>_sempred(localctx, predIndex int) { func (s *<if(parser)><parser.name><else><lexer.name><endif>) <r.name>_Sempred(localctx, predIndex int) {
switch predIndex { switch predIndex {
<actions:{index| case <index>: <actions:{index| case <index>:
return <actions.(index)>;}; separator="\n"> return <actions.(index)>;}; separator="\n">
@ -221,8 +211,8 @@ RuleFunction(currentRule,args,code,locals,ruleCtx,altLabelCtxs,namedActions,fina
func (p *<parser.name>) <currentRule.name>(<currentRule.args:{a | <a.name>}; separator=", ">) { func (p *<parser.name>) <currentRule.name>(<currentRule.args:{a | <a.name>}; separator=", ">) {
localctx := New<currentRule.ctxType>(p, p._ctx, p.state<currentRule.args:{a | , <a.name>}>) localctx := New<currentRule.ctxType>(p, p.GetParserRuleContext(), p.GetState()<currentRule.args:{a | , <a.name>}>)
p.enterRule(localctx, <currentRule.startState>, <parser.name>RULE_<currentRule.name>) p.EnterRule(localctx, <currentRule.startState>, <parser.name>RULE_<currentRule.name>)
<namedActions.init> <namedActions.init>
<locals; separator="\n"> <locals; separator="\n">
@ -231,17 +221,17 @@ func (p *<parser.name>) <currentRule.name>(<currentRule.args:{a | <a.name>}; sep
<if(exceptions)> <if(exceptions)>
<exceptions; separator="\n"> // TODO not sure how exceptions are passed into clause <exceptions; separator="\n"> // TODO not sure how exceptions are passed into clause
<else> <else>
if v, ok = x.(RecognitionException); ok { if v, ok = x.(antlr4.RecognitionException); ok {
localctx.exception = v localctx.SetException( v )
p._errHandler.reportError(p, v) p.GetErrorHandler().ReportError(p, v)
p._errHandler.recover(p, v) p.GetErrorHandler().Recover(p, v)
} else { } else {
panic(re) panic(re)
} }
<endif> <endif>
// TODO if the above panic call is invoked then the below finally clause may not be called // TODO if the above panic call is invoked then the below finally clause may not be called
<finallyAction> <finallyAction>
p.exitRule() p.ExitRule()
} }
} }
@ -263,18 +253,18 @@ LeftRecursiveRuleFunction(currentRule,args,code,locals,ruleCtx,altLabelCtxs,
func (p *<parser.name>) <currentRule.name>(_p<if(currentRule.args)>, <args:{a | , <a>}><endif>) { func (p *<parser.name>) <currentRule.name>(_p<if(currentRule.args)>, <args:{a | , <a>}><endif>) {
_parentctx := p.getParent() _parentctx := p.GetParent().(IParserRuleContext)
_parentState := p.getState() _parentState := p.GetState()
localctx := New<currentRule.ctxType>(p, p._ctx, _parentState<args:{a | , <a.name>}>) localctx := New<currentRule.ctxType>(p, p.GetParserRuleContext(), _parentState<args:{a | , <a.name>}>)
_prevctx := localctx _prevctx := localctx
_startState := <currentRule.startState> _startState := <currentRule.startState>
p.enterRecursionRule(localctx, <currentRule.startState>, <parser.name>RULE_<currentRule.name>, _p) p.EnterRecursionRule(localctx, <currentRule.startState>, <parser.name>RULE_<currentRule.name>, _p)
<namedActions.init> <namedActions.init>
<locals; separator="\n"> <locals; separator="\n">
defer func(){ defer func(){
<finallyAction> <finallyAction>
p.unrollRecursionContexts(_parentctx) p.UnrollRecursionContexts(_parentctx)
} }
try { try {
@ -282,26 +272,25 @@ func (p *<parser.name>) <currentRule.name>(_p<if(currentRule.args)>, <args:{a |
<postamble; separator="\n"> <postamble; separator="\n">
<namedActions.after> <namedActions.after>
} catch( error) { } catch( error) {
if(error instanceof IRecognitionException) { if v, ok = x.(antlr4.RecognitionException); ok {
localctx.exception = error localctx.SetException(v)
p._errHandler.reportError(p, error) p.GetErrorHandler().ReportError(p, v)
p._errHandler.recover(p, error) p.GetErrorHandler().Recover(p, v)
} else { } else {
panic(error) panic(error)
} }
} finally { } finally {
<finallyAction> <finallyAction>
p.unrollRecursionContexts(_parentctx) p.UnrollRecursionContexts(_parentctx)
} }
return localctx return localctx
} }
>> >>
CodeBlockForOuterMostAlt(currentOuterMostAltCodeBlock, locals, preamble, ops) ::= << CodeBlockForOuterMostAlt(currentOuterMostAltCodeBlock, locals, preamble, ops) ::= <<
<if(currentOuterMostAltCodeBlock.altLabel)>localctx = New<currentOuterMostAltCodeBlock.altLabel; format="cap">Context(p, localctx)<endif> <if(currentOuterMostAltCodeBlock.altLabel)>localctx = New<currentOuterMostAltCodeBlock.altLabel; format="cap">Context(p, localctx)<endif>
p.enterOuterAlt(localctx, <currentOuterMostAltCodeBlock.alt.altNum>) p.EnterOuterAlt(localctx, <currentOuterMostAltCodeBlock.alt.altNum>)
<CodeBlockForAlt(currentAltCodeBlock=currentOuterMostAltCodeBlock, ...)> <CodeBlockForAlt(currentAltCodeBlock=currentOuterMostAltCodeBlock, ...)>
>> >>
@ -313,10 +302,10 @@ CodeBlockForAlt(currentAltCodeBlock, locals, preamble, ops) ::= <<
>> >>
LL1AltBlock(choice, preamble, alts, error) ::= << LL1AltBlock(choice, preamble, alts, error) ::= <<
p.state = <choice.stateNumber> p.SetState(<choice.stateNumber>)
<if(choice.label)><labelref(choice.label)> = p._input.LT(1);<endif> <if(choice.label)><labelref(choice.label)> = p.GetTokenStream().LT(1)<endif>
<preamble; separator="\n"> <preamble; separator="\n">
switch p._input.LA(1) { switch p.GetTokenStream().LA(1) {
<choice.altLook,alts:{look,alt| <cases(ttypes=look)> <choice.altLook,alts:{look,alt| <cases(ttypes=look)>
<alt> <alt>
break;}; separator="\n"> break;}; separator="\n">
@ -326,8 +315,8 @@ default:
>> >>
LL1OptionalBlock(choice, alts, error) ::= << LL1OptionalBlock(choice, alts, error) ::= <<
p.state = <choice.stateNumber> p.SetState(<choice.stateNumber>)
switch p._input.LA(1) { switch p.GetTokenStream().LA(1) {
<choice.altLook,alts:{look,alt| <cases(ttypes=look)> <choice.altLook,alts:{look,alt| <cases(ttypes=look)>
<alt> <alt>
break;}; separator="\n"> break;}; separator="\n">
@ -337,7 +326,7 @@ default:
>> >>
LL1OptionalBlockSingleAlt(choice, expr, alts, preamble, error, followExpr) ::= << LL1OptionalBlockSingleAlt(choice, expr, alts, preamble, error, followExpr) ::= <<
p.state = <choice.stateNumber> p.SetState(<choice.stateNumber>)
<preamble; separator="\n"> <preamble; separator="\n">
if <expr> { if <expr> {
<alts; separator="\n"> <alts; separator="\n">
@ -346,25 +335,25 @@ if <expr> {
>> >>
LL1StarBlockSingleAlt(choice, loopExpr, alts, preamble, iteration) ::= << LL1StarBlockSingleAlt(choice, loopExpr, alts, preamble, iteration) ::= <<
p.state = <choice.stateNumber> p.SetState(<choice.stateNumber>)
p._errHandler.sync(p) p.GetErrorHandler().Sync(p)
<preamble; separator="\n"> <preamble; separator="\n">
for <loopExpr> { for <loopExpr> {
<alts; separator="\n"> <alts; separator="\n">
p.state = <choice.loopBackStateNumber> p.SetState(<choice.loopBackStateNumber>)
p._errHandler.sync(p) p.GetErrorHandler().Sync(p)
<iteration> <iteration>
} }
>> >>
LL1PlusBlockSingleAlt(choice, loopExpr, alts, preamble, iteration) ::= << LL1PlusBlockSingleAlt(choice, loopExpr, alts, preamble, iteration) ::= <<
p.state = <choice.blockStartStateNumber>; <! alt block decision !> p.SetState(<choice.blockStartStateNumber>) <! alt block decision !>
p._errHandler.sync(p) p.GetErrorHandler().Sync(p)
<preamble; separator="\n"> <preamble; separator="\n">
for ok := true; ok; ok = <loopExpr> { for ok := true; ok; ok = <loopExpr> {
<alts; separator="\n"> <alts; separator="\n">
p.state = <choice.stateNumber>; <! loopback/exit decision !> p.SetState(<choice.stateNumber>); <! loopback/exit decision !>
p._errHandler.sync(p) p.GetErrorHandler().Sync(p)
<iteration> <iteration>
} }
>> >>
@ -372,24 +361,23 @@ for ok := true; ok; ok = <loopExpr> {
// LL(*) stuff // LL(*) stuff
AltBlock(choice, preamble, alts, error) ::= << AltBlock(choice, preamble, alts, error) ::= <<
p.state = <choice.stateNumber> p.SetState(<choice.stateNumber>)
p._errHandler.sync(p) p.GetErrorHandler().Sync(p)
<if(choice.label)><labelref(choice.label)> = _input.LT(1)<endif> <if(choice.label)><labelref(choice.label)> = _input.LT(1)<endif>
<preamble; separator="\n"> <preamble; separator="\n">
la_ := p._interp.adaptivePredict(p._input,<choice.decision>,p._ctx) la_ := p.GetInterpreter().AdaptivePredict(p.GetTokenStream(),<choice.decision>,p.GetParserRuleContext())
switch la_) { switch la_) {
<alts:{alt | <alts:{alt |
case <i>: case <i>:
<alt> <alt>
// break
}; separator="\n"> }; separator="\n">
} }
>> >>
OptionalBlock(choice, alts, error) ::= << OptionalBlock(choice, alts, error) ::= <<
p.state = <choice.stateNumber> p.SetState(<choice.stateNumber>)
p._errHandler.sync(p) p.GetErrorHandler().Sync(p)
la_ := p._interp.adaptivePredict(p._input,<choice.decision>,p._ctx) la_ := p.GetInterpreter().AdaptivePredict(p.GetTokenStream(),<choice.decision>,p.GetParserRuleContext())
<alts:{alt | <alts:{alt |
if la_==<i><if(!choice.ast.greedy)>+1<endif> { if la_==<i><if(!choice.ast.greedy)>+1<endif> {
<alt> <alt>
@ -397,42 +385,42 @@ if la_==<i><if(!choice.ast.greedy)>+1<endif> {
} }
>> >>
StarBlock(choice, alts, sync, iteration) ::= << StarBlock(choice, alts, Sync, iteration) ::= <<
p.state = <choice.stateNumber> p.SetState(<choice.stateNumber>)
p._errHandler.sync(p) p.GetErrorHandler().Sync(p)
_alt := p._interp.adaptivePredict(p._input,<choice.decision>,p._ctx) _alt := p.GetInterpreter().AdaptivePredict(p.GetTokenStream(),<choice.decision>,p.GetParserRuleContext())
for _alt!=<choice.exitAlt> && _alt!= ATNINVALID_ALT_NUMBER { for _alt!=<choice.exitAlt> && _alt!= antlr4.ATNINVALID_ALT_NUMBER {
if(_alt==1<if(!choice.ast.greedy)>+1<endif>) { if(_alt==1<if(!choice.ast.greedy)>+1<endif>) {
<iteration> <iteration>
<alts> <! should only be one !> <alts> <! should only be one !>
} }
p.state = <choice.loopBackStateNumber> p.SetState(<choice.loopBackStateNumber>)
p._errHandler.sync(p) p.GetErrorHandler().Sync(p)
_alt = p._interp.adaptivePredict(p._input,<choice.decision>,p._ctx) _alt = p.GetInterpreter().AdaptivePredict(p.GetTokenStream(),<choice.decision>,p.GetParserRuleContext())
} }
>> >>
PlusBlock(choice, alts, error) ::= << PlusBlock(choice, alts, error) ::= <<
p.state = <choice.blockStartStateNumber>; <! alt block decision !> p.SetState(<choice.blockStartStateNumber>) <! alt block decision !>
p._errHandler.sync(p) p.GetErrorHandler().Sync(p)
_alt := 1<if(!choice.ast.greedy)>+1<endif> _alt := 1<if(!choice.ast.greedy)>+1<endif>
for ok := true; ok; ok = _alt!=<choice.exitAlt> && _alt!= ATNINVALID_ALT_NUMBER { for ok := true; ok; ok = _alt!=<choice.exitAlt> && _alt!= antlr4.ATNINVALID_ALT_NUMBER {
switch _alt) { switch _alt) {
<alts:{alt| <alts:{alt|
case <i><if(!choice.ast.greedy)>+1<endif>: case <i><if(!choice.ast.greedy)>+1<endif>:
<alt> <alt>
//break;}; separator="\n"> //}; separator="\n">
default: default:
<error> <error>
} }
p.state = <choice.loopBackStateNumber>; <! loopback/exit decision !> p.SetState(<choice.loopBackStateNumber>) <! loopback/exit decision !>
p._errHandler.sync(p) p.GetErrorHandler().Sync(p)
_alt = p._interp.adaptivePredict(p._input,<choice.decision>, p._ctx) _alt = p.GetInterpreter().AdaptivePredict(p.GetTokenStream(),<choice.decision>, p.GetParserRuleContext())
} }
>> >>
Sync(s) ::= "sync(<s.expecting.name>)" Sync(s) ::= "Sync(<s.expecting.name>)"
ThrowNoViableAlt(t) ::= "panic(NewNoViableAltException(p))" ThrowNoViableAlt(t) ::= "panic(NewNoViableAltException(p))"
@ -473,13 +461,13 @@ cases(ttypes) ::= <<
>> >>
InvokeRule(r, argExprsChunks) ::= << InvokeRule(r, argExprsChunks) ::= <<
p.state = <r.stateNumber> p.SetState(<r.stateNumber>)
<if(r.labels)><r.labels:{l | <labelref(l)> = }><endif>p.<r.name>(<if(r.ast.options.p)><r.ast.options.p><if(argExprsChunks)>,<endif><endif><argExprsChunks>) <if(r.labels)><r.labels:{l | <labelref(l)> = }><endif>p.<r.name>(<if(r.ast.options.p)><r.ast.options.p><if(argExprsChunks)>,<endif><endif><argExprsChunks>)
>> >>
MatchToken(m) ::= << MatchToken(m) ::= <<
p.state = <m.stateNumber> p.SetState(<m.stateNumber>)
<if(m.labels)><m.labels:{l | <labelref(l)> = }><endif>p.match(<parser.name><m.name>) <if(m.labels)><m.labels:{l | <labelref(l)> = }><endif>p.Match(<parser.name><m.name>)
>> >>
MatchSet(m, expr, capture) ::= "<CommonSetStuff(m, expr, capture, false)>" MatchSet(m, expr, capture) ::= "<CommonSetStuff(m, expr, capture, false)>"
@ -487,20 +475,20 @@ MatchSet(m, expr, capture) ::= "<CommonSetStuff(m, expr, capture, false)>"
MatchNotSet(m, expr, capture) ::= "<CommonSetStuff(m, expr, capture, true)>" MatchNotSet(m, expr, capture) ::= "<CommonSetStuff(m, expr, capture, true)>"
CommonSetStuff(m, expr, capture, invert) ::= << CommonSetStuff(m, expr, capture, invert) ::= <<
p.state = <m.stateNumber> p.SetState(<m.stateNumber>)
<if(m.labels)><m.labels:{l | <labelref(l)> = }>p._input.LT(1);<endif> <if(m.labels)><m.labels:{l | <labelref(l)> = }>p.GetTokenStream().LT(1);<endif>
<capture> <capture>
<if(invert)>if <m.varName>\<=0 || <expr> <else>if !(<expr>)<endif> { <if(invert)>if <m.varName>\<=0 || <expr> <else>if !(<expr>)<endif> {
<if(m.labels)><m.labels:{l | <labelref(l)> = }><endif>p._errHandler.recoverInline(this) <if(m.labels)><m.labels:{l | <labelref(l)> = }><endif>p.GetErrorHandler().RecoverInline(p)
} }
else { else {
p.consume() p.Consume()
} }
>> >>
Wildcard(w) ::= << Wildcard(w) ::= <<
p.state = <w.stateNumber> p.SetState(<w.stateNumber>)
<if(w.labels)><w.labels:{l | <labelref(l)> = }><endif>matchWildcard() <if(w.labels)><w.labels:{l | <labelref(l)> = }><endif>MatchWildcard()
>> >>
// ACTION STUFF // ACTION STUFF
@ -510,7 +498,7 @@ Action(a, foo, chunks) ::= "<chunks>"
ArgAction(a, chunks) ::= "<chunks>" ArgAction(a, chunks) ::= "<chunks>"
SemPred(p, chunks, failChunks) ::= << SemPred(p, chunks, failChunks) ::= <<
p.state = <p.stateNumber> p.SetState(<p.stateNumber>)
if !( <chunks>) { if !( <chunks>) {
panic( FailedPredicateException(p, <p.predicate><if(failChunks)>, <failChunks><elseif(p.msg)>, <p.msg><endif>)) panic( FailedPredicateException(p, <p.predicate><if(failChunks)>, <failChunks><elseif(p.msg)>, <p.msg><endif>))
} }
@ -557,20 +545,20 @@ TokenPropertyRef_int(t) ::= "(<ctx(t)>.<t.label> == null ? 0 : parseInt(<ctx(t)>
RulePropertyRef_start(r) ::= "(<ctx(r)>.<r.label>==null ? null : <ctx(r)>.<r.label>.start)" RulePropertyRef_start(r) ::= "(<ctx(r)>.<r.label>==null ? null : <ctx(r)>.<r.label>.start)"
RulePropertyRef_stop(r) ::= "(<ctx(r)>.<r.label>==null ? null : <ctx(r)>.<r.label>.stop)" RulePropertyRef_stop(r) ::= "(<ctx(r)>.<r.label>==null ? null : <ctx(r)>.<r.label>.stop)"
RulePropertyRef_text(r) ::= "(<ctx(r)>.<r.label>==null ? null : p._input.getText(NewInterval(<ctx(r)>.<r.label>.start,<ctx(r)>.<r.label>.stop)))" RulePropertyRef_text(r) ::= "(<ctx(r)>.<r.label>==null ? null : p.GetTokenStream().GetTextFromInterval(NewInterval(<ctx(r)>.<r.label>.GetStart(),<ctx(r)>.<r.label>.GetStop())))"
RulePropertyRef_ctx(r) ::= "<ctx(r)>.<r.label>" RulePropertyRef_ctx(r) ::= "<ctx(r)>.<r.label>"
RulePropertyRef_parser(r) ::= "this" RulePropertyRef_parser(r) ::= "this"
ThisRulePropertyRef_start(r) ::= "localctx.start" ThisRulePropertyRef_start(r) ::= "localctx.start"
ThisRulePropertyRef_stop(r) ::= "localctx.stop" ThisRulePropertyRef_stop(r) ::= "localctx.stop"
ThisRulePropertyRef_text(r) ::= "p._input.getText(NewInterval(localctx.start, p._input.LT(-1)))" ThisRulePropertyRef_text(r) ::= "p.GetTokenStream().GetTextFromInterval(NewInterval(localctx.GetStart(), p.GetTokenStream().LT(-1)))"
ThisRulePropertyRef_ctx(r) ::= "localctx" ThisRulePropertyRef_ctx(r) ::= "localctx"
ThisRulePropertyRef_parser(r) ::= "p" ThisRulePropertyRef_parser(r) ::= "p"
NonLocalAttrRef(s) ::= "getInvokingContext(<s.ruleIndex>).<s.name>" NonLocalAttrRef(s) ::= "getInvokingContext(<s.ruleIndex>).<s.name>"
SetNonLocalAttr(s, rhsChunks) ::= "getInvokingContext(<s.ruleIndex>).<s.name> = <rhsChunks>" SetNonLocalAttr(s, rhsChunks) ::= "getInvokingContext(<s.ruleIndex>).<s.name> = <rhsChunks>"
AddToLabelList(a) ::= "<ctx(a.label)>.<a.listName>.push(<labelref(a.label)>);" AddToLabelList(a) ::= "<ctx(a.label)>.<a.listName> = append(<ctx(a.label)>.<a.listName>, push(<labelref(a.label)>)"
TokenDecl(t) ::= "p.<t.name> = nil // <TokenLabelType()>" TokenDecl(t) ::= "p.<t.name> = nil // <TokenLabelType()>"
TokenTypeDecl(t) ::= "<t.name> := 0 // <TokenLabelType()> type" TokenTypeDecl(t) ::= "<t.name> := 0 // <TokenLabelType()> type"
@ -580,25 +568,22 @@ RuleContextListDecl(rdecl) ::= "p.<rdecl.name> = [] // of <rdecl.ctxName>s"
ContextTokenGetterDecl(t) ::= << ContextTokenGetterDecl(t) ::= <<
<t.name>() { <t.name>() {
return s.getToken(<parser.name><t.name>, 0) return s.GetToken(<parser.name><t.name>, 0)
} }
>> >>
// should never be called // should never be called
ContextTokenListGetterDecl(t) ::= << ContextTokenListGetterDecl(t) ::= <<
def <t.name>_list(self): def <t.name>_list(self):
return self.getTokens(<parser.name><t.name>) return self.GetTokens(<parser.name><t.name>)
>> >>
ContextTokenListIndexedGetterDecl(t) ::= << ContextTokenListIndexedGetterDecl(t) ::= <<
<t.name>(i int) { <t.name>(i int) {
//if(i==undefined) { if 0 > i {
// i = null return s.GetTokens(<parser.name><t.name>)
//}
if 0 > i { // TODO
return s.getTokens(<parser.name><t.name>)
} else { } else {
return s.getToken(<parser.name><t.name>, i) return s.GetToken(<parser.name><t.name>, i)
} }
} }
@ -606,23 +591,23 @@ ContextTokenListIndexedGetterDecl(t) ::= <<
ContextRuleGetterDecl(r) ::= << ContextRuleGetterDecl(r) ::= <<
<r.name>() { <r.name>() {
return s.getTypedRuleContext(<r.ctxName>,0) return s.GetTypedRuleContext(<r.ctxName>,0)
} }
>> >>
// should never be called // should never be called
ContextRuleListGetterDecl(r) ::= << ContextRuleListGetterDecl(r) ::= <<
func <r.name>_list(self): func <r.name>_list(self):
return s.getTypedRuleContexts(<r.ctxName>) return s.GetTypedRuleContexts(<r.ctxName>)
>> >>
ContextRuleListIndexedGetterDecl(r) ::= << ContextRuleListIndexedGetterDecl(r) ::= <<
<r.name>(i int) { <r.name>(i int) {
if 0 > i { // TODO if 0 > i {
return s.getTypedRuleContexts(<r.ctxName>) return s.GetTypedRuleContexts(<r.ctxName>)
} else { } else {
return s.getTypedRuleContext(<r.ctxName>,i) return s.GetTypedRuleContext(<r.ctxName>,i)
} }
} }
>> >>
@ -639,8 +624,8 @@ ImplicitRuleLabel(ruleName) ::= "_<ruleName>"
ImplicitSetLabel(id) ::= "_tset<id>" ImplicitSetLabel(id) ::= "_tset<id>"
ListLabelName(label) ::= "<label>" ListLabelName(label) ::= "<label>"
CaptureNextToken(d) ::= "<d.varName> = p._input.LT(1)" CaptureNextToken(d) ::= "<d.varName> = p.GetTokenStream().LT(1)"
CaptureNextTokenType(d) ::= "<d.varName> = p._input.LA(1);" CaptureNextTokenType(d) ::= "<d.varName> = p.GetTokenStream().LA(1);"
StructDecl(struct,ctorAttrs,attrs,getters,dispatchMethods,interfaces,extensionMembers, StructDecl(struct,ctorAttrs,attrs,getters,dispatchMethods,interfaces,extensionMembers,
superClass={ParserRuleContext}) ::= << superClass={ParserRuleContext}) ::= <<
@ -651,7 +636,7 @@ type <struct.name> struct {
parser antlr4.IParser parser antlr4.IParser
} }
func New<struct.name>(parser antlr4.IParser, parent antlr4.IParserRuleContext, invokingState int<struct.ctorAttrs:{a | , <a.name>}>) <struct.name> { func New<struct.name>(parser antlr4.IParser, parent antlr4.IParserRuleContext, invokingState int<struct.ctorAttrs:{a | , <a.name>}>) *<struct.name> {
var p = new(<struct.name>) var p = new(<struct.name>)
@ -684,7 +669,7 @@ type <struct.name> struct {
parser antlr4.IParser parser antlr4.IParser
} }
func New<struct.name>(parser antlr4.IParser, ctx antlr4.IParserRuleContext) <struct.name> { func New<struct.name>(parser antlr4.IParser, ctx antlr4.IParserRuleContext) *<struct.name> {
var p = new(<struct.name>) var p = new(<struct.name>)
@ -737,9 +722,9 @@ labelref(x) ::= "<if(!x.isLocal)>localctx.<endif><x.name>"
ctx(actionChunk) ::= "localctx" ctx(actionChunk) ::= "localctx"
// used for left-recursive rules // used for left-recursive rules
recRuleAltPredicate(ruleName,opPrec) ::= "p.precpred(p._ctx, <opPrec>)" recRuleAltPredicate(ruleName,opPrec) ::= "p.precpred(p.GetParserRuleContext(), <opPrec>)"
recRuleSetReturnAction(src,name) ::= "$<name>=$<src>.<name>" recRuleSetReturnAction(src,name) ::= "$<name>=$<src>.<name>"
recRuleSetStopToken() ::= "p._ctx.stop = p._input.LT(-1);" recRuleSetStopToken() ::= "p.GetParserRuleContext().stop = p.GetTokenStream().LT(-1);"
recRuleAltStartAction(ruleName, ctxName, label) ::= << recRuleAltStartAction(ruleName, ctxName, label) ::= <<
localctx = New<ctxName>Context(this, _parentctx, _parentState) localctx = New<ctxName>Context(this, _parentctx, _parentState)
@ -761,7 +746,7 @@ p.pushNewRecursionContext(localctx, _startState, <parser.name>RULE_<ruleName>)
recRuleReplaceContext(ctxName) ::= << recRuleReplaceContext(ctxName) ::= <<
localctx = New<ctxName>Context(this, localctx) localctx = New<ctxName>Context(this, localctx)
p._ctx = localctx p.GetParserRuleContext() = localctx
_prevctx = localctx _prevctx = localctx
>> >>
@ -777,7 +762,10 @@ LexerFile(lexerFile, lexer, namedActions) ::= <<
<fileHeader(lexerFile.grammarFileName, lexerFile.ANTLRVersion)> <fileHeader(lexerFile.grammarFileName, lexerFile.ANTLRVersion)>
package parser package parser
import "antlr4" import (
"antlr4"
"strings"
)
<namedActions.header> <namedActions.header>
@ -787,10 +775,9 @@ import "antlr4"
Lexer(lexer, atn, actionFuncs, sempredFuncs, superClass) ::= << Lexer(lexer, atn, actionFuncs, sempredFuncs, superClass) ::= <<
<atn> var serializedLexerAtn = <atn>
var lexerDeserializer = antlr4.NewATNDeserializer(nil) var lexerDeserializer = antlr4.NewATNDeserializer(nil)
var lexerAtn = lexerDeserializer.Deserialize(serializedATN) var lexerAtn = lexerDeserializer.Deserialize( []rune( serializedLexerAtn ) )
var lexerModeNames = []string{ <lexer.modes:{m| "<m>"}; separator=", ", wrap, anchor> } var lexerModeNames = []string{ <lexer.modes:{m| "<m>"}; separator=", ", wrap, anchor> }
var lexerLiteralNames = []string{ <lexer.literalNames:{t | <t>}; null="nil", separator=", ", wrap, anchor> } var lexerLiteralNames = []string{ <lexer.literalNames:{t | <t>}; null="nil", separator=", ", wrap, anchor> }
@ -848,8 +835,7 @@ const (
SerializedATN(model) ::= << SerializedATN(model) ::= <<
<! only one segment, can be inlined !> <! only one segment, can be inlined !>
strings.Join( []string{ "<model.serialized; wrap={",<\n> "}>" }, "" )
var serializedATN = []rune("<model.serialized>")
>> >>

View File

@ -52,7 +52,7 @@ public class GoTarget extends Target {
badWords.add("rule"); badWords.add("rule");
badWords.add("parserRule"); badWords.add("parserRule");
} }
//
// /** // /**
// * {@inheritDoc} // * {@inheritDoc}
// * <p/> // * <p/>
@ -132,7 +132,7 @@ public class GoTarget extends Target {
// System.out.println("AfTER: " + s); // System.out.println("AfTER: " + s);
// return s; // return s;
// } // }
//
// @Override // @Override
// public String encodeIntAsCharEscape(int v) { // public String encodeIntAsCharEscape(int v) {
// if (v < Character.MIN_VALUE || v > Character.MAX_VALUE) { // if (v < Character.MIN_VALUE || v > Character.MAX_VALUE) {
@ -150,7 +150,6 @@ public class GoTarget extends Target {
// String hex = Integer.toHexString(v|0x10000).substring(1,5); // String hex = Integer.toHexString(v|0x10000).substring(1,5);
// String h2 = "\\u"+hex; // String h2 = "\\u"+hex;
// //
// System.out.println("Token : " + h2);
// return h2; // return h2;
// } // }