Various refactorings to properly support package export

Peter Boyer 2015-12-23 11:00:30 -06:00
parent ff70876ae8
commit f5cf1cbf68
43 changed files with 1538 additions and 574 deletions
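
The renames below follow from Go's visibility rule: an identifier is exported from its package only when its name begins with an upper-case letter. A minimal sketch of the rule (names illustrative, not from this commit):

package antlr4

// GetStateNumber is exported: callable from other packages as antlr4.GetStateNumber().
func GetStateNumber() int { return 42 }

// getStateNumber is unexported: visible only inside package antlr4.
func getStateNumber() int { return 42 }

Since the generated parsers live in a separate package (package parser) and call into antlr4, every method and field they touch must be exported, hence the wholesale getX -> GetX renames in the runtime files below.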

View File

@ -0,0 +1,861 @@
/** The ANTLR tool checks that output templates are compatible with its code generation.
* For now, a simple string match is used on the x.y of the x.y.z version scheme.
* The value must match Tool.VERSION when the templates are loaded.
*
* REQUIRED.
*/
fileHeader(grammarFileName, ANTLRVersion) ::= <<
// Generated from <grammarFileName; format="java-escape"> by ANTLR <ANTLRVersion>
>>
// args must be <object-model-object>, <fields-resulting-in-STs>
ParserFile(file, parser, namedActions) ::= <<
<fileHeader(file.grammarFileName, file.ANTLRVersion)>
package parser // <file.grammarName>
import (
"antlr4"
"strconv"
"strings"
)
<namedActions.header>
<parser>
>>
ListenerFile(file, header) ::= <<
<fileHeader(file.grammarFileName, file.ANTLRVersion)>
package parser // <file.grammarName>
import "antlr4"
// This type defines a complete listener for a parse tree produced by <file.parserName>.
type <file.grammarName>Listener struct {
}
<file.listenerNames:{lname |
// Enter a parse tree produced by <file.parserName>#<lname>.
func (l *<file.grammarName>Listener) enter<lname; format="cap">(ctx antlr4.IParserRuleContext) {
\}
// Exit a parse tree produced by <file.parserName>#<lname>.
func (l *<file.grammarName>Listener) exit<lname; format="cap">(ctx antlr4.IParserRuleContext) {
\}
}; separator="\n">
>>
VisitorFile(file, header) ::= <<
<fileHeader(file.grammarFileName, file.ANTLRVersion)>
package parser // <file.grammarName>
import "antlr4"
<header>
// This type defines a complete generic visitor for a parse tree produced by <file.parserName>.
type <file.grammarName>Visitor struct {
}
<file.visitorNames:{lname |
// Visit a parse tree produced by <file.parserName>#<lname>.
func (l *<file.grammarName>Visitor) visit<lname; format="cap">(ctx antlr4.IParserRuleContext) {
\}
}; separator="\n">
>>
Parser(parser, funcs, atn, sempredFuncs, superClass) ::= <<
<if(superClass)>
var <superClass> = require('./<superClass>').<superClass> // TODO
<endif>
var parserATN = <atn>
var deserializer = antlr4.NewATNDeserializer()
var deserializedATN = deserializer.Deserialize( []rune( parserATN ) )
var literalNames = []string{ <parser.literalNames:{t | <t>}; null="nil", separator=", ", wrap, anchor> }
var symbolicNames = []string{ <parser.symbolicNames:{t | <t>}; null="nil", separator=", ", wrap, anchor> }
var ruleNames = []string{ <parser.ruleNames:{r | "<r>"}; separator=", ", wrap, anchor> }
type <parser.name> struct {
<superClass; null="*antlr4.Parser">
ruleNames []string
literalNames []string
symbolicNames []string
grammarFileName string
}
func New<parser.name>(input antlr4.TokenStream) *<parser.name> {
var decisionToDFA = make([]*antlr4.DFA,len(deserializedATN.DecisionToState))
var sharedContextCache = antlr4.NewPredictionContextCache()
for index, ds := range deserializedATN.DecisionToState {
decisionToDFA[index] = antlr4.NewDFA(ds, index)
}
parser := new(<parser.name>)
parser.InitParser(input)
parser.Interpreter = antlr4.NewParserATNSimulator(parser, deserializedATN, decisionToDFA, sharedContextCache)
parser.ruleNames = ruleNames
parser.literalNames = literalNames
parser.symbolicNames = symbolicNames
<namedActions.members>
parser.grammarFileName = "<parser.grammarFileName; format="java-escape">"
return parser
}
const(
<parser.name>EOF = antlr4.TokenEOF
<if(parser.tokens)>
<parser.tokens:{k | <parser.name><k> = <parser.tokens.(k)>}; separator="\n", wrap, anchor>
<endif>
)
const (
<parser.rules:{r | <parser.name>RULE_<r.name> = <r.index>}; separator="\n", wrap, anchor>
)
<funcs; separator="\n">
<if(sempredFuncs)>
func (p *<parser.name>) Sempred(localctx antlr4.IParserRuleContext, ruleIndex int, predIndex int) bool {
switch ruleIndex {
<parser.sempredFuncs.values:{f | case <f.ruleIndex>:
return p.<f.name>_Sempred(localctx, predIndex)}; separator="\n">
default:
panic("No predicate with index: " + strconv.Itoa(ruleIndex))
}
}
<sempredFuncs.values; separator="\n">
<endif>
>>
dumpActions(recog, argFuncs, actionFuncs, sempredFuncs) ::= <<
<if(actionFuncs)>
func (l *<recog.name>) Action(localctx antlr4.IParserRuleContext, ruleIndex int, actionIndex int) {
switch ruleIndex {
<recog.actionFuncs.values:{f|
case <f.ruleIndex>:
l.<f.name>_Action(localctx, actionIndex)
}; separator="\n">
default:
panic("No registered action for: " + strconv.Itoa(ruleIndex))
}
}
<actionFuncs.values; separator="\n">
<endif>
<if(sempredFuncs)>
func (l *<recog.name>) Sempred(localctx antlr4.IParserRuleContext, ruleIndex int, predIndex int) bool {
switch ruleIndex {
<recog.sempredFuncs.values:{f| case <f.ruleIndex>:
return l.<f.name>_Sempred(localctx, predIndex)}; separator="\n">
default:
panic("No registered predicate for: " + strconv.Itoa(ruleIndex))
}
}
<sempredFuncs.values; separator="\n">
<endif>
>>
/* This generates a private method since the actionIndex is generated, making an
* overriding implementation impossible to maintain.
*/
RuleActionFunction(r, actions) ::= <<
func (l *<lexer.name>) <r.name>_Action(localctx antlr4.IParserRuleContext, actionIndex int) {
switch actionIndex {
<actions:{index|
case <index>:
<actions.(index)>
}; separator="\n">
default:
panic("No registered action for:" + actionIndex)
}
}
>>
/* This generates a private method since the predIndex is generated, making an
* overriding implementation impossible to maintain.
*/
RuleSempredFunction(r, actions) ::= <<
func (s *<if(parser)><parser.name><else><lexer.name><endif>) <r.name>_Sempred(localctx antlr4.IParserRuleContext, predIndex int) bool {
switch predIndex {
<actions:{index| case <index>:
return <actions.(index)>;}; separator="\n">
default:
panic("No predicate with index:" + predIndex)
}
}
>>
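As a concrete instance of the template above: for a hypothetical left-recursive rule expr whose predicate chunk comes from the recRuleAltPredicate template later in this file, the generated method would look roughly like this (parser name and indices are illustrative):

func (p *ExprParser) expr_Sempred(localctx antlr4.IParserRuleContext, predIndex int) bool {
switch predIndex {
case 0:
// chunk produced by recRuleAltPredicate(ruleName, opPrec)
return p.Precpred(p.GetParserRuleContext(), 2)
default:
panic("No predicate with index: " + strconv.Itoa(predIndex))
}
}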
RuleFunction(currentRule,args,code,locals,ruleCtx,altLabelCtxs,namedActions,finallyAction,postamble,exceptions) ::= <<
<ruleCtx>
<altLabelCtxs:{l | <altLabelCtxs.(l)>}; separator="\n">
func (p *<parser.name>) <currentRule.name>(<currentRule.args:{a | <a.name>}; separator=", ">) antlr4.IParserRuleContext {
localctx := New<currentRule.ctxType>(p, p.GetParserRuleContext(), p.GetState()<currentRule.args:{a | , <a.name>}>)
p.EnterRule(localctx, <currentRule.startState>, <parser.name>RULE_<currentRule.name>)
<namedActions.init>
<locals; separator="\n">
defer func() {
if err := recover(); err != nil {
<if(exceptions)>
<exceptions; separator="\n"> // TODO not sure how exceptions are passed into clause
<else>
if v, ok := err.(antlr4.RecognitionException); ok {
localctx.SetException(v)
p.GetErrorHandler().ReportError(p, v)
p.GetErrorHandler().Recover(p, v)
} else {
panic(err)
}
<endif>
}
// TODO a re-panic above skips the cleanup below, mirroring the original finally concern
<finallyAction>
p.ExitRule()
}()
<code>
<postamble; separator="\n">
<namedActions.after>
return localctx
}
>>
LeftRecursiveRuleFunction(currentRule,args,code,locals,ruleCtx,altLabelCtxs,
namedActions,finallyAction,postamble) ::=
<<
<ruleCtx>
<altLabelCtxs:{l | <altLabelCtxs.(l)>}; separator="\n">
func (p *<parser.name>) <currentRule.name>(_p int<if(currentRule.args)><args:{a | , <a>}><endif>) antlr4.IParserRuleContext {
_parentctx := p.GetParent().(antlr4.IParserRuleContext)
_parentState := p.GetState()
localctx := New<currentRule.ctxType>(p, p.GetParserRuleContext(), _parentState<args:{a | , <a.name>}>)
_prevctx := localctx
_startState := <currentRule.startState>
p.EnterRecursionRule(localctx, <currentRule.startState>, <parser.name>RULE_<currentRule.name>, _p)
<namedActions.init>
<locals; separator="\n">
defer func() {
if err := recover(); err != nil {
if v, ok := err.(antlr4.RecognitionException); ok {
localctx.SetException(v)
p.GetErrorHandler().ReportError(p, v)
p.GetErrorHandler().Recover(p, v)
} else {
panic(err)
}
}
<finallyAction>
p.UnrollRecursionContexts(_parentctx)
}()
<code>
<postamble; separator="\n">
<namedActions.after>
return localctx
}
>>
CodeBlockForOuterMostAlt(currentOuterMostAltCodeBlock, locals, preamble, ops) ::= <<
<if(currentOuterMostAltCodeBlock.altLabel)>localctx = New<currentOuterMostAltCodeBlock.altLabel; format="cap">Context(p, localctx)<endif>
p.EnterOuterAlt(localctx, <currentOuterMostAltCodeBlock.alt.altNum>)
<CodeBlockForAlt(currentAltCodeBlock=currentOuterMostAltCodeBlock, ...)>
>>
CodeBlockForAlt(currentAltCodeBlock, locals, preamble, ops) ::= <<
<locals; separator="\n">
<preamble; separator="\n">
<ops; separator="\n">
>>
LL1AltBlock(choice, preamble, alts, error) ::= <<
p.SetState(<choice.stateNumber>)
<if(choice.label)><labelref(choice.label)> = p.GetTokenStream().LT(1)<endif>
<preamble; separator="\n">
switch p.GetTokenStream().LA(1) {
<choice.altLook,alts:{look,alt| <cases(ttypes=look)>
<alt>}; separator="\n">
default:
<error>
}
>>
LL1OptionalBlock(choice, alts, error) ::= <<
p.SetState(<choice.stateNumber>)
switch p.GetTokenStream().LA(1) {
<choice.altLook,alts:{look,alt| <cases(ttypes=look)>
<alt>}; separator="\n">
default:
<error>
}
>>
LL1OptionalBlockSingleAlt(choice, expr, alts, preamble, error, followExpr) ::= <<
p.SetState(<choice.stateNumber>)
<preamble; separator="\n">
if <expr> {
<alts; separator="\n">
}
<!else if ( !(<followExpr>) ) <error>!>
>>
LL1StarBlockSingleAlt(choice, loopExpr, alts, preamble, iteration) ::= <<
p.SetState(<choice.stateNumber>)
p.GetErrorHandler().Sync(p)
<preamble; separator="\n">
for <loopExpr> {
<alts; separator="\n">
p.SetState(<choice.loopBackStateNumber>)
p.GetErrorHandler().Sync(p)
<iteration>
}
>>
LL1PlusBlockSingleAlt(choice, loopExpr, alts, preamble, iteration) ::= <<
p.SetState(<choice.blockStartStateNumber>) <! alt block decision !>
p.GetErrorHandler().Sync(p)
<preamble; separator="\n">
for ok := true; ok; ok = <loopExpr> {
<alts; separator="\n">
p.SetState(<choice.stateNumber>); <! loopback/exit decision !>
p.GetErrorHandler().Sync(p)
<iteration>
}
>>
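Go has no do-while statement, so the plus-block templates above emulate one: the loop flag starts true, the body runs once, and the real condition is evaluated only in the post-statement. A standalone sketch of the idiom:

package main

import "fmt"

func main() {
i := 0
// equivalent of: do { i++ } while (i < 3)
for ok := true; ok; ok = i < 3 {
i++
}
fmt.Println(i) // 3: the body ran once before the condition was first consulted
}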
// LL(*) stuff
AltBlock(choice, preamble, alts, error) ::= <<
p.SetState(<choice.stateNumber>)
p.GetErrorHandler().Sync(p)
<if(choice.label)><labelref(choice.label)> = p.GetTokenStream().LT(1)<endif>
<preamble; separator="\n">
la_ := p.GetInterpreter().AdaptivePredict(p.GetTokenStream(),<choice.decision>,p.GetParserRuleContext())
switch la_ {
<alts:{alt |
case <i>:
<alt>
}; separator="\n">
}
>>
OptionalBlock(choice, alts, error) ::= <<
p.SetState(<choice.stateNumber>)
p.GetErrorHandler().Sync(p)
la_ := p.GetInterpreter().AdaptivePredict(p.GetTokenStream(),<choice.decision>,p.GetParserRuleContext())
<alts:{alt |
if la_==<i><if(!choice.ast.greedy)>+1<endif> {
<alt>
}; separator="\n} else ">
}
>>
StarBlock(choice, alts, Sync, iteration) ::= <<
p.SetState(<choice.stateNumber>)
p.GetErrorHandler().Sync(p)
_alt := p.GetInterpreter().AdaptivePredict(p.GetTokenStream(),<choice.decision>,p.GetParserRuleContext())
for _alt!=<choice.exitAlt> && _alt!= antlr4.ATNINVALID_ALT_NUMBER {
if _alt==1<if(!choice.ast.greedy)>+1<endif> {
<iteration>
<alts> <! should only be one !>
}
p.SetState(<choice.loopBackStateNumber>)
p.GetErrorHandler().Sync(p)
_alt = p.GetInterpreter().AdaptivePredict(p.GetTokenStream(),<choice.decision>,p.GetParserRuleContext())
}
>>
PlusBlock(choice, alts, error) ::= <<
p.SetState(<choice.blockStartStateNumber>) <! alt block decision !>
p.GetErrorHandler().Sync(p)
_alt := 1<if(!choice.ast.greedy)>+1<endif>
for ok := true; ok; ok = _alt!=<choice.exitAlt> && _alt!= antlr4.ATNINVALID_ALT_NUMBER {
switch _alt {
<alts:{alt|
case <i><if(!choice.ast.greedy)>+1<endif>:
<alt>
}; separator="\n">
default:
<error>
}
p.SetState(<choice.loopBackStateNumber>) <! loopback/exit decision !>
p.GetErrorHandler().Sync(p)
_alt = p.GetInterpreter().AdaptivePredict(p.GetTokenStream(),<choice.decision>, p.GetParserRuleContext())
}
>>
Sync(s) ::= "Sync(<s.expecting.name>)"
ThrowNoViableAlt(t) ::= "panic(antlr4.NewNoViableAltException(p))"
TestSetInline(s) ::= <<
<s.bitsets:{bits | <if(rest(rest(bits.ttypes)))><bitsetBitfieldComparison(s, bits)><else><bitsetInlineComparison(s, bits)><endif>}; separator=" || ">
>>
// carried over from the JavaScript target, where shift operators are capped at 32 bits; the guard keeps shift amounts within one 32-bit word
testShiftInRange(shiftAmount) ::= <<
((<shiftAmount>) & ~0x1f) == 0
>>
// produces smaller bytecode only when bits.ttypes contains more than two items
bitsetBitfieldComparison(s, bits) ::= <%
(<testShiftInRange({<offsetShiftVar(s.varName, bits.shift)>})> && ((1 \<\< <offsetShiftVar(s.varName, bits.shift)>) & (<bits.ttypes:{ttype | (1 \<\< <offsetShiftType(ttype, bits.shift)>)}; separator=" | ">)) != 0)
%>
isZero ::= [
"0":true,
default:false
]
offsetShiftVar(shiftAmount, offset) ::= <%
<if(!isZero.(offset))>(<shiftAmount> - <offset>)<else><shiftAmount><endif>
%>
offsetShiftType(shiftAmount, offset) ::= <%
<if(!isZero.(offset))>(<parser.name><shiftAmount> - <offset>)<else><parser.name><shiftAmount><endif>
%>
// produces more efficient bytecode when bits.ttypes contains at most two items
bitsetInlineComparison(s, bits) ::= <%
<bits.ttypes:{ttype | <s.varName>==<parser.name><ttype>}; separator=" || ">
%>
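For reference, here is the shape of the two membership tests these templates expand to, as a runnable sketch with hypothetical token constants (TA, TB, TC stand in for generated <parser.name><ttype> constants):

package main

import "fmt"

const (
TA = 1 // hypothetical token types
TB = 2
TC = 3
)

func main() {
_la := TB // pretend lookahead token type
// bitsetInlineComparison: emitted when the set has at most two members
fmt.Println(_la == TA || _la == TB)
// bitsetBitfieldComparison: shift-range guard plus a single mask test
fmt.Println((_la & ^0x1f) == 0 && (1<<uint(_la))&((1<<TA)|(1<<TB)|(1<<TC)) != 0)
}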
cases(ttypes) ::= <<
case <ttypes:{t | <parser.name><t>}; separator=", ">:
>>
InvokeRule(r, argExprsChunks) ::= <<
p.SetState(<r.stateNumber>)
<if(r.labels)><r.labels:{l | <labelref(l)> = }><endif>p.<r.name>(<if(r.ast.options.p)><r.ast.options.p><if(argExprsChunks)>,<endif><endif><argExprsChunks>)
>>
MatchToken(m) ::= <<
p.SetState(<m.stateNumber>)
<if(m.labels)><m.labels:{l | <labelref(l)> = }><endif>p.Match(<parser.name><m.name>)
>>
MatchSet(m, expr, capture) ::= "<CommonSetStuff(m, expr, capture, false)>"
MatchNotSet(m, expr, capture) ::= "<CommonSetStuff(m, expr, capture, true)>"
CommonSetStuff(m, expr, capture, invert) ::= <<
p.SetState(<m.stateNumber>)
<if(m.labels)><m.labels:{l | <labelref(l)> = }>p.GetTokenStream().LT(1);<endif>
<capture>
<if(invert)>if <m.varName>\<=0 || <expr> <else>if !(<expr>)<endif> {
<if(m.labels)><m.labels:{l | <labelref(l)> = }><endif>p.GetErrorHandler().RecoverInline(p)
} else {
p.Consume()
}
>>
Wildcard(w) ::= <<
p.SetState(<w.stateNumber>)
<if(w.labels)><w.labels:{l | <labelref(l)> = }><endif>p.MatchWildcard()
>>
// ACTION STUFF
Action(a, foo, chunks) ::= "<chunks>"
ArgAction(a, chunks) ::= "<chunks>"
SemPred(p, chunks, failChunks) ::= <<
p.SetState(<p.stateNumber>)
if !(<chunks>) {
panic(antlr4.NewFailedPredicateException(p, <p.predicate><if(failChunks)>, <failChunks><elseif(p.msg)>, <p.msg><endif>))
}
>>
ExceptionClause(e, catchArg, catchAction) ::= <<
catch (<catchArg>) {
<catchAction>
}
>>
// lexer actions are not associated with model objects
LexerSkipCommand() ::= "p.Skip()"
LexerMoreCommand() ::= "p.More()"
LexerPopModeCommand() ::= "p.PopMode()"
LexerTypeCommand(arg) ::= "p._type = <arg>"
LexerChannelCommand(arg) ::= "p._channel = <arg>"
LexerModeCommand(arg) ::= "p._mode = <arg>"
LexerPushModeCommand(arg) ::= "p.PushMode(<arg>)"
ActionText(t) ::= "<t.text>"
ActionTemplate(t) ::= "<t.st>"
ArgRef(a) ::= "localctx.<a.name>"
LocalRef(a) ::= "localctx.<a.name>"
RetValueRef(a) ::= "localctx.<a.name>"
QRetValueRef(a) ::= "<ctx(a)>.<a.dict>.<a.name>"
/** How to translate $tokenLabel */
TokenRef(t) ::= "<ctx(t)>.<t.name>"
LabelRef(t) ::= "<ctx(t)>.<t.name>"
ListLabelRef(t) ::= "<ctx(t)>.<ListLabelName(t.name)>"
SetAttr(s,rhsChunks) ::= "<ctx(s)>.<s.name> = <rhsChunks>"
TokenLabelType() ::= "<file.TokenLabelType; null={Token}>"
InputSymbolType() ::= "<file.InputSymbolType; null={Token}>"
TokenPropertyRef_text(t) ::= "(<ctx(t)>.<t.label>==null ? null : <ctx(t)>.<t.label>.text)"
TokenPropertyRef_type(t) ::= "(<ctx(t)>.<t.label> == null ? 0 : <ctx(t)>.<t.label>.type)"
TokenPropertyRef_line(t) ::= "(<ctx(t)>.<t.label> == null ? 0 : <ctx(t)>.<t.label>.line)"
TokenPropertyRef_pos(t) ::= "(<ctx(t)>.<t.label> == null ? 0 : <ctx(t)>.<t.label>.column)"
TokenPropertyRef_channel(t) ::= "(<ctx(t)>.<t.label> == null ? 0 : <ctx(t)>.<t.label>.channel)"
TokenPropertyRef_index(t) ::= "(<ctx(t)>.<t.label> == null ? 0 : <ctx(t)>.<t.label>.tokenIndex)"
TokenPropertyRef_int(t) ::= "(<ctx(t)>.<t.label> == null ? 0 : parseInt(<ctx(t)>.<t.label>.text))"
RulePropertyRef_start(r) ::= "(<ctx(r)>.<r.label>==null ? null : <ctx(r)>.<r.label>.start)"
RulePropertyRef_stop(r) ::= "(<ctx(r)>.<r.label>==null ? null : <ctx(r)>.<r.label>.stop)"
RulePropertyRef_text(r) ::= "(<ctx(r)>.<r.label>==null ? null : p.GetTokenStream().GetTextFromInterval(NewInterval(<ctx(r)>.<r.label>.GetStart(),<ctx(r)>.<r.label>.GetStop())))"
RulePropertyRef_ctx(r) ::= "<ctx(r)>.<r.label>"
RulePropertyRef_parser(r) ::= "p"
ThisRulePropertyRef_start(r) ::= "localctx.start"
ThisRulePropertyRef_stop(r) ::= "localctx.stop"
ThisRulePropertyRef_text(r) ::= "p.GetTokenStream().GetTextFromInterval(NewInterval(localctx.GetStart(), p.GetTokenStream().LT(-1)))"
ThisRulePropertyRef_ctx(r) ::= "localctx"
ThisRulePropertyRef_parser(r) ::= "p"
NonLocalAttrRef(s) ::= "getInvokingContext(<s.ruleIndex>).<s.name>"
SetNonLocalAttr(s, rhsChunks) ::= "getInvokingContext(<s.ruleIndex>).<s.name> = <rhsChunks>"
AddToLabelList(a) ::= "<ctx(a.label)>.<a.listName> = append(<ctx(a.label)>.<a.listName>, <labelref(a.label)>)"
TokenDecl(t) ::= "p.<t.name> = nil // <TokenLabelType()>"
TokenTypeDecl(t) ::= "<t.name> := 0 // <TokenLabelType()> type"
TokenListDecl(t) ::= "p.<t.name> = nil // list of <TokenLabelType()>s"
RuleContextDecl(r) ::= "p.<r.name> = nil // <r.ctxName>"
RuleContextListDecl(rdecl) ::= "p.<rdecl.name> = nil // list of <rdecl.ctxName>s"
ContextTokenGetterDecl(t) ::= <<
<t.name>() {
return s.GetToken(<parser.name><t.name>, 0)
}
>>
// should never be called
ContextTokenListGetterDecl(t) ::= <<
def <t.name>_list(self):
return self.GetTokens(<parser.name><t.name>)
>>
ContextTokenListIndexedGetterDecl(t) ::= <<
<t.name>(i int) {
if 0 > i {
return s.GetTokens(<parser.name><t.name>)
} else {
return s.GetToken(<parser.name><t.name>, i)
}
}
>>
ContextRuleGetterDecl(r) ::= <<
<r.name>() {
return s.GetTypedRuleContext(<r.ctxName>,0)
}
>>
// should never be called
ContextRuleListGetterDecl(r) ::= <<
func <r.name>_list(self):
return s.GetTypedRuleContexts(<r.ctxName>)
>>
ContextRuleListIndexedGetterDecl(r) ::= <<
<r.name>(i int) {
if 0 > i {
return s.GetTypedRuleContexts(<r.ctxName>)
} else {
return s.GetTypedRuleContext(<r.ctxName>,i)
}
}
>>
LexerRuleContext() ::= "RuleContext"
/** The rule context name is the rule followed by a suffix; e.g.,
* r becomes rContext.
*/
RuleContextNameSuffix() ::= "Context"
ImplicitTokenLabel(tokenName) ::= "_<tokenName>"
ImplicitRuleLabel(ruleName) ::= "_<ruleName>"
ImplicitSetLabel(id) ::= "_tset<id>"
ListLabelName(label) ::= "<label>"
CaptureNextToken(d) ::= "<d.varName> = p.GetTokenStream().LT(1)"
CaptureNextTokenType(d) ::= "<d.varName> = p.GetTokenStream().LA(1);"
StructDecl(struct,ctorAttrs,attrs,getters,dispatchMethods,interfaces,extensionMembers,
superClass={ParserRuleContext}) ::= <<
type <struct.name> struct {
*antlr4.ParserRuleContext
parser antlr4.IParser
}
func New<struct.name>(parser antlr4.IParser, parent antlr4.IParserRuleContext, invokingState int<struct.ctorAttrs:{a | , <a.name>}>) *<struct.name> {
var p = new(<struct.name>)
p.InitParserRuleContext( parent, invokingState )
p.parser = parser
p.RuleIndex = <parser.name>RULE_<struct.derivedFromName>
<attrs:{a | <a>}; separator="\n">
<struct.ctorAttrs:{a | p.<a.name> = <a.name>}; separator="\n">
return p
}
<getters:{g | func (s *<struct.name>) <g>}; separator="\n\n">
<if(struct.provideCopyFrom)> <! don't need copy unless we have subclasses !>
func (s *<struct.name>) copyFrom(ctx *<struct.name>) {
// TODO port the base <superClass> copyFrom call from the JS target
<struct.attrs:{a | s.<a.name> = ctx.<a.name>}; separator="\n">
}
<endif>
<dispatchMethods; separator="\n">
<extensionMembers; separator="\n">
>>
AltLabelStructDecl(struct,attrs,getters,dispatchMethods) ::= <<
type <struct.name> struct {
parent antlr4.IParserRuleContext
parser antlr4.IParser
}
func New<struct.name>(parser antlr4.IParser, ctx antlr4.IParserRuleContext) *<struct.name> {
var p = new(<struct.name>)
p.parent = ctx
p.parser = parser
<attrs:{a | <a>}; separator="\n">
// TODO port the field copy from ctx (was <currentRule.name; format="cap">Context.prototype.copyFrom in the JS target)
return p
}
<getters:{g | func (s *<struct.name>) <g>}; separator="\n\n">
<dispatchMethods; separator="\n">
>>
ListenerDispatchMethod(method) ::= <<
func (s *<struct.name>) <if(method.isEnter)>enter<else>exit<endif>Rule(listener antlr4.ParseTreeListener) {
listener.(*<parser.grammarName>Listener).<if(method.isEnter)>enter<else>exit<endif><struct.derivedFromName; format="cap">(s)
}
>>
VisitorDispatchMethod(method) ::= <<
func (s *<struct.name>) accept(visitor antlr4.ParseTreeVisitor) interface{} {
switch t := visitor.(type) {
case *<parser.grammarName>Visitor:
return t.visit<struct.derivedFromName; format="cap">(s)
default:
return t.visitChildren(s)
}
}
>>
AttributeDecl(d) ::= "p.<d.name> = <if(d.InitValue)><d.InitValue><else>nil<endif>"
/** If we don't know location of label def x, use this template */
labelref(x) ::= "<if(!x.isLocal)>localctx.<endif><x.name>"
/** For any action chunk, what is correctly-typed context struct ptr? */
ctx(actionChunk) ::= "localctx"
// used for left-recursive rules
recRuleAltPredicate(ruleName,opPrec) ::= "p.Precpred(p.GetParserRuleContext(), <opPrec>)"
recRuleSetReturnAction(src,name) ::= "$<name>=$<src>.<name>"
recRuleSetStopToken() ::= "p.GetParserRuleContext().stop = p.GetTokenStream().LT(-1);"
recRuleAltStartAction(ruleName, ctxName, label) ::= <<
localctx = New<ctxName>Context(p, _parentctx, _parentState)
<if(label)>localctx.<label> = _prevctx<endif>
p.PushNewRecursionContext(localctx, _startState, <parser.name>RULE_<ruleName>)
>>
recRuleLabeledAltStartAction(ruleName, currentAltLabel, label, isListLabel) ::= <<
localctx = New<currentAltLabel; format="cap">Context(p, New<ruleName; format="cap">Context(p, _parentctx, _parentState))
<if(label)>
<if(isListLabel)>
localctx.<label> = append(localctx.<label>, _prevctx)
<else>
localctx.<label> = _prevctx
<endif>
<endif>
p.PushNewRecursionContext(localctx, _startState, <parser.name>RULE_<ruleName>)
>>
recRuleReplaceContext(ctxName) ::= <<
localctx = New<ctxName>Context(p, localctx)
p.SetParserRuleContext(localctx)
_prevctx = localctx
>>
recRuleSetPrevCtx() ::= <<
if p._parseListeners != nil {
p.triggerExitRuleEvent()
}
_prevctx = localctx
>>
LexerFile(lexerFile, lexer, namedActions) ::= <<
<fileHeader(lexerFile.grammarFileName, lexerFile.ANTLRVersion)>
package parser
import (
"antlr4"
"strconv"
"strings"
)
<namedActions.header>
<lexer>
>>
Lexer(lexer, atn, actionFuncs, sempredFuncs, superClass) ::= <<
var serializedLexerAtn = <atn>
var lexerDeserializer = antlr4.NewATNDeserializer(nil)
var lexerAtn = lexerDeserializer.Deserialize( []rune( serializedLexerAtn ) )
var lexerModeNames = []string{ <lexer.modes:{m| "<m>"}; separator=", ", wrap, anchor> }
var lexerLiteralNames = []string{ <lexer.literalNames:{t | <t>}; null="nil", separator=", ", wrap, anchor> }
var lexerSymbolicNames = []string{ <lexer.symbolicNames:{t | <t>}; null="nil", separator=", ", wrap, anchor> }
var lexerRuleNames = []string{ <lexer.ruleNames:{r | "<r>"}; separator=", ", wrap, anchor> }
type <lexer.name> struct {
<if(superClass)><superClass><else>*antlr4.Lexer<endif>
modeNames []string
literalNames []string
symbolicNames []string
ruleNames []string
grammarFileName string
EOF int
}
func New<lexer.name>(input antlr4.CharStream) *<lexer.name> {
var lexerDecisionToDFA = make([]*antlr4.DFA,len(lexerAtn.DecisionToState))
for index, ds := range lexerAtn.DecisionToState {
lexerDecisionToDFA[index] = antlr4.NewDFA(ds, index)
}
lex := new(<lexer.name>)
lex.InitLexer(input)
lex.Interpreter = antlr4.NewLexerATNSimulator(lex, lexerAtn, lexerDecisionToDFA, antlr4.NewPredictionContextCache())
lex.modeNames = lexerModeNames
lex.ruleNames = lexerRuleNames
lex.literalNames = lexerLiteralNames
lex.symbolicNames = lexerSymbolicNames
lex.grammarFileName = "<lexer.grammarFileName>"
lex.EOF = antlr4.TokenEOF
return lex
}
const (
<lexer.tokens:{k | <lexer.name><k> = <lexer.tokens.(k)>}; separator="\n", wrap, anchor>
)
const (
<rest(lexer.modes):{m| <lexer.name><m> = <i>}; separator="\n">
)
<namedActions.members>
<dumpActions(lexer, "", actionFuncs, sempredFuncs)>
>>
SerializedATN(model) ::= <<
<! only one segment, can be inlined !>
strings.Join( []string{ "<model.serialized; wrap={",<\n> "}>" }, "" )
>>
/** Using the type-to-init-value map, try to produce an initial value for a type;
* if the type is not in the table it must be an object type, so the default value is "nil".
*/
InitValue(typeName) ::= <<
<javaTypeInitMap.(typeName)>
>>
codeFileExtension() ::= ".go"
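To make the templates concrete: rendering ParserFile for a hypothetical grammar Expr.g4 with a hypothetical tool version would begin like this (reconstructed by hand from the templates above, not actual generator output):

// Generated from Expr.g4 by ANTLR 4.5.1
package parser // Expr
import (
"antlr4"
"strconv"
"strings"
)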

View File

@ -2,7 +2,6 @@ package antlr4
type ATN struct {
DecisionToState []*DecisionState
grammarType int
maxTokenType int
states []IATNState
@ -80,13 +79,13 @@ func (this *ATN) nextTokens(s IATNState, ctx IRuleContext) *IntervalSet {
func (this *ATN) addState(state IATNState) {
if state != nil {
state.setATN(this)
state.setStateNumber(len(this.states))
state.SetStateNumber(len(this.states))
}
this.states = append(this.states, state)
}
func (this *ATN) removeState(state IATNState) {
this.states[state.getStateNumber()] = nil // just free mem, don't shift states in list
this.states[state.GetStateNumber()] = nil // just free mem, don't shift states in list
}
func (this *ATN) defineDecisionState(s *DecisionState) int {
@ -108,7 +107,7 @@ func (this *ATN) getDecisionState(decision int) *DecisionState {
// considers the complete parser context, but does not evaluate semantic
// predicates (i.e. all predicates encountered during the calculation are
// assumed true). If a path in the ATN exists from the starting state to the
// {@link RuleStopState} of the outermost context without matching any
// {@link RuleStopState} of the outermost context without Matching any
// symbols, {@link Token//EOF} is added to the returned set.
//
// <p>If {@code context} is {@code nil}, it is treated as
@ -141,7 +140,7 @@ func (this *ATN) getExpectedTokens(stateNumber int, ctx IRuleContext) *IntervalS
following = this.nextTokens(rt.(*RuleTransition).followState, nil)
expected.addSet(following)
expected.removeOne(TokenEpsilon)
ctx = ctx.getParent().(IRuleContext)
ctx = ctx.GetParent().(IRuleContext)
}
if following.contains(TokenEpsilon) {
expected.addOne(TokenEOF)

View File

@ -18,7 +18,7 @@ type IATNConfig interface {
getPrecedenceFilterSuppressed() bool
setPrecedenceFilterSuppressed(bool)
getState() IATNState
GetState() IATNState
getAlt() int
getSemanticContext() SemanticContext
@ -70,7 +70,7 @@ func NewATNConfig3(c IATNConfig, state IATNState, semanticContext SemanticContex
}
func NewATNConfig2(c IATNConfig, semanticContext SemanticContext) *ATNConfig {
return NewATNConfig(c, c.getState(), c.getContext(), semanticContext)
return NewATNConfig(c, c.GetState(), c.getContext(), semanticContext)
}
func NewATNConfig1(c IATNConfig, state IATNState, context IPredictionContext) *ATNConfig {
@ -92,7 +92,7 @@ func (this *ATNConfig) setPrecedenceFilterSuppressed(v bool) {
this.precedenceFilterSuppressed = v
}
func (this *ATNConfig) getState() IATNState {
func (this *ATNConfig) GetState() IATNState {
return this.state
}
@ -153,7 +153,7 @@ func (this *ATNConfig) equals(other interface{}) bool {
}
func (this *ATNConfig) shortHashString() string {
return "" + strconv.Itoa(this.state.getStateNumber()) + "/" + strconv.Itoa(this.alt) + "/" + this.semanticContext.toString()
return "" + strconv.Itoa(this.state.GetStateNumber()) + "/" + strconv.Itoa(this.alt) + "/" + this.semanticContext.toString()
}
func (this *ATNConfig) hashString() string {
@ -165,7 +165,7 @@ func (this *ATNConfig) hashString() string {
c = this.context.hashString()
}
return "" + strconv.Itoa(this.state.getStateNumber()) + "/" + strconv.Itoa(this.alt) + "/" + c + "/" + this.semanticContext.toString()
return "" + strconv.Itoa(this.state.GetStateNumber()) + "/" + strconv.Itoa(this.alt) + "/" + c + "/" + this.semanticContext.toString()
}
func (this *ATNConfig) toString() string {
@ -268,7 +268,7 @@ func (this *LexerATNConfig) hashString() string {
f = "0"
}
return "" + strconv.Itoa(this.state.getStateNumber()) + strconv.Itoa(this.alt) + fmt.Sprint(this.context) +
return "" + strconv.Itoa(this.state.GetStateNumber()) + strconv.Itoa(this.alt) + fmt.Sprint(this.context) +
fmt.Sprint(this.semanticContext) + f + fmt.Sprint(this.lexerActionExecutor)
}

View File

@ -21,7 +21,7 @@ func equalATNConfigs(a, b interface{}) bool {
if a == nil || b == nil {
return false
}
return a.(*ATNConfig).state.getStateNumber() == b.(*ATNConfig).state.getStateNumber() &&
return a.(*ATNConfig).state.GetStateNumber() == b.(*ATNConfig).state.GetStateNumber() &&
a.(*ATNConfig).alt == b.(*ATNConfig).alt &&
a.(*ATNConfig).semanticContext.equals(b.(*ATNConfig).semanticContext)
}
@ -129,10 +129,10 @@ func (this *ATNConfigSet) add(config IATNConfig, mergeCache *DoubleDict) bool {
return true
}
func (this *ATNConfigSet) getStates() *Set {
func (this *ATNConfigSet) GetStates() *Set {
var states = NewSet(nil, nil)
for i := 0; i < len(this.configs); i++ {
states.add(this.configs[i].getState())
states.add(this.configs[i].GetState())
}
return states
}

View File

@ -443,10 +443,10 @@ func (this *ATNDeserializer) generateRuleBypassTransition(atn *ATN, idx int) {
atn.ruleToStartState[idx].addTransition(NewEpsilonTransition(bypassStart, -1), -1)
bypassStop.addTransition(NewEpsilonTransition(endState, -1), -1)
var matchState = NewBasicState()
atn.addState(matchState)
matchState.addTransition(NewAtomTransition(bypassStop, atn.ruleToTokenType[idx]), -1)
bypassStart.addTransition(NewEpsilonTransition(matchState, -1), -1)
var MatchState = NewBasicState()
atn.addState(MatchState)
MatchState.addTransition(NewAtomTransition(bypassStop, atn.ruleToTokenType[idx]), -1)
bypassStart.addTransition(NewEpsilonTransition(MatchState, -1), -1)
}
func (this *ATNDeserializer) stateIsEndStateFor(state IATNState, idx int) IATNState {

View File

@ -23,7 +23,7 @@ func NewATNSimulator(atn *ATN, sharedContextCache *PredictionContextCache) *ATNS
// by literally rebuilding them with cached subgraphs only.</p>
//
// <p>I tried a cache for use during closure operations, that was
// whacked after each adaptivePredict(). It cost a little bit
// whacked after each AdaptivePredict(). It cost a little bit
// more time I think and doesn't save on the overall footprint
// so it's not worth the complexity.</p>

View File

@ -50,10 +50,10 @@ type IATNState interface {
getATN() *ATN
setATN(*ATN)
getStateType() int
GetStateType() int
getStateNumber() int
setStateNumber(int)
GetStateNumber() int
SetStateNumber(int)
getTransitions() []ITransition
setTransitions([]ITransition)
@ -125,15 +125,15 @@ func (as *ATNState) setTransitions(t []ITransition) {
as.transitions = t
}
func (as *ATNState) getStateType() int {
func (as *ATNState) GetStateType() int {
return as.stateType
}
func (as *ATNState) getStateNumber() int {
func (as *ATNState) GetStateNumber() int {
return as.stateNumber
}
func (as *ATNState) setStateNumber(stateNumber int) {
func (as *ATNState) SetStateNumber(stateNumber int) {
as.stateNumber = stateNumber
}
@ -151,7 +151,7 @@ func (this *ATNState) toString() string {
func (this *ATNState) equals(other interface{}) bool {
if ot, ok := other.(IATNState); ok {
return this.stateNumber == ot.getStateNumber()
return this.stateNumber == ot.GetStateNumber()
} else {
return false
}

View File

@ -92,7 +92,7 @@ func (bt *BufferedTokenStream) get(index int) *Token {
return bt.tokens[index]
}
func (bt *BufferedTokenStream) consume() {
func (bt *BufferedTokenStream) Consume() {
var skipEofCheck = false
if bt.index >= 0 {
if bt.fetchedEOF {
@ -110,7 +110,7 @@ func (bt *BufferedTokenStream) consume() {
if !skipEofCheck && bt.LA(1) == TokenEOF {
panic("cannot consume EOF")
}
if bt.sync(bt.index + 1) {
if bt.Sync(bt.index + 1) {
bt.index = bt.adjustSeekIndex(bt.index + 1)
}
}
@ -121,7 +121,7 @@ func (bt *BufferedTokenStream) consume() {
// {@code false}.
// @see //get(int i)
// /
func (bt *BufferedTokenStream) sync(i int) bool {
func (bt *BufferedTokenStream) Sync(i int) bool {
var n = i - len(bt.tokens) + 1 // how many more elements we need?
if n > 0 {
var fetched = bt.fetch(n)
@ -152,7 +152,7 @@ func (bt *BufferedTokenStream) fetch(n int) int {
}
// Get all tokens from start..stop inclusively///
func (bt *BufferedTokenStream) getTokens(start int, stop int, types *IntervalSet) []*Token {
func (bt *BufferedTokenStream) GetTokens(start int, stop int, types *IntervalSet) []*Token {
if start < 0 || stop < 0 {
return nil
@ -194,7 +194,7 @@ func (bt *BufferedTokenStream) LT(k int) *Token {
return bt.LB(-k)
}
var i = bt.index + k - 1
bt.sync(i)
bt.Sync(i)
if i >= len(bt.tokens) { // return EOF token
// EOF must be last token
return bt.tokens[len(bt.tokens)-1]
@ -226,11 +226,11 @@ func (bt *BufferedTokenStream) lazyInit() {
}
func (bt *BufferedTokenStream) setup() {
bt.sync(0)
bt.Sync(0)
bt.index = bt.adjustSeekIndex(0)
}
func (bt *BufferedTokenStream) getTokenSource() TokenSource {
func (bt *BufferedTokenStream) GetTokenSource() TokenSource {
return bt.tokenSource
}
@ -246,7 +246,7 @@ func (bt *BufferedTokenStream) setTokenSource(tokenSource TokenSource) {
// on channel between i and EOF.
// /
func (bt *BufferedTokenStream) nextTokenOnChannel(i, channel int) int {
bt.sync(i)
bt.Sync(i)
if i >= len(bt.tokens) {
return -1
}
@ -256,7 +256,7 @@ func (bt *BufferedTokenStream) nextTokenOnChannel(i, channel int) int {
return -1
}
i += 1
bt.sync(i)
bt.Sync(i)
token = bt.tokens[i]
}
return i
@ -333,7 +333,7 @@ func (bt *BufferedTokenStream) getSourceName() string {
}
// Get the text of all tokens in bt buffer.///
func (bt *BufferedTokenStream) getText(interval *Interval) string {
func (bt *BufferedTokenStream) GetText(interval *Interval) string {
bt.lazyInit()
bt.fill()
if interval == nil {

View File

@ -3,5 +3,5 @@ package antlr4
type CharStream interface {
IntStream
getTextFromInterval(*Interval) string
GetTextFromInterval(*Interval) string
}

View File

@ -21,10 +21,10 @@ func NewCommonTokenFactory(copyText bool) *CommonTokenFactory {
// constructing tokens to explicitly set the text. This is useful for cases
// where the input stream might not be able to provide arbitrary substrings
// of text from the input after the lexer creates a token (e.g. the
// implementation of {@link CharStream//getText} in
// implementation of {@link CharStream//GetText} in
// {@link UnbufferedCharStream} panics with an
// {@link UnsupportedOperationException}). Explicitly setting the token text
// allows {@link Token//getText} to be called at any time regardless of the
// allows {@link Token//GetText} to be called at any time regardless of the
// input stream implementation.
//
// <p>
@ -52,7 +52,7 @@ func (this *CommonTokenFactory) create(source *TokenSourceCharStreamPair, ttype
if text != "" {
t.setText(text)
} else if this.copyText && source.charStream != nil {
t.setText(source.charStream.getTextFromInterval(NewInterval(start, stop)))
t.setText(source.charStream.GetTextFromInterval(NewInterval(start, stop)))
}
return t.Token
}

View File

@ -5,7 +5,7 @@
//
// <p>
// This token stream provides access to all tokens by index or when calling
// methods like {@link //getText}. The channel filtering is only used for code
// methods like {@link //GetText}. The channel filtering is only used for code
// accessing tokens via the lookahead methods {@link //LA}, {@link //LT}, and
// {@link //LB}.</p>
//
@ -18,7 +18,7 @@
//
// <p>
// Note: lexer rules which use the {@code ->skip} lexer command or call
// {@link Lexer//skip} do not produce tokens at all, so input text matched by
// {@link Lexer//skip} do not produce tokens at all, so input text Matched by
// such a rule will not be available as part of the token stream, regardless of
// channel.</p>
///
@ -74,7 +74,7 @@ func (ts *CommonTokenStream) LT(k int) *Token {
// find k good tokens
for n < k {
// skip off-channel tokens, but make sure to not look past EOF
if ts.sync(i + 1) {
if ts.Sync(i + 1) {
i = ts.nextTokenOnChannel(i+1, ts.channel)
}
n += 1

View File

@ -64,7 +64,7 @@ func (this *DFA) setPrecedenceStartState(precedence int, startState *DFAState) {
return
}
// synchronization on s0 here is ok. when the DFA is turned into a
// Synchronization on s0 here is ok. when the DFA is turned into a
// precedence DFA, s0 will be initialized once and not updated again
// s0.edges is never nil for a precedence DFA
this.s0.edges[precedence] = startState
@ -103,7 +103,7 @@ func (this *DFA) setPrecedenceDfa(precedenceDfa bool) {
}
}
func (this *DFA) getStates() map[string]*DFAState {
func (this *DFA) GetStates() map[string]*DFAState {
return this._states
}

View File

@ -50,11 +50,11 @@ func (this *DFASerializer) toString() string {
for j := 0; j < n; j++ {
var t = s.edges[j]
if t != nil && t.stateNumber != 0x7FFFFFFF {
buf += this.getStateString(s)
buf += this.GetStateString(s)
buf += "-"
buf += this.getEdgeLabel(j)
buf += "->"
buf += this.getStateString(t)
buf += this.GetStateString(t)
buf += "\n"
}
}
@ -81,7 +81,7 @@ func (this *DFASerializer) getEdgeLabel(i int) string {
}
}
func (this *DFASerializer) getStateString(s *DFAState) string {
func (this *DFASerializer) GetStateString(s *DFAState) string {
var a, b string

View File

@ -75,7 +75,7 @@ func NewDFAState(stateNumber int, configs *ATNConfigSet) *DFAState {
// {@link Token//EOF} maps to {@code edges[0]}.
this.edges = nil
this.isAcceptState = false
// if accept state, what ttype do we match or alt do we predict?
// if accept state, what ttype do we Match or alt do we predict?
// This is set to {@link ATN//INVALID_ALT_NUMBER} when {@link
// //predicates}{@code !=nil} or
// {@link //requiresFullContext}.

View File

@ -12,7 +12,7 @@ import (
//
// <ul>
// <li><b>Ambiguities</b>: These are cases where more than one path through the
// grammar can match the input.</li>
// grammar can Match the input.</li>
// <li><b>Weak context sensitivity</b>: These are cases where full-context
// prediction resolved an SLL conflict to a unique alternative which equaled the
// minimum alternative of the SLL conflict.</li>
@ -47,7 +47,7 @@ func (this *DiagnosticErrorListener) reportAmbiguity(recognizer *Parser, dfa *DF
": ambigAlts=" +
this.getConflictingAlts(ambigAlts, configs).toString() +
", input='" +
recognizer.getTokenStream().getTextFromInterval(NewInterval(startIndex, stopIndex)) + "'"
recognizer.GetTokenStream().GetTextFromInterval(NewInterval(startIndex, stopIndex)) + "'"
recognizer.notifyErrorListeners(msg, nil, nil)
}
@ -56,7 +56,7 @@ func (this *DiagnosticErrorListener) reportAttemptingFullContext(recognizer *Par
var msg = "reportAttemptingFullContext d=" +
this.getDecisionDescription(recognizer, dfa) +
", input='" +
recognizer.getTokenStream().getTextFromInterval(NewInterval(startIndex, stopIndex)) + "'"
recognizer.GetTokenStream().GetTextFromInterval(NewInterval(startIndex, stopIndex)) + "'"
recognizer.notifyErrorListeners(msg, nil, nil)
}
@ -64,7 +64,7 @@ func (this *DiagnosticErrorListener) reportContextSensitivity(recognizer *Parser
var msg = "reportContextSensitivity d=" +
this.getDecisionDescription(recognizer, dfa) +
", input='" +
recognizer.getTokenStream().getTextFromInterval(NewInterval(startIndex, stopIndex)) + "'"
recognizer.GetTokenStream().GetTextFromInterval(NewInterval(startIndex, stopIndex)) + "'"
recognizer.notifyErrorListeners(msg, nil, nil)
}

View File

@ -9,11 +9,11 @@ import (
type IErrorStrategy interface {
reset(IParser)
recoverInline(IParser) *Token
recover(IParser, IRecognitionException)
sync(IParser)
RecoverInline(IParser) *Token
Recover(IParser, IRecognitionException)
Sync(IParser)
inErrorRecoveryMode(IParser) bool
reportError(IParser, IRecognitionException)
ReportError(IParser, IRecognitionException)
reportMatch(IParser)
}
@ -23,19 +23,19 @@ type ErrorStrategy struct {
func (this *ErrorStrategy) reset(recognizer IParser) {
}
func (this *ErrorStrategy) recoverInline(recognizer IParser) {
func (this *ErrorStrategy) RecoverInline(recognizer IParser) {
}
func (this *ErrorStrategy) recover(recognizer IParser, e IRecognitionException) {
func (this *ErrorStrategy) Recover(recognizer IParser, e IRecognitionException) {
}
func (this *ErrorStrategy) sync(recognizer IParser) {
func (this *ErrorStrategy) Sync(recognizer IParser) {
}
func (this *ErrorStrategy) inErrorRecoveryMode(recognizer IParser) {
}
func (this *ErrorStrategy) reportError(recognizer IParser, e IRecognitionException) {
func (this *ErrorStrategy) ReportError(recognizer IParser, e IRecognitionException) {
}
func (this *ErrorStrategy) reportMatch(recognizer IParser) {
@ -133,16 +133,16 @@ func (this *DefaultErrorStrategy) reportMatch(recognizer IParser) {
// <ul>
// <li>{@link NoViableAltException}: Dispatches the call to
// {@link //reportNoViableAlternative}</li>
// <li>{@link InputMismatchException}: Dispatches the call to
// {@link //reportInputMismatch}</li>
// <li>{@link InputMisMatchException}: Dispatches the call to
// {@link //reportInputMisMatch}</li>
// <li>{@link FailedPredicateException}: Dispatches the call to
// {@link //reportFailedPredicate}</li>
// <li>All other types: calls {@link Parser//notifyErrorListeners} to report
// the exception</li>
// </ul>
//
func (this *DefaultErrorStrategy) reportError(recognizer IParser, e IRecognitionException) {
// if we've already reported an error and have not matched a token
func (this *DefaultErrorStrategy) ReportError(recognizer IParser, e IRecognitionException) {
// if we've already reported an error and have not Matched a token
// yet successfully, don't report any errors.
if this.inErrorRecoveryMode(recognizer) {
return // don't report spurious errors
@ -156,8 +156,8 @@ func (this *DefaultErrorStrategy) reportError(recognizer IParser, e IRecognition
recognizer.notifyErrorListeners(e.getMessage(), e.getOffendingToken(), e)
case *NoViableAltException:
this.reportNoViableAlternative(recognizer, t)
case *InputMismatchException:
this.reportInputMismatch(recognizer, t)
case *InputMisMatchException:
this.reportInputMisMatch(recognizer, t)
case *FailedPredicateException:
this.reportFailedPredicate(recognizer, t)
}
@ -166,46 +166,46 @@ func (this *DefaultErrorStrategy) reportError(recognizer IParser, e IRecognition
//
// {@inheritDoc}
//
// <p>The default implementation resynchronizes the parser by consuming tokens
// until we find one in the resynchronization set--loosely the set of tokens
// <p>The default implementation reSynchronizes the parser by consuming tokens
// until we find one in the reSynchronization set--loosely the set of tokens
// that can follow the current rule.</p>
//
func (this *DefaultErrorStrategy) recover(recognizer IParser, e IRecognitionException) {
func (this *DefaultErrorStrategy) Recover(recognizer IParser, e IRecognitionException) {
if this.lastErrorIndex == recognizer.getInputStream().index() &&
this.lastErrorStates != nil && this.lastErrorStates.contains(recognizer.getState()) {
this.lastErrorStates != nil && this.lastErrorStates.contains(recognizer.GetState()) {
// uh oh, another error at same token index and previously-visited
// state in ATN must be a case where LT(1) is in the recovery
// token set so nothing got consumed. Consume a single token
// at least to prevent an infinite loop this is a failsafe.
recognizer.consume()
recognizer.Consume()
}
this.lastErrorIndex = recognizer.getInputStream().index()
if this.lastErrorStates == nil {
this.lastErrorStates = NewIntervalSet()
}
this.lastErrorStates.addOne(recognizer.getState())
this.lastErrorStates.addOne(recognizer.GetState())
var followSet = this.getErrorRecoverySet(recognizer)
this.consumeUntil(recognizer, followSet)
}
// The default implementation of {@link ANTLRErrorStrategy//sync} makes sure
// The default implementation of {@link ANTLRErrorStrategy//Sync} makes sure
// that the current lookahead symbol is consistent with what were expecting
// at this point in the ATN. You can call this anytime but ANTLR only
// generates code to check before subrules/loops and each iteration.
//
// <p>Implements Jim Idle's magic sync mechanism in closures and optional
// <p>Implements Jim Idle's magic Sync mechanism in closures and optional
// subrules. E.g.,</p>
//
// <pre>
// a : sync ( stuff sync )*
// sync : {consume to what can follow sync}
// a : Sync ( stuff Sync )*
// Sync : {consume to what can follow Sync}
// </pre>
//
// At the start of a sub rule upon error, {@link //sync} performs single
// At the start of a sub rule upon error, {@link //Sync} performs single
// token deletion, if possible. If it can't do that, it bails on the current
// rule and uses the default error recovery, which consumes until the
// resynchronization set of the current rule.
// reSynchronization set of the current rule.
//
// <p>If the sub rule is optional ({@code (...)?}, {@code (...)*}, or block
// with an empty alternative), then the expected set includes what follows
@ -218,7 +218,7 @@ func (this *DefaultErrorStrategy) recover(recognizer IParser, e IRecognitionExce
// <p><strong>ORIGINS</strong></p>
//
// <p>Previous versions of ANTLR did a poor job of their recovery within loops.
// A single mismatch token or missing token would force the parser to bail
// A single misMatch token or missing token would force the parser to bail
// out of the entire rules surrounding the loop. So, for rule</p>
//
// <pre>
@ -234,22 +234,22 @@ func (this *DefaultErrorStrategy) recover(recognizer IParser, e IRecognitionExce
// some reason speed is suffering for you, you can turn off this
// functionality by simply overriding this method as a blank { }.</p>
//
func (this *DefaultErrorStrategy) sync(recognizer IParser) {
// If already recovering, don't try to sync
func (this *DefaultErrorStrategy) Sync(recognizer IParser) {
// If already recovering, don't try to Sync
if this.inErrorRecoveryMode(recognizer) {
return
}
var s = recognizer.getInterpreter().atn.states[recognizer.getState()]
var la = recognizer.getTokenStream().LA(1)
var s = recognizer.GetInterpreter().atn.states[recognizer.GetState()]
var la = recognizer.GetTokenStream().LA(1)
// try cheaper subset first might get lucky. seems to shave a wee bit off
if la == TokenEOF || recognizer.getATN().nextTokens(s, nil).contains(la) {
return
}
// Return but don't end recovery. only do that upon valid token match
// Return but don't end recovery. only do that upon valid token Match
if recognizer.isExpectedToken(la) {
return
}
switch s.getStateType() {
switch s.GetStateType() {
case ATNStateBLOCK_START:
case ATNStateSTAR_BLOCK_START:
case ATNStatePLUS_BLOCK_START:
@ -258,7 +258,7 @@ func (this *DefaultErrorStrategy) sync(recognizer IParser) {
if this.singleTokenDeletion(recognizer) != nil {
return
} else {
panic(NewInputMismatchException(recognizer))
panic(NewInputMisMatchException(recognizer))
}
break
case ATNStatePLUS_LOOP_BACK:
@ -274,22 +274,22 @@ func (this *DefaultErrorStrategy) sync(recognizer IParser) {
}
}
// This is called by {@link //reportError} when the exception is a
// This is called by {@link //ReportError} when the exception is a
// {@link NoViableAltException}.
//
// @see //reportError
// @see //ReportError
//
// @param recognizer the parser instance
// @param e the recognition exception
//
func (this *DefaultErrorStrategy) reportNoViableAlternative(recognizer IParser, e *NoViableAltException) {
var tokens = recognizer.getTokenStream()
var tokens = recognizer.GetTokenStream()
var input string
if tokens != nil {
if e.startToken.tokenType == TokenEOF {
input = "<EOF>"
} else {
input = tokens.getTextFromTokens(e.startToken, e.offendingToken)
input = tokens.GetTextFromTokens(e.startToken, e.offendingToken)
}
} else {
input = "<unknown input>"
@ -299,31 +299,31 @@ func (this *DefaultErrorStrategy) reportNoViableAlternative(recognizer IParser,
}
//
// This is called by {@link //reportError} when the exception is an
// {@link InputMismatchException}.
// This is called by {@link //ReportError} when the exception is an
// {@link InputMisMatchException}.
//
// @see //reportError
// @see //ReportError
//
// @param recognizer the parser instance
// @param e the recognition exception
//
func (this *DefaultErrorStrategy) reportInputMismatch(recognizer IParser, e *InputMismatchException) {
var msg = "mismatched input " + this.getTokenErrorDisplay(e.offendingToken) +
func (this *DefaultErrorStrategy) reportInputMisMatch(recognizer IParser, e *InputMisMatchException) {
var msg = "mismatched input " + this.GetTokenErrorDisplay(e.offendingToken) +
" expecting " + e.getExpectedTokens().toStringVerbose(recognizer.getLiteralNames(), recognizer.getSymbolicNames(), false)
recognizer.notifyErrorListeners(msg, e.offendingToken, e)
}
//
// This is called by {@link //reportError} when the exception is a
// This is called by {@link //ReportError} when the exception is a
// {@link FailedPredicateException}.
//
// @see //reportError
// @see //ReportError
//
// @param recognizer the parser instance
// @param e the recognition exception
//
func (this *DefaultErrorStrategy) reportFailedPredicate(recognizer IParser, e *FailedPredicateException) {
var ruleName = recognizer.getRuleNames()[recognizer.getParserRuleContext().getRuleIndex()]
var ruleName = recognizer.getRuleNames()[recognizer.GetParserRuleContext().getRuleIndex()]
var msg = "rule " + ruleName + " " + e.message
recognizer.notifyErrorListeners(msg, e.offendingToken, e)
}
@ -335,7 +335,7 @@ func (this *DefaultErrorStrategy) reportFailedPredicate(recognizer IParser, e *F
// {@code recognizer} is in error recovery mode.
//
// <p>This method is called when {@link //singleTokenDeletion} identifies
// single-token deletion as a viable recovery strategy for a mismatched
// single-token deletion as a viable recovery strategy for a misMatched
// input error.</p>
//
// <p>The default implementation simply returns if the handler is already in
@ -351,7 +351,7 @@ func (this *DefaultErrorStrategy) reportUnwantedToken(recognizer IParser) {
}
this.beginErrorCondition(recognizer)
var t = recognizer.getCurrentToken()
var tokenName = this.getTokenErrorDisplay(t)
var tokenName = this.GetTokenErrorDisplay(t)
var expecting = this.getExpectedTokens(recognizer)
var msg = "extraneous input " + tokenName + " expecting " +
expecting.toStringVerbose(recognizer.getLiteralNames(), recognizer.getSymbolicNames(), false)
@ -364,7 +364,7 @@ func (this *DefaultErrorStrategy) reportUnwantedToken(recognizer IParser) {
// method returns, {@code recognizer} is in error recovery mode.
//
// <p>This method is called when {@link //singleTokenInsertion} identifies
// single-token insertion as a viable recovery strategy for a mismatched
// single-token insertion as a viable recovery strategy for a misMatched
// input error.</p>
//
// <p>The default implementation simply returns if the handler is already in
@ -382,21 +382,21 @@ func (this *DefaultErrorStrategy) reportMissingToken(recognizer IParser) {
var t = recognizer.getCurrentToken()
var expecting = this.getExpectedTokens(recognizer)
var msg = "missing " + expecting.toStringVerbose(recognizer.getLiteralNames(), recognizer.getSymbolicNames(), false) +
" at " + this.getTokenErrorDisplay(t)
" at " + this.GetTokenErrorDisplay(t)
recognizer.notifyErrorListeners(msg, t, nil)
}
// <p>The default implementation attempts to recover from the mismatched input
// <p>The default implementation attempts to recover from the misMatched input
// by using single token insertion and deletion as described below. If the
// recovery attempt fails, this method panics with an
// {@link InputMismatchException}.</p>
// {@link InputMisMatchException}.</p>
//
// <p><strong>EXTRA TOKEN</strong> (single token deletion)</p>
//
// <p>{@code LA(1)} is not what we are looking for. If {@code LA(2)} has the
// right token, however, then assume {@code LA(1)} is some extra spurious
// token and delete it. Then consume and return the next token (which was
// the {@code LA(2)} token) as the successful result of the match operation.</p>
// the {@code LA(2)} token) as the successful result of the Match operation.</p>
//
// <p>This recovery strategy is implemented by {@link
// //singleTokenDeletion}.</p>
@ -407,7 +407,7 @@ func (this *DefaultErrorStrategy) reportMissingToken(recognizer IParser) {
// after the expected {@code LA(1)} token, then assume the token is missing
// and use the parser's {@link TokenFactory} to create it on the fly. The
// "insertion" is performed by returning the created token as the successful
// result of the match operation.</p>
// result of the Match operation.</p>
//
// <p>This recovery strategy is implemented by {@link
// //singleTokenInsertion}.</p>
@ -422,7 +422,7 @@ func (this *DefaultErrorStrategy) reportMissingToken(recognizer IParser) {
// stat &rarr; expr &rarr; atom
// </pre>
//
// and it will be trying to match the {@code ')'} at this point in the
// and it will be trying to Match the {@code ')'} at this point in the
// derivation:
//
// <pre>
@ -430,54 +430,54 @@ func (this *DefaultErrorStrategy) reportMissingToken(recognizer IParser) {
// ^
// </pre>
//
// The attempt to match {@code ')'} will fail when it sees {@code ';'} and
// The attempt to Match {@code ')'} will fail when it sees {@code ';'} and
// call {@link //recoverInline}. To recover, it sees that {@code LA(1)==';'}
// is in the set of tokens that can follow the {@code ')'} token reference
// in rule {@code atom}. It can assume that you forgot the {@code ')'}.
//
func (this *DefaultErrorStrategy) recoverInline(recognizer IParser) *Token {
func (this *DefaultErrorStrategy) RecoverInline(recognizer IParser) *Token {
// SINGLE TOKEN DELETION
var matchedSymbol = this.singleTokenDeletion(recognizer)
if matchedSymbol != nil {
var MatchedSymbol = this.singleTokenDeletion(recognizer)
if MatchedSymbol != nil {
// we have deleted the extra token.
// now, move past ttype token as if all were ok
recognizer.consume()
return matchedSymbol
recognizer.Consume()
return MatchedSymbol
}
// SINGLE TOKEN INSERTION
if this.singleTokenInsertion(recognizer) {
return this.getMissingSymbol(recognizer)
}
// even that didn't work, so panic with the exception
panic(NewInputMismatchException(recognizer))
panic(NewInputMisMatchException(recognizer))
}
//
// This method implements the single-token insertion inline error recovery
// strategy. It is called by {@link //recoverInline} if the single-token
// deletion strategy fails to recover from the mismatched input. If this
// deletion strategy fails to recover from the misMatched input. If this
// method returns {@code true}, {@code recognizer} will be in error recovery
// mode.
//
// <p>This method determines whether or not single-token insertion is viable by
// checking if the {@code LA(1)} input symbol could be successfully matched
// checking if the {@code LA(1)} input symbol could be successfully Matched
// if it were instead the {@code LA(2)} symbol. If this method returns
// {@code true}, the caller is responsible for creating and inserting a
// token with the correct type to produce this behavior.</p>
//
// @param recognizer the parser instance
// @return {@code true} if single-token insertion is a viable recovery
// strategy for the current mismatched input, otherwise {@code false}
// strategy for the current misMatched input, otherwise {@code false}
//
func (this *DefaultErrorStrategy) singleTokenInsertion(recognizer IParser) bool {
var currentSymbolType = recognizer.getTokenStream().LA(1)
var currentSymbolType = recognizer.GetTokenStream().LA(1)
// if current token is consistent with what could come after current
// ATN state, then we know we're missing a token error recovery
// is free to conjure up and insert the missing token
var atn = recognizer.getInterpreter().atn
var currentState = atn.states[recognizer.getState()]
var atn = recognizer.GetInterpreter().atn
var currentState = atn.states[recognizer.GetState()]
var next = currentState.getTransitions()[0].getTarget()
var expectingAtLL2 = atn.nextTokens(next, recognizer.getParserRuleContext())
var expectingAtLL2 = atn.nextTokens(next, recognizer.GetParserRuleContext())
if expectingAtLL2.contains(currentSymbolType) {
this.reportMissingToken(recognizer)
return true
@ -488,36 +488,36 @@ func (this *DefaultErrorStrategy) singleTokenInsertion(recognizer IParser) bool
// This method implements the single-token deletion inline error recovery
// strategy. It is called by {@link //recoverInline} to attempt to recover
// from mismatched input. If this method returns nil, the parser and error
// from misMatched input. If this method returns nil, the parser and error
// handler state will not have changed. If this method returns non-nil,
// {@code recognizer} will <em>not</em> be in error recovery mode since the
// returned token was a successful match.
// returned token was a successful Match.
//
// <p>If the single-token deletion is successful, this method calls
// {@link //reportUnwantedToken} to report the error, followed by
// {@link Parser//consume} to actually "delete" the extraneous token. Then,
// before returning {@link //reportMatch} is called to signal a successful
// match.</p>
// Match.</p>
//
// @param recognizer the parser instance
// @return the successfully Matched {@link Token} instance if single-token
// deletion successfully recovers from the misMatched input, otherwise
// {@code nil}
//
func (this *DefaultErrorStrategy) singleTokenDeletion(recognizer IParser) *Token {
var nextTokenType = recognizer.GetTokenStream().LA(2)
var expecting = this.getExpectedTokens(recognizer)
if expecting.contains(nextTokenType) {
this.reportUnwantedToken(recognizer)
// print("recoverFromMismatchedToken deleting " \
// + str(recognizer.getTokenStream().LT(1)) \
// + " since " + str(recognizer.getTokenStream().LT(2)) \
// print("recoverFromMisMatchedToken deleting " \
// + str(recognizer.GetTokenStream().LT(1)) \
// + " since " + str(recognizer.GetTokenStream().LT(2)) \
// + " is what we want", file=sys.stderr)
recognizer.Consume() // simply delete extra token
// we want to return the token we're actually Matching
var MatchedSymbol = recognizer.getCurrentToken()
this.reportMatch(recognizer) // we know current token is correct
return MatchedSymbol
} else {
return nil
}
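
A concrete walkthrough, assuming a rule that expects ';' after an identifier and the input x , ; (the trace comments are illustrative, not runtime output):

// singleTokenDeletion with expecting = {';'} and input: x , ;
// LT(1) == ','   -> not expected here
// LA(2) == ';'   -> in expecting, so deletion is viable
// reportUnwantedToken(recognizer)  // "extraneous input ','"
// recognizer.Consume()             // delete the ','
// MatchedSymbol == ';'             // current token now Matches
// reportMatch(recognizer); return MatchedSymbol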
@ -528,7 +528,7 @@ func (this *DefaultErrorStrategy) singleTokenDeletion(recognizer IParser) *Token
// The recognizer attempts to recover from single missing
// symbols. But, actions might refer to that missing symbol.
// For example, x=ID {f($x)}. The action clearly assumes
// that there has been an identifier Matched previously and that
// $x points at that token. If that token is missing, but
// the next token in the stream is what we want we assume that
// this token is missing and we keep going. Because we
@ -553,12 +553,12 @@ func (this *DefaultErrorStrategy) getMissingSymbol(recognizer IParser) *Token {
tokenText = "<missing " + recognizer.getLiteralNames()[expectedTokenType] + ">"
}
var current = currentSymbol
var lookback = recognizer.GetTokenStream().LT(-1)
if current.tokenType == TokenEOF && lookback != nil {
current = lookback
}
tf := recognizer.GetTokenFactory()
return tf.create(current.source, expectedTokenType, tokenText, TokenDefaultChannel, -1, -1, current.line, current.column)
}
@ -574,7 +574,7 @@ func (this *DefaultErrorStrategy) getExpectedTokens(recognizer IParser) *Interva
// your token objects because you don't have to go modify your lexer
// so that it creates a new Java type.
//
func (this *DefaultErrorStrategy) GetTokenErrorDisplay(t *Token) string {
if t == nil {
return "<no token>"
}
@ -652,18 +652,18 @@ func (this *DefaultErrorStrategy) escapeWSAndQuote(s string) string {
// (context-sensitive or otherwise). We need the combined set of
// all context-sensitive FOLLOW sets--the set of all tokens that
// could follow any reference in the call chain. We need to
// reSync to one of those tokens. Note that FOLLOW(c)='^' and if
// we reSync'd to that token, we'd consume until EOF. We need to
// Sync to context-sensitive FOLLOWs for a, b, and c: {']','^'}.
// In this case, for input "[]", LA(1) is ']' and in the set, so we would
// not consume anything. After printing an error, rule c would
// return normally. Rule b would not find the required '^' though.
// At this point, it gets a misMatched token error and panics an
// exception (since LA(1) is not in the viable following token
// set). The rule exception handler tries to recover, but finds
// the same recovery set and doesn't consume anything. Rule b
// exits normally returning to rule a. Now it finds the ']' (and
// with the successful Match exits errorRecovery mode).
//
// So, you can see that the parser walks up the call chain looking
// for the token that was a member of the recovery set.
@ -689,8 +689,8 @@ func (this *DefaultErrorStrategy) escapeWSAndQuote(s string) string {
// at run-time upon error to avoid overhead during parsing.
//
func (this *DefaultErrorStrategy) getErrorRecoverySet(recognizer IParser) *IntervalSet {
var atn = recognizer.GetInterpreter().atn
var ctx = recognizer.GetParserRuleContext()
var recoverSet = NewIntervalSet()
for ctx != nil && ctx.getInvokingState() >= 0 {
// compute what follows who invoked us
@ -698,18 +698,18 @@ func (this *DefaultErrorStrategy) getErrorRecoverySet(recognizer IParser) *Inter
var rt = invokingState.getTransitions()[0]
var follow = atn.nextTokens(rt.(*RuleTransition).followState, nil)
recoverSet.addSet(follow)
ctx = ctx.GetParent().(IParserRuleContext)
}
recoverSet.removeOne(TokenEpsilon)
return recoverSet
}
// Consume tokens until one Matches the given token set.//
func (this *DefaultErrorStrategy) consumeUntil(recognizer IParser, set *IntervalSet) {
var ttype = recognizer.GetTokenStream().LA(1)
for ttype != TokenEOF && !set.contains(ttype) {
recognizer.Consume()
ttype = recognizer.GetTokenStream().LA(1)
}
}
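
The panic-mode recover path combines the two helpers above: compute the combined FOLLOW set for the rule-invocation stack, then discard tokens until one of its members appears. A minimal sketch of that composition, assuming the surrounding DefaultErrorStrategy types:

// Sketch: panic-mode resynchronization built from the helpers above.
func recoverSketch(this *DefaultErrorStrategy, recognizer IParser) {
	// union of context-sensitive FOLLOW sets up the call chain
	var followSet = this.getErrorRecoverySet(recognizer)
	// drop input until something in that set shows up
	this.consumeUntil(recognizer, followSet)
}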
@ -728,7 +728,7 @@ func (this *DefaultErrorStrategy) consumeUntil(recognizer IParser, set *Interval
// stage of two-stage parsing to immediately terminate if an error is
// encountered, and immediately fall back to the second stage. In addition to
// avoiding wasted work by attempting to recover from errors here, the empty
// implementation of {@link BailErrorStrategy//Sync} improves the performance of
// the first stage.</li>
// <li><strong>Silent validation:</strong> When syntax errors are not being
// reported or logged, and the parse result is simply ignored if errors occur,
@ -758,11 +758,11 @@ func NewBailErrorStrategy() *BailErrorStrategy {
// rule func catches. Use {@link Exception//getCause()} to get the
// original {@link RecognitionException}.
//
func (this *BailErrorStrategy) Recover(recognizer IParser, e IRecognitionException) {
var context = recognizer.GetParserRuleContext()
for context != nil {
context.SetException(e)
context = context.GetParent().(IParserRuleContext)
}
panic(NewParseCancellationException()) // TODO we don't emit e properly
}
@ -770,11 +770,11 @@ func (this *BailErrorStrategy) recover(recognizer IParser, e IRecognitionExcepti
// Make sure we don't attempt to recover inline if the parser
// successfully recovers, it won't panic an exception.
//
func (this *BailErrorStrategy) RecoverInline(recognizer IParser) {
this.Recover(recognizer, NewInputMisMatchException(recognizer))
}
// Make sure we don't attempt to recover from problems in subrules.//
func (this *BailErrorStrategy) Sync(recognizer IParser) {
// pass
}
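
On the caller's side, the two-stage pattern described above amounts to parsing once with the bail strategy and re-parsing with the default strategy only if that panics. A sketch under stated assumptions: newParser, setErrorHandler, and startRule are illustrative stand-ins; only BailErrorStrategy itself comes from this file.

// Sketch: stage 1 bails fast; stage 2 pays for full error recovery.
func twoStageParse(tokens TokenStream) (tree IParserRuleContext) {
	defer func() {
		if r := recover(); r != nil {
			// first stage hit a syntax error; re-parse with full recovery
			p := newParser(tokens) // hypothetical constructor
			tree = p.startRule()   // hypothetical start rule
		}
	}()
	p := newParser(tokens)                    // hypothetical constructor
	p.setErrorHandler(NewBailErrorStrategy()) // hypothetical setter
	return p.startRule()
}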


@ -4,7 +4,7 @@ import ()
// The root of the ANTLR exception hierarchy. In general, ANTLR tracks just
// 3 kinds of errors: prediction errors, failed predicate errors, and
// misMatched input errors. In each case, the parser knows where it is
// in the input, where it is in the ATN, the rule invocation stack,
// and what kind of problem occurred.
@ -54,10 +54,10 @@ func (t *RecognitionException) InitRecognitionException(message string, recogniz
// occurred. For {@link NoViableAltException} and
// {@link LexerNoViableAltException} exceptions, this is the
// {@link DecisionState} number. For others, it is the state whose outgoing
// edge we couldn't Match.
t.offendingState = -1
if t.recognizer != nil {
t.offendingState = t.recognizer.GetState()
}
}
@ -73,7 +73,7 @@ func (this *RecognitionException) getOffendingToken() *Token {
//
// Gets the set of input symbols which could potentially follow the
// previously Matched symbol at the time this exception was raised.
//
// <p>If the set of expected tokens is not known and could not be computed,
// this method returns {@code nil}.</p>
@ -116,7 +116,7 @@ func NewLexerNoViableAltException(lexer *Lexer, input CharStream, startIndex int
func (this *LexerNoViableAltException) toString() string {
var symbol = ""
if this.startIndex >= 0 && this.startIndex < this.input.size() {
symbol = this.input.GetTextFromInterval(NewInterval(this.startIndex, this.startIndex))
}
return "LexerNoViableAltException" + symbol
}
@ -138,7 +138,7 @@ type NoViableAltException struct {
func NewNoViableAltException(recognizer IParser, input CharStream, startToken *Token, offendingToken *Token, deadEndConfigs *ATNConfigSet, ctx IParserRuleContext) *NoViableAltException {
if ctx == nil {
ctx = recognizer.GetParserRuleContext()
}
if offendingToken == nil {
@ -156,7 +156,7 @@ func NewNoViableAltException(recognizer IParser, input CharStream, startToken *T
this := new(NoViableAltException)
this.InitRecognitionException("", recognizer, input, ctx)
// Which configurations did we try at input.index() that couldn't Match
// input.LT(1)?//
this.deadEndConfigs = deadEndConfigs
// The token object at the start index the input stream might
@ -169,17 +169,17 @@ func NewNoViableAltException(recognizer IParser, input CharStream, startToken *T
return this
}
type InputMisMatchException struct {
RecognitionException
}
// This signifies any kind of misMatched input exceptions such as
// when the current input does not Match the expected token.
//
func NewInputMisMatchException(recognizer IParser) *InputMisMatchException {
this := new(InputMisMatchException)
this.InitRecognitionException("", recognizer, recognizer.getInputStream(), recognizer.GetParserRuleContext())
this.offendingToken = recognizer.getCurrentToken()
@ -188,7 +188,7 @@ func NewInputMismatchException(recognizer IParser) *InputMismatchException {
}
// A semantic predicate failed during validation. Validation of predicates
// occurs when normally parsing the alternative just like Matching a token.
// Disambiguating predicate evaluation occurs when we test a predicate during
// prediction.
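
Because this port signals recognition errors with panic, a caller that wants to tell the error kinds apart can recover and type-switch on the value. A minimal sketch; parse and the two handlers are hypothetical:

// Sketch: sorting recognition errors delivered via panic.
func parseGuarded(parse func()) {
	defer func() {
		if r := recover(); r != nil {
			switch e := r.(type) {
			case *InputMisMatchException:
				handleMisMatch(e) // hypothetical: current input can't Match
			case *NoViableAltException:
				handleNoViableAlt(e) // hypothetical: no alternative viable
			default:
				panic(r) // not a recognition error; re-panic
			}
		}
	}()
	parse()
}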


@ -23,7 +23,7 @@ func (is *InputStream) reset() {
is.index = 0
}
func (is *InputStream) Consume() {
if is.index >= is.size {
// assert is.LA(1) == TokenEOF
panic("cannot consume EOF")
@ -66,7 +66,7 @@ func (is *InputStream) seek(index int) {
is.index = intMin(index, is.size)
}
func (is *InputStream) GetText(start int, stop int) string {
if stop >= is.size {
stop = is.size - 1
}


@ -1,7 +1,7 @@
package antlr4
type IntStream interface {
Consume()
LA(int) int
mark() int
release(marker int)


@ -23,7 +23,7 @@ const (
// Calculates the SLL(1) expected lookahead set for each outgoing transition
// of an {@link ATNState}. The returned array has one element for each
// outgoing transition in {@code s}. If the closure from transition
// <em>i</em> leads to a semantic predicate before Matching a symbol, the
// element at index <em>i</em> of the result will be {@code nil}.
//
// @param s the ATN state
@ -154,7 +154,7 @@ func (la *LL1Analyzer) _LOOK(s, stopState IATNState, ctx IPredictionContext, loo
}()
calledRuleStack.clear(returnState.getRuleIndex())
la._LOOK(returnState, stopState, ctx.GetParent(i), look, lookBusy, calledRuleStack, seeThruPreds, addEOF)
}
return
@ -172,7 +172,7 @@ func (la *LL1Analyzer) _LOOK(s, stopState IATNState, ctx IPredictionContext, loo
continue
}
newContext := SingletonPredictionContextcreate(ctx, t1.followState.GetStateNumber())
defer func() {
calledRuleStack.remove(t1.getTarget().getRuleIndex())


@ -7,7 +7,7 @@ import (
// A lexer is recognizer that draws input symbols from a character stream.
// lexer grammars result in a subclass of this object. A Lexer object
// uses simplified Match() and error recovery mechanisms in the interest
// of speed.
///
@ -23,7 +23,7 @@ type ILexer interface {
}
type Lexer struct {
*Recognizer
Interpreter *LexerATNSimulator
@ -64,8 +64,8 @@ func (l *Lexer) InitLexer(input CharStream) {
// The goal of all lexer rules/methods is to create a token object.
// l is an instance variable as multiple rules may collaborate to
// create a single token. nextToken will return l object after
// Matching lexer rule(s). If you subclass to allow multiple token
// emissions, then set l to the last token to be Matched or
// something nonnil so that the auto token emit mechanism will not
// emit another token.
l._token = nil
@ -139,14 +139,14 @@ func (l *Lexer) getInputStream() CharStream {
}
func (l *Lexer) getSourceName() string {
return l.grammarFileName
}
func (l *Lexer) setChannel(v int) {
l._channel = v
}
func (l *Lexer) GetTokenFactory() TokenFactory {
return l._factory
}
@ -161,16 +161,16 @@ func (l *Lexer) safeMatch() (ret int) {
if e := recover(); e != nil {
if re, ok := e.(IRecognitionException); ok {
l.notifyListeners(re) // report error
l.Recover(re)
ret = LexerSkip // default
}
}
}()
return l.Interpreter.Match(l._input, l._mode)
}
// Return a token from l source i.e., Match a token on the char stream.
func (l *Lexer) nextToken() *Token {
if l._input == nil {
panic("nextToken requires a non-nil input stream.")
@ -181,7 +181,7 @@ func (l *Lexer) nextToken() *Token {
// previously in finally block
defer func() {
// make sure we release marker after Match or
// unbuffered char stream will keep buffering
l._input.release(tokenStartMarker)
}()
@ -282,7 +282,7 @@ func (l *Lexer) setInputStream(input CharStream) {
// By default does not support multiple emits per nextToken invocation
// for efficiency reasons. Subclass and override l method, nextToken,
// and GetToken (to push tokens into a list and pull from that list
// rather than a single variable as l implementation does).
// /
func (l *Lexer) emitToken(token *Token) {
@ -330,13 +330,13 @@ func (l *Lexer) getCharIndex() int {
return l._input.index()
}
// Return the text Matched so far for the current token or any text override.
//Set the complete text of l token it wipes any previous changes to the text.
func (l *Lexer) text() string {
if l._text != nil {
return *l._text
} else {
return l.Interpreter.GetText(l._input)
}
}
@ -364,7 +364,7 @@ func (l *Lexer) getAllTokens() []*Token {
func (l *Lexer) notifyListeners(e IRecognitionException) {
var start = l._tokenStartCharIndex
var stop = l._input.index()
var text = l._input.GetTextFromInterval(NewInterval(start, stop))
var msg = "token recognition error at: '" + text + "'"
var listener = l.getErrorListenerDispatch()
listener.syntaxError(l, nil, l._tokenStartLine, l._tokenStartColumn, msg, e)
@ -388,19 +388,19 @@ func (l *Lexer) getCharErrorDisplay(c rune) string {
return "'" + l.getErrorDisplayForChar(c) + "'"
}
// Lexers can normally Match any char in its vocabulary after Matching
// a token, so do the easy thing and just kill a character and hope
// it all works out. You can instead use the rule invocation stack
// to do sophisticated error recovery if you are in a fragment rule.
// /
func (l *Lexer) Recover(re IRecognitionException) {
if l._input.LA(1) != TokenEOF {
if _, ok := re.(*LexerNoViableAltException); ok {
// skip a char and try again
l.Interpreter.consume(l._input)
} else {
// TODO: Do we lose character or line position information?
l._input.Consume()
}
}
}
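
In normal use this recovery is invisible to callers, which simply pull tokens until EOF. A sketch assuming a constructed lexer l and the Token fields shown elsewhere in this change:

// Sketch: draining a lexer; Recover keeps nextToken from aborting on bad input.
func drainTokens(l *Lexer) {
	for {
		var t = l.nextToken()
		if t.tokenType == TokenEOF {
			break
		}
		fmt.Printf("type=%d line=%d column=%d\n", t.tokenType, t.line, t.column)
	}
}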


@ -59,7 +59,7 @@ type LexerATNSimulator struct {
column int
mode int
prevAccept *SimState
Match_calls int
}
func NewLexerATNSimulator(recog *Lexer, atn *ATN, decisionToDFA []*DFA, sharedContextCache *PredictionContextCache) *LexerATNSimulator {
@ -94,7 +94,7 @@ var LexerATNSimulatordfa_debug = false
var LexerATNSimulatorMIN_DFA_EDGE = 0
var LexerATNSimulatorMAX_DFA_EDGE = 127 // forces unicode to stay in ATN
var LexerATNSimulatorMatch_calls = 0
func (this *LexerATNSimulator) copyState(simulator *LexerATNSimulator) {
this.column = simulator.column
@ -103,9 +103,9 @@ func (this *LexerATNSimulator) copyState(simulator *LexerATNSimulator) {
this.startIndex = simulator.startIndex
}
func (this *LexerATNSimulator) Match(input CharStream, mode int) int {
this.Match_calls += 1
this.mode = mode
var mark = input.mark()
@ -117,7 +117,7 @@ func (this *LexerATNSimulator) match(input CharStream, mode int) int {
this.prevAccept.reset()
var dfa = this.decisionToDFA[mode]
if dfa.s0 == nil {
return this.MatchATN(input)
} else {
return this.execATN(input, dfa.s0)
}
@ -131,11 +131,11 @@ func (this *LexerATNSimulator) reset() {
this.mode = LexerDefaultMode
}
func (this *LexerATNSimulator) MatchATN(input CharStream) int {
var startState = this.atn.modeToStartState[this.mode]
if LexerATNSimulatordebug {
fmt.Println("matchATN mode " + strconv.Itoa(this.mode) + " start: " + startState.toString())
fmt.Println("MatchATN mode " + strconv.Itoa(this.mode) + " start: " + startState.toString())
}
var old_mode = this.mode
var s0_closure = this.computeStartState(input, startState)
@ -151,7 +151,7 @@ func (this *LexerATNSimulator) matchATN(input CharStream) int {
var predict = this.execATN(input, next)
if LexerATNSimulatordebug {
fmt.Println("DFA after matchATN: " + this.decisionToDFA[old_mode].toLexerString())
fmt.Println("DFA after MatchATN: " + this.decisionToDFA[old_mode].toLexerString())
}
return predict
}
@ -190,10 +190,10 @@ func (this *LexerATNSimulator) execATN(input CharStream, ds0 *DFAState) int {
// A character will take us back to an existing DFA state
// that already has lots of edges out of it. e.g., .* in comments.
// print("Target for:" + str(s) + " and:" + str(t))
var target = this.getExistingTarGetState(s, t)
// print("Existing:" + str(target))
if target == nil {
target = this.computeTarGetState(input, s, t)
// print("Computed:" + str(target))
}
if target == ATNSimulatorERROR {
@ -227,7 +227,7 @@ func (this *LexerATNSimulator) execATN(input CharStream, ds0 *DFAState) int {
// @return The existing target DFA state for the given input symbol
// {@code t}, or {@code nil} if the target state for this edge is not
// already cached
func (this *LexerATNSimulator) getExistingTarGetState(s *DFAState, t int) *DFAState {
if s.edges == nil || t < LexerATNSimulatorMIN_DFA_EDGE || t > LexerATNSimulatorMAX_DFA_EDGE {
return nil
}
@ -252,7 +252,7 @@ func (this *LexerATNSimulator) getExistingTargetState(s *DFAState, t int) *DFASt
// @return The computed target DFA state for the given input symbol
// {@code t}. If {@code t} does not lead to a valid DFA state, this method
// returns {@link //ERROR}.
func (this *LexerATNSimulator) computeTarGetState(input CharStream, s *DFAState, t int) *DFAState {
var reach = NewOrderedATNConfigSet()
// if we don't find an existing DFA state
// Fill reach starting from closure, following t transitions
@ -264,7 +264,7 @@ func (this *LexerATNSimulator) computeTargetState(input CharStream, s *DFAState,
// cause a failover from DFA later.
this.addDFAEdge(s, t, ATNSimulatorERROR, nil)
}
// stop when we can't Match any more char
return ATNSimulatorERROR
}
// Add an edge from s to target DFA found/created for reach
@ -300,10 +300,10 @@ func (this *LexerATNSimulator) getReachableConfigSet(input CharStream, closure *
continue
}
if LexerATNSimulatordebug {
fmt.Printf("testing %s at %s\n", this.getTokenName(t), cfg.toString()) // this.recog, true))
fmt.Printf("testing %s at %s\n", this.GetTokenName(t), cfg.toString()) // this.recog, true))
}
for j := 0; j < len(cfg.GetState().getTransitions()); j++ {
var trans = cfg.GetState().getTransitions()[j] // for each transition
var target = this.getReachableTarget(trans, t)
if target != nil {
var lexerActionExecutor = cfg.(*LexerATNConfig).lexerActionExecutor
@ -337,7 +337,7 @@ func (this *LexerATNSimulator) accept(input CharStream, lexerActionExecutor *Lex
}
func (this *LexerATNSimulator) getReachableTarget(trans ITransition, t int) IATNState {
if trans.Matches(t, 0, 0xFFFE) {
return trans.getTarget()
} else {
return nil
@ -391,7 +391,7 @@ func (this *LexerATNSimulator) closure(input CharStream, config *LexerATNConfig,
if config.context != nil && !config.context.isEmpty() {
for i := 0; i < config.context.length(); i++ {
if config.context.getReturnState(i) != PredictionContextEMPTY_RETURN_STATE {
var newContext = config.context.getParent(i) // "pop" return state
var newContext = config.context.GetParent(i) // "pop" return state
var returnState = this.atn.states[config.context.getReturnState(i)]
cfg := NewLexerATNConfig2(config, returnState, newContext)
currentAltReachedAcceptState = this.closure(input, cfg, configs, currentAltReachedAcceptState, speculative, treatEofAsEpsilon)
@ -426,7 +426,7 @@ func (this *LexerATNSimulator) getEpsilonTarget(input CharStream, config *LexerA
if trans.getSerializationType() == TransitionRULE {
rt := trans.(*RuleTransition)
var newContext = SingletonPredictionContextcreate(config.context, rt.followState.GetStateNumber())
cfg = NewLexerATNConfig2(config, trans.getTarget(), newContext)
} else if trans.getSerializationType() == TransitionPRECEDENCE {
@ -485,7 +485,7 @@ func (this *LexerATNSimulator) getEpsilonTarget(input CharStream, config *LexerA
trans.getSerializationType() == TransitionRANGE ||
trans.getSerializationType() == TransitionSET {
if treatEofAsEpsilon {
if trans.Matches(TokenEOF, 0, 0xFFFF) {
cfg = NewLexerATNConfig4(config, trans.getTarget())
}
}
@ -496,9 +496,9 @@ func (this *LexerATNSimulator) getEpsilonTarget(input CharStream, config *LexerA
// Evaluate a predicate specified in the lexer.
//
// <p>If {@code speculative} is {@code true}, this method was called before
// {@link //consume} for the Matched character. This method should call
// {@link //consume} before evaluating the predicate to ensure position
// sensitive values, including {@link Lexer//GetText}, {@link Lexer//getLine},
// and {@link Lexer//getcolumn}, properly reflect the current
// lexer state. This method should restore {@code input} and the simulator
// to the original state before returning (i.e. undo the actions made by the
@ -519,7 +519,7 @@ func (this *LexerATNSimulator) evaluatePredicate(input CharStream, ruleIndex, pr
return true
}
if !speculative {
return this.recog.Sempred(nil, ruleIndex, predIndex)
}
var savedcolumn = this.column
var savedLine = this.line
@ -534,7 +534,7 @@ func (this *LexerATNSimulator) evaluatePredicate(input CharStream, ruleIndex, pr
}()
this.consume(input)
return this.recog.Sempred(nil, ruleIndex, predIndex)
}
func (this *LexerATNSimulator) captureSimState(settings *SimState, input CharStream, dfaState *DFAState) {
@ -550,7 +550,7 @@ func (this *LexerATNSimulator) addDFAEdge(from_ *DFAState, tk int, to *DFAState,
// marker indicating dynamic predicate evaluation makes this edge
// dependent on the specific input sequence, so the static edge in the
// DFA should be omitted. The target DFAState is still created since
// execATN has the ability to reSynchronize with the DFA state cache
// following the predicate evaluation step.
//
// TJP notes: next time through the DFA, we see a pred again and eval.
@ -595,7 +595,7 @@ func (this *LexerATNSimulator) addDFAState(configs *ATNConfigSet) *DFAState {
for i := 0; i < len(configs.configs); i++ {
var cfg = configs.configs[i]
_, ok := cfg.GetState().(*RuleStopState)
if ok {
firstConfigWithRuleStopState = cfg
@ -605,19 +605,19 @@ func (this *LexerATNSimulator) addDFAState(configs *ATNConfigSet) *DFAState {
if firstConfigWithRuleStopState != nil {
proposed.isAcceptState = true
proposed.lexerActionExecutor = firstConfigWithRuleStopState.(*LexerATNConfig).lexerActionExecutor
proposed.prediction = this.atn.ruleToTokenType[firstConfigWithRuleStopState.GetState().getRuleIndex()]
}
var hash = proposed.hashString()
var dfa = this.decisionToDFA[this.mode]
var existing = dfa.GetStates()[hash]
if existing != nil {
return existing
}
var newState = proposed
newState.stateNumber = len(dfa.GetStates())
configs.setReadonly(true)
newState.configs = configs
dfa.GetStates()[hash] = newState
return newState
}
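
Stripped of the lexer specifics, addDFAState is plain content-hash interning: states are keyed by the hash of their (read-only) config set and an equivalent cached state is reused rather than re-added. A reduced sketch of that pattern:

// Sketch: interning a computed DFA state by its configuration hash.
func internState(states map[string]*DFAState, proposed *DFAState) *DFAState {
	var hash = proposed.hashString()
	if existing, ok := states[hash]; ok {
		return existing // equivalent state already cached; reuse it
	}
	proposed.stateNumber = len(states) // number states in insertion order
	states[hash] = proposed
	return proposed
}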
@ -625,10 +625,10 @@ func (this *LexerATNSimulator) getDFA(mode int) *DFA {
return this.decisionToDFA[mode]
}
// Get the text Matched so far for the current token.
func (this *LexerATNSimulator) GetText(input CharStream) string {
// index is first lookahead char, don't include.
return input.GetTextFromInterval(NewInterval(this.startIndex, input.index()-1))
}
func (this *LexerATNSimulator) consume(input CharStream) {
@ -639,10 +639,10 @@ func (this *LexerATNSimulator) consume(input CharStream) {
} else {
this.column += 1
}
input.Consume()
}
func (this *LexerATNSimulator) GetTokenName(tt int) string {
if tt == -1 {
return "EOF"
} else {


@ -289,7 +289,7 @@ func NewLexerCustomAction(ruleIndex, actionIndex int) *LexerCustomAction {
// <p>Custom actions are implemented by calling {@link Lexer//action} with the
// appropriate rule and action indexes.</p>
func (this *LexerCustomAction) execute(lexer ILexer) {
lexer.Action(nil, this.ruleIndex, this.actionIndex)
}
func (this *LexerCustomAction) hashString() string {


@ -1,7 +1,7 @@
package antlr4
// Represents an executor for a sequence of lexer actions which traversed during
// the Matching operation of a lexer rule (token).
//
// <p>The executor tracks position information for position-dependent lexer actions
// efficiently, ensuring that actions appearing only at the end of the rule do
@ -40,7 +40,7 @@ func NewLexerActionExecutor(lexerActions []ILexerAction) *LexerActionExecutor {
// {@code lexerAction}.
//
// @param lexerActionExecutor The executor for actions already traversed by
// the lexer while Matching a token within a particular
// {@link LexerATNConfig}. If this is {@code nil}, the method behaves as
// though it were an empty executor.
// @param lexerAction The lexer action to execute after the actions
@ -67,10 +67,10 @@ func LexerActionExecutorappend(lexerActionExecutor *LexerActionExecutor, lexerAc
// {@link IntStream//seek} on the input {@link CharStream} to set the input
// position to the <em>end</em> of the current token. This behavior provides
// for efficient DFA representation of lexer actions which appear at the end
// of a lexer rule, even when the lexer rule Matches a variable number of
// characters.</p>
//
// <p>Prior to traversing a Match transition in the ATN, the current offset
// from the token start index is assigned to all position-dependent lexer
// actions which have not already been assigned a fixed offset. By storing
// the offsets relative to the token start index, the DFA representation of


@ -32,20 +32,23 @@ func (this *TraceListener) exitEveryRule(ctx IParserRuleContext) {
type IParser interface {
IRecognizer
GetInterpreter() *ParserATNSimulator
GetErrorHandler() IErrorStrategy
GetTokenStream() TokenStream
GetTokenFactory() TokenFactory
GetParserRuleContext() IParserRuleContext
getInputStream() CharStream
Consume() *Token
getCurrentToken() *Token
getLiteralNames() []string
getSymbolicNames() []string
getExpectedTokens() *IntervalSet
notifyErrorListeners(msg string, offendingToken *Token, err IRecognitionException)
isExpectedToken(symbol int) bool
getPrecedence() int
getRuleInvocationStack(IParserRuleContext) []string
}
type Parser struct {
@ -133,30 +136,34 @@ func (p *Parser) reset() {
}
}
func (p *Parser) GetErrorHandler() IErrorStrategy {
return p._errHandler
}
// Match current input symbol against {@code ttype}. If the symbol type
// Matches, {@link ANTLRErrorStrategy//reportMatch} and {@link //consume} are
// called to complete the Match process.
//
// <p>If the symbol type does not Match,
// {@link ANTLRErrorStrategy//recoverInline} is called on the current error
// strategy to attempt recovery. If {@link //getBuildParseTree} is
// {@code true} and the token index of the symbol returned by
// {@link ANTLRErrorStrategy//recoverInline} is -1, the symbol is added to
// the parse tree by calling {@link ParserRuleContext//addErrorNode}.</p>
//
// @param ttype the token type to Match
// @return the Matched symbol
// @panics RecognitionException if the current input symbol did not Match
// {@code ttype} and the error strategy could not recover from the
// misMatched symbol
func (p *Parser) Match(ttype int) *Token {
var t = p.getCurrentToken()
if t.tokenType == ttype {
p._errHandler.reportMatch(p)
p.Consume()
} else {
t = p._errHandler.RecoverInline(p)
if p.buildParseTrees && t.tokenIndex == -1 {
// we must have conjured up a new token during single token
// insertion
@ -167,29 +174,29 @@ func (p *Parser) match(ttype int) *Token {
return t
}
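
Generated rule functions are the main callers of Match: each token reference in a grammar alternative becomes one Match call, and sub-rules become method calls. A hand-written sketch of what the generator emits for a rule like stat: ID '=' expr ';' (MyParser and its token constants are illustrative):

// Sketch of a generated rule body built on Match (names illustrative).
func (p *MyParser) stat() {
	p.Match(MyParserID)     // identifier
	p.Match(MyParserASSIGN) // '='
	p.expr()                // sub-rule invocation
	p.Match(MyParserSEMI)   // ';'
	// on a misMatch, Match hands control to the strategy's RecoverInline
}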
// Match current input symbol as a wildcard. If the symbol type Matches
// (i.e. has a value greater than 0), {@link ANTLRErrorStrategy//reportMatch}
// and {@link //consume} are called to complete the Match process.
//
// <p>If the symbol type does not Match,
// {@link ANTLRErrorStrategy//recoverInline} is called on the current error
// strategy to attempt recovery. If {@link //getBuildParseTree} is
// {@code true} and the token index of the symbol returned by
// {@link ANTLRErrorStrategy//recoverInline} is -1, the symbol is added to
// the parse tree by calling {@link ParserRuleContext//addErrorNode}.</p>
//
// @return the Matched symbol
// @panics RecognitionException if the current input symbol did not Match
// a wildcard and the error strategy could not recover from the misMatched
// symbol
func (p *Parser) MatchWildcard() *Token {
var t = p.getCurrentToken()
if t.tokenType > 0 {
p._errHandler.reportMatch(p)
p.Consume()
} else {
t = p._errHandler.RecoverInline(p)
if p.buildParseTrees && t.tokenIndex == -1 {
// we must have conjured up a new token during single token
// insertion
@ -200,7 +207,7 @@ func (p *Parser) matchWildcard() *Token {
return t
}
func (p *Parser) GetParserRuleContext() IParserRuleContext {
return p._ctx
}
@ -280,7 +287,7 @@ func (p *Parser) triggerEnterRuleEvent() {
var ctx = p._ctx
for _, listener := range p._parseListeners {
listener.enterEveryRule(ctx)
ctx.EnterRule(listener)
}
}
}
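
Each registered parse listener therefore receives two callbacks per rule entry: the generic enterEveryRule hook plus the rule-specific dispatch through ctx.EnterRule. A sketch of a listener that only uses the generic hook (the full listener interface is assumed):

// Sketch: a parse listener counting rule entries via the generic hook.
type ruleCounter struct {
	entered int
}

func (r *ruleCounter) enterEveryRule(ctx IParserRuleContext) {
	r.entered++ // fires once per rule entry, before the rule-specific hook
}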
@ -312,7 +319,7 @@ func (this *Parser) getSymbolicNames() []string {
return this.symbolicNames
}
func (this *Parser) GetInterpreter() *ParserATNSimulator {
return this.Interpreter
}
@ -320,13 +327,13 @@ func (this *Parser) getATN() *ATN {
return this.Interpreter.atn
}
func (p *Parser) GetTokenFactory() TokenFactory {
return p._input.GetTokenSource().GetTokenFactory()
}
// Tell our token source and error strategy about a new way to create tokens.//
func (p *Parser) setTokenFactory(factory TokenFactory) {
p._input.GetTokenSource().setTokenFactory(factory)
}
// The ATN with bypass alternatives is expensive to create so we create it
@ -361,7 +368,7 @@ func (p *Parser) getATNWithBypassAlts() {
// ParseTree t = parser.expr()
// ParseTreePattern p = parser.compileParseTreePattern("&ltID&gt+0",
// MyParser.RULE_expr)
// ParseTreeMatch m = p.Match(t)
// String id = m.get("ID")
// </pre>
@ -370,8 +377,8 @@ func (p *Parser) compileParseTreePattern(pattern, patternRuleIndex, lexer ILexer
panic("NewParseTreePatternMatcher not implemented!")
//
// if (lexer == nil) {
// if (p.GetTokenStream() != nil) {
// var tokenSource = p.GetTokenStream().GetTokenSource()
// if _, ok := tokenSource.(ILexer); ok {
// lexer = tokenSource
// }
@ -386,14 +393,14 @@ func (p *Parser) compileParseTreePattern(pattern, patternRuleIndex, lexer ILexer
}
func (p *Parser) getInputStream() CharStream {
return p.GetTokenStream().(CharStream)
}
func (p *Parser) setInputStream(input TokenStream) {
p.setTokenStream(input)
}
func (p *Parser) GetTokenStream() TokenStream {
return p._input
}
@ -422,10 +429,10 @@ func (p *Parser) notifyErrorListeners(msg string, offendingToken *Token, err IRe
listener.syntaxError(p, offendingToken, line, column, msg, err)
}
func (p *Parser) Consume() *Token {
var o = p.getCurrentToken()
if o.tokenType != TokenEOF {
p.getInputStream().Consume()
}
var hasListener = p._parseListeners != nil && len(p._parseListeners) > 0
if p.buildParseTrees || hasListener {
@ -453,12 +460,12 @@ func (p *Parser) consume() *Token {
func (p *Parser) addContextToParseTree() {
// add current context to parent if we have a parent
if p._ctx.GetParent() != nil {
p._ctx.GetParent().setChildren(append(p._ctx.GetParent().getChildren(), p._ctx))
}
}
func (p *Parser) EnterRule(localctx IParserRuleContext, state, ruleIndex int) {
p.state = state
p._ctx = localctx
p._ctx.setStart(p._input.LT(1))
@ -477,16 +484,16 @@ func (p *Parser) exitRule() {
p.triggerExitRuleEvent()
}
p.state = p._ctx.getInvokingState()
p._ctx = p._ctx.GetParent().(IParserRuleContext)
}
func (p *Parser) EnterOuterAlt(localctx IParserRuleContext, altNum int) {
// if we have a new localctx, make sure we replace existing ctx
// that is previous child of parse tree
if p.buildParseTrees && p._ctx != localctx {
if p._ctx.GetParent() != nil {
p._ctx.GetParent().(IParserRuleContext).removeLastChild()
p._ctx.GetParent().(IParserRuleContext).addChild(localctx)
}
}
p._ctx = localctx
@ -505,7 +512,7 @@ func (p *Parser) getPrecedence() int {
}
}
func (p *Parser) EnterRecursionRule(localctx IParserRuleContext, state, ruleIndex, precedence int) {
p.state = state
p._precedenceStack.Push(precedence)
p._ctx = localctx
@ -517,7 +524,7 @@ func (p *Parser) enterRecursionRule(localctx IParserRuleContext, state, ruleInde
}
//
// Like {@link //EnterRule} but for recursive rules.
func (p *Parser) pushNewRecursionContext(localctx IParserRuleContext, state, ruleIndex int) {
var previous = p._ctx
@ -536,7 +543,7 @@ func (p *Parser) pushNewRecursionContext(localctx IParserRuleContext, state, rul
}
}
func (p *Parser) UnrollRecursionContexts(parentCtx IParserRuleContext) {
p._precedenceStack.Pop()
p._ctx.setStop(p._input.LT(-1))
var retCtx = p._ctx // save current ctx (return value)
@ -544,7 +551,7 @@ func (p *Parser) unrollRecursionContexts(parentCtx IParserRuleContext) {
if p._parseListeners != nil {
for p._ctx != parentCtx {
p.triggerExitRuleEvent()
p._ctx = p._ctx.GetParent().(IParserRuleContext)
}
} else {
p._ctx = parentCtx
@ -563,12 +570,12 @@ func (p *Parser) getInvokingContext(ruleIndex int) IParserRuleContext {
if ctx.getRuleIndex() == ruleIndex {
return ctx
}
ctx = ctx.GetParent().(IParserRuleContext)
}
return nil
}
func (p *Parser) Precpred(localctx IRuleContext, precedence int) bool {
return precedence >= p._precedenceStack[len(p._precedenceStack)-1]
}
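
Precpred is the gate that generated left-recursive loops consult: the loop keeps binding an operator only while its precedence is at least the minimum recorded on _precedenceStack. A rough sketch of the generated shape for expr: expr '*' expr | INT (MyParser and its constants are illustrative; the EnterRecursionRule/UnrollRecursionContexts bookkeeping that pushes and pops _precedenceStack is elided):

// Sketch: a generated left-recursive loop gated by Precpred.
func (p *MyParser) expr(minPrec int) {
	p.Match(MyParserINT) // primary alternative
	for p.Precpred(nil, 2) { // bind '*' only while precedence 2 is allowed
		p.Match(MyParserSTAR)
		p.expr(3) // parse the right operand at a tighter precedence
	}
}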
@ -609,7 +616,7 @@ func (p *Parser) isExpectedToken(symbol int) bool {
if following.contains(symbol) {
return true
}
ctx = ctx.GetParent().(IParserRuleContext)
}
if following.contains(TokenEpsilon) && symbol == TokenEOF {
return true
@ -619,7 +626,7 @@ func (p *Parser) isExpectedToken(symbol int) bool {
}
// Computes the set of input symbols which could follow the current parser
// state and context, as given by {@link //GetState} and {@link //getContext},
// respectively.
//
// @see ATN//getExpectedTokens(int, RuleContext)
@ -664,7 +671,7 @@ func (this *Parser) getRuleInvocationStack(p IParserRuleContext) []string {
} else {
stack = append(stack, this.getRuleNames()[ruleIndex])
}
p = p.GetParent().(IParserRuleContext)
}
return stack
}
@ -705,7 +712,7 @@ func (p *Parser) getSourceName() string {
}
// During a parse is sometimes useful to listen in on the rule entry and exit
// events as well as token Matches. This is for quick and dirty debugging.
//
func (p *Parser) setTrace(trace *TraceListener) {
if trace == nil {


@ -43,7 +43,7 @@ func (this *ParserATNSimulator) InitParserATNSimulator(parser IParser, atn *ATN,
this._dfa = nil
// Each prediction operation uses a cache for merge of prediction contexts.
// Don't keep around as it wastes huge amounts of memory. DoubleKeyMap
// isn't Synchronized but we're ok since two threads shouldn't reuse same
// parser/atnsim object because it can only handle one input at a time.
// This maps graphs a and b to merged result c. (a,b)&rarrc. We can avoid
// the merge if we ever see a and b again. Note that (b,a)&rarrc should
@ -61,10 +61,10 @@ var ParserATNSimulatorprototyperetry_debug = false
func (this *ParserATNSimulator) reset() {
}
func (this *ParserATNSimulator) AdaptivePredict(input TokenStream, decision int, outerContext IParserRuleContext) int {
if ParserATNSimulatorprototypedebug || ParserATNSimulatorprototypedebug_list_atn_decisions {
fmt.Println("adaptivePredict decision " + strconv.Itoa(decision) +
fmt.Println("AdaptivePredict decision " + strconv.Itoa(decision) +
" exec LA(1)==" + this.getLookaheadName(input) +
" line " + strconv.Itoa(input.LT(1).line) + ":" +
strconv.Itoa(input.LT(1).column))
@ -190,9 +190,9 @@ func (this *ParserATNSimulator) execATN(dfa *DFA, s0 *DFAState, input TokenStrea
}
var t = input.LA(1)
for true { // for more work
var D = this.getExistingTarGetState(previousD, t)
if D == nil {
D = this.computeTarGetState(dfa, previousD, t)
}
if D == ATNSimulatorERROR {
// if any configs in previous dipped into outer context, that
@ -266,7 +266,7 @@ func (this *ParserATNSimulator) execATN(dfa *DFA, s0 *DFAState, input TokenStrea
previousD = D
if t != TokenEOF {
input.Consume()
t = input.LA(1)
}
}
@ -284,7 +284,7 @@ func (this *ParserATNSimulator) execATN(dfa *DFA, s0 *DFAState, input TokenStrea
// {@code t}, or {@code nil} if the target state for this edge is not
// already cached
func (this *ParserATNSimulator) getExistingTarGetState(previousD *DFAState, t int) *DFAState {
var edges = previousD.edges
if edges == nil {
return nil
@ -304,7 +304,7 @@ func (this *ParserATNSimulator) getExistingTargetState(previousD *DFAState, t in
// {@code t}. If {@code t} does not lead to a valid DFA state, this method
// returns {@link //ERROR}.
func (this *ParserATNSimulator) computeTarGetState(dfa *DFA, previousD *DFAState, t int) *DFAState {
var reach = this.computeReachSet(previousD.configs, t, false)
if reach == nil {
@ -435,7 +435,7 @@ func (this *ParserATNSimulator) execATNWithFullContext(dfa *DFA, D *DFAState, s0
}
previous = reach
if t != TokenEOF {
input.Consume()
t = input.LA(1)
}
}
@ -494,8 +494,8 @@ func (this *ParserATNSimulator) computeReachSet(closure *ATNConfigSet, t int, fu
// advantage of having a smaller intermediate set when calling closure.
//
// For full-context reach operations, separate handling is required to
// ensure that the alternative Matching the longest overall sequence is
// chosen when multiple such configurations can Match the input.
var skippedStopStates []*ATNConfig = nil
@ -504,10 +504,10 @@ func (this *ParserATNSimulator) computeReachSet(closure *ATNConfigSet, t int, fu
var c = closure.configs[i]
if ParserATNSimulatorprototypedebug {
fmt.Println("testing " + this.getTokenName(t) + " at " + c.toString())
fmt.Println("testing " + this.GetTokenName(t) + " at " + c.toString())
}
_, ok := c.GetState().(*RuleStopState)
if ok {
if fullCtx || t == TokenEOF {
@ -522,8 +522,8 @@ func (this *ParserATNSimulator) computeReachSet(closure *ATNConfigSet, t int, fu
continue
}
for j := 0; j < len(c.GetState().getTransitions()); j++ {
var trans = c.GetState().getTransitions()[j]
var target = this.getReachableTarget(trans, t)
if target != nil {
var cfg = NewATNConfig4(c, target)
@ -539,7 +539,7 @@ func (this *ParserATNSimulator) computeReachSet(closure *ATNConfigSet, t int, fu
// This block optimizes the reach operation for intermediate sets which
// trivially indicate a termination state for the overall
// AdaptivePredict operation.
//
// The conditions assume that intermediate
// contains all configurations relevant to the reach set, but this
@ -594,8 +594,8 @@ func (this *ParserATNSimulator) computeReachSet(closure *ATNConfigSet, t int, fu
// configuration. For full-context reach operations, these
// configurations reached the end of the start rule, in which case we
// only add them back to reach if no configuration during the current
// closure operation reached such a state. This ensures AdaptivePredict
// chooses an alternative Matching the longest overall sequence when
// multiple alternatives are viable.
//
if skippedStopStates != nil && ((!fullCtx) || (!PredictionModehasConfigInRuleStopState(reach))) {
@ -638,16 +638,16 @@ func (this *ParserATNSimulator) removeAllConfigsNotInRuleStopState(configs *ATNC
for i := 0; i < len(configs.configs); i++ {
var config = configs.configs[i]
_, ok := config.GetState().(*RuleStopState)
if ok {
result.add(config, this.mergeCache)
continue
}
if lookToEndOfRule && config.GetState().getEpsilonOnlyTransitions() {
var nextTokens = this.atn.nextTokens(config.GetState(), nil)
if nextTokens.contains(TokenEpsilon) {
var endOfRuleState = this.atn.ruleToStopState[config.GetState().getRuleIndex()]
result.add(NewATNConfig4(config, endOfRuleState), this.mergeCache)
}
}
@ -740,7 +740,7 @@ func (this *ParserATNSimulator) applyPrecedenceFilter(configs *ATNConfigSet) *AT
// the configuration was eliminated
continue
}
statesFromAlt1[config.GetState().GetStateNumber()] = config.getContext()
if updatedContext != config.getSemanticContext() {
configSet.add(NewATNConfig2(config, updatedContext), this.mergeCache)
} else {
@ -757,7 +757,7 @@ func (this *ParserATNSimulator) applyPrecedenceFilter(configs *ATNConfigSet) *AT
// filter the prediction context for alternatives predicting alt>1
// (basically a graph subtraction algorithm).
if !config.getPrecedenceFilterSuppressed() {
var context = statesFromAlt1[config.GetState().GetStateNumber()]
if context != nil && context.equals(config.getContext()) {
// eliminated
continue
@ -769,7 +769,7 @@ func (this *ParserATNSimulator) applyPrecedenceFilter(configs *ATNConfigSet) *AT
}
func (this *ParserATNSimulator) getReachableTarget(trans ITransition, ttype int) IATNState {
if trans.Matches(ttype, 0, this.atn.maxTokenType) {
return trans.getTarget()
} else {
return nil
@ -852,7 +852,7 @@ func (this *ParserATNSimulator) getPredicatePredictions(ambigAlts *BitSet, altTo
// the parser. Specifically, this could occur if the <em>only</em> configuration
// capable of successfully parsing to the end of the decision rule is
// blocked by a semantic predicate. By choosing this alternative within
// {@link //AdaptivePredict} instead of panicking a
// {@link NoViableAltException}, the resulting
// {@link FailedPredicateException} in the parser will identify the specific
// predicate which is preventing the parser from successfully parsing the
@ -865,9 +865,9 @@ func (this *ParserATNSimulator) getPredicatePredictions(ambigAlts *BitSet, altTo
// @param outerContext The is the \gamma_0 initial parser context from the paper
// or the parser stack at the instant before prediction commences.
//
// @return The value to return from {@link //AdaptivePredict}, or
// {@link ATN//INVALID_ALT_NUMBER} if a suitable alternative was not
// identified and {@link //AdaptivePredict} should report an error instead.
//
func (this *ParserATNSimulator) getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule(configs *ATNConfigSet, outerContext IParserRuleContext) int {
var cfgs = this.splitAccordingToSemanticValidity(configs, outerContext)
@ -892,7 +892,7 @@ func (this *ParserATNSimulator) getAltThatFinishedDecisionEntryRule(configs *ATN
for i := 0; i < len(configs.configs); i++ {
var c = configs.configs[i]
_, ok := c.GetState().(*RuleStopState)
if c.getReachesIntoOuterContext() > 0 || (ok && c.getContext().hasEmptyPath()) {
alts.addOne(c.getAlt())
@ -995,7 +995,7 @@ func (this *ParserATNSimulator) closureCheckingStopState(config IATNConfig, conf
}
}
_, ok := config.GetState().(*RuleStopState)
if ok {
// We hit rule end. If we have context info, use it
// run thru all possible stack tops in ctx
@ -1003,19 +1003,19 @@ func (this *ParserATNSimulator) closureCheckingStopState(config IATNConfig, conf
for i := 0; i < config.getContext().length(); i++ {
if config.getContext().getReturnState(i) == PredictionContextEMPTY_RETURN_STATE {
if fullCtx {
configs.add(NewATNConfig1(config, config.GetState(), PredictionContextEMPTY), this.mergeCache)
continue
} else {
// we have no context info, just chase follow links (if greedy)
if ParserATNSimulatorprototypedebug {
fmt.Println("FALLING off rule " + this.getRuleName(config.getState().getRuleIndex()))
fmt.Println("FALLING off rule " + this.getRuleName(config.GetState().getRuleIndex()))
}
this.closure_(config, configs, closureBusy, collectPredicates, fullCtx, depth, treatEofAsEpsilon)
}
continue
}
returnState := this.atn.states[config.getContext().getReturnState(i)]
newContext := config.getContext().getParent(i) // "pop" return state
newContext := config.getContext().GetParent(i) // "pop" return state
c := NewATNConfig5(returnState, config.getAlt(), newContext, config.getSemanticContext())
// While we have context to pop back from, we may have
@ -1032,7 +1032,7 @@ func (this *ParserATNSimulator) closureCheckingStopState(config IATNConfig, conf
} else {
// else if we have no context info, just chase follow links (if greedy)
if ParserATNSimulatorprototypedebug {
fmt.Println("FALLING off rule " + this.getRuleName(config.getState().getRuleIndex()))
fmt.Println("FALLING off rule " + this.getRuleName(config.GetState().getRuleIndex()))
}
}
}
@ -1041,7 +1041,7 @@ func (this *ParserATNSimulator) closureCheckingStopState(config IATNConfig, conf
// Do the actual work of walking epsilon edges//
func (this *ParserATNSimulator) closure_(config IATNConfig, configs *ATNConfigSet, closureBusy *Set, collectPredicates, fullCtx bool, depth int, treatEofAsEpsilon bool) {
var p = config.GetState()
// optimization
if !p.getEpsilonOnlyTransitions() {
configs.add(config, this.mergeCache)
@ -1120,7 +1120,7 @@ func (this *ParserATNSimulator) getEpsilonTarget(config IATNConfig, t ITransitio
// EOF transitions act like epsilon transitions after the first EOF
// transition is traversed
if treatEofAsEpsilon {
if t.Matches(TokenEOF, 0, 1) {
return NewATNConfig4(config, t.getTarget())
}
}
@ -1129,7 +1129,7 @@ func (this *ParserATNSimulator) getEpsilonTarget(config IATNConfig, t ITransitio
// EOF transitions act like epsilon transitions after the first EOF
// transition is traversed
if treatEofAsEpsilon {
if t.Matches(TokenEOF, 0, 1) {
return NewATNConfig4(config, t.getTarget())
}
}
@ -1138,7 +1138,7 @@ func (this *ParserATNSimulator) getEpsilonTarget(config IATNConfig, t ITransitio
// EOF transitions act like epsilon transitions after the first EOF
// transition is traversed
if treatEofAsEpsilon {
if t.Matches(TokenEOF, 0, 1) {
return NewATNConfig4(config, t.getTarget())
}
}
@ -1233,7 +1233,7 @@ func (this *ParserATNSimulator) ruleTransition(config IATNConfig, t *RuleTransit
fmt.Println("CALL rule " + this.getRuleName(t.getTarget().getRuleIndex()) + ", ctx=" + config.getContext().toString())
}
var returnState = t.followState
var newContext = SingletonPredictionContextcreate(config.getContext(), returnState.GetStateNumber())
return NewATNConfig1(config, t.getTarget(), newContext)
}
@ -1266,7 +1266,7 @@ func (this *ParserATNSimulator) getConflictingAlts(configs *ATNConfigSet) *BitSe
//
// a : A | A | A B
//
// After Matching input A, we reach the stop state for rule A, state 1.
// State 8 is the state right before B. Clearly alternatives 1 and 2
// conflict and no amount of further lookahead will separate the two.
// However, alternative 3 will be able to continue and so we do not
@ -1289,14 +1289,14 @@ func (this *ParserATNSimulator) getConflictingAltsOrUniqueAlt(configs *ATNConfig
return conflictingAlts
}
func (this *ParserATNSimulator) GetTokenName(t int) string {
if t == TokenEOF {
return "EOF"
}
if this.parser != nil && this.parser.getLiteralNames() != nil {
if t >= len(this.parser.getLiteralNames()) {
fmt.Println(strconv.Itoa(t) + " ttype out of range: " + strings.Join(this.parser.getLiteralNames(), ","))
// fmt.Println(this.parser.getInputStream().getTokens())
// fmt.Println(this.parser.getInputStream().GetTokens())
} else {
return this.parser.getLiteralNames()[t] + "<" + strconv.Itoa(t) + ">"
}
@ -1305,10 +1305,10 @@ func (this *ParserATNSimulator) getTokenName(t int) string {
}
func (this *ParserATNSimulator) getLookaheadName(input TokenStream) string {
return this.getTokenName(input.LA(1))
return this.GetTokenName(input.LA(1))
}
// Used for debugging in adaptivePredict around execATN but I cut
// Used for debugging in AdaptivePredict around execATN but I cut
// it out for clarity now that alg. works well. We can leave this
// "dead" code for a bit.
//
@ -1326,7 +1326,7 @@ func (this *ParserATNSimulator) dumpDeadEndConfigs(nvae *NoViableAltException) {
// if (len(c.state.getTransitions())>0) {
// var t = c.state.getTransitions()[0]
// if t2, ok := t.(*AtomTransition); ok {
// trans = "Atom "+ this.getTokenName(t2.label)
// trans = "Atom "+ this.GetTokenName(t2.label)
// } else if t3, ok := t.(SetTransition); ok {
// _, ok := t.(*NotSetTransition)
//
@ -1381,7 +1381,7 @@ func (this *ParserATNSimulator) getUniqueAlt(configs *ATNConfigSet) int {
//
func (this *ParserATNSimulator) addDFAEdge(dfa *DFA, from_ *DFAState, t int, to *DFAState) *DFAState {
if ParserATNSimulatorprototypedebug {
fmt.Println("EDGE " + from_.toString() + " -> " + to.toString() + " upon " + this.getTokenName(t))
fmt.Println("EDGE " + from_.toString() + " -> " + to.toString() + " upon " + this.GetTokenName(t))
}
if to == nil {
return nil
@ -1426,16 +1426,16 @@ func (this *ParserATNSimulator) addDFAState(dfa *DFA, D *DFAState) *DFAState {
return D
}
var hash = D.hashString()
var existing, ok = dfa.getStates()[hash]
var existing, ok = dfa.GetStates()[hash]
if ok {
return existing
}
D.stateNumber = len(dfa.getStates())
D.stateNumber = len(dfa.GetStates())
if !D.configs.readOnly {
D.configs.optimizeConfigs(this.ATNSimulator)
D.configs.setReadonly(true)
}
dfa.getStates()[hash] = D
dfa.GetStates()[hash] = D
if ParserATNSimulatorprototypedebug {
fmt.Println("adding NewDFA state: " + D.toString())
}
@ -1446,7 +1446,7 @@ func (this *ParserATNSimulator) reportAttemptingFullContext(dfa *DFA, conflictin
if ParserATNSimulatorprototypedebug || ParserATNSimulatorprototyperetry_debug {
var interval = NewInterval(startIndex, stopIndex+1)
fmt.Println("reportAttemptingFullContext decision=" + strconv.Itoa(dfa.decision) + ":" + configs.toString() +
", input=" + this.parser.getTokenStream().getTextFromInterval(interval))
", input=" + this.parser.GetTokenStream().GetTextFromInterval(interval))
}
if this.parser != nil {
this.parser.getErrorListenerDispatch().reportAttemptingFullContext(this.parser, dfa, startIndex, stopIndex, conflictingAlts, configs)
@ -1457,7 +1457,7 @@ func (this *ParserATNSimulator) reportContextSensitivity(dfa *DFA, prediction in
if ParserATNSimulatorprototypedebug || ParserATNSimulatorprototyperetry_debug {
var interval = NewInterval(startIndex, stopIndex+1)
fmt.Println("reportContextSensitivity decision=" + strconv.Itoa(dfa.decision) + ":" + configs.toString() +
", input=" + this.parser.getTokenStream().getTextFromInterval(interval))
", input=" + this.parser.GetTokenStream().GetTextFromInterval(interval))
}
if this.parser != nil {
this.parser.getErrorListenerDispatch().reportContextSensitivity(this.parser, dfa, startIndex, stopIndex, prediction, configs)
@ -1470,7 +1470,7 @@ func (this *ParserATNSimulator) reportAmbiguity(dfa *DFA, D *DFAState, startInde
if ParserATNSimulatorprototypedebug || ParserATNSimulatorprototyperetry_debug {
var interval = NewInterval(startIndex, stopIndex+1)
fmt.Println("reportAmbiguity " + ambigAlts.toString() + ":" + configs.toString() +
", input=" + this.parser.getTokenStream().getTextFromInterval(interval))
", input=" + this.parser.GetTokenStream().GetTextFromInterval(interval))
}
if this.parser != nil {
this.parser.getErrorListenerDispatch().reportAmbiguity(this.parser, dfa, startIndex, stopIndex, exact, ambigAlts, configs)
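The renames running through this file are the commit's core mechanic: Go exports an identifier outside its package only when it starts with an upper-case letter, so every runtime method the generated parser package must reach gets capitalized (the rename was applied textually, which is why even comment text now reads "Matching"). A minimal sketch of the rule, with illustrative names rather than the runtime's own:

// Only capitalized identifiers cross the package boundary.
package antlr4

type recognizer struct{ state int }

// GetState is exported: generated code in package parser can call it.
func (r *recognizer) GetState() int { return r.state }

// setState is unexported: visible only inside package antlr4.
func (r *recognizer) setState(v int) { r.state = v }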
@ -7,10 +7,10 @@ import (
type IParserRuleContext interface {
IRuleContext
setException(IRecognitionException)
SetException(IRecognitionException)
addTokenNode(token *Token) *TerminalNodeImpl
addErrorNode(badToken *Token) *ErrorNodeImpl
enterRule(listener ParseTreeListener)
EnterRule(listener ParseTreeListener)
exitRule(listener ParseTreeListener)
setStart(*Token)
@ -61,11 +61,11 @@ func (prc *ParserRuleContext) InitParserRuleContext(parent IParserRuleContext, i
}
func (prc *ParserRuleContext) setException(e IRecognitionException) {
func (prc *ParserRuleContext) SetException(e IRecognitionException) {
prc.exception = e
}
func (prc *ParserRuleContext) getParent() Tree {
func (prc *ParserRuleContext) GetParent() Tree {
return prc.parentCtx
}
@ -90,7 +90,7 @@ func (prc *ParserRuleContext) copyFrom(ctx *ParserRuleContext) {
}
// Double dispatch methods for listeners
func (prc *ParserRuleContext) enterRule(listener ParseTreeListener) {
func (prc *ParserRuleContext) EnterRule(listener ParseTreeListener) {
}
func (prc *ParserRuleContext) exitRule(listener ParseTreeListener) {
@ -113,7 +113,7 @@ func (prc *ParserRuleContext) addChild(child IRuleContext) IRuleContext {
return child
}
// * Used by enterOuterAlt to toss out a RuleContext previously added as
// * Used by EnterOuterAlt to toss out a RuleContext previously added as
// we entered a rule. If we have // label, we will need to remove
// generic ruleContext object.
// /
@ -181,7 +181,7 @@ func (prc *ParserRuleContext) getStop() *Token {
return prc.stop
}
func (prc *ParserRuleContext) getToken(ttype int, i int) TerminalNode {
func (prc *ParserRuleContext) GetToken(ttype int, i int) TerminalNode {
for j := 0; j < len(prc.children); j++ {
var child = prc.children[j]
@ -198,7 +198,7 @@ func (prc *ParserRuleContext) getToken(ttype int, i int) TerminalNode {
return nil
}
func (prc *ParserRuleContext) getTokens(ttype int) []TerminalNode {
func (prc *ParserRuleContext) GetTokens(ttype int) []TerminalNode {
if prc.children == nil {
return make([]TerminalNode, 0)
} else {
@ -215,13 +215,13 @@ func (prc *ParserRuleContext) getTokens(ttype int) []TerminalNode {
}
}
func (prc *ParserRuleContext) getTypedRuleContext(ctxType reflect.Type, i int) *interface{} {
panic("getTypedRuleContexts not implemented")
func (prc *ParserRuleContext) GetTypedRuleContext(ctxType reflect.Type, i int) *interface{} {
panic("GetTypedRuleContexts not implemented")
// return prc.getChild(i, ctxType)
}
func (prc *ParserRuleContext) getTypedRuleContexts(ctxType reflect.Type) []*interface{} {
panic("getTypedRuleContexts not implemented")
func (prc *ParserRuleContext) GetTypedRuleContexts(ctxType reflect.Type) []*interface{} {
panic("GetTypedRuleContexts not implemented")
// if (prc.children== nil) {
// return []
// } else {
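GetToken and GetTokens above scan the context's children for terminal nodes of a given token type. A self-contained sketch of that scan, using simplified stand-in types rather than the runtime's:

package sketch

type token struct{ ttype int }
type terminal struct{ sym token }

// findToken mirrors GetToken: count terminal children of the requested
// type, in order, and return the i-th one, or nil if there are fewer.
func findToken(children []interface{}, ttype, i int) *terminal {
	n := 0
	for _, c := range children {
		if t, ok := c.(*terminal); ok && t.sym.ttype == ttype {
			if n == i {
				return t
			}
			n++
		}
	}
	return nil
}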
@ -7,7 +7,7 @@ import (
type IPredictionContext interface {
hashString() string
getParent(int) IPredictionContext
GetParent(int) IPredictionContext
getReturnState(int) int
equals(IPredictionContext) bool
length() int
@ -44,7 +44,7 @@ var PredictionContextglobalNodeCount = 1
var PredictionContextid = PredictionContextglobalNodeCount
// Stores the computed hash code of this {@link PredictionContext}. The hash
// code is computed in parts to match the following reference algorithm.
// code is computed in parts to Match the following reference algorithm.
//
// <pre>
// private int referenceHashCode() {
@ -52,8 +52,8 @@ var PredictionContextid = PredictionContextglobalNodeCount
// //INITIAL_HASH})
//
// for (int i = 0 i &lt {@link //size()} i++) {
// hash = {@link MurmurHash//update MurmurHash.update}(hash, {@link //getParent
// getParent}(i))
// hash = {@link MurmurHash//update MurmurHash.update}(hash, {@link //GetParent
// GetParent}(i))
// }
//
// for (int i = 0 i &lt {@link //size()} i++) {
@ -92,7 +92,7 @@ func (this *PredictionContext) toString() string {
panic("Not implemented")
}
func (this *PredictionContext) getParent(index int) IPredictionContext {
func (this *PredictionContext) GetParent(index int) IPredictionContext {
panic("Not implemented")
}
@ -185,7 +185,7 @@ func (this *SingletonPredictionContext) length() int {
return 1
}
func (this *SingletonPredictionContext) getParent(index int) IPredictionContext {
func (this *SingletonPredictionContext) GetParent(index int) IPredictionContext {
return this.parentCtx
}
@ -257,7 +257,7 @@ func (this *EmptyPredictionContext) isEmpty() bool {
return true
}
func (this *EmptyPredictionContext) getParent(index int) IPredictionContext {
func (this *EmptyPredictionContext) GetParent(index int) IPredictionContext {
return nil
}
@ -310,7 +310,7 @@ func (this *ArrayPredictionContext) length() int {
return len(this.returnStates)
}
func (this *ArrayPredictionContext) getParent(index int) IPredictionContext {
func (this *ArrayPredictionContext) GetParent(index int) IPredictionContext {
return this.parents[index]
}
@ -364,15 +364,15 @@ func predictionContextFromRuleContext(a *ATN, outerContext IRuleContext) IPredic
}
// if we are in RuleContext of start rule, s, then PredictionContext
// is EMPTY. Nobody called us. (if we are empty, return empty)
if outerContext.getParent() == nil || outerContext == RuleContextEMPTY {
if outerContext.GetParent() == nil || outerContext == RuleContextEMPTY {
return PredictionContextEMPTY
}
// If we have a parent, convert it to a PredictionContext graph
var parent = predictionContextFromRuleContext(a, outerContext.getParent().(IRuleContext))
var parent = predictionContextFromRuleContext(a, outerContext.GetParent().(IRuleContext))
var state = a.states[outerContext.getInvokingState()]
var transition = state.getTransitions()[0]
return SingletonPredictionContextcreate(parent, transition.(*RuleTransition).followState.getStateNumber())
return SingletonPredictionContextcreate(parent, transition.(*RuleTransition).followState.GetStateNumber())
}
func calculateListsHashString(parents []PredictionContext, returnStates []int) string {
@ -413,10 +413,10 @@ func merge(a, b IPredictionContext, rootIsWildcard bool, mergeCache *DoubleDict)
}
// convert singleton so both are arrays to normalize
if _, ok := a.(*SingletonPredictionContext); ok {
a = NewArrayPredictionContext([]IPredictionContext{a.getParent(0)}, []int{a.getReturnState(0)})
a = NewArrayPredictionContext([]IPredictionContext{a.GetParent(0)}, []int{a.getReturnState(0)})
}
if _, ok := b.(*SingletonPredictionContext); ok {
b = NewArrayPredictionContext([]IPredictionContext{b.getParent(0)}, []int{b.getReturnState(0)})
b = NewArrayPredictionContext([]IPredictionContext{b.GetParent(0)}, []int{b.getReturnState(0)})
}
return mergeArrays(a.(*ArrayPredictionContext), b.(*ArrayPredictionContext), rootIsWildcard, mergeCache)
}
@ -581,11 +581,11 @@ func mergeRoot(a, b ISingletonPredictionContext, rootIsWildcard bool) IPredictio
return PredictionContextEMPTY // $ + $ = $
} else if a == PredictionContextEMPTY { // $ + x = [$,x]
var payloads = []int{b.getReturnState(-1), PredictionContextEMPTY_RETURN_STATE}
var parents = []IPredictionContext{b.getParent(-1), nil}
var parents = []IPredictionContext{b.GetParent(-1), nil}
return NewArrayPredictionContext(parents, payloads)
} else if b == PredictionContextEMPTY { // x + $ = [$,x] ($ is always first if present)
var payloads = []int{a.getReturnState(-1), PredictionContextEMPTY_RETURN_STATE}
var parents = []IPredictionContext{a.getParent(-1), nil}
var parents = []IPredictionContext{a.GetParent(-1), nil}
return NewArrayPredictionContext(parents, payloads)
}
}
@ -752,12 +752,12 @@ func getCachedPredictionContext(context IPredictionContext, contextCache *Predic
// var changed = false
// var parents = []
// for i := 0; i < len(parents); i++ {
// var parent = getCachedPredictionContext(context.getParent(i), contextCache, visited)
// if (changed || parent != context.getParent(i)) {
// var parent = getCachedPredictionContext(context.GetParent(i), contextCache, visited)
// if (changed || parent != context.GetParent(i)) {
// if (!changed) {
// parents = []
// for j := 0; j < len(context); j++ {
// parents[j] = context.getParent(j)
// parents[j] = context.GetParent(j)
// }
// changed = true
// }
@ -799,7 +799,7 @@ func getCachedPredictionContext(context IPredictionContext, contextCache *Predic
// visited[context] = context
// nodes.push(context)
// for i := 0; i < len(context); i++ {
// getAllContextNodes(context.getParent(i), nodes, visited)
// getAllContextNodes(context.GetParent(i), nodes, visited)
// }
// return nodes
// }
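The merge above begins by normalizing its inputs: any SingletonPredictionContext is promoted to a one-element ArrayPredictionContext so that mergeArrays can handle singleton+singleton, singleton+array, and array+array uniformly. The promotion in isolation, with simplified stand-in types:

package sketch

type predCtx interface{}

type singletonCtx struct {
	parent      predCtx
	returnState int
}

type arrayCtx struct {
	parents      []predCtx
	returnStates []int
}

// normalize promotes a singleton context to a one-element array.
func normalize(c predCtx) *arrayCtx {
	if s, ok := c.(*singletonCtx); ok {
		return &arrayCtx{
			parents:      []predCtx{s.parent},
			returnStates: []int{s.returnState},
		}
	}
	return c.(*arrayCtx)
}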
@ -122,7 +122,7 @@ const (
//
// <p>{@code [1|1|[], 1|2|[], 8|3|[]] a : A | A | A B }</p>
//
// <p>After matching input A, we reach the stop state for rule A, state 1.
// <p>After Matching input A, we reach the stop state for rule A, state 1.
// State 8 is the state right before B. Clearly alternatives 1 and 2
// conflict and no amount of further lookahead will separate the two.
// However, alternative 3 will be able to continue and so we do not stop
@ -168,7 +168,7 @@ func PredictionModehasSLLConflictTerminatingPrediction(mode int, configs *ATNCon
// Configs in rule stop states indicate reaching the end of the decision
// rule (local context) or end of start rule (full context). If all
// configs meet this condition, then none of the configurations is able
// to match additional input so we terminate prediction.
// to Match additional input so we terminate prediction.
//
if PredictionModeallConfigsInRuleStopStates(configs) {
return true
@ -208,7 +208,7 @@ func PredictionModehasSLLConflictTerminatingPrediction(mode int, configs *ATNCon
func PredictionModehasConfigInRuleStopState(configs *ATNConfigSet) bool {
for i := 0; i < len(configs.configs); i++ {
var c = configs.configs[i]
if _, ok := c.getState().(*RuleStopState); ok {
if _, ok := c.GetState().(*RuleStopState); ok {
return true
}
}
@ -228,7 +228,7 @@ func PredictionModeallConfigsInRuleStopStates(configs *ATNConfigSet) bool {
for i := 0; i < len(configs.configs); i++ {
var c = configs.configs[i]
if _, ok := c.getState().(*RuleStopState); !ok {
if _, ok := c.GetState().(*RuleStopState); !ok {
return false
}
}
@ -495,7 +495,7 @@ func PredictionModegetConflictingAltSubsets(configs *ATNConfigSet) []*BitSet {
for i := 0; i < len(configs.configs); i++ {
var c = configs.configs[i]
var key = "key_" + strconv.Itoa(c.getState().getStateNumber()) + "/" + c.getContext().toString()
var key = "key_" + strconv.Itoa(c.GetState().GetStateNumber()) + "/" + c.getContext().toString()
var alts = configToAlts[key]
if alts == nil {
alts = NewBitSet()
@ -523,14 +523,14 @@ func PredictionModegetConflictingAltSubsets(configs *ATNConfigSet) []*BitSet {
// map[c.{@link ATNConfig//state state}] U= c.{@link ATNConfig//alt alt}
// </pre>
//
func PredictionModegetStateToAltMap(configs *ATNConfigSet) *AltDict {
func PredictionModeGetStateToAltMap(configs *ATNConfigSet) *AltDict {
var m = NewAltDict()
for _, c := range configs.configs {
var alts = m.get(c.getState().toString())
var alts = m.get(c.GetState().toString())
if alts == nil {
alts = NewBitSet()
m.put(c.getState().toString(), alts)
m.put(c.GetState().toString(), alts)
}
alts.(*BitSet).add(c.getAlt())
}
@ -538,7 +538,7 @@ func PredictionModegetStateToAltMap(configs *ATNConfigSet) *AltDict {
}
func PredictionModehasStateAssociatedWithOneAlt(configs *ATNConfigSet) bool {
var values = PredictionModegetStateToAltMap(configs).values()
var values = PredictionModeGetStateToAltMap(configs).values()
for i := 0; i < len(values); i++ {
if values[i].(*BitSet).length() == 1 {
return true
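PredictionModegetConflictingAltSubsets above buckets configurations by their (state, context) pair and unions the alternatives seen in each bucket. A self-contained approximation with plain maps standing in for AltDict and BitSet:

package sketch

import "fmt"

type cfg struct {
	state, alt int
	ctx        string
}

// altSubsets groups alternatives by (state, context).
func altSubsets(configs []cfg) map[string]map[int]bool {
	byKey := make(map[string]map[int]bool)
	for _, c := range configs {
		key := fmt.Sprintf("key_%d/%s", c.state, c.ctx)
		if byKey[key] == nil {
			byKey[key] = make(map[int]bool) // first config for this pair
		}
		byKey[key][c.alt] = true
	}
	return byKey
}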
@ -8,14 +8,15 @@ import (
)
type IRecognizer interface {
getState() int
GetState() int
SetState(int)
getATN() *ATN
action(_localctx IRuleContext, ruleIndex, actionIndex int)
Action(_localctx IRuleContext, ruleIndex, actionIndex int)
getRuleNames() []string
getErrorListenerDispatch() IErrorListener
sempred(localctx IRuleContext, ruleIndex int, actionIndex int) bool
precpred(localctx IRuleContext, precedence int) bool
Sempred(localctx IRuleContext, ruleIndex int, actionIndex int) bool
Precpred(localctx IRuleContext, precedence int) bool
}
type Recognizer struct {
@ -44,7 +45,7 @@ func (this *Recognizer) checkVersion(toolVersion string) {
}
}
func (this *Recognizer) action(context IRuleContext, ruleIndex, actionIndex int) {
func (this *Recognizer) Action(context IRuleContext, ruleIndex, actionIndex int) {
panic("action not implemented on Recognizer!")
}
@ -60,16 +61,20 @@ func (this *Recognizer) getRuleNames() []string {
return nil
}
func (this *Recognizer) getTokenNames() []string {
func (this *Recognizer) GetTokenNames() []string {
return nil
}
func (this *Recognizer) getState() int {
func (this *Recognizer) GetState() int {
return this.state
}
//func (this *Recognizer) getTokenTypeMap() {
// var tokenNames = this.getTokenNames()
func (this *Recognizer) SetState(v int) {
this.state = v
}
//func (this *Recognizer) GetTokenTypeMap() {
// var tokenNames = this.GetTokenNames()
// if (tokenNames==nil) {
// panic("The current recognizer does not provide a list of token names.")
// }
@ -101,9 +106,9 @@ func (this *Recognizer) getRuleIndexMap() map[string]int {
// return result
}
func (this *Recognizer) getTokenType(tokenName string) int {
func (this *Recognizer) GetTokenType(tokenName string) int {
panic("Method not defined!")
// var ttype = this.getTokenTypeMap()[tokenName]
// var ttype = this.GetTokenTypeMap()[tokenName]
// if (ttype !=nil) {
// return ttype
// } else {
@ -111,10 +116,10 @@ func (this *Recognizer) getTokenType(tokenName string) int {
// }
}
//func (this *Recognizer) getTokenTypeMap() map[string]int {
//func (this *Recognizer) GetTokenTypeMap() map[string]int {
// Vocabulary vocabulary = getVocabulary();
//
// synchronized (tokenTypeMapCache) {
// Synchronized (tokenTypeMapCache) {
// Map<String, Integer> result = tokenTypeMapCache.get(vocabulary);
// if (result == null) {
// result = new HashMap<String, Integer>();
@ -157,9 +162,9 @@ func (this *Recognizer) getErrorHeader(e IRecognitionException) string {
// @deprecated This method is not called by the ANTLR 4 Runtime. Specific
// implementations of {@link ANTLRErrorStrategy} may provide a similar
// feature when necessary. For example, see
// {@link DefaultErrorStrategy//getTokenErrorDisplay}.
// {@link DefaultErrorStrategy//GetTokenErrorDisplay}.
//
func (this *Recognizer) getTokenErrorDisplay(t *Token) string {
func (this *Recognizer) GetTokenErrorDisplay(t *Token) string {
if t == nil {
return "<no token>"
}
@ -184,10 +189,10 @@ func (this *Recognizer) getErrorListenerDispatch() IErrorListener {
// subclass needs to override these if there are sempreds or actions
// that the ATN interp needs to execute
func (this *Recognizer) sempred(localctx IRuleContext, ruleIndex int, actionIndex int) bool {
func (this *Recognizer) Sempred(localctx IRuleContext, ruleIndex int, actionIndex int) bool {
return true
}
func (this *Recognizer) precpred(localctx IRuleContext, precedence int) bool {
func (this *Recognizer) Precpred(localctx IRuleContext, precedence int) bool {
return true
}
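The Recognizer defaults above make Sempred and Precpred return true; a generated parser overrides them so the ATN interpreter can evaluate the grammar's predicates. A hand-written sketch of such an override (the parser type, rule index, and predicate are hypothetical):

package parser

import "strconv"

type IRuleContext interface{}

// myParser stands in for a generated parser, which would embed the
// runtime's Parser and with it the Recognizer defaults.
type myParser struct{ precedenceOK bool }

// Sempred replaces the always-true default for this grammar's rules.
func (p *myParser) Sempred(localctx IRuleContext, ruleIndex, predIndex int) bool {
	switch ruleIndex {
	case 2: // hypothetical rule carrying a single {...}? predicate
		return p.exprSempred(localctx, predIndex)
	default:
		panic("no predicate with index " + strconv.Itoa(ruleIndex))
	}
}

func (p *myParser) exprSempred(localctx IRuleContext, predIndex int) bool {
	return p.precedenceOK
}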
@ -97,7 +97,7 @@ func (this *RuleContext) depth() int {
var n = 0
var p Tree = this
for p != nil {
p = p.getParent()
p = p.GetParent()
n += 1
}
return n
@ -130,13 +130,13 @@ func (this *RuleContext) getPayload() interface{} {
// added to the parse trees, they will not appear in the output of this
// method.
//
func (this *RuleContext) getText() string {
func (this *RuleContext) GetText() string {
if this.getChildCount() == 0 {
return ""
} else {
var s string
for _, child := range this.children {
s += child.(IRuleContext).getText()
s += child.(IRuleContext).GetText()
}
return s
@ -147,7 +147,7 @@ func (this *RuleContext) getChild(i int) Tree {
return nil
}
func (this *RuleContext) getParent() Tree {
func (this *RuleContext) GetParent() Tree {
return this.parentCtx
}
@ -188,10 +188,10 @@ func (this *RuleContext) toString(ruleNames []string, stop IRuleContext) string
}
s += ruleName
}
if p.getParent() != nil && (ruleNames != nil || !p.getParent().(IRuleContext).isEmpty()) {
if p.GetParent() != nil && (ruleNames != nil || !p.GetParent().(IRuleContext).isEmpty()) {
s += " "
}
p = p.getParent().(IRuleContext)
p = p.GetParent().(IRuleContext)
}
s += "]"
return s
@ -85,7 +85,7 @@ func (this *Predicate) evaluate(parser IRecognizer, outerContext IRuleContext) b
localctx = outerContext
}
return parser.sempred(localctx, this.ruleIndex, this.predIndex)
return parser.Sempred(localctx, this.ruleIndex, this.predIndex)
}
func (this *Predicate) hashString() string {
@ -121,11 +121,11 @@ func NewPrecedencePredicate(precedence int) *PrecedencePredicate {
}
func (this *PrecedencePredicate) evaluate(parser IRecognizer, outerContext IRuleContext) bool {
return parser.precpred(outerContext, this.precedence)
return parser.Precpred(outerContext, this.precedence)
}
func (this *PrecedencePredicate) evalPrecedence(parser IRecognizer, outerContext IRuleContext) SemanticContext {
if parser.precpred(outerContext, this.precedence) {
if parser.Precpred(outerContext, this.precedence) {
return SemanticContextNONE
} else {
return nil
@ -51,7 +51,7 @@ const (
)
// Explicitly set the text for this token. If {code text} is not
// {@code nil}, then {@link //getText} will return this value rather than
// {@code nil}, then {@link //GetText} will return this value rather than
// extracting the text from the input.
//
// @param text The explicit text of the token, or {@code nil} if the text
@ -66,7 +66,7 @@ func (this *Token) setText(s string) {
this._text = s
}
func (this *Token) getTokenSource() TokenSource {
func (this *Token) GetTokenSource() TokenSource {
return this.source.tokenSource
}
@ -108,8 +108,8 @@ func NewCommonToken(source *TokenSourceCharStreamPair, tokenType, channel, start
// If {@code oldToken} is also a {@link CommonToken} instance, the newly
// constructed token will share a reference to the {@link //text} field and
// the {@link Pair} stored in {@link //source}. Otherwise, {@link //text} will
// be assigned the result of calling {@link //getText}, and {@link //source}
// will be constructed from the result of {@link Token//getTokenSource} and
// be assigned the result of calling {@link //GetText}, and {@link //source}
// will be constructed from the result of {@link Token//GetTokenSource} and
// {@link Token//getInputStream}.</p>
//
// @param oldToken The token to copy.
@ -134,7 +134,7 @@ func (this *CommonToken) text() string {
}
var n = input.size()
if this.start < n && this.stop < n {
return input.getTextFromInterval(NewInterval(this.start, this.stop))
return input.GetTextFromInterval(NewInterval(this.start, this.stop))
} else {
return "<EOF>"
}
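CommonToken.text() above falls back in a fixed order: explicitly set text wins, otherwise the token slices its input by the inclusive start/stop interval, and past the end of input it reports EOF. The same decision, simplified (an empty string stands in for "no explicit text"):

// tokenText mirrors the fallback order in CommonToken.text().
func tokenText(explicit, input string, start, stop int) string {
	if explicit != "" {
		return explicit
	}
	if start < len(input) && stop < len(input) {
		return input[start : stop+1] // interval is inclusive at both ends
	}
	return "<EOF>"
}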
@ -9,5 +9,5 @@ type TokenSource interface {
getInputStream() CharStream
getSourceName() string
setTokenFactory(factory TokenFactory)
getTokenFactory() TokenFactory
GetTokenFactory() TokenFactory
}
@ -6,11 +6,11 @@ type TokenStream interface {
LT(k int) *Token
get(index int) *Token
getTokenSource() TokenSource
GetTokenSource() TokenSource
setTokenSource(TokenSource)
getText() string
getTextFromInterval(*Interval) string
getTextFromRuleContext(IRuleContext) string
getTextFromTokens(*Token, *Token) string
GetText() string
GetTextFromInterval(*Interval) string
GetTextFromRuleContext(IRuleContext) string
GetTextFromTokens(*Token, *Token) string
}
@ -20,7 +20,7 @@ type ITransition interface {
getIsEpsilon() bool
getLabel() *IntervalSet
getSerializationType() int
matches(int, int, int) bool
Matches(int, int, int) bool
}
type Transition struct {
@ -69,7 +69,7 @@ func (t *Transition) getSerializationType() int {
return t.serializationType
}
func (t *Transition) matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
func (t *Transition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
panic("Not implemented")
}
@ -149,7 +149,7 @@ func (t *AtomTransition) makeLabel() *IntervalSet {
return s
}
func (t *AtomTransition) matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
func (t *AtomTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
return t.label_ == symbol
}
@ -178,7 +178,7 @@ func NewRuleTransition(ruleStart IATNState, ruleIndex, precedence int, followSta
return t
}
func (t *RuleTransition) matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
func (t *RuleTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
return false
}
@ -200,7 +200,7 @@ func NewEpsilonTransition(target IATNState, outermostPrecedenceReturn int) *Epsi
return t
}
func (t *EpsilonTransition) matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
func (t *EpsilonTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
return false
}
@ -232,7 +232,7 @@ func (t *RangeTransition) makeLabel() *IntervalSet {
return s
}
func (t *RangeTransition) matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
func (t *RangeTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
return symbol >= t.start && symbol <= t.stop
}
@ -272,7 +272,7 @@ func NewPredicateTransition(target IATNState, ruleIndex, predIndex int, isCtxDep
return t
}
func (t *PredicateTransition) matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
func (t *PredicateTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
return false
}
@ -304,7 +304,7 @@ func NewActionTransition(target IATNState, ruleIndex, actionIndex int, isCtxDepe
return t
}
func (t *ActionTransition) matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
func (t *ActionTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
return false
}
@ -337,7 +337,7 @@ func (t *SetTransition) InitSetTransition(set *IntervalSet) {
}
func (t *SetTransition) matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
func (t *SetTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
return t.label.contains(symbol)
}
@ -360,7 +360,7 @@ func NewNotSetTransition(target IATNState, set *IntervalSet) *NotSetTransition {
return t
}
func (t *NotSetTransition) matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
func (t *NotSetTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
return symbol >= minVocabSymbol && symbol <= maxVocabSymbol && !t.label.contains(symbol)
}
@ -381,7 +381,7 @@ func NewWildcardTransition(target IATNState) *WildcardTransition {
return t
}
func (t *WildcardTransition) matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
func (t *WildcardTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
return symbol >= minVocabSymbol && symbol <= maxVocabSymbol
}
@ -407,7 +407,7 @@ func NewPrecedencePredicateTransition(target IATNState, precedence int) *Precede
return t
}
func (t *PrecedencePredicateTransition) matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
func (t *PrecedencePredicateTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
return false
}
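Each transition kind above answers Matches for itself: atoms by equality, ranges by bounds, sets by membership, and non-consuming edges (epsilon, action, predicate, rule) with false. A sketch of how a caller in this package would consult it, assuming the ITransition interface above:

// canTake: non-epsilon edges consume a symbol only when it matches
// within the vocabulary bounds; epsilon edges are walked by closure.
func canTake(t ITransition, symbol, minVocabSymbol, maxVocabSymbol int) bool {
	if t.getIsEpsilon() {
		return false
	}
	return t.Matches(symbol, minVocabSymbol, maxVocabSymbol)
}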
@ -7,7 +7,7 @@ package antlr4
var TreeINVALID_INTERVAL = NewInterval(-1, -2)
type Tree interface {
getParent() Tree
GetParent() Tree
setParent(Tree)
getPayload() interface{}
getChild(i int) Tree
@ -28,7 +28,7 @@ type ParseTree interface {
// <T> T accept(ParseTreeVisitor<? extends T> visitor);
accept(visitor ParseTreeVisitor) interface{}
getText() string
GetText() string
// toStringTree([]string, IRecognizer) string
}
@ -119,7 +119,7 @@ func (this *TerminalNodeImpl) getSymbol() *Token {
return this.symbol
}
func (this *TerminalNodeImpl) getParent() Tree {
func (this *TerminalNodeImpl) GetParent() Tree {
return this.parentCtx
}
@ -147,7 +147,7 @@ func (this *TerminalNodeImpl) accept(visitor ParseTreeVisitor) interface{} {
return visitor.visitTerminal(this)
}
func (this *TerminalNodeImpl) getText() string {
func (this *TerminalNodeImpl) GetText() string {
return this.symbol.text()
}
@ -159,8 +159,8 @@ func (this *TerminalNodeImpl) toString() string {
}
}
// Represents a token that was consumed during resynchronization
// rather than during a valid match operation. For example,
// Represents a token that was consumed during reSynchronization
// rather than during a valid Match operation. For example,
// we will create this kind of a node during single token insertion
// and deletion as well as during "consume until error recovery set"
// upon no viable alternative exceptions.
@ -197,7 +197,7 @@ func (this *ParseTreeWalker) walk(listener ParseTreeListener, t Tree) {
} else if term, ok := t.(TerminalNode); ok {
listener.visitTerminal(term)
} else {
this.enterRule(listener, t.(RuleNode))
this.EnterRule(listener, t.(RuleNode))
for i := 0; i < t.getChildCount(); i++ {
var child = t.getChild(i)
this.walk(listener, child)
@ -212,10 +212,10 @@ func (this *ParseTreeWalker) walk(listener ParseTreeListener, t Tree) {
// {@link RuleContext}-specific event. First we trigger the generic and then
// the rule specific. We do them in reverse order upon finishing the node.
//
func (this *ParseTreeWalker) enterRule(listener ParseTreeListener, r RuleNode) {
func (this *ParseTreeWalker) EnterRule(listener ParseTreeListener, r RuleNode) {
var ctx = r.getRuleContext().(IParserRuleContext)
listener.enterEveryRule(ctx)
ctx.enterRule(listener)
ctx.EnterRule(listener)
}
func (this *ParseTreeWalker) exitRule(listener ParseTreeListener, r RuleNode) {
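walk above fires events in a strict order: enter on the way down, children left to right, exit on the way back up (hence the comment about reverse order). A self-contained stand-in, not the runtime's walker, that makes the order concrete:

package sketch

import "fmt"

type node struct {
	name     string
	children []*node
}

// walk prints the event order a ParseTreeWalker would produce.
func walk(n *node) {
	fmt.Println("enter", n.name) // enterEveryRule, then rule-specific enter
	for _, c := range n.children {
		walk(c)
	}
	fmt.Println("exit", n.name) // rule-specific exit, then exitEveryRule
}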
@ -74,11 +74,11 @@ func TreesgetChildren(t Tree) []Tree {
//
func TreesgetAncestors(t Tree) []Tree {
var ancestors = make([]Tree, 0)
t = t.getParent()
t = t.GetParent()
for t != nil {
f := []Tree{t}
ancestors = append(f, ancestors...)
t = t.getParent()
t = t.GetParent()
}
return ancestors
}
runtime/Go/src/scratch/.gitignore
@ -0,0 +1,3 @@
*.js
*.tokens
*.go
@ -0,0 +1,101 @@
grammar Arithmetic;
options { language = Go; }
equation
: expression relop expression
;
expression
: multiplyingExpression ((PLUS|MINUS) multiplyingExpression)*
;
multiplyingExpression
: powExpression ((TIMES|DIV) powExpression)*
;
powExpression
: atom (POW expression)?
;
atom
: scientific
| variable
| LPAREN expression RPAREN
;
scientific
: number (E number)?
;
relop
: EQ | GT | LT
;
number
: MINUS? DIGIT+ (POINT DIGIT+)?
;
variable
: MINUS? LETTER (LETTER | DIGIT)*;
LPAREN
: '('
;
RPAREN
: ')'
;
PLUS
: '+'
;
MINUS
: '-'
;
TIMES
: '*'
;
DIV
: '/'
;
GT
: '>'
;
LT
: '<'
;
EQ
: '='
;
POINT
: '.'
;
E
: 'e'
| 'E'
;
POW
: '^'
;
LETTER
: ('a'..'z') | ('A'..'Z')
;
DIGIT
: ('0'..'9')
;
WS
: [ \r\n\t]+ -> channel(HIDDEN)
;
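A hypothetical driver for the parser generated from this grammar; the stream and token-stream constructors follow ANTLR's other runtimes and may not match this in-progress port exactly:

package main

import (
	"antlr4"
	"parser"
)

func main() {
	input := antlr4.NewInputStream("1 + 2 * 3 = x")
	lexer := parser.NewArithmeticLexer(input)
	tokens := antlr4.NewCommonTokenStream(lexer, 0)
	p := parser.NewArithmeticParser(tokens)
	p.equation() // the grammar's start rule
}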
@ -0,0 +1,3 @@
lexer grammar T;
options { language = Go; }
ZERO: '0';
@ -1,29 +1,22 @@
/** ANTLR tool checks output templates are compatible with tool code generation.
* For now, a simple string match used on x.y of x.y.z scheme.
* Must match Tool.VERSION during load to templates.
*
* REQUIRED.
*/
fileHeader(grammarFileName, ANTLRVersion) ::= <<
// Generated from <grammarFileName; format="java-escape"> by ANTLR <ANTLRVersion>
>>
// args must be <object-model-object>, <fields-resulting-in-STs>
ParserFile(file, parser, namedActions) ::= <<
<fileHeader(file.grammarFileName, file.ANTLRVersion)>
package parser // <file.grammarName>
import "antlr4"
import (
"antlr4"
"strings"
)
<namedActions.header>
<parser>
>>
ListenerFile(file, header) ::= <<
<fileHeader(file.grammarFileName, file.ANTLRVersion)>
package parser // <file.grammarName>
@ -33,7 +26,7 @@ import "antlr4"
// This class defines a complete listener for a parse tree produced by <file.parserName>.
type <file.grammarName>Listener struct {
ParseTreeListener
}
<file.listenerNames:{lname |
@ -49,7 +42,6 @@ func (l *<file.grammarName>Listener) exit<lname; format="cap">(ctx antlr4.IParse
>>
VisitorFile(file, header) ::= <<
<fileHeader(file.grammarFileName, file.ANTLRVersion)>
package parser // <file.grammarName>
@ -73,24 +65,22 @@ func (l <file.grammarName>Visitor) visit<lname; format="cap">(ctx IParserRuleCon
>>
Parser(parser, funcs, atn, sempredFuncs, superClass) ::= <<
<if(superClass)>
var <superClass> = require('./<superClass>').<superClass> // TODO
<endif>
<atn>
var parserATN = <atn>
var deserializer = antlr4.NewATNDeserializer()
var deserializedAtn = deserializer.Deserialize(serializedATN)
var deserializedATN = deserializer.Deserialize( []rune( parserATN ) )
var literalNames = []string{ <parser.literalNames:{t | <t>}; null="nil", separator=", ", wrap, anchor> }
var symbolicNames = []string{ <parser.symbolicNames:{t | <t>}; null="nil", separator=", ", wrap, anchor> }
var ruleNames = []string{ <parser.ruleNames:{r | "<r>"}; separator=", ", wrap, anchor> }
type <parser.name> struct {
<superClass; null="*antlr4.Parser">
*<superClass; null="antlr4.Parser">
ruleNames []string
literalNames []string
@ -100,10 +90,10 @@ type <parser.name> struct {
func New<parser.name>(input antlr4.TokenStream) *<parser.name> {
var decisionToDFA = make([]antlr4.DFA,len(deserializedAtn.DecisionToState))
var decisionToDFA = make([]*antlr4.DFA,len(deserializedATN.DecisionToState))
var sharedContextCache = antlr4.NewPredictionContextCache()
for index, ds := range deserializedAtn.DecisionToState {
for index, ds := range deserializedATN.DecisionToState {
decisionToDFA[index] = antlr4.NewDFA(ds, index)
}
@ -111,11 +101,11 @@ func New<parser.name>(input antlr4.TokenStream) *<parser.name> {
parser.InitParser(input)
parser.Interpreter = antlr4.NewParserATNSimulator(parser, deserializedAtn, decisionToDFA, sharedContextCache)
parser.Interpreter = antlr4.NewParserATNSimulator(parser, deserializedATN, decisionToDFA, sharedContextCache)
parser.ruleNames = ruleNames
parser.literalNames = literalNames
parser.symbolicNames = symbolicNames
<namedActions.members> // TODO
<namedActions.members>
parser.grammarFileName = "<parser.grammarFileName; format="java-escape">"
return parser
@ -135,10 +125,10 @@ const (
<funcs; separator="\n">
<if(sempredFuncs)>
func (p *<parser.name>) sempred(localctx, ruleIndex int, predIndex int) {
func (p *<parser.name>) Sempred(localctx, ruleIndex int, predIndex int) {
switch ruleIndex {
<parser.sempredFuncs.values:{f | case <f.ruleIndex>:
return p.<f.name>_sempred(localctx, predIndex);}; separator="\n">
return p.<f.name>_Sempred(localctx, predIndex);}; separator="\n">
default:
panic("No predicate with index:" + ruleIndex)
}
@ -151,12 +141,12 @@ func (p *<parser.name>) sempred(localctx, ruleIndex int, predIndex int) {
dumpActions(recog, argFuncs, actionFuncs, sempredFuncs) ::= <<
<if(actionFuncs)>
func (l *<lexer.name>) action(localctx, ruleIndex int, actionIndex int) {
func (l *<lexer.name>) Action(localctx, ruleIndex int, actionIndex int) {
switch ruleIndex {
<recog.actionFuncs.values:{f|
case <f.ruleIndex>:
p.<f.name>_action(localctx, actionIndex)
break;}; separator="\n">
p.<f.name>_Action(localctx, actionIndex)
}; separator="\n">
default:
panic("No registered action for:" + ruleIndex)
}
@ -165,10 +155,10 @@ case <f.ruleIndex>:
<actionFuncs.values; separator="\n">
<endif>
<if(sempredFuncs)>
func (l *<lexer.name>) sempred(localctx, ruleIndex, predIndex) {
func (l *<lexer.name>) Sempred(localctx, ruleIndex, predIndex) {
switch ruleIndex {
<recog.sempredFuncs.values:{f| case <f.ruleIndex>:
return l.<f.name>_sempred(localctx, predIndex);}; separator="\n">
return l.<f.name>_Sempred(localctx, predIndex);}; separator="\n">
default:
panic("No registered predicate for:" + ruleIndex)
}
@ -184,12 +174,12 @@ func (l *<lexer.name>) sempred(localctx, ruleIndex, predIndex) {
*/
RuleActionFunction(r, actions) ::= <<
func (l *<lexer.name>) <r.name>_action(localctx , actionIndex) {
func (l *<lexer.name>) <r.name>_Action(localctx , actionIndex) {
switch actionIndex {
<actions:{index|
case <index>:
<actions.(index)>
break;}; separator="\n">
}; separator="\n">
default:
panic("No registered action for:" + actionIndex)
}
@ -200,7 +190,7 @@ case <index>:
* overriding implementation impossible to maintain.
*/
RuleSempredFunction(r, actions) ::= <<
func (s *<if(parser)><parser.name><else><lexer.name><endif>) <r.name>_sempred(localctx, predIndex int) {
func (s *<if(parser)><parser.name><else><lexer.name><endif>) <r.name>_Sempred(localctx, predIndex int) {
switch predIndex {
<actions:{index| case <index>:
return <actions.(index)>;}; separator="\n">
@ -221,8 +211,8 @@ RuleFunction(currentRule,args,code,locals,ruleCtx,altLabelCtxs,namedActions,fina
func (p *<parser.name>) <currentRule.name>(<currentRule.args:{a | <a.name>}; separator=", ">) {
localctx := New<currentRule.ctxType>(p, p._ctx, p.state<currentRule.args:{a | , <a.name>}>)
p.enterRule(localctx, <currentRule.startState>, <parser.name>RULE_<currentRule.name>)
localctx := New<currentRule.ctxType>(p, p.GetParserRuleContext(), p.GetState()<currentRule.args:{a | , <a.name>}>)
p.EnterRule(localctx, <currentRule.startState>, <parser.name>RULE_<currentRule.name>)
<namedActions.init>
<locals; separator="\n">
@ -231,17 +221,17 @@ func (p *<parser.name>) <currentRule.name>(<currentRule.args:{a | <a.name>}; sep
<if(exceptions)>
<exceptions; separator="\n"> // TODO not sure how exceptions are passed into clause
<else>
if v, ok = x.(RecognitionException); ok {
localctx.exception = v
p._errHandler.reportError(p, v)
p._errHandler.recover(p, v)
if v, ok = x.(antlr4.RecognitionException); ok {
localctx.SetException( v )
p.GetErrorHandler().ReportError(p, v)
p.GetErrorHandler().Recover(p, v)
} else {
panic(re)
}
<endif>
// TODO if the above panic call is invoked then the below finally clause may not be called
<finallyAction>
p.exitRule()
p.ExitRule()
}
}
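A hand-written approximation of what the RuleFunction template above is aiming at once its remaining try/catch/finally scaffolding is ported to defer/recover; names are borrowed from the Arithmetic grammar for concreteness, not actual tool output:

func (p *ArithmeticParser) equation() (localctx IParserRuleContext) {
	localctx = NewEquationContext(p, p.GetParserRuleContext(), p.GetState())
	p.EnterRule(localctx, 0, ArithmeticParserRULE_equation)
	defer p.ExitRule() // the "finally": runs even if the recover re-panics
	defer func() {
		if x := recover(); x != nil { // the "catch"
			if re, ok := x.(antlr4.RecognitionException); ok {
				localctx.SetException(re)
				p.GetErrorHandler().ReportError(p, re)
				p.GetErrorHandler().Recover(p, re)
			} else {
				panic(x) // not a parse error
			}
		}
	}()
	// ... alternative-matching body emitted from the template ...
	return localctx
}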
@ -263,18 +253,18 @@ LeftRecursiveRuleFunction(currentRule,args,code,locals,ruleCtx,altLabelCtxs,
func (p *<parser.name>) <currentRule.name>(_p<if(currentRule.args)>, <args:{a | , <a>}><endif>) {
_parentctx := p.getParent()
_parentState := p.getState()
localctx := New<currentRule.ctxType>(p, p._ctx, _parentState<args:{a | , <a.name>}>)
_parentctx := p.GetParent().(IParserRuleContext)
_parentState := p.GetState()
localctx := New<currentRule.ctxType>(p, p.GetParserRuleContext(), _parentState<args:{a | , <a.name>}>)
_prevctx := localctx
_startState := <currentRule.startState>
p.enterRecursionRule(localctx, <currentRule.startState>, <parser.name>RULE_<currentRule.name>, _p)
p.EnterRecursionRule(localctx, <currentRule.startState>, <parser.name>RULE_<currentRule.name>, _p)
<namedActions.init>
<locals; separator="\n">
defer func(){
<finallyAction>
p.unrollRecursionContexts(_parentctx)
p.UnrollRecursionContexts(_parentctx)
}
try {
@ -282,26 +272,25 @@ func (p *<parser.name>) <currentRule.name>(_p<if(currentRule.args)>, <args:{a |
<postamble; separator="\n">
<namedActions.after>
} catch( error) {
if(error instanceof IRecognitionException) {
localctx.exception = error
p._errHandler.reportError(p, error)
p._errHandler.recover(p, error)
if v, ok = x.(antlr4.RecognitionException); ok {
localctx.SetException(v)
p.GetErrorHandler().ReportError(p, v)
p.GetErrorHandler().Recover(p, v)
} else {
panic(error)
}
} finally {
<finallyAction>
p.unrollRecursionContexts(_parentctx)
p.UnrollRecursionContexts(_parentctx)
}
return localctx
}
>>
CodeBlockForOuterMostAlt(currentOuterMostAltCodeBlock, locals, preamble, ops) ::= <<
<if(currentOuterMostAltCodeBlock.altLabel)>localctx = New<currentOuterMostAltCodeBlock.altLabel; format="cap">Context(p, localctx)<endif>
p.enterOuterAlt(localctx, <currentOuterMostAltCodeBlock.alt.altNum>)
p.EnterOuterAlt(localctx, <currentOuterMostAltCodeBlock.alt.altNum>)
<CodeBlockForAlt(currentAltCodeBlock=currentOuterMostAltCodeBlock, ...)>
>>
@ -313,10 +302,10 @@ CodeBlockForAlt(currentAltCodeBlock, locals, preamble, ops) ::= <<
>>
LL1AltBlock(choice, preamble, alts, error) ::= <<
p.state = <choice.stateNumber>
<if(choice.label)><labelref(choice.label)> = p._input.LT(1);<endif>
p.SetState(<choice.stateNumber>)
<if(choice.label)><labelref(choice.label)> = p.GetTokenStream().LT(1)<endif>
<preamble; separator="\n">
switch p._input.LA(1) {
switch p.GetTokenStream().LA(1) {
<choice.altLook,alts:{look,alt| <cases(ttypes=look)>
<alt>
break;}; separator="\n">
@ -326,8 +315,8 @@ default:
>>
LL1OptionalBlock(choice, alts, error) ::= <<
p.state = <choice.stateNumber>
switch p._input.LA(1) {
p.SetState(<choice.stateNumber>)
switch p.GetTokenStream().LA(1) {
<choice.altLook,alts:{look,alt| <cases(ttypes=look)>
<alt>
break;}; separator="\n">
@ -337,7 +326,7 @@ default:
>>
LL1OptionalBlockSingleAlt(choice, expr, alts, preamble, error, followExpr) ::= <<
p.state = <choice.stateNumber>
p.SetState(<choice.stateNumber>)
<preamble; separator="\n">
if <expr> {
<alts; separator="\n">
@ -346,25 +335,25 @@ if <expr> {
>>
LL1StarBlockSingleAlt(choice, loopExpr, alts, preamble, iteration) ::= <<
p.state = <choice.stateNumber>
p._errHandler.sync(p)
p.SetState(<choice.stateNumber>)
p.GetErrorHandler().Sync(p)
<preamble; separator="\n">
for <loopExpr> {
<alts; separator="\n">
p.state = <choice.loopBackStateNumber>
p._errHandler.sync(p)
p.SetState(<choice.loopBackStateNumber>)
p.GetErrorHandler().Sync(p)
<iteration>
}
>>
LL1PlusBlockSingleAlt(choice, loopExpr, alts, preamble, iteration) ::= <<
p.state = <choice.blockStartStateNumber>; <! alt block decision !>
p._errHandler.sync(p)
p.SetState(<choice.blockStartStateNumber>) <! alt block decision !>
p.GetErrorHandler().Sync(p)
<preamble; separator="\n">
for ok := true; ok; ok = <loopExpr> {
<alts; separator="\n">
p.state = <choice.stateNumber>; <! loopback/exit decision !>
p._errHandler.sync(p)
p.SetState(<choice.stateNumber>); <! loopback/exit decision !>
p.GetErrorHandler().Sync(p)
<iteration>
}
>>
@ -372,24 +361,23 @@ for ok := true; ok; ok = <loopExpr> {
// LL(*) stuff
AltBlock(choice, preamble, alts, error) ::= <<
p.state = <choice.stateNumber>
p._errHandler.sync(p)
p.SetState(<choice.stateNumber>)
p.GetErrorHandler().Sync(p)
<if(choice.label)><labelref(choice.label)> = _input.LT(1)<endif>
<preamble; separator="\n">
la_ := p._interp.adaptivePredict(p._input,<choice.decision>,p._ctx)
la_ := p.GetInterpreter().AdaptivePredict(p.GetTokenStream(),<choice.decision>,p.GetParserRuleContext())
switch la_ {
<alts:{alt |
case <i>:
<alt>
// break
}; separator="\n">
}
>>
OptionalBlock(choice, alts, error) ::= <<
p.state = <choice.stateNumber>
p._errHandler.sync(p)
la_ := p._interp.adaptivePredict(p._input,<choice.decision>,p._ctx)
p.SetState(<choice.stateNumber>)
p.GetErrorHandler().Sync(p)
la_ := p.GetInterpreter().AdaptivePredict(p.GetTokenStream(),<choice.decision>,p.GetParserRuleContext())
<alts:{alt |
if la_==<i><if(!choice.ast.greedy)>+1<endif> {
<alt>
@ -397,42 +385,42 @@ if la_==<i><if(!choice.ast.greedy)>+1<endif> {
}
>>
StarBlock(choice, alts, sync, iteration) ::= <<
p.state = <choice.stateNumber>
p._errHandler.sync(p)
_alt := p._interp.adaptivePredict(p._input,<choice.decision>,p._ctx)
for _alt!=<choice.exitAlt> && _alt!= ATNINVALID_ALT_NUMBER {
StarBlock(choice, alts, Sync, iteration) ::= <<
p.SetState(<choice.stateNumber>)
p.GetErrorHandler().Sync(p)
_alt := p.GetInterpreter().AdaptivePredict(p.GetTokenStream(),<choice.decision>,p.GetParserRuleContext())
for _alt!=<choice.exitAlt> && _alt!= antlr4.ATNINVALID_ALT_NUMBER {
if(_alt==1<if(!choice.ast.greedy)>+1<endif>) {
<iteration>
<alts> <! should only be one !>
}
p.state = <choice.loopBackStateNumber>
p._errHandler.sync(p)
_alt = p._interp.adaptivePredict(p._input,<choice.decision>,p._ctx)
p.SetState(<choice.loopBackStateNumber>)
p.GetErrorHandler().Sync(p)
_alt = p.GetInterpreter().AdaptivePredict(p.GetTokenStream(),<choice.decision>,p.GetParserRuleContext())
}
>>
PlusBlock(choice, alts, error) ::= <<
p.state = <choice.blockStartStateNumber>; <! alt block decision !>
p._errHandler.sync(p)
p.SetState(<choice.blockStartStateNumber>) <! alt block decision !>
p.GetErrorHandler().Sync(p)
_alt := 1<if(!choice.ast.greedy)>+1<endif>
for ok := true; ok; ok = _alt!=<choice.exitAlt> && _alt!= ATNINVALID_ALT_NUMBER {
for ok := true; ok; ok = _alt!=<choice.exitAlt> && _alt!= antlr4.ATNINVALID_ALT_NUMBER {
switch _alt {
<alts:{alt|
case <i><if(!choice.ast.greedy)>+1<endif>:
<alt>
//break;}; separator="\n">
//}; separator="\n">
default:
<error>
}
p.state = <choice.loopBackStateNumber>; <! loopback/exit decision !>
p._errHandler.sync(p)
_alt = p._interp.adaptivePredict(p._input,<choice.decision>, p._ctx)
p.SetState(<choice.loopBackStateNumber>) <! loopback/exit decision !>
p.GetErrorHandler().Sync(p)
_alt = p.GetInterpreter().AdaptivePredict(p.GetTokenStream(),<choice.decision>, p.GetParserRuleContext())
}
>>
Sync(s) ::= "sync(<s.expecting.name>)"
Sync(s) ::= "Sync(<s.expecting.name>)"
ThrowNoViableAlt(t) ::= "panic(NewNoViableAltException(p))"
@ -473,13 +461,13 @@ cases(ttypes) ::= <<
>>
InvokeRule(r, argExprsChunks) ::= <<
p.state = <r.stateNumber>
p.SetState(<r.stateNumber>)
<if(r.labels)><r.labels:{l | <labelref(l)> = }><endif>p.<r.name>(<if(r.ast.options.p)><r.ast.options.p><if(argExprsChunks)>,<endif><endif><argExprsChunks>)
>>
MatchToken(m) ::= <<
p.state = <m.stateNumber>
<if(m.labels)><m.labels:{l | <labelref(l)> = }><endif>p.match(<parser.name><m.name>)
p.SetState(<m.stateNumber>)
<if(m.labels)><m.labels:{l | <labelref(l)> = }><endif>p.Match(<parser.name><m.name>)
>>
MatchSet(m, expr, capture) ::= "<CommonSetStuff(m, expr, capture, false)>"
@ -487,20 +475,20 @@ MatchSet(m, expr, capture) ::= "<CommonSetStuff(m, expr, capture, false)>"
MatchNotSet(m, expr, capture) ::= "<CommonSetStuff(m, expr, capture, true)>"
CommonSetStuff(m, expr, capture, invert) ::= <<
p.state = <m.stateNumber>
<if(m.labels)><m.labels:{l | <labelref(l)> = }>p._input.LT(1);<endif>
p.SetState(<m.stateNumber>)
<if(m.labels)><m.labels:{l | <labelref(l)> = }>p.GetTokenStream().LT(1);<endif>
<capture>
<if(invert)>if <m.varName>\<=0 || <expr> <else>if !(<expr>)<endif> {
<if(m.labels)><m.labels:{l | <labelref(l)> = }><endif>p._errHandler.recoverInline(this)
<if(m.labels)><m.labels:{l | <labelref(l)> = }><endif>p.GetErrorHandler().RecoverInline(p)
}
else {
p.consume()
p.Consume()
}
>>
Wildcard(w) ::= <<
p.state = <w.stateNumber>
<if(w.labels)><w.labels:{l | <labelref(l)> = }><endif>matchWildcard()
p.SetState(<w.stateNumber>)
<if(w.labels)><w.labels:{l | <labelref(l)> = }><endif>MatchWildcard()
>>
// ACTION STUFF
@ -510,7 +498,7 @@ Action(a, foo, chunks) ::= "<chunks>"
ArgAction(a, chunks) ::= "<chunks>"
SemPred(p, chunks, failChunks) ::= <<
p.state = <p.stateNumber>
p.SetState(<p.stateNumber>)
if !( <chunks>) {
panic( FailedPredicateException(p, <p.predicate><if(failChunks)>, <failChunks><elseif(p.msg)>, <p.msg><endif>))
}
@ -557,20 +545,20 @@ TokenPropertyRef_int(t) ::= "(<ctx(t)>.<t.label> == null ? 0 : parseInt(<ctx(t)>
RulePropertyRef_start(r) ::= "(<ctx(r)>.<r.label>==null ? null : <ctx(r)>.<r.label>.start)"
RulePropertyRef_stop(r) ::= "(<ctx(r)>.<r.label>==null ? null : <ctx(r)>.<r.label>.stop)"
RulePropertyRef_text(r) ::= "(<ctx(r)>.<r.label>==null ? null : p._input.getText(NewInterval(<ctx(r)>.<r.label>.start,<ctx(r)>.<r.label>.stop)))"
RulePropertyRef_text(r) ::= "(<ctx(r)>.<r.label>==null ? null : p.GetTokenStream().GetTextFromInterval(NewInterval(<ctx(r)>.<r.label>.GetStart(),<ctx(r)>.<r.label>.GetStop())))"
RulePropertyRef_ctx(r) ::= "<ctx(r)>.<r.label>"
RulePropertyRef_parser(r) ::= "this"
ThisRulePropertyRef_start(r) ::= "localctx.start"
ThisRulePropertyRef_stop(r) ::= "localctx.stop"
ThisRulePropertyRef_text(r) ::= "p._input.getText(NewInterval(localctx.start, p._input.LT(-1)))"
ThisRulePropertyRef_text(r) ::= "p.GetTokenStream().GetTextFromInterval(NewInterval(localctx.GetStart(), p.GetTokenStream().LT(-1)))"
ThisRulePropertyRef_ctx(r) ::= "localctx"
ThisRulePropertyRef_parser(r) ::= "p"
NonLocalAttrRef(s) ::= "getInvokingContext(<s.ruleIndex>).<s.name>"
SetNonLocalAttr(s, rhsChunks) ::= "getInvokingContext(<s.ruleIndex>).<s.name> = <rhsChunks>"
AddToLabelList(a) ::= "<ctx(a.label)>.<a.listName>.push(<labelref(a.label)>);"
AddToLabelList(a) ::= "<ctx(a.label)>.<a.listName> = append(<ctx(a.label)>.<a.listName>, <labelref(a.label)>)"
TokenDecl(t) ::= "p.<t.name> = nil // <TokenLabelType()>"
TokenTypeDecl(t) ::= "<t.name> := 0 // <TokenLabelType()> type"
@ -580,25 +568,22 @@ RuleContextListDecl(rdecl) ::= "p.<rdecl.name> = [] // of <rdecl.ctxName>s"
ContextTokenGetterDecl(t) ::= <<
<t.name>() {
return s.getToken(<parser.name><t.name>, 0)
return s.GetToken(<parser.name><t.name>, 0)
}
>>
// should never be called
ContextTokenListGetterDecl(t) ::= <<
def <t.name>_list(self):
return self.getTokens(<parser.name><t.name>)
return self.GetTokens(<parser.name><t.name>)
>>
ContextTokenListIndexedGetterDecl(t) ::= <<
<t.name>(i int) {
//if(i==undefined) {
// i = null
//}
if 0 > i { // TODO
return s.getTokens(<parser.name><t.name>)
if 0 > i {
return s.GetTokens(<parser.name><t.name>)
} else {
return s.getToken(<parser.name><t.name>, i)
return s.GetToken(<parser.name><t.name>, i)
}
}
@ -606,23 +591,23 @@ ContextTokenListIndexedGetterDecl(t) ::= <<
ContextRuleGetterDecl(r) ::= <<
<r.name>() {
return s.getTypedRuleContext(<r.ctxName>,0)
return s.GetTypedRuleContext(<r.ctxName>,0)
}
>>
// should never be called
ContextRuleListGetterDecl(r) ::= <<
func <r.name>_list(self):
return s.getTypedRuleContexts(<r.ctxName>)
return s.GetTypedRuleContexts(<r.ctxName>)
>>
ContextRuleListIndexedGetterDecl(r) ::= <<
<r.name>(i int) {
if 0 > i { // TODO
return s.getTypedRuleContexts(<r.ctxName>)
if 0 > i {
return s.GetTypedRuleContexts(<r.ctxName>)
} else {
return s.getTypedRuleContext(<r.ctxName>,i)
return s.GetTypedRuleContext(<r.ctxName>,i)
}
}
>>
@ -639,8 +624,8 @@ ImplicitRuleLabel(ruleName) ::= "_<ruleName>"
ImplicitSetLabel(id) ::= "_tset<id>"
ListLabelName(label) ::= "<label>"
CaptureNextToken(d) ::= "<d.varName> = p._input.LT(1)"
CaptureNextTokenType(d) ::= "<d.varName> = p._input.LA(1);"
CaptureNextToken(d) ::= "<d.varName> = p.GetTokenStream().LT(1)"
CaptureNextTokenType(d) ::= "<d.varName> = p.GetTokenStream().LA(1);"
StructDecl(struct,ctorAttrs,attrs,getters,dispatchMethods,interfaces,extensionMembers,
superClass={ParserRuleContext}) ::= <<
@ -651,7 +636,7 @@ type <struct.name> struct {
parser antlr4.IParser
}
func New<struct.name>(parser antlr4.IParser, parent antlr4.IParserRuleContext, invokingState int<struct.ctorAttrs:{a | , <a.name>}>) <struct.name> {
func New<struct.name>(parser antlr4.IParser, parent antlr4.IParserRuleContext, invokingState int<struct.ctorAttrs:{a | , <a.name>}>) *<struct.name> {
var p = new(<struct.name>)
@ -684,7 +669,7 @@ type <struct.name> struct {
parser antlr4.IParser
}
func New<struct.name>(parser antlr4.IParser, ctx antlr4.IParserRuleContext) <struct.name> {
func New<struct.name>(parser antlr4.IParser, ctx antlr4.IParserRuleContext) *<struct.name> {
var p = new(<struct.name>)
@ -737,9 +722,9 @@ labelref(x) ::= "<if(!x.isLocal)>localctx.<endif><x.name>"
ctx(actionChunk) ::= "localctx"
// used for left-recursive rules
recRuleAltPredicate(ruleName,opPrec) ::= "p.precpred(p._ctx, <opPrec>)"
recRuleAltPredicate(ruleName,opPrec) ::= "p.precpred(p.GetParserRuleContext(), <opPrec>)"
recRuleSetReturnAction(src,name) ::= "$<name>=$<src>.<name>"
recRuleSetStopToken() ::= "p._ctx.stop = p._input.LT(-1);"
recRuleSetStopToken() ::= "p.GetParserRuleContext().stop = p.GetTokenStream().LT(-1);"
recRuleAltStartAction(ruleName, ctxName, label) ::= <<
localctx = New<ctxName>Context(this, _parentctx, _parentState)
@ -761,7 +746,7 @@ p.pushNewRecursionContext(localctx, _startState, <parser.name>RULE_<ruleName>)
recRuleReplaceContext(ctxName) ::= <<
localctx = New<ctxName>Context(this, localctx)
p._ctx = localctx
p.GetParserRuleContext() = localctx
_prevctx = localctx
>>
@ -777,7 +762,10 @@ LexerFile(lexerFile, lexer, namedActions) ::= <<
<fileHeader(lexerFile.grammarFileName, lexerFile.ANTLRVersion)>
package parser
import "antlr4"
import (
"antlr4"
"strings"
)
<namedActions.header>
@ -787,10 +775,9 @@ import "antlr4"
Lexer(lexer, atn, actionFuncs, sempredFuncs, superClass) ::= <<
<atn>
var serializedLexerAtn = <atn>
var lexerDeserializer = antlr4.NewATNDeserializer(nil)
var lexerAtn = lexerDeserializer.Deserialize(serializedATN)
var lexerAtn = lexerDeserializer.Deserialize( []rune( serializedLexerAtn ) )
var lexerModeNames = []string{ <lexer.modes:{m| "<m>"}; separator=", ", wrap, anchor> }
var lexerLiteralNames = []string{ <lexer.literalNames:{t | <t>}; null="nil", separator=", ", wrap, anchor> }
@ -848,8 +835,7 @@ const (
SerializedATN(model) ::= <<
<! only one segment, can be inlined !>
var serializedATN = []rune("<model.serialized>")
strings.Join( []string{ "<model.serialized; wrap={",<\n> "}>" }, "" )
>>
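Expanded, the SerializedATN template above yields a wrapped string literal re-joined at init time and handed to the deserializer as runes; the escape values below are made up, not a real ATN:

var serializedLexerAtn = strings.Join([]string{
	"\u0003\u0001\u0002\u0005\u0010", // long literals are wrapped by the
	"\u0004\u0001\u0007\u0002\u0000", // template, then re-joined here
}, "")

var lexerAtn = lexerDeserializer.Deserialize([]rune(serializedLexerAtn))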
@ -52,7 +52,7 @@ public class GoTarget extends Target {
badWords.add("rule");
badWords.add("parserRule");
}
//
// /**
// * {@inheritDoc}
// * <p/>
@ -132,7 +132,7 @@ public class GoTarget extends Target {
// System.out.println("AfTER: " + s);
// return s;
// }
//
// @Override
// public String encodeIntAsCharEscape(int v) {
// if (v < Character.MIN_VALUE || v > Character.MAX_VALUE) {
@ -150,7 +150,6 @@ public class GoTarget extends Target {
// String hex = Integer.toHexString(v|0x10000).substring(1,5);
// String h2 = "\\u"+hex;
//
// System.out.println("Token : " + h2);
// return h2;
// }